replace custom printf with nyx_<level>() wrappers
This introduces generic printf wrappers to replace the various instances of debug_printf, fprintf, QEMU_PT_PRINTF etc. Several more hardcoded printf() calls are still present and should probably be replaced with nyx_debug_p().
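As a rough illustration of the conversion pattern this commit applies (the variable name vaddr in this snippet is invented for the example and not taken from the diff; the format strings mirror ones that appear below):

/* before: several ad-hoc logging styles */
debug_fprintf(stderr, "%s: agent trace buffer at vaddr: %lx\n", __func__, vaddr);
QEMU_PT_PRINTF(CORE_PREFIX, "Payload Address:\t%lx", vaddr);
fprintf(stderr, "[QEMU-Nyx] Error: buffer vaddr (0x%lx) is not page aligned!\n", vaddr);

/* after: one wrapper per log level (defined in nyx/debug.h below) */
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, vaddr);
nyx_debug_p(CORE_PREFIX, "Payload Address:\t%lx", vaddr);
nyx_error("Error: buffer vaddr (0x%lx) is not page aligned!\n", vaddr);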
parent f91ff284ee
commit e83025a145
@@ -2572,7 +2572,7 @@ int kvm_cpu_exec(CPUState *cpu)
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
#else
debug_fprintf(stderr, "Got KVM_EXIT_SHUTDOWN while in fuzzing mode => panic\n");
nyx_debug("Got KVM_EXIT_SHUTDOWN while in fuzzing mode => panic\n");
handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
#endif
@@ -2693,7 +2693,7 @@ int kvm_cpu_exec(CPUState *cpu)
ret = kvm_arch_handle_exit(cpu, run);
assert(ret == 0);
#else
debug_fprintf(stderr, "kvm_arch_handle_exit(%d) => panic\n", run->exit_reason);
nyx_debug("kvm_arch_handle_exit(%d) => panic\n", run->exit_reason);
ret = kvm_arch_handle_exit(cpu, run);
if (ret != 0)
handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
@@ -55,7 +55,7 @@ static void volatile_memcpy(void* dst, void* src, size_t size){
}

void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){
debug_fprintf(stderr, "%s\n", __func__);
nyx_trace();
volatile_memset((void*) auxilary_buffer, 0, sizeof(auxilary_buffer_t));

VOLATILE_WRITE_16(auxilary_buffer->header.version, QEMU_PT_VERSION);
@@ -220,7 +220,7 @@ void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8
VOLATILE_WRITE_8(auxilary_buffer->result.state, state);
}
else{
fprintf(stderr, "WARNING: auxilary_buffer pointer is zero\n");
nyx_error("WARNING: auxilary_buffer pointer is zero\n");
}
}
nyx/debug.h (33 changed lines)
@@ -8,10 +8,11 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/error-report.h"

#define ENABLE_BACKTRACES

#define QEMU_PT_PRINT_PREFIX "[QEMU-NYX] "
#define NYX_LOG_PREFIX "[QEMU-NYX] "
#define CORE_PREFIX "Core: "
#define MEM_PREFIX "Memory: "
#define RELOAD_PREFIX "Reload: "
@@ -20,40 +21,28 @@
#define REDQUEEN_PREFIX "Redqueen: "
#define DISASM_PREFIX "Disasm: "
#define PAGE_CACHE_PREFIX "PageCache: "
#define INTERFACE_PREFIX "Interface: "
#define NESTED_VM_PREFIX "Nested: "


#define DEBUG_VM_PREFIX "Debug: "

#define COLOR "\033[1;35m"
#define ENDC "\033[0m"


#ifdef NYX_DEBUG
/*
* qemu_log() is the standard logging enabled with -D
* qemu_log_mask() is activated with additional -t nyx option
*/
#define debug_printf(format, ...) qemu_log_mask(LOG_NYX, QEMU_PT_PRINT_PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define debug_fprintf(fd, format, ...) qemu_log_mask(LOG_NYX, QEMU_PT_PRINT_PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)

#define QEMU_PT_PRINTF(PREFIX, format, ...) qemu_log_mask(LOG_NYX, QEMU_PT_PRINT_PREFIX COLOR PREFIX format ENDC "\n", ##__VA_ARGS__)
#define QEMU_PT_PRINTF_DBG(PREFIX, format, ...) qemu_log_mask(LOG_NYX, QEMU_PT_PRINT_PREFIX PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define QEMU_PT_PRINTF_DEBUG(format, ...) qemu_log_mask(LOG_NYX, QEMU_PT_PRINT_PREFIX DEBUG_VM_PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
//#define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX format, ##__VA_ARGS__)
#define nyx_debug_p(PREFIX, format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX PREFIX format, ##__VA_ARGS__)
#else
#define debug_printf(format, ...)
#define debug_fprintf(fd, format, ...)
#define QEMU_PT_PRINTF(PREFIX, format, ...)
#define QEMU_PT_PRINTF_DBG(PREFIX, format, ...)
#define QEMU_PT_PRINTF_DEBUG(format, ...)
#define nyx_debug(...)
#define nyx_debug_p(...)
#endif

#define nyx_printf(format, ...) qemu_log(format, ##__VA_ARGS__)
#define nyx_error(format, ...) error_printf(format, ##__VA_ARGS__)
#define nyx_trace(format, ...) nyx_debug("=> %s\n", __func__)


#ifdef ENABLE_BACKTRACES

void qemu_backtrace(void);
void init_crash_handler(void);
void hexdump_kafl(const void* data, size_t size);

#endif
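Note on the macros above: with NYX_DEBUG defined, the wrappers are thin front-ends to QEMU's log layer, so a call site that appears later in this diff, such as

nyx_debug_p(CORE_PREFIX, "Payload Address:\t%lx", hypercall_arg);

expands (via string-literal concatenation) to roughly

qemu_log_mask(LOG_NYX, "[QEMU-NYX] " "Core: " "Payload Address:\t%lx", hypercall_arg);

while nyx_error() forwards to error_printf(), nyx_printf() to qemu_log(), and nyx_trace() logs "=> <current function>". Without NYX_DEBUG, nyx_debug() and nyx_debug_p() compile to nothing.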
@@ -289,7 +289,7 @@ void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder, bool

/* sanity check */
if(!folder_exits(folder)){
QEMU_PT_PRINTF(RELOAD_PREFIX,"Folder %s does not exist...failed!", folder);
nyx_debug_p(RELOAD_PREFIX,"Folder %s does not exist...failed!", folder);
assert(0);
}

@@ -317,7 +317,7 @@ static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* fo
assert(self != NULL);
wait_for_snapshot(folder);

QEMU_PT_PRINTF(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);
nyx_debug_p(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);

rcu_read_lock();

@@ -363,8 +363,8 @@ void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* fol
void fast_reload_create_in_memory(fast_reload_t* self){

assert(self != NULL);
debug_fprintf(stderr, "===>%s\n", __func__);
QEMU_PT_PRINTF(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM CURRENT VM STATE");
nyx_trace();
nyx_debug_p(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM CURRENT VM STATE");

rcu_read_lock();
@@ -118,20 +118,20 @@ static inline void perform_task_debug_mode(fast_vm_reload_sync_t* self, FastRelo

static inline void create_root_snapshot(void){
if (GET_GLOBAL_STATE()->fast_reload_enabled){
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_enabled: TRUE\n");
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_enabled: TRUE\n");
if (GET_GLOBAL_STATE()->fast_reload_mode){
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_mode: TRUE\n");
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_mode: TRUE\n");
/* we've loaded an external snapshot folder - so do nothing and don't create any new snapshot files */
}
else{
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_mode: FALSE\n");
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_mode: FALSE\n");
/* store the current state as a snapshot folder */
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_path, false);
}
}
else{
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_enabled: FALSE\n");
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_enabled: FALSE\n");
/* so we haven't set a path for our snapshot files - just store everything in memory */
fast_reload_create_in_memory(get_fast_reload_snapshot());
}
@@ -115,22 +115,22 @@ bool apply_capabilities(CPUState *cpu){
//X86CPU *cpux86 = X86_CPU(cpu);
//CPUX86State *env = &cpux86->env;

debug_fprintf(stderr, "%s: agent supports timeout detection: %d\n", __func__, GET_GLOBAL_STATE()->cap_timeout_detection);
debug_fprintf(stderr, "%s: agent supports only-reload mode: %d\n", __func__, GET_GLOBAL_STATE()->cap_only_reload_mode);
debug_fprintf(stderr, "%s: agent supports compile-time tracing: %d\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing );
nyx_debug("%s: agent supports timeout detection: %d\n", __func__, GET_GLOBAL_STATE()->cap_timeout_detection);
nyx_debug("%s: agent supports only-reload mode: %d\n", __func__, GET_GLOBAL_STATE()->cap_only_reload_mode);
nyx_debug("%s: agent supports compile-time tracing: %d\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing );

if(GET_GLOBAL_STATE()->cap_compile_time_tracing){
GET_GLOBAL_STATE()->pt_trace_mode = false;

debug_fprintf(stderr, "%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
kvm_arch_get_registers_fast(cpu);

debug_printf("--------------------------\n");
debug_printf("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
debug_printf("GET_GLOBAL_STATE()->shared_bitmap_fd: %d\n", GET_GLOBAL_STATE()->shared_bitmap_fd);
debug_printf("GET_GLOBAL_STATE()->shared_bitmap_size: %x\n", GET_GLOBAL_STATE()->shared_bitmap_size);
debug_printf("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3);
debug_printf("--------------------------\n");
nyx_debug("--------------------------\n");
nyx_debug("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_fd: %d\n", GET_GLOBAL_STATE()->shared_bitmap_fd);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_size: %x\n", GET_GLOBAL_STATE()->shared_bitmap_size);
nyx_debug("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3);
nyx_debug("--------------------------\n");

if (GET_GLOBAL_STATE()->input_buffer_size != GET_GLOBAL_STATE()->shared_payload_buffer_size){
resize_shared_memory(GET_GLOBAL_STATE()->input_buffer_size, &GET_GLOBAL_STATE()->shared_payload_buffer_size, NULL, GET_GLOBAL_STATE()->shared_payload_buffer_fd);
@@ -153,10 +153,10 @@ bool apply_capabilities(CPUState *cpu){
}

if(GET_GLOBAL_STATE()->cap_ijon_tracing){
debug_printf("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);

if(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr&0xfff){
fprintf(stderr, "[QEMU-Nyx] Error: guest's ijon buffer v_addr (0x%lx) is not page aligned!\n", GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
error_printf("[QEMU-Nyx] Error: guest's ijon buffer v_addr (0x%lx) is not page aligned!\n", GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
return false;
}
@@ -76,7 +76,7 @@ void pt_setup_enable_hypercalls(void){
}

void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end){
debug_fprintf(stderr, "--> %s\n", __func__);
nyx_trace();
if (filter_id < INTEL_PT_MAX_RANGES){

GET_GLOBAL_STATE()->pt_ip_filter_configured[filter_id] = true;
@@ -93,7 +93,7 @@ bool setup_snapshot_once = false;


bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
//fprintf(stderr, "%s\n", __func__);
//nyx_trace();
/*
kvm_arch_get_registers(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -136,9 +136,9 @@ bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint
//printf("DONE!\n");
/*
qemu_mutex_lock_iothread();
QEMU_PT_PRINTF(CORE_PREFIX, "...GOOOOOO!!!!");
nyx_debug_p(CORE_PREFIX, "...GOOOOOO!!!!");
fast_reload_restore(get_fast_reload_snapshot());
QEMU_PT_PRINTF(CORE_PREFIX, "...DONE!!!!");
nyx_debug_p(CORE_PREFIX, "...DONE!!!!");
qemu_mutex_unlock_iothread();
*/
GET_GLOBAL_STATE()->in_fuzzing_mode = true;
@@ -171,7 +171,7 @@ static void acquire_print_once(CPUState *cpu){
kvm_arch_get_registers(cpu);
//X86CPU *x86_cpu = X86_CPU(cpu);
//CPUX86State *env = &x86_cpu->env;
debug_fprintf(stderr, "handle_hypercall_kafl_acquire at:%lx\n", get_rip(cpu));
nyx_debug("handle_hypercall_kafl_acquire at:%lx\n", get_rip(cpu));
//disassemble_at_rip(STDERR_FILENO, get_rip(cpu), cpu, env->cr[3]);
}
}
@@ -193,7 +193,7 @@ void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t
}

static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
debug_printf("------------ %s\n", __func__);
nyx_trace();

if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_GET_PAYLOAD")){
return;
@@ -205,11 +205,11 @@ static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uin
}

if(hypercall_enabled && !setup_snapshot_once){
QEMU_PT_PRINTF(CORE_PREFIX, "Payload Address:\t%lx", hypercall_arg);
nyx_debug_p(CORE_PREFIX, "Payload Address:\t%lx", hypercall_arg);
kvm_arch_get_registers(cpu);
CPUX86State *env = &(X86_CPU(cpu))->env;
GET_GLOBAL_STATE()->parent_cr3 = env->cr[3] & 0xFFFFFFFFFFFFF000ULL;
QEMU_PT_PRINTF(CORE_PREFIX, "Payload CR3:\t%lx", (uint64_t)GET_GLOBAL_STATE()->parent_cr3 );
nyx_debug_p(CORE_PREFIX, "Payload CR3:\t%lx", (uint64_t)GET_GLOBAL_STATE()->parent_cr3 );
//print_48_pagetables(GET_GLOBAL_STATE()->parent_cr3);

if(hypercall_arg&0xFFF){
@@ -239,7 +239,7 @@ static void handle_hypercall_kafl_req_stream_data(struct kvm_run *run, CPUState
kvm_arch_get_registers(cpu);
/* address has to be page aligned */
if((hypercall_arg&0xFFF) != 0){
debug_fprintf(stderr, "%s: ERROR -> address is not page aligned!\n", __func__);
nyx_debug("%s: ERROR -> address is not page aligned!\n", __func__);
set_return_value(cpu, 0xFFFFFFFFFFFFFFFFULL);
}
else{
@@ -270,7 +270,7 @@ static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUS
kvm_arch_get_registers(cpu);
/* address has to be page aligned */
if((hypercall_arg&0xFFF) != 0){
debug_fprintf(stderr, "%s: ERROR -> address is not page aligned!\n", __func__);
nyx_debug("%s: ERROR -> address is not page aligned!\n", __func__);
set_return_value(cpu, 0xFFFFFFFFFFFFFFFFUL);
}
else{
@@ -310,12 +310,12 @@ static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cp
read_virtual_memory(hypercall_arg, (uint8_t*)&buffer, sizeof(buffer), cpu);

if(buffer[2] >= 2){
QEMU_PT_PRINTF(CORE_PREFIX, "%s: illegal range=%ld\n", __func__, buffer[2]);
nyx_debug_p(CORE_PREFIX, "%s: illegal range=%ld\n", __func__, buffer[2]);
return;
}

if(GET_GLOBAL_STATE()->pt_ip_filter_configured[buffer[2]]){
QEMU_PT_PRINTF(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 1) - %ld", buffer[2]);
nyx_debug_p(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 1) - %ld", buffer[2]);
return;
}

@@ -323,11 +323,11 @@ static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cp
GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]] = buffer[0];
GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]] = buffer[1];
GET_GLOBAL_STATE()->pt_ip_filter_configured[buffer[2]] = true;
QEMU_PT_PRINTF(CORE_PREFIX, "Configuring agent-provided address ranges:");
QEMU_PT_PRINTF(CORE_PREFIX, "\tIP%ld: %lx-%lx [ENABLED]", buffer[2], GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]], GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]]);
nyx_debug_p(CORE_PREFIX, "Configuring agent-provided address ranges:");
nyx_debug_p(CORE_PREFIX, "\tIP%ld: %lx-%lx [ENABLED]", buffer[2], GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]], GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]]);
}
else{
QEMU_PT_PRINTF(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 2)");
nyx_debug_p(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 2)");
}

}
@@ -338,7 +338,7 @@ static void release_print_once(CPUState *cpu){
kvm_arch_get_registers(cpu);
//X86CPU *x86_cpu = X86_CPU(cpu);
//CPUX86State *env = &x86_cpu->env;
debug_fprintf(stderr, "handle_hypercall_kafl_release at:%lx\n", get_rip(cpu));
nyx_debug("handle_hypercall_kafl_release at:%lx\n", get_rip(cpu));
//disassemble_at_rip(STDERR_FILENO, get_rip(cpu), cpu, env->cr[3]);
}
}
@@ -384,10 +384,10 @@ void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hype
}

void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page){
//fprintf(stderr, "--> %s\n", __func__);
//nyx_trace();
kvm_arch_get_registers_fast(cpu);

debug_fprintf(stderr, "%s --> %lx\n", __func__, get_rip(cpu));
nyx_debug("%s --> %lx\n", __func__, get_rip(cpu));

kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_MTF);

@@ -396,13 +396,13 @@ void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint
page_cache_fetch(GET_GLOBAL_STATE()->page_cache, page, &success, false);
if(success){

debug_fprintf(stderr, "%s: SUCCESS: %d\n", __func__, success);
nyx_debug("%s: SUCCESS: %d\n", __func__, success);
kvm_remove_all_breakpoints(cpu);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);

}
else{
debug_fprintf(stderr, "%s: FAIL: %d\n", __func__, success);
nyx_debug("%s: FAIL: %d\n", __func__, success);
//assert(false);

kvm_remove_all_breakpoints(cpu);
@@ -415,7 +415,7 @@ void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint

static inline void set_page_dump_bp(CPUState *cpu, uint64_t cr3, uint64_t addr){

debug_fprintf(stderr, "\n\n%s %lx %lx\n\n", __func__, cr3, addr);
nyx_debug("\n\n%s %lx %lx\n\n", __func__, cr3, addr);
kvm_remove_all_breakpoints(cpu);
kvm_insert_breakpoint(cpu, addr, 1, 1);
kvm_update_guest_debug(cpu, 0);
@@ -426,7 +426,7 @@ static inline void set_page_dump_bp(CPUState *cpu, uint64_t cr3, uint64_t addr){

static void handle_hypercall_kafl_cr3(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(hypercall_enabled){
//QEMU_PT_PRINTF(CORE_PREFIX, "CR3 address:\t\t%lx", hypercall_arg);
//nyx_debug_p(CORE_PREFIX, "CR3 address:\t\t%lx", hypercall_arg);
pt_set_cr3(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL, false);
if(GET_GLOBAL_STATE()->dump_page){
set_page_dump_bp(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL, GET_GLOBAL_STATE()->dump_page_addr);
@@ -441,7 +441,7 @@ static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cp
}

if(hypercall_enabled){
QEMU_PT_PRINTF(CORE_PREFIX, "Panic address:\t%lx", hypercall_arg);
nyx_debug_p(CORE_PREFIX, "Panic address:\t%lx", hypercall_arg);

switch (get_current_mem_mode(cpu)){
case mm_32_protected:
@@ -462,7 +462,7 @@ static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cp

static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(hypercall_enabled){
QEMU_PT_PRINTF(CORE_PREFIX, "kASAN address:\t%lx", hypercall_arg);
nyx_debug_p(CORE_PREFIX, "kASAN address:\t%lx", hypercall_arg);

switch (get_current_mem_mode(cpu)){
case mm_32_protected:
@@ -489,10 +489,10 @@ void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hy
#ifdef PANIC_DEBUG
if(hypercall_arg){
//fprintf(stderr, "Panic in user mode!\n");
//QEMU_PT_PRINTF(CORE_PREFIX, "Panic in user mode!");
//nyx_debug_p(CORE_PREFIX, "Panic in user mode!");
} else{
debug_fprintf(stderr, "Panic in kernel mode!\n");
QEMU_PT_PRINTF(CORE_PREFIX, "Panic in kernel mode!");
nyx_debug("Panic in kernel mode!\n");
nyx_debug_p(CORE_PREFIX, "Panic in kernel mode!");
//assert(0);
}
#endif
@@ -596,18 +596,18 @@ static void handle_hypercall_kafl_kasan(struct kvm_run *run, CPUState *cpu, uint
if(hypercall_enabled){
#ifdef PANIC_DEBUG
if(hypercall_arg){
QEMU_PT_PRINTF(CORE_PREFIX, "ASan notification in user mode!");
nyx_debug_p(CORE_PREFIX, "ASan notification in user mode!");
} else{
QEMU_PT_PRINTF(CORE_PREFIX, "ASan notification in kernel mode!");
nyx_debug_p(CORE_PREFIX, "ASan notification in kernel mode!");
}
#endif
if(fast_reload_snapshot_exists(get_fast_reload_snapshot())){
synchronization_lock_asan_found();
//synchronization_stop_vm_kasan(cpu);
} else{
QEMU_PT_PRINTF(CORE_PREFIX, "KASAN detected during initialization of stage 1 or stage 2 loader");
nyx_debug_p(CORE_PREFIX, "KASAN detected during initialization of stage 1 or stage 2 loader");
//hypercall_snd_char(KAFL_PROTO_KASAN);
QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_KASAN");
nyx_debug_p("Protocol - SEND: KAFL_PROTO_KASAN");

}
}
@@ -620,13 +620,13 @@ static void handle_hypercall_kafl_lock(struct kvm_run *run, CPUState *cpu, uint6
}

if(!GET_GLOBAL_STATE()->fast_reload_pre_image){
QEMU_PT_PRINTF(CORE_PREFIX, "Skipping pre image creation (hint: set pre=on) ...");
nyx_debug_p(CORE_PREFIX, "Skipping pre image creation (hint: set pre=on) ...");
return;
}

QEMU_PT_PRINTF(CORE_PREFIX, "Creating pre image snapshot <%s> ...", GET_GLOBAL_STATE()->fast_reload_pre_path);
nyx_debug_p(CORE_PREFIX, "Creating pre image snapshot <%s> ...", GET_GLOBAL_STATE()->fast_reload_pre_path);

debug_printf("Creating pre image snapshot");
nyx_debug("Creating pre image snapshot");
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_PRE);
}

@@ -666,20 +666,20 @@ static void handle_hypercall_kafl_user_submit_mode(struct kvm_run *run, CPUState

switch(hypercall_arg){
case KAFL_MODE_64:
QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_64 ...");
nyx_debug_p(CORE_PREFIX, "target runs in KAFL_MODE_64 ...");
GET_GLOBAL_STATE()->disassembler_word_width = 64;
break;
case KAFL_MODE_32:
QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_32 ...");
nyx_debug_p(CORE_PREFIX, "target runs in KAFL_MODE_32 ...");
GET_GLOBAL_STATE()->disassembler_word_width = 32;
break;
case KAFL_MODE_16:
QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_16 ...");
nyx_debug_p(CORE_PREFIX, "target runs in KAFL_MODE_16 ...");
GET_GLOBAL_STATE()->disassembler_word_width = 16;
abort(); /* not implemented in this version (due to hypertrash hacks) */
break;
default:
QEMU_PT_PRINTF(CORE_PREFIX, "target runs in unkown mode...");
nyx_debug_p(CORE_PREFIX, "target runs in unkown mode...");
GET_GLOBAL_STATE()->disassembler_word_width = 0;
abort(); /* not implemented in this version (due to hypertrash hacks) */
break;
@@ -794,7 +794,7 @@ static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu,
void* page = malloc(PAGE_SIZE);
uint32_t written = 0;

QEMU_PT_PRINTF(CORE_PREFIX, "%s: dump %d bytes to %s (append=%u)",
nyx_debug_p(CORE_PREFIX, "%s: dump %d bytes to %s (append=%u)",
__func__, bytes, host_path, file_obj.append);

while (bytes > 0) {
@@ -153,7 +153,7 @@ static int nyx_create_payload_buffer(nyx_interface_state *s, uint64_t buffer_siz
fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, buffer_size) == 0);
stat(file, &st);
QEMU_PT_PRINTF(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", buffer_size, st.st_size);
nyx_debug_p(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", buffer_size, st.st_size);

assert(buffer_size == st.st_size);
ptr = mmap(0, buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
@@ -211,7 +211,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
char* tmp;

if (!folder_exits(workdir)){
fprintf(stderr, "%s does not exist...\n", workdir);
nyx_error("Error: %s does not exist...\n", workdir);
return false;
}

@@ -225,7 +225,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/interface_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
@@ -233,7 +233,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/payload_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
@@ -244,7 +244,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/bitmap_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
@@ -254,7 +254,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/ijon_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
@@ -264,7 +264,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/page_cache.lock", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...", tmp);
nyx_error("Error: %s does not exist...", tmp);
free(tmp);
return false;
}
@@ -272,7 +272,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/page_cache.addr", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
@@ -280,7 +280,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/page_cache.dump", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
@@ -291,7 +291,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

assert(asprintf(&tmp, "%s/redqueen_workdir_%d/", workdir, id) != -1);
if (!folder_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
nyx_error("%s does not exist...\n", tmp);
free(tmp);
return false;
}
@@ -316,7 +316,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
assert(asprintf(&tmp, "%s/aux_buffer_%d", workdir, id) != -1);
/*
if (file_exits(tmp)){
QEMU_PT_PRINTF(INTERFACE_PREFIX, "%s does not already exists...", tmp);
nyx_debug_p(INTERFACE_PREFIX, "%s does not already exists...", tmp);
free(tmp);
return false;
}
@@ -339,12 +339,12 @@ static void check_ipt_range(uint8_t i){
ret = ioctl(kvm, KVM_VMX_PT_GET_ADDRN, NULL);

if(ret == -1){
fprintf(stderr, "[QEMU-Nyx] Error: Multi range tracing is not supported!\n");
nyx_error("Error: Multi range tracing is not supported!\n");
exit(1);
}

if(ret < (i+1)){
fprintf(stderr, "[QEMU-Nyx] Error: CPU supports only %d IP filters!\n", ret);
nyx_error("Error: CPU supports only %d IP filters!\n", ret);
exit(1);
}
close(kvm);
@@ -355,7 +355,7 @@ static void check_available_ipt_ranges(nyx_interface_state* s){

int kvm_fd = qemu_open("/dev/kvm", O_RDWR);
if (kvm_fd == -1) {
fprintf(stderr, "[QEMU-Nyx] Error: could not access KVM kernel module: %m\n");
nyx_error("Error: could not access KVM kernel module: %m\n");
exit(1);
}

@@ -381,7 +381,6 @@ static bool verify_sharedir_state(nyx_interface_state *s, Error **errp){
char* sharedir = s->sharedir;

if (!folder_exits(sharedir)){
QEMU_PT_PRINTF(INTERFACE_PREFIX, "%s does not exist...", sharedir);
return false;
}
return true;
@@ -397,7 +396,7 @@ static void nyx_realize(DeviceState *dev, Error **errp){


if(s->worker_id == 0xFFFF){
fprintf(stderr, "[QEMU-Nyx] Error: Invalid worker id...\n");
nyx_error("Error: Invalid worker id...\n");
exit(1);
}

@@ -407,12 +406,12 @@ static void nyx_realize(DeviceState *dev, Error **errp){
GET_GLOBAL_STATE()->worker_id = s->worker_id;

if (!s->workdir || !verify_workdir_state(s, errp)){
fprintf(stderr, "[QEMU-Nyx] Error: work dir...\n");
nyx_error("Error: work dir...\n");
exit(1);
}

if (!s->sharedir || !verify_sharedir_state(s, errp)){
fprintf(stderr, "Invalid sharedir...\n");
nyx_error("Warning: Invalid sharedir...\n");
//abort();
}
else{
@@ -198,14 +198,14 @@ static void write_address(uint64_t address, uint64_t size, uint64_t prot){
/* do not print guard pages or empty pages without any permissions */
if(last_address && (CHECK_BIT(last_prot, 1) || !CHECK_BIT(last_prot, 63))){
if(CHECK_BIT(last_prot, 1) && !CHECK_BIT(last_prot, 63)){
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c [WARNING]",
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c [WARNING]",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63)? 'X' : '-');
}
else{
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c",
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
@@ -359,10 +359,10 @@ void kvm_nested_get_info(CPUState *cpu){
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);

struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx", saved_vmcs->host_ia32_efer);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx", saved_vmcs->host_ia32_efer);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0);

return;

@@ -383,28 +383,28 @@ void kvm_nested_get_info(CPUState *cpu){
if (saved_vmcs->host_cr4 & CR4_PAE_MASK) {
if (saved_vmcs->host_ia32_efer & (1 << 10)) {
if (saved_vmcs->host_cr0 & CR4_LA57_MASK) {
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_la57");
nyx_debug_p(NESTED_VM_PREFIX, "mem_info_la57");
abort();
//mem_info_la57(mon, env);
} else {
QEMU_PT_PRINTF(NESTED_VM_PREFIX, " ==== L1 Page Tables ====");
nyx_debug_p(NESTED_VM_PREFIX, " ==== L1 Page Tables ====");
print_48_paging(saved_vmcs->host_cr3);

if(saved_vmcs->ept_pointer){
QEMU_PT_PRINTF(NESTED_VM_PREFIX, " ==== L2 Page Tables ====");
nyx_debug_p(NESTED_VM_PREFIX, " ==== L2 Page Tables ====");
print_48_paging(saved_vmcs->ept_pointer);
}
//mem_info_la48(mon, env);
}
}
else{
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_pae32");
nyx_debug_p(NESTED_VM_PREFIX, "mem_info_pae32");
abort();
//mem_info_pae32(mon, env);
}
}
else {
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_32");
nyx_debug_p(NESTED_VM_PREFIX, "mem_info_32");
abort();
//mem_info_32(mon, env);
}
@@ -209,17 +209,17 @@ bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t sh

//printf("phys_addr -> %lx\n", phys_addr);

debug_fprintf(stderr, "%s: addr => %lx phys_addr => %lx\n", __func__, addr, phys_addr);
nyx_debug("%s: addr => %lx phys_addr => %lx\n", __func__, addr, phys_addr);

QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if(!memcmp(block->idstr, "pc.ram", 6)){
/* TODO: put assert calls here */
if (munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE) == -1) {
fprintf(stderr, "%s: munmap failed!\n", __func__);
nyx_error("%s: munmap failed!\n", __func__);
assert(false);
}
if (mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE)) == MAP_FAILED) {
fprintf(stderr, "%s: mmap failed!\n", __func__);
nyx_error("%s: mmap failed!\n", __func__);
assert(false);
}

@@ -307,14 +307,14 @@ bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu){
if(!memcmp(block->idstr, "pc.ram", 6)){
//printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE));
if(munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE) == -1){
fprintf(stderr, "munmap failed!\n");
nyx_error("munmap failed!\n");
//exit(1);
assert(false);
}
//printf("MMAP: %lx\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE)));

if(mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE)) == MAP_FAILED){
fprintf(stderr, "mmap failed!\n");
nyx_error("mmap failed!\n");
//exit(1);
assert(false);
}
@@ -356,14 +356,14 @@ bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUSta
phys_addr = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs);

if (phys_addr == INVALID_ADDRESS){
QEMU_PT_PRINTF(MEM_PREFIX, "phys_addr == -1:\t%lx", address);
nyx_debug_p(MEM_PREFIX, "phys_addr == -1:\t%lx", address);
return false;
}

phys_addr += (address & ~x86_64_PAGE_MASK);
res = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, l, true);
if (res != MEMTX_OK){
QEMU_PT_PRINTF(MEM_PREFIX, "!MEMTX_OK:\t%lx", address);
nyx_debug_p(MEM_PREFIX, "!MEMTX_OK:\t%lx", address);
return false;
}

@@ -830,7 +830,7 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat
phys_addr_2 = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs);
#endif
phys_addr = (hwaddr)get_paging_phys_addr(cpu, env->cr[3], address) & 0xFFFFFFFFFFFFF000ULL;// != 0xFFFFFFFFFFFFFFFFULL)
//QEMU_PT_PRINTF(MEM_PREFIX, "TRANSLATE: %lx -> %lx == %lx", address, phys_addr, phys_addr_2);
//nyx_debug_p(MEM_PREFIX, "TRANSLATE: %lx -> %lx == %lx", address, phys_addr, phys_addr_2);

#ifdef DEBUG_48BIT_WALK
assert(phys_addr == phys_addr_2);
@@ -843,8 +843,8 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat
len_skipped = size-amount_copied;
}

fprintf(stderr, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page);
QEMU_PT_PRINTF(MEM_PREFIX, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page);
nyx_error("Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page);
nyx_debug_p(MEM_PREFIX, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page);
memset( data+amount_copied, ' ', len_skipped);
address += len_skipped;
amount_copied += len_skipped;
@@ -859,7 +859,7 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat

MemTxResult txt = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, tmp_buf, len_to_copy, 0);
if(txt){
QEMU_PT_PRINTF(MEM_PREFIX, "Warning, read failed:\t%lx (%lx)", address, phys_addr);
nyx_debug_p(MEM_PREFIX, "Warning, read failed:\t%lx (%lx)", address, phys_addr);
}

memcpy(data+amount_copied, tmp_buf, len_to_copy);
@@ -902,7 +902,7 @@ bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t c
int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
if(phys_addr == INVALID_ADDRESS || address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0)){
if(phys_addr != INVALID_ADDRESS){
fprintf(stderr, "%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
nyx_error("%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
}
return false;
}
@@ -916,7 +916,7 @@ bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu){
int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
if(phys_addr == 0xffffffffffffffffULL || address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0)){
if(phys_addr != 0xffffffffffffffffULL){
fprintf(stderr, "%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
nyx_error("%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
}
}
return true;
@@ -943,11 +943,11 @@ uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr
int count = cs_disasm(handle, code_ptr, code_size, address, 5, &insn);
if(count > 0){
for(int i = 0; i < count; i++){
fprintf(stderr, "=> 0x%"PRIx64":\t%s\t\t%s\n", insn[i].address, insn[i].mnemonic, insn[i].op_str);
nyx_error("=> 0x%"PRIx64":\t%s\t\t%s\n", insn[i].address, insn[i].mnemonic, insn[i].op_str);
}
}
else{
fprintf(stderr, "ERROR in %s at %lx (cr3: %lx)\n", __func__, address, cr3);
nyx_error("ERROR in %s at %lx (cr3: %lx)\n", __func__, address, cr3);
}
@@ -87,7 +87,7 @@ void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, ui
kvm_arch_get_registers(cpu);

if((uint64_t)run->hypercall.args[0]){
QEMU_PT_PRINTF(CORE_PREFIX, "handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t ADDRESS:\t%lx\t CR3:\t%lx", (uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1], (uint64_t)run->hypercall.args[2]);
nyx_debug_p(CORE_PREFIX, "handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t ADDRESS:\t%lx\t CR3:\t%lx", (uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1], (uint64_t)run->hypercall.args[2]);
}
else{
abort();
@@ -103,7 +103,7 @@ void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, ui
if(i == 0){
htos_config = buffer[i];
}
QEMU_PT_PRINTF(CORE_PREFIX, "ADDRESS: %lx", buffer[i]);
nyx_debug_p(CORE_PREFIX, "ADDRESS: %lx", buffer[i]);
remap_payload_slot(buffer[i], i, cpu);
}

@@ -178,7 +178,7 @@ void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, ui

return;
//assert(false);
QEMU_PT_PRINTF_DEBUG("%s %d", __func__, init_state);
nyx_debug_p("%s %d", __func__, init_state);
//sleep(10);

/* magic */
@@ -193,7 +193,7 @@ void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, ui

//synchronization_disable_pt(cpu);

QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_RELEASE");
nyx_debug_p("Protocol - SEND: KAFL_PROTO_RELEASE");

} else {

@@ -206,7 +206,7 @@ void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, ui
//synchronization_disable_pt(cpu);


QEMU_PT_PRINTF_DEBUG("%s UNLOCKED", __func__);
nyx_debug_p("%s UNLOCKED", __func__);

// printf("INTEL PT is disabled!\n");

@@ -217,7 +217,7 @@ void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, ui
//fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();

QEMU_PT_PRINTF_DEBUG("%s UNLOCKED 2", __func__);
nyx_debug_p("%s UNLOCKED 2", __func__);


//kvm_cpu_synchronize_state(cpu);
@@ -306,7 +306,7 @@ uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool
return self->last_addr;
}

//QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "page_cache_fetch %lx", page);
//nyx_debug_p(PAGE_CACHE_PREFIX, "page_cache_fetch %lx", page);

khiter_t k;
k = kh_get(PC_CACHE, self->lookup, page);
@@ -382,9 +382,9 @@ page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_w
self->last_addr = 0xFFFFFFFFFFFFFFFF;

#ifndef STANDALONE_DECODER
QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "%s (%s - %s)", __func__, tmp1, tmp2);
nyx_debug_p(PAGE_CACHE_PREFIX, "%s (%s - %s)", __func__, tmp1, tmp2);
#else
QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "%s (%s - %s) WORD_WIDTH: %d", __func__, tmp1, tmp2, disassembler_word_width);
nyx_debug_p(PAGE_CACHE_PREFIX, "%s (%s - %s) WORD_WIDTH: %d", __func__, tmp1, tmp2, disassembler_word_width);
#endif

free(tmp3);
@@ -72,7 +72,7 @@ void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs){
memset(&curr_instruction_code[0], 0, MAX_INSTRUCTION_SIZE);

for(size_t i=0; i < self->num_patches; i++){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
//nyx_debug_p(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
if( read_virtual_memory(addrs[i], &curr_instruction_code[0], MAX_INSTRUCTION_SIZE, self->cpu) ) {
size_t size =_patcher_disassemble_size(self, &curr_instruction_code[0], addrs[i], X86_INS_CMP);
assert(size != 0); //csopen failed, shouldn't happen
@@ -103,12 +103,12 @@ bool patcher_validate_patches(patcher_t *self){
should_value = &self->patches[i].orig_bytes[0];
}

QEMU_PT_PRINTF(REDQUEEN_PREFIX, "Validating, mem:");
nyx_debug_p(REDQUEEN_PREFIX, "Validating, mem:");
print_hexdump(&buf[0], self->patches[i].size);
QEMU_PT_PRINTF(REDQUEEN_PREFIX, "should_be:");
nyx_debug_p(REDQUEEN_PREFIX, "should_be:");
print_hexdump(should_value, self->patches[i].size);
if(0 != memcmp(&buf[0], should_value, self->patches[i].size)){
QEMU_PT_PRINTF(REDQUEEN_PREFIX, "validating patches failed self->is_currently_applied = %d", self->is_currently_applied);
nyx_debug_p(REDQUEEN_PREFIX, "validating patches failed self->is_currently_applied = %d", self->is_currently_applied);
return false;
}
}
nyx/pt.c (20 changed lines)
@@ -56,7 +56,7 @@ static void pt_set(CPUState *cpu, run_on_cpu_data arg){
static inline int pt_cmd_hmp_context(CPUState *cpu, uint64_t cmd){
cpu->pt_ret = -1;
if(pt_hypercalls_enabled()){
QEMU_PT_PRINTF(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing mode is enabled (-kafl)!");
nyx_debug_p(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing mode is enabled (-kafl)!");
}
else{
cpu->pt_cmd = cmd;
@@ -170,7 +170,7 @@ int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode){
return -EINVAL;
}
if (GET_GLOBAL_STATE()->pt_c3_filter && GET_GLOBAL_STATE()->pt_c3_filter != val){
//QEMU_PT_PRINTF(PT_PREFIX, "Reconfigure CR3-Filtering!");
//nyx_debug_p(PT_PREFIX, "Reconfigure CR3-Filtering!");
GET_GLOBAL_STATE()->pt_c3_filter = val;
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_CR3, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_CR3, hmp_mode);
@@ -194,7 +194,7 @@ int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp
}

if(GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] > GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]){
QEMU_PT_PRINTF(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx", GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
nyx_debug_p(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx", GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
return -EINVAL;
}

@@ -202,7 +202,7 @@ int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp
pt_disable_ip_filtering(cpu, addrn, hmp_mode);
}

QEMU_PT_PRINTF(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)", addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
nyx_debug_p(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)", addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);

if(GET_GLOBAL_STATE()->pt_ip_filter_configured[addrn] && GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] != 0 && GET_GLOBAL_STATE()->pt_ip_filter_b[addrn] != 0){
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_ADDR0+addrn, hmp_mode);
@@ -283,13 +283,13 @@ void pt_pre_kvm_run(CPUState *cpu){
struct vmx_pt_filter_iprs filter_iprs;

if(GET_GLOBAL_STATE()->patches_disable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patches disable");
//nyx_debug_p(REDQUEEN_PREFIX, "patches disable");
assert(false); /* remove this branch */
GET_GLOBAL_STATE()->patches_disable_pending = false;
}

if(GET_GLOBAL_STATE()->patches_enable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patches enable");
//nyx_debug_p(REDQUEEN_PREFIX, "patches enable");
assert(false); /* remove this branch */
GET_GLOBAL_STATE()->patches_enable_pending = false;
}
@@ -297,7 +297,7 @@ void pt_pre_kvm_run(CPUState *cpu){

//if(cpu->redqueen_enable_pending){
if(GET_GLOBAL_STATE()->redqueen_enable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "rq enable");
//nyx_debug_p(REDQUEEN_PREFIX, "rq enable");
if (GET_GLOBAL_STATE()->redqueen_state){
enable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state);
}
@@ -308,7 +308,7 @@ void pt_pre_kvm_run(CPUState *cpu){

//if(cpu->redqueen_disable_pending){
if(GET_GLOBAL_STATE()->redqueen_disable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "rq disable");
//nyx_debug_p(REDQUEEN_PREFIX, "rq disable");
if (GET_GLOBAL_STATE()->redqueen_state){
disable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state);
}
@@ -326,7 +326,7 @@ void pt_pre_kvm_run(CPUState *cpu){
assert(cpu->pt_mmap != (void*)0xFFFFFFFFFFFFFFFF);
assert(mmap(cpu->pt_mmap+ret, 0x1000, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, -1, 0) == (void*)(cpu->pt_mmap+ret)); //;!= (void*)0xFFFFFFFFFFFFFFFF); // add an extra page to have enough space for an additional PT_TRACE_END byte

debug_printf("\t\t============> pt_mmap:%p - %p\n", cpu->pt_mmap, cpu->pt_mmap+ret);
nyx_debug("\t\t============> pt_mmap:%p - %p\n", cpu->pt_mmap, cpu->pt_mmap+ret);

memset(cpu->pt_mmap+ret, 0x55, 0x1000);
}
@@ -351,7 +351,7 @@ void pt_pre_kvm_run(CPUState *cpu){
if (cpu->pt_fd){
ret = ioctl(cpu->pt_fd, cpu->pt_cmd, 0);
if (ret > 0){
//QEMU_PT_PRINTF(PT_PREFIX, "KVM_VMX_PT_DISABLE %d", ret);
//nyx_debug_p(PT_PREFIX, "KVM_VMX_PT_DISABLE %d", ret);
pt_dump(cpu, ret);
cpu->pt_enabled = false;
}
@@ -88,7 +88,7 @@ static bool is_interessting_lea_at(redqueen_t* self, cs_insn *ins){
if(res){
x86_reg reg = op2->mem.index;
if(reg == X86_REG_EIP || reg == X86_REG_RIP || reg == X86_REG_EBP || reg == X86_REG_RBP){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "got boring index");
//nyx_debug_p(REDQUEEN_PREFIX, "got boring index");
res = false;
} //don't instrument local stack offset computations
}
@@ -180,26 +180,26 @@ static void opcode_analyzer(redqueen_t* self, cs_insn *ins){
//printf("INS %lx\n", ins->address);
if(ins->id == X86_INS_CMP){
set_rq_instruction(self, ins->address);
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking cmp %lx %s %s", ins->address, ins->mnemonic, ins->op_str);
//nyx_debug_p(REDQUEEN_PREFIX, "hooking cmp %lx %s %s", ins->address, ins->mnemonic, ins->op_str);
}
if(ins->id == X86_INS_LEA && is_interessting_lea_at(self, ins)){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking lea %lx", ins->address);
//nyx_debug_p(REDQUEEN_PREFIX, "hooking lea %lx", ins->address);
set_rq_instruction(self, ins->address);
}
if(ins->id == X86_INS_SUB && is_interessting_sub_at(self, ins)){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking sub %lx", ins->address);
//nyx_debug_p(REDQUEEN_PREFIX, "hooking sub %lx", ins->address);
set_rq_instruction(self, ins->address);
}
if(ins->id == X86_INS_ADD && is_interessting_add_at(self, ins)){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking add %lx", ins->address);
//nyx_debug_p(REDQUEEN_PREFIX, "hooking add %lx", ins->address);
set_rq_instruction(self, ins->address);
}
if(ins->id == X86_INS_XOR && is_interessting_xor_at(self, ins)){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking xor %lx %s %s", ins->address, ins->mnemonic, ins->op_str);
//nyx_debug_p(REDQUEEN_PREFIX, "hooking xor %lx %s %s", ins->address, ins->mnemonic, ins->op_str);
set_rq_instruction(self, ins->address);
}
if(ins->id ==X86_INS_CALL || ins->id == X86_INS_LCALL){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking call %lx %s %s", ins->address, ins->mnemonic, ins->op_str);
//nyx_debug_p(REDQUEEN_PREFIX, "hooking call %lx %s %s", ins->address, ins->mnemonic, ins->op_str);
set_rq_instruction(self, ins->address);
}
}
@@ -327,7 +327,7 @@ static void insert_hooks_bitmap(redqueen_t* self){
void redqueen_insert_hooks(redqueen_t* self){
// fprintf(stderr, "%s %x\n", __func__, self->cpu->redqueen_instrumentation_mode);

QEMU_PT_PRINTF(REDQUEEN_PREFIX, "insert hooks");
nyx_debug_p(REDQUEEN_PREFIX, "insert hooks");
assert(!self->hooks_applied);
//switch(self->cpu->redqueen_instrumentation_mode){
switch(GET_GLOBAL_STATE()->redqueen_instrumentation_mode){
@@ -346,7 +346,7 @@ void redqueen_insert_hooks(redqueen_t* self){
}

void redqueen_remove_hooks(redqueen_t* self){
QEMU_PT_PRINTF(REDQUEEN_PREFIX, "remove hooks");
nyx_debug_p(REDQUEEN_PREFIX, "remove hooks");
// fprintf(stderr, "remove hooks\n");
assert(self->hooks_applied);
remove_all_breakpoints(self->cpu);
@@ -492,7 +492,7 @@ static uint64_t eval_mem(cs_x86_op* op){

uint64_t val = 0;
assert(op->size == 1 || op->size == 2 || op->size == 4 || op->size == 8);
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "EVAL MEM FOR OP:");
//nyx_debug_p(REDQUEEN_PREFIX, "EVAL MEM FOR OP:");

/* TODO @ sergej: replace me later */
read_virtual_memory(eval_addr(op), (uint8_t*) &val, op->size, qemu_get_cpu(0));
@@ -526,7 +526,7 @@ static void print_comp_result(uint64_t addr, const char* type, uint64_t val1, ui
const char *format = NULL;
uint8_t pos = 0;
pos += snprintf(result_buf+pos, 256-pos, "%lx\t\t %s", addr, type);
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "got size: %ld", size);
//nyx_debug_p(REDQUEEN_PREFIX, "got size: %ld", size);
uint64_t mask = 0;
switch(size){
case 64: format = " 64\t%016lX-%016lX"; mask = 0xffffffffffffffff; break;
@@ -697,7 +697,7 @@ static bool test_strcmp(uint64_t arg1, uint64_t arg2){
if(!is_addr_mapped(arg1, cpu) || ! is_addr_mapped(arg2, cpu)){
return false;
}
//QEMU_PT_PRINTF(REDQUEEN_PREFIX,"valid ptrs");
//nyx_debug_p(REDQUEEN_PREFIX,"valid ptrs");
uint8_t buf1[REDQUEEN_MAX_STRCMP_LEN];
uint8_t buf2[REDQUEEN_MAX_STRCMP_LEN];
/* todo @ sergej */
@@ -710,7 +710,7 @@ static bool test_strcmp(uint64_t arg1, uint64_t arg2){
static bool test_strcmp_cdecl(void){
uint64_t arg1 = read_stack(0);
uint64_t arg2 = read_stack(1);
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call params cdecl %lx %lx", arg1, arg2);
//nyx_debug_p(REDQUEEN_PREFIX, "extract call params cdecl %lx %lx", arg1, arg2);
test_strchr(arg1, arg2);
return test_strcmp(arg1, arg2) ;

@@ -720,7 +720,7 @@ static bool test_strcmp_fastcall(void){
CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env;
uint64_t arg1 = env->regs[RCX]; //rcx
uint64_t arg2 = env->regs[RDX]; //rdx
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call params fastcall %lx %lx", arg1, arg2);
//nyx_debug_p(REDQUEEN_PREFIX, "extract call params fastcall %lx %lx", arg1, arg2);
test_strchr(arg1, arg2);
return test_strcmp(arg1, arg2);
}
@@ -730,13 +730,13 @@ static bool test_strcmp_sys_v(void){
CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env;
uint64_t arg1 = env->regs[RDI]; //rdx
uint64_t arg2 = env->regs[RSI]; //rsi
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call params sysv %lx %lx", arg1, arg2);
//nyx_debug_p(REDQUEEN_PREFIX, "extract call params sysv %lx %lx", arg1, arg2);
test_strchr(arg1, arg2);
return test_strcmp(arg1, arg2);
}

static void extract_call_params(void){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call at %lx", ip);
//nyx_debug_p(REDQUEEN_PREFIX, "extract call at %lx", ip);
test_strcmp_cdecl();
test_strcmp_fastcall();
test_strcmp_sys_v();
@@ -832,14 +832,14 @@ static void debug_print_disasm(char* desc, uint64_t ip, CPUState* cpu_state){
cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);
size_t count = cs_disasm(handle, &code[0], 64, ip, 1, &insn);
if(count > 0){
QEMU_PT_PRINTF(REDQUEEN_PREFIX,"%s\t %lx: %s %s",desc, ip, insn->mnemonic, insn->op_str);
nyx_debug_p(REDQUEEN_PREFIX,"%s\t %lx: %s %s",desc, ip, insn->mnemonic, insn->op_str);
} else {
QEMU_PT_PRINTF(REDQUEEN_PREFIX,"%s\t Failed to disassemble at: %lx",desc, ip);
nyx_debug_p(REDQUEEN_PREFIX,"%s\t Failed to disassemble at: %lx",desc, ip);
}
cs_close(&handle);
cs_free(insn, count);
} else {
QEMU_PT_PRINTF(REDQUEEN_PREFIX,"%s\t Failed to create capstone instance at: %lx",desc, ip);
nyx_debug_p(REDQUEEN_PREFIX,"%s\t Failed to create capstone instance at: %lx",desc, ip);
}
}
*/
@@ -849,7 +849,7 @@ static void debug_print_state(char* desc, CPUState* cpu_state){
X86CPU *cpu = X86_CPU(cpu_state);
CPUX86State *env = &cpu->env;
debug_print_disasm(desc, env->eip, cpu_state);
QEMU_PT_PRINTF(REDQUEEN_PREFIX,"ECX: %lx", get_reg_cpu(cpu_state, (char*)"rcx"));
nyx_debug_p(REDQUEEN_PREFIX,"ECX: %lx", get_reg_cpu(cpu_state, (char*)"rcx"));
}
*/
@ -168,7 +168,7 @@ uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page
}
}
else{
debug_fprintf(stderr, "WARNING: No such file in sharedir: %s\n", file);
nyx_error("Warning: No such file in sharedir: %s\n", file);
return 0xFFFFFFFFFFFFFFFFUL;
}
}
@ -548,7 +548,7 @@ static int cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset,
void switch_to_fuzz_mode(cow_cache_t* self){
self->enabled_fuzz = true;
assert(!mprotect(self->data_primary, self->cow_primary_size, PROT_READ));
debug_printf("[qemu-nyx] switching to secondary CoW buffer\n");
nyx_debug("switching to secondary CoW buffer\n");
}

void cow_cache_read_entry(void* opaque){
@ -41,7 +41,7 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap

for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if(blk && blk->cow_cache){
debug_printf("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
nyx_debug("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
self->cow_cache_array_size++;
}
}
@ -50,7 +50,7 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap

assert(fread(&temp_cow_cache_array_size, sizeof(uint32_t), 1, f) == 1);

debug_printf("%d vs %x\n", temp_cow_cache_array_size, self->cow_cache_array_size);
nyx_debug("%d vs %x\n", temp_cow_cache_array_size, self->cow_cache_array_size);
assert(self->cow_cache_array_size == temp_cow_cache_array_size);

self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size);
@ -88,7 +88,7 @@ nyx_block_t* nyx_block_snapshot_init(void){
BlockBackend *blk;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if(blk && blk->cow_cache){
debug_printf("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
nyx_debug("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
self->cow_cache_array_size++;
}
}
@ -324,7 +324,7 @@ static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t
uint8_t* data = NULL;

if(!strcmp(name, "timer")){
debug_fprintf(stderr, "SKPPING: %ld\n", size*-1);
nyx_debug("SKPPING: %ld\n", size*-1);
qemu_file_skip(f, size * -1);
handler = fast_timer_get;
data = malloc(sizeof(uint64_t));
@ -369,7 +369,7 @@ static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t
self->fast_state_get_fptr_pos++;

if(self->fast_state_get_fptr_pos >= self->fast_state_get_fptr_size){
debug_printf("RESIZE %s\n", __func__);
nyx_debug("RESIZE %s\n", __func__);
self->fast_state_get_fptr_size += REALLOC_SIZE;
self->get_fptr = realloc(self->get_fptr, self->fast_state_get_fptr_size * sizeof(void*));
self->get_opaque = realloc(self->get_opaque, self->fast_state_get_fptr_size * sizeof(void*));
@ -515,7 +515,7 @@ static inline int get_handler(state_reallocation_t* self, QEMUFile* f, void* cur
add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
}
else if(!strcmp(field->info->name, "fpreg")){
debug_fprintf(stderr, "type: %s (size: %lx)\n", field->info->name, size);
nyx_debug("type: %s (size: %lx)\n", field->info->name, size);
assert(0);
add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
}
@ -693,7 +693,7 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
//hexDump((void*)field->name, curr_elem, size);
#endif

debug_printf("*** vmstate_info_nullptr.get ***\n");
nyx_debug("*** vmstate_info_nullptr.get ***\n");
ret = vmstate_info_nullptr.get(f, curr_elem, size, NULL);
add_mblock(self, (char*)vmsd->name, (const char*)field->name, field->offset, (uint64_t)(curr_elem), (uint64_t)(size));
#ifdef VERBOSE_DEBUG
@ -725,12 +725,12 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
ret = qemu_file_get_error(f);
}
if (ret < 0) {
debug_fprintf(stderr, "RETURNING!\n");
nyx_debug("RETURNING!\n");
return ret;
}
}
} else if (field->flags & VMS_MUST_EXIST) {
debug_printf("Input validation failed: %s/%s", vmsd->name, field->name);
nyx_debug("Input validation failed: %s/%s", vmsd->name, field->name);
return -1;
}
else {
@ -877,7 +877,7 @@ static int fdl_enumerate_section(state_reallocation_t* self, QEMUFile *f, Migrat
ret = fdl_vmstate_load(self, f, se, version_id);
}
else{
debug_fprintf(stderr, "---------------------------------\nVMSD2: %p\n", (void*)se->vmsd);
nyx_debug("---------------------------------\nVMSD2: %p\n", (void*)se->vmsd);
//abort();
//fprintf(stderr, "---------------------------------\nVMSD2: %s\n", (VMStateDescription *)(se->vmsd)->name);
ret = vmstate_load(f, se);
@ -922,7 +922,7 @@ static void fdl_enumerate_global_states(state_reallocation_t* self, QEMUFile *f)
break;
default:
/* oops */
fprintf(stderr, "==> ERROR: unkown section_type: %x\n", section_type);
nyx_error("==> ERROR: unkown section_type: %x\n", section_type);
//abort();
break;
}
@ -67,7 +67,7 @@ shadow_memory_t* shadow_memory_init(void){
self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE , MAP_SHARED , self->snapshot_ptr_fd, 0);
madvise(self->snapshot_ptr, self->memory_size, MADV_RANDOM | MADV_MERGEABLE);

QEMU_PT_PRINTF(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx", self->snapshot_ptr, self->memory_size);
nyx_debug_p(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx", self->snapshot_ptr, self->memory_size);

@ -75,7 +75,7 @@ shadow_memory_t* shadow_memory_init(void){
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
QEMU_PT_PRINTF(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
//printf("%lx %lx %lx\t%s\t%p\n", block->offset, block->used_length, block->max_length, block->idstr, block->host);

block_array[i] = block;
@ -175,7 +175,7 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, b
fclose(file_mem_meta);

if(self->ram_regions_num != head.shadow_memory_regions){
fprintf(stderr, "Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n", self->ram_regions_num, head.shadow_memory_regions);
nyx_error("Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n", self->ram_regions_num, head.shadow_memory_regions);
exit(1);
}

@ -186,17 +186,17 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, b
fseek(file_mem_dump, 0L, SEEK_END);
uint64_t file_mem_dump_size = ftell(file_mem_dump);

debug_fprintf(stderr, "guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n", self->memory_size, file_mem_dump_size, path_dump);
nyx_debug("guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n", self->memory_size, file_mem_dump_size, path_dump);

#define VGA_SIZE (16<<20)

if(self->memory_size != file_mem_dump_size){
if (file_mem_dump_size >= VGA_SIZE){
fprintf(stderr, "ERROR: guest size should be %ld MB - set it to %ld MB\n", (file_mem_dump_size-VGA_SIZE)>>20, (self->memory_size-VGA_SIZE)>>20);
nyx_error("ERROR: guest size should be %ld MB - set it to %ld MB\n", (file_mem_dump_size-VGA_SIZE)>>20, (self->memory_size-VGA_SIZE)>>20);
exit(1);
}
else{
fprintf(stderr, "ERROR: guest size: %ld bytes\n", file_mem_dump_size);
nyx_error("ERROR: guest size: %ld bytes\n", file_mem_dump_size);
exit(1);
}
}
@ -218,7 +218,7 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, b
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
QEMU_PT_PRINTF(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
//printf("%lx %lx %lx\t%s\t%p\n", block->offset, block->used_length, block->max_length, block->idstr, block->host);

block_array[i] = block;
@ -354,16 +354,16 @@ void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder)
//assert(file_ptr_meta);
//assert(file_ptr_data);
/*
debug_printf("black_list_pages_num: %lx\n", self->black_list_pages_num);
debug_printf("black_list_pages_size: %lx\n", self->black_list_pages_size);
debug_printf("black_list_pages ...\n");
nyx_debug("black_list_pages_num: %lx\n", self->black_list_pages_num);
nyx_debug("black_list_pages_size: %lx\n", self->black_list_pages_size);
nyx_debug("black_list_pages ...\n");
for (uint64_t i = 0; i < self->black_list_pages_num; i++ ){
debug_printf("self->black_list_pages[%ld] = %lx\n", i, self->black_list_pages[i]);
nyx_debug("self->black_list_pages[%ld] = %lx\n", i, self->black_list_pages[i]);
}
*/

//printf("shadow_memory_regions: %d\n", self->ram_regions_num);
//debug_printf("ram_region_index: %d\n", self->ram_region_index);
//nyx_debug("ram_region_index: %d\n", self->ram_region_index);

/*
for (uint32_t i = 0; i < self->ram_regions_num; i++){
@ -8,16 +8,15 @@
#include <stdio.h>

void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
debug_printf("%s\n", __func__);
nyx_trace();

char* tmp;

assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1);
debug_printf("%s\n", tmp);

FILE *fp = fopen(tmp, "wb");
if(fp == NULL) {
fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp);
nyx_error("[%s] Could not open file %s.\n", __func__, tmp);
assert(false);
}

@ -71,16 +70,15 @@ void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
}

void deserialize_state(const char* filename_prefix){
debug_printf("%s\n", __func__);
nyx_trace();

char* tmp;

assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1);
debug_printf("%s\n", tmp);

FILE *fp = fopen(tmp, "rb");
if(fp == NULL) {
debug_fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp);
nyx_debug("[%s] Could not open file %s.\n", __func__, tmp);
assert(false);
//exit(EXIT_FAILURE);
}
@ -178,7 +178,7 @@ void init_page_cache(char* path){
assert(global_state.page_cache == NULL);
global_state.page_cache = page_cache_new((CPUState *)qemu_get_cpu(0), path);
#ifdef STATE_VERBOSE
debug_printf("\n\nINIT PAGE_CACHE => %s\n", path);
nyx_debug("\n\nINIT PAGE_CACHE => %s\n", path);
#endif
}

@ -203,7 +203,7 @@ static void* alloc_auxiliary_buffer(const char* file){
int fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, AUX_BUFFER_SIZE) == 0);
stat(file, &st);
QEMU_PT_PRINTF(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx", AUX_BUFFER_SIZE, st.st_size);
nyx_debug_p(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx", AUX_BUFFER_SIZE, st.st_size);

assert(AUX_BUFFER_SIZE == st.st_size);
ptr = mmap(0, AUX_BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
vl.c
@ -133,6 +133,7 @@ int main(int argc, char **argv)
#include "qemu/guest-random.h"

#ifdef QEMU_NYX
#include "nyx/debug.h"
#include "nyx/pt.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/synchronization.h"
@ -1652,7 +1653,7 @@ void qemu_system_reset_request(ShutdownCause reason)
{
#ifdef QEMU_NYX
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
fprintf(stderr, "%s!\n", __func__);
nyx_trace();
GET_GLOBAL_STATE()->shutdown_requested = true;
return;
}
@ -1678,7 +1679,7 @@ void qemu_system_suspend_request(void)
{
#ifdef CONFIG_PROCESSOR_TRACE
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
fprintf(stderr, "%s!\n", __func__);
nyx_trace();
GET_GLOBAL_STATE()->shutdown_requested = true;
return;
}
@ -1754,7 +1755,7 @@ void qemu_system_shutdown_request(ShutdownCause reason)
{
#ifdef CONFIG_PROCESSOR_TRACE
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
fprintf(stderr, "%s!\n", __func__);
nyx_trace();
GET_GLOBAL_STATE()->shutdown_requested = true;
return;
}
@ -1781,7 +1782,7 @@ void qemu_system_powerdown_request(void)
{
#ifdef CONFIG_PROCESSOR_TRACE
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
fprintf(stderr, "%s!\n", __func__);
nyx_trace();
GET_GLOBAL_STATE()->shutdown_requested = true;
return;
}
@ -4568,7 +4569,7 @@ int main(int argc, char **argv, char **envp)
if (fast_vm_reload){

if(getenv("NYX_DISABLE_BLOCK_COW")){
fprintf(stderr, "ERROR: Nyx block COW cache layer cannot be disabled while using fast snapshots\n");
nyx_error("Nyx block COW cache layer cannot be disabled while using fast snapshots\n");
exit(1);
}

@ -4603,27 +4604,27 @@ int main(int argc, char **argv, char **envp)
bool skip_serialization = qemu_opt_get_bool(opts, "skip_serialization", false);

if((snapshot_used || load_mode || skip_serialization) && getenv("NYX_DISABLE_DIRTY_RING")){
fprintf(stderr, "ERROR: NYX_DISABLE_DIRTY_RING is only allowed during pre-snapshot creation\n");
error_report("NYX_DISABLE_DIRTY_RING is only allowed during pre-snapshot creation\n");
exit(1);
}

if((pre_snapshot_used && !snapshot_used && !load_mode) && !getenv("NYX_DISABLE_DIRTY_RING")){
fprintf(stderr, "ERROR: NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n");
error_report("NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n");
exit(1);
}

if(pre_snapshot_used && load_mode){
fprintf(stderr, "[!] qemu-nyx: invalid argument (pre_snapshot_used && load_mode)!\n");
error_report("invalid argument (pre_snapshot_used && load_mode)!\n");
exit(1);
}

if((!snapshot_used && !pre_snapshot_used) && load_mode){
fprintf(stderr, "[!] qemu-nyx: invalid argument ((!pre_snapshot_used && !pre_snapshot_used) && load_mode)!\n");
error_report("invalid argument ((!pre_snapshot_used && !pre_snapshot_used) && load_mode)!\n");
exit(1);
}

if(pre_snapshot_used && snapshot_used){
fprintf(stderr, "[!] qemu-nyx: loading pre image to start fuzzing...\n");
nyx_printf("[Qemu-Nyx]: loading pre image to start fuzzing...\n");
set_fast_reload_mode(false);
set_fast_reload_path(snapshot_path);
if(!skip_serialization){
@ -4636,7 +4637,7 @@ int main(int argc, char **argv, char **envp)
}
else{
if(pre_snapshot_used){
fprintf(stderr, "[!] qemu-nyx: preparing to create pre image...\n");
nyx_printf("[Qemu-Nyx]: preparing to create pre image...\n");
set_fast_reload_pre_path(pre_snapshot_path);
set_fast_reload_pre_image();
}
@ -4647,7 +4648,7 @@ int main(int argc, char **argv, char **envp)
}
if (load_mode){
set_fast_reload_mode(true);
fprintf(stderr, "[!] qemu-nyx: waiting for snapshot to start fuzzing...\n");
nyx_printf("[Qemu-Nyx]: waiting for snapshot to start fuzzing...\n");
fast_reload_create_from_file(get_fast_reload_snapshot(), snapshot_path, false);
//cpu_synchronize_all_post_reset();
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
@ -4655,7 +4656,7 @@ int main(int argc, char **argv, char **envp)
//GET_GLOBAL_STATE()->pt_trace_mode = false;
}
else{
fprintf(stderr, "[QEMU-Nyx] Booting VM to start fuzzing...\n");
nyx_printf("[Qemu-Nyx]: Booting VM to start fuzzing...\n");
set_fast_reload_mode(false);
}
}