add various improvements:

- root snapshot serialization / deserialization
- abort if specific hypercalls are called during fuzzing (see the guard sketch after this list)
- ignore requests to disable write protection
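For orientation, the "abort during fuzzing" change boils down to an early-out guard at the top of each setup-only hypercall handler. A minimal sketch, assuming a hypothetical handler name and hypercall label; only is_called_in_fuzzing_mode() is taken from the diff below:

static void handle_hypercall_example(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
    /* illustrative handler, not part of the commit: refuse setup-only
     * hypercalls once the fuzzer is already running */
    if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_EXAMPLE")){
        return; /* is_called_in_fuzzing_mode() has already raised nyx_abort() */
    }
    /* ... regular setup-time handling ... */
}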
Sergej Schumilo 2022-01-20 03:43:12 +01:00
parent b5798ba95a
commit 7af65d1fdc
10 changed files with 223 additions and 172 deletions

View File

@@ -133,7 +133,9 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.protect_payload_buffer);
GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte;
if (GET_GLOBAL_STATE()->protect_payload_buffer == 0 && aux_byte == 1){
GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte;
}
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.discard_tmp_snapshot);

View File

@@ -20,6 +20,17 @@ void nyx_abort(char* msg){
synchronization_lock();
}
bool is_called_in_fuzzing_mode(const char* hypercall){
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
char* tmp = NULL;
assert(asprintf(&tmp, "Hypercall <%s> called during fuzzing...", hypercall) != -1);
nyx_abort((char*)tmp);
free(tmp);
return true;
}
return false;
}
uint64_t get_rip(CPUState *cpu){
kvm_arch_get_registers(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -81,6 +92,20 @@ void coverage_bitmap_copy_from_buffer(nyx_coverage_bitmap_copy_t* buffer){
}
}
static void resize_coverage_bitmap(uint32_t new_bitmap_size){
uint32_t new_bitmap_shm_size = new_bitmap_size;
if (new_bitmap_shm_size % 64 > 0) {
new_bitmap_shm_size = ((new_bitmap_shm_size + 64) >> 6) << 6;
}
GET_GLOBAL_STATE()->shared_bitmap_real_size = new_bitmap_shm_size;
resize_shared_memory(new_bitmap_shm_size, &GET_GLOBAL_STATE()->shared_bitmap_size, &GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_fd);
/* pass the actual bitmap buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_coverage_bitmap_size = new_bitmap_size;
}
bool apply_capabilities(CPUState *cpu){
//X86CPU *cpux86 = X86_CPU(cpu);
//CPUX86State *env = &cpux86->env;
@@ -107,6 +132,10 @@ bool apply_capabilities(CPUState *cpu){
return false;
}
if (GET_GLOBAL_STATE()->cap_coverage_bitmap_size){
resize_coverage_bitmap(GET_GLOBAL_STATE()->cap_coverage_bitmap_size);
}
for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr+ i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
}
@@ -127,6 +156,15 @@ bool apply_capabilities(CPUState *cpu){
}
set_cap_agent_ijon_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
if (GET_GLOBAL_STATE()->input_buffer_size != GET_GLOBAL_STATE()->shared_payload_buffer_size){
resize_shared_memory(GET_GLOBAL_STATE()->input_buffer_size, &GET_GLOBAL_STATE()->shared_payload_buffer_size, NULL, GET_GLOBAL_STATE()->shared_payload_buffer_fd);
GET_GLOBAL_STATE()->shared_payload_buffer_size = GET_GLOBAL_STATE()->input_buffer_size;
}
/* pass the actual input buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_input_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
return true;
}

View File

@@ -10,6 +10,7 @@ typedef struct nyx_coverage_bitmap_copy_s{
}nyx_coverage_bitmap_copy_t;
void nyx_abort(char* msg);
bool is_called_in_fuzzing_mode(const char* hypercall);
nyx_coverage_bitmap_copy_t* new_coverage_bitmaps(void);
void coverage_bitmap_reset(void);

View File

@@ -7,6 +7,11 @@
void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
uint64_t vaddr = hypercall_arg;
host_config_t config;
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_GET_HOST_CONFIG")){
return;
}
memset((void*)&config, 0, sizeof(host_config_t));
config.host_magic = NYX_HOST_MAGIC;
@@ -18,24 +23,14 @@ void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, u
write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(host_config_t), cpu);
}
static void resize_coverage_bitmap(uint32_t new_bitmap_size){
uint32_t new_bitmap_shm_size = new_bitmap_size;
if (new_bitmap_shm_size % 64 > 0) {
new_bitmap_shm_size = ((new_bitmap_shm_size + 64) >> 6) << 6;
}
GET_GLOBAL_STATE()->shared_bitmap_real_size = new_bitmap_shm_size;
resize_shared_memory(new_bitmap_shm_size, &GET_GLOBAL_STATE()->shared_bitmap_size, &GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_fd);
/* pass the actual bitmap buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_coverage_bitmap_size = new_bitmap_size;
}
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
uint64_t vaddr = hypercall_arg;
agent_config_t config;
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_SET_AGENT_CONFIG")){
return;
}
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
@@ -72,12 +67,12 @@ void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu,
GET_GLOBAL_STATE()->cap_cr3 = env->cr[3];
if (config.coverage_bitmap_size){
resize_coverage_bitmap(config.coverage_bitmap_size);
}
GET_GLOBAL_STATE()->cap_coverage_bitmap_size = config.coverage_bitmap_size;
GET_GLOBAL_STATE()->input_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
if (config.input_buffer_size){
resize_payload_buffer(config.input_buffer_size);
abort();
}
if(apply_capabilities(cpu) == false){

View File

@@ -111,12 +111,6 @@ bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint
if(!setup_snapshot_once){
//pt_reset_bitmap();
if (GET_GLOBAL_STATE()->pt_trace_mode){
printf("[QEMU-Nyx] coverage mode: Intel-PT (KVM-Nyx and libxdc)\n");
}
else{
printf("[QEMU-Nyx] coverage mode: compile-time instrumentation\n");
}
coverage_bitmap_reset();
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP);
@@ -194,6 +188,11 @@ void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t
static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
debug_printf("------------ %s\n", __func__);
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_GET_PAYLOAD")){
return;
}
if(hypercall_enabled && !setup_snapshot_once){
QEMU_PT_PRINTF(CORE_PREFIX, "Payload Address:\t%lx", hypercall_arg);
kvm_arch_get_registers(cpu);
@@ -222,16 +221,20 @@ static void set_return_value(CPUState *cpu, uint64_t return_value){
static void handle_hypercall_kafl_req_stream_data(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static uint8_t req_stream_buffer[0x1000];
if(is_called_in_fuzzing_mode("HYPERCALL_KAFL_REQ_STREAM_DATA")){
return;
}
kvm_arch_get_registers(cpu);
/* address has to be page aligned */
if((hypercall_arg&0xFFF) != 0){
debug_fprintf(stderr, "%s: ERROR -> address is not page aligned!\n", __func__);
set_return_value(cpu, 0xFFFFFFFFFFFFFFFFUL);
set_return_value(cpu, 0xFFFFFFFFFFFFFFFFULL);
}
else{
read_virtual_memory(hypercall_arg, (uint8_t*)req_stream_buffer, 0x100, cpu);
uint64_t bytes = sharedir_request_file(GET_GLOBAL_STATE()->sharedir, (const char *)req_stream_buffer, req_stream_buffer);
if(bytes != 0xFFFFFFFFFFFFFFFFUL){
if(bytes != 0xFFFFFFFFFFFFFFFFULL){
write_virtual_memory(hypercall_arg, (uint8_t*)req_stream_buffer, bytes, cpu);
}
set_return_value(cpu, bytes);
@@ -249,6 +252,10 @@ static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUS
//static uint64_t addresses[512];
req_data_bulk_t req_data_bulk_data;
if(is_called_in_fuzzing_mode("HYPERCALL_KAFL_REQ_STREAM_DATA_BULK")){
return;
}
kvm_arch_get_registers(cpu);
/* address has to be page aligned */
if((hypercall_arg&0xFFF) != 0){
@@ -284,6 +291,11 @@ static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUS
static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
uint64_t buffer[3];
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_RANGE_SUBMIT")){
return;
}
read_virtual_memory(hypercall_arg, (uint8_t*)&buffer, sizeof(buffer), cpu);
if(buffer[2] >= 2){
@@ -405,6 +417,11 @@ static void handle_hypercall_kafl_cr3(struct kvm_run *run, CPUState *cpu, uint64
}
static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_SUBMIT_PANIC")){
return;
}
if(hypercall_enabled){
QEMU_PT_PRINTF(CORE_PREFIX, "Panic address:\t%lx", hypercall_arg);
write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD, PAYLOAD_BUFFER_SIZE, cpu);
@@ -524,6 +541,10 @@ static void handle_hypercall_kafl_panic_extended(struct kvm_run *run, CPUState *
static void handle_hypercall_kafl_lock(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_LOCK")){
return;
}
if(!GET_GLOBAL_STATE()->fast_reload_pre_image){
QEMU_PT_PRINTF(CORE_PREFIX, "Skipping pre image creation (hint: set pre=on) ...");
return;
@@ -539,13 +560,17 @@ static void handle_hypercall_kafl_printf(struct kvm_run *run, CPUState *cpu, uin
read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu);
#ifdef DEBUG_HPRINTF
fprintf(stderr, "%s %s\n", __func__, hprintf_buffer);
#else
#endif
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, HPRINTF_SIZE)+1);
synchronization_lock();
#endif
}
static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_USER_RANGE_ADVISE")){
return;
}
kAFL_ranges* buf = malloc(sizeof(kAFL_ranges));
for(int i = 0; i < INTEL_PT_MAX_RANGES; i++){
@@ -560,6 +585,11 @@ static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run, CPUStat
static void handle_hypercall_kafl_user_submit_mode(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
//printf("%s\n", __func__);
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_USER_SUBMIT_MODE")){
return;
}
switch(hypercall_arg){
case KAFL_MODE_64:
QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_64 ...");
@@ -708,6 +738,11 @@ static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu,
}
static void handle_hypercall_kafl_persist_page_past_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_PERSIST_PAGE_PAST_SNAPSHOT")){
return;
}
CPUX86State *env = &(X86_CPU(cpu))->env;
kvm_arch_get_registers_fast(cpu);
hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cpu, env->cr[3], hypercall_arg&(~0xFFF));

View File

@@ -272,7 +272,6 @@ void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr,
return;
}
assert(!GET_GLOBAL_STATE()->pt_trace_mode);
assert(!GET_GLOBAL_STATE()->in_fuzzing_mode);
assert(ftruncate(fd, new_size) == 0);
@@ -281,25 +280,10 @@ void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr,
*shm_ptr = (void*)mmap(0, new_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
assert(*shm_ptr != MAP_FAILED);
}
else{
fprintf(stderr, "=> shm_ptr is NULL\n");
abort();
}
*shm_size = new_size;
}
void resize_payload_buffer(uint32_t new_size){
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size);
assert(GET_GLOBAL_STATE()->shared_payload_buffer_size < new_size && !(new_size & 0xFFF));
assert(!GET_GLOBAL_STATE()->in_fuzzing_mode);
assert(ftruncate(GET_GLOBAL_STATE()->shared_payload_buffer_fd, new_size) == 0);
GET_GLOBAL_STATE()->shared_payload_buffer_size = new_size;
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_input_buffer_size = new_size;
}
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu){
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size);
RAMBlock *block;

View File

@@ -68,6 +68,5 @@ void print_48_paging2(uint64_t cr3);
bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu);
void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr, int fd);
void resize_payload_buffer(uint32_t new_size);
#endif

View File

@@ -17,71 +17,56 @@ void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
FILE *fp = fopen(tmp, "wb");
if(fp == NULL) {
debug_fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp);
fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp);
assert(false);
//exit(EXIT_FAILURE);
}
qemu_nyx_state_t* nyx_global_state = GET_GLOBAL_STATE();
serialized_state_header_t header = {0};
debug_printf("DUMPING global_state.pt_ip_filter_configured: -\n");
fwrite(&nyx_global_state->pt_ip_filter_configured, sizeof(bool)*4, 1, fp);
header.magic = NYX_SERIALIZED_STATE_MAGIC;
header.version = NYX_SERIALIZED_STATE_VERSION;
debug_printf("DUMPING global_state.pt_ip_filter_a: -\n");
fwrite(&nyx_global_state->pt_ip_filter_a, sizeof(uint64_t)*4, 1, fp);
debug_printf("DUMPING global_state.pt_ip_filter_b: -\n");
fwrite(&nyx_global_state->pt_ip_filter_b, sizeof(uint64_t)*4, 1, fp);
debug_printf("DUMPING global_state.parent_cr3: %lx\n", global_state.parent_cr3);
fwrite(&nyx_global_state->parent_cr3, sizeof(uint64_t), 1, fp);
debug_printf("DUMPING global_state.disassembler_word_width: %x\n", global_state.disassembler_word_width);
fwrite(&nyx_global_state->disassembler_word_width, sizeof(uint8_t), 1, fp);
debug_printf("DUMPING global_state.fast_reload_pre_image: %x\n", global_state.fast_reload_pre_image);
fwrite(&nyx_global_state->fast_reload_pre_image, sizeof(bool), 1, fp);
debug_printf("DUMPING global_state.mem_mode: %x\n", global_state.mem_mode);
fwrite(&nyx_global_state->mem_mode, sizeof(uint8_t), 1, fp);
debug_printf("DUMPING global_state.pt_trace_mode: %x\n", global_state.pt_trace_mode);
fwrite(&nyx_global_state->pt_trace_mode, sizeof(bool), 1, fp);
debug_printf("DUMPING global_state.nested: %x\n", global_state.nested);
fwrite(&nyx_global_state->nested, sizeof(bool), 1, fp);
if(!global_state.nested){
debug_printf("DUMPING global_state.payload_buffer: %lx\n", global_state.payload_buffer);
fwrite(&nyx_global_state->payload_buffer, sizeof(uint64_t), 1, fp);
fwrite(&nyx_global_state->cap_timeout_detection, sizeof(global_state.cap_timeout_detection), 1, fp);
fwrite(&nyx_global_state->cap_only_reload_mode, sizeof(global_state.cap_only_reload_mode), 1, fp);
fwrite(&nyx_global_state->cap_compile_time_tracing, sizeof(global_state.cap_compile_time_tracing), 1, fp);
fwrite(&nyx_global_state->cap_ijon_tracing, sizeof(global_state.cap_ijon_tracing), 1, fp);
fwrite(&nyx_global_state->cap_cr3, sizeof(global_state.cap_cr3), 1, fp);
fwrite(&nyx_global_state->cap_compile_time_tracing_buffer_vaddr, sizeof(global_state.cap_compile_time_tracing_buffer_vaddr), 1, fp);
fwrite(&nyx_global_state->cap_ijon_tracing_buffer_vaddr, sizeof(global_state.cap_ijon_tracing_buffer_vaddr), 1, fp);
fwrite(&nyx_global_state->protect_payload_buffer, sizeof(bool), 1, fp);
if (is_pre_snapshot){
header.type = NYX_SERIALIZED_TYPE_PRE_SNAPSHOT;
fwrite(&header, sizeof(serialized_state_header_t), 1, fp);
}
else{
assert(global_state.nested_payload_pages != NULL && global_state.nested_payload_pages_num != 0);
debug_printf("DUMPING global_state.nested_payload_pages_num: %x\n", global_state.nested_payload_pages_num);
fwrite(&nyx_global_state->nested_payload_pages_num, sizeof(uint32_t), 1, fp);
header.type = NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT;
fwrite(&header, sizeof(serialized_state_header_t), 1, fp);
if(global_state.nested_payload_pages_num != 0){
debug_printf("DUMPING global_state.protect_payload_buffer: %x\n", global_state.protect_payload_buffer);
fwrite(&nyx_global_state->protect_payload_buffer, sizeof(bool), 1, fp);
}
qemu_nyx_state_t* nyx_global_state = GET_GLOBAL_STATE();
serialized_state_root_snapshot_t root_snapshot = {0};
for(uint32_t i = 0; i < global_state.nested_payload_pages_num; i++){
debug_printf("DUMPING global_state.nested_payload_pages[%d]: %lx\n", i, global_state.nested_payload_pages[i]);
fwrite(&nyx_global_state->nested_payload_pages[i], sizeof(uint64_t), 1, fp);
for (uint8_t i = 0; i < 4; i++){
root_snapshot.pt_ip_filter_configured[i] = nyx_global_state->pt_ip_filter_configured[i];
root_snapshot.pt_ip_filter_a[i] = nyx_global_state->pt_ip_filter_a[i];
root_snapshot.pt_ip_filter_b[i] = nyx_global_state->pt_ip_filter_b[i];
}
root_snapshot.parent_cr3 = nyx_global_state->parent_cr3;
root_snapshot.disassembler_word_width = nyx_global_state->disassembler_word_width;
root_snapshot.fast_reload_pre_image = nyx_global_state->fast_reload_pre_image;
root_snapshot.mem_mode = nyx_global_state->mem_mode;
root_snapshot.pt_trace_mode =nyx_global_state->pt_trace_mode;
root_snapshot.input_buffer_vaddr = nyx_global_state->payload_buffer;
root_snapshot.protect_input_buffer = nyx_global_state->protect_payload_buffer;
root_snapshot.input_buffer_size = nyx_global_state->input_buffer_size;
root_snapshot.cap_timeout_detection = nyx_global_state->cap_timeout_detection;
root_snapshot.cap_only_reload_mode = nyx_global_state->cap_only_reload_mode;
root_snapshot.cap_compile_time_tracing = nyx_global_state->cap_compile_time_tracing;
root_snapshot.cap_ijon_tracing = nyx_global_state->cap_ijon_tracing;
root_snapshot.cap_cr3 = nyx_global_state->cap_cr3;
root_snapshot.cap_compile_time_tracing_buffer_vaddr = nyx_global_state->cap_compile_time_tracing_buffer_vaddr;
root_snapshot.cap_ijon_tracing_buffer_vaddr = nyx_global_state->cap_ijon_tracing_buffer_vaddr;
root_snapshot.cap_coverage_bitmap_size = nyx_global_state->cap_coverage_bitmap_size;
fwrite(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1, fp);
}
fclose(fp);
free(tmp);
}
@@ -100,84 +85,53 @@ void deserialize_state(const char* filename_prefix){
//exit(EXIT_FAILURE);
}
qemu_nyx_state_t* nyx_global_state = GET_GLOBAL_STATE();
assert(fread(&nyx_global_state->pt_ip_filter_configured, sizeof(bool)*4, 1, fp) == 1);
debug_printf("LOADING global_state.pt_ip_filter_configured: -\n");
serialized_state_header_t header = {0};
assert(fread(&header, sizeof(serialized_state_header_t), 1, fp) == 1);
assert(fread(&nyx_global_state->pt_ip_filter_a, sizeof(uint64_t)*4, 1, fp) == 1);
debug_printf("LOADING global_state.pt_ip_filter_a: -\n");
assert(header.magic == NYX_SERIALIZED_STATE_MAGIC);
assert(header.version == NYX_SERIALIZED_STATE_VERSION);
assert(fread(&nyx_global_state->pt_ip_filter_b, sizeof(uint64_t)*4, 1, fp) == 1);
debug_printf("LOADING global_state.pt_ip_filter_b: -\n");
if(header.type == NYX_SERIALIZED_TYPE_PRE_SNAPSHOT){
/* we're done here */
}
else if (header.type == NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT){
qemu_nyx_state_t* nyx_global_state = GET_GLOBAL_STATE();
serialized_state_root_snapshot_t root_snapshot = {0};
assert(fread(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1, fp) == 1);
assert(fread(&nyx_global_state->parent_cr3, sizeof(uint64_t), 1, fp) == 1);
debug_printf("LOADING global_state.parent_cr3: %lx\n", global_state.parent_cr3);
assert(fread(&nyx_global_state->disassembler_word_width, sizeof(uint8_t), 1, fp) == 1);
debug_printf("LOADING global_state.disassembler_word_width: %x\n", global_state.disassembler_word_width);
assert(fread(&nyx_global_state->fast_reload_pre_image, sizeof(bool), 1, fp) == 1);
debug_printf("LOADING global_state.fast_reload_pre_image: %x\n", global_state.fast_reload_pre_image);
assert(fread(&nyx_global_state->mem_mode, sizeof(uint8_t), 1, fp) == 1);
debug_printf("LOADING global_state.mem_mode: %x\n", global_state.mem_mode);
assert(fread(&nyx_global_state->pt_trace_mode, sizeof(bool), 1, fp) == 1);
debug_printf("LOADING global_state.pt_trace_mode: %x\n", global_state.pt_trace_mode);
assert(fread(&nyx_global_state->nested, sizeof(bool), 1, fp) == 1);
debug_printf("LOADING global_state.nested: %x\n", global_state.nested);
if(!global_state.nested){
assert(fread(&nyx_global_state->payload_buffer, sizeof(uint64_t), 1, fp) == 1);
debug_printf("LOADING global_state.payload_buffer: %lx\n", global_state.payload_buffer);
assert(fread(&nyx_global_state->cap_timeout_detection, sizeof(global_state.cap_timeout_detection), 1, fp) == 1);
assert(fread(&nyx_global_state->cap_only_reload_mode, sizeof(global_state.cap_only_reload_mode), 1, fp) == 1);
assert(fread(&nyx_global_state->cap_compile_time_tracing, sizeof(global_state.cap_compile_time_tracing), 1, fp) == 1);
assert(fread(&nyx_global_state->cap_ijon_tracing, sizeof(global_state.cap_ijon_tracing), 1, fp) == 1);
assert(fread(&nyx_global_state->cap_cr3, sizeof(global_state.cap_cr3), 1, fp) == 1);
assert(fread(&nyx_global_state->cap_compile_time_tracing_buffer_vaddr, sizeof(global_state.cap_compile_time_tracing_buffer_vaddr), 1, fp) == 1);
assert(fread(&nyx_global_state->cap_ijon_tracing_buffer_vaddr, sizeof(global_state.cap_ijon_tracing_buffer_vaddr), 1, fp) == 1);
if(!global_state.fast_reload_pre_image){
assert(fread(&nyx_global_state->protect_payload_buffer, sizeof(bool), 1, fp) == 1);
if(global_state.payload_buffer != 0){
debug_printf("REMAP PAYLOAD BUFFER!\n");
remap_payload_buffer(global_state.payload_buffer, ((CPUState *)qemu_get_cpu(0)) );
}
else{
fprintf(stderr, "WARNING: address of payload buffer in snapshot file is zero!\n");
}
for (uint8_t i = 0; i < 4; i++){
nyx_global_state->pt_ip_filter_configured[i] = root_snapshot.pt_ip_filter_configured[i];
nyx_global_state->pt_ip_filter_a[i] = root_snapshot.pt_ip_filter_a[i];
nyx_global_state->pt_ip_filter_b[i] = root_snapshot.pt_ip_filter_b[i];
}
nyx_global_state->parent_cr3 = root_snapshot.parent_cr3;
nyx_global_state->disassembler_word_width = root_snapshot.disassembler_word_width;
nyx_global_state->fast_reload_pre_image = root_snapshot.fast_reload_pre_image;
nyx_global_state->mem_mode = root_snapshot.mem_mode;
nyx_global_state->pt_trace_mode =root_snapshot.pt_trace_mode;
nyx_global_state->payload_buffer = root_snapshot.input_buffer_vaddr;
nyx_global_state->protect_payload_buffer = root_snapshot.protect_input_buffer;
nyx_global_state->input_buffer_size = root_snapshot.input_buffer_size;
nyx_global_state->cap_timeout_detection = root_snapshot.cap_timeout_detection;
nyx_global_state->cap_only_reload_mode = root_snapshot.cap_only_reload_mode;
nyx_global_state->cap_compile_time_tracing = root_snapshot.cap_compile_time_tracing;
nyx_global_state->cap_ijon_tracing = root_snapshot.cap_ijon_tracing;
nyx_global_state->cap_cr3 = root_snapshot.cap_cr3;
nyx_global_state->cap_compile_time_tracing_buffer_vaddr = root_snapshot.cap_compile_time_tracing_buffer_vaddr;
nyx_global_state->cap_ijon_tracing_buffer_vaddr = root_snapshot.cap_ijon_tracing_buffer_vaddr;
nyx_global_state->cap_coverage_bitmap_size = root_snapshot.cap_coverage_bitmap_size;
assert(apply_capabilities(qemu_get_cpu(0)));
remap_payload_buffer(nyx_global_state->payload_buffer, ((CPUState *)qemu_get_cpu(0)) );
}
else{
assert(fread(&nyx_global_state->nested_payload_pages_num, sizeof(uint32_t), 1, fp) == 1);
debug_printf("LOADING global_state.nested_payload_pages_num: %x\n", global_state.nested_payload_pages_num);
global_state.in_fuzzing_mode = true; /* haaaeeeeh ??? */
if(!global_state.fast_reload_pre_image){
assert(fread(&nyx_global_state->protect_payload_buffer, sizeof(bool), 1, fp) == 1);
debug_printf("LOADING global_state.protect_payload_buffer: %x\n", global_state.protect_payload_buffer);
global_state.nested_payload_pages = (uint64_t*)malloc(sizeof(uint64_t)*global_state.nested_payload_pages_num);
for(uint32_t i = 0; i < global_state.nested_payload_pages_num; i++){
assert(fread(&nyx_global_state->nested_payload_pages[i], sizeof(uint64_t), 1, fp) == 1);
debug_printf("LOADED global_state.nested_payload_pages[%d]: %lx\n", i, global_state.nested_payload_pages[i]);
if(global_state.protect_payload_buffer){
assert(remap_payload_slot_protected(GET_GLOBAL_STATE()->nested_payload_pages[i], i, ((CPUState *)qemu_get_cpu(0))) == true);
}
else{
remap_payload_slot(global_state.nested_payload_pages[i], i, ((CPUState *)qemu_get_cpu(0)));
}
}
}
fprintf(stderr, "[QEMU-Nyx]: this feature is currently missing\n");
abort();
}
fclose(fp);

View File

@@ -1,6 +1,47 @@
#pragma once
#include <stdbool.h>
#include <stdint.h>
#define NYX_SERIALIZED_STATE_MAGIC 0x58594E
#define NYX_SERIALIZED_STATE_VERSION 1
#define NYX_SERIALIZED_TYPE_PRE_SNAPSHOT 0
#define NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT 1
#define NYX_SERIALIZED_TYPE_NESTED_SNAPSHOT 2
typedef struct serialized_state_header_s {
uint32_t magic;
uint32_t version;
uint32_t type;
} serialized_state_header_t;
typedef struct serialized_state_root_snapshot_s {
bool pt_ip_filter_configured[4];
uint64_t pt_ip_filter_a[4];
uint64_t pt_ip_filter_b[4];
uint64_t parent_cr3;
uint8_t disassembler_word_width;
bool fast_reload_pre_image;
uint8_t mem_mode;
bool pt_trace_mode;
uint64_t input_buffer_vaddr;
bool protect_input_buffer;
uint32_t input_buffer_size;
uint8_t cap_timeout_detection;
uint8_t cap_only_reload_mode;
uint8_t cap_compile_time_tracing;
uint8_t cap_ijon_tracing;
uint64_t cap_cr3;
uint64_t cap_compile_time_tracing_buffer_vaddr;
uint64_t cap_ijon_tracing_buffer_vaddr;
uint64_t cap_coverage_bitmap_size;
} serialized_state_root_snapshot_t;
void serialize_state(const char* filename_prefix, bool is_pre_snapshot);
void deserialize_state(const char* filename_prefix);
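For reference, a minimal reader sketch that is not part of this commit: it assumes the on-disk layout produced by serialize_state() above (a serialized_state_header_t followed by a serialized_state_root_snapshot_t when the type is NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT); the helper name read_root_snapshot() is hypothetical.

#include <stdio.h>
#include <stdbool.h>

/* hypothetical helper: validate the header, then read the root-snapshot record */
static bool read_root_snapshot(FILE *fp, serialized_state_root_snapshot_t *out){
    serialized_state_header_t header = {0};
    if(fread(&header, sizeof(header), 1, fp) != 1)           return false;
    if(header.magic   != NYX_SERIALIZED_STATE_MAGIC)         return false;
    if(header.version != NYX_SERIALIZED_STATE_VERSION)       return false;
    if(header.type    != NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT)  return false;
    return fread(out, sizeof(*out), 1, fp) == 1;
}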

View File

@@ -117,6 +117,7 @@ typedef struct qemu_nyx_state_s{
bool protect_payload_buffer;
bool discard_tmp_snapshot;
uint8_t mem_mode;
uint32_t input_buffer_size;
/* NON MIGRATABLE OPTION */
@@ -146,6 +147,7 @@ typedef struct qemu_nyx_state_s{
uint64_t cap_cr3;
uint64_t cap_compile_time_tracing_buffer_vaddr;
uint64_t cap_ijon_tracing_buffer_vaddr;
uint64_t cap_coverage_bitmap_size;
auxilary_buffer_t* auxilary_buffer;
auxilary_buffer_config_t shadow_config;