Merge pull request #10 from nyx-fuzz/qemu-nyx-4.2.0-dev-intel

push qemu-nyx-4.2.0-dev-intel to qemu-nyx-4.2.0
Sergej Schumilo 2022-04-07 11:04:36 +02:00 committed by GitHub
commit 1acaa75a8b
36 changed files with 901 additions and 514 deletions

View File

@ -2562,18 +2562,23 @@ int kvm_cpu_exec(CPUState *cpu)
ret = EXCP_INTERRUPT;
break;
case KVM_EXIT_SHUTDOWN:
DPRINTF("shutdown\n");
#ifndef QEMU_NYX
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
#else
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_EXIT_SHUTDOWN)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
#define CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC // consider triple-fault etc as crash?
#ifndef CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC
/* Fuzzing is enabled at this point -> don't exit */
fprintf(stderr, "Got KVM_EXIT_SHUTDOWN while in fuzzing mode => reload\n",);
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
#else
debug_fprintf(stderr "Got KVM_EXIT_SHUTDOWN while in fuzzing mode => panic\n",);
handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
#endif
} else{
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
}
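With CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC defined (the default set here), a guest triple fault or similar fatal event is now reported as a crash rather than triggering a silent snapshot reload. A minimal guest-side sketch to exercise this path (hypothetical test code, not part of this commit): wipe the IDT and raise an exception, which escalates to a triple fault and surfaces as KVM_EXIT_SHUTDOWN on the host.

#include <stdint.h>

struct __attribute__((packed)) idtr {
    uint16_t limit;
    uint64_t base;
};

/* With no valid IDT entries, the #BP below escalates
 * #BP -> #DF -> triple fault -> KVM_EXIT_SHUTDOWN. */
static void trigger_triple_fault(void)
{
    struct idtr empty = { .limit = 0, .base = 0 };
    asm volatile("lidt %0" :: "m"(empty));
    asm volatile("int3");
}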
@ -2684,8 +2689,17 @@ int kvm_cpu_exec(CPUState *cpu)
#ifndef QEMU_NYX
DPRINTF("kvm_arch_handle_exit\n");
#else
printf("kvm_arch_handle_exit => %d\n", run->exit_reason);
assert(false);
#define CONFIG_UNKNOWN_ERROR_IS_PANIC
#ifndef CONFIG_UNKNOWN_ERROR_IS_PANIC
fprintf(stderr, "Unknown exit code (%d) => ABORT\n", run->exit_reason);
ret = kvm_arch_handle_exit(cpu, run);
assert(ret == 0);
#else
debug_fprintf("kvm_arch_handle_exit(%d) => panic\n", run->exit_reason);
ret = kvm_arch_handle_exit(cpu, run);
if (ret != 0)
handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
#endif
#endif
ret = kvm_arch_handle_exit(cpu, run);
break;

View File

@ -533,3 +533,20 @@ static void pc_q35_2_4_machine_options(MachineClass *m)
DEFINE_Q35_MACHINE(v2_4, "pc-q35-2.4", NULL,
pc_q35_2_4_machine_options);
#ifdef QEMU_NYX
static void pc_kAFL64_vmx_v1_0_machine_options(MachineClass *m)
{
pc_q35_4_2_machine_options(m);
m->alias = "kAFL64";
//m->is_default = 1;
m->desc = "kAFL64 PC (Q35 + ICH9, 2009)";
}
static void kAFL64_init(MachineState *machine)
{
pc_q35_init(machine);
}
DEFINE_PC_MACHINE(v1, "kAFL64-Q35", kAFL64_init, pc_kAFL64_vmx_v1_0_machine_options);
#endif
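With this registration in place, the frontend can select the new machine type by its alias, along the lines of qemu-system-x86_64 -machine kAFL64 (or the full name, -machine kAFL64-Q35); the remaining command-line flags depend on the frontend and are omitted here.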

View File

@ -479,6 +479,7 @@ static void ich9_lpc_rcba_update(ICH9LPCState *lpc, uint32_t rcba_old)
if (rcba_old & ICH9_LPC_RCBA_EN) {
memory_region_del_subregion(get_system_memory(), &lpc->rcrb_mem);
}
// Nyx snapshot reload fails here if ICH9_LPC_RCBA_EN=1
if (rcba & ICH9_LPC_RCBA_EN) {
memory_region_add_subregion_overlap(get_system_memory(),
rcba & ICH9_LPC_RCBA_BA_MASK,

View File

@ -2807,7 +2807,9 @@ static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
}
/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field);
int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
const VMStateField *field)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

View File

@ -177,7 +177,12 @@ typedef struct ICH9LPCState {
#define ICH9_LPC_RCBA 0xf0
#define ICH9_LPC_RCBA_BA_MASK Q35_MASK(32, 31, 14)
#ifdef QEMU_NYX
// Nyx snapshot restore fails on this
#define ICH9_LPC_RCBA_EN 0x0
#else
#define ICH9_LPC_RCBA_EN 0x1
#endif
#define ICH9_LPC_RCBA_DEFAULT 0x0
#define ICH9_LPC_PIC_NUM_PINS 16

View File

@ -7,6 +7,7 @@ synchronization.o \
page_cache.o \
kvm_nested.o \
debug.o \
trace_dump.o \
auxiliary_buffer.o \
mmh3.o \
nested_hypercalls.o \

View File

@ -25,6 +25,7 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include <stdbool.h>
#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/trace_dump.h"
/* experimental feature (currently broken)
* enabled via trace mode
@ -103,7 +104,9 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
GET_GLOBAL_STATE()->pt_trace_mode_force = true;
#endif
redqueen_set_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
GET_GLOBAL_STATE()->trace_mode = true;
redqueen_set_trace_mode();
pt_trace_dump_enable(true);
}
}
else {
@ -112,7 +115,9 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
GET_GLOBAL_STATE()->pt_trace_mode_force = false;
#endif
redqueen_unset_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
GET_GLOBAL_STATE()->trace_mode = false;
redqueen_unset_trace_mode();
pt_trace_dump_enable(false);
}
}
@ -166,6 +171,10 @@ void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
}
void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_sanitizer);
}
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_timeout);
}
@ -225,7 +234,12 @@ void reset_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer){
}
void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success){
//should refactor to let caller directly set the result codes
if (success == 2) {
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_starved);
} else {
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_success);
}
}
void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){

View File

@ -44,6 +44,8 @@ enum nyx_result_codes {
rc_timeout = 3,
rc_input_buffer_write = 4,
rc_aborted = 5,
rc_sanitizer = 6,
rc_starved = 7,
};
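rc_sanitizer and rc_starved extend the result protocol: the former distinguishes sanitizer reports from plain crashes, the latter marks executions that the agent flagged as starved via the RELEASE hypercall argument (see the hypercall.c hunk below). For context, a frontend consuming the auxiliary buffer might dispatch on these codes roughly as follows (a sketch; the action names are illustrative, not from the kAFL codebase):

/* Sketch: mapping Nyx execution results to frontend actions. */
switch (aux_buffer->result.exec_result_code) {
case rc_success:            keep_input_if_new_coverage(); break;
case rc_crash:              save_as_crash();              break;
case rc_sanitizer:          save_as_sanitizer_report();   break; /* new */
case rc_starved:            deprioritize_input();         break; /* new */
case rc_timeout:            save_as_timeout();            break;
case rc_input_buffer_write:
case rc_aborted:            handle_worker_error();        break;
}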
typedef struct auxilary_buffer_header_s{
@ -72,7 +74,7 @@ typedef struct auxilary_buffer_config_s{
/* trigger to enable / disable different QEMU-PT modes */
uint8_t redqueen_mode;
uint8_t trace_mode;
uint8_t trace_mode; /* dump decoded edge transitions to file */
uint8_t reload_mode;
uint8_t verbose_level;
@ -149,6 +151,7 @@ void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer);
void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config);
void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);

View File

@ -56,7 +56,6 @@ void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs){
int re_fd = 0;
int se_fd = 0;
int trace_fd = 0;
void write_re_result(char* buf){
int unused __attribute__((unused));
@ -65,20 +64,7 @@ void write_re_result(char* buf){
unused = write(re_fd, buf, strlen(buf));
}
void write_trace_result(redqueen_trace_t* trace_state){
//int fd;
int unused __attribute__((unused));
if (!trace_fd)
trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
redqueen_trace_write_file(trace_state, trace_fd);
//unused = write(trace_fd, buf, strlen(buf));
//close(fd);
}
void fsync_all_traces(void){
if (!trace_fd){
fsync(trace_fd);
}
void fsync_redqueen_files(void){
if (!se_fd){
fsync(se_fd);
}
@ -96,13 +82,6 @@ void write_se_result(char* buf){
//close(fd);
}
void delete_trace_files(void){
int unused __attribute__((unused));
if (!trace_fd)
trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = ftruncate(trace_fd, 0);
}
void delete_redqueen_files(void){
int unused __attribute__((unused));
if (!re_fd)

View File

@ -1,7 +1,8 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include "redqueen_trace.h"
#pragma once
//doesn't take ownership of path, num_addrs or addrs
void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs);
@ -12,14 +13,9 @@ void write_re_result(char* buf);
//doesn't take ownership of buf
void write_se_result(char* buf);
//doesn't take ownership of buf
void write_trace_result(redqueen_trace_t* trace_state);
//doesn't take ownership of buf
void write_debug_result(char* buf);
void delete_redqueen_files(void);
void delete_trace_files(void);
void fsync_all_traces(void);
void fsync_redqueen_files(void);

View File

@ -24,6 +24,7 @@ void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, u
config.bitmap_size = GET_GLOBAL_STATE()->shared_bitmap_size;
config.ijon_bitmap_size = GET_GLOBAL_STATE()->shared_ijon_bitmap_size;
config.payload_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
config.worker_id = GET_GLOBAL_STATE()->worker_id;
write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(host_config_t), cpu);
GET_GLOBAL_STATE()->get_host_config_done = true;

View File

@ -10,7 +10,7 @@ void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu,
#define NYX_HOST_MAGIC 0x4878794e
#define NYX_AGENT_MAGIC 0x4178794e
#define NYX_HOST_VERSION 1
#define NYX_HOST_VERSION 2
#define NYX_AGENT_VERSION 1
typedef struct host_config_s{
@ -19,6 +19,7 @@ typedef struct host_config_s{
uint32_t bitmap_size;
uint32_t ijon_bitmap_size;
uint32_t payload_buffer_size;
uint32_t worker_id;
/* more to come */
} __attribute__((packed)) host_config_t;
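Appending worker_id is the layout change behind the NYX_HOST_VERSION bump above: an agent built against version 1 would assume a shorter struct. An agent-side validation sketch (the hypercall wrapper and constant names follow the kAFL agent convention and are assumptions here; host_magic/host_version are the struct's leading members, not shown in this hunk):

host_config_t config = {0};
kAFL_hypercall(HYPERCALL_KAFL_GET_HOST_CONFIG, (uintptr_t)&config);
if (config.host_magic != NYX_HOST_MAGIC ||
    config.host_version != NYX_HOST_VERSION) {
    abort(); /* agent and QEMU-Nyx disagree on the protocol */
}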

View File

@ -210,7 +210,7 @@ static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uin
CPUX86State *env = &(X86_CPU(cpu))->env;
GET_GLOBAL_STATE()->parent_cr3 = env->cr[3] & 0xFFFFFFFFFFFFF000ULL;
QEMU_PT_PRINTF(CORE_PREFIX, "Payload CR3:\t%lx", (uint64_t)GET_GLOBAL_STATE()->parent_cr3 );
//print_48_paging2(GET_GLOBAL_STATE()->parent_cr3);
//print_48_pagetables(GET_GLOBAL_STATE()->parent_cr3);
if(hypercall_arg&0xFFF){
fprintf(stderr, "[QEMU-Nyx] Error: Payload buffer is not page-aligned! (0x%lx)\n", hypercall_arg);
@ -349,6 +349,13 @@ void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t
if (init_state){
init_state = false;
} else {
//printf(CORE_PREFIX, "Got STARVED notification (num=%llu)\n", run->hypercall.args[0]);
if (run->hypercall.args[0] > 0) {
GET_GLOBAL_STATE()->starved = 1;
} else {
GET_GLOBAL_STATE()->starved = 0;
}
synchronization_disable_pt(cpu);
release_print_once(cpu);
}
@ -435,13 +442,48 @@ static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cp
if(hypercall_enabled){
QEMU_PT_PRINTF(CORE_PREFIX, "Panic address:\t%lx", hypercall_arg);
write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD, PAYLOAD_BUFFER_SIZE, cpu);
switch (get_current_mem_mode(cpu)){
case mm_32_protected:
case mm_32_paging:
case mm_32_pae:
write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD_32, PAYLOAD_BUFFER_SIZE_32, cpu);
break;
case mm_64_l4_paging:
case mm_64_l5_paging:
write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD_64, PAYLOAD_BUFFER_SIZE_64, cpu);
break;
default:
abort();
break;
}
}
}
static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(hypercall_enabled){
QEMU_PT_PRINTF(CORE_PREFIX, "kASAN address:\t%lx", hypercall_arg);
switch (get_current_mem_mode(cpu)){
case mm_32_protected:
case mm_32_paging:
case mm_32_pae:
write_virtual_memory(hypercall_arg, (uint8_t*)KASAN_PAYLOAD_32, PAYLOAD_BUFFER_SIZE_32, cpu);
break;
case mm_64_l4_paging:
case mm_64_l5_paging:
write_virtual_memory(hypercall_arg, (uint8_t*)KASAN_PAYLOAD_64, PAYLOAD_BUFFER_SIZE_64, cpu);
break;
default:
abort();
break;
}
}
}
//#define PANIC_DEBUG
static void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static char reason[1024];
if(hypercall_enabled){
#ifdef PANIC_DEBUG
@ -550,6 +592,27 @@ static void handle_hypercall_kafl_panic_extended(struct kvm_run *run, CPUState *
}
}
static void handle_hypercall_kafl_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(hypercall_enabled){
#ifdef PANIC_DEBUG
if(hypercall_arg){
QEMU_PT_PRINTF(CORE_PREFIX, "ASan notification in user mode!");
} else{
QEMU_PT_PRINTF(CORE_PREFIX, "ASan notification in kernel mode!");
}
#endif
if(fast_reload_snapshot_exists(get_fast_reload_snapshot())){
synchronization_lock_asan_found();
//synchronization_stop_vm_kasan(cpu);
} else{
QEMU_PT_PRINTF(CORE_PREFIX, "KASAN detected during initialization of stage 1 or stage 2 loader");
//hypercall_snd_char(KAFL_PROTO_KASAN);
QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_KASAN");
}
}
}
static void handle_hypercall_kafl_lock(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_LOCK")){
@ -572,7 +635,7 @@ static void handle_hypercall_kafl_printf(struct kvm_run *run, CPUState *cpu, uin
#ifdef DEBUG_HPRINTF
fprintf(stderr, "%s %s\n", __func__, hprintf_buffer);
#endif
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, HPRINTF_SIZE)+1);
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, HPRINTF_SIZE));
synchronization_lock();
}
@ -671,81 +734,97 @@ void pt_set_disable_patches_pending(CPUState *cpu){
GET_GLOBAL_STATE()->patches_disable_pending = true;
}
void pt_enable_rqi_trace(CPUState *cpu){
if (GET_GLOBAL_STATE()->redqueen_state){
redqueen_set_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
}
}
void pt_disable_rqi_trace(CPUState *cpu){
if (GET_GLOBAL_STATE()->redqueen_state){
redqueen_unset_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
return;
}
}
static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
/* TODO: check via aux buffer if we should allow this hypercall during fuzzing */
/*
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
return;
}
*/
char filename[256] = {0};
uint64_t vaddr = hypercall_arg;
static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg)
{
kafl_dump_file_t file_obj;
memset((void*)&file_obj, 0, sizeof(kafl_dump_file_t));
if(read_virtual_memory(vaddr, (uint8_t*)&file_obj, sizeof(kafl_dump_file_t), cpu)){
void* page = malloc(0x1000);
read_virtual_memory(file_obj.file_name_str_ptr, (uint8_t*)&filename, sizeof(char)*256, cpu);
filename[255] = 0;
char* base_name = basename(filename);
char filename[256] = {0};
char* host_path = NULL;
assert(asprintf(&host_path, "%s/dump/%s", GET_GLOBAL_STATE()->workdir_path , base_name) != -1);
//fprintf(stderr, "dumping file %s -> %s (bytes %ld) in append_mode=%d\n", base_name, host_path, file_obj.bytes, file_obj.append);
FILE* f = NULL;
if(file_obj.append){
f = fopen(host_path, "a+");
uint64_t vaddr = hypercall_arg;
memset((void*)&file_obj, 0, sizeof(kafl_dump_file_t));
if (!read_virtual_memory(vaddr, (uint8_t*)&file_obj, sizeof(kafl_dump_file_t), cpu)){
fprintf(stderr, "Failed to read file_obj in %s. Skipping..\n", __func__);
goto err_out1;
}
else{
if (file_obj.file_name_str_ptr != 0) {
if (!read_virtual_memory(file_obj.file_name_str_ptr, (uint8_t*)filename, sizeof(filename)-1, cpu)) {
fprintf(stderr, "Failed to read file_name_str_ptr in %s. Skipping..\n", __func__);
goto err_out1;
}
filename[sizeof(filename)-1] = 0;
}
//fprintf(stderr, "%s: dump %lu fbytes from %s (append=%u)\n",
// __func__, file_obj.bytes, filename, file_obj.append);
// use a tempfile if file_name_ptr == NULL or points to empty string
if (0 == strnlen(filename, sizeof(filename))) {
strncpy(filename, "tmp.XXXXXX", sizeof(filename)-1);
}
char *base_name = basename(filename); // clobbers the filename buffer!
assert(asprintf(&host_path, "%s/dump/%s", GET_GLOBAL_STATE()->workdir_path , base_name) != -1);
// check if base_name is mkstemp() pattern, otherwise write/append to exact name
char *pattern = strstr(base_name, "XXXXXX");
if (pattern) {
unsigned suffix = strlen(pattern) - strlen("XXXXXX");
f = fdopen(mkstemps(host_path, suffix), "w+");
if (file_obj.append) {
fprintf(stderr, "Warning in %s: Writing unique generated file in append mode?\n", __func__);
}
} else {
if (file_obj.append){
f = fopen(host_path, "a+");
} else{
f = fopen(host_path, "w+");
}
}
if (!f) {
fprintf(stderr, "Error in %s(%s): %s\n", host_path, __func__, strerror(errno));
goto err_out1;
}
int32_t bytes = file_obj.bytes;
uint32_t pos = 0;
int32_t bytes = file_obj.bytes;
void* page = malloc(PAGE_SIZE);
uint32_t written = 0;
while(bytes > 0){
QEMU_PT_PRINTF(CORE_PREFIX, "%s: dump %d bytes to %s (append=%u)\n",
__func__, bytes, host_path, file_obj.append);
if(bytes >= 0x1000){
read_virtual_memory(file_obj.data_ptr+pos, (uint8_t*)page, 0x1000, cpu);
fwrite(page, 1, 0x1000, f);
while (bytes > 0) {
if (bytes >= PAGE_SIZE) {
read_virtual_memory(file_obj.data_ptr+pos, (uint8_t*)page, PAGE_SIZE, cpu);
written = fwrite(page, 1, PAGE_SIZE, f);
}
else{
else {
read_virtual_memory(file_obj.data_ptr+pos, (uint8_t*)page, bytes, cpu);
fwrite(page, 1, bytes, f);
written = fwrite(page, 1, bytes, f);
break;
}
bytes -= 0x1000;
pos += 0x1000;
if (!written) {
fprintf(stderr, "Error in %s(%s): %s\n", host_path, __func__, strerror(errno));
goto err_out2;
}
bytes -= written;
pos += written;
}
fclose(f);
free(host_path);
err_out2:
free(page);
}
fclose(f);
err_out1:
free(host_path);
}
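The rewritten handler now tolerates a missing file name (falling back to a tmp.XXXXXX template expanded via mkstemps()), neutralizes path traversal via basename(), and checks fwrite() results. A guest-side usage sketch, assuming the kAFL agent's hypercall wrapper and the kafl_dump_file_t layout implied by the field accesses above:

kafl_dump_file_t req = {
    .file_name_str_ptr = (uintptr_t)"trace.log", /* NULL or "" -> host picks tmp.XXXXXX */
    .data_ptr          = (uintptr_t)data,
    .bytes             = data_len,
    .append            = 1, /* append to <workdir>/dump/trace.log */
};
kAFL_hypercall(HYPERCALL_KAFL_DUMP_FILE, (uintptr_t)&req);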
static void handle_hypercall_kafl_persist_page_past_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
@ -804,7 +883,9 @@ int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall
ret = 0;
break;
case KVM_EXIT_KAFL_SUBMIT_KASAN:
nyx_abort((char*)"Deprecated hypercall called (HYPERCALL_SUBMIT_KASAN)...");
//timeout_reload_pending = false;
//fprintf(stderr, "KVM_EXIT_KAFL_SUBMIT_KASAN\n");
handle_hypercall_kafl_submit_kasan(run, cpu, arg);
ret = 0;
break;
case KVM_EXIT_KAFL_PANIC:
@ -814,7 +895,9 @@ int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall
ret = 0;
break;
case KVM_EXIT_KAFL_KASAN:
nyx_abort((char*)"Deprecated hypercall called (HYPERCALL_KAFL_KASAN)...");
//timeout_reload_pending = false;
//fprintf(stderr, "KVM_EXIT_KAFL_KASAN\n");
handle_hypercall_kafl_kasan(run, cpu, arg);
ret = 0;
break;
case KVM_EXIT_KAFL_LOCK:

View File

@ -21,7 +21,8 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#define PAYLOAD_BUFFER_SIZE 26
#define PAYLOAD_BUFFER_SIZE_64 26
#define PAYLOAD_BUFFER_SIZE_32 20
#define KAFL_MODE_64 0
#define KAFL_MODE_32 1
@ -46,18 +47,40 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define PANIC_PAYLOAD_64 "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* Panic Notifier Payload (x86-32)
* fa cli
* b8 1f 00 00 00 mov $0x1f,%eax
* bb 08 00 00 00 mov $0x8,%ebx
* b9 00 00 00 00 mov $0x0,%ecx
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD_32 "\xFA\xB8\x1F\x00\x00\x00\xBB\x08\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* KASAN Notifier Payload (x86-64)
* fa cli
* 48 c7 c0 1f 00 00 00 mov rax,0x1f
* 48 c7 c3 08 00 00 00 mov rbx,0x9
* 48 c7 c3 09 00 00 00 mov rbx,0x9
* 48 c7 c1 00 00 00 00 mov rcx,0x0
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define KASAN_PAYLOAD_64 "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* KASAN Notifier Payload (x86-32)
* fa cli
* b8 1f 00 00 00 mov $0x1f,%eax
* bb 09 00 00 00 mov $0x9,%ebx
* b9 00 00 00 00 mov $0x0,%ecx
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD_32 "\xFA\xB8\x1F\x00\x00\x00\xBB\x09\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1\xF4"
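All four payload blobs encode the same hypercall convention and differ only in operand size: rax/eax carries the Nyx hypercall identifier 0x1f, rbx/ebx selects the hypercall (8 = panic, 9 = kASAN), rcx/ecx carries the argument, and the surrounding cli/hlt stops the vCPU deterministically after the vmcall. The 64-bit blobs correspond roughly to this inline-assembly sketch (an equivalent rendering, not code from this commit):

static inline void nyx_notify(uint64_t nr, uint64_t arg) /* nr: 8 = panic, 9 = kASAN */
{
    asm volatile("cli; vmcall; hlt"
                 :: "a"(0x1fUL), /* Nyx hypercall identifier */
                    "b"(nr),
                    "c"(arg)
                 : "memory");
}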
/*
* printk Notifier Payload (x86-64)
@ -92,8 +115,7 @@ void hypercall_reload(void);
void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page);
@ -109,8 +131,6 @@ void pt_enable_rqo(CPUState *cpu);
void pt_disable_rqo(CPUState *cpu);
void pt_enable_rqi(CPUState *cpu);
void pt_disable_rqi(CPUState *cpu);
void pt_enable_rqi_trace(CPUState *cpu);
void pt_disable_rqi_trace(CPUState *cpu);
void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_instruction_mode);
void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval);
void pt_set_enable_patches_pending(CPUState *cpu);

View File

@ -51,6 +51,7 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include "nyx/state/state.h"
#include "nyx/sharedir.h"
#include "nyx/helpers.h"
#include "nyx/trace_dump.h"
#include <time.h>
@ -89,6 +90,7 @@ typedef struct nyx_interface_state {
uint32_t input_buffer_size;
bool dump_pt_trace;
bool edge_cb_trace;
bool redqueen;
@ -260,30 +262,6 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.lock", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.addr", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.dump", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache", workdir) != -1);
init_page_cache(tmp);
@ -302,10 +280,14 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
if(s->dump_pt_trace){
assert(asprintf(&tmp, "%s/pt_trace_dump_%d", workdir, id) != -1);
pt_open_pt_trace_file(tmp);
pt_trace_dump_init(tmp);
free(tmp);
}
if(s->edge_cb_trace){
redqueen_trace_init();
}
assert(asprintf(&tmp, "%s/aux_buffer_%d", workdir, id) != -1);
/*
@ -398,6 +380,7 @@ static void nyx_realize(DeviceState *dev, Error **errp){
if(s->cow_primary_size){
set_global_cow_cache_primary_size(s->cow_primary_size);
}
GET_GLOBAL_STATE()->worker_id = s->worker_id;
if (!s->workdir || !verify_workdir_state(s, errp)){
fprintf(stderr, "[QEMU-Nyx] Error: work dir...\n");
@ -449,6 +432,7 @@ static Property nyx_interface_properties[] = {
DEFINE_PROP_UINT32("bitmap_size", nyx_interface_state, bitmap_size, DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_UINT32("input_buffer_size", nyx_interface_state, input_buffer_size, DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_BOOL("dump_pt_trace", nyx_interface_state, dump_pt_trace, false),
DEFINE_PROP_BOOL("edge_cb_trace", nyx_interface_state, edge_cb_trace, false),
DEFINE_PROP_END_OF_LIST(),

View File

@ -41,39 +41,38 @@ static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr, bool read_f
#define x86_64_PAGE_SIZE 0x1000
#define x86_64_PAGE_MASK ~(x86_64_PAGE_SIZE - 1)
static void set_mem_mode(CPUState *cpu){
mem_mode_t get_current_mem_mode(CPUState *cpu){
kvm_arch_get_registers(cpu);
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
if (!(env->cr[0] & CR0_PG_MASK)) {
GET_GLOBAL_STATE()->mem_mode = mm_32_protected;
return;
return mm_32_protected;
}
else{
if (env->cr[4] & CR4_PAE_MASK) {
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
GET_GLOBAL_STATE()->mem_mode = mm_64_l5_paging;
return;
return mm_64_l5_paging;
} else {
GET_GLOBAL_STATE()->mem_mode = mm_64_l4_paging;
return;
return mm_64_l4_paging;
}
}
else{
GET_GLOBAL_STATE()->mem_mode = mm_32_pae;
return;
return mm_32_pae;
}
}
else {
GET_GLOBAL_STATE()->mem_mode = mm_32_paging;
return;
return mm_32_paging;
}
}
return;
return mm_unkown;
}
static void set_mem_mode(CPUState *cpu){
GET_GLOBAL_STATE()->mem_mode = get_current_mem_mode(cpu);
}
/* Warning: This might break memory handling for hypervisor fuzzing => FIXME LATER */
@ -215,8 +214,14 @@ bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t sh
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if(!memcmp(block->idstr, "pc.ram", 6)){
/* TODO: put assert calls here */
munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE);
mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE));
if (munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE) == -1) {
fprintf(stderr, "%s: munmap failed!\n", __func__);
assert(false);
}
if (mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE)) == MAP_FAILED) {
fprintf(stderr, "%s: mmap failed!\n", __func__);
assert(false);
}
//printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE));
//printf("MMAP: %p\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE)));
@ -285,6 +290,7 @@ void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr,
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu){
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size);
assert(GET_GLOBAL_STATE()->shared_payload_buffer_size % x86_64_PAGE_SIZE == 0);
RAMBlock *block;
refresh_kvm_non_dirty(cpu);
@ -563,221 +569,240 @@ void remove_all_breakpoints(CPUState *cpu){
}
#define PPAGE_SIZE 0x1000
#define PENTRIES 0x200
#define PLEVEL_4_SHIFT 12
#define PLEVEL_3_SHIFT 21
#define PLEVEL_2_SHIFT 30
#define PLEVEL_1_SHIFT 39
#define SIGN_EXTEND_TRESHOLD 0x100
#define SIGN_EXTEND 0xFFFF000000000000ULL
#define PAGETABLE_MASK 0x1FFFFFFFFF000ULL
#define PML4_ENTRY_MASK 0x1FFFFFFFFF000ULL
#define PML3_ENTRY_MASK 0x1FFFFC0000000ULL
#define PML2_ENTRY_MASK 0x1FFFFFFE00000ULL
#define PPAGE_SIZE 0x1000
#define CHECK_BIT(var,pos) !!(((var) & (1ULL<<(pos))))
static void write_address(uint64_t address, uint64_t size, uint64_t prot){
//fprintf(stderr, "%s %lx %lx %lx\n", __func__, address, size, prot);
static uint64_t next_address = PAGETABLE_MASK;
static uint64_t last_address = 0x0;
static uint64_t last_prot = 0;
if((address != next_address || prot != last_prot)){
/* do not print guard pages or empty pages without any permissions */
if(last_address && prot && (last_address+size != next_address || prot != last_prot)){
fprintf(stderr, "%016lx - %016lx %c%c%c\n",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63)? 'X' : '-');
static bool read_memory(uint64_t address, uint64_t* buffer, size_t size, bool read_from_snapshot) {
if (unlikely(address == INVALID_ADDRESS)) {
return false;
}
last_address = address;
}
next_address = address+size;
last_prot = prot;
if (unlikely(read_from_snapshot)) {
return read_snapshot_memory(
get_fast_reload_snapshot(),
address, (uint8_t *)buffer, size);
}
// NB: This API exposed by exec.h doesn't signal failure, although it can
// fail. Figure out how to expose the address space object instead and then
// we can actually check the return value here. Until then, will clear the
// buffer contents first.
memset(buffer, 0, size);
cpu_physical_memory_rw(address, (uint8_t*)buffer, size, false);
return true;
}
void print_48_paging2(uint64_t cr3){
uint64_t paging_entries_level_1[PENTRIES];
uint64_t paging_entries_level_2[PENTRIES];
uint64_t paging_entries_level_3[PENTRIES];
uint64_t paging_entries_level_4[PENTRIES];
uint64_t address_identifier_1, address_identifier_2, address_identifier_3, address_identifier_4;
uint32_t i1, i2, i3,i4;
cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_level_1, PPAGE_SIZE, false);
for(i1 = 0; i1 < 512; i1++){
if(paging_entries_level_1[i1]){
address_identifier_1 = ((uint64_t)i1) << PLEVEL_1_SHIFT;
if (i1 & SIGN_EXTEND_TRESHOLD){
address_identifier_1 |= SIGN_EXTEND;
}
if(CHECK_BIT(paging_entries_level_1[i1], 0)){ /* otherwise swapped out */
cpu_physical_memory_rw((paging_entries_level_1[i1]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_2, PPAGE_SIZE, false);
for(i2 = 0; i2 < PENTRIES; i2++){
if(paging_entries_level_2[i2]){
address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) + address_identifier_1;
if (CHECK_BIT(paging_entries_level_2[i2], 0)){ /* otherwise swapped out */
if((paging_entries_level_2[i2]&PAGETABLE_MASK) == (paging_entries_level_1[i1]&PAGETABLE_MASK)){
/* loop */
continue;
}
if (CHECK_BIT(paging_entries_level_2[i2], 7)){
write_address(address_identifier_2, 0x40000000, (uint64_t)paging_entries_level_2[i2] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
/* otherwise this PDPE references a 1GB page */
cpu_physical_memory_rw((paging_entries_level_2[i2]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_3, PPAGE_SIZE, false);
for(i3 = 0; i3 < PENTRIES; i3++){
if(paging_entries_level_3[i3]){
address_identifier_3 = (((uint64_t)i3) << PLEVEL_3_SHIFT) + address_identifier_2;
if (CHECK_BIT(paging_entries_level_3[i3], 0)){ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_level_3[i3], 7)){
write_address(address_identifier_3, 0x200000, (uint64_t)paging_entries_level_3[i3] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
cpu_physical_memory_rw((paging_entries_level_3[i3]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_4, PPAGE_SIZE, false);
for(i4 = 0; i4 < PENTRIES; i4++){
if(paging_entries_level_4[i4]){
address_identifier_4 = (((uint64_t)i4) << PLEVEL_4_SHIFT) + address_identifier_3;
if (CHECK_BIT(paging_entries_level_4[i4], 0)){
write_address(address_identifier_4, 0x1000, (uint64_t)paging_entries_level_4[i4] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
}
}
}
}
}
}
}
}
}
}
}
}
}
write_address(0, 0x1000, 0);
__attribute__((always_inline)) inline
static bool bit(uint64_t value, uint8_t lsb) {
return (value >> lsb) & 1;
}
static uint64_t* load_page_table(uint64_t page_table_address, uint64_t* paging_entries_buffer, uint8_t level, bool read_from_snapshot, bool *success){
if(page_table_address == INVALID_ADDRESS){
*success = false;
}
if (read_from_snapshot){
*success = read_snapshot_memory(get_fast_reload_snapshot(), page_table_address, (uint8_t *) paging_entries_buffer, PPAGE_SIZE);
}
else{
cpu_physical_memory_rw(page_table_address, (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false);
*success = true; /* fix this */
}
return paging_entries_buffer;
__attribute__((always_inline)) inline
static uint64_t bits(uint64_t value, uint8_t lsb, uint8_t msb) {
return (value & ((0xffffffffffffffffull >> (64 - (msb - lsb + 1))) << lsb)) >> lsb;
}
static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr, bool read_from_snapshot){
/* signedness broken af -> fix me! */
uint16_t pml_4_index = (addr & 0xFF8000000000ULL) >> 39;
uint16_t pml_3_index = (addr & 0x0007FC0000000UL) >> 30;
uint16_t pml_2_index = (addr & 0x000003FE00000UL) >> 21;
uint16_t pml_1_index = (addr & 0x00000001FF000UL) >> 12;
uint64_t address_identifier_4;
uint64_t paging_entries_buffer[PENTRIES];
uint64_t* paging_entries_buffer_ptr = NULL;
uint64_t page_table_address = 0;
bool success = false;
page_table_address = (cr3&PAGETABLE_MASK);
paging_entries_buffer_ptr = load_page_table(page_table_address, paging_entries_buffer, 0, read_from_snapshot, &success);
if (unlikely(success == false)){
goto fail;
// Helper function to load an entire pagetable table. These are PENTRIES
// 64-bit entries, so entries must point to a sufficiently large buffer.
static bool load_table(uint64_t address, uint64_t* entries, bool read_from_snapshot) {
if (unlikely(!read_memory(address, entries, 512 * sizeof(*entries), read_from_snapshot))) {
return false;
}
if(paging_entries_buffer_ptr[pml_4_index]){
address_identifier_4 = ((uint64_t)pml_4_index) << PLEVEL_1_SHIFT;
if (pml_4_index & SIGN_EXTEND_TRESHOLD){
address_identifier_4 |= SIGN_EXTEND;
}
if(CHECK_BIT(paging_entries_buffer_ptr[pml_4_index], 0)){ /* otherwise swapped out */
return true;
}
page_table_address = (paging_entries_buffer_ptr[pml_4_index]&PAGETABLE_MASK);
paging_entries_buffer_ptr = load_page_table(page_table_address, paging_entries_buffer, 1, read_from_snapshot, &success);
if (unlikely(success == false)){
goto fail;
// Helper function to load a single pagetable entry. We simplify things by
// returning the same invalid value (0) for both non-present entries and
// any other error conditions, since we don't need to handle these cases
// differently.
static uint64_t load_entry(uint64_t address, uint64_t index,
bool read_from_snapshot) {
uint64_t entry = 0;
if (unlikely(!read_memory(address + (index * sizeof(entry)), &entry, sizeof(entry),
read_from_snapshot))) {
return 0;
}
if(paging_entries_buffer_ptr[pml_3_index]){
if (CHECK_BIT(paging_entries_buffer_ptr[pml_3_index], 0)){ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_buffer_ptr[pml_3_index], 7)){
/* 1GB PAGE */
return (paging_entries_buffer_ptr[pml_3_index] & PML3_ENTRY_MASK) | (0x7FFFFFFF & addr);
}
else{
page_table_address = (paging_entries_buffer_ptr[pml_3_index]&PAGETABLE_MASK);
paging_entries_buffer_ptr = load_page_table(page_table_address, paging_entries_buffer, 2, read_from_snapshot, &success);
if (unlikely(success == false)){
goto fail;
// Check that the entry is present.
if (unlikely(!bit(entry, 0))) {
return 0;
}
if(paging_entries_buffer_ptr[pml_2_index]){
if (CHECK_BIT(paging_entries_buffer_ptr[pml_2_index], 0)){ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_buffer_ptr[pml_2_index], 7)){
/* 2MB PAGE */
return (paging_entries_buffer_ptr[pml_2_index] & PML2_ENTRY_MASK) | (0x3FFFFF & addr);
}
else{
return entry;
}
page_table_address = (paging_entries_buffer_ptr[pml_2_index]&PAGETABLE_MASK);
paging_entries_buffer_ptr = load_page_table(page_table_address, paging_entries_buffer, 3, read_from_snapshot, &success);
static void print_page(uint64_t address, uint64_t entry, size_t size, bool s, bool w, bool x) {
fprintf(stderr, " %c%c%c %016lx %zx",
s ? 's' : 'u', w ? 'w' : 'r', x ? 'x' : '-',
(bits(entry, 12, 51) << 12) & ~(size - 1), size);
}
if (unlikely(success == false)){
goto fail;
static void print_48_pte(uint64_t address, uint64_t pde_entry, bool read_from_snapshot,
bool s, bool w, bool x) {
uint64_t pte_address = bits(pde_entry, 12, 51) << 12;
uint64_t pte_table[PENTRIES];
if (!load_table(pte_address, pte_table, read_from_snapshot)) {
return;
}
if(paging_entries_buffer_ptr[pml_1_index]){
if (CHECK_BIT(paging_entries_buffer_ptr[pml_1_index], 0)){
/* 4 KB PAGE */
return (paging_entries_buffer_ptr[pml_1_index] & PML4_ENTRY_MASK) | (0xFFF & addr);
}
}
}
}
}
}
}
}
}
for (size_t i = 0; i < PENTRIES; ++i) {
uint64_t entry = pte_table[i];
if (entry) {
fprintf(stderr, "\n 1 %016lx [%ld]", address | i << 12, entry);
}
fail:
if (!bit(entry, 0)) {
// Not present.
} else {
print_page(address | i << 12, entry, 0x1000,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
}
}
}
static void print_48_pde(uint64_t address, uint64_t pdpte_entry, bool read_from_snapshot,
bool s, bool w, bool x) {
uint64_t pde_address = bits(pdpte_entry, 12, 51) << 12;
uint64_t pde_table[PENTRIES];
if (!load_table(pde_address, pde_table, read_from_snapshot)) {
return;
}
for (size_t i = 0; i < PENTRIES; ++i) {
uint64_t entry = pde_table[i];
if (entry) {
fprintf(stderr, "\n 2 %016lx [%ld]", address | i << 21, entry);
}
if (!bit(entry, 0)) {
// Not present.
} else if (bit(entry, 7)) {
print_page(address | i << 21, entry, 0x200000,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
} else {
print_48_pte(address | i << 21, entry, read_from_snapshot,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
}
}
}
static void print_48_pdpte(uint64_t address, uint64_t pml4_entry, bool read_from_snapshot,
bool s, bool w, bool x) {
uint64_t pdpte_address = bits(pml4_entry, 12, 51) << 12;
uint64_t pdpte_table[PENTRIES];
if (!load_table(pdpte_address, pdpte_table, read_from_snapshot)) {
return;
}
for (size_t i = 0; i < PENTRIES; ++i) {
uint64_t entry = pdpte_table[i];
if (entry) {
fprintf(stderr, "\n 3 %016lx [%ld]", address | i << 30, entry);
}
if (!bit(entry, 0)) {
// Not present.
} else if (bit(entry, 7)) {
print_page(address | i << 30, entry, 0x40000000,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
} else {
print_48_pde(address | i << 30, entry, read_from_snapshot,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
}
}
}
static void print_48_pagetables_(uint64_t cr3, bool read_from_snapshot) {
uint64_t pml4_address = bits(cr3, 12, 51) << 12;
uint64_t pml4_table[PENTRIES];
if (!load_table(pml4_address, pml4_table, read_from_snapshot)) {
return;
}
for (size_t i = 0; i < PENTRIES; ++i) {
uint64_t entry = pml4_table[i];
uint64_t address = i << 39;
// Ensure canonical virtual address
if (bit(address, 47)) {
address |= 0xffff000000000000ul;
}
if (entry) {
fprintf(stderr, "\n4 %016lx [%ld]", address, entry);
}
if (bit(entry, 0)) {
print_48_pdpte(address, entry, read_from_snapshot,
!bit(entry, 2), bit(entry, 1), !bit(entry, 63));
}
}
}
void print_48_pagetables(uint64_t cr3) {
static bool printed = false;
if (!printed) {
fprintf(stderr, "pagetables for cr3 %lx", cr3);
print_48_pagetables_(cr3, false);
printed = true;
fprintf(stderr, "\n");
}
}
static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr, bool read_from_snapshot) {
uint64_t pml4_address = bits(cr3, 12, 51) << 12;
uint64_t pml4_offset = bits(addr, 39, 47);
uint64_t pml4_entry = load_entry(pml4_address, pml4_offset, read_from_snapshot);
if (unlikely(!pml4_entry)) {
return INVALID_ADDRESS;
}
uint64_t pdpte_address = bits(pml4_entry, 12, 51) << 12;
uint64_t pdpte_offset = bits(addr, 30, 38);
uint64_t pdpte_entry = load_entry(pdpte_address, pdpte_offset, read_from_snapshot);
if (unlikely(!pdpte_entry)) {
return INVALID_ADDRESS;
}
if (unlikely(bit(pdpte_entry, 7))) {
// 1GByte page translation.
uint64_t page_address = bits(pdpte_entry, 12, 51) << 12;
uint64_t page_offset = bits(addr, 0, 29);
return page_address + page_offset;
}
uint64_t pde_address = bits(pdpte_entry, 12, 51) << 12;
uint64_t pde_offset = bits(addr, 21, 29);
uint64_t pde_entry = load_entry(pde_address, pde_offset, read_from_snapshot);
if (unlikely(!pde_entry)) {
return INVALID_ADDRESS;
}
if (unlikely(bit(pde_entry, 7))) {
// 2MByte page translation.
uint64_t page_address = bits(pde_entry, 12, 51) << 12;
uint64_t page_offset = bits(addr, 0, 20);
return page_address + page_offset;
}
uint64_t pte_address = bits(pde_entry, 12, 51) << 12;
uint64_t pte_offset = bits(addr, 12, 20);
uint64_t pte_entry = load_entry(pte_address, pte_offset, read_from_snapshot);
if (unlikely(!pte_entry)) {
return INVALID_ADDRESS;
}
// 4Kbyte page translation.
uint64_t page_address = bits(pte_entry, 12, 51) << 12;
uint64_t page_offset = bits(addr, 0, 11);
return page_address + page_offset;
}
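As a worked example of the bits() arithmetic (values computed here, not taken from the commit), translating the canonical address 0xffff888000201000 uses the standard 9-9-9-9-12 split of a 48-bit virtual address:

uint64_t addr   = 0xffff888000201000ULL;
uint64_t pml4_i = bits(addr, 39, 47); /* 0x111 (273) */
uint64_t pdpt_i = bits(addr, 30, 38); /* 0x000 */
uint64_t pd_i   = bits(addr, 21, 29); /* 0x001 */
uint64_t pt_i   = bits(addr, 12, 20); /* 0x001 */
uint64_t off    = bits(addr,  0, 11); /* 0x000 */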
//#define DEBUG_48BIT_WALK

View File

@ -26,6 +26,7 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include <linux/kvm.h>
#include "qemu-common.h"
#include "sysemu/kvm_int.h"
#include "nyx/types.h"
#define MEM_SPLIT_START 0x0C0000000
#define MEM_SPLIT_END 0x100000000
@ -34,6 +35,8 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define address_to_ram_offset(offset) (offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset)
#define ram_offset_to_address(offset) (offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset)
mem_mode_t get_current_mem_mode(CPUState *cpu);
uint64_t get_paging_phys_addr(CPUState *cpu, uint64_t cr3, uint64_t addr);
bool read_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
@ -63,7 +66,7 @@ bool dump_page_cr3_snapshot(uint64_t address, uint8_t* data, CPUState *cpu, uint
bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3);
bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3);
void print_48_paging2(uint64_t cr3);
void print_48_pagetables(uint64_t cr3);
bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu);

View File

@ -25,6 +25,9 @@
#define UNMAPPED_PAGE 0xFFFFFFFFFFFFFFFFULL
static void page_cache_unlock(page_cache_t* self);
static void page_cache_lock(page_cache_t* self);
#ifndef STANDALONE_DECODER
static bool reload_addresses(page_cache_t* self){
#else
@ -40,6 +43,8 @@ bool reload_addresses(page_cache_t* self){
if(self_offset != self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE){
//fprintf(stderr, "Reloading files ...\n");
page_cache_lock(self); // don't read while someone else is writing?
lseek(self->fd_address_file, self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE, SEEK_SET);
offset = self->num_pages;
while(read(self->fd_address_file, &value, PAGE_CACHE_ADDR_LINE_SIZE)){
@ -80,6 +85,8 @@ bool reload_addresses(page_cache_t* self){
self->num_pages = self_offset/PAGE_CACHE_ADDR_LINE_SIZE;
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
page_cache_unlock(self);
return true;
}
@ -359,12 +366,12 @@ page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_w
self->lookup = kh_init(PC_CACHE);
self->fd_page_file = open(tmp1, O_CLOEXEC | O_RDWR, S_IRWXU);
self->fd_address_file = open(tmp2, O_CLOEXEC | O_RDWR, S_IRWXU);
self->fd_page_file = open(tmp1, O_CLOEXEC | O_CREAT | O_RDWR, 0644);
self->fd_address_file = open(tmp2, O_CLOEXEC | O_CREAT | O_RDWR, 0644);
#ifndef STANDALONE_DECODER
self->cpu = cpu;
self->fd_lock = open(tmp3, O_CLOEXEC);
self->fd_lock = open(tmp3, O_CLOEXEC | O_CREAT, 0644);
assert(self->fd_lock > 0);
#else
if(self->fd_page_file == -1 || self->fd_address_file == -1){
@ -381,7 +388,11 @@ page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_w
self->last_page = 0xFFFFFFFFFFFFFFFF;
self->last_addr = 0xFFFFFFFFFFFFFFFF;
#ifndef STANDALONE_DECODER
QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "%s (%s - %s)", __func__, tmp1, tmp2);
#else
QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "%s (%s - %s) WORD_WIDTH: %d", __func__, tmp1, tmp2, disassembler_word_width);
#endif
free(tmp3);
free(tmp2);
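Adding O_CREAT here pairs with the verify_workdir_state() checks deleted in the interface.c hunk above: the frontend no longer has to pre-create page_cache.{addr,dump,lock}. The page_cache_lock()/page_cache_unlock() bodies declared at the top of the file are not shown in this diff; a plausible flock()-based sketch of how the lock file could coordinate multiple workers (an assumption, the actual implementation may differ):

#include <sys/file.h>

static void page_cache_lock(page_cache_t* self){
    assert(flock(self->fd_lock, LOCK_EX) == 0); /* block until exclusive */
}

static void page_cache_unlock(page_cache_t* self){
    assert(flock(self->fd_lock, LOCK_UN) == 0);
}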

View File

@ -34,36 +34,21 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include "nyx/memory_access.h"
#include "nyx/interface.h"
#include "nyx/debug.h"
#include "nyx/file_helper.h"
#ifdef CONFIG_REDQUEEN
#include "nyx/redqueen.h"
#include "nyx/redqueen_patch.h"
#include "nyx/patcher.h"
#endif
#include "nyx/page_cache.h"
#include "nyx/state/state.h"
#include <libxdc.h>
#include "nyx/helpers.h"
#include "nyx/trace_dump.h"
#include "nyx/redqueen_trace.h"
#define PT_BUFFER_MMAP_ADDR 0x3ffff0000000
uint32_t state_byte = 0;
uint32_t last = 0;
int pt_trace_dump_fd = 0;
bool should_dump_pt_trace= false;
void pt_open_pt_trace_file(char* filename){
printf("using pt trace at %s",filename);
pt_trace_dump_fd = open(filename, O_WRONLY);
should_dump_pt_trace = true;
assert(pt_trace_dump_fd >= 0);
}
void pt_trucate_pt_trace_file(void){
if(should_dump_pt_trace){
assert(lseek(pt_trace_dump_fd, 0, SEEK_SET) == 0);
assert(ftruncate(pt_trace_dump_fd, 0)==0);
}
}
static void pt_set(CPUState *cpu, run_on_cpu_data arg){
asm volatile("" ::: "memory");
}
@ -98,12 +83,6 @@ static inline int pt_ioctl(int fd, unsigned long request, unsigned long arg){
return ioctl(fd, request, arg);
}
static inline uint64_t mix_bits(uint64_t v) {
v ^= (v >> 31);
v *= 0x7fb5d329728ea185;
return v;
}
#ifdef DUMP_AND_DEBUG_PT
void dump_pt_trace(void* buffer, int bytes){
static FILE* f = NULL;
@ -131,12 +110,13 @@ void dump_pt_trace(void* buffer, int bytes){
#endif
void pt_dump(CPUState *cpu, int bytes){
if(should_dump_pt_trace){
assert(bytes == write(pt_trace_dump_fd, cpu->pt_mmap, bytes));
}
//pt_write_pt_dump_file(cpu->pt_mmap, bytes);
if(!(GET_GLOBAL_STATE()->redqueen_state && GET_GLOBAL_STATE()->redqueen_state->intercept_mode)){
if (GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->decoder_page_fault == false && GET_GLOBAL_STATE()->decoder && !GET_GLOBAL_STATE()->dump_page){
GET_GLOBAL_STATE()->pt_trace_size += bytes;
//dump_pt_trace(cpu->pt_mmap, bytes);
pt_write_pt_dump_file(cpu->pt_mmap, bytes);
decoder_result_t result = libxdc_decode(GET_GLOBAL_STATE()->decoder, cpu->pt_mmap, bytes);
switch(result){
case decoder_success:
@ -150,7 +130,7 @@ void pt_dump(CPUState *cpu, int bytes){
GET_GLOBAL_STATE()->decoder_page_fault_addr = libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder);
break;
case decoder_unkown_packet:
fprintf(stderr, "WARNING: libxdc_decode returned decoder_error\n");
fprintf(stderr, "WARNING: libxdc_decode returned unknown_packet\n");
break;
case decoder_error:
fprintf(stderr, "WARNING: libxdc_decode returned decoder_error\n");
@ -165,8 +145,11 @@ int pt_enable(CPUState *cpu, bool hmp_mode){
if(!fast_reload_set_bitmap(get_fast_reload_snapshot())){
coverage_bitmap_reset();
}
//pt_reset_bitmap();
pt_trucate_pt_trace_file();
if (GET_GLOBAL_STATE()->trace_mode) {
redqueen_trace_reset();
alt_bitmap_reset();
}
pt_truncate_pt_dump_file();
return pt_cmd(cpu, KVM_VMX_PT_ENABLE, hmp_mode);
}
@ -248,6 +231,10 @@ void pt_init_decoder(CPUState *cpu){
GET_GLOBAL_STATE()->decoder = libxdc_init(filters, (void* (*)(void*, uint64_t, bool*))page_cache_fetch2, GET_GLOBAL_STATE()->page_cache, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_size);
libxdc_register_bb_callback(GET_GLOBAL_STATE()->decoder, (void (*)(void*, disassembler_mode_t, uint64_t, uint64_t))redqueen_callback, GET_GLOBAL_STATE()->redqueen_state);
alt_bitmap_init(
GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_size);
}
int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode){

View File

@ -24,9 +24,6 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
void pt_init_decoder(CPUState *cpu);
void pt_reset_bitmap(void);
void pt_setup_bitmap(void* ptr);
int pt_enable(CPUState *cpu, bool hmp_mode);
int pt_disable(CPUState *cpu, bool hmp_mode);
int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode);
@ -39,9 +36,6 @@ void pt_post_kvm_run(CPUState *cpu);
void pt_handle_overflow(CPUState *cpu);
void pt_dump(CPUState *cpu, int bytes);
void pt_bitmap(uint64_t from, uint64_t to);
void pt_open_pt_trace_file(char* filename);
void pt_trucate_pt_trace_file(void);
#endif

View File

@ -49,7 +49,6 @@ redqueen_t* new_rq_state(CPUState *cpu, page_cache_t* page_cache){
res->cpu = cpu;
res->intercept_mode = false;
res->trace_mode = false;
res->singlestep_enabled = false;
res->hooks_applied = 0;
res->page_cache = page_cache;
@ -225,44 +224,6 @@ void redqueen_callback(void* opaque, disassembler_mode_t mode, uint64_t start_ad
}
}
static void redqueen_trace_enabled(redqueen_t* self){
int unused __attribute__((unused));
if(self->trace_mode){
//libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder);
libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder);
libxdc_register_edge_callback(GET_GLOBAL_STATE()->decoder, (void (*)(void*, disassembler_mode_t, uint64_t, uint64_t))&redqueen_trace_register_transition, self->trace_state);
//redqueen_trace_register_transition(self->trace_state, INIT_TRACE_IP, ip);
//last_ip = ip;
}
}
static void redqueen_trace_disabled(redqueen_t* self){
int unused __attribute__((unused));
if(self->trace_mode){
libxdc_disable_tracing(GET_GLOBAL_STATE()->decoder);
//redqueen_trace_register_transition(self->trace_state, last_ip, ip);
//edqueen_trace_register_transition(self->trace_state, ip, INIT_TRACE_IP);
}
}
void redqueen_set_trace_mode(redqueen_t* self){
delete_trace_files();
self->trace_mode = true;
redqueen_trace_enabled(self);
}
void redqueen_unset_trace_mode(redqueen_t* self){
//write_trace_result(self->trace_state);
//redqueen_trace_reset(self->trace_state);
redqueen_trace_disabled(self);
self->trace_mode = false;
}
void destroy_rq_state(redqueen_t* self){
redqueen_trace_free(self->trace_state);
kh_destroy(RQ, self->lookup);

View File

@ -70,7 +70,6 @@ KHASH_MAP_INIT_INT64(RQ, uint32_t)
typedef struct redqueen_s{
khash_t(RQ) *lookup;
bool intercept_mode;
bool trace_mode;
bool singlestep_enabled;
int hooks_applied;
CPUState *cpu;
@ -109,10 +108,6 @@ void enable_rq_intercept_mode(redqueen_t* self);
void disable_rq_intercept_mode(redqueen_t* self);
void redqueen_register_transition(redqueen_t* self, uint64_t ip, uint64_t transition_val);
void redqueen_set_trace_mode(redqueen_t* self);
void redqueen_unset_trace_mode(redqueen_t* self);
void set_se_instruction(redqueen_t* self, uint64_t addr);
void dump_se_registers(redqueen_t* self);

View File

@ -2,7 +2,73 @@
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include "redqueen_trace.h"
#include "redqueen.h"
#include "state/state.h"
/* write full trace of edge transitions rather than sorted list? */
//#define KAFL_FULL_TRACES
int trace_fd = 0;
int redqueen_trace_enabled = false;
uint32_t alt_bitmap_size = 0;
uint8_t* alt_bitmap = NULL;
void alt_bitmap_init(void* ptr, uint32_t size)
{
if (redqueen_trace_enabled) {
alt_bitmap = (uint8_t*)ptr;
alt_bitmap_size = size;
}
}
void alt_bitmap_reset(void)
{
if (alt_bitmap) {
memset(alt_bitmap, 0x00, alt_bitmap_size);
}
}
static inline uint64_t mix_bits(uint64_t v) {
v ^= (v >> 31);
v *= 0x7fb5d329728ea185;
return v;
}
/*
* quick+dirty bitmap based on libxdc trace callback
* similar but not identical to libxdc bitmap.
*/
static void alt_bitmap_add(uint64_t from, uint64_t to)
{
uint64_t transition_value;
if (GET_GLOBAL_STATE()->trace_mode) {
if(alt_bitmap) {
transition_value = mix_bits(to)^(mix_bits(from)>>1);
alt_bitmap[transition_value & (alt_bitmap_size-1)]++;
}
}
}
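mix_bits() (moved here from pt.c) and the xor-shift fold mirror libxdc's edge hashing, which keeps the alternative bitmap roughly comparable to the regular one. A worked example of the slot computation, assuming a 64 KiB bitmap (the & (alt_bitmap_size - 1) mask requires a power-of-two size):

/* edge 0x401000 -> 0x401020 with alt_bitmap_size = 0x10000 */
uint64_t idx = (mix_bits(0x401020) ^ (mix_bits(0x401000) >> 1)) & (0x10000 - 1);
alt_bitmap[idx]++; /* 8-bit hit counter per slot */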
static int reset_trace_fd(void) {
if (trace_fd)
close(trace_fd);
trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (trace_fd < 0) {
fprintf(stderr, "Failed to initiate trace output: %s\n", strerror(errno));
assert(0);
}
return trace_fd;
}
void redqueen_trace_init(void) {
redqueen_trace_enabled = true;
}
redqueen_trace_t* redqueen_trace_new(void){
redqueen_trace_t* self = malloc(sizeof(redqueen_trace_t));
@ -13,12 +79,6 @@ redqueen_trace_t* redqueen_trace_new(void){
return self;
}
void redqueen_trace_reset(redqueen_trace_t* self){
kh_destroy(RQ_TRACE, self->lookup);
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
}
void redqueen_trace_free(redqueen_trace_t* self){
kh_destroy(RQ_TRACE, self->lookup);
free(self->ordered_transitions);
@ -28,6 +88,15 @@ void redqueen_trace_free(redqueen_trace_t* self){
void redqueen_trace_register_transition(redqueen_trace_t* self, disassembler_mode_t mode, uint64_t from, uint64_t to){
khiter_t k;
int ret;
uint64_t exit_ip = 0xffffffffffffffff;
if (from != exit_ip && to != exit_ip)
alt_bitmap_add(from, to);
#ifdef KAFL_FULL_TRACES
assert(trace_fd >= 0);
dprintf(trace_fd, "%lx,%lx\n", from, to);
return;
#endif
uint128_t key = (((uint128_t)from)<<64) | ((uint128_t)to);
k = kh_get(RQ_TRACE, self->lookup, key);
if(k != kh_end(self->lookup)){
@ -41,30 +110,74 @@ void redqueen_trace_register_transition(redqueen_trace_t* self, disassembler_mod
}
}
void redqueen_trace_write_file(redqueen_trace_t* self, int fd){
static void redqueen_trace_write(void){
#ifdef KAFL_FULL_TRACES
return;
#endif
redqueen_trace_t *self = GET_GLOBAL_STATE()->redqueen_state->trace_state;
assert(trace_fd >= 0);
for(size_t i = 0; i < self->num_ordered_transitions; i++){
khiter_t k;
uint128_t key = self->ordered_transitions[i];
k = kh_get(RQ_TRACE, self->lookup, key);
assert(k != kh_end(self->lookup));
dprintf(fd, "%lx,%lx,%lx\n", (uint64_t)(key>>64), (uint64_t)key, kh_value(self->lookup, k) );
dprintf(trace_fd, "%lx,%lx,%lx\n", (uint64_t)(key>>64), (uint64_t)key, kh_value(self->lookup, k) );
}
}
static void redqueen_state_reset(void){
redqueen_trace_t *self = GET_GLOBAL_STATE()->redqueen_state->trace_state;
kh_destroy(RQ_TRACE, self->lookup);
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
}
void redqueen_trace_reset(void){
if (redqueen_trace_enabled) {
redqueen_state_reset();
reset_trace_fd();
}
}
void redqueen_trace_flush(void){
if (redqueen_trace_enabled) {
redqueen_trace_write();
if (trace_fd)
fsync(trace_fd);
}
}
void redqueen_set_trace_mode(void){
if (redqueen_trace_enabled) {
libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder);
libxdc_register_edge_callback(GET_GLOBAL_STATE()->decoder,
(void (*)(void*, disassembler_mode_t, uint64_t, uint64_t))&redqueen_trace_register_transition,
GET_GLOBAL_STATE()->redqueen_state->trace_state);
}
}
void redqueen_unset_trace_mode(void){
if (redqueen_trace_enabled) {
libxdc_disable_tracing(GET_GLOBAL_STATE()->decoder);
}
}
#ifdef DEBUG_MAIN
int main(int argc, char** argv){
redqueen_trace_t* rq_obj = redqueen_trace_new();
reset_trace_fd();
for (uint64_t j = 0; j < 0x5; j++){
redqueen_trace_register_transition(rq_obj, 0xBADF, 0xC0FFEE);
redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE);
for (uint64_t i = 0; i < 0x10000; i++){
redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE);
}
redqueen_trace_write_file(rq_obj, STDOUT_FILENO);
redqueen_trace_reset(rq_obj);
redqueen_trace_write(rq_obj, STDOUT_FILENO);
redqueen_trace_reset();
}
redqueen_trace_free(rq_obj);

View File

@ -1,3 +1,10 @@
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include "qemu/osdep.h"
#pragma once
#include "khash.h"
#include <libxdc.h>
@ -36,8 +43,17 @@ typedef struct redqueen_trace_s{
uint128_t* ordered_transitions;
} redqueen_trace_t;
/* libxdc outputs no bitmap in trace mode */
void alt_bitmap_reset(void);
void alt_bitmap_init(void* ptr, uint32_t size);
redqueen_trace_t* redqueen_trace_new(void);
void redqueen_trace_reset(redqueen_trace_t* self);
void redqueen_trace_free(redqueen_trace_t* self);
void redqueen_trace_register_transition(redqueen_trace_t* self, disassembler_mode_t mode, uint64_t from, uint64_t to);
void redqueen_trace_write_file(redqueen_trace_t* self, int fd);
void redqueen_trace_init(void);
void redqueen_set_trace_mode(void);
void redqueen_unset_trace_mode(void);
void redqueen_trace_flush(void);
void redqueen_trace_reset(void);
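Reconstructed from this diff, the intended call sequence for the reworked file-global trace API looks roughly like this (the flush call site is outside this excerpt):

redqueen_trace_init();       /* once, if the nyx device enables edge_cb_trace   */
redqueen_set_trace_mode();   /* aux buffer turns tracing on: edge callback set  */
redqueen_trace_reset();      /* per execution: clear state, truncate trace file */
alt_bitmap_reset();
/* ... run input; libxdc calls redqueen_trace_register_transition() ... */
redqueen_trace_flush();      /* write sorted transitions and fsync              */
redqueen_unset_trace_mode(); /* aux buffer turns tracing off                    */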

View File

@ -42,19 +42,19 @@ static size_t get_file_size(const char* file){
static char* sharedir_scan(sharedir_t* self, const char* file){
char* path = NULL;
assert(asprintf(&path, "%s/%s", self->dir, file) != -1);
/*
* Agent is not under our control, but let's roughly constrain
* it to anything stored in or linked from sharedir
*/
chdir(self->dir);
char* real_path = realpath(file, NULL);
char* real_path = realpath(path, NULL);
free(path);
if(real_path && !strncmp(self->dir, real_path, strlen(self->dir)) && file_exits(real_path)){
if (file[0] != '/' && !strstr(file, "/../") &&
real_path && file_exits(real_path)) {
return real_path;
}
if(real_path){
free(real_path);
}
return NULL;
}
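The new check trades the old realpath() prefix comparison for two lexical rules, deliberately admitting symlinks that point outside the sharedir ("stored in or linked from"). Its effect on typical requests (illustrative cases, not from the commit):

/* "config.bin"         -> served from <sharedir>/config.bin
 * "sub/tool.cfg"       -> served (relative path, no "/../")
 * "/etc/passwd"        -> rejected (absolute path)
 * "a/../../etc/shadow" -> rejected (contains "/../")
 * "link_to_outside"    -> served if the symlink target exists */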
@ -115,15 +115,15 @@ static FILE* get_file_ptr(sharedir_t* self, sharedir_file_t* obj){
if(obj == self->last_file_obj_ptr && self->last_file_f){
return self->last_file_f;
}
else{
if(self->last_file_f){
fclose(self->last_file_f);
}
FILE* f = fopen(obj->path, "r");
self->last_file_f = f;
self->last_file_obj_ptr = obj;
return f;
}
}
uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page_buffer){
@ -167,6 +167,7 @@ uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page
}
}
else{
fprintf(stderr, "WARNING: No such file in sharedir: %s\n", file);
return 0xFFFFFFFFFFFFFFFFUL;
}
}

View File

@ -376,6 +376,12 @@ nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_fol
return self;
}
/*
* This is where the QEMUFile for later fast-snapshot creation is built:
* we use fast_qemu_savevm_state() to write a regular snapshot into a
* RAM-backed QEMUFile. state_reallocation_new() then uses this file to
* build an optimized sequence of snapshot restore operations.
*/
nyx_device_state_t* nyx_device_state_init(void){
nyx_device_state_t* self = malloc(sizeof(nyx_device_state_t));

View File

@ -283,13 +283,45 @@ static void add_post_fptr(state_reallocation_t* self, void* fptr, uint32_t versi
extern void fast_get_pci_config_device(void* data, size_t size, void* opaque);
void fast_get_pci_irq_state(void* data, size_t size, void* opaque);
//void fast_virtio_device_get(void* data, size_t size, void* opaque);
int virtio_device_get(QEMUFile *f, void *opaque, size_t size, const VMStateField *field);
static int fast_loadvm_fclose(void *opaque){
return 0;
}
static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size){
assert(pos < ((struct fast_savevm_opaque_t*)(opaque))->buflen);
memcpy(buf, (void*)(((struct fast_savevm_opaque_t*)(opaque))->buf + pos), size);
return size;
}
static const QEMUFileOps fast_loadvm_ops = {
.get_buffer = (QEMUFileGetBufferFunc*)fast_loadvm_get_buffer,
.close = (QEMUFileCloseFunc*)fast_loadvm_fclose
};
/* use opaque data to bootstrap virtio restore from QEMUFile */
static void fast_virtio_device_get(void* data, size_t size, void* opaque)
{
struct fast_savevm_opaque_t fast_loadvm_opaque = {
.buf = data,
.buflen = size,
.f = NULL,
.pos = 0,
};
QEMUFile* f = qemu_fopen_ops(&fast_loadvm_opaque, &fast_loadvm_ops);
virtio_device_get(f, opaque, size, NULL);
}
static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t size, void* field, QEMUFile* f, const char* name){
if(!self){
return;
}
void (*handler)(void* , size_t, void*) = NULL;
void* data = NULL;
uint8_t* data = NULL;
if(!strcmp(name, "timer")){
debug_fprintf(stderr, "SKPPING: %ld\n", size*-1);
@ -315,13 +347,21 @@ static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t
data = malloc(sizeof(uint8_t)*size);
qemu_get_buffer(f, (uint8_t*)data, size);
}
else if(!strcmp(name, "virtio")){
fprintf(stderr, "WARNING: ATTEMPTING FAST GET for %s\n", name);
qemu_file_skip(f, size * -1);
handler = fast_virtio_device_get;
data = malloc(sizeof(uint8_t)*size);
qemu_get_buffer(f, (uint8_t*)data, size);
}
else{
fprintf(stderr, "WARNING: NOT IMPLEMENTED FAST GET ROUTINE for %s\n", name);
abort();
return;
}
/* registered get handlers are replayed later by fdl_fast_reload() */
self->get_fptr[self->fast_state_get_fptr_pos] = handler;
self->get_opaque[self->fast_state_get_fptr_pos] = opaque;
self->get_size[self->fast_state_get_fptr_pos] = size;
@ -481,19 +521,21 @@ static inline int get_handler(state_reallocation_t* self, QEMUFile* f, void* cur
add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
}
else if(!strcmp(field->info->name, "pci config")){
//fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size);
//fprintf(stderr, "type: %s (size: %lx)\n", field->info->name, size);
add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
}
else if(!strcmp(field->info->name, "pci irq state")){
//fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size);
//fprintf(stderr, "type: %s (size: %lx)\n", field->info->name, size);
add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
}
else if(!strcmp(field->info->name, "virtio")){
fprintf(stderr, "type: %s (size: %lx)\n", field->info->name, size);
abort(); /* not yet implemented */
add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
//fprintf(stderr, "[QEMU-PT] %s: WARNING no handler for %s, type %s, size %lx!\n",
// __func__, vmsd_name, field->info->name, size);
}
else{
fprintf(stderr, "FAIL field->info->name: %s\n", field->info->name);
fprintf(stderr, "[QEMU-PT] %s: WARNING no handler for %s, type %s, size %lx!\n",
__func__, vmsd_name, field->info->name, size);
assert(0);
}
@ -918,6 +960,7 @@ state_reallocation_t* state_reallocation_new(QEMUFile *f){
self->tmp_snapshot.enabled = false;
self->tmp_snapshot.fast_state_size = 0;
// actually enumerate the devices here
fdl_enumerate_global_states(self, f);
self->tmp_snapshot.copy = malloc(sizeof(void*) * self->fast_state_pos);

View File

@ -50,6 +50,7 @@ struct QEMUFile_tmp {
struct fast_savevm_opaque_t{
FILE* f;
uint8_t* buf;
size_t buflen;
uint64_t pos;
void* output_buffer;
uint32_t* output_buffer_size;

View File

@ -45,6 +45,7 @@ void state_init_global(void){
global_state.nyx_fdl = false;
global_state.workdir_path = NULL;
global_state.worker_id = 0xffff;
global_state.fast_reload_enabled = false;
global_state.fast_reload_mode = false;
@ -89,6 +90,8 @@ void state_init_global(void){
global_state.in_fuzzing_mode = false;
global_state.in_reload_mode = true;
global_state.starved = false;
global_state.trace_mode = false;
global_state.shutdown_requested = false;
global_state.cow_cache_full = false;

View File

@ -29,26 +29,19 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include "nyx/auxiliary_buffer.h"
#include "nyx/sharedir.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/types.h"
#include <libxdc.h>
#define INTEL_PT_MAX_RANGES 4
enum mem_mode {
mm_unkown,
mm_32_protected, /* 32 Bit / No MMU */
mm_32_paging, /* 32 Bit / L3 Paging */
mm_32_pae, /* 32 Bit / PAE Paging */
mm_64_l4_paging, /* 64 Bit / L4 Paging */
mm_64_l5_paging, /* 32 Bit / L5 Paging */
};
typedef struct qemu_nyx_state_s{
/* set if FDL backend is used (required to perform some additional runtime tests) */
bool nyx_fdl;
char* workdir_path;
uint32_t worker_id;
/* FAST VM RELOAD */
bool fast_reload_enabled;
@ -116,7 +109,7 @@ typedef struct qemu_nyx_state_s{
uint64_t* nested_payload_pages;
bool protect_payload_buffer;
bool discard_tmp_snapshot;
uint8_t mem_mode;
mem_mode_t mem_mode;
uint32_t input_buffer_size;
@ -131,6 +124,8 @@ typedef struct qemu_nyx_state_s{
bool in_fuzzing_mode;
bool in_reload_mode;
bool starved;
bool trace_mode;
bool shutdown_requested;
bool cow_cache_full;

View File

@ -277,12 +277,12 @@ void synchronization_lock(void){
//last_timeout = false;
if(unlikely(GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->redqueen_state->trace_mode)){
if(GET_GLOBAL_STATE()->redqueen_state->trace_mode){
write_trace_result(GET_GLOBAL_STATE()->redqueen_state->trace_state);
redqueen_trace_reset(GET_GLOBAL_STATE()->redqueen_state->trace_state);
if(unlikely(GET_GLOBAL_STATE()->in_redqueen_reload_mode)) {
fsync_redqueen_files();
}
fsync_all_traces();
if (unlikely(GET_GLOBAL_STATE()->trace_mode)) {
redqueen_trace_flush();
}
interface_send_char(NYX_INTERFACE_PING);
@ -291,6 +291,11 @@ void synchronization_lock(void){
pthread_mutex_unlock(&synchronization_lock_mutex);
check_auxiliary_config_buffer(GET_GLOBAL_STATE()->auxilary_buffer, &GET_GLOBAL_STATE()->shadow_config);
//set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 1);
if (GET_GLOBAL_STATE()->starved)
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 2);
else
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 1);
GET_GLOBAL_STATE()->pt_trace_size = 0;
@ -335,6 +340,25 @@ void synchronization_lock_crash_found(void){
in_fuzzing_loop = false;
}
void synchronization_lock_asan_found(void){
if(!in_fuzzing_loop){
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
set_asan_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
perform_reload();
//synchronization_lock();
in_fuzzing_loop = false;
}
void synchronization_lock_timeout_found(void){
//fprintf(stderr, "<%d>\t%s\n", getpid(), __func__);

View File

@ -37,6 +37,7 @@ void synchronization_lock_hprintf(void);
void synchronization_lock(void);
void synchronization_lock_crash_found(void);
void synchronization_lock_asan_found(void);
void synchronization_lock_timeout_found(void);
void synchronization_lock_shutdown_detected(void);
void synchronization_cow_full_detected(void);

66
nyx/trace_dump.c Normal file
View File

@ -0,0 +1,66 @@
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include "state/state.h"
#include "trace_dump.h"
/* dump PT trace as returned from HW */
char *pt_trace_dump_filename;
bool pt_dump_initialized = false;
bool pt_dump_enabled = false;
void pt_trace_dump_enable(bool enable){
if (pt_dump_initialized)
pt_dump_enabled = enable;
}
void pt_trace_dump_init(char* filename)
{
int test_fd;
pt_dump_initialized = true;
/* verify the output path is writable before enabling the dump */
test_fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
if (test_fd < 0)
fprintf(stderr, "Error accessing pt_dump output path %s: %s\n", filename, strerror(errno));
assert(test_fd >= 0);
close(test_fd);
pt_trace_dump_filename = strdup(filename);
assert(pt_trace_dump_filename);
}
void pt_truncate_pt_dump_file(void) {
int fd;
if (!pt_dump_enabled)
return;
fd = open(pt_trace_dump_filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
if (fd < 0) {
fprintf(stderr, "Error truncating %s: %s\n", pt_trace_dump_filename, strerror(errno));
assert(0);
}
close(fd);
}
void pt_write_pt_dump_file(uint8_t *data, size_t bytes)
{
int fd;
if (!pt_dump_enabled)
return;
fd = open(pt_trace_dump_filename, O_APPEND|O_WRONLY, 0644);
//fd = open(pt_trace_dump_filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
if (fd < 0) {
fprintf(stderr, "Error writing pt_trace_dump to %s: %s\n", pt_trace_dump_filename, strerror(errno));
assert(0);
}
ssize_t written = write(fd, data, bytes);
assert(written == (ssize_t)bytes);
close(fd);
}
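Putting the four entry points together, a plausible per-execution usage looks like this (path and buffer names are placeholders, not from the patch):

    pt_trace_dump_init("/tmp/pt_dump.bin");     /* verify path, remember filename */
    pt_trace_dump_enable(true);

    pt_truncate_pt_dump_file();                 /* start a fresh dump per input   */
    /* ... raw PT packets arrive from KVM ... */
    pt_write_pt_dump_file(pt_buffer, pt_bytes); /* append as returned from HW     */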

6
nyx/trace_dump.h Normal file
View File

@ -0,0 +1,6 @@
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

void pt_trace_dump_init(char* filename);
void pt_trace_dump_enable(bool enable);
void pt_write_pt_dump_file(uint8_t *data, size_t bytes);
void pt_truncate_pt_dump_file(void);

12
nyx/types.h Normal file
View File

@ -0,0 +1,12 @@
#pragma once

#include <stdint.h>

enum mem_mode {
mm_unkown,
mm_32_protected, /* 32 Bit / no paging */
mm_32_paging,    /* 32 Bit / 2-level paging */
mm_32_pae,       /* 32 Bit / PAE (3-level) paging */
mm_64_l4_paging, /* 64 Bit / 4-level paging */
mm_64_l5_paging, /* 64 Bit / 5-level paging */
};
typedef uint8_t mem_mode_t;
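mem_mode_t is a single byte so it packs cheaply into the global state struct. For illustration, a hypothetical classifier (not part of the patch) deriving the mode from guest control registers per the usual x86 paging rules:

    #define CR0_PG   (1ULL << 31)
    #define CR4_PAE  (1ULL << 5)
    #define CR4_LA57 (1ULL << 12)
    #define EFER_LMA (1ULL << 10)

    static mem_mode_t classify_mem_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
        if (!(cr0 & CR0_PG))
            return mm_32_protected;           /* paging disabled  */
        if (efer & EFER_LMA)                  /* long mode active */
            return (cr4 & CR4_LA57) ? mm_64_l5_paging : mm_64_l4_paging;
        return (cr4 & CR4_PAE) ? mm_32_pae : mm_32_paging;
    }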

7
vl.c
View File

@ -2928,6 +2928,7 @@ int main(int argc, char **argv, char **envp)
#ifdef QEMU_NYX
bool fast_vm_reload = false;
state_init_global();
char *fast_vm_reload_opt_arg = NULL;
#endif
int i;
@ -3083,9 +3084,11 @@ int main(int argc, char **argv, char **envp)
#ifdef QEMU_NYX
case QEMU_OPTION_fast_vm_reload:
opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"),
optarg, true); if (!opts) {
optarg, true);
if (!opts) {
exit(1);
}
fast_vm_reload_opt_arg = optarg;
fast_vm_reload = true;
break;
#endif
@ -4569,7 +4572,7 @@ int main(int argc, char **argv, char **envp)
exit(1);
}
QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"), optarg, true);
QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"), fast_vm_reload_opt_arg, true);
const char* snapshot_path = qemu_opt_get(opts, "path");
const char* pre_snapshot_path = qemu_opt_get(opts, "pre_path");
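For reference, both keys read here arrive via the option parsed earlier; a hypothetical invocation (paths are placeholders, and any further keys are not shown in this hunk):

    qemu-system-x86_64 [...] -fast_vm_reload path=/path/to/snapshot,pre_path=/path/to/pre_snapshot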