auto-apply clang-format

- including vl.c & kvm-all.c
Sergej Schumilo 2022-10-16 23:34:23 +02:00
parent 976d8e8329
commit 8a88edc2a1
74 changed files with 9616 additions and 7849 deletions
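Every hunk in this commit follows the same idiom: upstream QEMU code is fenced off from clang-format, and only the Nyx additions inside #ifdef QEMU_NYX blocks get reformatted. Each block re-enables formatting with a // clang-format on marker right after the #ifdef and switches it off again before the #endif. A minimal sketch of the idiom (assuming the file is globally disabled with // clang-format off near the top; nyx_example is a placeholder, not a function from this diff):

// clang-format off
/* upstream QEMU code here keeps its original formatting */

#ifdef QEMU_NYX
// clang-format on
/* only Nyx-specific code between the markers is auto-formatted */
int nyx_example(int x)
{
    return x + 1;
}
// clang-format off
#endif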

View File

@@ -48,15 +48,15 @@
#ifdef QEMU_NYX
// clang-format on
#include "nyx/pt.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/synchronization.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include "nyx/interface.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/pt.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
// clang-format off
#endif
@@ -93,7 +93,7 @@ struct KVMState
AccelState parent_obj;
#ifdef QEMU_NYX
// clang-format on
// clang-format on
bool nyx_no_pt_mode;
// clang-format off
#endif
@@ -387,12 +387,14 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
#ifdef QEMU_NYX
// clang-format on
int kvm_get_vm_fd(KVMState *s){
int kvm_get_vm_fd(KVMState *s)
{
return s->vmfd;
}
KVMMemoryListener* kvm_get_kml(int as_id){
return kvm_state->as[as_id].ml;
KVMMemoryListener *kvm_get_kml(int as_id)
{
return kvm_state->as[as_id].ml;
}
// clang-format off
#endif
@@ -416,9 +418,9 @@ int kvm_init_vcpu(CPUState *cpu)
cpu->vcpu_dirty = true;
#ifdef QEMU_NYX
// clang-format on
if(s->nyx_no_pt_mode){
if(!getenv("NYX_DISABLE_DIRTY_RING")){
// clang-format on
if (s->nyx_no_pt_mode) {
if (!getenv("NYX_DISABLE_DIRTY_RING")) {
nyx_dirty_ring_pre_init(cpu->kvm_fd, s->vmfd);
}
}
@@ -450,7 +452,7 @@ int kvm_init_vcpu(CPUState *cpu)
ret = kvm_arch_init_vcpu(cpu);
#ifdef QEMU_NYX
// clang-format on
// clang-format on
unblock_signals();
// clang-format off
#endif
@@ -1928,28 +1930,34 @@ static int kvm_init(MachineState *ms)
goto err;
}
#ifdef QEMU_NYX
// clang-format on
if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 && ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1) {
// clang-format on
if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 &&
ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1)
{
/* fallback -> use vanilla KVM module instead (no Intel-PT tracing or nested hypercalls at this point) */
fprintf(stderr, "[QEMU-Nyx] Could not access KVM-PT kernel module!\n[QEMU-Nyx] Trying vanilla KVM...\n");
fprintf(stderr, "[QEMU-Nyx] Could not access KVM-PT kernel "
"module!\n[QEMU-Nyx] Trying vanilla KVM...\n");
if (s->fd == -1) {
fprintf(stderr, "[QEMU-Nyx] Error: NYX fallback failed: Could not access vanilla KVM module!\n");
fprintf(stderr, "[QEMU-Nyx] Error: NYX fallback failed: Could not "
"access vanilla KVM module!\n");
ret = -errno;
goto err;
}
int ret_val = ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if(ret_val == -1 || ret_val == 0){
fprintf(stderr, "[QEMU-Nyx] Error: NYX requires support for KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
if (ret_val == -1 || ret_val == 0) {
fprintf(stderr, "[QEMU-Nyx] Error: NYX requires support for "
"KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
ret = -errno;
goto err;
}
/* check for vmware_backdoor support */
int fd = open("/sys/module/kvm/parameters/enable_vmware_backdoor", O_RDONLY);
if(fd == -1){
fprintf(stderr, "ERROR: /sys/module/kvm/parameters/enable_vmware_backdoor file not found...\n");
if (fd == -1) {
fprintf(stderr,
"ERROR: /sys/module/kvm/parameters/enable_vmware_backdoor file "
"not found...\n");
ret = -errno;
goto err;
}
@@ -1958,28 +1966,32 @@ static int kvm_init(MachineState *ms)
assert(read(fd, &vmware_backdoor_option, 1) == 1);
close(fd);
if(vmware_backdoor_option == 'N'){
fprintf(stderr, "\n[QEMU-Nyx] ERROR: vmware backdoor is not enabled...\n");
if (vmware_backdoor_option == 'N') {
fprintf(stderr,
"\n[QEMU-Nyx] ERROR: vmware backdoor is not enabled...\n");
fprintf(stderr, "\n\tRun the following commands to fix the issue:\n");
fprintf(stderr, "\t-----------------------------------------\n");
fprintf(stderr, "\tsudo modprobe -r kvm-intel\n");
fprintf(stderr, "\tsudo modprobe -r kvm\n");
fprintf(stderr, "\tsudo modprobe kvm enable_vmware_backdoor=y\n");
fprintf(stderr, "\tsudo modprobe kvm-intel\n");
fprintf(stderr, "\tcat /sys/module/kvm/parameters/enable_vmware_backdoor\n");
fprintf(stderr,
"\tcat /sys/module/kvm/parameters/enable_vmware_backdoor\n");
fprintf(stderr, "\t-----------------------------------------\n\n");
ret = -errno;
goto err;
}
fprintf(stderr, "[QEMU-Nyx] NYX runs in fallback mode (no Intel-PT tracing or nested hypercall support)!\n");
s->nyx_no_pt_mode = true;
fprintf(stderr, "[QEMU-Nyx] NYX runs in fallback mode (no Intel-PT tracing "
"or nested hypercall support)!\n");
s->nyx_no_pt_mode = true;
GET_GLOBAL_STATE()->nyx_fdl = false;
GET_GLOBAL_STATE()->pt_trace_mode = false; // Intel PT is not available in this mode
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DIRTY_RING);
}
else{
s->nyx_no_pt_mode = false;
GET_GLOBAL_STATE()->pt_trace_mode =
false; // Intel PT is not available in this mode
fast_reload_set_mode(get_fast_reload_snapshot(),
RELOAD_MEMORY_MODE_DIRTY_RING);
} else {
s->nyx_no_pt_mode = false;
GET_GLOBAL_STATE()->nyx_fdl = true;
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_FDL);
}
@@ -2050,13 +2062,16 @@ static int kvm_init(MachineState *ms)
s->vmfd = ret;
#ifdef QEMU_NYX
// clang-format on
if(s->nyx_no_pt_mode){
if(getenv("NYX_DISABLE_DIRTY_RING")){
fprintf(stderr, "WARNING: Nyx has disabled KVM's dirty-ring (required to enable full VGA support during pre-snapshot creation procedure)\n");
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DEBUG_QUIET); /* required to create snapshot */
}
else{
// clang-format on
if (s->nyx_no_pt_mode) {
if (getenv("NYX_DISABLE_DIRTY_RING")) {
fprintf(stderr,
"WARNING: Nyx has disabled KVM's dirty-ring (required to enable "
"full VGA support during pre-snapshot creation procedure)\n");
fast_reload_set_mode(
get_fast_reload_snapshot(),
RELOAD_MEMORY_MODE_DEBUG_QUIET); /* required to create snapshot */
} else {
nyx_dirty_ring_early_init(s->fd, s->vmfd);
}
}
@@ -2105,11 +2120,10 @@ static int kvm_init(MachineState *ms)
#ifndef QEMU_NYX
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
#else
// clang-format on
if(s->nyx_no_pt_mode){
// clang-format on
if (s->nyx_no_pt_mode) {
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
}
else{
} else {
ret = 0;
}
// clang-format off
@@ -2405,13 +2419,14 @@ static void kvm_eat_signals(CPUState *cpu)
#ifdef QEMU_NYX
// clang-format on
static int handle_vmware_hypercall(struct kvm_run *run, CPUState *cpu){
kvm_arch_get_registers_fast(cpu);
static int handle_vmware_hypercall(struct kvm_run *run, CPUState *cpu)
{
kvm_arch_get_registers_fast(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
return handle_kafl_hypercall(run, cpu, env->regs[R_EBX]+100, env->regs[R_ECX]);
return handle_kafl_hypercall(run, cpu, env->regs[R_EBX] + 100, env->regs[R_ECX]);
}
// clang-format off
#endif
@@ -2432,10 +2447,10 @@ int kvm_cpu_exec(CPUState *cpu)
cpu_exec_start(cpu);
#ifdef QEMU_NYX
// clang-format on
// clang-format on
static bool timeout_reload_pending = false;
if(timeout_reload_pending){
synchronization_lock_timeout_found();
if (timeout_reload_pending) {
synchronization_lock_timeout_found();
}
timeout_reload_pending = false;
// clang-format off
@@ -2462,8 +2477,8 @@ int kvm_cpu_exec(CPUState *cpu)
}
#ifdef QEMU_NYX
// clang-format on
if(!kvm_state->nyx_no_pt_mode){
// clang-format on
if (!kvm_state->nyx_no_pt_mode) {
pt_pre_kvm_run(cpu);
}
// clang-format off
@@ -2475,7 +2490,7 @@ int kvm_cpu_exec(CPUState *cpu)
smp_rmb();
#ifdef QEMU_NYX
// clang-format on
// clang-format on
arm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector);
// clang-format off
#endif
@@ -2483,8 +2498,8 @@ int kvm_cpu_exec(CPUState *cpu)
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
#ifdef QEMU_NYX
// clang-format on
if (disarm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector)){
// clang-format on
if (disarm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector)) {
timeout_reload_pending = true;
}
// clang-format off
@@ -2493,10 +2508,10 @@ int kvm_cpu_exec(CPUState *cpu)
attrs = kvm_arch_post_run(cpu, run);
#ifdef QEMU_NYX
// clang-format on
if(!kvm_state->nyx_no_pt_mode){
pt_post_kvm_run(cpu);
}
// clang-format on
if (!kvm_state->nyx_no_pt_mode) {
pt_post_kvm_run(cpu);
}
// clang-format off
#endif
@@ -2522,24 +2537,25 @@ int kvm_cpu_exec(CPUState *cpu)
fprintf(stderr, "error: kvm run failed %s\n",
strerror(-run_ret));
#else
// clang-format on
if(run_ret == -EFAULT){
if(GET_GLOBAL_STATE()->protect_payload_buffer){
if (GET_GLOBAL_STATE()->in_fuzzing_mode){
// clang-format on
if (run_ret == -EFAULT) {
if (GET_GLOBAL_STATE()->protect_payload_buffer) {
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
/* Fuzzing is enabled at this point -> don't exit */
synchronization_payload_buffer_write_detected();
ret = 0;
break;
}
else{
fprintf(stderr, "ERROR: invalid write to input buffer detected before harness was ready (write protection is enabled)!\n");
} else {
fprintf(
stderr,
"ERROR: invalid write to input buffer detected before "
"harness was ready (write protection is enabled)!\n");
exit(1);
}
}
}
fprintf(stderr, "QEMU-PT: error: kvm run failed %s\n",
strerror(-run_ret));
fprintf(stderr, "QEMU-PT: error: kvm run failed %s\n", strerror(-run_ret));
qemu_backtrace();
// clang-format off
#endif
@@ -2562,8 +2578,10 @@ int kvm_cpu_exec(CPUState *cpu)
DPRINTF("handle_io\n");
#ifdef QEMU_NYX
// clang-format on
if(run->io.port == 0x5658 && run->io.size == 4 && *((uint32_t*)((uint8_t *)run + run->io.data_offset)) == 0x8080801f) {
// clang-format on
if (run->io.port == 0x5658 && run->io.size == 4 &&
*((uint32_t *)((uint8_t *)run + run->io.data_offset)) == 0x8080801f)
{
assert(kvm_state->nyx_no_pt_mode);
ret = handle_vmware_hypercall(run, cpu);
break;
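In the no-PT fallback mode, guest agents deliver Nyx hypercalls through the VMware backdoor: the check above fires when KVM traps a 4-byte write of the magic value 0x8080801f to I/O port 0x5658, and handle_vmware_hypercall() then takes the hypercall number from EBX (offset by 100) and its argument from ECX. The guest-side stub below is a hedged sketch inferred from this handler, not taken from the kAFL agent headers; the outl transport and register layout are assumptions:

#include <stdint.h>

/* Hypothetical guest-side stub (x86-64, GCC inline asm). The outl traps
 * into KVM as a 4-byte KVM_EXIT_IO on port 0x5658 carrying the magic
 * value in EAX, while EBX/ECX stay live for the host-side handler. */
static inline void nyx_backdoor_hypercall(uint32_t id, uintptr_t arg)
{
    asm volatile("outl %%eax, %%dx"
                 :
                 : "a"(0x8080801fu),      /* magic checked by the host */
                   "d"((uint16_t)0x5658), /* VMware backdoor I/O port  */
                   "b"(id),               /* hypercall number; host adds 100 */
                   "c"(arg)               /* single argument register  */
                 : "memory");
}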
@@ -2598,20 +2616,23 @@ int kvm_cpu_exec(CPUState *cpu)
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
#else
// clang-format on
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
// clang-format on
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
#define CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC // consider triple-fault etc as crash?
#ifndef CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC
/* Fuzzing is enabled at this point -> don't exit */
fprintf(stderr, "Got KVM_EXIT_SHUTDOWN while in fuzzing mode => reload\n",);
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
fprintf(stderr,
"Got KVM_EXIT_SHUTDOWN while in fuzzing mode => reload\n", );
handle_hypercall_kafl_release(run, cpu,
(uint64_t)run->hypercall.args[0]);
ret = 0;
#else
nyx_debug("Got KVM_EXIT_SHUTDOWN while in fuzzing mode => panic\n");
handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
nyx_debug("Got KVM_EXIT_SHUTDOWN while in fuzzing mode => panic\n");
handle_hypercall_kafl_panic(run, cpu,
(uint64_t)run->hypercall.args[0]);
ret = 0;
#endif
} else{
} else {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
}
@@ -2623,7 +2644,7 @@ int kvm_cpu_exec(CPUState *cpu)
(uint64_t)run->hw.hardware_exit_reason);
#ifdef QEMU_NYX
// clang-format on
// clang-format on
assert(false);
// clang-format off
#endif
@@ -2635,27 +2656,28 @@ int kvm_cpu_exec(CPUState *cpu)
break;
#ifdef QEMU_NYX
// clang-format on
// clang-format on
case KVM_EXIT_DIRTY_RING_FULL:
//printf("[*] WARNING: KVM_EXIT_DIRTY_RING_FULL\n");
// printf("[*] WARNING: KVM_EXIT_DIRTY_RING_FULL\n");
fast_reload_handle_dirty_ring_full(get_fast_reload_snapshot());
ret = 0;
break;
case KVM_EXIT_KAFL_ACQUIRE ... (KVM_EXIT_KAFL_ACQUIRE+100):
ret = handle_kafl_hypercall(run, cpu, (uint64_t)run->exit_reason, (uint64_t)run->hypercall.args[0]);
case KVM_EXIT_KAFL_ACQUIRE ...(KVM_EXIT_KAFL_ACQUIRE + 100):
ret = handle_kafl_hypercall(run, cpu, (uint64_t)run->exit_reason,
(uint64_t)run->hypercall.args[0]);
break;
case KVM_EXIT_DEBUG:
kvm_arch_get_registers(cpu);
if(!handle_hypercall_kafl_hook(run, cpu, (uint64_t)run->hypercall.args[0])){
ret = kvm_arch_handle_exit(cpu, run);
}
else {
ret = 0;
}
break;
// clang-format off
kvm_arch_get_registers(cpu);
if (!handle_hypercall_kafl_hook(run, cpu, (uint64_t)run->hypercall.args[0]))
{
ret = kvm_arch_handle_exit(cpu, run);
} else {
ret = 0;
}
break;
// clang-format off
#endif
case KVM_EXIT_SYSTEM_EVENT:
@@ -2665,14 +2687,16 @@ int kvm_cpu_exec(CPUState *cpu)
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
ret = EXCP_INTERRUPT;
#else
// clang-format on
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_SHUTDOWN)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
// clang-format on
fprintf(
stderr,
"ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_SHUTDOWN)!\n");
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
handle_hypercall_kafl_release(run, cpu,
(uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
} else {
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
ret = EXCP_INTERRUPT;
}
@@ -2684,14 +2708,15 @@ int kvm_cpu_exec(CPUState *cpu)
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
#else
// clang-format on
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_RESET)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
// clang-format on
fprintf(stderr,
"ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_RESET)!\n");
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
handle_hypercall_kafl_release(run, cpu,
(uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
} else {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
}
@@ -2706,14 +2731,15 @@ int kvm_cpu_exec(CPUState *cpu)
qemu_mutex_unlock_iothread();
ret = 0;
#else
// clang-format on
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_CRASH)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
// clang-format on
fprintf(stderr,
"ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_CRASH)!\n");
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
handle_hypercall_kafl_release(run, cpu,
(uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
} else {
kvm_cpu_synchronize_state(cpu);
qemu_mutex_lock_iothread();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
@@ -2736,14 +2762,15 @@ int kvm_cpu_exec(CPUState *cpu)
// clang-format on
#define CONFIG_UNKNOWN_ERROR_IS_PANIC
#ifndef CONFIG_UNKNOWN_ERROR_IS_PANIC
fprintf(stderr, "Unknown exit code (%d) => ABORT\n", run->exit_reason);
ret = kvm_arch_handle_exit(cpu, run);
fprintf(stderr, "Unknown exit code (%d) => ABORT\n", run->exit_reason);
ret = kvm_arch_handle_exit(cpu, run);
assert(ret == 0);
#else
nyx_debug("kvm_arch_handle_exit(%d) => panic\n", run->exit_reason);
ret = kvm_arch_handle_exit(cpu, run);
if (ret != 0)
handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = kvm_arch_handle_exit(cpu, run);
if (ret != 0)
handle_hypercall_kafl_panic(run, cpu,
(uint64_t)run->hypercall.args[0]);
#endif
// clang-format off
#endif
@@ -2751,31 +2778,35 @@ int kvm_cpu_exec(CPUState *cpu)
break;
}
#ifdef QEMU_NYX
// clang-format on
if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->cow_cache_full){
#ifdef QEMU_NYX
// clang-format on
if (GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->cow_cache_full)
{
synchronization_cow_full_detected();
GET_GLOBAL_STATE()->cow_cache_full = false;
ret = 0;
}
else{
if(GET_GLOBAL_STATE()->in_fuzzing_mode && cpu->halted){
ret = 0;
} else {
if (GET_GLOBAL_STATE()->in_fuzzing_mode && cpu->halted) {
fprintf(stderr, "%s: Attempt to halt CPU -> FUCK OFF!\n", __func__);
cpu->halted = 0;
cpu->halted = 0;
GET_GLOBAL_STATE()->shutdown_requested = true;
}
if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->shutdown_requested){
if (GET_GLOBAL_STATE()->in_fuzzing_mode &&
GET_GLOBAL_STATE()->shutdown_requested)
{
/* Fuzzing is enabled at this point -> don't exit */
fprintf(stderr, "shutdown_requested -> calling handle_hypercall_kafl_release\n");
fprintf(
stderr,
"shutdown_requested -> calling handle_hypercall_kafl_release\n");
//synchronization_lock_shutdown_detected();
// synchronization_lock_shutdown_detected();
synchronization_lock_crash_found();
GET_GLOBAL_STATE()->shutdown_requested = false;
ret = 0;
ret = 0;
}
}
if(reload_request_exists(GET_GLOBAL_STATE()->reload_state)){
if (reload_request_exists(GET_GLOBAL_STATE()->reload_state)) {
break;
}
// clang-format off
@@ -2788,7 +2819,7 @@ int kvm_cpu_exec(CPUState *cpu)
if (ret < 0) {
#ifdef QEMU_NYX
// clang-format on
// clang-format on
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (ret < 0)!\n");
// clang-format off
#endif
@@ -2799,8 +2830,8 @@ int kvm_cpu_exec(CPUState *cpu)
atomic_set(&cpu->exit_request, 0);
#ifdef QEMU_NYX
// clang-format on
if(check_if_relood_request_exists_pre(GET_GLOBAL_STATE()->reload_state)){
// clang-format on
if (check_if_relood_request_exists_pre(GET_GLOBAL_STATE()->reload_state)) {
pause_all_vcpus(); /* performance boost ??? */
}
// clang-format off
@@ -3203,7 +3234,7 @@ void kvm_init_cpu_signals(CPUState *cpu)
#endif
sigdelset(&set, SIG_IPI);
#ifdef QEMU_NYX
// clang-format on
// clang-format on
sigdelset(&set, SIGALRM);
// clang-format off
#endif

View File

@@ -19,259 +19,309 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include "qemu/osdep.h"
#include "nyx/auxiliary_buffer.h"
#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include "nyx/trace_dump.h"
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
/* experimental feature (currently broken)
* enabled via trace mode
*/
//#define SUPPORT_COMPILE_TIME_REDQUEEN
// #define SUPPORT_COMPILE_TIME_REDQUEEN
#define VOLATILE_WRITE_64(dst, src) *((volatile uint64_t*)&dst) = (uint64_t)src
#define VOLATILE_WRITE_32(dst, src) *((volatile uint32_t*)&dst) = (uint32_t)src
#define VOLATILE_WRITE_16(dst, src) *((volatile uint16_t*)&dst) = (uint16_t)src
#define VOLATILE_WRITE_8(dst, src) *((volatile uint8_t*)&dst) = (uint8_t)src
#define VOLATILE_WRITE_64(dst, src) *((volatile uint64_t *)&dst) = (uint64_t)src
#define VOLATILE_WRITE_32(dst, src) *((volatile uint32_t *)&dst) = (uint32_t)src
#define VOLATILE_WRITE_16(dst, src) *((volatile uint16_t *)&dst) = (uint16_t)src
#define VOLATILE_WRITE_8(dst, src) *((volatile uint8_t *)&dst) = (uint8_t)src
#define VOLATILE_READ_64(dst, src) dst = *((volatile uint64_t*)(&src))
#define VOLATILE_READ_32(dst, src) dst = *((volatile uint32_t*)(&src))
#define VOLATILE_READ_16(dst, src) dst = *((volatile uint16_t*)(&src))
#define VOLATILE_READ_8(dst, src) dst = *((volatile uint8_t*)(&src))
#define VOLATILE_READ_64(dst, src) dst = *((volatile uint64_t *)(&src))
#define VOLATILE_READ_32(dst, src) dst = *((volatile uint32_t *)(&src))
#define VOLATILE_READ_16(dst, src) dst = *((volatile uint16_t *)(&src))
#define VOLATILE_READ_8(dst, src) dst = *((volatile uint8_t *)(&src))
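The auxiliary buffer lives in shared memory that the fuzzer frontend polls concurrently, so every field access goes through these width-exact volatile macros: the compiler must emit exactly one load or store of the stated size and may not cache, tear, or dead-store-eliminate it. A stand-alone illustration (the demo struct is hypothetical, not part of the aux buffer):

#include <stdint.h>

struct demo {
    uint8_t flag;
};

static void signal_done(struct demo *shared)
{
    /* expands to: *((volatile uint8_t *)&shared->flag) = (uint8_t)1; */
    VOLATILE_WRITE_8(shared->flag, 1);
}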
static void volatile_memset(void* dst, uint8_t ch, size_t count){
for (size_t i = 0; i < count; i++){
VOLATILE_WRITE_8(((uint8_t*)dst)[i], ch);
}
}
static void volatile_memcpy(void* dst, void* src, size_t size){
for (size_t i = 0; i < size; i++){
VOLATILE_WRITE_8(((uint8_t*)dst)[i], ((uint8_t*)src)[i]);
}
}
void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){
nyx_trace();
volatile_memset((void*) auxilary_buffer, 0, sizeof(auxilary_buffer_t));
VOLATILE_WRITE_16(auxilary_buffer->header.version, QEMU_PT_VERSION);
uint16_t hash = (sizeof(auxilary_buffer_header_t) +
sizeof(auxilary_buffer_cap_t) +
sizeof(auxilary_buffer_config_t) +
sizeof(auxilary_buffer_result_t) +
sizeof(auxilary_buffer_misc_t)) % 0xFFFF;
VOLATILE_WRITE_16(auxilary_buffer->header.hash, hash);
VOLATILE_WRITE_64(auxilary_buffer->header.magic, AUX_MAGIC);
}
void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config){
uint8_t changed = 0;
VOLATILE_READ_8(changed, auxilary_buffer->configuration.changed);
if (changed){
uint8_t aux_byte;
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.redqueen_mode);
if(aux_byte){
/* enable redqueen mode */
if(aux_byte != shadow_config->redqueen_mode){
GET_GLOBAL_STATE()->in_redqueen_reload_mode = true;
GET_GLOBAL_STATE()->redqueen_enable_pending = true;
GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_LIGHT_INSTRUMENTATION;
}
}
else{
/* disable redqueen mode */
if(aux_byte != shadow_config->redqueen_mode){
GET_GLOBAL_STATE()->in_redqueen_reload_mode = false;
GET_GLOBAL_STATE()->redqueen_disable_pending = true;
GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION;
}
static void volatile_memset(void *dst, uint8_t ch, size_t count)
{
for (size_t i = 0; i < count; i++) {
VOLATILE_WRITE_8(((uint8_t *)dst)[i], ch);
}
}
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.trace_mode);
if(aux_byte){
/* enable trace mode */
if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){
static void volatile_memcpy(void *dst, void *src, size_t size)
{
for (size_t i = 0; i < size; i++) {
VOLATILE_WRITE_8(((uint8_t *)dst)[i], ((uint8_t *)src)[i]);
}
}
void init_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer)
{
nyx_trace();
volatile_memset((void *)auxilary_buffer, 0, sizeof(auxilary_buffer_t));
VOLATILE_WRITE_16(auxilary_buffer->header.version, QEMU_PT_VERSION);
uint16_t hash =
(sizeof(auxilary_buffer_header_t) + sizeof(auxilary_buffer_cap_t) +
sizeof(auxilary_buffer_config_t) + sizeof(auxilary_buffer_result_t) +
sizeof(auxilary_buffer_misc_t)) %
0xFFFF;
VOLATILE_WRITE_16(auxilary_buffer->header.hash, hash);
VOLATILE_WRITE_64(auxilary_buffer->header.magic, AUX_MAGIC);
}
void check_auxiliary_config_buffer(auxilary_buffer_t *auxilary_buffer,
auxilary_buffer_config_t *shadow_config)
{
uint8_t changed = 0;
VOLATILE_READ_8(changed, auxilary_buffer->configuration.changed);
if (changed) {
uint8_t aux_byte;
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.redqueen_mode);
if (aux_byte) {
/* enable redqueen mode */
if (aux_byte != shadow_config->redqueen_mode) {
GET_GLOBAL_STATE()->in_redqueen_reload_mode = true;
GET_GLOBAL_STATE()->redqueen_enable_pending = true;
GET_GLOBAL_STATE()->redqueen_instrumentation_mode =
REDQUEEN_LIGHT_INSTRUMENTATION;
}
} else {
/* disable redqueen mode */
if (aux_byte != shadow_config->redqueen_mode) {
GET_GLOBAL_STATE()->in_redqueen_reload_mode = false;
GET_GLOBAL_STATE()->redqueen_disable_pending = true;
GET_GLOBAL_STATE()->redqueen_instrumentation_mode =
REDQUEEN_NO_INSTRUMENTATION;
}
}
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.trace_mode);
if (aux_byte) {
/* enable trace mode */
if (aux_byte != shadow_config->trace_mode &&
GET_GLOBAL_STATE()->redqueen_state)
{
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
GET_GLOBAL_STATE()->pt_trace_mode_force = true;
GET_GLOBAL_STATE()->pt_trace_mode_force = true;
#endif
GET_GLOBAL_STATE()->trace_mode = true;
redqueen_set_trace_mode();
pt_trace_dump_enable(true);
}
}
else {
/* disable trace mode */
if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){
GET_GLOBAL_STATE()->trace_mode = true;
redqueen_set_trace_mode();
pt_trace_dump_enable(true);
}
} else {
/* disable trace mode */
if (aux_byte != shadow_config->trace_mode &&
GET_GLOBAL_STATE()->redqueen_state)
{
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
GET_GLOBAL_STATE()->pt_trace_mode_force = false;
GET_GLOBAL_STATE()->pt_trace_mode_force = false;
#endif
GET_GLOBAL_STATE()->trace_mode = false;
redqueen_unset_trace_mode();
pt_trace_dump_enable(false);
}
GET_GLOBAL_STATE()->trace_mode = false;
redqueen_unset_trace_mode();
pt_trace_dump_enable(false);
}
}
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.page_dump_mode);
if (aux_byte) {
GET_GLOBAL_STATE()->dump_page = true;
uint64_t data;
VOLATILE_READ_64(data, auxilary_buffer->configuration.page_addr);
GET_GLOBAL_STATE()->dump_page_addr = data;
// fprintf(stderr, "%s dump_page_addr => 0x%lx\n", __func__, GET_GLOBAL_STATE()->dump_page_addr);
VOLATILE_WRITE_8(auxilary_buffer->configuration.page_dump_mode, 0);
VOLATILE_WRITE_64(auxilary_buffer->configuration.page_addr, 0);
}
/* modify reload mode */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.reload_mode);
GET_GLOBAL_STATE()->in_reload_mode = aux_byte;
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte,
auxilary_buffer->configuration.protect_payload_buffer);
if (GET_GLOBAL_STATE()->protect_payload_buffer == 0 && aux_byte == 1) {
GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte;
}
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.discard_tmp_snapshot);
GET_GLOBAL_STATE()->discard_tmp_snapshot = aux_byte;
VOLATILE_WRITE_8(auxilary_buffer->configuration.discard_tmp_snapshot, 0);
/* copy to shadow */
VOLATILE_READ_8(shadow_config->timeout_sec,
auxilary_buffer->configuration.timeout_sec);
VOLATILE_READ_32(shadow_config->timeout_usec,
auxilary_buffer->configuration.timeout_usec);
// if(shadow_config->timeout_sec || shadow_config->timeout_usec){
/* apply only non-zero values */
update_itimer(&(GET_GLOBAL_STATE()->timeout_detector),
shadow_config->timeout_sec, shadow_config->timeout_usec);
//}
VOLATILE_READ_8(shadow_config->redqueen_mode,
auxilary_buffer->configuration.redqueen_mode);
VOLATILE_READ_8(shadow_config->trace_mode,
auxilary_buffer->configuration.trace_mode);
VOLATILE_READ_8(shadow_config->reload_mode,
auxilary_buffer->configuration.reload_mode);
VOLATILE_READ_8(shadow_config->verbose_level,
auxilary_buffer->configuration.verbose_level);
/* reset the 'changed' byte */
VOLATILE_WRITE_8(auxilary_buffer->configuration.changed, 0);
}
}
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.page_dump_mode);
if(aux_byte){
GET_GLOBAL_STATE()->dump_page = true;
uint64_t data;
VOLATILE_READ_64(data, auxilary_buffer->configuration.page_addr);
GET_GLOBAL_STATE()->dump_page_addr = data;
//fprintf(stderr, "%s dump_page_addr => 0x%lx\n", __func__, GET_GLOBAL_STATE()->dump_page_addr);
VOLATILE_WRITE_8(auxilary_buffer->configuration.page_dump_mode, 0);
VOLATILE_WRITE_64(auxilary_buffer->configuration.page_addr, 0);
void set_crash_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
}
void set_asan_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_sanitizer);
}
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_timeout);
}
void set_reload_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 1);
}
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 1);
}
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint32_t sec,
uint32_t usec,
uint32_t num_dirty_pages)
{
VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 1);
VOLATILE_WRITE_32(auxilary_buffer->result.runtime_sec, sec);
VOLATILE_WRITE_32(auxilary_buffer->result.runtime_usec, usec);
VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, num_dirty_pages);
}
void set_hprintf_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
char *msg,
uint32_t len)
{
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
(size_t)MIN(len, MISC_SIZE - 2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_hprintf);
}
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
char *msg,
uint32_t len)
{
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
(size_t)MIN(len, MISC_SIZE - 2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
}
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
char *msg,
uint32_t len)
{
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
(size_t)MIN(len, MISC_SIZE - 2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_aborted);
}
void set_state_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint8_t state)
{
if (auxilary_buffer) {
VOLATILE_WRITE_8(auxilary_buffer->result.state, state);
} else {
nyx_error("WARNING: auxilary_buffer pointer is zero\n");
}
}
/* modify reload mode */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.reload_mode);
GET_GLOBAL_STATE()->in_reload_mode = aux_byte;
void set_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint64_t page_addr)
{
VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 1);
VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, page_addr);
}
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.protect_payload_buffer);
if (GET_GLOBAL_STATE()->protect_payload_buffer == 0 && aux_byte == 1){
GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte;
void reset_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 0);
}
void set_success_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint8_t success)
{
// TODO refactor to let caller directly set the result codes
if (success == 2) {
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_starved);
} else {
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_success);
}
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.discard_tmp_snapshot);
GET_GLOBAL_STATE()->discard_tmp_snapshot = aux_byte;
VOLATILE_WRITE_8(auxilary_buffer->configuration.discard_tmp_snapshot, 0);
/* copy to shadow */
VOLATILE_READ_8(shadow_config->timeout_sec, auxilary_buffer->configuration.timeout_sec);
VOLATILE_READ_32(shadow_config->timeout_usec, auxilary_buffer->configuration.timeout_usec);
//if(shadow_config->timeout_sec || shadow_config->timeout_usec){
/* apply only non-zero values */
update_itimer(&(GET_GLOBAL_STATE()->timeout_detector), shadow_config->timeout_sec, shadow_config->timeout_usec);
//}
VOLATILE_READ_8(shadow_config->redqueen_mode, auxilary_buffer->configuration.redqueen_mode);
VOLATILE_READ_8(shadow_config->trace_mode, auxilary_buffer->configuration.trace_mode);
VOLATILE_READ_8(shadow_config->reload_mode, auxilary_buffer->configuration.reload_mode);
VOLATILE_READ_8(shadow_config->verbose_level, auxilary_buffer->configuration.verbose_level);
/* reset the 'changed' byte */
VOLATILE_WRITE_8(auxilary_buffer->configuration.changed, 0);
}
}
void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
}
void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_sanitizer);
}
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_timeout);
}
void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 1);
}
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 1);
}
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint32_t sec, uint32_t usec, uint32_t num_dirty_pages){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 1);
VOLATILE_WRITE_32(auxilary_buffer->result.runtime_sec, sec);
VOLATILE_WRITE_32(auxilary_buffer->result.runtime_usec, usec);
VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, num_dirty_pages);
void set_payload_buffer_write_reason_auxiliary_buffer(
auxilary_buffer_t *auxilary_buffer, char *msg, uint32_t len)
{
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
(size_t)MIN(len, MISC_SIZE - 2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_input_buffer_write);
}
void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t)MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_hprintf);
void set_tmp_snapshot_created(auxilary_buffer_t *auxilary_buffer, uint8_t value)
{
VOLATILE_WRITE_8(auxilary_buffer->result.tmp_snapshot_created, value);
}
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
void set_cap_agent_trace_bitmap(auxilary_buffer_t *auxilary_buffer, bool value)
{
VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_trace_bitmap, value);
}
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_aborted);
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t *auxilary_buffer, bool value)
{
VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_ijon_trace_bitmap, value);
}
void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state){
if(auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.state, state);
}
else{
nyx_error("WARNING: auxilary_buffer pointer is zero\n");
}
void set_result_dirty_pages(auxilary_buffer_t *auxilary_buffer, uint32_t value)
{
VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, value);
}
void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr){
VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 1);
VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, page_addr);
void set_result_pt_trace_size(auxilary_buffer_t *auxilary_buffer, uint32_t value)
{
VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, value);
}
void reset_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 0);
}
void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success){
//TODO refactor to let caller directly set the result codes
if (success == 2) {
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_starved);
} else {
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_success);
}
}
void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_input_buffer_write);
}
void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value){
VOLATILE_WRITE_8(auxilary_buffer->result.tmp_snapshot_created, value);
}
void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){
VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_trace_bitmap, value);
}
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){
VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_ijon_trace_bitmap, value);
}
void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value){
VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, value);
}
void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value){
VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, value);
}
void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value){
if (value != auxilary_buffer->result.bb_coverage){
VOLATILE_WRITE_32(auxilary_buffer->result.bb_coverage, value);
}
void set_result_bb_coverage(auxilary_buffer_t *auxilary_buffer, uint32_t value)
{
if (value != auxilary_buffer->result.bb_coverage) {
VOLATILE_WRITE_32(auxilary_buffer->result.bb_coverage, value);
}
}

View File

@@ -21,160 +21,178 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include <stdint.h>
#define AUX_BUFFER_SIZE 4096
#define AUX_MAGIC 0x54502d554d4551
#define QEMU_PT_VERSION 3 /* let's start at 1 for the initial version using the aux buffer */
#define QEMU_PT_VERSION \
3 /* let's start at 1 for the initial version using the aux buffer */
#define HEADER_SIZE 128
#define CAP_SIZE 256
#define CAP_SIZE 256
#define CONFIG_SIZE 512
#define STATE_SIZE 512
#define MISC_SIZE 4096-(HEADER_SIZE+CAP_SIZE+CONFIG_SIZE+STATE_SIZE)
#define STATE_SIZE 512
#define MISC_SIZE 4096 - (HEADER_SIZE + CAP_SIZE + CONFIG_SIZE + STATE_SIZE)
#define ADD_PADDING(max, type) uint8_t type ## _padding [max - sizeof(type)]
#define ADD_PADDING(max, type) uint8_t type##_padding[max - sizeof(type)]
enum nyx_result_codes {
rc_success = 0,
rc_crash = 1,
rc_hprintf = 2,
rc_timeout = 3,
rc_input_buffer_write = 4,
rc_aborted = 5,
rc_sanitizer = 6,
rc_starved = 7,
rc_success = 0,
rc_crash = 1,
rc_hprintf = 2,
rc_timeout = 3,
rc_input_buffer_write = 4,
rc_aborted = 5,
rc_sanitizer = 6,
rc_starved = 7,
};
typedef struct auxilary_buffer_header_s{
uint64_t magic;
uint16_t version;
uint16_t hash;
/* more to come */
typedef struct auxilary_buffer_header_s {
uint64_t magic;
uint16_t version;
uint16_t hash;
/* more to come */
} __attribute__((packed)) auxilary_buffer_header_t;
typedef struct auxilary_buffer_cap_s{
uint8_t redqueen;
uint8_t agent_timeout_detection; /* agent implements its own timeout detection; host timeout detection is still in use, but the threshold is increased by 2x; */
uint8_t agent_trace_bitmap; /* agent implements its own tracing mechanism; PT tracing is disabled */
uint8_t agent_ijon_trace_bitmap; /* agent uses the ijon shm buffer */
typedef struct auxilary_buffer_cap_s {
uint8_t redqueen;
uint8_t agent_timeout_detection; /* agent implements its own timeout detection;
host timeout detection is still in use, but the threshold is increased by 2x; */
uint8_t agent_trace_bitmap; /* agent implements its own tracing mechanism; PT tracing is disabled */
uint8_t agent_ijon_trace_bitmap; /* agent uses the ijon shm buffer */
uint32_t agent_input_buffer_size; /* agent requests a custom input buffer size (if the size is 0, the minimum buffer size is used) */
uint32_t agent_coverage_bitmap_size; /* agent requests a custom coverage bitmap size (if the size is 0, the minimum buffer size is used) */
/* more to come */
uint32_t agent_input_buffer_size; /* agent requests a custom input buffer size (if
the size is 0, the minimum buffer size is used) */
uint32_t agent_coverage_bitmap_size; /* agent requests a custom coverage bitmap
size (if the size is 0, the minimum buffer size is used) */
/* more to come */
} __attribute__((packed)) auxilary_buffer_cap_t;
typedef struct auxilary_buffer_config_s{
uint8_t changed; /* set this byte to kick in a rescan of this buffer */
typedef struct auxilary_buffer_config_s {
uint8_t changed; /* set this byte to kick in a rescan of this buffer */
uint8_t timeout_sec;
uint32_t timeout_usec;
uint8_t timeout_sec;
uint32_t timeout_usec;
/* trigger to enable / disable different QEMU-PT modes */
uint8_t redqueen_mode;
uint8_t trace_mode; /* dump decoded edge transitions to file */
uint8_t reload_mode;
/* trigger to enable / disable different QEMU-PT modes */
uint8_t redqueen_mode;
uint8_t trace_mode; /* dump decoded edge transitions to file */
uint8_t reload_mode;
uint8_t verbose_level;
uint8_t verbose_level;
uint8_t page_dump_mode;
uint64_t page_addr;
uint8_t page_dump_mode;
uint64_t page_addr;
/* nested mode only */
uint8_t protect_payload_buffer;
/* nested mode only */
uint8_t protect_payload_buffer;
/* 0 -> disabled
1 -> decoding
2 -> decoding + full disassembling
*/
//uint8_t pt_processing_mode;
/* 0 -> disabled
1 -> decoding
2 -> decoding + full disassembling
*/
// uint8_t pt_processing_mode;
/* snapshot extension */
uint8_t discard_tmp_snapshot;
/* snapshot extension */
uint8_t discard_tmp_snapshot;
/* more to come */
/* more to come */
} __attribute__((packed)) auxilary_buffer_config_t;
typedef struct auxilary_buffer_result_s{
/* 0 -> booting,
1 -> loader level 1,
2 -> loader level 2,
3 -> ready to fuzz
*/
uint8_t state;
uint8_t exec_done;
uint8_t exec_result_code;
uint8_t reloaded;
typedef struct auxilary_buffer_result_s {
/* 0 -> booting,
1 -> loader level 1,
2 -> loader level 2,
3 -> ready to fuzz
*/
uint8_t state;
uint8_t exec_done;
uint8_t exec_result_code;
uint8_t reloaded;
uint8_t pt_overflow;
uint8_t page_not_found;
uint8_t tmp_snapshot_created; /* incremental snapshot extension */
uint8_t padding_3;
uint8_t pt_overflow;
uint8_t page_not_found;
uint8_t tmp_snapshot_created; /* incremental snapshot extension */
uint8_t padding_3;
uint64_t page_addr;
uint32_t dirty_pages;
uint32_t pt_trace_size;
uint32_t bb_coverage;
uint32_t runtime_usec;
uint32_t runtime_sec;
uint64_t page_addr;
uint32_t dirty_pages;
uint32_t pt_trace_size;
uint32_t bb_coverage;
uint32_t runtime_usec;
uint32_t runtime_sec;
/* more to come */
/* more to come */
} __attribute__((packed)) auxilary_buffer_result_t;
typedef struct auxilary_buffer_misc_s{
uint16_t len;
uint8_t data;
/* none yet */
typedef struct auxilary_buffer_misc_s {
uint16_t len;
uint8_t data;
/* none yet */
} __attribute__((packed)) auxilary_buffer_misc_t;
typedef struct auxilary_buffer_s{
auxilary_buffer_header_t header;
ADD_PADDING(HEADER_SIZE, auxilary_buffer_header_t);
typedef struct auxilary_buffer_s {
auxilary_buffer_header_t header;
ADD_PADDING(HEADER_SIZE, auxilary_buffer_header_t);
auxilary_buffer_cap_t capabilites;
ADD_PADDING(CAP_SIZE, auxilary_buffer_cap_t);
auxilary_buffer_cap_t capabilites;
ADD_PADDING(CAP_SIZE, auxilary_buffer_cap_t);
auxilary_buffer_config_t configuration;
ADD_PADDING(CONFIG_SIZE, auxilary_buffer_config_t);
auxilary_buffer_config_t configuration;
ADD_PADDING(CONFIG_SIZE, auxilary_buffer_config_t);
auxilary_buffer_result_t result;
ADD_PADDING(STATE_SIZE, auxilary_buffer_result_t);
auxilary_buffer_result_t result;
ADD_PADDING(STATE_SIZE, auxilary_buffer_result_t);
auxilary_buffer_misc_t misc;
ADD_PADDING(MISC_SIZE, auxilary_buffer_misc_t);
auxilary_buffer_misc_t misc;
ADD_PADDING(MISC_SIZE, auxilary_buffer_misc_t);
} __attribute__((packed)) auxilary_buffer_t;
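ADD_PADDING rounds each section up to its fixed size, so the five regions sit at stable offsets inside the packed one-page buffer: header at byte 0, capabilities at 128, configuration at 384, result at 896, and misc at 1408 with the remaining 2688 bytes. The misc region starts with its 16-bit len field, which is why the misc writers in the previous file clamp payloads to MIN(len, MISC_SIZE - 2). A sketch of compile-time checks one could add here (C11 _Static_assert; not part of the header):

#include <stddef.h>

_Static_assert(sizeof(auxilary_buffer_t) == AUX_BUFFER_SIZE,
               "packed aux buffer must stay exactly one 4096-byte page");
_Static_assert(offsetof(auxilary_buffer_t, capabilites) == HEADER_SIZE,
               "capability section starts right after the padded header");
_Static_assert(offsetof(auxilary_buffer_t, misc) ==
                   HEADER_SIZE + CAP_SIZE + CONFIG_SIZE + STATE_SIZE,
               "misc section starts at byte 1408");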
void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer);
void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config);
void init_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer);
void check_auxiliary_config_buffer(auxilary_buffer_t *auxilary_buffer,
auxilary_buffer_config_t *shadow_config);
void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint32_t sec, uint32_t usec, uint32_t num_dirty_pages);
void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state);
void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_crash_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_asan_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_reload_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint32_t sec,
uint32_t usec,
uint32_t num_dirty_pages);
void set_state_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint8_t state);
void set_hprintf_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
char *msg,
uint32_t len);
void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr);
void reset_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success);
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint64_t page_addr);
void reset_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_success_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
uint8_t success);
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
char *msg,
uint32_t len);
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
char *msg,
uint32_t len);
void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value);
void set_tmp_snapshot_created(auxilary_buffer_t *auxilary_buffer, uint8_t value);
void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value);
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value);
void set_cap_agent_trace_bitmap(auxilary_buffer_t *auxilary_buffer, bool value);
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t *auxilary_buffer, bool value);
void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value);
void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value);
void set_result_dirty_pages(auxilary_buffer_t *auxilary_buffer, uint32_t value);
void set_result_pt_trace_size(auxilary_buffer_t *auxilary_buffer, uint32_t value);
void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value);
void set_result_bb_coverage(auxilary_buffer_t *auxilary_buffer, uint32_t value);
void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_payload_buffer_write_reason_auxiliary_buffer(
auxilary_buffer_t *auxilary_buffer, char *msg, uint32_t len);

View File

@@ -1,8 +1,8 @@
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <execinfo.h>
#include "qemu/osdep.h"
#include "nyx/debug.h"
@@ -11,54 +11,61 @@
#ifdef ENABLE_BACKTRACES
#define BT_BUF_SIZE 100
void qemu_backtrace(void){
void *buffer[BT_BUF_SIZE];
int nptrs = 0;
int j;
void qemu_backtrace(void)
{
void *buffer[BT_BUF_SIZE];
int nptrs = 0;
int j;
nptrs = backtrace(buffer, BT_BUF_SIZE);
fprintf(stderr, "backtrace() returned %d addresses\n", nptrs);
nptrs = backtrace(buffer, BT_BUF_SIZE);
fprintf(stderr, "backtrace() returned %d addresses\n", nptrs);
char **strings = backtrace_symbols(buffer, nptrs);
if (strings == NULL) {
fprintf(stderr, "backtrace_symbols failed!\n");
return;
//exit(EXIT_FAILURE);
}
char **strings = backtrace_symbols(buffer, nptrs);
if (strings == NULL) {
fprintf(stderr, "backtrace_symbols failed!\n");
return;
// exit(EXIT_FAILURE);
}
for (j = 0; j < nptrs; j++)
fprintf(stderr, "%s\n", strings[j]);
for (j = 0; j < nptrs; j++)
fprintf(stderr, "%s\n", strings[j]);
free(strings);
free(strings);
}
static void sigsegfault_handler(int signo, siginfo_t *info, void *extra) {
fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo);
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while(1){
sleep(1);
}
static void sigsegfault_handler(int signo, siginfo_t *info, void *extra)
{
fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(),
signo);
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while (1) {
sleep(1);
}
}
static void sigabrt_handler(int signo, siginfo_t *info, void *extra) {
fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo);
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while(1){
sleep(1);
}
static void sigabrt_handler(int signo, siginfo_t *info, void *extra)
{
fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(),
signo);
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while (1) {
sleep(1);
}
}
static void sigint_handler(int signo, siginfo_t *info, void *extra) {
fprintf(stderr, "[qemu-nyx] bye! (pid: %d / signal: %d)\n", getpid(), signo);
exit(0);
static void sigint_handler(int signo, siginfo_t *info, void *extra)
{
fprintf(stderr, "[qemu-nyx] bye! (pid: %d / signal: %d)\n", getpid(), signo);
exit(0);
}
void init_crash_handler(void){
void init_crash_handler(void)
{
struct sigaction action;
action.sa_flags = SA_SIGINFO;
action.sa_flags = SA_SIGINFO;
action.sa_sigaction = sigsegfault_handler;
if (sigaction(SIGSEGV, &action, NULL) == -1) {
@@ -66,8 +73,7 @@ void init_crash_handler(void){
_exit(1);
}
action.sa_sigaction = sigabrt_handler;
if (sigaction(SIGABRT, &action, NULL) == -1) {
@@ -75,43 +81,44 @@ void init_crash_handler(void){
_exit(1);
}
/* don't install a SIGINT handler if the nyx block cow cache layer is disabled */
if(!getenv("NYX_DISABLE_BLOCK_COW")){
action.sa_sigaction = sigint_handler;
if (sigaction(SIGINT, &action, NULL) == -1) {
fprintf(stderr, "SIGINT: sigaction failed");
_exit(1);
/* don't install a SIGINT handler if the nyx block cow cache layer is disabled */
if (!getenv("NYX_DISABLE_BLOCK_COW")) {
action.sa_sigaction = sigint_handler;
if (sigaction(SIGINT, &action, NULL) == -1) {
fprintf(stderr, "SIGINT: sigaction failed");
_exit(1);
}
}
}
}
void hexdump_kafl(const void* data, size_t size) {
char ascii[17];
size_t i, j;
ascii[16] = '\0';
for (i = 0; i < size; ++i) {
printf("%02X ", ((unsigned char*)data)[i]);
if (((unsigned char*)data)[i] >= ' ' && ((unsigned char*)data)[i] <= '~') {
ascii[i % 16] = ((unsigned char*)data)[i];
} else {
ascii[i % 16] = '.';
}
if ((i+1) % 8 == 0 || i+1 == size) {
printf(" ");
if ((i+1) % 16 == 0) {
printf("| %s \n", ascii);
} else if (i+1 == size) {
ascii[(i+1) % 16] = '\0';
if ((i+1) % 16 <= 8) {
printf(" ");
}
for (j = (i+1) % 16; j < 16; ++j) {
printf(" ");
}
printf("| %s \n", ascii);
}
}
}
void hexdump_kafl(const void *data, size_t size)
{
char ascii[17];
size_t i, j;
ascii[16] = '\0';
for (i = 0; i < size; ++i) {
printf("%02X ", ((unsigned char *)data)[i]);
if (((unsigned char *)data)[i] >= ' ' && ((unsigned char *)data)[i] <= '~') {
ascii[i % 16] = ((unsigned char *)data)[i];
} else {
ascii[i % 16] = '.';
}
if ((i + 1) % 8 == 0 || i + 1 == size) {
printf(" ");
if ((i + 1) % 16 == 0) {
printf("| %s \n", ascii);
} else if (i + 1 == size) {
ascii[(i + 1) % 16] = '\0';
if ((i + 1) % 16 <= 8) {
printf(" ");
}
for (j = (i + 1) % 16; j < 16; ++j) {
printf(" ");
}
printf("| %s \n", ascii);
}
}
}
}
#endif

View File

@@ -5,43 +5,46 @@
#include <stdlib.h>
#include <unistd.h>
#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu-common.h"
#define ENABLE_BACKTRACES
#define NYX_LOG_PREFIX "[QEMU-NYX] "
#define CORE_PREFIX "Core: "
#define MEM_PREFIX "Memory: "
#define RELOAD_PREFIX "Reload: "
#define PT_PREFIX "PT: "
#define INTERFACE_PREFIX "Interface: "
#define REDQUEEN_PREFIX "Redqueen: "
#define DISASM_PREFIX "Disasm: "
#define PAGE_CACHE_PREFIX "PageCache: "
#define NESTED_VM_PREFIX "Nested: "
#define NYX_LOG_PREFIX "[QEMU-NYX] "
#define CORE_PREFIX "Core: "
#define MEM_PREFIX "Memory: "
#define RELOAD_PREFIX "Reload: "
#define PT_PREFIX "PT: "
#define INTERFACE_PREFIX "Interface: "
#define REDQUEEN_PREFIX "Redqueen: "
#define DISASM_PREFIX "Disasm: "
#define PAGE_CACHE_PREFIX "PageCache: "
#define NESTED_VM_PREFIX "Nested: "
#ifdef NYX_DEBUG
/*
* qemu_log() is the standard logging enabled with -D
* qemu_log_mask() is activated with additional -t nyx option
*/
//#define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX format, ##__VA_ARGS__)
#define nyx_debug_p(PREFIX, format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX PREFIX format, ##__VA_ARGS__)
// #define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX
// "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define nyx_debug(format, ...) \
qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX format, ##__VA_ARGS__)
#define nyx_debug_p(PREFIX, format, ...) \
qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX PREFIX format, ##__VA_ARGS__)
#else
#define nyx_debug(...)
#define nyx_debug_p(...)
#endif
#define nyx_printf(format, ...) qemu_log(format, ##__VA_ARGS__)
#define nyx_error(format, ...) error_printf(format, ##__VA_ARGS__)
#define nyx_trace(format, ...) nyx_debug("=> %s\n", __func__)
#define nyx_printf(format, ...) qemu_log(format, ##__VA_ARGS__)
#define nyx_error(format, ...) error_printf(format, ##__VA_ARGS__)
#define nyx_trace(format, ...) nyx_debug("=> %s\n", __func__)
#ifdef ENABLE_BACKTRACES
void qemu_backtrace(void);
void init_crash_handler(void);
void hexdump_kafl(const void* data, size_t size);
void hexdump_kafl(const void *data, size_t size);
#endif

View File

@@ -19,136 +19,156 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include"qemu/osdep.h"
#include "qemu/osdep.h"
#include<stdint.h>
#include<sys/mman.h>
#include<sys/stat.h>
#include<sys/types.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include<immintrin.h>
#include <immintrin.h>
#include"block/qapi.h"
#include"exec/ram_addr.h"
#include "block/qapi.h"
#include "exec/ram_addr.h"
#include"migration/global_state.h"
#include"migration/migration.h"
#include"migration/qemu-file.h"
#include"migration/register.h"
#include"migration/savevm.h"
#include"migration/vmstate.h"
#include "migration/global_state.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "migration/savevm.h"
#include "migration/vmstate.h"
#include"qemu/main-loop.h"
#include"qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "qemu/rcu_queue.h"
#include"sysemu/block-backend.h"
#include"sysemu/cpus.h"
#include"sysemu/kvm_int.h"
#include"sysemu/reset.h"
#include"sysemu/runstate.h"
#include"sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm_int.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include"nyx/debug.h"
#include"nyx/fast_vm_reload.h"
#include"nyx/state/snapshot_state.h"
#include"nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/state/snapshot_state.h"
#include "nyx/state/state.h"
#include"nyx/helpers.h"
#include"nyx/memory_access.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include"nyx/snapshot/helper.h"
#include"nyx/snapshot/memory/block_list.h"
#include"nyx/snapshot/memory/shadow_memory.h"
#include"nyx/snapshot/block/nyx_block_snapshot.h"
#include"nyx/snapshot/devices/nyx_device_state.h"
#include"nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/snapshot/block/nyx_block_snapshot.h"
#include "nyx/snapshot/devices/nyx_device_state.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
FastReloadMemoryMode mode = RELOAD_MEMORY_MODE_DEBUG;
/* basic operations */
static void fast_snapshot_init_operation(fast_reload_t* self, const char* snapshot_folder, bool pre_snapshot){
static void fast_snapshot_init_operation(fast_reload_t *self,
const char *snapshot_folder,
bool pre_snapshot)
{
assert((snapshot_folder == NULL && pre_snapshot == false) || snapshot_folder);
if (snapshot_folder){
self->device_state = nyx_device_state_init_from_snapshot(snapshot_folder, pre_snapshot);
self->shadow_memory_state = shadow_memory_init_from_snapshot(snapshot_folder, pre_snapshot);
}
else{
self->device_state = nyx_device_state_init();
if (snapshot_folder) {
self->device_state =
nyx_device_state_init_from_snapshot(snapshot_folder, pre_snapshot);
self->shadow_memory_state =
shadow_memory_init_from_snapshot(snapshot_folder, pre_snapshot);
} else {
self->device_state = nyx_device_state_init();
self->shadow_memory_state = shadow_memory_init();
}
if (!pre_snapshot) {
switch (mode) {
case RELOAD_MEMORY_MODE_DEBUG:
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
break;
case RELOAD_MEMORY_MODE_FDL:
self->fdl_state = nyx_fdl_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
self->fdl_state = nyx_fdl_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state);
break;
}
self->fdl_user_state = nyx_fdl_user_init(self->shadow_memory_state);
nyx_fdl_user_enable(self->fdl_user_state);
}
if (snapshot_folder) {
self->block_state =
nyx_block_snapshot_init_from_file(snapshot_folder, pre_snapshot);
} else {
self->block_state = nyx_block_snapshot_init();
}
memory_global_dirty_log_start();
if (!pre_snapshot) {
self->root_snapshot_created = true;
}
}
static void fast_snapshot_restore_operation(fast_reload_t *self)
{
uint32_t num_dirty_pages = 0;
switch (mode) {
case RELOAD_MEMORY_MODE_DEBUG:
num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
self->blocklist, false);
break;
case RELOAD_MEMORY_MODE_FDL:
num_dirty_pages += nyx_snapshot_nyx_fdl_restore(self->fdl_state,
self->shadow_memory_state,
self->blocklist);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
num_dirty_pages += nyx_snapshot_nyx_fdl_restore(self->fdl_state,
self->shadow_memory_state,
self->blocklist);
num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
num_dirty_pages +=
nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state,
self->shadow_memory_state,
self->blocklist);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
num_dirty_pages +=
nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state,
self->shadow_memory_state,
self->blocklist);
num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
self->blocklist, true);
break;
}
num_dirty_pages += nyx_snapshot_user_fdl_restore(self->fdl_user_state,
self->shadow_memory_state,
self->blocklist);
GET_GLOBAL_STATE()->num_dirty_pages = num_dirty_pages;
}
static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t *self)
{
/* flush all pending block writes */
bdrv_drain_all();
memory_global_dirty_log_sync();
@@ -157,34 +177,49 @@ static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t*
nyx_block_snapshot_switch_incremental(self->block_state);
}
static inline void fast_snapshot_create_incremental_operation(fast_reload_t *self)
{
shadow_memory_prepare_incremental(self->shadow_memory_state);
nyx_device_state_save_tsc_incremental(self->device_state);
switch (mode) {
case RELOAD_MEMORY_MODE_DEBUG:
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
self->blocklist, false);
break;
case RELOAD_MEMORY_MODE_FDL:
nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state,
self->shadow_memory_state,
self->blocklist);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state,
self->shadow_memory_state,
self->blocklist);
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state,
self->shadow_memory_state,
self->blocklist);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state,
self->shadow_memory_state,
self->blocklist);
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
self->blocklist, true);
break;
}
nyx_snapshot_nyx_fdl_user_save_root_pages(self->fdl_user_state,
self->shadow_memory_state,
self->blocklist);
shadow_memory_switch_snapshot(self->shadow_memory_state, true);
kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE_FAST);
@@ -192,61 +227,65 @@ static inline void fast_snapshot_create_incremental_operation(fast_reload_t* sel
}
fast_reload_t *fast_reload_new(void)
{
fast_reload_t *self = malloc(sizeof(fast_reload_t));
memset(self, 0x0, sizeof(fast_reload_t));
self->root_snapshot_created = false;
self->incremental_snapshot_enabled = false;
self->bitmap_copy = NULL;
return self;
}
void fast_reload_set_mode(fast_reload_t *self, FastReloadMemoryMode m)
{
assert(!self->root_snapshot_created);
mode = m;
}
FastReloadMemoryMode fast_reload_get_mode(fast_reload_t *self)
{
return mode;
}
void fast_reload_init(fast_reload_t *self)
{
self->blocklist = snapshot_page_blocklist_init();
}
/* fix this */
void fast_reload_destroy(fast_reload_t *self)
{
/* TODO: complete me */
// close(self->vmx_fdl_fd);
// munmap(self->fdl_data, (self->guest_ram_size/0x1000)*8);
/*
munmap(self->ptr, self->guest_ram_size);
free(self->black_list_pages);
free(self);
*/
}
inline static void unlock_snapshot(const char *folder)
{
char *info_file;
char *lock_file;
/* info file */
assert(asprintf(&info_file, "%s/INFO.txt", folder) != -1);
FILE *f_info = fopen(info_file, "w+b");
if (GET_GLOBAL_STATE()->fast_reload_pre_image) {
const char *msg = "THIS IS A NYX PRE IMAGE SNAPSHOT FOLDER!\n";
fwrite(msg, strlen(msg), 1, f_info);
} else {
const char *msg = "THIS IS A NYX SNAPSHOT FOLDER!\n";
fwrite(msg, strlen(msg), 1, f_info);
}
fclose(f_info);
@@ -258,23 +297,26 @@ inline static void unlock_snapshot(const char* folder){
free(lock_file);
}
inline static void wait_for_snapshot(const char *folder)
{
char *lock_file;
assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1);
while (access(lock_file, F_OK) == -1) {
sleep(1);
}
free(lock_file);
}
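unlock_snapshot() and wait_for_snapshot() implement a file-based handshake: once all snapshot files are written, a ready.lock marker is dropped into the folder, and a consumer blocks in the one-second poll loop above until the marker appears. A non-blocking variant would look like this sketch (snapshot_is_ready() is illustrative and not part of the code base):
static bool snapshot_is_ready(const char *folder)
{
    char *lock_file = NULL;
    assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1);
    /* same check as wait_for_snapshot(), minus the sleep loop */
    bool ready = (access(lock_file, F_OK) == 0);
    free(lock_file);
    return ready;
}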
void fast_reload_serialize_to_file(fast_reload_t *self,
const char *folder,
bool is_pre_snapshot)
{
nyx_trace();
/* sanity check */
if (!folder_exits(folder)) {
nyx_debug_p(RELOAD_PREFIX, "Folder %s does not exist...failed!", folder);
assert(0);
}
@@ -286,7 +328,7 @@ void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder, bool
/* block device state */
nyx_block_snapshot_serialize(self->block_state, folder);
/* NYX's state */
serialize_state(folder, is_pre_snapshot);
@@ -295,14 +337,18 @@ void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder, bool
}
static void fast_reload_create_from_snapshot(fast_reload_t *self,
const char *folder,
bool lock_iothread,
bool pre_snapshot)
{
nyx_trace();
assert(self != NULL);
wait_for_snapshot(folder);
nyx_debug_p(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);
nyx_debug_p(RELOAD_PREFIX,
"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);
rcu_read_lock();
@@ -311,7 +357,7 @@ static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* fo
cpu_synchronize_all_pre_loadvm();
if (!pre_snapshot) {
memory_global_dirty_log_stop();
memory_global_dirty_log_sync();
}
@@ -320,30 +366,36 @@ static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* fo
rcu_read_unlock();
if (!pre_snapshot) {
deserialize_state(folder);
}
cpu_synchronize_all_post_init();
qemu_get_cpu(0)->vcpu_dirty = true;
kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE);
if (!pre_snapshot) {
nyx_device_state_save_tsc(self->device_state);
}
}
void fast_reload_create_from_file(fast_reload_t *self,
const char *folder,
bool lock_iothread)
{
nyx_trace();
fast_reload_create_from_snapshot(self, folder, lock_iothread, false);
}
void fast_reload_create_from_file_pre_image(fast_reload_t *self,
const char *folder,
bool lock_iothread)
{
nyx_trace();
fast_reload_create_from_snapshot(self, folder, lock_iothread, true);
}
void fast_reload_create_in_memory(fast_reload_t *self)
{
nyx_trace();
assert(self != NULL);
@@ -358,17 +410,18 @@ void fast_reload_create_in_memory(fast_reload_t* self){
memory_global_dirty_log_sync();
fast_snapshot_init_operation(self, NULL, false);
rcu_read_unlock();
cpu_synchronize_all_post_init();
}
void fast_reload_restore(fast_reload_t *self)
{
assert(self != NULL);
self->dirty_pages = 0;
/* flush all pending block writes */
bdrv_drain_all();
memory_global_dirty_log_sync();
nyx_block_snapshot_reset(self->block_state);
@@ -387,49 +440,54 @@ void fast_reload_restore(fast_reload_t* self){
}
bool read_snapshot_memory(fast_reload_t *self, uint64_t address, void *ptr, size_t size)
{
return shadow_memory_read_physical_memory(self->shadow_memory_state, address,
ptr, size);
}
/* fix this */
void *fast_reload_get_physmem_shadow_ptr(fast_reload_t *self, uint64_t physaddr)
{
abort(); /* TODO: fix this function first -> pc_piix memory split issue */
/*
assert(self != NULL);
assert(!(physaddr&0xFFF)); // physaddr must be 4kb align !
if (self->shadow_memory_regions){
for(uint64_t j = 0; j < self->shadow_memory_regions; j++){
if(physaddr >= self->ram_block_array[j]->offset && physaddr <
(self->ram_block_array[j]->offset+self->ram_block_array[j]->used_length)){ return
self->shadow_memory[j]+(physaddr-self->ram_block_array[j]->offset);
}
}
}
*/
return NULL; // not found ... sorry :(
}
void fast_reload_blacklist_page(fast_reload_t *self, uint64_t physaddr)
{
assert(self->blocklist);
snapshot_page_blocklist_add(self->blocklist, physaddr);
}
bool fast_reload_snapshot_exists(fast_reload_t *self)
{
if (!self) {
return false;
}
return true;
}
void fast_reload_create_tmp_snapshot(fast_reload_t *self)
{
assert(self);
self->dirty_pages = 0;
fast_snapshot_pre_create_incremental_operation(self);
if (!self->bitmap_copy) {
self->bitmap_copy = new_coverage_bitmaps();
}
coverage_bitmap_copy_to_buffer(self->bitmap_copy);
@@ -438,7 +496,8 @@ void fast_reload_create_tmp_snapshot(fast_reload_t* self){
self->incremental_snapshot_enabled = true;
}
void fast_reload_discard_tmp_snapshot(fast_reload_t *self)
{
assert(self && self->incremental_snapshot_enabled);
self->dirty_pages = 0;
@@ -459,45 +518,53 @@ void fast_reload_discard_tmp_snapshot(fast_reload_t* self){
self->incremental_snapshot_enabled = false;
}
bool fast_reload_root_created(fast_reload_t *self)
{
return self->root_snapshot_created;
}
bool fast_reload_tmp_created(fast_reload_t *self)
{
return self->incremental_snapshot_enabled;
}
uint32_t get_dirty_page_num(fast_reload_t *self)
{
if (self) {
return self->dirty_pages;
} else {
return 0;
}
}
bool fast_reload_set_bitmap(fast_reload_t *self)
{
if (self->incremental_snapshot_enabled) {
coverage_bitmap_copy_from_buffer(self->bitmap_copy);
return true;
}
return false;
}
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t *self,
MemoryRegion *mr,
uint64_t addr,
uint64_t length)
{
/* works only with PC.RAM's memory region */
assert(mr->alias_offset == 0);
nyx_fdl_user_set(self->fdl_user_state, self->shadow_memory_state,
self->fdl_state, addr, length);
}
void fast_reload_handle_dirty_ring_full(fast_reload_t *self)
{
if (self->dirty_ring_state) {
nyx_snapshot_nyx_dirty_ring_flush_and_collect(self->dirty_ring_state,
self->shadow_memory_state,
self->blocklist);
} else {
nyx_snapshot_nyx_dirty_ring_flush();
}
}
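This handler runs when a vCPU stops because its dirty ring is out of free slots, a condition KVM reports to userspace as the KVM_EXIT_DIRTY_RING_FULL exit reason. A sketch of the dispatch (assumed wiring; the actual run-loop integration lives in kvm-all.c, not in this file):
static int handle_dirty_ring_exit(struct kvm_run *run)
{
    switch (run->exit_reason) {
    case KVM_EXIT_DIRTY_RING_FULL:
        /* flush the ring into the snapshot's dirty-page tracking and resume */
        fast_reload_handle_dirty_ring_full(get_fast_reload_snapshot());
        return 0;
    default:
        return -1; /* let the regular exit handling take over */
    }
}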


@@ -21,115 +21,123 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include"monitor/monitor.h"
#include"sysemu/runstate.h"
#include"qemu-common.h"
#include "monitor/monitor.h"
#include "sysemu/runstate.h"
#include "qemu-common.h"
#include"nyx/snapshot/block/nyx_block_snapshot.h"
#include"nyx/snapshot/devices/nyx_device_state.h"
#include"nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include"nyx/snapshot/memory/backend/nyx_fdl.h"
#include"nyx/snapshot/memory/block_list.h"
#include"nyx/snapshot/memory/nyx_fdl_user.h"
#include"nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/block/nyx_block_snapshot.h"
#include "nyx/snapshot/devices/nyx_device_state.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include"nyx/helpers.h"
#include "nyx/helpers.h"
typedef enum FastReloadMemoryMode {
RELOAD_MEMORY_MODE_DEBUG, /* memcmp-based dirty tracing - it's super slow - only for debug purposes */
RELOAD_MEMORY_MODE_DEBUG_QUIET, /* debug mode in non-verbose mode */
RELOAD_MEMORY_MODE_FDL, /* super fast page tracker built around KVM-PT's dirty tracker (FDL = fast dirty log) */
RELOAD_MEMORY_MODE_FDL_DEBUG, /* FDL + debug mode */
RELOAD_MEMORY_MODE_DIRTY_RING, /* fast page tracker built around KVM's dirty ring API */
RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG, /* dirty ring + debug mode */
} FastReloadMemoryMode;
typedef struct fast_reload_dump_head_s {
uint32_t shadow_memory_regions;
uint32_t ram_region_index;
} fast_reload_dump_head_t;
typedef struct fast_reload_s {
FastReloadMemoryMode mode;
/* memory snapshot */
shadow_memory_t *shadow_memory_state;
/* state of page frame blocklist */
snapshot_page_blocklist_t *blocklist;
/* state of FDL */
nyx_fdl_t *fdl_state;
/* dirty ring state */
nyx_dirty_ring_t *dirty_ring_state;
/* state of user-level FDL */
nyx_fdl_user_t *fdl_user_state;
/* nyx's serialized device state */
nyx_device_state_t *device_state;
nyx_block_t *block_state;
bool root_snapshot_created;
bool incremental_snapshot_enabled;
/* copy of the fuzzing bitmap & ijon state buffer */
nyx_coverage_bitmap_copy_t *bitmap_copy;
uint32_t dirty_pages;
} fast_reload_t;
fast_reload_t *fast_reload_new(void);
/* TODO: get rid of this */
void fast_reload_create_to_file(fast_reload_t *self,
const char *folder,
bool lock_iothread);
void fast_reload_create_from_file(fast_reload_t *self,
const char *folder,
bool lock_iothread);
void fast_reload_create_from_file_pre_image(fast_reload_t *self,
const char *folder,
bool lock_iothread);
/* keep this */
void fast_reload_create_in_memory(fast_reload_t *self);
void fast_reload_serialize_to_file(fast_reload_t *self,
const char *folder,
bool is_pre_snapshot);
void fast_reload_restore(fast_reload_t *self);
void fast_reload_blacklist_page(fast_reload_t *self, uint64_t physaddr);
void *fast_reload_get_physmem_shadow_ptr(fast_reload_t *self, uint64_t physaddr);
bool fast_reload_snapshot_exists(fast_reload_t *self);
bool read_snapshot_memory(fast_reload_t *self, uint64_t address, void *ptr, size_t size);
void fast_reload_destroy(fast_reload_t *self);
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t *self,
MemoryRegion *mr,
uint64_t addr,
uint64_t length);
void fast_reload_create_tmp_snapshot(fast_reload_t *self);
void fast_reload_discard_tmp_snapshot(fast_reload_t *self);
bool fast_reload_root_created(fast_reload_t *self);
bool fast_reload_tmp_created(fast_reload_t *self);
bool fast_reload_set_bitmap(fast_reload_t *self);
uint32_t get_dirty_page_num(fast_reload_t *self);
void fast_reload_init(fast_reload_t *self);
void fast_reload_set_mode(fast_reload_t *self, FastReloadMemoryMode m);
void fast_reload_handle_dirty_ring_full(fast_reload_t *self);
FastReloadMemoryMode fast_reload_get_mode(fast_reload_t *self);


@@ -1,346 +1,360 @@
#include"qemu/osdep.h"
#include "qemu/osdep.h"
#include<assert.h>
#include<stdint.h>
#include<stdio.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include"exec/memory.h"
#include"qapi/error.h"
#include"qapi/qapi-types-run-state.h"
#include"qemu/main-loop.h"
#include"qemu-common.h"
#include "exec/memory.h"
#include "qapi/error.h"
#include "qapi/qapi-types-run-state.h"
#include "qemu/main-loop.h"
#include "qemu-common.h"
#include"sysemu/kvm.h"
#include"sysemu/kvm_int.h"
#include"sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include"fast_vm_reload_sync.h"
#include"nyx/debug.h"
#include"nyx/fast_vm_reload.h"
#include"nyx/kvm_nested.h"
#include"nyx/state/state.h"
#include "fast_vm_reload_sync.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/kvm_nested.h"
#include "nyx/state/state.h"
extern int save_snapshot(const char *name, Error **errp);
extern int load_snapshot(const char *name, Error **errp);
static void adjust_rip(CPUX86State *env, fast_reload_t *snapshot)
{
switch (fast_reload_get_mode(snapshot)) {
case RELOAD_MEMORY_MODE_DEBUG:
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
env->eip -= 1; /* out */
break;
case RELOAD_MEMORY_MODE_FDL:
case RELOAD_MEMORY_MODE_FDL_DEBUG:
env->eip -= 3; /* vmcall */
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
env->eip -= 1; /* out */
break;
}
}
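The rewind amounts match the length of the guest instruction that issued the request, so a restored VM resumes at the triggering hypercall instruction: the port-I/O path traps on a one-byte out (0xEF encodes `outl %eax, (%dx)`), while the KVM-PT/FDL path uses the three-byte vmcall (0x0F 0x01 0xC1). Expressed as defines (illustrative only, not part of the code base):
#define NYX_TRIGGER_LEN_OUT    1 /* outl %eax, (%dx) -> 0xef           */
#define NYX_TRIGGER_LEN_VMCALL 3 /* vmcall           -> 0x0f 0x01 0xc1 */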
fast_vm_reload_sync_t *init_fast_vm_reload_sync(void)
{
fast_vm_reload_sync_t *self = malloc(sizeof(fast_vm_reload_sync_t));
memset(self, 0, sizeof(fast_vm_reload_sync_t));
self->request_exists = false;
self->request_exists_pre = false;
self->current_request = REQUEST_VOID;
self->debug_mode = false;
/* TODO: only RELOAD_MODE_NO_BLOCK is supported for actual fuzzing */
self->mode = RELOAD_MODE_NO_BLOCK;
return self;
}
bool fast_snapshot_exists(fast_vm_reload_sync_t *self, FastReloadRequest type)
{
assert(self->mode != RELOAD_MODE_DEBUG);
switch (type) {
case REQUEST_PRE_EXISTS:
abort();
case REQUEST_ROOT_EXISTS:
return fast_reload_root_created(get_fast_reload_snapshot());
case REQUEST_TMP_EXISTS:
return fast_reload_tmp_created(get_fast_reload_snapshot());
default:
abort();
}
}
static inline void perform_task_debug_mode(fast_vm_reload_sync_t *self,
FastReloadRequest request)
{
struct Error *errp = NULL;
switch (request) {
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
abort();
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
save_snapshot("pre_root", &errp);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
abort();
case REQUEST_SAVE_SNAPSHOT_ROOT:
vm_stop(RUN_STATE_SAVE_VM);
save_snapshot("root", &errp);
break;
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
abort();
case REQUEST_SAVE_SNAPSHOT_TMP:
vm_stop(RUN_STATE_SAVE_VM);
save_snapshot("tmp", &errp);
break;
case REQUEST_LOAD_SNAPSHOT_PRE:
/* probably never called */
abort();
break;
case REQUEST_LOAD_SNAPSHOT_ROOT:
vm_stop(RUN_STATE_RESTORE_VM);
load_snapshot("root", &errp);
break;
case REQUEST_LOAD_SNAPSHOT_TMP:
vm_stop(RUN_STATE_RESTORE_VM);
load_snapshot("tmp", &errp);
break;
default:
abort();
}
if (errp) {
error_reportf_err(errp, "Error: ");
errp = NULL;
abort();
}
vm_start();
}
static inline void create_root_snapshot(void)
{
if (GET_GLOBAL_STATE()->fast_reload_enabled) {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_enabled: TRUE\n");
if (GET_GLOBAL_STATE()->fast_reload_mode) {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_mode: TRUE\n");
/* we've loaded an external snapshot folder - so do nothing and don't create any new snapshot files */
} else {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_mode: FALSE\n");
/* store the current state as a snapshot folder */
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(),
GET_GLOBAL_STATE()->fast_reload_path, false);
}
} else {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_enabled: FALSE\n");
/* so we haven't set a path for our snapshot files - just store everything in memory */
fast_reload_create_in_memory(get_fast_reload_snapshot());
}
}
static inline void perform_task_no_block_mode(fast_vm_reload_sync_t *self,
FastReloadRequest request)
{
CPUState *cpu = qemu_get_cpu(0);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
qemu_mutex_lock_iothread();
switch (request) {
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(),
GET_GLOBAL_STATE()->fast_reload_pre_path, true);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
qemu_mutex_unlock_iothread();
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
case REQUEST_SAVE_SNAPSHOT_ROOT:
kvm_arch_get_registers(cpu);
vm_stop(RUN_STATE_SAVE_VM);
create_root_snapshot();
fast_reload_restore(get_fast_reload_snapshot());
break;
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
case REQUEST_SAVE_SNAPSHOT_TMP:
fast_reload_create_tmp_snapshot(get_fast_reload_snapshot());
fast_reload_restore(get_fast_reload_snapshot());
break;
case REQUEST_LOAD_SNAPSHOT_PRE:
abort();
break;
case REQUEST_LOAD_SNAPSHOT_ROOT:
case REQUEST_LOAD_SNAPSHOT_TMP:
fast_reload_restore(get_fast_reload_snapshot());
break;
case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP:
kvm_arch_get_registers(cpu);
adjust_rip(env, get_fast_reload_snapshot());
set_nested_rip(cpu, env->eip);
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
kvm_arch_get_registers(cpu);
vm_stop(RUN_STATE_SAVE_VM);
create_root_snapshot();
fast_reload_restore(get_fast_reload_snapshot());
break;
default:
abort();
}
vm_start();
cpu_resume(cpu);
qemu_mutex_unlock_iothread();
}
static inline void perform_task_block_mode(fast_vm_reload_sync_t *self,
FastReloadRequest request)
{
switch (request) {
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(),
GET_GLOBAL_STATE()->fast_reload_pre_path, true);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_ROOT:
/* TODO: fix this */
vm_stop(RUN_STATE_SAVE_VM);
create_root_snapshot(); /* TODO: fix this -> broken in ahci mode */
break;
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_TMP:
vm_stop(RUN_STATE_SAVE_VM);
fast_reload_create_tmp_snapshot(get_fast_reload_snapshot());
break;
case REQUEST_LOAD_SNAPSHOT_PRE:
abort();
break;
case REQUEST_LOAD_SNAPSHOT_ROOT:
case REQUEST_LOAD_SNAPSHOT_TMP:
vm_stop(RUN_STATE_RESTORE_VM);
fast_reload_restore(get_fast_reload_snapshot());
break;
default:
abort();
}
vm_start();
}
static inline void perform_task(fast_vm_reload_sync_t *self, FastReloadRequest request)
{
switch (self->mode) {
case RELOAD_MODE_DEBUG:
abort();
perform_task_debug_mode(self, request);
break;
case RELOAD_MODE_NO_BLOCK:
perform_task_no_block_mode(self, request);
break;
case RELOAD_MODE_BLOCK:
perform_task_block_mode(self, request);
break;
}
}
void request_fast_vm_reload(fast_vm_reload_sync_t *self, FastReloadRequest request)
{
assert(!self->request_exists);
assert(self->current_request == REQUEST_VOID);
if (self->mode == RELOAD_MODE_NO_BLOCK) {
CPUState *cpu = qemu_get_cpu(0);
kvm_arch_get_registers(cpu);
// perform_task(self, request);
perform_task_no_block_mode(self, request);
} else {
self->current_request = request;
self->request_exists = true;
self->request_exists_pre = true;
}
}
bool reload_request_exists(fast_vm_reload_sync_t *self)
{
return self->request_exists_pre;
}
void reload_request_discard_tmp(fast_vm_reload_sync_t *self)
{
fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot());
}
bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t *self)
{
/* TODO: always returns false or abort() ? */
if (self->request_exists_pre) {
self->request_exists_pre = false;
abort();
CPUState *cpu = qemu_get_cpu(0);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
kvm_arch_get_registers(cpu);
switch (self->current_request) {
case REQUEST_VOID:
fprintf(stderr, "%s: REQUEST_VOID requested!\n", __func__);
abort();
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
qemu_system_vmstop_request(RUN_STATE_SAVE_VM);
break;
case REQUEST_SAVE_SNAPSHOT_PRE:
case REQUEST_SAVE_SNAPSHOT_ROOT:
case REQUEST_SAVE_SNAPSHOT_TMP:
qemu_system_vmstop_request(RUN_STATE_SAVE_VM);
break;
case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
set_nested_rip(cpu, env->eip);
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
qemu_system_vmstop_request(RUN_STATE_SAVE_VM);
case REQUEST_LOAD_SNAPSHOT_PRE:
case REQUEST_LOAD_SNAPSHOT_ROOT:
case REQUEST_LOAD_SNAPSHOT_TMP:
qemu_system_vmstop_request(RUN_STATE_RESTORE_VM);
break;
default:
fprintf(stderr, "%s: Unkown request: %d\n", __func__,
self->current_request);
abort();
}
return true;
}
return false;
}
bool check_if_relood_request_exists_post(fast_vm_reload_sync_t *self)
{
if (self->request_exists) {
FastReloadRequest request = self->current_request;
self->request_exists = false;
assert(self->current_request != REQUEST_VOID);
self->current_request = REQUEST_VOID;
perform_task(self, request);
return true;
}
return false;
}


@@ -3,59 +3,57 @@
#include <stdbool.h>
typedef enum FastReloadRequest {
REQUEST_VOID,
/* create snapshots */
REQUEST_SAVE_SNAPSHOT_PRE,
REQUEST_SAVE_SNAPSHOT_ROOT,
REQUEST_SAVE_SNAPSHOT_TMP,
/* create snapshot and fix RIP (- sizeof(vmcall)) */
REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP,
REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP,
REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP,
/* create nested snapshots */
REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP,
REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP,
/* load snapshots */
REQUEST_LOAD_SNAPSHOT_PRE,
REQUEST_LOAD_SNAPSHOT_ROOT,
REQUEST_LOAD_SNAPSHOT_TMP,
/* check if snapshot exists */
REQUEST_PRE_EXISTS,
REQUEST_ROOT_EXISTS,
REQUEST_TMP_EXISTS,
// REQUEST_DISCARD_SNAPSHOT_TMP,
} FastReloadRequest;
typedef enum FastReloadMode {
RELOAD_MODE_DEBUG, /* savevm / loadvm based on QEMU's qcow2 storage - only for debug purposes */
RELOAD_MODE_NO_BLOCK, /* fastest mode - works only if no active block device is attached (e.g. initramfs mode) */
RELOAD_MODE_BLOCK,
} FastReloadMode;
typedef struct fast_vm_reload_sync_s {
bool request_exists;
bool request_exists_pre;
bool debug_mode;
FastReloadMode mode;
FastReloadRequest current_request;
} fast_vm_reload_sync_t;
fast_vm_reload_sync_t *init_fast_vm_reload_sync(void);
void request_fast_vm_reload(fast_vm_reload_sync_t *self, FastReloadRequest request);
bool reload_request_exists(fast_vm_reload_sync_t *self);
bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t *self);
bool check_if_relood_request_exists_post(fast_vm_reload_sync_t *self);
bool fast_snapshot_exists(fast_vm_reload_sync_t *self, FastReloadRequest type);
void reload_request_discard_tmp(fast_vm_reload_sync_t *self);
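As a rough sketch of the protocol behind this interface (illustrative; the real call sites are the hypercall handlers and QEMU's main loop): in RELOAD_MODE_NO_BLOCK the request is performed immediately, otherwise it is queued and later completed by the two check_* hooks around the main loop's vm_stop handling.
static void reload_protocol_sketch(fast_vm_reload_sync_t *sync)
{
    /* hypercall side: queue (or, in NO_BLOCK mode, directly perform) a request */
    request_fast_vm_reload(sync, REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP);
    /* main-loop side, only reached for the deferred modes: */
    if (reload_request_exists(sync)) {
        check_if_relood_request_exists_pre(sync);  /* fixes up RIP, requests vm_stop */
        check_if_relood_request_exists_post(sync); /* performs the queued task       */
    }
}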


@@ -1,13 +1,13 @@
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include "nyx/redqueen.h"
//#include "debug.h"
// #include "debug.h"
#include "nyx/file_helper.h"
@@ -15,106 +15,119 @@
* Private Helper Functions Declarations
*/
size_t _count_lines_in_file(FILE *fp);
void _parse_addresses_in_file(FILE *fp, size_t num_addrs, uint64_t *addrs);
/*
* Public Functions
*/
void write_debug_result(char *buf)
{
int unused __attribute__((unused));
int fd = open("/tmp/qemu_debug.txt", O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
assert(fd > 0);
unused = write(fd, buf, strlen(buf));
close(fd);
}
void parse_address_file(char *path, size_t *num_addrs, uint64_t **addrs)
{
FILE *fp = fopen(path, "r");
if (!fp) {
*num_addrs = 0;
*addrs = NULL;
return;
}
*num_addrs = _count_lines_in_file(fp);
if (*num_addrs == 0) {
*addrs = NULL;
goto exit_function;
}
assert(*num_addrs < 0xffff);
*addrs = malloc(sizeof(uint64_t) * (*num_addrs));
_parse_addresses_in_file(fp, *num_addrs, *addrs);
exit_function:
fclose(fp);
}
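The parser expects one hexadecimal address per line, each consumed by fscanf(fp, "%lx", ...). A hypothetical input file and call site:
static void parse_address_file_example(void)
{
    /* breakpoints.txt (hypothetical contents):
     *     41414141
     *     deadbeef
     */
    size_t num_addrs = 0;
    uint64_t *addrs = NULL;
    char path[] = "breakpoints.txt";
    parse_address_file(path, &num_addrs, &addrs);
    /* a missing or empty file yields num_addrs == 0 and addrs == NULL */
    free(addrs);
}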
int re_fd = 0;
int se_fd = 0;
void write_re_result(char *buf)
{
int unused __attribute__((unused));
if (!re_fd)
re_fd = open(redqueen_workdir.redqueen_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = write(re_fd, buf, strlen(buf));
}
void fsync_redqueen_files(void)
{
if (se_fd) {
fsync(se_fd);
}
if (re_fd) {
fsync(re_fd);
}
}
void write_se_result(char *buf)
{
// int fd;
int unused __attribute__((unused));
if (!se_fd)
se_fd = open(redqueen_workdir.symbolic_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = write(se_fd, buf, strlen(buf));
// close(fd);
}
void delete_redqueen_files(void)
{
int unused __attribute__((unused));
if (!re_fd)
re_fd = open(redqueen_workdir.redqueen_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
if (!se_fd)
se_fd = open(redqueen_workdir.symbolic_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = ftruncate(re_fd, 0);
unused = ftruncate(se_fd, 0);
}
/*
* Private Helper Functions Definitions
*/
size_t _count_lines_in_file(FILE *fp)
{
size_t val = 0;
size_t count = 0;
while (1) {
int scanres = fscanf(fp, "%lx", &val);
if (scanres == 0) {
printf("WARNING, invalid line in address file");
assert(scanres != 0);
}
if (scanres == -1) {
break;
}
count += 1;
}
rewind(fp);
return count;
}
void _parse_addresses_in_file(FILE *fp, size_t num_addrs, uint64_t *addrs)
{
for (size_t i = 0; i < num_addrs; i++) {
assert(fscanf(fp, "%lx", &addrs[i]) == 1);
}
}


@@ -1,20 +1,21 @@
#pragma once
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
// doesn't take ownership of path, num_addrs or addrs
void parse_address_file(char *path, size_t *num_addrs, uint64_t **addrs);
// doesn't take ownership of buf
void write_re_result(char *buf);
// doesn't take ownership of buf
void write_se_result(char *buf);
// doesn't take ownership of buf
void write_debug_result(char *buf);
void delete_redqueen_files(void);


@@ -12,175 +12,226 @@
#include "sysemu/kvm_int.h"
#include "qemu-common.h"
#include "nyx/helpers.h"
#include "nyx/debug.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"
void nyx_abort(char *msg)
{
set_abort_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, msg,
strlen(msg));
synchronization_lock();
exit(1);
}
bool is_called_in_fuzzing_mode(const char *hypercall)
{
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
char *tmp = NULL;
assert(asprintf(&tmp, "Hypercall <%s> called during fuzzing...", hypercall) !=
-1);
nyx_abort((char *)tmp);
free(tmp);
return true;
}
return false;
}
uint64_t get_rip(CPUState *cpu)
{
kvm_arch_get_registers(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
kvm_cpu_synchronize_state(cpu);
return env->eip;
}
int get_capstone_mode(int word_width_in_bits)
{
switch (word_width_in_bits) {
case 64:
return CS_MODE_64;
case 32:
return CS_MODE_32;
default:
assert(false);
}
}
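get_capstone_mode() translates the guest word width into capstone's mode flag; a disassembler handle would be opened roughly as follows (a sketch against capstone's public API):
static csh open_guest_disassembler(int word_width_in_bits)
{
    csh handle = 0;
    if (cs_open(CS_ARCH_X86, get_capstone_mode(word_width_in_bits), &handle) != CS_ERR_OK) {
        return 0; /* treat 0 as "no handle" */
    }
    return handle; /* release with cs_close(&handle) when done */
}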
nyx_coverage_bitmap_copy_t *new_coverage_bitmaps(void)
{
nyx_coverage_bitmap_copy_t *bitmaps = malloc(sizeof(nyx_coverage_bitmap_copy_t));
memset(bitmaps, 0, sizeof(nyx_coverage_bitmap_copy_t));
nyx_debug("%s: agent supports timeout detection: %d\n", __func__, GET_GLOBAL_STATE()->cap_timeout_detection);
nyx_debug("%s: agent supports only-reload mode: %d\n", __func__, GET_GLOBAL_STATE()->cap_only_reload_mode);
nyx_debug("%s: agent supports compile-time tracing: %d\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing );
assert(GET_GLOBAL_STATE()->shared_bitmap_size);
bitmaps->coverage_bitmap = malloc(GET_GLOBAL_STATE()->shared_bitmap_size);
assert(GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
bitmaps->ijon_bitmap_buffer = malloc(GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
kvm_arch_get_registers_fast(cpu);
nyx_debug("--------------------------\n");
nyx_debug("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_fd: %d\n", GET_GLOBAL_STATE()->shared_bitmap_fd);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_size: %x\n", GET_GLOBAL_STATE()->shared_bitmap_size);
nyx_debug("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3);
nyx_debug("--------------------------\n");
if (GET_GLOBAL_STATE()->input_buffer_size != GET_GLOBAL_STATE()->shared_payload_buffer_size){
resize_shared_memory(GET_GLOBAL_STATE()->input_buffer_size, &GET_GLOBAL_STATE()->shared_payload_buffer_size, NULL, GET_GLOBAL_STATE()->shared_payload_buffer_fd);
GET_GLOBAL_STATE()->shared_payload_buffer_size = GET_GLOBAL_STATE()->input_buffer_size;
}
if(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr&0xfff){
fprintf(stderr, "[QEMU-Nyx] Error: guest's trace bitmap v_addr (0x%lx) is not page aligned!\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
return false;
}
if (GET_GLOBAL_STATE()->cap_coverage_bitmap_size){
resize_coverage_bitmap(GET_GLOBAL_STATE()->cap_coverage_bitmap_size);
}
for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr+ i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
if(GET_GLOBAL_STATE()->cap_ijon_tracing){
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
if(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr&0xfff){
error_printf("[QEMU-Nyx] Error: guest's ijon buffer v_addr (0x%lx) is not page aligned!\n", GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
return false;
}
kvm_arch_get_registers_fast(cpu);
for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_ijon_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr + i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_ijon_bitmap_fd, GET_GLOBAL_STATE()->shared_ijon_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_ijon_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
/* pass the actual input buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_input_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
return true;
return bitmaps;
}
bool folder_exits(const char* path){
struct stat sb;
return (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode));
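For orientation, a minimal sketch of how get_capstone_mode() above is meant to feed capstone's standard C API (hypothetical call site; error handling elided):

    #include <capstone/capstone.h>

    csh handle;
    /* word width comes from the traced target, e.g. KAFL_MODE_64 -> 64 */
    if (cs_open(CS_ARCH_X86, (cs_mode)get_capstone_mode(64), &handle) != CS_ERR_OK) {
        /* no disassembler backend available */
    }
    /* ... cs_disasm(handle, ...) ... */
    cs_close(&handle);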
void coverage_bitmap_reset(void)
{
if (GET_GLOBAL_STATE()->shared_bitmap_ptr) {
memset(GET_GLOBAL_STATE()->shared_bitmap_ptr, 0x00,
GET_GLOBAL_STATE()->shared_bitmap_real_size);
}
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr &&
GET_GLOBAL_STATE()->shared_ijon_bitmap_size)
{
memset(GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, 0x00,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
void coverage_bitmap_copy_to_buffer(nyx_coverage_bitmap_copy_t *buffer)
{
if (GET_GLOBAL_STATE()->shared_bitmap_ptr) {
memcpy(buffer->coverage_bitmap, GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_real_size);
}
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr) {
memcpy(buffer->ijon_bitmap_buffer, GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
void coverage_bitmap_copy_from_buffer(nyx_coverage_bitmap_copy_t *buffer)
{
if (GET_GLOBAL_STATE()->shared_bitmap_ptr) {
memcpy(GET_GLOBAL_STATE()->shared_bitmap_ptr, buffer->coverage_bitmap,
GET_GLOBAL_STATE()->shared_bitmap_real_size);
}
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr) {
memcpy(GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, buffer->ijon_bitmap_buffer,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
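Taken together, these helpers give the snapshot logic a scratch copy of both coverage buffers. A hedged usage sketch (hypothetical call sequence, not one taken from this commit):

    nyx_coverage_bitmap_copy_t *backup = new_coverage_bitmaps();

    coverage_bitmap_copy_to_buffer(backup);   /* stash bitmaps before a run */
    coverage_bitmap_reset();                  /* zero shared + ijon bitmaps */
    /* ... execute one input ... */
    coverage_bitmap_copy_from_buffer(backup); /* roll both bitmaps back */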
static void resize_coverage_bitmap(uint32_t new_bitmap_size)
{
uint32_t new_bitmap_shm_size = new_bitmap_size;
if (new_bitmap_shm_size % 64 > 0) {
new_bitmap_shm_size = ((new_bitmap_shm_size + 64) >> 6) << 6;
}
GET_GLOBAL_STATE()->shared_bitmap_real_size = new_bitmap_shm_size;
resize_shared_memory(new_bitmap_shm_size, &GET_GLOBAL_STATE()->shared_bitmap_size,
&GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_fd);
/* pass the actual bitmap buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_coverage_bitmap_size =
new_bitmap_size;
if (new_bitmap_size & (PAGE_SIZE - 1)) {
GET_GLOBAL_STATE()->shared_bitmap_size =
(new_bitmap_size & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
}
}
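A worked example of the two rounding steps above, assuming PAGE_SIZE is 0x1000: for new_bitmap_size = 65000 (0xfde8), 65000 % 64 == 40, so the shm size becomes ((65000 + 64) >> 6) << 6 == 65024 (the next 64-byte multiple) and shared_bitmap_real_size is set to it; 0xfde8 & 0xfff is non-zero, so shared_bitmap_size is then rounded up to the next whole page, (0xfde8 & ~0xfff) + 0x1000 == 0x10000 (65536). The front-end still sees the requested 65000 via agent_coverage_bitmap_size.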
bool apply_capabilities(CPUState *cpu)
{
nyx_debug("%s: agent supports timeout detection: %d\n", __func__,
GET_GLOBAL_STATE()->cap_timeout_detection);
nyx_debug("%s: agent supports only-reload mode: %d\n", __func__,
GET_GLOBAL_STATE()->cap_only_reload_mode);
nyx_debug("%s: agent supports compile-time tracing: %d\n", __func__,
GET_GLOBAL_STATE()->cap_compile_time_tracing);
if (GET_GLOBAL_STATE()->cap_compile_time_tracing) {
GET_GLOBAL_STATE()->pt_trace_mode = false;
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__,
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
kvm_arch_get_registers_fast(cpu);
nyx_debug("--------------------------\n");
nyx_debug("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n",
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_fd: %d\n",
GET_GLOBAL_STATE()->shared_bitmap_fd);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_size: %x\n",
GET_GLOBAL_STATE()->shared_bitmap_size);
nyx_debug("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3);
nyx_debug("--------------------------\n");
if (GET_GLOBAL_STATE()->input_buffer_size !=
GET_GLOBAL_STATE()->shared_payload_buffer_size)
{
resize_shared_memory(GET_GLOBAL_STATE()->input_buffer_size,
&GET_GLOBAL_STATE()->shared_payload_buffer_size,
NULL, GET_GLOBAL_STATE()->shared_payload_buffer_fd);
GET_GLOBAL_STATE()->shared_payload_buffer_size =
GET_GLOBAL_STATE()->input_buffer_size;
}
if (GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr & 0xfff) {
fprintf(stderr, "[QEMU-Nyx] Error: guest's trace bitmap v_addr (0x%lx) is not page aligned!\n",
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
return false;
}
if (GET_GLOBAL_STATE()->cap_coverage_bitmap_size) {
resize_coverage_bitmap(GET_GLOBAL_STATE()->cap_coverage_bitmap_size);
}
for (uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000)
{
assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr +
i,
i / 0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd,
GET_GLOBAL_STATE()->shared_bitmap_size, true,
GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
if (GET_GLOBAL_STATE()->cap_ijon_tracing) {
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__,
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
if (GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr & 0xfff) {
error_printf("[QEMU-Nyx] Error: guest's ijon buffer v_addr (0x%lx) is "
"not page aligned!\n",
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
return false;
}
kvm_arch_get_registers_fast(cpu);
for (uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_ijon_bitmap_size;
i += 0x1000)
{
assert(remap_slot(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr + i,
i / 0x1000, cpu,
GET_GLOBAL_STATE()->shared_ijon_bitmap_fd,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size +
GET_GLOBAL_STATE()->shared_ijon_bitmap_size,
true, GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_ijon_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
/* pass the actual input buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_input_buffer_size =
GET_GLOBAL_STATE()->shared_payload_buffer_size;
return true;
}
bool folder_exits(const char *path)
{
struct stat sb;
return (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode));
}
bool file_exits(const char *path)
{
struct stat sb;
return (stat(path, &sb) == 0);
}

View File

@ -1,23 +1,23 @@
#pragma once
#pragma once
uint64_t get_rip(CPUState *cpu);
typedef struct nyx_coverage_bitmap_copy_s{
void* coverage_bitmap;
void* ijon_bitmap_buffer;
}nyx_coverage_bitmap_copy_t;
typedef struct nyx_coverage_bitmap_copy_s {
void *coverage_bitmap;
void *ijon_bitmap_buffer;
} nyx_coverage_bitmap_copy_t;
void nyx_abort(char* msg);
bool is_called_in_fuzzing_mode(const char* hypercall);
void nyx_abort(char *msg);
bool is_called_in_fuzzing_mode(const char *hypercall);
nyx_coverage_bitmap_copy_t* new_coverage_bitmaps(void);
void coverage_bitmap_reset(void);
void coverage_bitmap_copy_to_buffer(nyx_coverage_bitmap_copy_t* buffer);
void coverage_bitmap_copy_from_buffer(nyx_coverage_bitmap_copy_t* buffer);
nyx_coverage_bitmap_copy_t *new_coverage_bitmaps(void);
void coverage_bitmap_reset(void);
void coverage_bitmap_copy_to_buffer(nyx_coverage_bitmap_copy_t *buffer);
void coverage_bitmap_copy_from_buffer(nyx_coverage_bitmap_copy_t *buffer);
int get_capstone_mode(int word_width_in_bits);
bool apply_capabilities(CPUState *cpu);
bool folder_exits(const char* path);
bool file_exits(const char* path);
bool folder_exits(const char *path);
bool file_exits(const char *path);

View File

@ -1,108 +1,125 @@
#include "qemu/osdep.h"
#include "nyx/state/state.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/configuration.h"
#include "nyx/memory_access.h"
#include "nyx/helpers.h"
#include "nyx/state/state.h"
void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
uint64_t vaddr = hypercall_arg;
host_config_t config;
void handle_hypercall_kafl_get_host_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
uint64_t vaddr = hypercall_arg;
host_config_t config;
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_GET_HOST_CONFIG")){
return;
}
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_GET_HOST_CONFIG")) {
return;
}
if (GET_GLOBAL_STATE()->get_host_config_done){
nyx_abort((char*)"KVM_EXIT_KAFL_GET_HOST_CONFIG called twice...");
return;
}
if (GET_GLOBAL_STATE()->get_host_config_done) {
nyx_abort((char *)"KVM_EXIT_KAFL_GET_HOST_CONFIG called twice...");
return;
}
memset((void*)&config, 0, sizeof(host_config_t));
memset((void *)&config, 0, sizeof(host_config_t));
config.host_magic = NYX_HOST_MAGIC;
config.host_version = NYX_HOST_VERSION;
config.bitmap_size = GET_GLOBAL_STATE()->shared_bitmap_size;
config.ijon_bitmap_size = GET_GLOBAL_STATE()->shared_ijon_bitmap_size;
config.payload_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
config.worker_id = GET_GLOBAL_STATE()->worker_id;
config.host_magic = NYX_HOST_MAGIC;
config.host_version = NYX_HOST_VERSION;
config.bitmap_size = GET_GLOBAL_STATE()->shared_bitmap_size;
config.ijon_bitmap_size = GET_GLOBAL_STATE()->shared_ijon_bitmap_size;
config.payload_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
config.worker_id = GET_GLOBAL_STATE()->worker_id;
write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(host_config_t), cpu);
GET_GLOBAL_STATE()->get_host_config_done = true;
write_virtual_memory(vaddr, (uint8_t *)&config, sizeof(host_config_t), cpu);
GET_GLOBAL_STATE()->get_host_config_done = true;
}
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
uint64_t vaddr = hypercall_arg;
agent_config_t config;
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
uint64_t vaddr = hypercall_arg;
agent_config_t config;
if(is_called_in_fuzzing_mode("KVM_EXIT_KAFL_SET_AGENT_CONFIG")){
return;
}
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_SET_AGENT_CONFIG")) {
return;
}
if (GET_GLOBAL_STATE()->set_agent_config_done){
nyx_abort((char*)"KVM_EXIT_KAFL_SET_AGENT_CONFIG called twice...");
return;
}
if (GET_GLOBAL_STATE()->set_agent_config_done) {
nyx_abort((char *)"KVM_EXIT_KAFL_SET_AGENT_CONFIG called twice...");
return;
}
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
if(read_virtual_memory(vaddr, (uint8_t*)&config, sizeof(agent_config_t), cpu)){
if (read_virtual_memory(vaddr, (uint8_t *)&config, sizeof(agent_config_t), cpu)) {
if (config.agent_magic != NYX_AGENT_MAGIC) {
fprintf(stderr,
"[QEMU-Nyx] Error: NYX_AGENT_MAGIC not found in agent "
"configuration - You are probably using an outdated agent...\n");
exit(1);
}
if (config.agent_magic != NYX_AGENT_MAGIC){
fprintf(stderr, "[QEMU-Nyx] Error: NYX_AGENT_MAGIC not found in agent configuration - You are probably using an outdated agent...\n");
exit(1);
}
if (config.agent_version != NYX_AGENT_VERSION) {
fprintf(stderr,
"[QEMU-Nyx] Error: NYX_AGENT_VERSION does not match in agent "
"configuration (%d != %d) - "
"You are probably using an outdated agent...\n",
config.agent_version, NYX_AGENT_VERSION);
exit(1);
}
if (config.agent_version != NYX_AGENT_VERSION){
fprintf(stderr,
"[QEMU-Nyx] Error: NYX_AGENT_VERSION does not match in agent "
"configuration (%d != %d) - "
"You are probably using an outdated agent...\n",
config.agent_version, NYX_AGENT_VERSION);
exit(1);
}
GET_GLOBAL_STATE()->cap_timeout_detection = config.agent_timeout_detection;
GET_GLOBAL_STATE()->cap_only_reload_mode =
!!!config.agent_non_reload_mode; /* fix this */
GET_GLOBAL_STATE()->cap_compile_time_tracing = config.agent_tracing;
GET_GLOBAL_STATE()->cap_timeout_detection = config.agent_timeout_detection;
GET_GLOBAL_STATE()->cap_only_reload_mode = !!!config.agent_non_reload_mode; /* fix this */
GET_GLOBAL_STATE()->cap_compile_time_tracing = config.agent_tracing;
if (!GET_GLOBAL_STATE()->cap_compile_time_tracing &&
!GET_GLOBAL_STATE()->nyx_fdl)
{
fprintf(
stderr,
"[QEMU-Nyx] Error: Attempt to fuzz target without compile-time "
"instrumentation - Intel PT is not supported on this KVM build!\n");
exit(1);
}
if(!GET_GLOBAL_STATE()->cap_compile_time_tracing && !GET_GLOBAL_STATE()->nyx_fdl){
fprintf(stderr, "[QEMU-Nyx] Error: Attempt to fuzz target without compile-time instrumentation - Intel PT is not supported on this KVM build!\n");
exit(1);
}
GET_GLOBAL_STATE()->cap_ijon_tracing = config.agent_ijon_tracing;
GET_GLOBAL_STATE()->cap_ijon_tracing = config.agent_ijon_tracing;
if(config.agent_tracing){
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr = config.trace_buffer_vaddr;
if (config.agent_tracing) {
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr =
config.trace_buffer_vaddr;
GET_GLOBAL_STATE()->pt_trace_mode = false;
}
if(config.agent_ijon_tracing){
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr = config.ijon_trace_buffer_vaddr;
}
}
if (config.agent_ijon_tracing) {
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr =
config.ijon_trace_buffer_vaddr;
}
GET_GLOBAL_STATE()->cap_cr3 = env->cr[3];
GET_GLOBAL_STATE()->cap_coverage_bitmap_size = config.coverage_bitmap_size;
GET_GLOBAL_STATE()->input_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
GET_GLOBAL_STATE()->cap_cr3 = env->cr[3];
GET_GLOBAL_STATE()->cap_coverage_bitmap_size = config.coverage_bitmap_size;
GET_GLOBAL_STATE()->input_buffer_size =
GET_GLOBAL_STATE()->shared_payload_buffer_size;
if (config.input_buffer_size){
abort();
}
if (config.input_buffer_size) {
abort();
}
if(apply_capabilities(cpu) == false){
nyx_abort((char*)"applying agent configuration failed...");
}
if (apply_capabilities(cpu) == false) {
nyx_abort((char *)"applying agent configuration failed...");
}
if(getenv("DUMP_PAYLOAD_MODE")){
config.dump_payloads = 1;
write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(agent_config_t), cpu);
}
if (getenv("DUMP_PAYLOAD_MODE")) {
config.dump_payloads = 1;
write_virtual_memory(vaddr, (uint8_t *)&config, sizeof(agent_config_t),
cpu);
}
}
else{
fprintf(stderr, "[QEMU-Nyx] Error: %s - failed (vaddr: 0x%lx)!\n", __func__, vaddr);
exit(1);
}
GET_GLOBAL_STATE()->set_agent_config_done = true;
} else {
fprintf(stderr, "[QEMU-Nyx] Error: %s - failed (vaddr: 0x%lx)!\n", __func__,
vaddr);
exit(1);
}
GET_GLOBAL_STATE()->set_agent_config_done = true;
}

View File

@ -5,40 +5,44 @@
#include "sysemu/kvm.h"
void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_get_host_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
#define NYX_HOST_MAGIC 0x4878794e
#define NYX_AGENT_MAGIC 0x4178794e
#define NYX_HOST_VERSION 2
#define NYX_HOST_VERSION 2
#define NYX_AGENT_VERSION 1
typedef struct host_config_s{
uint32_t host_magic;
uint32_t host_version;
uint32_t bitmap_size;
uint32_t ijon_bitmap_size;
uint32_t payload_buffer_size;
uint32_t worker_id;
/* more to come */
typedef struct host_config_s {
uint32_t host_magic;
uint32_t host_version;
uint32_t bitmap_size;
uint32_t ijon_bitmap_size;
uint32_t payload_buffer_size;
uint32_t worker_id;
/* more to come */
} __attribute__((packed)) host_config_t;
typedef struct agent_config_s{
uint32_t agent_magic;
uint32_t agent_version;
uint8_t agent_timeout_detection;
uint8_t agent_tracing;
uint8_t agent_ijon_tracing;
uint8_t agent_non_reload_mode;
uint64_t trace_buffer_vaddr;
uint64_t ijon_trace_buffer_vaddr;
uint32_t coverage_bitmap_size;
uint32_t input_buffer_size;
typedef struct agent_config_s {
uint32_t agent_magic;
uint32_t agent_version;
uint8_t agent_timeout_detection;
uint8_t agent_tracing;
uint8_t agent_ijon_tracing;
uint8_t agent_non_reload_mode;
uint64_t trace_buffer_vaddr;
uint64_t ijon_trace_buffer_vaddr;
uint8_t dump_payloads; /* set by hypervisor */
/* more to come */
uint32_t coverage_bitmap_size;
uint32_t input_buffer_size;
uint8_t dump_payloads; /* set by hypervisor */
/* more to come */
} __attribute__((packed)) agent_config_t;
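For orientation, a hedged guest-side sketch of the two-step handshake these structs carry (the kAFL_hypercall wrapper and HYPERCALL_KAFL_* constants follow kAFL agent conventions and are assumptions here, as is trace_buffer):

    host_config_t host_config = { 0 };
    kAFL_hypercall(HYPERCALL_KAFL_GET_HOST_CONFIG, (uintptr_t)&host_config);
    /* host_config.bitmap_size etc. now describe the host-side buffers */

    agent_config_t agent_config = {
        .agent_magic        = NYX_AGENT_MAGIC,
        .agent_version      = NYX_AGENT_VERSION,
        .agent_tracing      = 1,                       /* compile-time tracing */
        .trace_buffer_vaddr = (uintptr_t)trace_buffer, /* must be page aligned */
    };
    kAFL_hypercall(HYPERCALL_KAFL_SET_AGENT_CONFIG, (uintptr_t)&agent_config);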

View File

@ -1,103 +1,119 @@
#include "qemu/osdep.h"
#include <sys/time.h>
#include "sysemu/kvm.h"
#include <sys/time.h>
#include "nyx/synchronization.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/state/state.h"
#include "nyx/hypercall/debug.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
//#define NYX_ENABLE_DEBUG_HYPERCALLS
// #define NYX_ENABLE_DEBUG_HYPERCALLS
#ifdef NYX_ENABLE_DEBUG_HYPERCALLS
static double get_time(void){
struct timeval t;
struct timezone tzp;
gettimeofday(&t, &tzp);
return t.tv_sec + t.tv_usec*1e-6;
static double get_time(void)
{
struct timeval t;
struct timezone tzp;
gettimeofday(&t, &tzp);
return t.tv_sec + t.tv_usec * 1e-6;
}
static void print_time_diff(int iterations){
static void print_time_diff(int iterations)
{
static bool init = true;
static double start_time = 0.0;
static double end_time = 0.0;
static bool init = true;
static double start_time = 0.0;
static double end_time = 0.0;
if(init){
init = false;
printf("start time is zero!\n");
start_time = get_time();
}
else{
end_time = get_time();
double elapsed_time = end_time - start_time;
printf("Done in %f seconds\n", elapsed_time);
printf("Performance: %f\n", iterations/elapsed_time);
start_time = get_time();
}
if (init) {
init = false;
printf("start time is zero!\n");
start_time = get_time();
} else {
end_time = get_time();
double elapsed_time = end_time - start_time;
printf("Done in %f seconds\n", elapsed_time);
printf("Performance: %f\n", iterations / elapsed_time);
start_time = get_time();
}
}
static void meassure_performance(void){
static int perf_counter = 0;
if ((perf_counter%1000) == 0){
print_time_diff(1000);
}
perf_counter++;
static void meassure_performance(void)
{
static int perf_counter = 0;
if ((perf_counter % 1000) == 0) {
print_time_diff(1000);
}
perf_counter++;
}
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static bool first = true;
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
static bool first = true;
switch(hypercall_arg&0xFFF){
case 0: /* create root snapshot */
if(!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_ROOT_EXISTS)){
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT);
}
break;
case 1: /* create tmp snapshot */
if(!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP);
}
break;
case 2: /* load root snapshot (+ discard tmp snapshot) */
if(fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){
reload_request_discard_tmp(GET_GLOBAL_STATE()->reload_state);
}
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT);
meassure_performance();
break;
case 3: /* load tmp snapshot */
if(fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_TMP);
meassure_performance();
}
break;
case 5: // firefox debug hypercall
if(first){
first = false;
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT);
//request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP);
switch (hypercall_arg & 0xFFF) {
case 0: /* create root snapshot */
if (!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state,
REQUEST_ROOT_EXISTS))
{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT);
}
break;
case 1: /* create tmp snapshot */
if (!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS))
{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_TMP);
}
break;
case 2: /* load root snapshot (+ discard tmp snapshot) */
if (fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS))
{
reload_request_discard_tmp(GET_GLOBAL_STATE()->reload_state);
}
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_ROOT);
meassure_performance();
break;
case 3: /* load tmp snapshot */
if (fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS))
{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_TMP);
meassure_performance();
}
break;
case 5: // firefox debug hypercall
if (first) {
first = false;
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT);
// request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP);
break;
}
else{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT);
break;
}
default:
abort();
}
break;
} else {
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_ROOT);
break;
}
default:
abort();
}
}
#else /* NYX_ENABLE_DEBUG_HYPERCALLS */
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
fprintf(stderr, "[QEMU-Nyx] Error: HYPERCALL_KAFL_DEBUG_TMP not enabled!\n");
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
fprintf(stderr, "[QEMU-Nyx] Error: HYPERCALL_KAFL_DEBUG_TMP not enabled!\n");
set_abort_reason_auxiliary_buffer(
GET_GLOBAL_STATE()->auxilary_buffer,
(char *)"HYPERCALL_KAFL_DEBUG_TMP is not enabled.",
strlen("HYPERCALL_KAFL_DEBUG_TMP is not enabled."));
synchronization_lock();
synchronization_lock();
}
#endif
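A hypothetical guest-side driver for the benchmark path above (the hypercall ID name is an assumption; the arguments map onto the switch cases: 0 saves the root snapshot, 2 reloads it, and meassure_performance() prints a rate every 1000 reloads):

    kAFL_hypercall(HYPERCALL_KAFL_DEBUG_TMP_SNAPSHOT, 0); /* save root snapshot */
    while (1) {
        kAFL_hypercall(HYPERCALL_KAFL_DEBUG_TMP_SNAPSHOT, 2); /* reload it */
    }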

View File

@ -1,6 +1,8 @@
#pragma once
#include <stdint.h>
#include "sysemu/kvm.h"
#include <stdint.h>
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);

File diff suppressed because it is too large Load Diff

View File

@ -19,27 +19,27 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#pragma once
#include <stdint.h>
#define PAYLOAD_BUFFER_SIZE_64 26
#define PAYLOAD_BUFFER_SIZE_32 20
#define PAYLOAD_BUFFER_SIZE_64 26
#define PAYLOAD_BUFFER_SIZE_32 20
// FIXME: move to common nyx.h
#define KAFL_MODE_64 0
#define KAFL_MODE_32 1
#define KAFL_MODE_16 2
#define KAFL_MODE_64 0
#define KAFL_MODE_32 1
#define KAFL_MODE_16 2
typedef struct{
uint64_t ip[4];
uint64_t size[4];
uint8_t enabled[4];
} kAFL_ranges;
typedef struct {
uint64_t ip[4];
uint64_t size[4];
uint8_t enabled[4];
} kAFL_ranges;
bool check_bitmap_byte(uint32_t value);
//#define PANIC_DEBUG
// #define PANIC_DEBUG
/*
* Panic Notifier Payload (x86-64)
@ -50,7 +50,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD_64 "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define PANIC_PAYLOAD_64 \
"\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00" \
"\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* Panic Notifier Payload (x86-32)
@ -61,7 +63,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD_32 "\xFA\xB8\x1F\x00\x00\x00\xBB\x08\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define PANIC_PAYLOAD_32 \
"\xFA\xB8\x1F\x00\x00\x00\xBB\x08\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1" \
"\xF4"
/*
* KASAN Notifier Payload (x86-64)
@ -72,7 +76,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD_64 "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define KASAN_PAYLOAD_64 \
"\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00" \
"\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* KASAN Notifier Payload (x86-32)
@ -83,10 +89,12 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD_32 "\xFA\xB8\x1F\x00\x00\x00\xBB\x09\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define KASAN_PAYLOAD_32 \
"\xFA\xB8\x1F\x00\x00\x00\xBB\x09\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1" \
"\xF4"
void pt_setup_program(void* ptr);
void pt_setup_snd_handler(void (*tmp)(char, void*), void* tmp_s);
void pt_setup_program(void *ptr);
void pt_setup_snd_handler(void (*tmp)(char, void *), void *tmp_s);
void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end);
void pt_setup_enable_hypercalls(void);
@ -107,38 +115,56 @@ bool pt_hypercalls_enabled(void);
void hypercall_unlock(void);
void hypercall_reload(void);
void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_panic(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page);
void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg,
uint64_t page);
void hprintf(char* msg);
void hprintf(char *msg);
bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
bool handle_hypercall_kafl_next_payload(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void hypercall_reset_hprintf_counter(void);
bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
bool handle_hypercall_kafl_hook(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_mtf(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void pt_enable_rqo(CPUState *cpu);
void pt_disable_rqo(CPUState *cpu);
void pt_enable_rqi(CPUState *cpu);
void pt_disable_rqi(CPUState *cpu);
void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_instruction_mode);
void pt_set_redqueen_instrumentation_mode(CPUState *cpu,
int redqueen_instruction_mode);
void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval);
void pt_set_enable_patches_pending(CPUState *cpu);
void pt_set_disable_patches_pending(CPUState *cpu);
void create_fast_snapshot(CPUState *cpu, bool nested);
int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall, uint64_t arg);
int handle_kafl_hypercall(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall,
uint64_t arg);
void skip_init(void);
typedef struct kafl_dump_file_s{
uint64_t file_name_str_ptr;
uint64_t data_ptr;
uint64_t bytes;
uint8_t append;
typedef struct kafl_dump_file_s {
uint64_t file_name_str_ptr;
uint64_t data_ptr;
uint64_t bytes;
uint8_t append;
} __attribute__((packed)) kafl_dump_file_t;
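A hedged guest-side sketch of how this request struct is typically filled (field names come from the struct above; kAFL_hypercall and HYPERCALL_KAFL_DUMP_FILE follow kAFL agent conventions and are assumptions here, as are data/data_len):

    kafl_dump_file_t req = {
        .file_name_str_ptr = (uintptr_t)"trace.bin",
        .data_ptr          = (uintptr_t)data,     /* hypothetical buffer */
        .bytes             = data_len,
        .append            = 1,
    };
    kAFL_hypercall(HYPERCALL_KAFL_DUMP_FILE, (uintptr_t)&req);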

View File

@ -26,449 +26,476 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include <sys/stat.h>
#include <time.h>
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "hw/qdev-properties.h"
#include "chardev/char-fe.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/migration.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qom/object_interfaces.h"
#include "chardev/char-fe.h"
#include "sysemu/hostmem.h"
#include "sysemu/kvm.h"
#include "sysemu/qtest.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"
#include "pt.h"
#include "nyx/debug.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/debug.h"
#include "nyx/synchronization.h"
#include "nyx/snapshot/devices/state_reallocation.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"
#include "nyx/sharedir.h"
#include "nyx/helpers.h"
#include "nyx/snapshot/devices/state_reallocation.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
#include "nyx/trace_dump.h"
#include "pt.h"
#include "redqueen.h"
#define CONVERT_UINT64(x) (uint64_t)(strtoull(x, NULL, 16))
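For example (strtoull with base 16 accepts an optional 0x prefix, so both spellings of the ip filter properties work):

    CONVERT_UINT64("0xffffffff81000000") /* == 0xffffffff81000000ULL */
    CONVERT_UINT64("ffffffff81000000")   /* == 0xffffffff81000000ULL */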
#define TYPE_NYX_MEM "nyx"
#define NYX_MEM(obj) \
OBJECT_CHECK(nyx_interface_state, (obj), TYPE_NYX_MEM)
#define NYX_MEM(obj) OBJECT_CHECK(nyx_interface_state, (obj), TYPE_NYX_MEM)
static void nyx_realize(DeviceState *dev, Error **errp);
typedef struct nyx_interface_state {
DeviceState parent_obj;
DeviceState parent_obj;
Chardev *nyx_chr_drv_state;
CharBackend chr;
Chardev *nyx_chr_drv_state;
CharBackend chr;
char* sharedir;
char *sharedir;
char* workdir;
uint32_t worker_id;
uint64_t cow_primary_size;
char* redqueen_workdir;
char* data_bar_fd_0;
char* data_bar_fd_1;
char* data_bar_fd_2;
char* bitmap_file;
char *workdir;
uint32_t worker_id;
uint64_t cow_primary_size;
char* filter_bitmap[4];
char* ip_filter[4][2];
char *redqueen_workdir;
char *data_bar_fd_0;
char *data_bar_fd_1;
char *data_bar_fd_2;
char *bitmap_file;
uint32_t bitmap_size;
uint32_t input_buffer_size;
char *filter_bitmap[4];
char *ip_filter[4][2];
bool dump_pt_trace;
bool edge_cb_trace;
uint32_t bitmap_size;
uint32_t input_buffer_size;
bool dump_pt_trace;
bool edge_cb_trace;
bool redqueen;
bool redqueen;
} nyx_interface_state;
static void nyx_interface_event(void *opaque, int event){
static void nyx_interface_event(void *opaque, int event)
{
}
static void send_char(char val, void* tmp_s){
nyx_interface_state *s = tmp_s;
static void send_char(char val, void *tmp_s)
{
nyx_interface_state *s = tmp_s;
assert(val == NYX_INTERFACE_PING);
__sync_synchronize();
assert(val == NYX_INTERFACE_PING);
__sync_synchronize();
qemu_chr_fe_write(&s->chr, (const uint8_t *) &val, 1);
qemu_chr_fe_write(&s->chr, (const uint8_t *)&val, 1);
}
static int nyx_interface_can_receive(void * opaque){
return sizeof(int64_t);
static int nyx_interface_can_receive(void *opaque)
{
return sizeof(int64_t);
}
static nyx_interface_state* state = NULL;
static nyx_interface_state *state = NULL;
static void init_send_char(nyx_interface_state* s){
state = s;
static void init_send_char(nyx_interface_state *s)
{
state = s;
}
bool interface_send_char(char val){
if(state){
send_char(val, state);
return true;
}
return false;
bool interface_send_char(char val)
{
if (state) {
send_char(val, state);
return true;
}
return false;
}
static void nyx_interface_receive(void *opaque, const uint8_t * buf, int size){
int i;
for(i = 0; i < size; i++){
switch(buf[i]){
case NYX_INTERFACE_PING:
synchronization_unlock();
break;
case '\n':
break;
case 'E':
exit(0);
default:
break;
assert(false);
}
}
static void nyx_interface_receive(void *opaque, const uint8_t *buf, int size)
{
int i;
for (i = 0; i < size; i++) {
switch (buf[i]) {
case NYX_INTERFACE_PING:
synchronization_unlock();
break;
case '\n':
break;
case 'E':
exit(0);
default:
break;
assert(false);
}
}
}
static int nyx_create_payload_buffer(nyx_interface_state *s, uint64_t buffer_size, const char* file, Error **errp){
void * ptr;
int fd;
struct stat st;
static int nyx_create_payload_buffer(nyx_interface_state *s,
uint64_t buffer_size,
const char *file,
Error **errp)
{
void *ptr;
int fd;
struct stat st;
fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, buffer_size) == 0);
stat(file, &st);
nyx_debug_p(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", buffer_size, st.st_size);
assert(buffer_size == st.st_size);
ptr = mmap(0, buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
fd = open(file, O_CREAT | O_RDWR, S_IRWXU | S_IRWXG | S_IRWXO);
assert(ftruncate(fd, buffer_size) == 0);
stat(file, &st);
nyx_debug_p(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", buffer_size,
st.st_size);
if (ptr == MAP_FAILED) {
error_setg_errno(errp, errno, "Failed to mmap memory");
return -1;
}
assert(buffer_size == st.st_size);
ptr = mmap(0, buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
GET_GLOBAL_STATE()->shared_payload_buffer_fd = fd;
GET_GLOBAL_STATE()->shared_payload_buffer_size = buffer_size;
if (ptr == MAP_FAILED) {
error_setg_errno(errp, errno, "Failed to mmap memory");
return -1;
}
init_send_char(s);
GET_GLOBAL_STATE()->shared_payload_buffer_fd = fd;
GET_GLOBAL_STATE()->shared_payload_buffer_size = buffer_size;
return 0;
init_send_char(s);
return 0;
}
static void nyx_guest_setup_bitmap(nyx_interface_state *s, char* filename, uint32_t bitmap_size){
void * ptr;
int fd;
struct stat st;
fd = open(filename, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, bitmap_size) == 0);
stat(filename, &st);
assert(bitmap_size == st.st_size);
ptr = mmap(0, bitmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
GET_GLOBAL_STATE()->shared_bitmap_ptr = (void*)ptr;
GET_GLOBAL_STATE()->shared_bitmap_fd = fd;
GET_GLOBAL_STATE()->shared_bitmap_size = bitmap_size;
GET_GLOBAL_STATE()->shared_bitmap_real_size = bitmap_size;
static void nyx_guest_setup_bitmap(nyx_interface_state *s,
char *filename,
uint32_t bitmap_size)
{
void *ptr;
int fd;
struct stat st;
fd = open(filename, O_CREAT | O_RDWR, S_IRWXU | S_IRWXG | S_IRWXO);
assert(ftruncate(fd, bitmap_size) == 0);
stat(filename, &st);
assert(bitmap_size == st.st_size);
ptr = mmap(0, bitmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
GET_GLOBAL_STATE()->shared_bitmap_ptr = (void *)ptr;
GET_GLOBAL_STATE()->shared_bitmap_fd = fd;
GET_GLOBAL_STATE()->shared_bitmap_size = bitmap_size;
GET_GLOBAL_STATE()->shared_bitmap_real_size = bitmap_size;
}
static void nyx_guest_setup_ijon_buffer(nyx_interface_state *s, char* filename){
void * ptr;
int fd;
struct stat st;
fd = open(filename, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, DEFAULT_NYX_IJON_BITMAP_SIZE) == 0);
stat(filename, &st);
assert(DEFAULT_NYX_IJON_BITMAP_SIZE == st.st_size);
ptr = mmap(0, DEFAULT_NYX_IJON_BITMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr = (void*)ptr;
GET_GLOBAL_STATE()->shared_ijon_bitmap_fd = fd;
GET_GLOBAL_STATE()->shared_ijon_bitmap_size = DEFAULT_NYX_IJON_BITMAP_SIZE;
static void nyx_guest_setup_ijon_buffer(nyx_interface_state *s, char *filename)
{
void *ptr;
int fd;
struct stat st;
fd = open(filename, O_CREAT | O_RDWR, S_IRWXU | S_IRWXG | S_IRWXO);
assert(ftruncate(fd, DEFAULT_NYX_IJON_BITMAP_SIZE) == 0);
stat(filename, &st);
assert(DEFAULT_NYX_IJON_BITMAP_SIZE == st.st_size);
ptr = mmap(0, DEFAULT_NYX_IJON_BITMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, 0);
GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr = (void *)ptr;
GET_GLOBAL_STATE()->shared_ijon_bitmap_fd = fd;
GET_GLOBAL_STATE()->shared_ijon_bitmap_size = DEFAULT_NYX_IJON_BITMAP_SIZE;
}
static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
static bool verify_workdir_state(nyx_interface_state *s, Error **errp)
{
char *workdir = s->workdir;
uint32_t id = s->worker_id;
char *tmp;
char* workdir = s->workdir;
uint32_t id = s->worker_id;
char* tmp;
if (!folder_exits(workdir)) {
nyx_error("Error: %s does not exist...\n", workdir);
return false;
}
if (!folder_exits(workdir)){
nyx_error("Error: %s does not exist...\n", workdir);
return false;
}
set_workdir_path(workdir);
set_workdir_path(workdir);
assert(asprintf(&tmp, "%s/dump/", workdir) != -1);
if (!folder_exits(tmp)) {
mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
}
free(tmp);
assert(asprintf(&tmp, "%s/dump/", workdir) != -1);
if (!folder_exits(tmp)){
mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
}
free(tmp);
assert(asprintf(&tmp, "%s/interface_%d", workdir, id) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/interface_%d", workdir, id) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/payload_%d", workdir, id) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
nyx_create_payload_buffer(s, s->input_buffer_size, tmp, errp);
}
free(tmp);
assert(asprintf(&tmp, "%s/payload_%d", workdir, id) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
else {
nyx_create_payload_buffer(s, s->input_buffer_size, tmp, errp);
}
free(tmp);
assert(asprintf(&tmp, "%s/bitmap_%d", workdir, id) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
nyx_guest_setup_bitmap(s, tmp, s->bitmap_size);
}
free(tmp);
assert(asprintf(&tmp, "%s/bitmap_%d", workdir, id) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
nyx_guest_setup_bitmap(s, tmp, s->bitmap_size);
}
free(tmp);
assert(asprintf(&tmp, "%s/ijon_%d", workdir, id) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
nyx_guest_setup_ijon_buffer(s, tmp);
}
free(tmp);
assert(asprintf(&tmp, "%s/ijon_%d", workdir, id) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
} else {
nyx_guest_setup_ijon_buffer(s, tmp);
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.lock", workdir) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.lock", workdir) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.addr", workdir) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.addr", workdir) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.dump", workdir) != -1);
if (!file_exits(tmp)) {
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.dump", workdir) != -1);
if (!file_exits(tmp)){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache", workdir) != -1);
init_page_cache(tmp);
assert(asprintf(&tmp, "%s/page_cache", workdir) != -1);
init_page_cache(tmp);
assert(asprintf(&tmp, "%s/redqueen_workdir_%d/", workdir, id) != -1);
if (!folder_exits(tmp)) {
nyx_error("%s does not exist...\n", tmp);
free(tmp);
return false;
} else {
setup_redqueen_workdir(tmp);
}
free(tmp);
assert(asprintf(&tmp, "%s/redqueen_workdir_%d/", workdir, id) != -1);
if (!folder_exits(tmp)){
nyx_error("%s does not exist...\n", tmp);
free(tmp);
return false;
}
else {
setup_redqueen_workdir(tmp);
}
free(tmp);
init_redqueen_state();
init_redqueen_state();
if (s->dump_pt_trace) {
assert(asprintf(&tmp, "%s/pt_trace_dump_%d", workdir, id) != -1);
pt_trace_dump_init(tmp);
free(tmp);
}
if(s->dump_pt_trace){
assert(asprintf(&tmp, "%s/pt_trace_dump_%d", workdir, id) != -1);
pt_trace_dump_init(tmp);
free(tmp);
}
if(s->edge_cb_trace){
redqueen_trace_init();
}
if (s->edge_cb_trace) {
redqueen_trace_init();
}
assert(asprintf(&tmp, "%s/aux_buffer_%d", workdir, id) != -1);
init_aux_buffer(tmp);
free(tmp);
assert(asprintf(&tmp, "%s/aux_buffer_%d", workdir, id) != -1);
init_aux_buffer(tmp);
free(tmp);
return true;
return true;
}
#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9)
#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9)
static void check_ipt_range(uint8_t i){
int ret = 0;
int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
ret = ioctl(kvm, KVM_VMX_PT_GET_ADDRN, NULL);
static void check_ipt_range(uint8_t i)
{
int ret = 0;
int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
ret = ioctl(kvm, KVM_VMX_PT_GET_ADDRN, NULL);
if(ret == -1){
nyx_error("Error: Multi range tracing is not supported!\n");
exit(1);
}
if (ret == -1) {
nyx_error("Error: Multi range tracing is not supported!\n");
exit(1);
}
if(ret < (i+1)){
nyx_error("Error: CPU supports only %d IP filters!\n", ret);
exit(1);
}
close(kvm);
if (ret < (i + 1)) {
nyx_error("Error: CPU supports only %d IP filters!\n", ret);
exit(1);
}
close(kvm);
}
static void check_available_ipt_ranges(nyx_interface_state* s){
uint64_t addr_a, addr_b;
static void check_available_ipt_ranges(nyx_interface_state *s)
{
uint64_t addr_a, addr_b;
int kvm_fd = qemu_open("/dev/kvm", O_RDWR);
if (kvm_fd == -1) {
nyx_error("Error: could not access KVM kernel module: %m\n");
exit(1);
}
int kvm_fd = qemu_open("/dev/kvm", O_RDWR);
if (kvm_fd == -1) {
nyx_error("Error: could not access KVM kernel module: %m\n");
exit(1);
}
if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) == 1 && ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) == 1) {
for(uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++){
if(s->ip_filter[i][0] && s->ip_filter[i][1]){
if(i >= 1){
check_ipt_range(i);
}
addr_a = CONVERT_UINT64(s->ip_filter[i][0]);
addr_b = CONVERT_UINT64(s->ip_filter[i][1]);
if (addr_a < addr_b){
pt_setup_ip_filters(i, addr_a, addr_b);
}
}
}
}
close(kvm_fd);
if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) == 1 &&
ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) == 1)
{
for (uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++) {
if (s->ip_filter[i][0] && s->ip_filter[i][1]) {
if (i >= 1) {
check_ipt_range(i);
}
addr_a = CONVERT_UINT64(s->ip_filter[i][0]);
addr_b = CONVERT_UINT64(s->ip_filter[i][1]);
if (addr_a < addr_b) {
pt_setup_ip_filters(i, addr_a, addr_b);
}
}
}
}
close(kvm_fd);
}
static bool verify_sharedir_state(nyx_interface_state *s, Error **errp){
static bool verify_sharedir_state(nyx_interface_state *s, Error **errp)
{
char *sharedir = s->sharedir;
char* sharedir = s->sharedir;
if (!folder_exits(sharedir)){
return false;
}
return true;
if (!folder_exits(sharedir)) {
return false;
}
return true;
}
static void nyx_realize(DeviceState *dev, Error **errp){
nyx_interface_state *s = NYX_MEM(dev);
static void nyx_realize(DeviceState *dev, Error **errp)
{
nyx_interface_state *s = NYX_MEM(dev);
if(s->bitmap_size <= 0){
s->bitmap_size = DEFAULT_NYX_BITMAP_SIZE;
}
if (s->bitmap_size <= 0) {
s->bitmap_size = DEFAULT_NYX_BITMAP_SIZE;
}
if(s->worker_id == 0xFFFF){
nyx_error("Error: Invalid worker id...\n");
exit(1);
}
if (s->worker_id == 0xFFFF) {
nyx_error("Error: Invalid worker id...\n");
exit(1);
}
if(s->cow_primary_size){
set_global_cow_cache_primary_size(s->cow_primary_size);
}
GET_GLOBAL_STATE()->worker_id = s->worker_id;
if (s->cow_primary_size) {
set_global_cow_cache_primary_size(s->cow_primary_size);
}
GET_GLOBAL_STATE()->worker_id = s->worker_id;
if (!s->workdir || !verify_workdir_state(s, errp)){
nyx_error("Error: Invalid work dir...\n");
exit(1);
}
if (!s->workdir || !verify_workdir_state(s, errp)) {
nyx_error("Error: Invalid work dir...\n");
exit(1);
}
if (!s->sharedir || !verify_sharedir_state(s, errp)){
nyx_error("Warning: Invalid sharedir...\n");
}
else{
sharedir_set_dir(GET_GLOBAL_STATE()->sharedir, s->sharedir);
}
if (!s->sharedir || !verify_sharedir_state(s, errp)) {
nyx_error("Warning: Invalid sharedir...\n");
} else {
sharedir_set_dir(GET_GLOBAL_STATE()->sharedir, s->sharedir);
}
if(&s->chr){
qemu_chr_fe_set_handlers(&s->chr, nyx_interface_can_receive, nyx_interface_receive, nyx_interface_event, NULL, s, NULL, true);
}
if (&s->chr) {
qemu_chr_fe_set_handlers(&s->chr, nyx_interface_can_receive,
nyx_interface_receive, nyx_interface_event, NULL, s,
NULL, true);
}
check_available_ipt_ranges(s);
check_available_ipt_ranges(s);
pt_setup_enable_hypercalls();
init_crash_handler();
pt_setup_enable_hypercalls();
init_crash_handler();
}
static Property nyx_interface_properties[] = {
DEFINE_PROP_CHR("chardev", nyx_interface_state, chr),
DEFINE_PROP_CHR("chardev", nyx_interface_state, chr),
DEFINE_PROP_STRING("sharedir", nyx_interface_state, sharedir),
DEFINE_PROP_STRING("workdir", nyx_interface_state, workdir),
DEFINE_PROP_UINT32("worker_id", nyx_interface_state, worker_id, 0xFFFF),
DEFINE_PROP_UINT64("cow_primary_size", nyx_interface_state, cow_primary_size, 0),
/*
* Since DEFINE_PROP_UINT64 is somehow broken (signed/unsigned madness),
* let's use DEFINE_PROP_STRING and post-process all values by strtol...
*/
DEFINE_PROP_STRING("ip0_a", nyx_interface_state, ip_filter[0][0]),
DEFINE_PROP_STRING("ip0_b", nyx_interface_state, ip_filter[0][1]),
DEFINE_PROP_STRING("ip1_a", nyx_interface_state, ip_filter[1][0]),
DEFINE_PROP_STRING("ip1_b", nyx_interface_state, ip_filter[1][1]),
DEFINE_PROP_STRING("ip2_a", nyx_interface_state, ip_filter[2][0]),
DEFINE_PROP_STRING("ip2_b", nyx_interface_state, ip_filter[2][1]),
DEFINE_PROP_STRING("ip3_a", nyx_interface_state, ip_filter[3][0]),
DEFINE_PROP_STRING("ip3_b", nyx_interface_state, ip_filter[3][1]),
DEFINE_PROP_STRING("sharedir", nyx_interface_state, sharedir),
DEFINE_PROP_STRING("workdir", nyx_interface_state, workdir),
DEFINE_PROP_UINT32("worker_id", nyx_interface_state, worker_id, 0xFFFF),
DEFINE_PROP_UINT64("cow_primary_size", nyx_interface_state, cow_primary_size, 0),
/*
* Since DEFINE_PROP_UINT64 is somehow broken (signed/unsigned madness),
* let's use DEFINE_PROP_STRING and post-process all values by strtol...
*/
DEFINE_PROP_STRING("ip0_a", nyx_interface_state, ip_filter[0][0]),
DEFINE_PROP_STRING("ip0_b", nyx_interface_state, ip_filter[0][1]),
DEFINE_PROP_STRING("ip1_a", nyx_interface_state, ip_filter[1][0]),
DEFINE_PROP_STRING("ip1_b", nyx_interface_state, ip_filter[1][1]),
DEFINE_PROP_STRING("ip2_a", nyx_interface_state, ip_filter[2][0]),
DEFINE_PROP_STRING("ip2_b", nyx_interface_state, ip_filter[2][1]),
DEFINE_PROP_STRING("ip3_a", nyx_interface_state, ip_filter[3][0]),
DEFINE_PROP_STRING("ip3_b", nyx_interface_state, ip_filter[3][1]),
DEFINE_PROP_UINT32("bitmap_size", nyx_interface_state, bitmap_size, DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_UINT32("input_buffer_size", nyx_interface_state, input_buffer_size, DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_BOOL("dump_pt_trace", nyx_interface_state, dump_pt_trace, false),
DEFINE_PROP_BOOL("edge_cb_trace", nyx_interface_state, edge_cb_trace, false),
DEFINE_PROP_UINT32("bitmap_size",
nyx_interface_state,
bitmap_size,
DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_UINT32("input_buffer_size",
nyx_interface_state,
input_buffer_size,
DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_BOOL("dump_pt_trace", nyx_interface_state, dump_pt_trace, false),
DEFINE_PROP_BOOL("edge_cb_trace", nyx_interface_state, edge_cb_trace, false),
DEFINE_PROP_END_OF_LIST(),
DEFINE_PROP_END_OF_LIST(),
};
static void nyx_interface_class_init(ObjectClass *klass, void *data){
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = nyx_realize;
dc->props = nyx_interface_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "Nyx Interface";
static void nyx_interface_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = nyx_realize;
dc->props = nyx_interface_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "Nyx Interface";
}
static void nyx_interface_init(Object *obj){
static void nyx_interface_init(Object *obj)
{
}
static const TypeInfo nyx_interface_info = {
.name = TYPE_NYX_MEM,
.parent = TYPE_DEVICE,
.instance_size = sizeof(nyx_interface_state),
.instance_init = nyx_interface_init,
.class_init = nyx_interface_class_init,
.name = TYPE_NYX_MEM,
.parent = TYPE_DEVICE,
.instance_size = sizeof(nyx_interface_state),
.instance_init = nyx_interface_init,
.class_init = nyx_interface_class_init,
};
static void nyx_interface_register_types(void){
type_register_static(&nyx_interface_info);
static void nyx_interface_register_types(void)
{
type_register_static(&nyx_interface_info);
}
type_init(nyx_interface_register_types)
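To put the property list above in context, a hedged sketch of how the device might be instantiated on the QEMU command line (paths, the chardev wiring, and the filter addresses are placeholders; the property names are the ones defined above):

    qemu-system-x86_64 ... \
        -chardev socket,server,nowait,path=/tmp/workdir/interface_0,id=nyx_interface \
        -device nyx,chardev=nyx_interface,workdir=/tmp/workdir,sharedir=/tmp/share,worker_id=0,ip0_a=0xffffffff81000000,ip0_b=0xffffffff83000000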

View File

@ -23,10 +23,10 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define INTERFACE_H
/* 64k bitmap + 4k ijon buffer */
#define DEFAULT_NYX_IJON_BITMAP_SIZE 0x1000 /* fixed size buffer for IJON -> 4k */
#define DEFAULT_NYX_BITMAP_SIZE 0x10000 /* default bitmap size => 64k */
#define DEFAULT_NYX_IJON_BITMAP_SIZE 0x1000 /* fixed size buffer for IJON -> 4k */
#define DEFAULT_NYX_BITMAP_SIZE 0x10000 /* default bitmap size => 64k */
#define NYX_INTERFACE_PING 'x'
#define NYX_INTERFACE_PING 'x'
bool interface_send_char(char val);

View File

@ -1,282 +1,319 @@
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include "sysemu/kvm.h"
#include <linux/kvm.h>
#include "qemu-common.h"
#include "nyx/kvm_nested.h"
#include "cpu.h"
#include "nyx/debug.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "qemu-common.h"
#include "cpu.h"
#include "nyx/debug.h"
#include "nyx/kvm_nested.h"
#include "nyx/state/state.h"
#include "pt.h"
#define PPAGE_SIZE 0x1000
#define PENTRIES 0x200
#define PLEVEL_4_SHIFT 12
#define PLEVEL_3_SHIFT 21
#define PLEVEL_2_SHIFT 30
#define PLEVEL_1_SHIFT 39
#define PPAGE_SIZE 0x1000
#define PENTRIES 0x200
#define PLEVEL_4_SHIFT 12
#define PLEVEL_3_SHIFT 21
#define PLEVEL_2_SHIFT 30
#define PLEVEL_1_SHIFT 39
#define SIGN_EXTEND_TRESHOLD 0x100
#define SIGN_EXTEND 0xFFFF000000000000ULL
#define PAGETABLE_MASK 0xFFFFFFFFFF000ULL
#define CHECK_BIT(var,pos) !!(((var) & (1ULL<<(pos))))
#define SIGN_EXTEND 0xFFFF000000000000ULL
#define PAGETABLE_MASK 0xFFFFFFFFFF000ULL
#define CHECK_BIT(var, pos) !!(((var) & (1ULL << (pos))))
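A short sketch of how these constants drive the software page-table walk (assuming standard 4-level x86-64 paging; vaddr and entry are hypothetical locals):

    uint64_t pml4_idx = (vaddr >> PLEVEL_1_SHIFT) & (PENTRIES - 1); /* bits 47:39 */
    uint64_t pdpt_idx = (vaddr >> PLEVEL_2_SHIFT) & (PENTRIES - 1); /* bits 38:30 */
    uint64_t pd_idx   = (vaddr >> PLEVEL_3_SHIFT) & (PENTRIES - 1); /* bits 29:21 */
    uint64_t pt_idx   = (vaddr >> PLEVEL_4_SHIFT) & (PENTRIES - 1); /* bits 20:12 */

    uint64_t next_level_pa = entry & PAGETABLE_MASK; /* next table's phys addr */
    if (vaddr >> 47) {
        vaddr |= SIGN_EXTEND; /* canonicalize upper-half addresses */
    }
    /* individual flag bits are probed with CHECK_BIT(entry, n) */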
struct vmcs_hdr {
uint32_t revision_id:31;
uint32_t shadow_vmcs:1;
uint32_t revision_id : 31;
uint32_t shadow_vmcs : 1;
};
struct __attribute__((__packed__)) vmcs12 {
/* According to the Intel spec, a VMCS region must start with the
* following two fields. Then follow implementation-specific data.
*/
struct vmcs_hdr hdr;
uint32_t abort;
/* According to the Intel spec, a VMCS region must start with the
* following two fields. Then follow implementation-specific data.
*/
struct vmcs_hdr hdr;
uint32_t abort;
uint32_t launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
uint32_t padding[7]; /* room for future expansion */
uint32_t launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
uint32_t padding[7]; /* room for future expansion */
uint64_t io_bitmap_a;
uint64_t io_bitmap_b;
uint64_t msr_bitmap;
uint64_t vm_exit_msr_store_addr;
uint64_t vm_exit_msr_load_addr;
uint64_t vm_entry_msr_load_addr;
uint64_t tsc_offset;
uint64_t virtual_apic_page_addr;
uint64_t apic_access_addr;
uint64_t posted_intr_desc_addr;
uint64_t ept_pointer;
uint64_t eoi_exit_bitmap0;
uint64_t eoi_exit_bitmap1;
uint64_t eoi_exit_bitmap2;
uint64_t eoi_exit_bitmap3;
uint64_t xss_exit_bitmap;
uint64_t guest_physical_address;
uint64_t vmcs_link_pointer;
uint64_t guest_ia32_debugctl;
uint64_t guest_ia32_pat;
uint64_t guest_ia32_efer;
uint64_t guest_ia32_perf_global_ctrl;
uint64_t guest_pdptr0;
uint64_t guest_pdptr1;
uint64_t guest_pdptr2;
uint64_t guest_pdptr3;
uint64_t guest_bndcfgs;
uint64_t host_ia32_pat;
uint64_t host_ia32_efer;
uint64_t host_ia32_perf_global_ctrl;
uint64_t vmread_bitmap;
uint64_t vmwrite_bitmap;
uint64_t vm_function_control;
uint64_t eptp_list_address;
uint64_t pml_address;
uint64_t padding64[3]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
* unsigned long fields with no explicit size. We use uint64_t
* instead. Luckily, x86 is little-endian.
*/
uint64_t cr0_guest_host_mask;
uint64_t cr4_guest_host_mask;
uint64_t cr0_read_shadow;
uint64_t cr4_read_shadow;
uint64_t cr3_target_value0;
uint64_t cr3_target_value1;
uint64_t cr3_target_value2;
uint64_t cr3_target_value3;
uint64_t exit_qualification;
uint64_t guest_linear_address;
uint64_t guest_cr0;
uint64_t guest_cr3;
uint64_t guest_cr4;
uint64_t guest_es_base;
uint64_t guest_cs_base;
uint64_t guest_ss_base;
uint64_t guest_ds_base;
uint64_t guest_fs_base;
uint64_t guest_gs_base;
uint64_t guest_ldtr_base;
uint64_t guest_tr_base;
uint64_t guest_gdtr_base;
uint64_t guest_idtr_base;
uint64_t guest_dr7;
uint64_t guest_rsp;
uint64_t guest_rip;
uint64_t guest_rflags;
uint64_t guest_pending_dbg_exceptions;
uint64_t guest_sysenter_esp;
uint64_t guest_sysenter_eip;
uint64_t host_cr0;
uint64_t host_cr3;
uint64_t host_cr4;
uint64_t host_fs_base;
uint64_t host_gs_base;
uint64_t host_tr_base;
uint64_t host_gdtr_base;
uint64_t host_idtr_base;
uint64_t host_ia32_sysenter_esp;
uint64_t host_ia32_sysenter_eip;
uint64_t host_rsp;
uint64_t host_rip;
uint64_t paddingl[8]; /* room for future expansion */
uint32_t pin_based_vm_exec_control;
uint32_t cpu_based_vm_exec_control;
uint32_t exception_bitmap;
uint32_t page_fault_error_code_mask;
uint32_t page_fault_error_code_match;
uint32_t cr3_target_count;
uint32_t vm_exit_controls;
uint32_t vm_exit_msr_store_count;
uint32_t vm_exit_msr_load_count;
uint32_t vm_entry_controls;
uint32_t vm_entry_msr_load_count;
uint32_t vm_entry_intr_info_field;
uint32_t vm_entry_exception_error_code;
uint32_t vm_entry_instruction_len;
uint32_t tpr_threshold;
uint32_t secondary_vm_exec_control;
uint32_t vm_instruction_error;
uint32_t vm_exit_reason;
uint32_t vm_exit_intr_info;
uint32_t vm_exit_intr_error_code;
uint32_t idt_vectoring_info_field;
uint32_t idt_vectoring_error_code;
uint32_t vm_exit_instruction_len;
uint32_t vmx_instruction_info;
uint32_t guest_es_limit;
uint32_t guest_cs_limit;
uint32_t guest_ss_limit;
uint32_t guest_ds_limit;
uint32_t guest_fs_limit;
uint32_t guest_gs_limit;
uint32_t guest_ldtr_limit;
uint32_t guest_tr_limit;
uint32_t guest_gdtr_limit;
uint32_t guest_idtr_limit;
uint32_t guest_es_ar_bytes;
uint32_t guest_cs_ar_bytes;
uint32_t guest_ss_ar_bytes;
uint32_t guest_ds_ar_bytes;
uint32_t guest_fs_ar_bytes;
uint32_t guest_gs_ar_bytes;
uint32_t guest_ldtr_ar_bytes;
uint32_t guest_tr_ar_bytes;
uint32_t guest_interruptibility_info;
uint32_t guest_activity_state;
uint32_t guest_sysenter_cs;
uint32_t host_ia32_sysenter_cs;
uint32_t vmx_preemption_timer_value;
uint32_t padding32[7]; /* room for future expansion */
uint16_t virtual_processor_id;
uint16_t posted_intr_nv;
uint16_t guest_es_selector;
uint16_t guest_cs_selector;
uint16_t guest_ss_selector;
uint16_t guest_ds_selector;
uint16_t guest_fs_selector;
uint16_t guest_gs_selector;
uint16_t guest_ldtr_selector;
uint16_t guest_tr_selector;
uint16_t guest_intr_status;
uint16_t host_es_selector;
uint16_t host_cs_selector;
uint16_t host_ss_selector;
uint16_t host_ds_selector;
uint16_t host_fs_selector;
uint16_t host_gs_selector;
uint16_t host_tr_selector;
uint16_t guest_pml_index;
uint64_t io_bitmap_a;
uint64_t io_bitmap_b;
uint64_t msr_bitmap;
uint64_t vm_exit_msr_store_addr;
uint64_t vm_exit_msr_load_addr;
uint64_t vm_entry_msr_load_addr;
uint64_t tsc_offset;
uint64_t virtual_apic_page_addr;
uint64_t apic_access_addr;
uint64_t posted_intr_desc_addr;
uint64_t ept_pointer;
uint64_t eoi_exit_bitmap0;
uint64_t eoi_exit_bitmap1;
uint64_t eoi_exit_bitmap2;
uint64_t eoi_exit_bitmap3;
uint64_t xss_exit_bitmap;
uint64_t guest_physical_address;
uint64_t vmcs_link_pointer;
uint64_t guest_ia32_debugctl;
uint64_t guest_ia32_pat;
uint64_t guest_ia32_efer;
uint64_t guest_ia32_perf_global_ctrl;
uint64_t guest_pdptr0;
uint64_t guest_pdptr1;
uint64_t guest_pdptr2;
uint64_t guest_pdptr3;
uint64_t guest_bndcfgs;
uint64_t host_ia32_pat;
uint64_t host_ia32_efer;
uint64_t host_ia32_perf_global_ctrl;
uint64_t vmread_bitmap;
uint64_t vmwrite_bitmap;
uint64_t vm_function_control;
uint64_t eptp_list_address;
uint64_t pml_address;
uint64_t padding64[3]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
* unsigned long fields with no explicit size. We use uint64_t instead.
* Luckily, x86 is little-endian.
*/
uint64_t cr0_guest_host_mask;
uint64_t cr4_guest_host_mask;
uint64_t cr0_read_shadow;
uint64_t cr4_read_shadow;
uint64_t cr3_target_value0;
uint64_t cr3_target_value1;
uint64_t cr3_target_value2;
uint64_t cr3_target_value3;
uint64_t exit_qualification;
uint64_t guest_linear_address;
uint64_t guest_cr0;
uint64_t guest_cr3;
uint64_t guest_cr4;
uint64_t guest_es_base;
uint64_t guest_cs_base;
uint64_t guest_ss_base;
uint64_t guest_ds_base;
uint64_t guest_fs_base;
uint64_t guest_gs_base;
uint64_t guest_ldtr_base;
uint64_t guest_tr_base;
uint64_t guest_gdtr_base;
uint64_t guest_idtr_base;
uint64_t guest_dr7;
uint64_t guest_rsp;
uint64_t guest_rip;
uint64_t guest_rflags;
uint64_t guest_pending_dbg_exceptions;
uint64_t guest_sysenter_esp;
uint64_t guest_sysenter_eip;
uint64_t host_cr0;
uint64_t host_cr3;
uint64_t host_cr4;
uint64_t host_fs_base;
uint64_t host_gs_base;
uint64_t host_tr_base;
uint64_t host_gdtr_base;
uint64_t host_idtr_base;
uint64_t host_ia32_sysenter_esp;
uint64_t host_ia32_sysenter_eip;
uint64_t host_rsp;
uint64_t host_rip;
uint64_t paddingl[8]; /* room for future expansion */
uint32_t pin_based_vm_exec_control;
uint32_t cpu_based_vm_exec_control;
uint32_t exception_bitmap;
uint32_t page_fault_error_code_mask;
uint32_t page_fault_error_code_match;
uint32_t cr3_target_count;
uint32_t vm_exit_controls;
uint32_t vm_exit_msr_store_count;
uint32_t vm_exit_msr_load_count;
uint32_t vm_entry_controls;
uint32_t vm_entry_msr_load_count;
uint32_t vm_entry_intr_info_field;
uint32_t vm_entry_exception_error_code;
uint32_t vm_entry_instruction_len;
uint32_t tpr_threshold;
uint32_t secondary_vm_exec_control;
uint32_t vm_instruction_error;
uint32_t vm_exit_reason;
uint32_t vm_exit_intr_info;
uint32_t vm_exit_intr_error_code;
uint32_t idt_vectoring_info_field;
uint32_t idt_vectoring_error_code;
uint32_t vm_exit_instruction_len;
uint32_t vmx_instruction_info;
uint32_t guest_es_limit;
uint32_t guest_cs_limit;
uint32_t guest_ss_limit;
uint32_t guest_ds_limit;
uint32_t guest_fs_limit;
uint32_t guest_gs_limit;
uint32_t guest_ldtr_limit;
uint32_t guest_tr_limit;
uint32_t guest_gdtr_limit;
uint32_t guest_idtr_limit;
uint32_t guest_es_ar_bytes;
uint32_t guest_cs_ar_bytes;
uint32_t guest_ss_ar_bytes;
uint32_t guest_ds_ar_bytes;
uint32_t guest_fs_ar_bytes;
uint32_t guest_gs_ar_bytes;
uint32_t guest_ldtr_ar_bytes;
uint32_t guest_tr_ar_bytes;
uint32_t guest_interruptibility_info;
uint32_t guest_activity_state;
uint32_t guest_sysenter_cs;
uint32_t host_ia32_sysenter_cs;
uint32_t vmx_preemption_timer_value;
uint32_t padding32[7]; /* room for future expansion */
uint16_t virtual_processor_id;
uint16_t posted_intr_nv;
uint16_t guest_es_selector;
uint16_t guest_cs_selector;
uint16_t guest_ss_selector;
uint16_t guest_ds_selector;
uint16_t guest_fs_selector;
uint16_t guest_gs_selector;
uint16_t guest_ldtr_selector;
uint16_t guest_tr_selector;
uint16_t guest_intr_status;
uint16_t host_es_selector;
uint16_t host_cs_selector;
uint16_t host_ss_selector;
uint16_t host_ds_selector;
uint16_t host_fs_selector;
uint16_t host_gs_selector;
uint16_t host_tr_selector;
uint16_t guest_pml_index;
};
static void write_address(uint64_t address, uint64_t size, uint64_t prot){
static uint64_t next_address = PAGETABLE_MASK;
static uint64_t last_address = 0x0;
static uint64_t last_prot = 0;
if(address != next_address || prot != last_prot){
/* do not print guard pages or empty pages without any permissions */
if(last_address && (CHECK_BIT(last_prot, 1) || !CHECK_BIT(last_prot, 63))){
if(CHECK_BIT(last_prot, 1) && !CHECK_BIT(last_prot, 63)){
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c [WARNING]",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63)? 'X' : '-');
}
else{
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63)? 'X' : '-');
}
}
last_address = address;
}
next_address = address+size;
last_prot = prot;
static void write_address(uint64_t address, uint64_t size, uint64_t prot)
{
static uint64_t next_address = PAGETABLE_MASK;
static uint64_t last_address = 0x0;
static uint64_t last_prot = 0;
if (address != next_address || prot != last_prot) {
/* do not print guard pages or empty pages without any permissions */
if (last_address && (CHECK_BIT(last_prot, 1) || !CHECK_BIT(last_prot, 63))) {
if (CHECK_BIT(last_prot, 1) && !CHECK_BIT(last_prot, 63)) {
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c [WARNING]",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63) ? 'X' : '-');
} else {
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c", last_address,
next_address, CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63) ? 'X' : '-');
}
}
last_address = address;
}
next_address = address + size;
last_prot = prot;
}
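write_address() coalesces page-table entries into printable ranges: it buffers the previous range and only flushes it once the incoming address is non-contiguous or the protection bits change, flagging ranges that are both writable and executable with [WARNING]. A minimal usage sketch, assuming CHECK_BIT(v, n) tests bit n of v (bit 1 = writable, bit 2 = user, bit 63 = NX in the x86-64 PTE layout); example_dump() is a hypothetical caller, not part of this commit:

static void example_dump(void) /* hypothetical demo */
{
    uint64_t wu = (1ULL << 1) | (1ULL << 2); /* writable + user, NX clear */
    write_address(0x1000, 0x1000, wu);
    write_address(0x2000, 0x1000, wu); /* contiguous, same prot: buffered */
    write_address(0x3000, 0x1000, wu); /* still buffered */
    /* gap: flushes "0000000000001000 - 0000000000004000 WUX [WARNING]" */
    write_address(0x10000, 0x1000, 0);
}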
void print_48_paging(uint64_t cr3){
void print_48_paging(uint64_t cr3)
{
uint64_t paging_entries_level_1[PENTRIES];
uint64_t paging_entries_level_2[PENTRIES];
uint64_t paging_entries_level_3[PENTRIES];
uint64_t paging_entries_level_4[PENTRIES];
uint64_t address_identifier_1, address_identifier_2, address_identifier_3, address_identifier_4;
uint32_t i1, i2, i3,i4;
uint64_t address_identifier_1, address_identifier_2, address_identifier_3,
address_identifier_4;
uint32_t i1, i2, i3, i4;
cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_level_1, PPAGE_SIZE, false);
for(i1 = 0; i1 < 512; i1++){
if(paging_entries_level_1[i1]){
cpu_physical_memory_rw((cr3 & PAGETABLE_MASK), (uint8_t *)paging_entries_level_1,
PPAGE_SIZE, false);
for (i1 = 0; i1 < 512; i1++) {
if (paging_entries_level_1[i1]) {
address_identifier_1 = ((uint64_t)i1) << PLEVEL_1_SHIFT;
if (i1 & SIGN_EXTEND_TRESHOLD){
if (i1 & SIGN_EXTEND_TRESHOLD) {
address_identifier_1 |= SIGN_EXTEND;
}
if(CHECK_BIT(paging_entries_level_1[i1], 0)){ /* otherwise swapped out */
cpu_physical_memory_rw((paging_entries_level_1[i1]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_2, PPAGE_SIZE, false);
for(i2 = 0; i2 < PENTRIES; i2++){
if(paging_entries_level_2[i2]){
address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) + address_identifier_1;
if (CHECK_BIT(paging_entries_level_2[i2], 0)){ /* otherwise swapped out */
if((paging_entries_level_2[i2]&PAGETABLE_MASK) == (paging_entries_level_1[i1]&PAGETABLE_MASK)){
if (CHECK_BIT(paging_entries_level_1[i1], 0))
{ /* otherwise swapped out */
cpu_physical_memory_rw((paging_entries_level_1[i1] & PAGETABLE_MASK),
(uint8_t *)paging_entries_level_2, PPAGE_SIZE,
false);
for (i2 = 0; i2 < PENTRIES; i2++) {
if (paging_entries_level_2[i2]) {
address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) +
address_identifier_1;
if (CHECK_BIT(paging_entries_level_2[i2], 0))
{ /* otherwise swapped out */
if ((paging_entries_level_2[i2] & PAGETABLE_MASK) ==
(paging_entries_level_1[i1] & PAGETABLE_MASK))
{
/* loop */
continue;
}
if (CHECK_BIT(paging_entries_level_2[i2], 7)){
write_address(address_identifier_2, 0x40000000, (uint64_t)paging_entries_level_2[i2] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
if (CHECK_BIT(paging_entries_level_2[i2], 7)) {
write_address(address_identifier_2, 0x40000000,
(uint64_t)paging_entries_level_2[i2] &
((1ULL << 63) | (1ULL << 2) |
(1ULL << 1)));
} else {
/* otherwise this PDPE references a page directory */
cpu_physical_memory_rw((paging_entries_level_2[i2]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_3, PPAGE_SIZE, false);
for(i3 = 0; i3 < PENTRIES; i3++){
if(paging_entries_level_3[i3]){
address_identifier_3 = (((uint64_t)i3) << PLEVEL_3_SHIFT) + address_identifier_2;
if (CHECK_BIT(paging_entries_level_3[i3], 0)){ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_level_3[i3], 7)){
write_address(address_identifier_3, 0x200000, (uint64_t)paging_entries_level_3[i3] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
cpu_physical_memory_rw((paging_entries_level_3[i3]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_4, PPAGE_SIZE, false);
for(i4 = 0; i4 < PENTRIES; i4++){
if(paging_entries_level_4[i4]){
address_identifier_4 = (((uint64_t)i4) << PLEVEL_4_SHIFT) + address_identifier_3;
if (CHECK_BIT(paging_entries_level_4[i4], 0)){
write_address(address_identifier_4, 0x1000, (uint64_t)paging_entries_level_4[i4] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
}
}
}
}
}
}
cpu_physical_memory_rw((paging_entries_level_2[i2] &
PAGETABLE_MASK),
(uint8_t *)paging_entries_level_3,
PPAGE_SIZE, false);
for (i3 = 0; i3 < PENTRIES; i3++) {
if (paging_entries_level_3[i3]) {
address_identifier_3 =
(((uint64_t)i3) << PLEVEL_3_SHIFT) +
address_identifier_2;
if (CHECK_BIT(paging_entries_level_3[i3], 0))
{ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_level_3[i3],
7))
{
write_address(
address_identifier_3, 0x200000,
(uint64_t)paging_entries_level_3[i3] &
((1ULL << 63) | (1ULL << 2) |
(1ULL << 1)));
} else {
cpu_physical_memory_rw(
(paging_entries_level_3[i3] &
PAGETABLE_MASK),
(uint8_t *)paging_entries_level_4,
PPAGE_SIZE, false);
for (i4 = 0; i4 < PENTRIES; i4++) {
if (paging_entries_level_4[i4]) {
address_identifier_4 =
(((uint64_t)i4)
<< PLEVEL_4_SHIFT) +
address_identifier_3;
if (CHECK_BIT(
paging_entries_level_4[i4],
0))
{
write_address(
address_identifier_4,
0x1000,
(uint64_t)paging_entries_level_4
[i4] &
((1ULL << 63) |
(1ULL << 2) |
(1ULL << 1)));
}
}
}
}
}
}
}
}
}
}
@ -284,107 +321,119 @@ void print_48_paging(uint64_t cr3){
}
}
}
write_address(0, 0x1000, 0);
write_address(0, 0x1000, 0);
}
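print_48_paging() walks all four levels of a 48-bit virtual address space: i1 indexes the PML4, i2 the PDPT, i3 the page directory and i4 the page table, with bit 7 (PS) at the PDPT/PD levels terminating the walk early for 1 GiB (0x40000000) and 2 MiB (0x200000) mappings, and high PML4 indices sign-extended into the canonical upper half. A sketch of the address reconstruction, assuming the usual shift values behind PLEVEL_1..4_SHIFT (39/30/21/12) and SIGN_EXTEND_TRESHOLD == 0x100:

/* illustrative only: rebuild the virtual address of one 4 KiB PTE */
uint64_t vaddr = ((uint64_t)i1 << 39) | ((uint64_t)i2 << 30) |
                 ((uint64_t)i3 << 21) | ((uint64_t)i4 << 12);
if (i1 & 0x100) {
    vaddr |= 0xFFFF000000000000ULL; /* assumed SIGN_EXTEND: canonical upper half */
}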
uint64_t get_nested_guest_rip(CPUState *cpu){
uint64_t get_nested_guest_rip(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12 *saved_vmcs = (struct vmcs12 *)&(env->nested_state->data);
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
return saved_vmcs->guest_rip;
return saved_vmcs->guest_rip;
}
uint64_t get_nested_host_rip(CPUState *cpu){
uint64_t get_nested_host_rip(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12 *saved_vmcs = (struct vmcs12 *)&(env->nested_state->data);
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
return saved_vmcs->host_rip;
return saved_vmcs->host_rip;
}
uint64_t get_nested_host_cr3(CPUState *cpu){
uint64_t get_nested_host_cr3(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12 *saved_vmcs = (struct vmcs12 *)&(env->nested_state->data);
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
return saved_vmcs->host_cr3;
return saved_vmcs->host_cr3;
}
void set_nested_rip(CPUState *cpu, uint64_t rip){
void set_nested_rip(CPUState *cpu, uint64_t rip)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
struct vmcs12 *saved_vmcs = (struct vmcs12 *)&(env->nested_state->data);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
saved_vmcs->guest_rip = rip;
saved_vmcs->guest_rip = rip;
}
void kvm_nested_get_info(CPUState *cpu){
void kvm_nested_get_info(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
__attribute__((unused)) struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx", saved_vmcs->host_ia32_efer);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0);
__attribute__((unused)) struct vmcs12 *saved_vmcs =
(struct vmcs12 *)&(env->nested_state->data);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx",
saved_vmcs->host_ia32_efer);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0);
return;
return;
}
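The four helpers above share one pattern: pull the cached nested state out of KVM with KVM_GET_NESTED_STATE and reinterpret its data area as the struct vmcs12 defined at the top of this file, i.e. the L2 guest's VMCS as seen by the L1 hypervisor. A sketch of how they could be deduplicated (fetch_vmcs12() is hypothetical); note that set_nested_rip() only edits this in-memory copy, which presumably reaches the kernel the next time QEMU pushes nested state back via KVM_SET_NESTED_STATE:

static struct vmcs12 *fetch_vmcs12(CPUState *cpu) /* hypothetical helper */
{
    CPUX86State *env = &X86_CPU(cpu)->env;
    kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
    return (struct vmcs12 *)&env->nested_state->data;
}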
#define AREA_DESC_LEN 256
#define MAGIC_NUMBER 0x41584548U
#define AREA_DESC_LEN 256
#define MAGIC_NUMBER 0x41584548U
typedef struct {
uint32_t base;
uint32_t size;
uint32_t virtual_base;
char desc[AREA_DESC_LEN];
}area_t_export_t;
uint32_t base;
uint32_t size;
uint32_t virtual_base;
char desc[AREA_DESC_LEN];
} area_t_export_t;
typedef struct {
uint32_t magic;
uint8_t num_mmio_areas;
uint8_t num_io_areas;
uint8_t num_alloc_areas;
uint8_t padding;
}config_t;
uint32_t magic;
uint8_t num_mmio_areas;
uint8_t num_io_areas;
uint8_t num_alloc_areas;
uint8_t padding;
} config_t;
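The configuration blob is the fixed-size config_t header followed immediately by a packed array of area_t_export_t records: num_mmio_areas MMIO entries first, then num_io_areas I/O entries (num_alloc_areas is carried along but not printed below). A small accessor, as a sketch, for the pointer arithmetic that print_configuration() repeats inline:

static inline area_t_export_t *config_areas(void *configuration) /* sketch */
{
    /* the area array starts right after the fixed-size header */
    return (area_t_export_t *)((uint8_t *)configuration + sizeof(config_t));
}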
void print_configuration(FILE *stream, void* configuration, size_t size){
fprintf(stream, "%s: size: %lx\n", __func__, size);
assert((size-sizeof(config_t))%sizeof(area_t_export_t) == 0);
void print_configuration(FILE *stream, void *configuration, size_t size)
{
fprintf(stream, "%s: size: %lx\n", __func__, size);
assert((size - sizeof(config_t)) % sizeof(area_t_export_t) == 0);
assert(((config_t*)configuration)->magic == MAGIC_NUMBER);
assert(((config_t *)configuration)->magic == MAGIC_NUMBER);
fprintf(stream, "%s: num_mmio_areas: %x\n", __func__, ((config_t*)configuration)->num_mmio_areas);
fprintf(stream, "%s: num_io_areas: %x\n", __func__, ((config_t*)configuration)->num_io_areas);
fprintf(stream, "%s: num_alloc_areas: %x\n", __func__, ((config_t*)configuration)->num_alloc_areas);
fprintf(stream, "%s: num_mmio_areas: %x\n", __func__,
((config_t *)configuration)->num_mmio_areas);
fprintf(stream, "%s: num_io_areas: %x\n", __func__,
((config_t *)configuration)->num_io_areas);
fprintf(stream, "%s: num_alloc_areas: %x\n", __func__,
((config_t *)configuration)->num_alloc_areas);
for(int i = 0; i < ((config_t*)configuration)->num_mmio_areas; i++){
fprintf(stream, "\t-> MMIO: 0x%x (V: 0x%x) [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].virtual_base,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].size,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].desc );
}
for (int i = 0; i < ((config_t *)configuration)->num_mmio_areas; i++) {
fprintf(stream, "\t-> MMIO: 0x%x (V: 0x%x) [0x%x]\t%s\n",
((area_t_export_t *)(configuration + sizeof(config_t)))[i].base,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].virtual_base,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].size,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].desc);
}
for(int i = ((config_t*)configuration)->num_mmio_areas; i < (((config_t*)configuration)->num_mmio_areas+((config_t*)configuration)->num_io_areas); i++){
fprintf(stream, "\t-> IO: 0x%x [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].size,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].desc );
}
for (int i = ((config_t *)configuration)->num_mmio_areas;
i < (((config_t *)configuration)->num_mmio_areas +
((config_t *)configuration)->num_io_areas);
i++)
{
fprintf(stream, "\t-> IO: 0x%x [0x%x]\t%s\n",
((area_t_export_t *)(configuration + sizeof(config_t)))[i].base,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].size,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].desc);
}
}

View File

@ -1,8 +1,8 @@
#pragma once
#include <stdint.h>
void print_48_paging(uint64_t cr3);
void kvm_nested_get_info(CPUState *cpu);
void print_48_paging(uint64_t cr3);
void kvm_nested_get_info(CPUState *cpu);
uint64_t get_nested_guest_rip(CPUState *cpu);
uint64_t get_nested_host_rip(CPUState *cpu);
@ -10,4 +10,4 @@ uint64_t get_nested_host_rip(CPUState *cpu);
uint64_t get_nested_host_cr3(CPUState *cpu);
void set_nested_rip(CPUState *cpu, uint64_t rip);
void print_configuration(FILE *stream, void* configuration, size_t size);
void print_configuration(FILE *stream, void *configuration, size_t size);

File diff suppressed because it is too large

View File

@ -22,53 +22,68 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#ifndef MEMORY_ACCESS_H
#define MEMORY_ACCESS_H
#include <linux/kvm.h>
#include "qemu-common.h"
#include "sysemu/kvm_int.h"
#include "qemu-common.h"
#include "nyx/types.h"
#include <linux/kvm.h>
#define MEM_SPLIT_START 0x0C0000000
#define MEM_SPLIT_END 0x100000000
/* i386 pc_piix low_mem address translation */
#define address_to_ram_offset(offset) (offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset)
#define ram_offset_to_address(offset) (offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset)
#define address_to_ram_offset(offset) \
(offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset)
#define ram_offset_to_address(offset) \
(offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset)
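These two macros undo the classic i386 pc_piix low-memory split: guest RAM is linear only up to 3 GiB (MEM_SPLIT_START), and everything beyond is relocated above 4 GiB (MEM_SPLIT_END) to keep the PCI hole free. Worked examples, compile-time checkable under C11 (a sketch; QEMU code would normally use QEMU_BUILD_BUG_ON):

_Static_assert(address_to_ram_offset(0x100000000ULL) == 0x0C0000000ULL,
               "first byte above 4 GiB lands at ram offset 3 GiB");
_Static_assert(ram_offset_to_address(0x0C0000000ULL) == 0x100000000ULL,
               "and the translation is invertible");
/* addresses and offsets below 3 GiB translate 1:1 in both directions */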
mem_mode_t get_current_mem_mode(CPUState *cpu);
uint64_t get_paging_phys_addr(CPUState *cpu, uint64_t cr3, uint64_t addr);
bool read_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool write_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool read_physical_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu);
bool write_physical_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu);
bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu);
bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu);
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu);
bool remap_slots(uint64_t addr, uint32_t slots, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3);
bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3);
bool remap_slots(uint64_t addr,
uint32_t slots,
CPUState *cpu,
int fd,
uint64_t shm_size,
bool virtual,
uint64_t cr3);
bool remap_slot(uint64_t addr,
uint32_t slot,
CPUState *cpu,
int fd,
uint64_t shm_size,
bool virtual,
uint64_t cr3);
bool read_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3);
bool read_virtual_memory_cr3(
uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu, uint64_t cr3);
bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool read_virtual_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu);
bool write_virtual_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu);
void hexdump_virtual_memory(uint64_t address, uint32_t size, CPUState *cpu);
bool is_addr_mapped(uint64_t address, CPUState *cpu);
bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3);
int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len);
int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len);
int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len);
int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len);
void remove_all_breakpoints(CPUState *cpu);
uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr3);
bool dump_page_cr3_snapshot(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3);
bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3);
bool dump_page_cr3_snapshot(uint64_t address, uint8_t *data, CPUState *cpu, uint64_t cr3);
bool dump_page_cr3_ht(uint64_t address, uint8_t *data, CPUState *cpu, uint64_t cr3);
bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3);
void print_48_pagetables(uint64_t cr3);
bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu);
bool dump_page_ht(uint64_t address, uint8_t *data, CPUState *cpu);
void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr, int fd);
void resize_shared_memory(uint32_t new_size, uint32_t *shm_size, void **shm_ptr, int fd);
#endif

View File

@ -1,25 +1,25 @@
#include "qemu/osdep.h"
#include <stdio.h>
#include <stdint.h>
#include "qemu/main-loop.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "nested_hypercalls.h"
#include "debug.h"
#include "interface.h"
#include "kvm_nested.h"
#include "memory_access.h"
#include "debug.h"
#include "nested_hypercalls.h"
#include "interface.h"
#include "state/state.h"
#include "pt.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "qemu/main-loop.h"
#include "nyx/helpers.h"
#include "pt.h"
#include "state/state.h"
#include <stdint.h>
#include <stdio.h>
//#define DEBUG_NESTED_HYPERCALLS
// #define DEBUG_NESTED_HYPERCALLS
bool hypercalls_enabled = false;
bool create_snapshot = false;
bool create_snapshot = false;
uint64_t htos_cr3 = 0;
uint64_t htos_cr3 = 0;
uint64_t htos_config = 0;
int nested_once = 0;
@ -27,149 +27,174 @@ int nested_once = 0;
bool nested_setup_snapshot_once = false;
void handle_hypercall_kafl_nested_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
/* magic */
nyx_trace();
uint32_t size = 0;
read_physical_memory(htos_config, (uint8_t *)&size, sizeof(uint32_t), cpu);
void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
/* magic */
nyx_trace();
uint32_t size = 0;
read_physical_memory(htos_config, (uint8_t*) &size, sizeof(uint32_t), cpu);
void *buffer = malloc(size);
void* buffer = malloc(size);
read_physical_memory(htos_config + sizeof(uint32_t), buffer, size, cpu);
print_configuration(stderr, buffer, size);
read_physical_memory(htos_config+sizeof(uint32_t), buffer, size, cpu);
print_configuration(stderr, buffer, size);
FILE *f = fopen("/tmp/hypertrash_configration", "w");
print_configuration(f, buffer, size);
fclose(f);
FILE* f = fopen("/tmp/hypertrash_configration", "w");
print_configuration(f, buffer, size);
fclose(f);
free(buffer);
free(buffer);
}
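The handler expects the guest to have placed, at the shared htos_config page, a 32-bit total size followed by the serialized blob that print_configuration() parses. On the guest side that corresponds to something like the following (a sketch; the struct name is hypothetical and only mirrors what the read path above implies):

struct nested_config_blob {          /* hypothetical guest-side layout */
    uint32_t size;                   /* byte count of everything after this field */
    config_t cfg;                    /* cfg.magic must equal MAGIC_NUMBER
                                        (0x41584548, "HEXA" in little-endian) */
    area_t_export_t areas[];         /* num_mmio_areas + num_io_areas entries */
} __attribute__((packed));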
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
nyx_trace();
char hprintf_buffer[0x1000];
read_physical_memory((uint64_t)run->hypercall.args[0], (uint8_t*)hprintf_buffer, 0x1000, cpu);
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
char hprintf_buffer[0x1000];
read_physical_memory((uint64_t)run->hypercall.args[0], (uint8_t *)hprintf_buffer,
0x1000, cpu);
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, 0x1000)+1);
synchronization_lock_hprintf();
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer,
strnlen(hprintf_buffer, 0x1000) + 1);
synchronization_lock_hprintf();
}
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
nyx_trace();
kvm_arch_get_registers(cpu);
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
kvm_arch_get_registers(cpu);
if((uint64_t)run->hypercall.args[0]){
nyx_debug_p(CORE_PREFIX, "handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t ADDRESS:\t%lx\t CR3:\t%lx", (uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1], (uint64_t)run->hypercall.args[2]);
}
else{
abort();
}
if ((uint64_t)run->hypercall.args[0]) {
nyx_debug_p(CORE_PREFIX,
"handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t "
"ADDRESS:\t%lx\t CR3:\t%lx",
(uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1],
(uint64_t)run->hypercall.args[2]);
} else {
abort();
}
size_t buffer_size = (size_t)((uint64_t)run->hypercall.args[0] * sizeof(uint64_t));
uint64_t* buffer = malloc(buffer_size);
memset(buffer, 0x0, buffer_size);
size_t buffer_size = (size_t)((uint64_t)run->hypercall.args[0] * sizeof(uint64_t));
uint64_t *buffer = malloc(buffer_size);
memset(buffer, 0x0, buffer_size);
read_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu);
htos_cr3 = (uint64_t)run->hypercall.args[0];
read_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t *)buffer,
buffer_size, cpu);
htos_cr3 = (uint64_t)run->hypercall.args[0];
for(uint64_t i = 0; i < (uint64_t)run->hypercall.args[0]; i++){
if(i == 0){
htos_config = buffer[i];
}
nyx_debug_p(CORE_PREFIX, "ADDRESS: %lx", buffer[i]);
remap_payload_slot(buffer[i], i, cpu);
}
for (uint64_t i = 0; i < (uint64_t)run->hypercall.args[0]; i++) {
if (i == 0) {
htos_config = buffer[i];
}
nyx_debug_p(CORE_PREFIX, "ADDRESS: %lx", buffer[i]);
remap_payload_slot(buffer[i], i, cpu);
}
set_payload_pages(buffer, (uint32_t)run->hypercall.args[0]);
set_payload_pages(buffer, (uint32_t)run->hypercall.args[0]);
// wipe memory
memset(buffer, 0x00, buffer_size);
write_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu);
// wipe memory
memset(buffer, 0x00, buffer_size);
write_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t *)buffer,
buffer_size, cpu);
free(buffer);
free(buffer);
}
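args[0] carries the number of payload pages and args[1] the guest-physical address of the page list; each listed page is remapped into a host shared-memory slot (slot 0 doubling as htos_config) and the list is wiped afterwards so the addresses do not linger in guest memory. The matching guest-side call would look roughly like this (a sketch; kafl_hypercall() and the constant are placeholders for the guest agent's hypercall wrapper):

uint64_t pages[2] = { config_page_gpa, payload_page_gpa }; /* hypothetical */
kafl_hypercall(HYPERCALL_KAFL_NESTED_PREPARE,
               2,                /* args[0]: page count */
               (uintptr_t)pages, /* args[1]: gpa of the page list */
               guest_cr3);       /* args[2]: logged as CR3 above */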
bool acquired = false;
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
nyx_trace();
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
if(!hypercalls_enabled){
return;
}
if (!hypercalls_enabled) {
return;
}
bool state = GET_GLOBAL_STATE()->in_reload_mode;
if(!state){
GET_GLOBAL_STATE()->in_reload_mode = true;
synchronization_disable_pt(cpu);
GET_GLOBAL_STATE()->in_reload_mode = false;
}
else{
synchronization_disable_pt(cpu);
}
bool state = GET_GLOBAL_STATE()->in_reload_mode;
if (!state) {
GET_GLOBAL_STATE()->in_reload_mode = true;
synchronization_disable_pt(cpu);
GET_GLOBAL_STATE()->in_reload_mode = false;
} else {
synchronization_disable_pt(cpu);
}
}
void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
nyx_trace();
void handle_hypercall_kafl_nested_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
// TODO not implemented - see git history for scraps
nyx_error("Not implemented.\n");
abort();
}
static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t addr){
nyx_trace();
static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t addr)
{
nyx_trace();
kvm_remove_all_breakpoints(cpu);
kvm_insert_breakpoint(cpu, addr, 1, 1);
kvm_update_guest_debug(cpu, 0);
kvm_remove_all_breakpoints(cpu);
kvm_insert_breakpoint(cpu, addr, 1, 1);
kvm_update_guest_debug(cpu, 0);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, cr3);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, cr3);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3);
}
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
nyx_trace();
if (!acquired){
acquired = true;
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
//create_fast_snapshot(cpu, true);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP);
if (!acquired) {
acquired = true;
for(int i = 0; i < INTEL_PT_MAX_RANGES; i++){
if(GET_GLOBAL_STATE()->pt_ip_filter_configured[i]){
pt_enable_ip_filtering(cpu, i, true, false);
}
}
pt_init_decoder(cpu);
// create_fast_snapshot(cpu, true);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP);
qemu_mutex_lock_iothread();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
for (int i = 0; i < INTEL_PT_MAX_RANGES; i++) {
if (GET_GLOBAL_STATE()->pt_ip_filter_configured[i]) {
pt_enable_ip_filtering(cpu, i, true, false);
}
}
pt_init_decoder(cpu);
kvm_arch_get_registers(cpu);
GET_GLOBAL_STATE()->in_fuzzing_mode = true;
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
}
qemu_mutex_lock_iothread();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
synchronization_lock();
kvm_arch_get_registers(cpu);
kvm_arch_get_registers(cpu);
uint64_t cr3 = get_nested_host_cr3(cpu) & 0xFFFFFFFFFFFFF000ULL;
pt_set_cr3(cpu, cr3, false);
GET_GLOBAL_STATE()->parent_cr3 = cr3;
GET_GLOBAL_STATE()->in_fuzzing_mode = true;
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
}
if(GET_GLOBAL_STATE()->dump_page){
set_page_dump_bp_nested(cpu, cr3, GET_GLOBAL_STATE()->dump_page_addr);
}
synchronization_lock();
kvm_nested_get_info(cpu);
kvm_arch_get_registers(cpu);
synchronization_enter_fuzzing_loop(cpu);
uint64_t cr3 = get_nested_host_cr3(cpu) & 0xFFFFFFFFFFFFF000ULL;
pt_set_cr3(cpu, cr3, false);
GET_GLOBAL_STATE()->parent_cr3 = cr3;
return;
if (GET_GLOBAL_STATE()->dump_page) {
set_page_dump_bp_nested(cpu, cr3, GET_GLOBAL_STATE()->dump_page_addr);
}
kvm_nested_get_info(cpu);
synchronization_enter_fuzzing_loop(cpu);
return;
}

View File

@ -1,11 +1,23 @@
#pragma once
#pragma once
#include <stdint.h>
/* HyperTrash! */
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);

View File

@ -1,329 +1,353 @@
#include "qemu/osdep.h"
#include <errno.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <sys/file.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/mman.h>
#include <assert.h>
#include "nyx/page_cache.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/memory_access.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"
#include <assert.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <errno.h>
#include <stdio.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#define PAGE_CACHE_ADDR_LINE_SIZE sizeof(uint64_t)
#define UNMAPPED_PAGE 0xFFFFFFFFFFFFFFFFULL
static bool reload_addresses(page_cache_t* self){
khiter_t k;
int ret;
uint64_t addr, offset;
uint64_t value = 0;
static bool reload_addresses(page_cache_t *self)
{
khiter_t k;
int ret;
uint64_t addr, offset;
uint64_t value = 0;
size_t self_offset = lseek(self->fd_address_file, 0, SEEK_END);
size_t self_offset = lseek(self->fd_address_file, 0, SEEK_END);
if(self_offset != self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE){
/* reload page cache from disk */
lseek(self->fd_address_file, self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE, SEEK_SET);
offset = self->num_pages;
while(read(self->fd_address_file, &value, PAGE_CACHE_ADDR_LINE_SIZE)){
addr = value & 0xFFFFFFFFFFFFF000ULL;
offset++;
if (self_offset != self->num_pages * PAGE_CACHE_ADDR_LINE_SIZE) {
/* reload page cache from disk */
lseek(self->fd_address_file, self->num_pages * PAGE_CACHE_ADDR_LINE_SIZE,
SEEK_SET);
offset = self->num_pages;
while (read(self->fd_address_file, &value, PAGE_CACHE_ADDR_LINE_SIZE)) {
addr = value & 0xFFFFFFFFFFFFF000ULL;
offset++;
/* put new addresses and offsets into the hash map */
k = kh_get(PC_CACHE, self->lookup, addr);
if(k == kh_end(self->lookup)){
/* put new addresses and offsets into the hash map */
k = kh_get(PC_CACHE, self->lookup, addr);
if (k == kh_end(self->lookup)) {
if (value & 0xFFF) {
fprintf(stderr, "Load page: %lx (UNMAPPED)\n", addr);
} else {
k = kh_put(PC_CACHE, self->lookup, addr, &ret);
kh_value(self->lookup, k) = (offset - 1) * PAGE_SIZE;
}
} else {
/* likely a bug / race condition in page_cache itself! */
fprintf(stderr,
"----------> Page duplicate found ...skipping! %lx\n", addr);
// abort();
}
}
if(value & 0xFFF){
fprintf(stderr, "Load page: %lx (UNMAPPED)\n", addr);
}
else{
k = kh_put(PC_CACHE, self->lookup, addr, &ret);
kh_value(self->lookup, k) = (offset-1)*PAGE_SIZE;
}
}
else{
/* likely a bug / race condition in page_cache itself! */
fprintf(stderr, "----------> Page duplicate found ...skipping! %lx\n", addr);
//abort();
}
}
/* reload page dump file */
munmap(self->page_data, self->num_pages * PAGE_SIZE);
self->num_pages = self_offset / PAGE_CACHE_ADDR_LINE_SIZE;
self->page_data = mmap(NULL, (self->num_pages) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);
/* reload page dump file */
munmap(self->page_data, self->num_pages*PAGE_SIZE);
self->num_pages = self_offset/PAGE_CACHE_ADDR_LINE_SIZE;
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
return true;
}
return true;
}
return false;
return false;
}
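reload_addresses() re-synchronizes this instance with the shared on-disk cache: the .addr file holds one uint64_t per cached page, in append order, and page i's contents sit at byte offset i * PAGE_SIZE of the mmap'ed .dump file. A sketch of the record format the loop assumes (helper names are illustrative):

/* one .addr record per cached page (sketch) */
static inline uint64_t record_page(uint64_t v)     { return v & 0xFFFFFFFFFFFFF000ULL; }
static inline bool     record_unmapped(uint64_t v) { return (v & 0xFFF) != 0; }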
static bool append_page(page_cache_t* self, uint64_t page, uint64_t cr3){
bool success = true;
if(!self->num_pages){
assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
else{
munmap(self->page_data, self->num_pages*PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
static bool append_page(page_cache_t *self, uint64_t page, uint64_t cr3)
{
bool success = true;
if (!self->num_pages) {
assert(!ftruncate(self->fd_page_file, (self->num_pages + 1) * PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages + 1) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);
} else {
munmap(self->page_data, self->num_pages * PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages + 1) * PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages + 1) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);
}
if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){
if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
if (!dump_page_cr3_ht(page, self->page_data + (PAGE_SIZE * self->num_pages),
self->cpu, GET_GLOBAL_STATE()->pt_c3_filter))
{
if (!dump_page_cr3_ht(page, self->page_data + (PAGE_SIZE * self->num_pages),
self->cpu, GET_GLOBAL_STATE()->parent_cr3))
{
if (!dump_page_cr3_snapshot(page,
self->page_data + (PAGE_SIZE * self->num_pages),
self->cpu, GET_GLOBAL_STATE()->parent_cr3))
{
munmap(self->page_data, (self->num_pages + 1) * PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages) * PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);
munmap(self->page_data, (self->num_pages+1)*PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
success = false;
return success;
}
}
}
fsync(self->fd_page_file);
self->num_pages++;
return success;
success = false;
return success;
}
}
}
fsync(self->fd_page_file);
self->num_pages++;
return success;
}
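append_page() grows the dump file by one page and then tries three sources in order: the active PT CR3 filter against live memory, the parent (L1 host) CR3 against live memory, and finally the parent CR3 against the root snapshot; if all three dumps fail, the mapping is shrunk back so the cache never contains a half-written page. The nested ifs reduce to a short-circuit OR (a sketch; dst stands for self->page_data + PAGE_SIZE * self->num_pages):

bool dumped = dump_page_cr3_ht(page, dst, cpu, GET_GLOBAL_STATE()->pt_c3_filter) ||
              dump_page_cr3_ht(page, dst, cpu, GET_GLOBAL_STATE()->parent_cr3) ||
              dump_page_cr3_snapshot(page, dst, cpu, GET_GLOBAL_STATE()->parent_cr3);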
static void page_cache_lock(page_cache_t* self){
int ret = 0;
while (true){
ret = flock(self->fd_lock, LOCK_EX);
if (ret == 0){
return;
}
else if (ret == EINTR){
/* try again if acquiring this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
}
else{
assert(false);
}
}
static void page_cache_lock(page_cache_t *self)
{
int ret = 0;
while (true) {
ret = flock(self->fd_lock, LOCK_EX);
if (ret == 0) {
return;
} else if (ret == EINTR) {
/* try again if acquiring this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
} else {
assert(false);
}
}
}
static void page_cache_unlock(page_cache_t* self){
int ret = 0;
while (true){
ret = flock(self->fd_lock, LOCK_UN);
if (ret == 0){
return;
}
else if (ret == EINTR){
/* try again if releasing this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
}
else{
assert(false);
}
}
static void page_cache_unlock(page_cache_t *self)
{
int ret = 0;
while (true) {
ret = flock(self->fd_lock, LOCK_UN);
if (ret == 0) {
return;
} else if (ret == EINTR) {
/* try again if releasing this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
} else {
assert(false);
}
}
}
static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){
page_cache_lock(self);
static bool update_page_cache(page_cache_t *self, uint64_t page, khiter_t *k)
{
page_cache_lock(self);
if(reload_addresses(self)){
*k = kh_get(PC_CACHE, self->lookup, page);
}
if (reload_addresses(self)) {
*k = kh_get(PC_CACHE, self->lookup, page);
}
if(*k == kh_end(self->lookup)){
int ret;
if (*k == kh_end(self->lookup)) {
int ret;
uint64_t cr3 = GET_GLOBAL_STATE()->parent_cr3;
if(!is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->parent_cr3) && !is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){
/* TODO! */
}
uint64_t cr3 = GET_GLOBAL_STATE()->parent_cr3;
if (!is_addr_mapped_cr3_snapshot(page, self->cpu,
GET_GLOBAL_STATE()->parent_cr3) &&
!is_addr_mapped_cr3_snapshot(page, self->cpu,
GET_GLOBAL_STATE()->pt_c3_filter))
{
/* TODO! */
}
*k = kh_get(PC_CACHE, self->lookup, page);
if(*k == kh_end(self->lookup) && reload_addresses(self)){
/* reload successful */
*k = kh_get(PC_CACHE, self->lookup, page);
}
else{
*k = kh_get(PC_CACHE, self->lookup, page);
if (*k == kh_end(self->lookup) && reload_addresses(self)) {
/* reload successful */
*k = kh_get(PC_CACHE, self->lookup, page);
} else {
if (append_page(self, page, cr3)) {
*k = kh_put(PC_CACHE, self->lookup, page, &ret);
assert(write(self->fd_address_file, &page,
PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
kh_value(self->lookup, *k) = (self->num_pages - 1) * PAGE_SIZE;
} else {
page_cache_unlock(self);
return false;
}
if(append_page(self, page, cr3)){
*k = kh_put(PC_CACHE, self->lookup, page, &ret);
assert(write(self->fd_address_file, &page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
kh_value(self->lookup, *k) = (self->num_pages-1)*PAGE_SIZE;
}
else{
page_cache_unlock(self);
return false;
}
*k = kh_get(PC_CACHE, self->lookup, page);
}
}
page_cache_unlock(self);
return true;
*k = kh_get(PC_CACHE, self->lookup, page);
}
}
page_cache_unlock(self);
return true;
}
uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode){
page &= 0xFFFFFFFFFFFFF000ULL;
uint64_t page_cache_fetch(page_cache_t *self, uint64_t page, bool *success, bool test_mode)
{
page &= 0xFFFFFFFFFFFFF000ULL;
if (self->last_page == page){
*success = true;
return self->last_addr;
}
khiter_t k;
k = kh_get(PC_CACHE, self->lookup, page);
if(k == kh_end(self->lookup)){
if(test_mode || update_page_cache(self, page, &k) == false){
*success = false;
return 0;
}
}
if (self->last_page == page) {
*success = true;
return self->last_addr;
}
self->last_page = page;
khiter_t k;
k = kh_get(PC_CACHE, self->lookup, page);
if (k == kh_end(self->lookup)) {
if (test_mode || update_page_cache(self, page, &k) == false) {
*success = false;
return 0;
}
}
if(kh_value(self->lookup, k) == UNMAPPED_PAGE){
self->last_addr = UNMAPPED_PAGE;
}
else{
self->last_addr = (uint64_t)self->page_data+kh_value(self->lookup, k);
}
self->last_page = page;
*success = true;
return self->last_addr;
if (kh_value(self->lookup, k) == UNMAPPED_PAGE) {
self->last_addr = UNMAPPED_PAGE;
} else {
self->last_addr = (uint64_t)self->page_data + kh_value(self->lookup, k);
}
*success = true;
return self->last_addr;
}
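page_cache_fetch() memoizes the last translation (last_page/last_addr) and otherwise falls back to the hash map, faulting the page into the cache on a miss unless test_mode is set. Typical use, as a sketch: translate a guest address to a host pointer into the mmap'ed dump, then index with the page offset:

bool ok = false;
uint8_t *host = (uint8_t *)page_cache_fetch(self, rip, &ok, false);
if (ok && (uint64_t)host != UNMAPPED_PAGE) {
    uint8_t first_byte = host[rip & 0xFFF]; /* byte at the original address */
    (void)first_byte;
}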
/* FIXME */
uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success){
return page_cache_fetch(self, page, success, false);
uint64_t page_cache_fetch2(page_cache_t *self, uint64_t page, bool *success)
{
return page_cache_fetch(self, page, success, false);
}
page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file){
page_cache_t* self = malloc(sizeof(page_cache_t));
page_cache_t *page_cache_new(CPUState *cpu, const char *cache_file)
{
page_cache_t *self = malloc(sizeof(page_cache_t));
char* tmp1;
char* tmp2;
char* tmp3;
assert(asprintf(&tmp1, "%s.dump", cache_file) != -1);
assert(asprintf(&tmp2, "%s.addr", cache_file) != -1);
assert(asprintf(&tmp3, "%s.lock", cache_file) != -1);
char *tmp1;
char *tmp2;
char *tmp3;
assert(asprintf(&tmp1, "%s.dump", cache_file) != -1);
assert(asprintf(&tmp2, "%s.addr", cache_file) != -1);
assert(asprintf(&tmp3, "%s.lock", cache_file) != -1);
self->lookup = kh_init(PC_CACHE);
self->fd_page_file = open(tmp1, O_CLOEXEC | O_RDWR, S_IRWXU);
self->fd_address_file = open(tmp2, O_CLOEXEC | O_RDWR, S_IRWXU);
self->lookup = kh_init(PC_CACHE);
self->fd_page_file = open(tmp1, O_CLOEXEC | O_RDWR, S_IRWXU);
self->fd_address_file = open(tmp2, O_CLOEXEC | O_RDWR, S_IRWXU);
self->cpu = cpu;
self->fd_lock = open(tmp3, O_CLOEXEC);
assert(self->fd_lock > 0);
self->cpu = cpu;
self->fd_lock = open(tmp3, O_CLOEXEC);
assert(self->fd_lock > 0);
memset(self->disassemble_cache, 0x0, 16);
memset(self->disassemble_cache, 0x0, 16);
self->page_data = NULL;
self->num_pages = 0;
self->page_data = NULL;
self->num_pages = 0;
self->last_page = 0xFFFFFFFFFFFFFFFF;
self->last_addr = 0xFFFFFFFFFFFFFFFF;
self->last_page = 0xFFFFFFFFFFFFFFFF;
self->last_addr = 0xFFFFFFFFFFFFFFFF;
nyx_debug_p(PAGE_CACHE_PREFIX, "%s (%s - %s)", __func__, tmp1, tmp2);
nyx_debug_p(PAGE_CACHE_PREFIX, "%s (%s - %s)", __func__, tmp1, tmp2);
free(tmp3);
free(tmp2);
free(tmp1);
free(tmp3);
free(tmp2);
free(tmp1);
if (cs_open(CS_ARCH_X86, CS_MODE_16, &self->handle_16) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_16, &self->handle_16) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_32, &self->handle_32) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_32, &self->handle_32) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_64, &self->handle_64) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_64, &self->handle_64) != CS_ERR_OK)
assert(false);
cs_option(self->handle_16, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_32, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_64, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_16, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_32, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_64, CS_OPT_DETAIL, CS_OPT_ON);
return self;
return self;
}
bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn){
return true;
bool page_cache_disassemble(page_cache_t *self, uint64_t address, cs_insn **insn)
{
return true;
}
cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode){
switch(mode){
case mode_16:
return cs_malloc(self->handle_16);
case mode_32:
return cs_malloc(self->handle_32);
case mode_64:
return cs_malloc(self->handle_64);
default:
assert(false);
}
return NULL;
cs_insn *page_cache_cs_malloc(page_cache_t *self, disassembler_mode_t mode)
{
switch (mode) {
case mode_16:
return cs_malloc(self->handle_16);
case mode_32:
return cs_malloc(self->handle_32);
case mode_64:
return cs_malloc(self->handle_64);
default:
assert(false);
}
return NULL;
}
bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode){
*failed_page = 0xFFFFFFFFFFFFFFFFULL;
bool page_cache_disassemble_iter(page_cache_t *self,
uint64_t *address,
cs_insn *insn,
uint64_t *failed_page,
disassembler_mode_t mode)
{
*failed_page = 0xFFFFFFFFFFFFFFFFULL;
bool success = true;
size_t code_size = 16;
bool success = true;
size_t code_size = 16;
uint8_t* code = (uint8_t*)page_cache_fetch(self, *address, &success, false);
uint8_t* code_ptr = 0;
uint8_t *code = (uint8_t *)page_cache_fetch(self, *address, &success, false);
uint8_t *code_ptr = 0;
csh* current_handle = NULL;
csh *current_handle = NULL;
switch(mode){
case mode_16:
current_handle = &self->handle_16;
break;
case mode_32:
current_handle = &self->handle_32;
break;
case mode_64:
current_handle = &self->handle_64;
break;
default:
assert(false);
}
switch (mode) {
case mode_16:
current_handle = &self->handle_16;
break;
case mode_32:
current_handle = &self->handle_32;
break;
case mode_64:
current_handle = &self->handle_64;
break;
default:
assert(false);
}
if (code == (void*)UNMAPPED_PAGE || success == false){
*failed_page = *address;
return false;
}
if (code == (void *)UNMAPPED_PAGE || success == false) {
*failed_page = *address;
return false;
}
if ((*address & 0xFFF) >= (0x1000-16)){
memcpy((void*)self->disassemble_cache, (void*)((uint64_t)code+(0x1000-16)), 16);
code_ptr = self->disassemble_cache + 0xf-(0xfff-(*address&0xfff));
code = (uint8_t*)page_cache_fetch(self, *address+0x1000, &success, false);
if ((*address & 0xFFF) >= (0x1000 - 16)) {
memcpy((void *)self->disassemble_cache,
(void *)((uint64_t)code + (0x1000 - 16)), 16);
code_ptr = self->disassemble_cache + 0xf - (0xfff - (*address & 0xfff));
code = (uint8_t *)page_cache_fetch(self, *address + 0x1000, &success, false);
if(success == true){
memcpy((void*)(self->disassemble_cache+16), (void*)code, 16);
return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
}
else{
code_size = (0xfff-(*address&0xfff));
if(!cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn)){
*failed_page = (*address+0x1000) & 0xFFFFFFFFFFFFF000ULL;
return false;
}
return true;
}
}
else {
code_ptr = code + (*address&0xFFF);
return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
}
if (success == true) {
memcpy((void *)(self->disassemble_cache + 16), (void *)code, 16);
return cs_disasm_iter(*current_handle, (const uint8_t **)&code_ptr,
&code_size, address, insn);
} else {
code_size = (0xfff - (*address & 0xfff));
if (!cs_disasm_iter(*current_handle, (const uint8_t **)&code_ptr,
&code_size, address, insn))
{
*failed_page = (*address + 0x1000) & 0xFFFFFFFFFFFFF000ULL;
return false;
}
return true;
}
} else {
code_ptr = code + (*address & 0xFFF);
return cs_disasm_iter(*current_handle, (const uint8_t **)&code_ptr,
&code_size, address, insn);
}
}

View File

@ -1,8 +1,8 @@
#pragma once
#include "khash.h"
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include "khash.h"
#include <libxdc.h>
#include "qemu-common.h"
@ -10,30 +10,37 @@
KHASH_MAP_INIT_INT64(PC_CACHE, uint64_t)
typedef struct page_cache_s{
CPUState *cpu;
khash_t(PC_CACHE) *lookup;
int fd_page_file;
int fd_address_file;
int fd_lock;
uint8_t disassemble_cache[32];
void* page_data;
uint32_t num_pages;
typedef struct page_cache_s {
CPUState *cpu;
khash_t(PC_CACHE) * lookup;
int fd_page_file;
int fd_address_file;
int fd_lock;
uint8_t disassemble_cache[32];
void *page_data;
uint32_t num_pages;
csh handle_16;
csh handle_32;
csh handle_64;
csh handle_16;
csh handle_32;
csh handle_64;
uint64_t last_page;
uint64_t last_addr;
uint64_t last_page;
uint64_t last_addr;
} page_cache_t;
page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file);
uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode);
page_cache_t *page_cache_new(CPUState *cpu, const char *cache_file);
uint64_t page_cache_fetch(page_cache_t *self,
uint64_t page,
bool *success,
bool test_mode);
bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn);
bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode);
bool page_cache_disassemble(page_cache_t *self, uint64_t address, cs_insn **insn);
bool page_cache_disassemble_iter(page_cache_t *self,
uint64_t *address,
cs_insn *insn,
uint64_t *failed_page,
disassembler_mode_t mode);
cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode);
cs_insn *page_cache_cs_malloc(page_cache_t *self, disassembler_mode_t mode);
uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success);
uint64_t page_cache_fetch2(page_cache_t *self, uint64_t page, bool *success);

View File

@ -1,10 +1,12 @@
#include "patcher.h"
#include "nyx/memory_access.h"
#include "nyx/disassembler.h"
#include "debug.h"
#include "nyx/disassembler.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"
uint8_t cmp_patch_data[] = { 0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE]=0x90 }; // CMP AL,AL; NOP, NOP ...
uint8_t cmp_patch_data[] = {
0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE] = 0x90
}; // CMP AL,AL; NOP, NOP ...
const uint8_t *cmp_patch = &cmp_patch_data[0];
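cmp_patch_data relies on GCC's range-designator extension: bytes 0-1 encode CMP AL,AL (0x38 0xC0, a compare that always sets ZF) and every byte from index 2 through MAX_INSTRUCTION_SIZE is a NOP (0x90), so a patched compare site decodes as compare-equal plus padding regardless of the original instruction's length. The equivalent explicit initializer, assuming MAX_INSTRUCTION_SIZE == 15 (the x86 limit):

uint8_t cmp_patch_expanded[16] = {
    0x38, 0xC0,                                     /* cmp al, al */
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,       /* nop padding ... */
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};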
/*
@ -14,106 +16,128 @@ static void _patcher_apply_patch(patcher_t *self, size_t index);
static void _patcher_restore_patch(patcher_t *self, size_t index);
static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr);
static void _patcher_save_patch(patcher_t *self,
size_t index,
uint8_t *data,
size_t instruction_size,
uint64_t addr);
static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn id);
static size_t _patcher_disassemble_size(patcher_t *self,
uint8_t *data,
uint64_t addr,
x86_insn id);
static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches);
static void _patcher_free_patch_infos(patcher_t *self);
static redqueen_t* _redq_ptr(patcher_t *self);
static redqueen_t *_redq_ptr(patcher_t *self);
/*
* Public Functions
*/
patcher_t* patcher_new(CPUState *cpu){
patcher_t *res = malloc(sizeof(patcher_t));
res->cpu = cpu;
res->num_patches = 0;
res->patches = NULL;
patcher_t *patcher_new(CPUState *cpu)
{
patcher_t *res = malloc(sizeof(patcher_t));
res->cpu = cpu;
res->num_patches = 0;
res->patches = NULL;
res->is_currently_applied = false;
return res;
}
void patcher_free(patcher_t* self){
void patcher_free(patcher_t *self)
{
assert(!self->is_currently_applied);
_patcher_free_patch_infos(self);
free(self);
}
void patcher_apply_all(patcher_t *self){
assert(!self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
//assert(patcher_validate_patches(self));
for(size_t i=0; i < self->num_patches; i++){
_patcher_apply_patch(self, i);
}
self->is_currently_applied = true;
}
void patcher_restore_all(patcher_t *self){
assert(self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
//assert(patcher_validate_patches(self));
for(size_t i = 0; i < self->num_patches; i++){
_patcher_restore_patch(self, i);
}
self->is_currently_applied = false;
}
void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs){
_patcher_free_patch_infos(self);
_patcher_alloc_patch_infos(self, num_addrs);
uint8_t curr_instruction_code[MAX_INSTRUCTION_SIZE];
memset(&curr_instruction_code[0], 0, MAX_INSTRUCTION_SIZE);
for(size_t i=0; i < self->num_patches; i++){
//nyx_debug_p(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
if( read_virtual_memory(addrs[i], &curr_instruction_code[0], MAX_INSTRUCTION_SIZE, self->cpu) ) {
size_t size =_patcher_disassemble_size(self, &curr_instruction_code[0], addrs[i], X86_INS_CMP);
assert(size != 0); //cs_open failed, shouldn't happen
_patcher_save_patch(self, i, &curr_instruction_code[0], size, addrs[i]);
void patcher_apply_all(patcher_t *self)
{
assert(!self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
// assert(patcher_validate_patches(self));
for (size_t i = 0; i < self->num_patches; i++) {
_patcher_apply_patch(self, i);
}
}
self->is_currently_applied = true;
}
static void print_hexdump(const uint8_t* addr, size_t size){
for(size_t i = 0; i < size; i++){
printf (" %02x", addr[i]);
}
printf("\n");
void patcher_restore_all(patcher_t *self)
{
assert(self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
// assert(patcher_validate_patches(self));
for (size_t i = 0; i < self->num_patches; i++) {
_patcher_restore_patch(self, i);
}
self->is_currently_applied = false;
}
bool patcher_validate_patches(patcher_t *self){
bool was_rq = _redq_ptr(self)->hooks_applied;
if(was_rq)
redqueen_remove_hooks(_redq_ptr(self));
if(!self->patches){return true;}
for(size_t i=0; i<self->num_patches; i++){
uint8_t buf[MAX_INSTRUCTION_SIZE];
read_virtual_memory(self->patches[i].addr, &buf[0], MAX_INSTRUCTION_SIZE, self->cpu);
const uint8_t* should_value = NULL;
if(self->is_currently_applied){
should_value = cmp_patch;
} else {
should_value = &self->patches[i].orig_bytes[0];
}
void patcher_set_addrs(patcher_t *self, uint64_t *addrs, size_t num_addrs)
{
_patcher_free_patch_infos(self);
_patcher_alloc_patch_infos(self, num_addrs);
uint8_t curr_instruction_code[MAX_INSTRUCTION_SIZE];
memset(&curr_instruction_code[0], 0, MAX_INSTRUCTION_SIZE);
nyx_debug_p(REDQUEEN_PREFIX, "Validating, mem:");
print_hexdump(&buf[0], self->patches[i].size);
nyx_debug_p(REDQUEEN_PREFIX, "should_be:");
print_hexdump(should_value, self->patches[i].size);
if(0 != memcmp(&buf[0], should_value, self->patches[i].size)){
nyx_debug_p(REDQUEEN_PREFIX, "validating patches failed self->is_currently_applied = %d", self->is_currently_applied);
return false;
for (size_t i = 0; i < self->num_patches; i++) {
// nyx_debug_p(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
if (read_virtual_memory(addrs[i], &curr_instruction_code[0],
MAX_INSTRUCTION_SIZE, self->cpu))
{
size_t size = _patcher_disassemble_size(self, &curr_instruction_code[0],
addrs[i], X86_INS_CMP);
assert(size != 0); // cs_open failed, shouldn't happen
_patcher_save_patch(self, i, &curr_instruction_code[0], size, addrs[i]);
}
}
}
if(was_rq)
redqueen_insert_hooks(_redq_ptr(self));
return true;
}
static void print_hexdump(const uint8_t *addr, size_t size)
{
for (size_t i = 0; i < size; i++) {
printf(" %02x", addr[i]);
}
printf("\n");
}
bool patcher_validate_patches(patcher_t *self)
{
bool was_rq = _redq_ptr(self)->hooks_applied;
if (was_rq)
redqueen_remove_hooks(_redq_ptr(self));
if (!self->patches) {
return true;
}
for (size_t i = 0; i < self->num_patches; i++) {
uint8_t buf[MAX_INSTRUCTION_SIZE];
read_virtual_memory(self->patches[i].addr, &buf[0], MAX_INSTRUCTION_SIZE,
self->cpu);
const uint8_t *should_value = NULL;
if (self->is_currently_applied) {
should_value = cmp_patch;
} else {
should_value = &self->patches[i].orig_bytes[0];
}
nyx_debug_p(REDQUEEN_PREFIX, "Validating, mem:");
print_hexdump(&buf[0], self->patches[i].size);
nyx_debug_p(REDQUEEN_PREFIX, "should_be:");
print_hexdump(should_value, self->patches[i].size);
if (0 != memcmp(&buf[0], should_value, self->patches[i].size)) {
nyx_debug_p(REDQUEEN_PREFIX,
"validating patches failed self->is_currently_applied = %d",
self->is_currently_applied);
return false;
}
}
if (was_rq)
redqueen_insert_hooks(_redq_ptr(self));
return true;
}
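/*
 * Usage sketch (illustrative, not part of this commit): the call order the
 * asserts above imply. `cpu` and `addrs` are hypothetical inputs; note that
 * in this revision _patcher_apply_patch() and _patcher_restore_patch() are
 * deprecated abort() stubs, so only the set/validate path is exercised here.
 */
static void example_patcher_usage(CPUState *cpu, uint64_t *addrs, size_t n)
{
    patcher_t *p = patcher_new(cpu);
    patcher_set_addrs(p, addrs, n); /* saves original bytes; addrs is not owned */
    assert(patcher_validate_patches(p)); /* nothing applied yet: memory == orig_bytes */
    patcher_free(p);
}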
@ -121,60 +145,77 @@ bool patcher_validate_patches(patcher_t *self){
* Private Helper Functions Definitions
*/
static void _patcher_apply_patch(patcher_t *self, size_t index)
{
abort(); // deprecated function -> remove this code later
}
static void _patcher_restore_patch(patcher_t *self, size_t index)
{
abort(); // deprecated function -> remove this code later
}
static void _patcher_save_patch(patcher_t *self,
size_t index,
uint8_t *data,
size_t instruction_size,
uint64_t addr)
{
assert(instruction_size >= 2);
assert(instruction_size < MAX_INSTRUCTION_SIZE);
patch_info_t *info = &self->patches[index];
memset(&info->orig_bytes[0], 0, MAX_INSTRUCTION_SIZE);
memcpy(&info->orig_bytes[0], data, instruction_size);
info->addr = addr;
info->size = instruction_size;
}
static size_t _patcher_disassemble_size(patcher_t *self,
uint8_t *data,
uint64_t addr,
x86_insn type)
{
csh handle;
if (cs_open(CS_ARCH_X86,
get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width),
&handle) == CS_ERR_OK)
{
cs_insn *insn = cs_malloc(handle);
uint8_t *cur_offset = data;
uint64_t cs_address = addr;
uint64_t code_size = MAX_INSTRUCTION_SIZE;
cs_disasm_iter(handle, (const uint8_t **)&cur_offset, &code_size,
&cs_address, insn);
size_t size = insn->size;
if (type != X86_INS_INVALID) {
assert(insn->id == type);
}
cs_free(insn, 1);
cs_close(&handle);
return size;
}
return 0;
}
static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches)
{
assert(self->num_patches == 0);
assert(self->patches == NULL);
assert(num_patches < 10000);
self->num_patches = num_patches;
self->patches = malloc(sizeof(patch_info_t) * num_patches);
}
static void _patcher_free_patch_infos(patcher_t *self)
{
assert(!self->is_currently_applied);
free(self->patches);
self->patches = NULL;
self->num_patches = 0;
}
static redqueen_t *_redq_ptr(patcher_t *self)
{
redqueen_t *res = GET_GLOBAL_STATE()->redqueen_state; // self->cpu->redqueen_state;
return res;
}

View File

@ -1,8 +1,8 @@
#ifndef __GUARD_REDQUEEN_PATCHER_STRUCT__
#define __GUARD_REDQUEEN_PATCHER_STRUCT__
#include <stddef.h>
#include <stdint.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
@ -15,24 +15,23 @@
* which always evaluates to true. This can be used to remove hash checks that
* we suspect can later on be patched.
*/
extern const uint8_t *cmp_patch;
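/*
 * Illustration (assumed encoding, not taken from this diff): one byte pair
 * with the "always true" property is `cmp al, al` (0x38 0xC0), which forces
 * ZF=1 so a following `je` is always taken.
 */
static const uint8_t example_cmp_al_al[2] = { 0x38, 0xC0 }; /* cmp al, al */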
typedef struct patch_info_s {
uint64_t addr;
size_t size;
uint8_t orig_bytes[MAX_INSTRUCTION_SIZE];
} patch_info_t;
typedef struct patcher_s {
CPUState *cpu;
patch_info_t *patches;
size_t num_patches;
bool is_currently_applied;
} patcher_t;
patcher_t *patcher_new(CPUState *cpu);
void patcher_free(patcher_t *self);
@ -40,8 +39,8 @@ void patcher_apply_all(patcher_t *self);
void patcher_restore_all(patcher_t *self);
// Doesn't take ownership of addrs
void patcher_set_addrs(patcher_t *self, uint64_t *addrs, size_t num_addrs);
bool patcher_validate_patches(patcher_t *self);

571
nyx/pt.c
View File

@ -53,320 +53,365 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define PT_BUFFER_MMAP_ADDR 0x3ffff0000000
static void pt_set(CPUState *cpu, run_on_cpu_data arg)
{
asm volatile("" ::: "memory");
}
static inline int pt_cmd_hmp_context(CPUState *cpu, uint64_t cmd)
{
cpu->pt_ret = -1;
if (pt_hypercalls_enabled()) {
nyx_debug_p(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing "
"mode is enabled (-kafl)!");
} else {
cpu->pt_cmd = cmd;
run_on_cpu(cpu, pt_set, RUN_ON_CPU_NULL);
}
return cpu->pt_ret;
}
static int pt_cmd(CPUState *cpu, uint64_t cmd, bool hmp_mode)
{
if (hmp_mode) {
return pt_cmd_hmp_context(cpu, cmd);
} else {
cpu->pt_cmd = cmd;
pt_pre_kvm_run(cpu);
return cpu->pt_ret;
}
}
static inline int pt_ioctl(int fd, unsigned long request, unsigned long arg)
{
if (!fd) {
return -EINVAL;
}
return ioctl(fd, request, arg);
}
void pt_dump(CPUState *cpu, int bytes)
{
if (!(GET_GLOBAL_STATE()->redqueen_state &&
GET_GLOBAL_STATE()->redqueen_state->intercept_mode))
{
if (GET_GLOBAL_STATE()->in_fuzzing_mode &&
GET_GLOBAL_STATE()->decoder_page_fault == false &&
GET_GLOBAL_STATE()->decoder && !GET_GLOBAL_STATE()->dump_page)
{
GET_GLOBAL_STATE()->pt_trace_size += bytes;
pt_write_pt_dump_file(cpu->pt_mmap, bytes);
decoder_result_t result =
libxdc_decode(GET_GLOBAL_STATE()->decoder, cpu->pt_mmap, bytes);
switch (result) {
case decoder_success:
break;
case decoder_success_pt_overflow:
cpu->intel_pt_run_trashed = true;
break;
case decoder_page_fault:
// fprintf(stderr, "Page not found => 0x%lx\n", libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder));
GET_GLOBAL_STATE()->decoder_page_fault = true;
GET_GLOBAL_STATE()->decoder_page_fault_addr =
libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder);
break;
case decoder_unkown_packet:
fprintf(stderr, "WARNING: libxdc_decode returned unknown_packet\n");
break;
case decoder_error:
fprintf(stderr, "WARNING: libxdc_decode returned decoder_error\n");
break;
}
}
}
}
int pt_enable(CPUState *cpu, bool hmp_mode)
{
if (!fast_reload_set_bitmap(get_fast_reload_snapshot())) {
coverage_bitmap_reset();
}
if (GET_GLOBAL_STATE()->trace_mode) {
redqueen_trace_reset();
alt_bitmap_reset();
}
pt_truncate_pt_dump_file();
return pt_cmd(cpu, KVM_VMX_PT_ENABLE, hmp_mode);
}
int pt_disable(CPUState *cpu, bool hmp_mode)
{
int r = pt_cmd(cpu, KVM_VMX_PT_DISABLE, hmp_mode);
return r;
}
int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode)
{
int r = 0;
if (val == GET_GLOBAL_STATE()->pt_c3_filter) {
return 0; // nothing changed
}
if (cpu->pt_enabled) {
return -EINVAL;
}
if (GET_GLOBAL_STATE()->pt_c3_filter && GET_GLOBAL_STATE()->pt_c3_filter != val) {
// nyx_debug_p(PT_PREFIX, "Reconfigure CR3-Filtering!");
GET_GLOBAL_STATE()->pt_c3_filter = val;
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_CR3, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_CR3, hmp_mode);
return r;
}
GET_GLOBAL_STATE()->pt_c3_filter = val;
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_CR3, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_CR3, hmp_mode);
return r;
}
int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode)
{
int r = 0;
if (addrn > 3) {
return -1;
}
if (cpu->pt_enabled) {
return -EINVAL;
}
if (GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] >
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn])
{
nyx_debug_p(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx",
GET_GLOBAL_STATE()->pt_ip_filter_a[addrn],
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
return -EINVAL;
}
if (GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn]) {
pt_disable_ip_filtering(cpu, addrn, hmp_mode);
}
nyx_debug_p(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)",
addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn],
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
if (GET_GLOBAL_STATE()->pt_ip_filter_configured[addrn] &&
GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] != 0 &&
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn] != 0)
{
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_ADDR0 + addrn, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_ADDR0 + addrn, hmp_mode);
GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = true;
}
return r;
}
void pt_init_decoder(CPUState *cpu)
{
uint64_t filters[4][2] = { 0 };
/* TODO time to clean up this code -.- */
filters[0][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[0];
filters[0][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[0];
filters[1][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[1];
filters[1][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[1];
filters[2][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[2];
filters[2][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[2];
filters[3][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[3];
filters[3][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[3];
assert(GET_GLOBAL_STATE()->decoder == NULL);
assert(GET_GLOBAL_STATE()->shared_bitmap_ptr != NULL);
assert(GET_GLOBAL_STATE()->shared_bitmap_size != 0);
GET_GLOBAL_STATE()->decoder =
libxdc_init(filters, (void *(*)(void *, uint64_t, bool *))page_cache_fetch2,
GET_GLOBAL_STATE()->page_cache,
GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_size);
libxdc_register_bb_callback(GET_GLOBAL_STATE()->decoder,
(void (*)(void *, disassembler_mode_t, uint64_t,
uint64_t))redqueen_callback,
GET_GLOBAL_STATE()->redqueen_state);
alt_bitmap_init(GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_size);
}
int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode)
{
int r = 0;
switch (addrn) {
case 0:
case 1:
case 2:
case 3:
r = pt_cmd(cpu, KVM_VMX_PT_DISABLE_ADDR0 + addrn, hmp_mode);
if (GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn]) {
GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = false;
}
break;
default:
r = -EINVAL;
}
return r;
}
void pt_kvm_init(CPUState *cpu)
{
cpu->pt_cmd = 0;
cpu->pt_enabled = false;
cpu->pt_fd = 0;
cpu->pt_decoder_state = NULL;
cpu->reload_pending = false;
cpu->intel_pt_run_trashed = false;
}
struct vmx_pt_filter_iprs {
__u64 a;
__u64 b;
};
pthread_mutex_t pt_dump_mutex = PTHREAD_MUTEX_INITIALIZER;
void pt_pre_kvm_run(CPUState *cpu)
{
pthread_mutex_lock(&pt_dump_mutex);
int ret;
struct vmx_pt_filter_iprs filter_iprs;
if (GET_GLOBAL_STATE()->patches_disable_pending) {
// nyx_debug_p(REDQUEEN_PREFIX, "patches disable");
assert(false); /* remove this branch */
GET_GLOBAL_STATE()->patches_disable_pending = false;
}
if (GET_GLOBAL_STATE()->patches_enable_pending) {
// nyx_debug_p(REDQUEEN_PREFIX, "patches enable");
assert(false); /* remove this branch */
GET_GLOBAL_STATE()->patches_enable_pending = false;
}
if (GET_GLOBAL_STATE()->redqueen_enable_pending) {
// nyx_debug_p(REDQUEEN_PREFIX, "rq enable");
if (GET_GLOBAL_STATE()->redqueen_state) {
enable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state);
}
GET_GLOBAL_STATE()->redqueen_enable_pending = false;
}
if (GET_GLOBAL_STATE()->redqueen_disable_pending) {
// nyx_debug_p(REDQUEEN_PREFIX, "rq disable");
if (GET_GLOBAL_STATE()->redqueen_state) {
disable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state);
}
GET_GLOBAL_STATE()->redqueen_disable_pending = false;
}
if (GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force)
{
if (!cpu->pt_fd) {
cpu->pt_fd = kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SETUP_FD, (unsigned long)0);
assert(cpu->pt_fd != -1);
ret = ioctl(cpu->pt_fd, KVM_VMX_PT_GET_TOPA_SIZE, (unsigned long)0x0);
cpu->pt_mmap = mmap((void *)PT_BUFFER_MMAP_ADDR, ret,
PROT_READ | PROT_WRITE, MAP_SHARED, cpu->pt_fd, 0);
assert(cpu->pt_mmap != (void *)0xFFFFFFFFFFFFFFFF);
// add an extra page to have enough space for an additional PT_TRACE_END byte
assert(mmap(cpu->pt_mmap + ret, 0x1000, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, -1,
0) == (void *)(cpu->pt_mmap + ret));
nyx_debug("=> pt_mmap: %p - %p\n", cpu->pt_mmap, cpu->pt_mmap + ret);
memset(cpu->pt_mmap + ret, 0x55, 0x1000);
}
if (cpu->pt_cmd) {
switch (cpu->pt_cmd) {
case KVM_VMX_PT_ENABLE:
if (cpu->pt_fd) {
/* dump for the very last time before enabling VMX_PT ... just in case */
ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW,
(unsigned long)0);
if (!ioctl(cpu->pt_fd, cpu->pt_cmd, 0)) {
cpu->pt_enabled = true;
}
}
break;
case KVM_VMX_PT_DISABLE:
if (cpu->pt_fd) {
ret = ioctl(cpu->pt_fd, cpu->pt_cmd, 0);
if (ret > 0) {
// nyx_debug_p(PT_PREFIX, "KVM_VMX_PT_DISABLE %d", ret);
pt_dump(cpu, ret);
cpu->pt_enabled = false;
}
}
break;
/* ip filtering configuration */
case KVM_VMX_PT_CONFIGURE_ADDR0:
case KVM_VMX_PT_CONFIGURE_ADDR1:
case KVM_VMX_PT_CONFIGURE_ADDR2:
case KVM_VMX_PT_CONFIGURE_ADDR3:
filter_iprs.a =
GET_GLOBAL_STATE()
->pt_ip_filter_a[(cpu->pt_cmd) - KVM_VMX_PT_CONFIGURE_ADDR0];
filter_iprs.b =
GET_GLOBAL_STATE()
->pt_ip_filter_b[(cpu->pt_cmd) - KVM_VMX_PT_CONFIGURE_ADDR0];
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)&filter_iprs);
break;
case KVM_VMX_PT_ENABLE_ADDR0:
case KVM_VMX_PT_ENABLE_ADDR1:
case KVM_VMX_PT_ENABLE_ADDR2:
case KVM_VMX_PT_ENABLE_ADDR3:
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0);
break;
case KVM_VMX_PT_CONFIGURE_CR3:
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd,
GET_GLOBAL_STATE()->pt_c3_filter);
break;
case KVM_VMX_PT_ENABLE_CR3:
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0);
break;
default:
if (cpu->pt_fd) {
ioctl(cpu->pt_fd, cpu->pt_cmd, 0);
}
break;
}
cpu->pt_cmd = 0;
cpu->pt_ret = 0;
}
}
pthread_mutex_unlock(&pt_dump_mutex);
}
void pt_handle_overflow(CPUState *cpu)
{
pthread_mutex_lock(&pt_dump_mutex);
int overflow = ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0);
if (overflow > 0) {
pt_dump(cpu, overflow);
}
pthread_mutex_unlock(&pt_dump_mutex);
}
void pt_post_kvm_run(CPUState *cpu)
{
if (GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force)
{
pt_handle_overflow(cpu);
}
}
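/*
 * Flow sketch (hypothetical driver loop, derived from the functions above):
 * each traced vcpu iteration brackets KVM_RUN with the pre/post hooks; the
 * post hook drains the ToPA buffer into pt_dump() for libxdc decoding.
 */
void example_vcpu_trace_step(CPUState *cpu)
{
    pt_pre_kvm_run(cpu);  /* applies pending PT/redqueen commands */
    /* ... kvm_vcpu_ioctl(cpu, KVM_RUN, 0) would run the guest here ... */
    pt_post_kvm_run(cpu); /* polls for ToPA overflow and decodes trace bytes */
}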

View File

@ -38,4 +38,3 @@ void pt_handle_overflow(CPUState *cpu);
void pt_dump(CPUState *cpu, int bytes);
#endif

File diff suppressed because it is too large

View File

@ -22,100 +22,120 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include "qemu/osdep.h"
#include "nyx/khash.h"
#include "nyx/page_cache.h"
#include "nyx/redqueen_trace.h"
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <linux/kvm.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
// #define RQ_DEBUG
#define REDQUEEN_MAX_STRCMP_LEN 64
#define REDQUEEN_TRAP_LIMIT 16
#define REG64_NUM 16
#define REG32_NUM 16
// seems we don't want to include rip, since this index is used to access the qemu cpu structure or something?
#define REG16_NUM 16
#define REG8L_NUM 16
#define REG8H_NUM 8
#define EXTRA_REG_RIP 16
#define EXTRA_REG_NOP 17
#define REDQUEEN_NO_INSTRUMENTATION 0
#define REDQUEEN_LIGHT_INSTRUMENTATION 1
#define REDQUEEN_SE_INSTRUMENTATION 2
#define REDQUEEN_WHITELIST_INSTRUMENTATION 3
enum reg_types {
RAX,
RCX,
RDX,
RBX,
RSP,
RBP,
RSI,
RDI,
R8,
R9,
R10,
R11,
R12,
R13,
R14,
R15
};
#define CMP_BITMAP_NOP 0x0000000UL
#define CMP_BITMAP_RQ_INSTRUCTION 0x1000000UL
#define CMP_BITMAP_SE_INSTRUCTION 0x2000000UL
#define CMP_BITMAP_BLACKLISTED 0x4000000UL
#define CMP_BITMAP_TRACE_ENABLED 0x8000000UL
#define CMP_BITMAP_SHOULD_HOOK_SE \
(CMP_BITMAP_SE_INSTRUCTION | CMP_BITMAP_TRACE_ENABLED)
#define CMP_BITMAP_SHOULD_HOOK_RQ (CMP_BITMAP_RQ_INSTRUCTION)
KHASH_MAP_INIT_INT64(RQ, uint32_t)
typedef struct redqueen_s {
khash_t(RQ) * lookup;
bool intercept_mode;
bool singlestep_enabled;
int hooks_applied;
CPUState *cpu;
uint64_t last_rip;
uint64_t next_rip;
uint64_t *breakpoint_whitelist;
uint64_t num_breakpoint_whitelist;
redqueen_trace_t *trace_state;
page_cache_t *page_cache;
} redqueen_t;
typedef struct redqueen_workdir_s {
char *redqueen_results;
char *symbolic_results;
char *pt_trace_results;
char *redqueen_patches;
char *breakpoint_white;
char *breakpoint_black;
char *target_code_dump;
} redqueen_workdir_t;
extern redqueen_workdir_t redqueen_workdir;
void setup_redqueen_workdir(char *workdir);
redqueen_t *new_rq_state(CPUState *cpu, page_cache_t *page_cache);
void destroy_rq_state(redqueen_t *self);
void set_rq_instruction(redqueen_t *self, uint64_t addr);
void set_rq_blacklist(redqueen_t *self, uint64_t addr);
void handle_hook(redqueen_t *self);
void handel_se_hook(redqueen_t *self);
void enable_rq_intercept_mode(redqueen_t *self);
void disable_rq_intercept_mode(redqueen_t *self);
void set_se_instruction(redqueen_t *self, uint64_t addr);
void dump_se_registers(redqueen_t *self);
void dump_se_memory_access(redqueen_t *self, cs_insn *insn);
void dump_se_return_access(redqueen_t *self, cs_insn *insn);
void dump_se_memory_access_at(redqueen_t *self, uint64_t instr_addr, uint64_t mem_addr);
void redqueen_insert_hooks(redqueen_t *self);
void redqueen_remove_hooks(redqueen_t *self);
void redqueen_callback(void *opaque,
disassembler_mode_t mode,
uint64_t start_addr,
uint64_t end_addr);

View File

@ -1,27 +1,29 @@
#include "qemu/osdep.h"
#include "redqueen_patch.h"
#include "redqueen.h"
#include "patcher.h"
#include "file_helper.h"
#include "debug.h"
#include "file_helper.h"
#include "patcher.h"
#include "redqueen.h"
/*
* Private Helper Functions Declarations
*/
void _load_and_set_patches(patcher_t *self);
/*
* Public Functions
*/
void pt_enable_patches(patcher_t *self)
{
_load_and_set_patches(self);
patcher_apply_all(self);
}
void pt_disable_patches(patcher_t *self)
{
patcher_restore_all(self);
}
@ -30,12 +32,13 @@ void pt_disable_patches(patcher_t *self){
*/
void _load_and_set_patches(patcher_t *self)
{
size_t num_addrs = 0;
uint64_t *addrs = NULL;
parse_address_file(redqueen_workdir.redqueen_patches, &num_addrs, &addrs);
if (num_addrs) {
patcher_set_addrs(self, addrs, num_addrs);
free(addrs);
}
}

View File

@ -1,42 +1,43 @@
#include "qemu/osdep.h"
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include "redqueen_trace.h"
#include "redqueen.h"
#include "redqueen_trace.h"
#include "state/state.h"
/* write full trace of edge transitions rather than sorted list? */
// #define KAFL_FULL_TRACES
int trace_fd = 0;
int redqueen_trace_enabled = false;
uint32_t alt_bitmap_size = 0;
uint8_t *alt_bitmap = NULL;
void alt_bitmap_init(void *ptr, uint32_t size)
{
if (redqueen_trace_enabled) {
alt_bitmap = (uint8_t *)ptr;
alt_bitmap_size = size;
}
}
void alt_bitmap_reset(void)
{
if (alt_bitmap) {
memset(alt_bitmap, 0x00, alt_bitmap_size);
}
}
static inline uint64_t mix_bits(uint64_t v)
{
v ^= (v >> 31);
v *= 0x7fb5d329728ea185;
return v;
}
/*
@ -45,143 +46,162 @@ static inline uint64_t mix_bits(uint64_t v) {
*/
static void alt_bitmap_add(uint64_t from, uint64_t to)
{
uint64_t transition_value;
if (GET_GLOBAL_STATE()->trace_mode) {
if (alt_bitmap) {
transition_value = mix_bits(to) ^ (mix_bits(from) >> 1);
alt_bitmap[transition_value & (alt_bitmap_size - 1)]++;
}
}
}
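/*
 * Worked example (illustrative): the counter index for an edge from -> to.
 * The masking above only behaves as a modulo if the bitmap size is a power
 * of two, which the shared coverage bitmap is expected to be.
 */
static inline uint32_t example_edge_index(uint64_t from, uint64_t to, uint32_t size)
{
    uint64_t transition_value = mix_bits(to) ^ (mix_bits(from) >> 1);
    return (uint32_t)(transition_value & (size - 1));
}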
static int reset_trace_fd(void)
{
if (trace_fd)
close(trace_fd);
trace_fd =
open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (trace_fd < 0) {
fprintf(stderr, "Failed to initiate trace output: %s\n", strerror(errno));
assert(0);
}
return trace_fd;
}
void redqueen_trace_init(void)
{
redqueen_trace_enabled = true;
}
redqueen_trace_t *redqueen_trace_new(void)
{
redqueen_trace_t *self = malloc(sizeof(redqueen_trace_t));
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
self->max_ordered_transitions = INIT_NUM_OF_STORED_TRANSITIONS;
self->ordered_transitions =
malloc(INIT_NUM_OF_STORED_TRANSITIONS * sizeof(uint128_t));
return self;
}
void redqueen_trace_free(redqueen_trace_t *self)
{
kh_destroy(RQ_TRACE, self->lookup);
free(self->ordered_transitions);
free(self);
}
void redqueen_trace_register_transition(redqueen_trace_t *self,
disassembler_mode_t mode,
uint64_t from,
uint64_t to)
{
khiter_t k;
int ret;
uint64_t exit_ip = 0xffffffffffffffff;
if (from != exit_ip && to != exit_ip)
alt_bitmap_add(from, to);
#ifdef KAFL_FULL_TRACES
assert(trace_fd >= 0);
dprintf(trace_fd, "%lx,%lx\n", from, to);
return;
#endif
uint128_t key = (((uint128_t)from) << 64) | ((uint128_t)to);
k = kh_get(RQ_TRACE, self->lookup, key);
if (k != kh_end(self->lookup)) {
kh_value(self->lookup, k) += 1;
} else {
k = kh_put(RQ_TRACE, self->lookup, key, &ret);
kh_value(self->lookup, k) = 1;
self->ordered_transitions[self->num_ordered_transitions] = key;
self->num_ordered_transitions++;
assert(self->num_ordered_transitions < self->max_ordered_transitions);
}
}
static void redqueen_trace_write(void)
{
#ifdef KAFL_FULL_TRACES
return;
#endif
redqueen_trace_t *self = GET_GLOBAL_STATE()->redqueen_state->trace_state;
assert(trace_fd >= 0);
for (size_t i = 0; i < self->num_ordered_transitions; i++) {
khiter_t k;
uint128_t key = self->ordered_transitions[i];
k = kh_get(RQ_TRACE, self->lookup, key);
assert(k != kh_end(self->lookup));
dprintf(trace_fd, "%lx,%lx,%lx\n", (uint64_t)(key >> 64), (uint64_t)key,
kh_value(self->lookup, k));
}
}
static void redqueen_state_reset(void)
{
redqueen_trace_t *self = GET_GLOBAL_STATE()->redqueen_state->trace_state;
kh_destroy(RQ_TRACE, self->lookup);
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
}
void redqueen_trace_reset(void)
{
if (redqueen_trace_enabled) {
redqueen_state_reset();
reset_trace_fd();
}
}
void redqueen_trace_flush(void)
{
if (redqueen_trace_enabled) {
redqueen_trace_write();
if (trace_fd)
fsync(trace_fd);
}
}
void redqueen_set_trace_mode(void)
{
if (redqueen_trace_enabled) {
libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder);
libxdc_register_edge_callback(GET_GLOBAL_STATE()->decoder,
(void (*)(void *, disassembler_mode_t,
uint64_t, uint64_t)) &
redqueen_trace_register_transition,
GET_GLOBAL_STATE()->redqueen_state->trace_state);
}
}
void redqueen_unset_trace_mode(void)
{
if (redqueen_trace_enabled) {
libxdc_disable_tracing(GET_GLOBAL_STATE()->decoder);
}
}
#ifdef DEBUG_MAIN
int main(int argc, char **argv)
{
redqueen_trace_t *rq_obj = redqueen_trace_new();
reset_trace_fd();
for (uint64_t j = 0; j < 0x5; j++) {
redqueen_trace_register_transition(rq_obj, 0xBADF, 0xC0FFEE);
redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE);
for (uint64_t i = 0; i < 0x10000; i++) {
redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE);
}
redqueen_trace_write(rq_obj, STDOUT_FILENO);
redqueen_trace_reset();
}
redqueen_trace_free(rq_obj);
return 0;
}
#endif

View File

@ -1,9 +1,9 @@
#pragma once
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "qemu/osdep.h"
@ -11,7 +11,7 @@
#include <libxdc.h>
typedef unsigned __int128 uint128_t;
typedef uint128_t khint128_t;
#define INIT_NUM_OF_STORED_TRANSITIONS 0xfffff
@ -20,8 +20,9 @@ typedef uint128_t khint128_t;
@param key The integer [khint64_t]
@return The hash value [khint_t]
*/
#define kh_int128_hash_func(key) \
(khint32_t)((key) >> 33 ^ (key) ^ (key) << 11) ^ \
(((key >> 64)) >> 33 ^ ((key >> 64)) ^ ((key >> 64)) << 11)
/*! @function
@abstract 64-bit integer comparison function
*/
@ -32,27 +33,31 @@ typedef uint128_t khint128_t;
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT128(name, khval_t) \
KHASH_INIT(name, khint128_t, khval_t, 1, kh_int128_hash_func, \
kh_int128_hash_equal)
KHASH_MAP_INIT_INT128(RQ_TRACE, uint64_t)
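/*
 * Sketch (mirrors the packing in redqueen_trace.c): an edge (from, to) is
 * folded into a single 128-bit key before kh_int128_hash_func() above is
 * applied to it.
 */
static inline khint128_t example_edge_key(uint64_t from, uint64_t to)
{
    return (((uint128_t)from) << 64) | ((uint128_t)to);
}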
#define INIT_TRACE_IP 0xFFFFFFFFFFFFFFFFULL
typedef struct redqueen_trace_s {
khash_t(RQ_TRACE) * lookup;
size_t num_ordered_transitions;
size_t max_ordered_transitions;
uint128_t *ordered_transitions;
} redqueen_trace_t;
/* libxdc outputs no bitmap in trace mode */
void alt_bitmap_reset(void);
void alt_bitmap_init(void *ptr, uint32_t size);
redqueen_trace_t *redqueen_trace_new(void);
void redqueen_trace_free(redqueen_trace_t *self);
void redqueen_trace_register_transition(redqueen_trace_t *self,
disassembler_mode_t mode,
uint64_t from,
uint64_t to);
void redqueen_trace_init(void);
void redqueen_set_trace_mode(void);

View File

@ -11,166 +11,171 @@
#include "nyx/debug.h"
#include "sharedir.h"
// #define SHAREDIR_DEBUG
sharedir_t *sharedir_new(void)
{
sharedir_t *self = malloc(sizeof(sharedir_t));
self->dir = NULL;
self->lookup = kh_init(SHAREDIR_LOOKUP);
self->last_file_f = NULL;
self->last_file_obj_ptr = NULL;
return self;
}
void sharedir_set_dir(sharedir_t *self, const char *dir)
{
assert(!self->dir);
assert(asprintf(&self->dir, "%s", dir) != -1);
}
static bool file_exits(const char *file)
{
struct stat sb;
return (stat(file, &sb) == 0);
}
static time_t get_file_mod_time(char *file)
{
struct stat attr;
stat(file, &attr);
return attr.st_mtime;
}
static size_t get_file_size(const char *file)
{
struct stat st;
stat(file, &st);
return st.st_size;
}
static char *sharedir_scan(sharedir_t *self, const char *file)
{
/*
* Agent is not under our control, but let's roughly constrain
* it to anything stored in or linked from sharedir
*/
chdir(self->dir);
char *real_path = realpath(file, NULL);
if (file[0] != '/' && !strstr(file, "/../") && real_path && file_exits(real_path))
{
return real_path;
}
free(real_path);
return NULL;
}
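/*
 * Behavior notes (following from the check above; example paths are
 * hypothetical): lookups are resolved relative to the sharedir, absolute
 * paths are rejected up front, and any "/../" component fails the strstr()
 * test:
 *
 *   sharedir_scan(self, "agent/payload.bin") -> realpath inside the sharedir
 *   sharedir_scan(self, "/etc/passwd")       -> NULL (absolute path)
 *   sharedir_scan(self, "a/../../secret")    -> NULL (contains "/../")
 */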
static sharedir_file_t *sharedir_get_object(sharedir_t *self, const char *file)
{
khiter_t k;
int ret;
sharedir_file_t *obj = NULL;
k = kh_get(SHAREDIR_LOOKUP, self->lookup, file);
FILE* f = fopen(obj->path, "r");
self->last_file_f = f;
self->last_file_obj_ptr = obj;
return f;
if (k != kh_end(self->lookup)) {
/* file already exists in our hash map */
obj = kh_value(self->lookup, k);
/* check if file still exists */
assert(file_exits(obj->path));
/* check if mod time matches */
assert(get_file_mod_time(obj->path) == obj->mod_time);
/* check if file size matches */
assert(get_file_size(obj->path) == obj->size);
return obj;
} else {
/* nope ! */
char *realpath = sharedir_scan(self, file);
struct stat sb;
if (realpath != NULL) {
if (stat(realpath, &sb) == 0 && S_ISDIR(sb.st_mode)) {
return NULL; // is dir
}
obj = malloc(sizeof(sharedir_file_t));
memset(obj, 0x0, sizeof(sharedir_file_t));
assert(asprintf(&obj->file, "%s", basename(realpath)) != -1);
obj->path = realpath;
obj->size = get_file_size(obj->path);
obj->bytes_left = (uint64_t)obj->size;
obj->mod_time = get_file_mod_time(obj->path);
/* put into hash_list */
char *new_file = NULL;
assert(asprintf(&new_file, "%s", file) != -1);
k = kh_put(SHAREDIR_LOOKUP, self->lookup, new_file, &ret);
kh_value(self->lookup, k) = obj;
return obj;
}
/* file not found */
return NULL;
}
}
static FILE *get_file_ptr(sharedir_t *self, sharedir_file_t *obj)
{
if (obj == self->last_file_obj_ptr && self->last_file_f) {
return self->last_file_f;
}
if (self->last_file_f) {
fclose(self->last_file_f);
}
FILE *f = fopen(obj->path, "r");
self->last_file_f = f;
self->last_file_obj_ptr = obj;
return f;
}
uint64_t sharedir_request_file(sharedir_t *self, const char *file, uint8_t *page_buffer)
{
if (!self->dir) {
fprintf(stderr, "WARNING: New file request received, but no share dir configured! [FILE: %s]\n",
file);
return 0xFFFFFFFFFFFFFFFFUL;
}
FILE *f = NULL;
sharedir_file_t *obj = sharedir_get_object(self, file);
if (obj != NULL) {
#ifdef SHAREDIR_DEBUG
printf("sharedir_get_object->file: %s\n", obj->file);
printf("sharedir_get_object->path: %s\n", obj->path);
printf("sharedir_get_object->size: %ld\n", obj->size);
printf("sharedir_get_object->bytes_left: %ld\n", obj->bytes_left);
printf("sharedir_get_object->file: %s\n", obj->file);
printf("sharedir_get_object->path: %s\n", obj->path);
printf("sharedir_get_object->size: %ld\n", obj->size);
printf("sharedir_get_object->bytes_left: %ld\n", obj->bytes_left);
#endif
if (obj->bytes_left >= 0x1000) {
f = get_file_ptr(self, obj);
fseek(f, obj->size - obj->bytes_left, SEEK_SET);
assert(fread(page_buffer, 1, 0x1000, f) == 0x1000);
obj->bytes_left -= 0x1000;
return 0x1000;
} else {
if (obj->bytes_left != 0) {
f = get_file_ptr(self, obj);
fseek(f, obj->size - obj->bytes_left, SEEK_SET);
assert(fread(page_buffer, 1, obj->bytes_left, f) == obj->bytes_left);
uint64_t ret_value = obj->bytes_left;
obj->bytes_left = 0;
return ret_value;
} else {
obj->bytes_left = (uint_fast64_t)obj->size;
return 0;
}
}
} else {
nyx_error("Warning: No such file in sharedir: %s\n", file);
return 0xFFFFFFFFFFFFFFFFUL;
}
}
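/*
 * Consumer sketch (hypothetical helper): sharedir_request_file() streams a
 * file in 0x1000-byte chunks; a short chunk is the final one, a further call
 * returns 0 and rewinds bytes_left, and 0xFFFFFFFFFFFFFFFFUL means the file
 * was not found (or no sharedir is configured).
 */
static void example_drain_file(sharedir_t *self, const char *name)
{
    uint8_t page[0x1000];
    uint64_t got;
    while ((got = sharedir_request_file(self, name, page)) != 0) {
        if (got == 0xFFFFFFFFFFFFFFFFUL)
            return; /* no such file */
        /* ... consume `got` bytes from page ... */
        if (got < 0x1000)
            break; /* short chunk -> end of file */
    }
}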

View File

@ -1,26 +1,26 @@
#pragma once
#include "khash.h"
#include <stdint.h>
#include <stdio.h>
typedef struct sharedir_file_s {
char *file;
char *path;
size_t size;
uint64_t bytes_left;
time_t mod_time;
} sharedir_file_t;
KHASH_MAP_INIT_STR(SHAREDIR_LOOKUP, sharedir_file_t *)
typedef struct sharedir_s {
char *dir;
khash_t(SHAREDIR_LOOKUP) * lookup;
FILE *last_file_f;
sharedir_file_t *last_file_obj_ptr;
} sharedir_t;
sharedir_t *sharedir_new(void);
void sharedir_set_dir(sharedir_t *self, const char *dir);
uint64_t sharedir_request_file(sharedir_t *self, const char *file, uint8_t *page_buffer);

View File

@ -1,515 +1,614 @@
#include "qemu/osdep.h"
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include "nyx/snapshot/block/block_cow.h"
#include "sysemu/block-backend.h"
#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/snapshot/block/block_cow.h"
#include "nyx/state/state.h"
// #define COW_CACHE_DEBUG
// #define COW_CACHE_VERBOSE
#define CHUNK_SIZE 0x1000
// 0x200
#define PAGE_MASK 0xFFFFFFFFFFFFF000
uint64_t global_cow_primary_size = COW_CACHE_PRIMARY_MINIMUM_SIZE;
bool global_cow_primary_size_adjustable = true;
void set_global_cow_cache_primary_size(uint64_t new_size)
{
if (global_cow_primary_size_adjustable &&
new_size > COW_CACHE_PRIMARY_MINIMUM_SIZE && (new_size & 0xFFF) == 0)
{
global_cow_primary_size = new_size;
global_cow_primary_size_adjustable = false;
}
}
static inline uint64_t get_global_cow_cache_primary_size(void)
{
return global_cow_primary_size;
}
cow_cache_t *cow_cache_new(const char *filename)
{
cow_cache_t *self = malloc(sizeof(cow_cache_t));
self->lookup_primary = kh_init(COW_CACHE);
self->lookup_secondary = kh_init(COW_CACHE);
self->lookup_secondary_tmp = kh_init(COW_CACHE);
self->cow_primary_size = COW_CACHE_PRIMARY_MINIMUM_SIZE;
self->data_primary = mmap(NULL, self->cow_primary_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_primary != MAP_FAILED);
self->data_secondary = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_secondary != MAP_FAILED);
self->data_secondary_tmp = mmap(NULL, COW_CACHE_SECONDARY_SIZE,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_secondary_tmp != MAP_FAILED);
self->filename = strdup(basename(filename));
self->offset_primary = 0;
self->offset_secondary = 0;
self->offset_secondary_tmp = 0;
if(getenv("NYX_DISABLE_BLOCK_COW")){
fprintf(stderr, "WARNING: Nyx block COW layer disabled for %s (** write operations are not cached **)\n", filename);
self->enabled = false;
}
else{
self->enabled = true;
}
self->enabled_fuzz = false;
self->enabled_fuzz_tmp = false;
if (getenv("NYX_DISABLE_BLOCK_COW")) {
fprintf(stderr,
"WARNING: Nyx block COW layer disabled for %s (** write operations "
"are not cached **)\n",
filename);
self->enabled = false;
} else {
self->enabled = true;
}
self->enabled_fuzz = false;
self->enabled_fuzz_tmp = false;
#ifdef DEBUG_COW_LAYER
self->read_calls = 0;
self->write_calls = 0;
self->read_calls_tmp = 0;
self->write_calls_tmp = 0;
#endif
return self;
}
static char *gen_file_name(cow_cache_t *self,
const char *filename_prefix,
const char *filename_postfix)
{
char *tmp1;
char *tmp2;
assert(asprintf(&tmp2, "%s", self->filename) != -1);
assert(asprintf(&tmp2, "%s", self->filename) != -1);
for (int i = 0; i < strlen(tmp2); i++) {
if (tmp2[i] == '/') {
tmp2[i] = '_';
}
}
assert(asprintf(&tmp1, "%s_%s.%s", filename_prefix, tmp2, filename_postfix) != -1);
free(tmp2);
assert(asprintf(&tmp1, "%s_%s.%s", filename_prefix, tmp2, filename_postfix) != -1);
free(tmp2);
return tmp1;
return tmp1;
}
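/*
 * Naming example (illustrative values): with self->filename == "disk.qcow2",
 * gen_file_name(self, "/tmp/wdir/snapshot", "pcow") yields
 * "/tmp/wdir/snapshot_disk.qcow2.pcow"; any '/' remaining in the stored
 * filename would have been rewritten to '_' by the loop above.
 */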
void read_primary_buffer(cow_cache_t *self, const char *filename_prefix, bool switch_mode)
{
assert(!self->enabled_fuzz);
global_cow_primary_size_adjustable = false;
char *tmp1;
char *tmp2;
tmp1 = gen_file_name(self, filename_prefix, "khash");
tmp2 = gen_file_name(self, filename_prefix, "pcow");
tmp1 = gen_file_name(self, filename_prefix, "khash");
tmp2 = gen_file_name(self, filename_prefix, "pcow");
kh_destroy(COW_CACHE, self->lookup_primary);
struct stat buffer;
assert(stat(tmp2, &buffer) == 0);
if (buffer.st_size > get_global_cow_cache_primary_size()) {
fprintf(stderr,
"ERROR: in-memory CoW buffer is too small compared to snapshot file "
"(buffer: 0x%lx / file: 0x%lx)\n",
get_global_cow_cache_primary_size(), buffer.st_size);
exit(1);
}
if (buffer.st_size) {
self->lookup_primary = kh_load(COW_CACHE, tmp1);
} else {
self->lookup_primary = kh_init(COW_CACHE);
}
int fd = open(tmp2, O_RDONLY);
if (switch_mode) {
munmap(self->data_primary, self->cow_primary_size);
self->cow_primary_size = get_global_cow_cache_primary_size();
self->data_primary =
mmap(0, self->cow_primary_size, PROT_READ, MAP_SHARED, fd, 0);
assert(self->data_primary != MAP_FAILED);
} else {
if (get_global_cow_cache_primary_size() != self->cow_primary_size) {
munmap(self->data_primary, self->cow_primary_size);
self->cow_primary_size = get_global_cow_cache_primary_size();
self->data_primary = mmap(NULL, self->cow_primary_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_primary != MAP_FAILED);
}
void *ptr =
mmap(0, COW_CACHE_PRIMARY_MINIMUM_SIZE, PROT_READ, MAP_SHARED, fd, 0);
assert(ptr != MAP_FAILED);
memcpy(self->data_primary, ptr, buffer.st_size);
munmap(ptr, COW_CACHE_PRIMARY_MINIMUM_SIZE);
}
close(fd);
self->offset_primary = buffer.st_size;
if (switch_mode) {
switch_to_fuzz_mode(self);
}
free(tmp1);
free(tmp2);
}
void dump_primary_buffer(cow_cache_t *self, const char *filename_prefix)
{
assert(self->enabled_fuzz);
char *tmp1;
char *tmp2;
tmp1 = gen_file_name(self, filename_prefix, "khash");
tmp2 = gen_file_name(self, filename_prefix, "pcow");
if (self->offset_primary) {
kh_write(COW_CACHE, self->lookup_primary, tmp1);
} else {
fclose(fopen(tmp1, "wb"));
}
FILE *fp = fopen(tmp2, "wb");
if (fp == NULL) {
fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp2);
assert(false);
}
if (self->offset_primary) {
fwrite(self->data_primary, CHUNK_SIZE, self->offset_primary / CHUNK_SIZE, fp);
}
fclose(fp);
free(tmp1);
free(tmp2);
}
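/*
 * Editorial note: together, dump_primary_buffer() and read_primary_buffer()
 * persist the L1 cache as a file pair: the ".khash" file holds the
 * serialized offset lookup table, the ".pcow" file holds the raw chunk data
 * (offset_primary bytes, written in CHUNK_SIZE units).
 */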
void cow_cache_reset(cow_cache_t *self)
{
    if (!self->enabled_fuzz)
        return;

    /* TODO */
    assert(self->enabled_fuzz);

    if (self->enabled_fuzz) {
#ifdef DEBUG_COW_LAYER
        printf("%s: read_calls =>\t%ld\n", __func__, self->read_calls);
        printf("%s: write_calls =>\t%ld\n", __func__, self->write_calls);
        printf("%s: read_calls_tmp =>\t%ld\n", __func__, self->read_calls_tmp);
        printf("%s: write_calls_tmp =>\t%ld\n", __func__, self->write_calls_tmp);
#endif
        if (!self->enabled_fuzz_tmp) {
            self->offset_secondary = 0;
            kh_clear(COW_CACHE, self->lookup_secondary);
#ifdef DEBUG_COW_LAYER
            self->read_calls  = 0;
            self->write_calls = 0;
#endif
        } else {
            self->offset_secondary_tmp = 0;
            kh_clear(COW_CACHE, self->lookup_secondary_tmp);
#ifdef DEBUG_COW_LAYER
            printf("CLEAR lookup_secondary_tmp\n");
            self->read_calls_tmp  = 0;
            self->write_calls_tmp = 0;
#endif
        }
    }
}
void cow_cache_enable_tmp_mode(cow_cache_t *self)
{
assert(self->enabled_fuzz);
self->enabled_fuzz_tmp = true;
}
void cow_cache_disable_tmp_mode(cow_cache_t *self)
{
assert(self->enabled_fuzz);
assert(self->enabled_fuzz_tmp);
cow_cache_reset(self);
self->enabled_fuzz_tmp = false;
}
void cow_cache_enable(cow_cache_t *self)
{
cow_cache_reset(self);
self->enabled = true;
}
void cow_cache_disable(cow_cache_t *self)
{
cow_cache_reset(self);
self->enabled = false;
}
typedef struct BlkRwCo {
BlockBackend *blk;
int64_t offset;
QEMUIOVector *qiov;
int ret;
BdrvRequestFlags flags;
} BlkRwCo;
typedef struct BlkAioEmAIOCB {
BlockAIOCB common;
BlkRwCo rwco;
int bytes;
bool has_returned;
} BlkAioEmAIOCB;
extern void blk_aio_write_entry(void *opaque);
extern int blk_check_byte_request(BlockBackend *blk, int64_t offset, size_t size);
extern void blk_aio_complete(BlkAioEmAIOCB *acb);
/* read from primary buffer */
static inline void read_from_primary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
khiter_t k;
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if (k != kh_end(self->lookup_primary)) {
#ifdef COW_CACHE_DEBUG
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_primary);
#endif
qemu_iovec_from_buf(qiov, iov_offset,
self->data_primary + kh_value(self->lookup_primary, k),
CHUNK_SIZE);
}
return;
}
/* try to read from secondary buffer
* read from primary buffer if the data is not available yet */
static inline void read_from_secondary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
/* read from L2 TMP buffer */
khiter_t k;
if (self->enabled_fuzz_tmp) {
k = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
if (k != kh_end(self->lookup_secondary_tmp)) {
#ifdef COW_CACHE_DEBUG
printf("[FTMP] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_secondary);
#endif
qemu_iovec_from_buf(qiov, iov_offset,
self->data_secondary_tmp +
kh_value(self->lookup_secondary_tmp, k),
CHUNK_SIZE);
return;
}
}
/* read from L2 buffer */
k = kh_get(COW_CACHE, self->lookup_secondary, offset_addr);
if (k != kh_end(self->lookup_secondary)) {
#ifdef COW_CACHE_DEBUG
printf("[FUZZ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_secondary);
#endif
qemu_iovec_from_buf(qiov, iov_offset,
self->data_secondary + kh_value(self->lookup_secondary, k),
CHUNK_SIZE);
return;
}
/* read from L1 buffer */
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if (k != kh_end(self->lookup_primary)) {
#ifdef COW_CACHE_DEBUG
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_primary);
#endif
qemu_iovec_from_buf(qiov, iov_offset,
self->data_primary + kh_value(self->lookup_primary, k),
CHUNK_SIZE);
}
}
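/*
 * Editorial note: reads are served from the most recent CoW layer that holds
 * the chunk: first L2 TMP (the incremental snapshot delta), then L2 (writes
 * made since the root snapshot), then L1 (writes made before the root
 * snapshot). Chunks cached in none of the layers keep the device data that
 * blk_co_preadv() already placed in the iovec.
 */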
/* read data from cow cache */
static int cow_cache_read(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
#ifdef DEBUG_COW_LAYER
if (self->enabled_fuzz) {
if (!self->enabled_fuzz_tmp) {
self->read_calls++;
} else {
self->read_calls_tmp++;
}
}
#endif
blk_co_preadv(blk, offset, bytes, qiov, flags);
if ((qiov->size % CHUNK_SIZE)) {
#ifdef COW_CACHE_DEBUG
fprintf(stderr, "%s: FAILED %lx!\n", __func__, qiov->size);
#endif
return 0;
}
assert(!(qiov->size % CHUNK_SIZE));
uint64_t iov_offset = 0;
for (uint64_t offset_addr = offset; offset_addr < (offset + (qiov->size));
offset_addr += CHUNK_SIZE)
{
if (self->enabled_fuzz) {
read_from_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
} else {
read_from_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
}
iov_offset += CHUNK_SIZE;
}
return 0;
}
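/*
 * Worked example (editorial; assumes CHUNK_SIZE is 0x1000, which is defined
 * outside this excerpt): a 0x2000-byte request at device offset 0x10000
 * performs two lookups, offset_addr = 0x10000 with iov_offset = 0x0 and
 * offset_addr = 0x11000 with iov_offset = 0x1000, each overwriting one chunk
 * of the iovec that blk_co_preadv() pre-filled from the backing file.
 */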
/* write to primary buffer */
static inline void write_to_primary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
int ret;
khiter_t k;
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if (unlikely(k == kh_end(self->lookup_primary))) {
/* create page */
k = kh_put(COW_CACHE, self->lookup_primary, offset_addr, &ret);
#ifdef COW_CACHE_DEBUG
printf("ADD NEW COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_primary);
#endif
kh_value(self->lookup_primary, k) = self->offset_primary;
self->offset_primary += CHUNK_SIZE;
#ifdef COW_CACHE_VERBOSE
    printf("COW CACHE IS 0x%lx BYTES (KB: %ld / MB: %ld / GB: %ld) IN SIZE!\n",
           self->offset_primary, self->offset_primary >> 10,
           self->offset_primary >> 20, self->offset_primary >> 30);
#endif
/* IN CASE THE BUFFER IS FULL -> ABORT! */
assert(self->offset_primary < self->cow_primary_size);
}
#ifdef COW_CACHE_DEBUG
printf("LOAD COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx (%s)\n",
offset_addr, iov_offset, kh_value(self->lookup_primary, k), self->filename);
#endif
/* write to cached page */
qemu_iovec_to_buf(qiov, iov_offset,
self->data_primary + kh_value(self->lookup_primary, k),
CHUNK_SIZE);
}
static inline void write_to_secondary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
int ret;
if (!self->enabled_fuzz_tmp) {
/* L2 mode */
/* IN CASE THE BUFFER IS FULL -> ABORT! */
if (self->offset_secondary >= COW_CACHE_SECONDARY_SIZE) {
GET_GLOBAL_STATE()->cow_cache_full = true;
abort();
return;
}
khiter_t k_secondary = kh_get(COW_CACHE, self->lookup_secondary, offset_addr);
if (unlikely(k_secondary == kh_end(self->lookup_secondary))) {
/* if page is not cached in secondary buffer yet */
k_secondary = kh_put(COW_CACHE, self->lookup_secondary, offset_addr, &ret);
kh_value(self->lookup_secondary, k_secondary) = self->offset_secondary;
self->offset_secondary += CHUNK_SIZE;
}
/* write to cache */
qemu_iovec_to_buf(qiov, iov_offset,
self->data_secondary +
kh_value(self->lookup_secondary, k_secondary),
CHUNK_SIZE);
} else {
/* L2 TMP mode */
/* IN CASE THE BUFFER IS FULL -> ABORT! */
if (self->offset_secondary_tmp >= COW_CACHE_SECONDARY_SIZE) {
GET_GLOBAL_STATE()->cow_cache_full = true;
abort();
return;
}
khiter_t k_secondary_tmp =
kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
if (unlikely(k_secondary_tmp == kh_end(self->lookup_secondary_tmp))) {
/* if page is not cached in secondary tmp buffer yet */
k_secondary_tmp =
kh_put(COW_CACHE, self->lookup_secondary_tmp, offset_addr, &ret);
kh_value(self->lookup_secondary_tmp, k_secondary_tmp) =
self->offset_secondary_tmp;
self->offset_secondary_tmp += CHUNK_SIZE;
}
/* write to cache */
qemu_iovec_to_buf(qiov, iov_offset,
self->data_secondary_tmp +
kh_value(self->lookup_secondary_tmp, k_secondary_tmp),
CHUNK_SIZE);
}
}
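/*
 * Editorial note: while enabled_fuzz_tmp is set, dirty chunks land in the
 * separate tmp buffer, so discarding an incremental snapshot only requires
 * clearing lookup_secondary_tmp and offset_secondary_tmp (see
 * cow_cache_reset()); the L2 state belonging to the root snapshot stays
 * intact.
 */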
/* write data to cow cache */
static int cow_cache_write(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
#ifdef DEBUG_COW_LAYER
if (self->enabled_fuzz) {
if (!self->enabled_fuzz_tmp) {
self->write_calls++;
} else {
self->write_calls_tmp++;
}
}
#endif
if ((qiov->size % CHUNK_SIZE)) {
#ifdef COW_CACHE_DEBUG
fprintf(stderr, "%s: FAILED %lx!\n", __func__, qiov->size);
#endif
return 0;
}
if ((qiov->size % CHUNK_SIZE) && GET_GLOBAL_STATE()->in_fuzzing_mode) {
GET_GLOBAL_STATE()->cow_cache_full = true;
fprintf(stderr, "WARNING: %s write in %lx CHUNKSIZE\n", __func__, qiov->size);
return 0;
} else {
assert(!(qiov->size % CHUNK_SIZE));
}
uint64_t iov_offset = 0;
for (uint64_t offset_addr = offset; offset_addr < (offset + (qiov->size));
offset_addr += CHUNK_SIZE)
{
if (self->enabled_fuzz) {
write_to_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
} else {
write_to_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
}
iov_offset += CHUNK_SIZE;
}
return 0;
}
void switch_to_fuzz_mode(cow_cache_t *self)
{
self->enabled_fuzz = true;
assert(!mprotect(self->data_primary, self->cow_primary_size, PROT_READ));
nyx_debug("switching to secondary CoW buffer\n");
}
void cow_cache_read_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
#ifdef COW_CACHE_DEBUG
printf("%s %lx %lx\n", __func__, rwco->offset, acb->bytes);
#endif
rwco->ret = cow_cache_read(*((cow_cache_t **)(rwco->blk)), rwco->blk,
rwco->offset, acb->bytes, rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
void cow_cache_write_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
#ifdef COW_CACHE_DEBUG
printf("%s\n", __func__);
#endif
rwco->ret = cow_cache_write(*((cow_cache_t **)(rwco->blk)), rwco->blk,
rwco->offset, acb->bytes, rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
View File
@ -1,4 +1,4 @@
#pragma once
#include <stdint.h>
@ -10,66 +10,68 @@
#include "nyx/khash.h"
#include "nyx/redqueen_trace.h"
// #define DEBUG_COW_LAYER
/* Minimum size of CoW buffer that stores data written to
* the block device between boot time and root snapshot (3GB)
*/
#define COW_CACHE_PRIMARY_MINIMUM_SIZE 0xC0000000
/* Size of CoW buffer which stores data written to
* the block device between the root snapshot and the
* next snapshot restore (3GB). This buffer is allocated
* twice to store the incremental snapshot delta.
*/
#define COW_CACHE_SECONDARY_SIZE 0xC0000000
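/* Editorial note: 0xC0000000 bytes is 3 * 0x40000000, i.e. the 3GB mentioned
 * above; since cow_cache_new() maps both a secondary and a secondary tmp
 * buffer of this size, 6GB of anonymous mappings are reserved (populated
 * lazily by the kernel on first write). */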
KHASH_MAP_INIT_INT64(COW_CACHE, uint64_t)
typedef struct cow_cache_s {
khash_t(COW_CACHE) * lookup_primary;
khash_t(COW_CACHE) * lookup_secondary;
khash_t(COW_CACHE) * lookup_secondary_tmp;
void *data_primary;
void *data_secondary;
void *data_secondary_tmp;
uint64_t cow_primary_size;
char *filename;
uint64_t offset_primary;
uint64_t offset_secondary;
uint64_t offset_secondary_tmp;
bool enabled;
bool enabled_fuzz;
bool enabled_fuzz_tmp;
#ifdef DEBUG_COW_LAYER
uint64_t read_calls;
uint64_t write_calls;
uint64_t read_calls_tmp;
uint64_t write_calls_tmp;
#endif
} cow_cache_t;
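/*
 * Illustrative sketch (editorial, not part of the original header): each
 * lookup table maps a CHUNK_SIZE-aligned device offset to an offset inside
 * the matching data buffer. A minimal khash round trip with this map type:
 */
#if 0
static uint64_t cow_lookup_example(khash_t(COW_CACHE) * map, uint64_t dev_off)
{
    int ret;
    khiter_t k = kh_get(COW_CACHE, map, dev_off);
    if (k == kh_end(map)) {     /* chunk not cached yet */
        k = kh_put(COW_CACHE, map, dev_off, &ret);
        kh_value(map, k) = 0;   /* caller stores the next free buffer offset */
    }
    return kh_value(map, k);
}
#endif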
cow_cache_t *cow_cache_new(const char *filename);
void cow_cache_reset(cow_cache_t *self);
void switch_to_fuzz_mode(cow_cache_t *self);
void read_primary_buffer(cow_cache_t *self,
const char *filename_prefix,
bool switch_mode);
void dump_primary_buffer(cow_cache_t *self, const char *filename_prefix);
void cow_cache_read_entry(void *opaque);
void cow_cache_write_entry(void *opaque);
void cow_cache_enable(cow_cache_t *self);
void cow_cache_disable(cow_cache_t *self);
void cow_cache_enable_tmp_mode(cow_cache_t *self);
void cow_cache_disable_tmp_mode(cow_cache_t *self);
void set_global_cow_cache_primary_size(uint64_t new_size);
View File
@ -11,22 +11,22 @@
#include "nyx/snapshot/block/nyx_block_snapshot.h"
#include "nyx/state/state.h"
typedef struct fast_reload_cow_entry_s {
uint32_t id;
char idstr[256];
} fast_reload_cow_entry_t;
nyx_block_t *nyx_block_snapshot_init_from_file(const char *folder, bool pre_snapshot)
{
nyx_block_t *self = malloc(sizeof(nyx_block_t));
memset(self, 0, sizeof(nyx_block_t));
BlockBackend *blk;
fast_reload_cow_entry_t entry;
char *tmp1;
char *tmp2;
assert(asprintf(&tmp1, "%s/fs_cache.meta", folder) != -1);
assert(asprintf(&tmp2, "%s/fs_drv", folder) != -1);
@ -34,29 +34,30 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap
self->cow_cache_array_size = 0;
FILE *f = fopen(tmp1, "r");
assert(f != NULL);
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if (blk && blk->cow_cache) {
nyx_debug("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
self->cow_cache_array_size++;
}
}
uint32_t temp_cow_cache_array_size;
assert(fread(&temp_cow_cache_array_size, sizeof(uint32_t), 1, f) == 1);
nyx_debug("%d vs %x\n", temp_cow_cache_array_size, self->cow_cache_array_size);
assert(self->cow_cache_array_size == temp_cow_cache_array_size);
self->cow_cache_array =
(cow_cache_t **)malloc(sizeof(cow_cache_t *) * self->cow_cache_array_size);
uint32_t i = 0;
uint32_t id = 0;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if (blk && blk->cow_cache) {
self->cow_cache_array[i++] = blk->cow_cache;
assert(fread(&entry, sizeof(fast_reload_cow_entry_t), 1, f) == 1);
@ -69,7 +70,7 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap
fclose(f);
for (i = 0; i < self->cow_cache_array_size; i++) {
read_primary_buffer(self->cow_cache_array[i], tmp2, !pre_snapshot);
}
@ -78,75 +79,82 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap
return self;
}
nyx_block_t *nyx_block_snapshot_init(void)
{
nyx_block_t *self = malloc(sizeof(nyx_block_t));
memset(self, 0, sizeof(nyx_block_t));
BlockBackend *blk;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if (blk && blk->cow_cache) {
nyx_debug("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
self->cow_cache_array_size++;
}
}
self->cow_cache_array =
(cow_cache_t **)malloc(sizeof(cow_cache_t *) * self->cow_cache_array_size);
uint32_t i = 0;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if (blk && blk->cow_cache) {
self->cow_cache_array[i++] = blk->cow_cache;
}
}
for (i = 0; i < self->cow_cache_array_size; i++) {
switch_to_fuzz_mode(self->cow_cache_array[i]);
}
return self;
}
void nyx_block_snapshot_flush(nyx_block_t *self)
{
GET_GLOBAL_STATE()->cow_cache_full = false;
}
void nyx_block_snapshot_switch_incremental(nyx_block_t *self)
{
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
cow_cache_enable_tmp_mode(self->cow_cache_array[i]);
}
nyx_block_snapshot_flush(self);
}
void nyx_block_snapshot_disable_incremental(nyx_block_t *self)
{
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
cow_cache_disable_tmp_mode(self->cow_cache_array[i]);
}
}
void nyx_block_snapshot_reset(nyx_block_t *self)
{
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
cow_cache_reset(self->cow_cache_array[i]);
}
}
void nyx_block_snapshot_serialize(nyx_block_t *self, const char *snapshot_folder)
{
fast_reload_cow_entry_t entry;
char *tmp1;
char *tmp2;
assert(asprintf(&tmp1, "%s/fs_cache.meta", snapshot_folder) != -1);
assert(asprintf(&tmp2, "%s/fs_drv", snapshot_folder) != -1);
FILE *f = fopen(tmp1, "w");
fwrite(&(self->cow_cache_array_size), sizeof(uint32_t), 1, f);
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
entry.id = i;
strncpy((char *)&entry.idstr,
(const char *)self->cow_cache_array[i]->filename, 255);
fwrite(&entry, sizeof(fast_reload_cow_entry_t), 1, f);
dump_primary_buffer(self->cow_cache_array[i], tmp2);
View File
@ -1,21 +1,21 @@
#pragma once
#include "nyx/snapshot/block/block_cow.h"
#include <stdint.h>
typedef struct nyx_block_s {
cow_cache_t **cow_cache_array;
uint32_t cow_cache_array_size;
} nyx_block_t;
nyx_block_t *nyx_block_snapshot_init_from_file(const char *folder, bool pre_snapshot);
nyx_block_t *nyx_block_snapshot_init(void);
void nyx_block_snapshot_switch_to_incremental(nyx_block_t *);
void nyx_block_snapshot_flush(nyx_block_t *self);
void nyx_block_snapshot_switch_incremental(nyx_block_t *self);
void nyx_block_snapshot_disable_incremental(nyx_block_t *self);
void nyx_block_snapshot_reset(nyx_block_t *self);
void nyx_block_snapshot_serialize(nyx_block_t *self, const char *snapshot_folder);
View File
@ -29,37 +29,39 @@
#include "nyx/snapshot/devices/state_reallocation.h"
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#define STATE_BUFFER 0x8000000 /* up to 128MB */
extern void enable_fast_snapshot_rtc(void);
extern void enable_fast_snapshot_kvm_clock(void);
static void enable_fast_snapshot_mode(void)
{
enable_fast_snapshot_rtc();
enable_fast_snapshot_kvm_clock();
}
extern int kvm_nyx_put_tsc_value(CPUState *cs, uint64_t data);
static void set_tsc_value(nyx_device_state_t *self, bool tmp_snapshot)
{
if (self->incremental_mode) {
assert(self->tsc_value_incremental);
assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value_incremental) ==
0);
} else {
assert(self->tsc_value);
assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value) == 0);
}
}
static void save_tsc_value(nyx_device_state_t *self, bool incremental_mode)
{
X86CPU *cpu = X86_CPU(qemu_get_cpu(0));
CPUX86State *env = &cpu->env;
if (incremental_mode) {
self->tsc_value_incremental = env->tsc;
} else {
self->tsc_value = env->tsc;
}
}
@ -69,28 +71,28 @@ extern int qemu_savevm_state(QEMUFile *f, Error **errp);
/* new savevm routine */
typedef struct SaveStateEntry {
QTAILQ_ENTRY(SaveStateEntry) entry;
char idstr[256];
int instance_id;
int alias_id;
int version_id;
int load_version_id;
int section_id;
int load_section_id;
SaveVMHandlers *ops;
const VMStateDescription *vmsd;
void *opaque;
void *compat;
int is_ram;
} SaveStateEntry;
typedef struct SaveState {
QTAILQ_HEAD(, SaveStateEntry) handlers;
int global_section_id;
bool skip_configuration;
uint32_t len;
const char *name;
uint32_t target_page_bits;
} SaveState;
extern SaveState savevm_state;
@ -107,20 +109,20 @@ extern void save_section_header(QEMUFile *f, SaveStateEntry *se, uint8_t section
/* skip block ram */
static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
{
QJSON *vmdesc;
int vmdesc_len;
SaveStateEntry *se;
int ret;
bool in_postcopy = migration_in_postcopy();
cpu_synchronize_all_states();
QTAILQ_FOREACH (se, &savevm_state.handlers, entry) {
if (strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")) {
if (!se->ops || (in_postcopy && se->ops->save_live_complete_postcopy) ||
(in_postcopy && !iterable_only) ||
!se->ops->save_live_complete_precopy)
{
continue;
}
@ -148,8 +150,8 @@ static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_o
vmdesc = qjson_new();
json_prop_int(vmdesc, "page_size", TARGET_PAGE_SIZE);
json_start_array(vmdesc, "devices");
QTAILQ_FOREACH (se, &savevm_state.handlers, entry) {
if (strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")) {
if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
continue;
}
@ -189,12 +191,13 @@ static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_o
}
static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
{
SaveStateEntry *se;
int ret = 1;
QTAILQ_FOREACH (se, &savevm_state.handlers, entry) {
if (strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")) {
if (!se->ops || !se->ops->save_live_iterate) {
continue;
}
@ -236,12 +239,13 @@ static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) {
return ret;
}
static void fast_qemu_savevm_state_setup(QEMUFile *f)
{
SaveStateEntry *se;
int ret;
QTAILQ_FOREACH (se, &savevm_state.handlers, entry) {
if (strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")) {
if (!se->ops || !se->ops->save_setup) {
continue;
}
@ -263,7 +267,8 @@ static void fast_qemu_savevm_state_setup(QEMUFile *f){
}
static int fast_qemu_savevm_state(QEMUFile *f, Error **errp)
{
qemu_savevm_state_header(f);
fast_qemu_savevm_state_setup(f);
@ -278,84 +283,99 @@ static int fast_qemu_savevm_state(QEMUFile *f, Error **errp) {
}
/* QEMUFile RAM Emulation */
static ssize_t fast_savevm_writev_buffer(void *opaque,
struct iovec *iov,
int iovcnt,
int64_t pos)
{
ssize_t retval = 0;
for (uint32_t i = 0; i < iovcnt; i++) {
memcpy((void *)(((struct fast_savevm_opaque_t *)(opaque))->buf +
((struct fast_savevm_opaque_t *)(opaque))->pos),
iov[i].iov_base, iov[i].iov_len);
((struct fast_savevm_opaque_t *)(opaque))->pos += iov[i].iov_len;
retval += iov[i].iov_len;
}
return retval;
}
static int fast_savevm_fclose_save_to_buffer(void *opaque)
{
memcpy(((struct fast_savevm_opaque_t *)(opaque))->output_buffer,
((struct fast_savevm_opaque_t *)(opaque))->buf,
((struct fast_savevm_opaque_t *)(opaque))->pos);
*((struct fast_savevm_opaque_t *)(opaque))->output_buffer_size =
((struct fast_savevm_opaque_t *)(opaque))->pos;
// printf("DUMPED: %d\n", *((struct fast_savevm_opaque_t*)(opaque))->output_buffer_size);
return 0;
}
static int fast_loadvm_fclose(void *opaque)
{
return 0;
}
static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size)
{
memcpy(buf, (void *)(((struct fast_savevm_opaque_t *)(opaque))->buf + pos), size);
return size;
}
static const QEMUFileOps fast_loadvm_ops = {
.get_buffer = (QEMUFileGetBufferFunc *)fast_loadvm_get_buffer,
.close = (QEMUFileCloseFunc *)fast_loadvm_fclose
};
static const QEMUFileOps fast_savevm_ops_to_buffer = {
.writev_buffer = (QEMUFileWritevBufferFunc *)fast_savevm_writev_buffer,
.close = (QEMUFileCloseFunc *)fast_savevm_fclose_save_to_buffer
};
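/*
 * Usage sketch (editorial; variable names are made up): these two ops tables
 * emulate a QEMUFile on top of plain memory, so saving and re-loading the
 * device state needs no disk I/O. fast_savevm_ops_to_buffer additionally
 * expects output_buffer/output_buffer_size to be set, since its close
 * handler copies the result there.
 */
#if 0
uint8_t tmp_buf[IO_BUF_SIZE];
struct fast_savevm_opaque_t opaque = { .f = NULL, .buf = tmp_buf, .pos = 0 };
QEMUFile *out = qemu_fopen_ops(&opaque, &fast_savevm_ops_to_buffer);
/* ... savevm writes into tmp_buf via fast_savevm_writev_buffer() ... */
QEMUFile *in = qemu_fopen_ops(&opaque, &fast_loadvm_ops);
/* ... loadvm reads it back via fast_loadvm_get_buffer() ... */
#endif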
nyx_device_state_t *nyx_device_state_init_from_snapshot(const char *snapshot_folder,
bool pre_snapshot)
{
nyx_device_state_t *self = malloc(sizeof(nyx_device_state_t));
memset(self, 0, sizeof(nyx_device_state_t));
self->state_buf = malloc(STATE_BUFFER);
self->state_buf_size = 0;
char *qemu_state_file;
assert(asprintf(&qemu_state_file, "%s/fast_snapshot.qemu_state",
snapshot_folder) != -1);
struct fast_savevm_opaque_t fast_savevm_opaque;
FILE *f;
uint8_t ret = global_state_store();
assert(!ret);
struct stat buffer;
assert(stat(qemu_state_file, &buffer) == 0);
void *state_buf2 = malloc(STATE_BUFFER);
f = fopen(qemu_state_file, "r");
assert(fread(state_buf2, buffer.st_size, 1, f) == 1);
fclose(f);
fast_savevm_opaque.buf = state_buf2;
fast_savevm_opaque.f = NULL;
fast_savevm_opaque.pos = 0;
QEMUFile *file_dump = qemu_fopen_ops(&fast_savevm_opaque, &fast_loadvm_ops);
qemu_devices_reset();
qemu_loadvm_state(file_dump);
if (!pre_snapshot) {
self->qemu_state = state_reallocation_new(file_dump);
}
free(state_buf2);
if (!pre_snapshot) {
enable_fast_snapshot_mode();
save_tsc_value(self, false);
}
@ -369,36 +389,36 @@ nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_fol
* backed by RAM. state_reallocation_new() then uses this file to build an
* optimized sequence of snapshot restore operations.
*/
nyx_device_state_t *nyx_device_state_init(void)
{
nyx_device_state_t *self = malloc(sizeof(nyx_device_state_t));
memset(self, 0, sizeof(nyx_device_state_t));
self->state_buf = malloc(STATE_BUFFER);
self->state_buf_size = 0;
Error *local_err = NULL;
struct fast_savevm_opaque_t fast_savevm_opaque, fast_loadvm_opaque;
void *tmp_buf = malloc(1024 * 1024 * 16);
fast_savevm_opaque.output_buffer = self->state_buf;
fast_savevm_opaque.output_buffer_size = &self->state_buf_size;
fast_savevm_opaque.buf = tmp_buf;
fast_savevm_opaque.f = NULL;
fast_savevm_opaque.pos = 0;
uint8_t ret = global_state_store();
assert(!ret);
QEMUFile *f = qemu_fopen_ops(&fast_savevm_opaque, &fast_savevm_ops_to_buffer);
ret = fast_qemu_savevm_state(f, &local_err);
fast_loadvm_opaque.buf = tmp_buf;
fast_loadvm_opaque.f = NULL;
fast_loadvm_opaque.pos = 0;
QEMUFile *file_dump = qemu_fopen_ops(&fast_loadvm_opaque, &fast_loadvm_ops);
self->qemu_state = state_reallocation_new(file_dump);
qemu_fclose(file_dump);
@ -411,41 +431,49 @@ nyx_device_state_t* nyx_device_state_init(void){
return self;
}
void nyx_device_state_switch_incremental(nyx_device_state_t *self)
{
self->incremental_mode = true;
fdl_fast_create_tmp(self->qemu_state);
fdl_fast_enable_tmp(self->qemu_state);
}
void nyx_device_state_disable_incremental(nyx_device_state_t *self)
{
fdl_fast_disable_tmp(self->qemu_state);
self->incremental_mode = false;
}
void nyx_device_state_restore(nyx_device_state_t *self)
{
fdl_fast_reload(self->qemu_state);
call_fast_change_handlers();
}
void nyx_device_state_post_restore(nyx_device_state_t *self)
{
set_tsc_value(self, self->incremental_mode);
}
void nyx_device_state_save_tsc(nyx_device_state_t *self)
{
save_tsc_value(self, false);
}
void nyx_device_state_save_tsc_incremental(nyx_device_state_t *self)
{
save_tsc_value(self, true);
}
void nyx_device_state_serialize(nyx_device_state_t *self, const char *snapshot_folder)
{
char *tmp;
assert(asprintf(&tmp, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1);
FILE *f_qemu_state = fopen(tmp, "w+b");
assert(fwrite(self->state_buf, 1, self->state_buf_size, f_qemu_state) ==
self->state_buf_size);
fclose(f_qemu_state);
}
View File
@ -1,33 +1,34 @@
#pragma once
#include "nyx/snapshot/devices/state_reallocation.h"
#include <stdbool.h>
#include <stdint.h>
typedef struct nyx_device_state_s {
state_reallocation_t *qemu_state;
uint64_t tsc_value;
uint64_t tsc_value_incremental;
bool incremental_mode;
void *state_buf; /* QEMU's serialized state */
uint32_t state_buf_size;
} nyx_device_state_t;
nyx_device_state_t *nyx_device_state_init(void);
nyx_device_state_t *nyx_device_state_init_from_snapshot(const char *snapshot_folder,
bool pre_snapshot);
void nyx_device_state_restore(nyx_device_state_t *self);
void nyx_device_state_post_restore(nyx_device_state_t *self);
void nyx_device_state_switch_incremental(nyx_device_state_t *self);
void nyx_device_state_disable_incremental(nyx_device_state_t *self);
void nyx_device_state_save_tsc(nyx_device_state_t *self);
void nyx_device_state_save_tsc_incremental(nyx_device_state_t *self);
void nyx_device_state_serialize(nyx_device_state_t *self, const char *snapshot_folder);
File diff suppressed because it is too large

View File
@ -21,8 +21,8 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "monitor/monitor.h"
#include "nyx/khash.h"
#define IO_BUF_SIZE 32768
@ -35,68 +35,68 @@ struct QEMUFile_tmp {
int64_t bytes_xfer;
int64_t xfer_limit;
int64_t pos; // buffer start on write, end on read
volatile int buf_index;
int buf_size; // 0 when writing
uint8_t buf[IO_BUF_SIZE];
};
struct fast_savevm_opaque_t {
FILE *f;
uint8_t *buf;
size_t buflen;
uint64_t pos;
void *output_buffer;
uint32_t *output_buffer_size;
};
#define REALLOC_SIZE 0x8000
#define PRE_ALLOC_BLOCK_SIZE 0x8000000 /* 128 MB */
typedef struct state_reallocation_tmp_s {
void **copy;
uint32_t fast_state_size;
bool enabled;
} state_reallocation_tmp_t;
typedef struct state_reallocation_s {
void **ptr;
void **copy;
size_t *size;
uint32_t fast_state_size;
uint32_t fast_state_pos;
void **fptr;
void **opaque;
uint32_t *version;
uint32_t fast_state_fptr_size;
uint32_t fast_state_fptr_pos;
void **get_fptr;
void **get_opaque;
size_t *get_size;
void **get_data;
uint32_t fast_state_get_fptr_size;
uint32_t fast_state_get_fptr_pos;
/* prevents heap fragmentation and additional 2GB mem usage */
void *pre_alloc_block;
uint32_t pre_alloc_block_offset;
state_reallocation_tmp_t tmp_snapshot;
} state_reallocation_t;
state_reallocation_t *state_reallocation_new(QEMUFile *f);
void fdl_fast_reload(state_reallocation_t *self);
void fdl_fast_create_tmp(state_reallocation_t *self);
void fdl_fast_enable_tmp(state_reallocation_t *self);
void fdl_fast_disable_tmp(state_reallocation_t *self);
View File
@ -1,23 +1,24 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#include <assert.h>
VMChangeStateHandler *change_kvm_clock_handler = NULL;
VMChangeStateHandler *change_kvm_pit_handler = NULL;
VMChangeStateHandler *change_cpu_handler = NULL;
void *change_kvm_clock_opaque = NULL;
void *change_kvm_pit_opaque = NULL;
void *change_cpu_opaque = NULL;
VMChangeStateHandler *change_ide_core_handler = NULL;
uint8_t change_ide_core_opaque_num = 0;
void *change_ide_core_opaque[32] = { NULL };
void call_fast_change_handlers(void)
{
assert(change_kvm_clock_handler && change_kvm_pit_handler && change_cpu_handler);
change_kvm_clock_handler(change_kvm_clock_opaque, 1, RUN_STATE_RUNNING);
@ -25,35 +26,36 @@ void call_fast_change_handlers(void){
change_cpu_handler(change_cpu_opaque, 1, RUN_STATE_RUNNING);
return;
/* TODO: check if necessary */
if (change_ide_core_handler) {
for (uint8_t i = 0; i < change_ide_core_opaque_num; i++) {
change_ide_core_handler(change_ide_core_opaque[i], 1, RUN_STATE_RUNNING);
}
}
}
void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id)
{
switch (id) {
case RELOAD_HANDLER_KVM_CLOCK:
change_kvm_clock_handler = cb;
change_kvm_clock_opaque = opaque;
return;
case RELOAD_HANDLER_KVM_PIT:
change_kvm_pit_handler = cb;
change_kvm_pit_opaque = opaque;
return;
case RELOAD_HANDLER_KVM_CPU:
change_cpu_handler = cb;
change_cpu_opaque = opaque;
return;
case RELOAD_HANDLER_IDE_CORE:
change_ide_core_handler = cb;
change_ide_core_opaque[change_ide_core_opaque_num] = opaque;
change_ide_core_opaque_num++;
return;
default:
abort();
}
}
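/*
 * Usage sketch (editorial; the actual call sites live in the patched device
 * models): a device registers its change-state callback once, e.g.
 *
 *     add_fast_reload_change_handler(kvmclock_vm_state_change, s,
 *                                    RELOAD_HANDLER_KVM_CLOCK);
 *
 * so that call_fast_change_handlers() can replay it directly after every
 * fast reload instead of going through QEMU's generic VM state change
 * notification.
 */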
View File
@ -1,13 +1,13 @@
#pragma once
#include "sysemu/runstate.h"
#include <stdint.h>
#include <stdlib.h>
#define RELOAD_HANDLER_KVM_CLOCK 0
#define RELOAD_HANDLER_KVM_PIT 1
#define RELOAD_HANDLER_KVM_CPU 2
#define RELOAD_HANDLER_IDE_CORE 3
void call_fast_change_handlers(void);
void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id);
View File
@ -1,31 +1,32 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "qemu/bitmap.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/helper.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/snapshot/helper.h"
// #define DEBUG_NYX_SNAPSHOT_HELPER
uint64_t get_ram_size(void)
{
RAMBlock *block;
uint64_t guest_ram_size = 0;
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
guest_ram_size += block->used_length;
#ifdef DEBUG_NYX_SNAPSHOT_HELPER
printf("Block: %s (%lx)\n", block->idstr, block->used_length);
#endif
}
#ifdef DEBUG_NYX_SNAPSHOT_HELPER
printf("%s - guest_ram_size: %lx\n", __func__, guest_ram_size);
#endif
return guest_ram_size;
}
View File
@ -1,4 +1,4 @@
#pragma once
#include <stdint.h>
@ -6,8 +6,8 @@
#define PAGE_SIZE qemu_real_host_page_size
#endif
#define BITMAP_SIZE(x) ((x / PAGE_SIZE) / 8)
#define DIRTY_STACK_SIZE(x) ((x / PAGE_SIZE) * sizeof(uint64_t))
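/* Worked example (editorial; assumes 4KiB pages): for 2GiB of guest RAM,
 * BITMAP_SIZE gives (0x80000000 / 0x1000) / 8 = 0x10000 bytes (64KiB, one
 * bit per page), while DIRTY_STACK_SIZE reserves (0x80000000 / 0x1000) * 8
 * = 4MiB (one uint64_t per page). */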
uint64_t get_ram_size(void);
View File
@ -1,115 +1,139 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"
/* init operation */
void nyx_snapshot_debug_pre_init(void)
{
/* TODO */
}
/* init operation */
void nyx_snapshot_debug_init(fast_reload_t *self)
{
/* TODO */
}
/* enable operation */
void nyx_snapshot_debug_enable(fast_reload_t *self)
{
/* TODO */
}
/* restore operation */
uint32_t nyx_snapshot_debug_restore(shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist,
bool verbose)
{
uint32_t num_dirty_pages = 0;
void *current_region = NULL;
int counter = 0;
for (uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++) {
if (shadow_memory_state->incremental_enabled) {
current_region =
shadow_memory_state->ram_regions[i].incremental_region_ptr;
} else {
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for (uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size;
addr += 0x1000)
{
void *host_addr =
shadow_memory_state->ram_regions[i].host_region_ptr + addr;
void *snapshot_addr = current_region + addr;
uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;
/* check first if the page is dirty (this is super slow, but quite useful for debugging) */
if (memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)) {
/* check if page is not on the block list */
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == false)
{
// fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);
if (verbose) {
printf("%s -> (phys: 0x%lx) %p <-- %p [%d]\n", __func__,
physical_addr, host_addr, snapshot_addr,
shadow_memory_state->incremental_enabled);
counter++;
}
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
num_dirty_pages++;
}
}
}
}
}
}
if(verbose){
printf("TOTAL: %d\n", counter);
}
return num_dirty_pages;
if (verbose) {
printf("TOTAL: %d\n", counter);
}
return num_dirty_pages;
}
void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){
void* current_region = NULL;
void nyx_snapshot_debug_save_root_pages(shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist,
bool verbose)
{
void *current_region = NULL;
for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr;
void* snapshot_addr = current_region + addr;
uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + addr;
/* check first if the page is dirty (this is super slow, but quite useful for debugging) */
if(memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)){
/* check if page is not on the block list */
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){
//fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);
if(verbose && !shadow_memory_is_root_page_tracked(shadow_memory_state, addr, i)){
printf("%s -> %p <-- %p [%d]\n", __func__, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled);
}
shadow_memory_track_dirty_root_pages(shadow_memory_state, addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
for (uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++) {
if (shadow_memory_state->incremental_enabled) {
current_region =
shadow_memory_state->ram_regions[i].incremental_region_ptr;
} else {
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for (uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size;
addr += 0x1000)
{
void *host_addr =
shadow_memory_state->ram_regions[i].host_region_ptr + addr;
void *snapshot_addr = current_region + addr;
uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;
void *incremental_addr =
shadow_memory_state->ram_regions[i].incremental_region_ptr + addr;
/* check first if the page is dirty (this is super slow, but quite useful for debugging) */
if (memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)) {
/* check if page is not on the block list */
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == false)
{
// fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);
if (verbose &&
!shadow_memory_is_root_page_tracked(shadow_memory_state,
addr, i))
{
printf("%s -> %p <-- %p [%d]\n", __func__, host_addr,
snapshot_addr,
shadow_memory_state->incremental_enabled);
}
shadow_memory_track_dirty_root_pages(shadow_memory_state, addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}
}
}
}
}
}
}
/* set operation */
void nyx_snapshot_debug_set(fast_reload_t* self){
/* TODO */
void nyx_snapshot_debug_set(fast_reload_t *self)
{
/* TODO */
}

View File

@@ -1,11 +1,15 @@
#pragma once
#pragma once
#include <stdint.h>
#include "nyx/fast_vm_reload.h"
#include <stdint.h>
void nyx_snapshot_debug_pre_init(void);
void nyx_snapshot_debug_init(fast_reload_t* self);
void nyx_snapshot_debug_enable(fast_reload_t* self);
uint32_t nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose);
void nyx_snapshot_debug_set(fast_reload_t* self);
void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose);
void nyx_snapshot_debug_pre_init(void);
void nyx_snapshot_debug_init(fast_reload_t *self);
void nyx_snapshot_debug_enable(fast_reload_t *self);
uint32_t nyx_snapshot_debug_restore(shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist,
bool verbose);
void nyx_snapshot_debug_set(fast_reload_t *self);
void nyx_snapshot_debug_save_root_pages(shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist,
bool verbose);

View File

@@ -2,8 +2,8 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
@@ -13,329 +13,399 @@
#define FAST_IN_RANGE(address, start, end) (address < end && address >= start)
/* dirty ring specific defines */
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_EXIT_DIRTY_RING_FULL 31
#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
#define KVM_CAP_DIRTY_LOG_RING 192
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_EXIT_DIRTY_RING_FULL 31
#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
#define KVM_CAP_DIRTY_LOG_RING 192
/* global vars */
int dirty_ring_size = 0;
int dirty_ring_max_size_global = 0;
struct kvm_dirty_gfn *kvm_dirty_gfns = NULL; /* dirty ring mmap ptr */
uint32_t kvm_dirty_gfns_index = 0;
uint32_t kvm_dirty_gfns_index_mask = 0;
int dirty_ring_size = 0;
int dirty_ring_max_size_global = 0;
struct kvm_dirty_gfn *kvm_dirty_gfns = NULL; /* dirty ring mmap ptr */
uint32_t kvm_dirty_gfns_index = 0;
uint32_t kvm_dirty_gfns_index_mask = 0;
static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size){
struct kvm_enable_cap cap = { 0 };
static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size)
{
struct kvm_enable_cap cap = { 0 };
cap.cap = KVM_CAP_DIRTY_LOG_RING;
cap.args[0] = ring_size;
cap.cap = KVM_CAP_DIRTY_LOG_RING;
cap.args[0] = ring_size;
int ret = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
if(ret != 0){
printf("[QEMU-Nyx] Error: KVM_ENABLE_CAP ioctl failed\n");
}
return ring_size;
}
static int check_dirty_ring_size(int kvm_fd, int vm_fd){
int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if(ret < 0 ){
printf("[QEMU-Nyx] Error: KVM_CAP_DIRTY_LOG_RING failed (dirty ring not supported?)\n");
exit(1);
}
printf("[QEMU-Nyx] Max Dirty Ring Size -> %d (Entries: %d)\n", ret, ret/(int)sizeof(struct kvm_dirty_gfn));
uint64_t dirty_ring_max_size = ret; //kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* DIRTY RING -> 1MB in size results in 256M trackable memory */
ret = vm_enable_dirty_ring(vm_fd, dirty_ring_max_size);
if(ret < 0 ){
printf("[QEMU-Nyx] Error: Enabling dirty ring (size: %ld) failed\n", dirty_ring_max_size);
exit(1);
}
dirty_ring_max_size_global = dirty_ring_max_size;
return ret;
}
static void allocate_dirty_ring(int kvm_vcpu, int vm_fd){
assert(dirty_ring_size);
if (dirty_ring_size) {
kvm_dirty_gfns = mmap(NULL, dirty_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, kvm_vcpu, PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
if (kvm_dirty_gfns == MAP_FAILED) {
printf("[QEMU-Nyx] Error: Dirty ring mmap failed!\n");
exit(1);
int ret = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
if (ret != 0) {
printf("[QEMU-Nyx] Error: KVM_ENABLE_CAP ioctl failed\n");
}
}
printf("[QEMU-Nyx] Dirty ring mmap region located at %p\n", kvm_dirty_gfns);
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == 0);
return ring_size;
}
static int check_dirty_ring_size(int kvm_fd, int vm_fd)
{
int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if (ret < 0) {
printf("[QEMU-Nyx] Error: KVM_CAP_DIRTY_LOG_RING failed (dirty ring not "
"supported?)\n");
exit(1);
}
printf("[QEMU-Nyx] Max Dirty Ring Size -> %d (Entries: %d)\n", ret,
ret / (int)sizeof(struct kvm_dirty_gfn));
uint64_t dirty_ring_max_size =
ret; // kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* DIRTY RING -> 1MB in size results in 256M trackable memory */
ret = vm_enable_dirty_ring(vm_fd, dirty_ring_max_size);
if (ret < 0) {
printf("[QEMU-Nyx] Error: Enabling dirty ring (size: %ld) failed\n",
dirty_ring_max_size);
exit(1);
}
dirty_ring_max_size_global = dirty_ring_max_size;
return ret;
}
static void allocate_dirty_ring(int kvm_vcpu, int vm_fd)
{
assert(dirty_ring_size);
if (dirty_ring_size) {
kvm_dirty_gfns = mmap(NULL, dirty_ring_size, PROT_READ | PROT_WRITE,
MAP_SHARED, kvm_vcpu,
PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
if (kvm_dirty_gfns == MAP_FAILED) {
printf("[QEMU-Nyx] Error: Dirty ring mmap failed!\n");
exit(1);
}
}
printf("[QEMU-Nyx] Dirty ring mmap region located at %p\n", kvm_dirty_gfns);
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == 0);
}
/* pre_init operation */
void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd){
dirty_ring_size = check_dirty_ring_size(kvm_fd, vm_fd);
void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd)
{
dirty_ring_size = check_dirty_ring_size(kvm_fd, vm_fd);
}
void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd){
allocate_dirty_ring(kvm_fd, vm_fd);
kvm_dirty_gfns_index = 0;
kvm_dirty_gfns_index_mask = ((dirty_ring_max_size_global/sizeof(struct kvm_dirty_gfn)) - 1);
void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd)
{
allocate_dirty_ring(kvm_fd, vm_fd);
kvm_dirty_gfns_index = 0;
kvm_dirty_gfns_index_mask =
((dirty_ring_max_size_global / sizeof(struct kvm_dirty_gfn)) - 1);
}
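The index mask derived here only works because the ring holds a power-of-two number of entries, so `index & mask` wraps exactly like `index % entries`. A quick sanity sketch with assumed values (the 16-byte entry layout mirrors the kvm_dirty_gfn struct declared in the header further below):

#include <assert.h>
#include <stdint.h>

struct sketch_dirty_gfn { uint32_t flags, slot; uint64_t offset; }; /* 16 bytes */

int main(void)
{
    uint64_t ring_bytes = 1ULL << 20; /* e.g. the 1 MiB ring mentioned above */
    uint64_t entries = ring_bytes / sizeof(struct sketch_dirty_gfn); /* 65536 */
    uint64_t mask = entries - 1;                                     /* 0xFFFF */

    assert((entries & (entries - 1)) == 0); /* power of two, else & skips slots */
    assert((entries + 5) % entries == ((entries + 5) & mask)); /* same wrap */
    return 0;
}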
static inline void dirty_ring_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, uint64_t slot, uint64_t gfn){
static inline void dirty_ring_collect(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist,
uint64_t slot,
uint64_t gfn)
{
/* sanity check */
assert((slot & 0xFFFF0000) == 0);
/* sanity check */
assert((slot&0xFFFF0000) == 0);
slot_t *kvm_region_slot = &self->kvm_region_slots[slot & 0xFFFF];
slot_t* kvm_region_slot = &self->kvm_region_slots[slot&0xFFFF];
if(test_and_set_bit(gfn, (void*)kvm_region_slot->bitmap) == false){
kvm_region_slot->stack[kvm_region_slot->stack_ptr] = gfn;
kvm_region_slot->stack_ptr++;
}
if (test_and_set_bit(gfn, (void *)kvm_region_slot->bitmap) == false) {
kvm_region_slot->stack[kvm_region_slot->stack_ptr] = gfn;
kvm_region_slot->stack_ptr++;
}
}
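The collect path above pairs a bitmap (constant-time dedup via test_and_set_bit) with a stack (later iteration touches only the dirty set, not all of guest RAM). A standalone sketch of the same idiom, assuming nothing from QEMU and using a non-atomic stand-in for test_and_set_bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGES 1024

static uint8_t  bitmap[SKETCH_PAGES / 8];
static uint64_t stack[SKETCH_PAGES];
static uint64_t stack_ptr;

static bool sketch_test_and_set(uint64_t bit)
{
    bool was_set = bitmap[bit / 8] & (1u << (bit % 8));
    bitmap[bit / 8] |= (1u << (bit % 8));
    return was_set;
}

static void collect(uint64_t gfn)
{
    if (!sketch_test_and_set(gfn)) {
        stack[stack_ptr++] = gfn; /* each gfn is pushed at most once */
    }
}

int main(void)
{
    collect(7); collect(7); collect(42); /* gfn 7 reported twice */
    printf("unique dirty pages: %lu\n", (unsigned long)stack_ptr); /* 2 */
    return 0;
}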
static void dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, int vm_fd){
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;
static void dirty_ring_flush_and_collect(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist,
int vm_fd)
{
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;
while(true){
while (true) {
entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];
entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];
if ((entry->flags & 0x3) == 0) {
break;
}
if((entry->flags & 0x3) == 0){
break;
}
if ((entry->flags & 0x1) == 1) {
dirty_ring_collect(self, shadow_memory_state, blocklist, entry->slot,
entry->offset);
cleared++;
entry->flags |= 0x2; // reset dirty entry
} else {
printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx "
"{ERROR}\n",
entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
if((entry->flags & 0x1) == 1){
dirty_ring_collect(self, shadow_memory_state, blocklist, entry->slot, entry->offset);
cleared++;
entry->flags |= 0x2; // reset dirty entry
}
else{
printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
kvm_dirty_gfns_index++;
}
kvm_dirty_gfns_index++;
}
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == cleared);
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == cleared);
}
static void dirty_ring_flush(int vm_fd){
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;
static void dirty_ring_flush(int vm_fd)
{
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;
while(true){
while (true) {
entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];
entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];
if ((entry->flags & 0x3) == 0) {
break;
}
if((entry->flags & 0x3) == 0){
break;
}
if ((entry->flags & 0x1) == 1) {
cleared++;
entry->flags |= 0x2; // reset dirty entry
} else {
printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx "
"{ERROR}\n",
entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
if((entry->flags & 0x1) == 1){
cleared++;
entry->flags |= 0x2; // reset dirty entry
}
else{
printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
kvm_dirty_gfns_index++;
}
kvm_dirty_gfns_index++;
}
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == cleared);
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == cleared);
}
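Both flush variants above walk the same three-state protocol on each ring entry: flags 0x0 means the slot was never produced (stop harvesting), 0x1 means KVM marked the page dirty, and OR-ing in 0x2 hands the entry back so KVM_RESET_DIRTY_RINGS can recycle it. A condensed sketch of that protocol (names and the 16-byte layout are stand-ins; the real code above additionally treats any state other than dirty as fatal):

#include <stdint.h>

enum sketch_gfn_state {
    GFN_EMPTY     = 0x0, /* never produced: end of harvestable entries   */
    GFN_DIRTY     = 0x1, /* produced by KVM, ready to harvest            */
    GFN_HARVESTED = 0x2, /* consumed; reclaimed by KVM_RESET_DIRTY_RINGS */
};

struct sketch_gfn { uint32_t flags, slot; uint64_t offset; };

/* Harvest until the first non-dirty slot; returns how many entries must
 * be acknowledged via the KVM_RESET_DIRTY_RINGS ioctl afterwards. */
static int sketch_harvest(struct sketch_gfn *ring, uint32_t *idx, uint32_t mask)
{
    int cleared = 0;
    while ((ring[*idx & mask].flags & 0x3) == GFN_DIRTY) {
        ring[*idx & mask].flags |= GFN_HARVESTED;
        cleared++;
        (*idx)++;
    }
    return cleared;
}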
/* init operation */
nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){
nyx_dirty_ring_t* self = malloc(sizeof(nyx_dirty_ring_t));
memset(self, 0, sizeof(nyx_dirty_ring_t));
nyx_dirty_ring_t *nyx_dirty_ring_init(shadow_memory_t *shadow_memory)
{
nyx_dirty_ring_t *self = malloc(sizeof(nyx_dirty_ring_t));
memset(self, 0, sizeof(nyx_dirty_ring_t));
assert(kvm_state);
assert(kvm_state);
KVMMemoryListener *kml = kvm_get_kml(0);
KVMSlot *mem;
KVMMemoryListener *kml = kvm_get_kml(0);
KVMSlot *mem;
for (int i = 0; i < kvm_get_max_memslots(); i++) {
mem = &kml->slots[i];
for (int i = 0; i < kvm_get_max_memslots(); i++) {
mem = &kml->slots[i];
if(mem->start_addr == 0 && mem->memory_size == 0){
break;
}
if (mem->start_addr == 0 && mem->memory_size == 0) {
break;
}
self->kvm_region_slots_num++;
}
self->kvm_region_slots_num++;
}
self->kvm_region_slots = malloc(sizeof(slot_t) * self->kvm_region_slots_num);
memset(self->kvm_region_slots, 0, sizeof(slot_t) * self->kvm_region_slots_num);
self->kvm_region_slots = malloc(sizeof(slot_t) * self->kvm_region_slots_num);
memset(self->kvm_region_slots, 0, sizeof(slot_t) * self->kvm_region_slots_num);
for (int i = 0; i < kvm_get_max_memslots(); i++) {
mem = &kml->slots[i];
for (int i = 0; i < kvm_get_max_memslots(); i++) {
mem = &kml->slots[i];
if(mem->start_addr == 0 && mem->memory_size == 0){
break;
}
if (mem->start_addr == 0 && mem->memory_size == 0) {
break;
}
self->kvm_region_slots[i].enabled = (mem->flags&KVM_MEM_READONLY) == 0;
self->kvm_region_slots[i].bitmap = malloc(BITMAP_SIZE(mem->memory_size));
self->kvm_region_slots[i].stack = malloc(DIRTY_STACK_SIZE(mem->memory_size));
self->kvm_region_slots[i].enabled = (mem->flags & KVM_MEM_READONLY) == 0;
self->kvm_region_slots[i].bitmap = malloc(BITMAP_SIZE(mem->memory_size));
self->kvm_region_slots[i].stack = malloc(DIRTY_STACK_SIZE(mem->memory_size));
memset(self->kvm_region_slots[i].bitmap, 0, BITMAP_SIZE(mem->memory_size));
memset(self->kvm_region_slots[i].stack, 0, DIRTY_STACK_SIZE(mem->memory_size));
memset(self->kvm_region_slots[i].bitmap, 0, BITMAP_SIZE(mem->memory_size));
memset(self->kvm_region_slots[i].stack, 0, DIRTY_STACK_SIZE(mem->memory_size));
self->kvm_region_slots[i].bitmap_size = BITMAP_SIZE(mem->memory_size);
self->kvm_region_slots[i].bitmap_size = BITMAP_SIZE(mem->memory_size);
self->kvm_region_slots[i].stack_ptr = 0;
self->kvm_region_slots[i].stack_ptr = 0;
if(self->kvm_region_slots[i].enabled){
bool ram_region_found = false;
for(int j = 0; j < shadow_memory->ram_regions_num; j++){
if (self->kvm_region_slots[i].enabled) {
bool ram_region_found = false;
for (int j = 0; j < shadow_memory->ram_regions_num; j++) {
if (FAST_IN_RANGE(mem->start_addr, shadow_memory->ram_regions[j].base,
(shadow_memory->ram_regions[j].base +
shadow_memory->ram_regions[j].size)))
{
assert(FAST_IN_RANGE((mem->start_addr + mem->memory_size - 1),
shadow_memory->ram_regions[j].base,
(shadow_memory->ram_regions[j].base +
shadow_memory->ram_regions[j].size)));
if(FAST_IN_RANGE(mem->start_addr, shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size))){
assert(FAST_IN_RANGE((mem->start_addr+mem->memory_size-1), shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size)));
self->kvm_region_slots[i].region_id = j;
self->kvm_region_slots[i].region_offset = mem->start_addr - shadow_memory->ram_regions[j].base;
ram_region_found = true;
break;
}
}
assert(ram_region_found);
}
}
self->kvm_region_slots[i].region_id = j;
self->kvm_region_slots[i].region_offset =
mem->start_addr - shadow_memory->ram_regions[j].base;
ram_region_found = true;
break;
}
}
assert(ram_region_found);
}
}
#ifdef DEBUG__PRINT_DIRTY_RING
for(int i = 0; i < self->kvm_region_slots_num; i++){
printf("[%d].enabled = %d\n", i, self->kvm_region_slots[i].enabled);
printf("[%d].bitmap = %p\n", i, self->kvm_region_slots[i].bitmap);
printf("[%d].stack = %p\n", i, self->kvm_region_slots[i].stack);
printf("[%d].stack_ptr = %ld\n", i, self->kvm_region_slots[i].stack_ptr);
if(self->kvm_region_slots[i].enabled){
printf("[%d].region_id = %d\n", i, self->kvm_region_slots[i].region_id);
printf("[%d].region_offset = 0x%lx\n", i, self->kvm_region_slots[i].region_offset);
}
else{
printf("[%d].region_id = -\n", i);
printf("[%d].region_offset = -\n", i);
}
}
for (int i = 0; i < self->kvm_region_slots_num; i++) {
printf("[%d].enabled = %d\n", i, self->kvm_region_slots[i].enabled);
printf("[%d].bitmap = %p\n", i, self->kvm_region_slots[i].bitmap);
printf("[%d].stack = %p\n", i, self->kvm_region_slots[i].stack);
printf("[%d].stack_ptr = %ld\n", i, self->kvm_region_slots[i].stack_ptr);
if (self->kvm_region_slots[i].enabled) {
printf("[%d].region_id = %d\n", i,
self->kvm_region_slots[i].region_id);
printf("[%d].region_offset = 0x%lx\n", i,
self->kvm_region_slots[i].region_offset);
} else {
printf("[%d].region_id = -\n", i);
printf("[%d].region_offset = -\n", i);
}
}
#endif
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
return self;
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
return self;
}
static uint32_t restore_memory(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
uint32_t num_dirty_pages = 0;
void* host_addr = NULL;
void* snapshot_addr = NULL;
uint64_t physical_addr = 0;
uint64_t gfn = 0;
uint64_t entry_offset_addr = 0;
static uint32_t restore_memory(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
uint32_t num_dirty_pages = 0;
void *host_addr = NULL;
void *snapshot_addr = NULL;
uint64_t physical_addr = 0;
uint64_t gfn = 0;
uint64_t entry_offset_addr = 0;
for(uint8_t j = 0; j < self->kvm_region_slots_num; j++){
slot_t* kvm_region_slot = &self->kvm_region_slots[j];
if(kvm_region_slot->enabled && kvm_region_slot->stack_ptr){
for(uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++){
gfn = kvm_region_slot->stack[i];
for (uint8_t j = 0; j < self->kvm_region_slots_num; j++) {
slot_t *kvm_region_slot = &self->kvm_region_slots[j];
if (kvm_region_slot->enabled && kvm_region_slot->stack_ptr) {
for (uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++) {
gfn = kvm_region_slot->stack[i];
entry_offset_addr = kvm_region_slot->region_offset + (gfn<<12);
entry_offset_addr = kvm_region_slot->region_offset + (gfn << 12);
physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr;
physical_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id].base +
entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
continue;
}
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == true)
{
continue;
}
host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr;
if(shadow_memory_state->incremental_enabled){
snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr;
}
else{
snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].snapshot_region_ptr + entry_offset_addr;
}
host_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.host_region_ptr +
entry_offset_addr;
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
if (shadow_memory_state->incremental_enabled) {
snapshot_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.incremental_region_ptr +
entry_offset_addr;
} else {
snapshot_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.snapshot_region_ptr +
entry_offset_addr;
}
clear_bit(gfn, (void*)kvm_region_slot->bitmap);
num_dirty_pages++;
}
kvm_region_slot->stack_ptr = 0;
}
}
return num_dirty_pages;
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
clear_bit(gfn, (void *)kvm_region_slot->bitmap);
num_dirty_pages++;
}
kvm_region_slot->stack_ptr = 0;
}
}
return num_dirty_pages;
}
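A worked example of the address arithmetic in the loop above, with all values hypothetical: a KVM slot starting 0 bytes into shadow region 1, whose region base is 0x100000000, reporting gfn 0x23.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t region_base   = 0x100000000ULL; /* shadow region base (GPA) */
    uint64_t region_offset = 0x0;            /* slot start within region */
    uint64_t gfn           = 0x23;           /* as reported by the ring  */

    /* the same offset indexes the host, snapshot, and incremental views */
    uint64_t entry_offset_addr = region_offset + (gfn << 12);
    uint64_t physical_addr     = region_base + entry_offset_addr;

    assert(entry_offset_addr == 0x23000);
    assert(physical_addr == 0x100023000ULL);
    return 0;
}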
static void save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void* host_addr = NULL;
void* incremental_addr = NULL;
uint64_t physical_addr = 0;
uint64_t gfn = 0;
uint64_t entry_offset_addr = 0;
static void save_root_pages(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
void *host_addr = NULL;
void *incremental_addr = NULL;
uint64_t physical_addr = 0;
uint64_t gfn = 0;
uint64_t entry_offset_addr = 0;
for(uint8_t j = 0; j < self->kvm_region_slots_num; j++){
slot_t* kvm_region_slot = &self->kvm_region_slots[j];
if(kvm_region_slot->enabled && kvm_region_slot->stack_ptr){
for(uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++){
gfn = kvm_region_slot->stack[i];
for (uint8_t j = 0; j < self->kvm_region_slots_num; j++) {
slot_t *kvm_region_slot = &self->kvm_region_slots[j];
if (kvm_region_slot->enabled && kvm_region_slot->stack_ptr) {
for (uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++) {
gfn = kvm_region_slot->stack[i];
entry_offset_addr = kvm_region_slot->region_offset + (gfn<<12);
entry_offset_addr = kvm_region_slot->region_offset + (gfn << 12);
physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr;
physical_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id].base +
entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
continue;
}
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == true)
{
continue;
}
host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr;
incremental_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr;
host_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.host_region_ptr +
entry_offset_addr;
incremental_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.incremental_region_ptr +
entry_offset_addr;
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, kvm_region_slot->region_id);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
shadow_memory_track_dirty_root_pages(shadow_memory_state,
entry_offset_addr,
kvm_region_slot->region_id);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
clear_bit(gfn, (void*)kvm_region_slot->bitmap);
}
kvm_region_slot->stack_ptr = 0;
}
}
clear_bit(gfn, (void *)kvm_region_slot->bitmap);
}
kvm_region_slot->stack_ptr = 0;
}
}
}
uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
return restore_memory(self, shadow_memory_state, blocklist);
uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist,
kvm_get_vm_fd(kvm_state));
return restore_memory(self, shadow_memory_state, blocklist);
}
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
save_root_pages(self, shadow_memory_state, blocklist);
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist,
kvm_get_vm_fd(kvm_state));
save_root_pages(self, shadow_memory_state, blocklist);
}
void nyx_snapshot_nyx_dirty_ring_flush(void){
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
void nyx_snapshot_nyx_dirty_ring_flush(void)
{
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
}
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist,
kvm_get_vm_fd(kvm_state));
}
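Taken together, the functions in this file imply the following call order; this sequencing is inferred from the code and call sites above, not stated anywhere in the commit itself:

/*
 * Inferred dirty-ring lifecycle (sketch):
 *
 *   nyx_dirty_ring_early_init(kvm_fd, vm_fd); // probe + enable the capability
 *   // ... KVM_CREATE_VCPU ...
 *   nyx_dirty_ring_pre_init(vcpu_fd, vm_fd);  // mmap ring, derive index mask
 *   nyx_dirty_ring_t *ring = nyx_dirty_ring_init(shadow_memory);
 *
 *   // per fuzzing iteration:
 *   nyx_snapshot_nyx_dirty_ring_restore(ring, shadow, blocklist);
 *
 *   // when promoting the current state to an incremental snapshot root:
 *   nyx_snapshot_nyx_dirty_ring_save_root_pages(ring, shadow, blocklist);
 *
 *   // when pending entries should be discarded rather than collected:
 *   nyx_snapshot_nyx_dirty_ring_flush();
 */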

View File

@@ -1,30 +1,30 @@
#pragma once
#pragma once
#include <stdint.h>
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include <stdint.h>
struct kvm_dirty_gfn {
uint32_t flags;
uint32_t slot;
uint64_t offset;
uint32_t flags;
uint32_t slot;
uint64_t offset;
};
typedef struct slot_s{
bool enabled; /* set if slot is not marked as read-only */
typedef struct slot_s {
bool enabled; /* set if slot is not marked as read-only */
uint8_t region_id; /* shadow_memory region id */
uint64_t region_offset; /* shadow_memory region offset*/
uint8_t region_id; /* shadow_memory region id */
uint64_t region_offset; /* shadow_memory region offset*/
void* bitmap;
void *bitmap;
uint64_t bitmap_size; // remove me later
uint64_t* stack;
uint64_t stack_ptr;
uint64_t bitmap_size; // remove me later
uint64_t *stack;
uint64_t stack_ptr;
} slot_t;
typedef struct nyx_dirty_ring_s{
slot_t* kvm_region_slots;
typedef struct nyx_dirty_ring_s {
slot_t *kvm_region_slots;
uint8_t kvm_region_slots_num;
} nyx_dirty_ring_t;
@@ -35,12 +35,18 @@ void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd);
/* must be called right after KVM_CREATE_VCPU */
void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd);
nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory);
nyx_dirty_ring_t *nyx_dirty_ring_init(shadow_memory_t *shadow_memory);
uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
void nyx_snapshot_nyx_dirty_ring_flush(void);
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);

View File

@@ -1,10 +1,10 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"
#include "nyx/memory_access.h"
@@ -16,39 +16,41 @@
#include "nyx/snapshot/memory/nyx_fdl_user.h"
/* debug option for the FDL constructor */
//#define DEBUG_VMX_FDL_ALLOC
// #define DEBUG_VMX_FDL_ALLOC
/* additional output to debug the FDL restore operation */
//#define SHOW_NUM_DIRTY_PAGES
// #define SHOW_NUM_DIRTY_PAGES
/* option to include restore of VRAM memory */
//#define RESET_VRAM
//#define DEBUG_FDL_VRAM
nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
// #define RESET_VRAM
// #define DEBUG_FDL_VRAM
nyx_fdl_t *nyx_fdl_init(shadow_memory_t *shadow_memory)
{
static bool fdl_created = false;
/* not sure if we're able to create another FDL instance -> probably not */
assert(fdl_created == false);
fdl_created = true;
nyx_fdl_t* self = malloc(sizeof(nyx_fdl_t));
nyx_fdl_t *self = malloc(sizeof(nyx_fdl_t));
memset(self, 0, sizeof(nyx_fdl_t));
int ret;
CPUState* cpu = qemu_get_cpu(0);
int ret;
CPUState *cpu = qemu_get_cpu(0);
kvm_cpu_synchronize_state(cpu);
struct fdl_conf configuration;
struct fdl_conf configuration;
assert(kvm_state);
self->vmx_fdl_fd = kvm_vm_ioctl(kvm_state, KVM_VMX_FDL_SETUP_FD, (unsigned long)0);
configuration.num = 0;
for(uint8_t i = 0; i < shadow_memory->ram_regions_num; i++){
configuration.areas[configuration.num].base_address = shadow_memory->ram_regions[i].base;
configuration.areas[configuration.num].size = shadow_memory->ram_regions[i].size;
for (uint8_t i = 0; i < shadow_memory->ram_regions_num; i++) {
configuration.areas[configuration.num].base_address =
shadow_memory->ram_regions[i].base;
configuration.areas[configuration.num].size =
shadow_memory->ram_regions[i].size;
configuration.num++;
}
@@ -58,19 +60,24 @@ nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
#ifdef DEBUG_VMX_FDL_ALLOC
printf("KVM_VMX_FDL_SET: %d\n", ret);
printf("configuration.mmap_size = 0x%lx\n", configuration.mmap_size);
for(uint8_t i = 0; i < configuration.num; i++){
printf("configuration.areas[%d].mmap_bitmap_offset = 0x%lx\n", i, configuration.areas[i].mmap_bitmap_offset);
printf("configuration.areas[%d].mmap_stack_offset = 0x%lx\n", i, configuration.areas[i].mmap_stack_offset);
for (uint8_t i = 0; i < configuration.num; i++) {
printf("configuration.areas[%d].mmap_bitmap_offset = 0x%lx\n", i,
configuration.areas[i].mmap_bitmap_offset);
printf("configuration.areas[%d].mmap_stack_offset = 0x%lx\n", i,
configuration.areas[i].mmap_stack_offset);
}
#endif
self->vmx_fdl_mmap = mmap(NULL, configuration.mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, self->vmx_fdl_fd, 0);
self->vmx_fdl_mmap = mmap(NULL, configuration.mmap_size, PROT_READ | PROT_WRITE,
MAP_SHARED, self->vmx_fdl_fd, 0);
assert(self->vmx_fdl_mmap != (void*)0xFFFFFFFFFFFFFFFF);
assert(self->vmx_fdl_mmap != (void *)0xFFFFFFFFFFFFFFFF);
for(uint8_t i = 0; i < configuration.num; i++){
self->entry[i].stack = self->vmx_fdl_mmap + configuration.areas[i].mmap_stack_offset;
self->entry[i].bitmap = self->vmx_fdl_mmap + configuration.areas[i].mmap_bitmap_offset;
for (uint8_t i = 0; i < configuration.num; i++) {
self->entry[i].stack =
self->vmx_fdl_mmap + configuration.areas[i].mmap_stack_offset;
self->entry[i].bitmap =
self->vmx_fdl_mmap + configuration.areas[i].mmap_bitmap_offset;
#ifdef DEBUG_VMX_FDL_ALLOC
printf("fdl_stacks[%d] -> %p\n", i, self->entry[i].stack);
@@ -84,9 +91,9 @@ nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
memset(&result, 0, sizeof(struct fdl_result));
ret = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
#ifdef DEBUG_VMX_FDL_ALLOC
#ifdef DEBUG_VMX_FDL_ALLOC
printf("result: %d\n", result.num);
for(uint8_t i = 0; i < result.num; i++){
for (uint8_t i = 0; i < result.num; i++) {
printf("result.values[%d]: %ld\n", i, result.values[i]);
}
#endif
@@ -95,99 +102,110 @@ nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
}
/* restore operation */
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
uint32_t num_dirty_pages = 0;
void* current_region = NULL;
void *current_region = NULL;
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
assert(!res);
//nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist);
// nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist);
for(uint8_t i = 0; i < result.num; i++){
for (uint8_t i = 0; i < result.num; i++) {
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i],
(0x1000 * result.values[i]) >> 0x10);
#endif
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
if (shadow_memory_state->incremental_enabled) {
current_region =
shadow_memory_state->ram_regions[i].incremental_region_ptr;
} else {
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t j = 0; j < result.values[i]; j++){
for (uint64_t j = 0; j < result.values[i]; j++) {
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* snapshot_addr = current_region + entry_offset_addr;
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *snapshot_addr = current_region + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) ==
true)
{
#ifdef DEBUG_VERFIY_BITMAP
if(!is_black_listed_addr(self, entry_offset_addr)){
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
if (!is_black_listed_addr(self, entry_offset_addr)) {
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__,
entry_offset_addr);
abort();
}
#endif
continue; // blacklisted page
continue; // blacklisted page
}
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
clear_bit(entry_offset_addr >> 12, (void *)self->entry[i].bitmap);
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
num_dirty_pages++;
}
}
#ifdef RESET_VRAM
//nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state);
// nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state);
#endif
return num_dirty_pages;
}
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
assert(!res);
for(uint8_t i = 0; i < result.num; i++){
for (uint8_t i = 0; i < result.num; i++) {
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i],
(0x1000 * result.values[i]) >> 0x10);
#endif
for(uint64_t j = 0; j < result.values[i]; j++){
for (uint64_t j = 0; j < result.values[i]; j++) {
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *incremental_addr =
shadow_memory_state->ram_regions[i].incremental_region_ptr +
entry_offset_addr;
if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) ==
true)
{
#ifdef DEBUG_VERFIY_BITMAP
if(!is_black_listed_addr(self, entry_offset_addr)){
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
if (!is_black_listed_addr(self, entry_offset_addr)) {
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__,
entry_offset_addr);
abort();
}
#endif
continue; // skip blacklisted page
}
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i);
clear_bit(entry_offset_addr >> 12, (void *)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state,
entry_offset_addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}
}
}

View File

@@ -1,61 +1,65 @@
#pragma once
#pragma once
#include <stdint.h>
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#define STATE_BUFFER 0x8000000 /* up to 128MB */
#define STATE_BUFFER 0x8000000 /* up to 128MB */
#define USER_FDL_SLOTS 0x400000 /* fix this later */
#define KVM_VMX_FDL_SETUP_FD _IO(KVMIO, 0xe5)
#define KVM_VMX_FDL_SET _IOW(KVMIO, 0xe6, __u64)
#define KVM_VMX_FDL_FLUSH _IO(KVMIO, 0xe7)
#define KVM_VMX_FDL_GET_INDEX _IOR(KVMIO, 0xe8, __u64)
#define KVM_VMX_FDL_SETUP_FD _IO(KVMIO, 0xe5)
#define KVM_VMX_FDL_SET _IOW(KVMIO, 0xe6, __u64)
#define KVM_VMX_FDL_FLUSH _IO(KVMIO, 0xe7)
#define KVM_VMX_FDL_GET_INDEX _IOR(KVMIO, 0xe8, __u64)
#define FAST_IN_RANGE(address, start, end) (address < end && address >= start)
#define FDL_MAX_AREAS 8
struct fdl_area{
uint64_t base_address;
uint64_t size;
uint64_t mmap_bitmap_offset;
uint64_t mmap_stack_offset;
struct fdl_area {
uint64_t base_address;
uint64_t size;
uint64_t mmap_bitmap_offset;
uint64_t mmap_stack_offset;
uint64_t mmap_bitmap_size;
uint64_t mmap_stack_size;
uint64_t mmap_stack_size;
};
struct fdl_conf{
uint8_t num;
uint64_t mmap_size;
struct fdl_area areas[FDL_MAX_AREAS];
struct fdl_conf {
uint8_t num;
uint64_t mmap_size;
struct fdl_area areas[FDL_MAX_AREAS];
};
struct fdl_result{
uint8_t num;
uint64_t values[FDL_MAX_AREAS];
struct fdl_result {
uint8_t num;
uint64_t values[FDL_MAX_AREAS];
};
typedef struct nyx_fdl_s{
typedef struct nyx_fdl_s {
/* vmx_fdl file descriptor */
int vmx_fdl_fd;
/* mmap mapping of fdl data -> might be useful for destructor */
void* vmx_fdl_mmap;
void *vmx_fdl_mmap;
struct {
uint64_t* stack;
uint8_t* bitmap;
}entry[FDL_MAX_AREAS];
uint64_t *stack;
uint8_t *bitmap;
} entry[FDL_MAX_AREAS];
uint8_t num;
}nyx_fdl_t;
nyx_fdl_t* nyx_fdl_init(shadow_memory_t* self);
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
} nyx_fdl_t;
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
nyx_fdl_t *nyx_fdl_init(shadow_memory_t *self);
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
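The header above defines a four-ioctl protocol against the KVM-PT module; nyx_fdl_init() in the previous file exercises it roughly as follows (a sketch inferred from that code, error handling omitted):

/*
 * Inferred FDL ioctl sequence (sketch):
 *
 *   fd = kvm_vm_ioctl(kvm_state, KVM_VMX_FDL_SETUP_FD, 0); // anonymous fd
 *   ioctl(fd, KVM_VMX_FDL_SET, &conf);       // register RAM areas; kernel
 *                                            // fills in the mmap layout
 *   ptr = mmap(NULL, conf.mmap_size, PROT_READ | PROT_WRITE,
 *              MAP_SHARED, fd, 0);           // shared dirty stacks + bitmaps
 *   ioctl(fd, KVM_VMX_FDL_GET_INDEX, &res);  // per-area dirty page counts
 *   ioctl(fd, KVM_VMX_FDL_FLUSH, 0);         // not exercised in this hunk
 */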

View File

@@ -1,68 +1,72 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#define REALLOC_SIZE 0x8000
//#define DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST
// #define DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST
snapshot_page_blocklist_t* snapshot_page_blocklist_init(void){
snapshot_page_blocklist_t *snapshot_page_blocklist_init(void)
{
snapshot_page_blocklist_t *self = malloc(sizeof(snapshot_page_blocklist_t));
snapshot_page_blocklist_t* self = malloc(sizeof(snapshot_page_blocklist_t));
uint64_t ram_size = get_ram_size();
self->phys_area_size = ram_size <= MEM_SPLIT_START ?
ram_size :
ram_size + (MEM_SPLIT_END - MEM_SPLIT_START);
uint64_t ram_size = get_ram_size();
self->phys_area_size = ram_size <= MEM_SPLIT_START ? ram_size : ram_size + (MEM_SPLIT_END-MEM_SPLIT_START);
self->phys_bitmap = malloc(BITMAP_SIZE(self->phys_area_size));
memset(self->phys_bitmap, 0x0, BITMAP_SIZE(self->phys_area_size));
self->phys_bitmap = malloc(BITMAP_SIZE(self->phys_area_size));
memset(self->phys_bitmap, 0x0, BITMAP_SIZE(self->phys_area_size));
if (ram_size > MEM_SPLIT_START) {
memset(self->phys_bitmap + BITMAP_SIZE(MEM_SPLIT_START), 0xff,
BITMAP_SIZE((MEM_SPLIT_END - MEM_SPLIT_START)));
}
if(ram_size > MEM_SPLIT_START){
memset(self->phys_bitmap+BITMAP_SIZE(MEM_SPLIT_START), 0xff, BITMAP_SIZE((MEM_SPLIT_END-MEM_SPLIT_START)));
}
self->pages_num = 0;
self->pages_size = REALLOC_SIZE;
self->pages = malloc(sizeof(uint64_t) * REALLOC_SIZE);
self->pages_num = 0;
self->pages_size = REALLOC_SIZE;
self->pages = malloc(sizeof(uint64_t) * REALLOC_SIZE);
return self;
return self;
}
void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr){
if(phys_addr == -1){
fprintf(stderr, "ERROR %s: phys_addr=%lx\n", __func__, phys_addr);
return;
}
assert(self != NULL);
void snapshot_page_blocklist_add(snapshot_page_blocklist_t *self, uint64_t phys_addr)
{
if (phys_addr == -1) {
fprintf(stderr, "ERROR %s: phys_addr=%lx\n", __func__, phys_addr);
return;
}
assert(self != NULL);
assert(phys_addr < self->phys_area_size);
assert(phys_addr < self->phys_area_size);
if(self->pages_num >= self->pages_size){
self->pages_size += REALLOC_SIZE;
self->pages = realloc(self->pages, sizeof(uint64_t) * self->pages_size);
}
if (self->pages_num >= self->pages_size) {
self->pages_size += REALLOC_SIZE;
self->pages = realloc(self->pages, sizeof(uint64_t) * self->pages_size);
}
self->pages[self->pages_num] = phys_addr;
self->pages_num++;
self->pages[self->pages_num] = phys_addr;
self->pages_num++;
/* check if bit is empty */
assert(test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) == 0);
/* check if bit is empty */
assert(test_bit(phys_addr >> 12, (const unsigned long *)self->phys_bitmap) == 0);
/* set bit for lookup */
set_bit(phys_addr>>12, (unsigned long *)self->phys_bitmap);
/* set bit for lookup */
set_bit(phys_addr >> 12, (unsigned long *)self->phys_bitmap);
#ifdef DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST
printf("%s: %lx\n", __func__, phys_addr);
printf("%s: %lx\n", __func__, phys_addr);
#endif
}

View File

@ -1,32 +1,34 @@
#pragma once
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "nyx/snapshot/memory/shadow_memory.h"
#include <stdbool.h>
#include <stdint.h>
typedef struct snapshot_page_blocklist_s{
typedef struct snapshot_page_blocklist_s {
/* total number of blocklisted page frames */
uint64_t pages_num;
/* total number of blocklisted page frames */
uint64_t pages_num;
/* lookup array */
uint64_t *pages;
/* lookup array */
uint64_t* pages;
/* current size of our array */
uint64_t pages_size;
/* current size of our array */
uint64_t pages_size;
/* lookup bitmap of guest's physical memory layout (PCI-area between 3GB-4GB is set by default) */
uint8_t *phys_bitmap;
/* lookup bitmap of guest's physical memory layout (PCI-area between 3GB-4GB is set by default) */
uint8_t* phys_bitmap;
/* area of guest's physical memory (including RAM + PCI-hole) */
uint64_t phys_area_size;
} snapshot_page_blocklist_t;
/* area of guest's physical memory (including RAM + PCI-hole) */
uint64_t phys_area_size;
}snapshot_page_blocklist_t;
void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr);
void snapshot_page_blocklist_add(snapshot_page_blocklist_t *self, uint64_t phys_addr);
/* returns true if phys_addr is on the blocklist */
static inline bool snapshot_page_blocklist_check_phys_addr(snapshot_page_blocklist_t* self, uint64_t phys_addr){
return phys_addr < self->phys_area_size && test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) != 0;
static inline bool snapshot_page_blocklist_check_phys_addr(
snapshot_page_blocklist_t *self, uint64_t phys_addr)
{
return phys_addr < self->phys_area_size &&
test_bit(phys_addr >> 12, (const unsigned long *)self->phys_bitmap) != 0;
}
snapshot_page_blocklist_t* snapshot_page_blocklist_init(void);
snapshot_page_blocklist_t *snapshot_page_blocklist_init(void);
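A minimal usage sketch of this API (the physical address is hypothetical; in QEMU-Nyx the init/add calls are made elsewhere in the snapshot setup code):

#include "nyx/snapshot/memory/block_list.h"

void sketch_blocklist_usage(void)
{
    snapshot_page_blocklist_t *bl = snapshot_page_blocklist_init();

    snapshot_page_blocklist_add(bl, 0x1000); /* never restore this frame */

    /* hot path: one bounds check plus one bit test per candidate page */
    if (!snapshot_page_blocklist_check_phys_addr(bl, 0x1000)) {
        /* page would be restored / tracked */
    }
}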

View File

@@ -1,6 +1,6 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "exec/ram_addr.h"
#include "migration/migration.h"
@@ -11,144 +11,200 @@
#include <sys/ioctl.h>
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
#include "nyx/snapshot/memory/shadow_memory.h"
/* debug option */
//#define DEBUG_USER_FDL
// #define DEBUG_USER_FDL
/* init operation */
nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state){
nyx_fdl_user_t* self = malloc(sizeof(nyx_fdl_user_t));
nyx_fdl_user_t *nyx_fdl_user_init(shadow_memory_t *shadow_memory_state)
{
nyx_fdl_user_t *self = malloc(sizeof(nyx_fdl_user_t));
memset(self, 0, sizeof(nyx_fdl_user_t));
/* get rid of that? */
self->num = shadow_memory_state->ram_regions_num;
for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){
self->entry[i].stack = malloc(DIRTY_STACK_SIZE(shadow_memory_state->ram_regions[i].size));
self->entry[i].bitmap = malloc(BITMAP_SIZE(shadow_memory_state->ram_regions[i].size));
for (uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++) {
self->entry[i].stack =
malloc(DIRTY_STACK_SIZE(shadow_memory_state->ram_regions[i].size));
self->entry[i].bitmap =
malloc(BITMAP_SIZE(shadow_memory_state->ram_regions[i].size));
}
return self;
}
/* enable operation */
void nyx_fdl_user_enable(nyx_fdl_user_t* self){
void nyx_fdl_user_enable(nyx_fdl_user_t *self)
{
assert(self);
self->enabled = true;
}
static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t* self){
if(self){
for(uint8_t i = 0; i < self->num; i++){
static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t *self)
{
if (self) {
for (uint8_t i = 0; i < self->num; i++) {
self->entry[i].pos = 0;
}
}
}
/* reset operation */
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
uint32_t num_dirty_pages = 0;
if(self){
void* current_region = NULL;
if (self) {
void *current_region = NULL;
for(uint8_t i = 0; i < self->num; i++){
for (uint8_t i = 0; i < self->num; i++) {
#ifdef DEBUG_USER_FDL
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10);
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos,
(0x1000 * self->entry[i].pos) >> 0x10);
#endif
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
if (shadow_memory_state->incremental_enabled) {
current_region =
shadow_memory_state->ram_regions[i].incremental_region_ptr;
} else {
current_region =
shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t j = 0; j < self->entry[i].pos; j++){
for (uint64_t j = 0; j < self->entry[i].pos; j++) {
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* snapshot_addr = current_region + entry_offset_addr;
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *snapshot_addr = current_region + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == true)
{
continue;
}
#ifdef DEBUG_USER_FDL
printf("%s -> %p <-- %p\n", __func__, host_addr, snapshot_addr);
#endif
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
clear_bit(entry_offset_addr >> 12, (void *)self->entry[i].bitmap);
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
num_dirty_pages++;
}
}
}
nyx_snapshot_user_fdl_reset(self);
return num_dirty_pages;
}
/* set operation (mark pf as dirty) */
void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length){
if(length < 0x1000){
/* set operation (mark pf as dirty) */
void nyx_fdl_user_set(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
nyx_fdl_t *nyx_fdl_state,
uint64_t addr,
uint64_t length)
{
if (length < 0x1000) {
length = 0x1000;
}
if(self && self->enabled && length >= 0x1000){
uint8_t ram_area = 0xff;
if (self && self->enabled && length >= 0x1000) {
uint8_t ram_area = 0xff;
/* optimize this? */
addr = ram_offset_to_address(addr);
switch(MAX_REGIONS-shadow_memory_state->ram_regions_num){
case 0:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[7].base, shadow_memory_state->ram_regions[7].base+(shadow_memory_state->ram_regions[7].size-1)) ? 7 : ram_area;
case 1:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[6].base, shadow_memory_state->ram_regions[6].base+(shadow_memory_state->ram_regions[6].size-1)) ? 6 : ram_area;
case 2:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[5].base, shadow_memory_state->ram_regions[5].base+(shadow_memory_state->ram_regions[5].size-1)) ? 5 : ram_area;
case 3:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[4].base, shadow_memory_state->ram_regions[4].base+(shadow_memory_state->ram_regions[4].size-1)) ? 4 : ram_area;
case 4:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[3].base, shadow_memory_state->ram_regions[3].base+(shadow_memory_state->ram_regions[3].size-1)) ? 3 : ram_area;
case 5:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[2].base, shadow_memory_state->ram_regions[2].base+(shadow_memory_state->ram_regions[2].size-1)) ? 2 : ram_area;
case 6:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[1].base, shadow_memory_state->ram_regions[1].base+(shadow_memory_state->ram_regions[1].size-1)) ? 1 : ram_area;
case 7:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[0].base, shadow_memory_state->ram_regions[0].base+(shadow_memory_state->ram_regions[0].size-1)) ? 0 : ram_area;
default:
break;
}
switch (MAX_REGIONS - shadow_memory_state->ram_regions_num) {
case 0:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[7].base,
shadow_memory_state->ram_regions[7].base +
(shadow_memory_state->ram_regions[7].size - 1)) ?
7 :
ram_area;
case 1:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[6].base,
shadow_memory_state->ram_regions[6].base +
(shadow_memory_state->ram_regions[6].size - 1)) ?
6 :
ram_area;
case 2:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[5].base,
shadow_memory_state->ram_regions[5].base +
(shadow_memory_state->ram_regions[5].size - 1)) ?
5 :
ram_area;
case 3:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[4].base,
shadow_memory_state->ram_regions[4].base +
(shadow_memory_state->ram_regions[4].size - 1)) ?
4 :
ram_area;
case 4:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[3].base,
shadow_memory_state->ram_regions[3].base +
(shadow_memory_state->ram_regions[3].size - 1)) ?
3 :
ram_area;
case 5:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[2].base,
shadow_memory_state->ram_regions[2].base +
(shadow_memory_state->ram_regions[2].size - 1)) ?
2 :
ram_area;
case 6:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[1].base,
shadow_memory_state->ram_regions[1].base +
(shadow_memory_state->ram_regions[1].size - 1)) ?
1 :
ram_area;
case 7:
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[0].base,
shadow_memory_state->ram_regions[0].base +
(shadow_memory_state->ram_regions[0].size - 1)) ?
0 :
ram_area;
default:
break;
}
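/* The cases above fall through on purpose: entering the switch at
 * MAX_REGIONS - ram_regions_num range-checks only the regions that
 * actually exist, from the highest index down to 0, and is equivalent
 * to (sketch):
 *
 *   for (int r = shadow_memory_state->ram_regions_num - 1; r >= 0; r--)
 *       if (FAST_IN_RANGE(addr, base(r), base(r) + size(r) - 1))
 *           ram_area = r;
 */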
if(ram_area == 0xff){
printf("ERROR: %s %lx [%d]\n", __func__, addr, ram_area);
if (ram_area == 0xff) {
printf("ERROR: %s %lx [%d]\n", __func__, addr, ram_area);
abort();
return;
}
return;
}
for(uint64_t offset = 0; offset < length; offset+=0x1000){
for (uint64_t offset = 0; offset < length; offset += 0x1000) {
uint64_t current_addr = (addr + offset) & 0xFFFFFFFFFFFFF000;
uint64_t current_addr = (addr+offset) & 0xFFFFFFFFFFFFF000;
long pfn = (long) ((current_addr-shadow_memory_state->ram_regions[ram_area].base)>>12);
long pfn = (long)((current_addr -
shadow_memory_state->ram_regions[ram_area].base) >>
12);
assert(self->entry[ram_area].bitmap);
/* TODO -> better handling of nyx_fdl_state */
if(!test_bit(pfn, (const unsigned long*)self->entry[ram_area].bitmap)){
set_bit(pfn, (unsigned long*)self->entry[ram_area].bitmap);
if (!test_bit(pfn, (const unsigned long *)self->entry[ram_area].bitmap)) {
set_bit(pfn, (unsigned long *)self->entry[ram_area].bitmap);
self->entry[ram_area].stack[self->entry[ram_area].pos] = current_addr & 0xFFFFFFFFFFFFF000;
self->entry[ram_area].stack[self->entry[ram_area].pos] =
current_addr & 0xFFFFFFFFFFFFF000;
self->entry[ram_area].pos++;
#ifdef DEBUG_USER_FDL
@@ -159,21 +215,30 @@ void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state
}
}
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
for(uint8_t i = 0; i < self->num; i++){
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
for (uint8_t i = 0; i < self->num; i++) {
#ifdef DEBUG_USER_FDL
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10);
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos,
(0x1000 * self->entry[i].pos) >> 0x10);
#endif
for(uint64_t j = 0; j < self->entry[i].pos; j++){
for (uint64_t j = 0; j < self->entry[i].pos; j++) {
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *incremental_addr =
shadow_memory_state->ram_regions[i].incremental_region_ptr +
entry_offset_addr;
if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) ==
true)
{
printf("%s: 0x%lx is dirty\n", __func__, physical_addr);
continue;
}
@@ -181,10 +246,10 @@ void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memo
printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr);
#endif
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i);
clear_bit(entry_offset_addr >> 12, (void *)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state,
entry_offset_addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}
}

View File

@@ -1,30 +1,38 @@
#pragma once
#pragma once
#include <stdint.h>
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include <stdint.h>
#define MAX_REGIONS 8 /* don't */
typedef struct nyx_fdl_user_s{
typedef struct nyx_fdl_user_s {
struct {
uint64_t* stack;
uint8_t* bitmap;
uint64_t pos;
}entry[MAX_REGIONS];
uint64_t *stack;
uint8_t *bitmap;
uint64_t pos;
} entry[MAX_REGIONS];
uint8_t num;
bool enabled;
}nyx_fdl_user_t;
bool enabled;
} nyx_fdl_user_t;
nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state);
nyx_fdl_user_t *nyx_fdl_user_init(shadow_memory_t *shadow_memory_state);
void nyx_fdl_user_enable(nyx_fdl_user_t* self);
void nyx_fdl_user_enable(nyx_fdl_user_t *self);
void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length);
void nyx_fdl_user_set(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
nyx_fdl_t *nyx_fdl_state,
uint64_t addr,
uint64_t length);
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
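/*
 * A hedged usage sketch of the API declared above (call order inferred from
 * the function names; error handling omitted; all identifiers are taken from
 * this header):
 */
static void user_fdl_roundtrip(shadow_memory_t *shadow,
                               nyx_fdl_t *fdl,
                               snapshot_page_blocklist_t *blocklist,
                               uint64_t addr, uint64_t length)
{
    nyx_fdl_user_t *ufdl = nyx_fdl_user_init(shadow);

    nyx_fdl_user_enable(ufdl);
    nyx_fdl_user_set(ufdl, shadow, fdl, addr, length); /* record a dirty range */
    nyx_snapshot_user_fdl_restore(ufdl, shadow, blocklist); /* restore recorded pages */
}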


@@ -1,194 +1,219 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"
#include "nyx/debug.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/shadow_memory.h"
typedef struct fast_reload_dump_head_s{
uint32_t shadow_memory_regions;
uint32_t ram_region_index; // remove
} fast_reload_dump_head_t;
typedef struct fast_reload_dump_head_s {
uint32_t shadow_memory_regions;
uint32_t ram_region_index; // remove
} fast_reload_dump_head_t;
typedef struct fast_reload_dump_entry_s{
uint64_t shadow_memory_offset;
char idstr[256];
} fast_reload_dump_entry_t;
typedef struct fast_reload_dump_entry_s {
uint64_t shadow_memory_offset;
char idstr[256];
} fast_reload_dump_entry_t;
static void shadow_memory_set_incremental_ptrs(shadow_memory_t* self){
for(uint8_t i = 0; i < self->ram_regions_num; i++){
self->ram_regions[i].incremental_region_ptr = self->incremental_ptr + self->ram_regions[i].offset;
static void shadow_memory_set_incremental_ptrs(shadow_memory_t *self)
{
for (uint8_t i = 0; i < self->ram_regions_num; i++) {
self->ram_regions[i].incremental_region_ptr =
self->incremental_ptr + self->ram_regions[i].offset;
}
}
static void shadow_memory_pre_alloc_incremental(shadow_memory_t* self){
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0);
static void shadow_memory_pre_alloc_incremental(shadow_memory_t *self)
{
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE, self->snapshot_ptr_fd, 0);
shadow_memory_set_incremental_ptrs(self);
}
static void shadow_memory_init_generic(shadow_memory_t* self){
self->root_track_pages_num = 0;
static void shadow_memory_init_generic(shadow_memory_t *self)
{
self->root_track_pages_num = 0;
self->root_track_pages_size = 32 << 10;
self->root_track_pages_stack = malloc(sizeof(uint64_t)*self->root_track_pages_size);
self->root_track_pages_stack =
malloc(sizeof(uint64_t) * self->root_track_pages_size);
shadow_memory_pre_alloc_incremental(self);
self->incremental_enabled = false;
}
shadow_memory_t* shadow_memory_init(void){
shadow_memory_t *shadow_memory_init(void)
{
RAMBlock *block;
RAMBlock* block_array[10];
void* snapshot_ptr_offset_array[10];
RAMBlock *block_array[10];
void *snapshot_ptr_offset_array[10];
shadow_memory_t* self = malloc(sizeof(shadow_memory_t));
shadow_memory_t *self = malloc(sizeof(shadow_memory_t));
memset(self, 0x0, sizeof(shadow_memory_t));
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
self->memory_size += block->used_length;
}
self->snapshot_ptr_fd = memfd_create("in_memory_root_snapshot", MFD_CLOEXEC | MFD_ALLOW_SEALING);
self->snapshot_ptr_fd =
memfd_create("in_memory_root_snapshot", MFD_CLOEXEC | MFD_ALLOW_SEALING);
assert(!ftruncate(self->snapshot_ptr_fd, self->memory_size));
fcntl(self->snapshot_ptr_fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
fcntl(self->snapshot_ptr_fd, F_ADD_SEALS,
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE , MAP_SHARED , self->snapshot_ptr_fd, 0);
self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE,
MAP_SHARED, self->snapshot_ptr_fd, 0);
madvise(self->snapshot_ptr, self->memory_size, MADV_RANDOM | MADV_MERGEABLE);
nyx_debug_p(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx", self->snapshot_ptr, self->memory_size);
nyx_debug_p(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx",
self->snapshot_ptr, self->memory_size);
uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset,
block->used_length, block->max_length, block->idstr, block->host);
block_array[i] = block;
memcpy(self->snapshot_ptr+offset, block->host, block->used_length);
snapshot_ptr_offset_array[i++] = self->snapshot_ptr+offset;
memcpy(self->snapshot_ptr + offset, block->host, block->used_length);
snapshot_ptr_offset_array[i++] = self->snapshot_ptr + offset;
offset += block->used_length;
regions_num++;
}
}
for(uint8_t i = 0; i < regions_num; i++){
for (uint8_t i = 0; i < regions_num; i++) {
block = block_array[i];
if(!block->mr->readonly){
if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){
if (!block->mr->readonly) {
if (self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START) {
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr +
self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr =
malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
self->ram_regions_num++;
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].size = block->used_length-MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
else{
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
self->ram_regions_num++;
}
}
shadow_memory_init_generic(self);
return self;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].size = block->used_length - MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset =
(snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr =
block->host + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
snapshot_ptr_offset_array[i] + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
else {
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr + self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
self->ram_regions_num++;
}
}
shadow_memory_init_generic(self);
return self;
}
shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){
RAMBlock *block;
RAMBlock* block_array[10];
void* snapshot_ptr_offset_array[10];
shadow_memory_t* self = malloc(sizeof(shadow_memory_t));
shadow_memory_t *shadow_memory_init_from_snapshot(const char *snapshot_folder,
bool pre_snapshot)
{
RAMBlock *block;
RAMBlock *block_array[10];
void *snapshot_ptr_offset_array[10];
shadow_memory_t *self = malloc(sizeof(shadow_memory_t));
memset(self, 0x0, sizeof(shadow_memory_t));
/* count total memory size */
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
self->memory_size += block->used_length;
}
/* count number of ram regions */
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if(!block->mr->readonly){
if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
if (!block->mr->readonly) {
if (self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START) {
self->ram_regions_num++;
}
self->ram_regions_num++;
}
}
char* path_meta;
char* path_dump;
char *path_meta;
char *path_dump;
assert(asprintf(&path_meta, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1);
assert(asprintf(&path_dump, "%s/fast_snapshot.mem_dump", snapshot_folder) != -1);
fast_reload_dump_head_t head;
fast_reload_dump_head_t head;
FILE* file_mem_meta = fopen (path_meta, "r");
FILE *file_mem_meta = fopen(path_meta, "r");
assert(file_mem_meta != NULL);
assert(fread(&head, sizeof(fast_reload_dump_head_t), 1, file_mem_meta) == 1);
fclose(file_mem_meta);
if(self->ram_regions_num != head.shadow_memory_regions){
nyx_error("Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n", self->ram_regions_num, head.shadow_memory_regions);
if (self->ram_regions_num != head.shadow_memory_regions) {
nyx_error(
"Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n",
self->ram_regions_num, head.shadow_memory_regions);
exit(1);
}
//printf("LOAD -> self->ram_regions_num: %d\n", self->ram_regions_num);
// printf("LOAD -> self->ram_regions_num: %d\n", self->ram_regions_num);
FILE* file_mem_dump = fopen (path_dump, "r");
FILE *file_mem_dump = fopen(path_dump, "r");
assert(file_mem_dump != NULL);
fseek(file_mem_dump, 0L, SEEK_END);
uint64_t file_mem_dump_size = ftell(file_mem_dump);
nyx_debug("guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n", self->memory_size, file_mem_dump_size, path_dump);
nyx_debug("guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n",
self->memory_size, file_mem_dump_size, path_dump);
#define VGA_SIZE (16<<20)
#define VGA_SIZE (16 << 20)
if(self->memory_size != file_mem_dump_size){
if (file_mem_dump_size >= VGA_SIZE){
nyx_error("ERROR: guest size should be %ld MB - set it to %ld MB\n", (file_mem_dump_size-VGA_SIZE)>>20, (self->memory_size-VGA_SIZE)>>20);
if (self->memory_size != file_mem_dump_size) {
if (file_mem_dump_size >= VGA_SIZE) {
nyx_error("ERROR: guest size should be %ld MB - set it to %ld MB\n",
(file_mem_dump_size - VGA_SIZE) >> 20,
(self->memory_size - VGA_SIZE) >> 20);
exit(1);
}
else{
nyx_error("ERROR: guest size: %ld bytes\n", file_mem_dump_size);
} else {
nyx_error("ERROR: guest size: %ld bytes\n", file_mem_dump_size);
exit(1);
}
}
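/*
 * Worked example of the size check above (VGA_SIZE = 16 << 20 = 16 MiB):
 * a fast_snapshot.mem_dump of 0x41000000 bytes (1024 MiB of guest RAM plus
 * the 16 MiB VGA region = 1040 MiB) only matches a guest launched with
 * -m 1024; any other -m value makes self->memory_size differ from
 * file_mem_dump_size and takes the "guest size should be 1024 MB" error path.
 */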
@@ -197,116 +222,137 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, b
fclose(file_mem_dump);
self->snapshot_ptr_fd = open(path_dump, O_RDONLY);
self->snapshot_ptr = mmap(0, self->memory_size, PROT_READ, MAP_SHARED, self->snapshot_ptr_fd, 0);
self->snapshot_ptr =
mmap(0, self->memory_size, PROT_READ, MAP_SHARED, self->snapshot_ptr_fd, 0);
assert(self->snapshot_ptr != (void*)-1);
assert(self->snapshot_ptr != (void *)-1);
madvise(self->snapshot_ptr, self->memory_size, MADV_MERGEABLE);
uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset,
block->used_length, block->max_length, block->idstr, block->host);
block_array[i] = block;
snapshot_ptr_offset_array[i++] = self->snapshot_ptr+offset;
block_array[i] = block;
snapshot_ptr_offset_array[i++] = self->snapshot_ptr + offset;
offset += block->used_length;
regions_num++;
}
self->ram_regions_num = 0;
for(uint8_t i = 0; i < regions_num; i++){
for (uint8_t i = 0; i < regions_num; i++) {
block = block_array[i];
if(!block->mr->readonly){
if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){
if (!block->mr->readonly) {
if (self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START) {
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr +
self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr =
malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
self->ram_regions_num++;
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].size = block->used_length-MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
else{
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].size = block->used_length - MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset =
(snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr =
block->host + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
snapshot_ptr_offset_array[i] + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
else {
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr + self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
self->ram_regions_num++;
}
}
self->ram_regions_num++;
}
}
#ifdef DEBUG_SHADOW_MEMCPY_VERSION
/* memcpy version */
for(uint8_t i = 0; i < self->ram_regions_num; i++){
void* host_addr = self->ram_regions[i].host_region_ptr + 0;
void* snapshot_addr = self->ram_regions[i].snapshot_region_ptr + 0;
memcpy(host_addr, snapshot_addr, self->ram_regions[i].size);
}
/* memcpy version */
for (uint8_t i = 0; i < self->ram_regions_num; i++) {
void *host_addr = self->ram_regions[i].host_region_ptr + 0;
void *snapshot_addr = self->ram_regions[i].snapshot_region_ptr + 0;
memcpy(host_addr, snapshot_addr, self->ram_regions[i].size);
}
#else
/* munmap + mmap version */
for(uint8_t i = 0; i < self->ram_regions_num; i++){
void* host_addr = self->ram_regions[i].host_region_ptr + 0;
assert(munmap(host_addr, self->ram_regions[i].size) != EINVAL);
assert(mmap(host_addr, self->ram_regions[i].size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_FIXED, self->snapshot_ptr_fd, self->ram_regions[i].offset) != MAP_FAILED);
}
/* munmap + mmap version */
for (uint8_t i = 0; i < self->ram_regions_num; i++) {
void *host_addr = self->ram_regions[i].host_region_ptr + 0;
assert(munmap(host_addr, self->ram_regions[i].size) != EINVAL);
assert(mmap(host_addr, self->ram_regions[i].size,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_FIXED,
self->snapshot_ptr_fd, self->ram_regions[i].offset) != MAP_FAILED);
}
#endif
shadow_memory_init_generic(self);
return self;
shadow_memory_init_generic(self);
return self;
}
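/*
 * A minimal sketch of the munmap + mmap restore trick used above: instead of
 * memcpy'ing the snapshot into guest RAM, the RAM range is replaced in place
 * by a fresh private mapping of the snapshot file (assumes host_addr is
 * page-aligned and was itself mmap'ed; copy-on-write then keeps later guest
 * writes off the snapshot).
 */
#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>

static void remap_region_from_snapshot(void *host_addr, size_t size,
                                       int snapshot_fd, off_t offset)
{
    munmap(host_addr, size); /* drop the old pages backing this range */
    void *p = mmap(host_addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_FIXED, snapshot_fd, offset);
    assert(p == host_addr); /* MAP_FIXED returns exactly host_addr on success */
}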
void shadow_memory_prepare_incremental(shadow_memory_t* self){
void shadow_memory_prepare_incremental(shadow_memory_t *self)
{
static int count = 0;
if(count >= RESTORE_RATE){
if (count >= RESTORE_RATE) {
count = 0;
munmap(self->incremental_ptr, self->memory_size);
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0);
shadow_memory_set_incremental_ptrs(self);
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE, self->snapshot_ptr_fd, 0);
shadow_memory_set_incremental_ptrs(self);
}
count++;
}
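/*
 * Sketch of the copy-on-write scheme behind shadow_memory_prepare_incremental
 * (Linux, memfd-backed snapshot as set up in shadow_memory_init): a
 * MAP_PRIVATE mapping of the snapshot fd is CoW, so every write lands in an
 * anonymous page while the file stays pristine; remapping once every
 * RESTORE_RATE rounds releases all accumulated CoW pages in one munmap.
 */
#include <sys/mman.h>

static void *reset_cow_view(void *old_view, size_t size, int snapshot_fd)
{
    if (old_view != NULL) {
        munmap(old_view, size); /* frees every dirtied CoW page at once */
    }
    return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, snapshot_fd, 0);
}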
void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental){
void shadow_memory_switch_snapshot(shadow_memory_t *self, bool incremental)
{
self->incremental_enabled = incremental;
}
void shadow_memory_restore_memory(shadow_memory_t* self){
void shadow_memory_restore_memory(shadow_memory_t *self)
{
rcu_read_lock();
uint8_t slot = 0;
uint8_t slot = 0;
uint64_t addr = 0;
for(uint64_t i = 0; i < self->root_track_pages_num; i++){
for (uint64_t i = 0; i < self->root_track_pages_num; i++) {
addr = self->root_track_pages_stack[i] & 0xFFFFFFFFFFFFF000;
slot = self->root_track_pages_stack[i] & 0xFFF;
memcpy(self->ram_regions[slot].host_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE);
memcpy(self->ram_regions[slot].incremental_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE);
memcpy(self->ram_regions[slot].host_region_ptr + addr,
self->ram_regions[slot].snapshot_region_ptr + addr, TARGET_PAGE_SIZE);
memcpy(self->ram_regions[slot].incremental_region_ptr + addr,
self->ram_regions[slot].snapshot_region_ptr + addr, TARGET_PAGE_SIZE);
}
self->root_track_pages_num = 0;
@@ -315,38 +361,42 @@ void shadow_memory_restore_memory(shadow_memory_t* self){
/* only used in debug mode -> no need to be fast */
bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot){
bool shadow_memory_is_root_page_tracked(shadow_memory_t *self,
uint64_t address,
uint8_t slot)
{
uint64_t value = (address & 0xFFFFFFFFFFFFF000) | slot;
for(uint64_t i = 0; i < self->root_track_pages_num; i++){
if(self->root_track_pages_stack[i] == value){
for (uint64_t i = 0; i < self->root_track_pages_num; i++) {
if (self->root_track_pages_stack[i] == value) {
return true;
}
}
return false;
}
void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder){
char* tmp1;
char* tmp2;
void shadow_memory_serialize(shadow_memory_t *self, const char *snapshot_folder)
{
char *tmp1;
char *tmp2;
assert(asprintf(&tmp1, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1);
assert(asprintf(&tmp2, "%s/fast_snapshot.mem_dump", snapshot_folder) != -1);
FILE* file_mem_meta = fopen(tmp1, "w+b");
FILE* file_mem_data = fopen(tmp2, "w+b");
FILE *file_mem_meta = fopen(tmp1, "w+b");
FILE *file_mem_data = fopen(tmp2, "w+b");
fast_reload_dump_head_t head;
fast_reload_dump_head_t head;
fast_reload_dump_entry_t entry;
head.shadow_memory_regions = self->ram_regions_num;
head.ram_region_index = 0; /* for legacy reasons */
head.ram_region_index = 0; /* for legacy reasons */
fwrite(&head, sizeof(fast_reload_dump_head_t), 1, file_mem_meta);
for (uint64_t i = 0; i < self->ram_regions_num; i++){
for (uint64_t i = 0; i < self->ram_regions_num; i++) {
memset(&entry, 0x0, sizeof(fast_reload_dump_entry_t));
entry.shadow_memory_offset = (uint64_t)self->ram_regions[i].offset;
strncpy((char*)&entry.idstr, (const char*)self->ram_regions[i].idstr, 255);
strncpy((char *)&entry.idstr, (const char *)self->ram_regions[i].idstr, 255);
fwrite(&entry, sizeof(fast_reload_dump_entry_t), 1, file_mem_meta);
}
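/*
 * A hedged reader for the fast_snapshot.mem_meta layout produced above: one
 * fast_reload_dump_head_t followed by head.shadow_memory_regions entries.
 * idstr stays NUL-terminated because each entry is memset to zero and strncpy
 * copies at most 255 bytes.
 */
static void dump_mem_meta(const char *path)
{
    fast_reload_dump_head_t  head;
    fast_reload_dump_entry_t entry;
    FILE *fp = fopen(path, "rb");

    assert(fp != NULL && fread(&head, sizeof(head), 1, fp) == 1);
    for (uint32_t i = 0; i < head.shadow_memory_regions; i++) {
        assert(fread(&entry, sizeof(entry), 1, fp) == 1);
        printf("region %u: offset=0x%lx idstr=%s\n", i,
               (unsigned long)entry.shadow_memory_offset, entry.idstr);
    }
    fclose(fp);
}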
@@ -356,43 +406,57 @@ void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder)
fclose(file_mem_data);
}
static bool shadow_memory_read_page_frame(shadow_memory_t* self, uint64_t address, void* ptr, uint16_t offset, uint16_t size){
static bool shadow_memory_read_page_frame(shadow_memory_t *self,
uint64_t address,
void *ptr,
uint16_t offset,
uint16_t size)
{
assert((offset + size) <= 0x1000);
for(uint8_t i = 0; i < self->ram_regions_num; i++){
if(address >= self->ram_regions[i].base && address < (self->ram_regions[i].base + self->ram_regions[i].size)){
void* snapshot_ptr = self->ram_regions[i].snapshot_region_ptr + (address-self->ram_regions[i].base);
memcpy(ptr, snapshot_ptr+offset, size);
for (uint8_t i = 0; i < self->ram_regions_num; i++) {
if (address >= self->ram_regions[i].base &&
address < (self->ram_regions[i].base + self->ram_regions[i].size))
{
void *snapshot_ptr = self->ram_regions[i].snapshot_region_ptr +
(address - self->ram_regions[i].base);
memcpy(ptr, snapshot_ptr + offset, size);
return true;
}
}
return false;
}
bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size){
size_t bytes_left = size;
size_t copy_bytes = 0;
bool shadow_memory_read_physical_memory(shadow_memory_t *self,
uint64_t address,
void *ptr,
size_t size)
{
size_t bytes_left = size;
size_t copy_bytes = 0;
uint64_t current_address = address;
uint64_t offset = 0;
uint64_t offset = 0;
while (bytes_left != 0) {
/* full page */
if ((current_address & 0xFFF) == 0){
if ((current_address & 0xFFF) == 0) {
copy_bytes = 0x1000;
}
/* partial page (starting at an offset) */
else {
else
{
copy_bytes = 0x1000 - (current_address & 0xFFF);
}
/* partial page */
if (bytes_left < copy_bytes){
if (bytes_left < copy_bytes) {
copy_bytes = bytes_left;
}
if (shadow_memory_read_page_frame(self, current_address & ~0xFFFULL, ptr + offset, current_address & 0xFFFULL, copy_bytes) == false){
if (shadow_memory_read_page_frame(self, current_address & ~0xFFFULL,
ptr + offset, current_address & 0xFFFULL,
copy_bytes) == false)
{
return false;
}
current_address += copy_bytes;


@@ -1,90 +1,101 @@
#pragma once
#pragma once
#include <stdint.h>
#include "nyx/snapshot/devices/state_reallocation.h"
#include <stdint.h>
/* munmap & mmap incremental snapshot area after RESTORE_RATE restores to avoid high memory pressure */
#define RESTORE_RATE 2000
typedef struct ram_region_s{
typedef struct ram_region_s {
/* simple numeric identifier
* (can be the same for multiple regions if the memory is
* actually split across different bases in the guest's memory
* but related to the same mapping)
*/
uint8_t ram_region;
/* simple numeric identifier
* (can be the same for multiple regions if the memory is
* actually split across different bases in the guest's memory
* but related to the same mapping)
*/
uint8_t ram_region;
/* base in the guest's physical address space */
uint64_t base;
/* base in the guest's physical address space */
uint64_t base;
/* size of this region */
uint64_t size;
/* size of this region */
uint64_t size;
/* mmap offset of this region (does not apply to the actual guest's memory) */
uint64_t offset;
/* mmap offset of this region (does not apply to the actual guest's memory) */
uint64_t offset;
/* pointer to the actual mmap region used by KVM */
void *host_region_ptr;
/* pointer to the actual mmap region used by KVM */
void* host_region_ptr;
/* pointer to the snapshot mmap + offset */
void *snapshot_region_ptr;
/* pointer to the snapshot mmap + offset */
void* snapshot_region_ptr;
/* pointer to the incremental CoW mmap + offset */
void *incremental_region_ptr;
/* pointer to the incremental CoW mmap + offset */
void* incremental_region_ptr;
char* idstr;
char *idstr;
} ram_region_t;
typedef struct shadow_memory_s{
/* snapshot memory backup */
void* snapshot_ptr;
typedef struct shadow_memory_s {
/* snapshot memory backup */
void *snapshot_ptr;
/* snapshot memory backup memfd */
int snapshot_ptr_fd;
/* snapshot memory backup memfd */
int snapshot_ptr_fd;
/* incremental memory backup */
void* incremental_ptr;
/* incremental memory backup */
void *incremental_ptr;
//fast_reload_tmp_snapshot_t tmp_snapshot;
// fast_reload_tmp_snapshot_t tmp_snapshot;
/* total memory size */
uint64_t memory_size;
/* total memory size */
uint64_t memory_size;
/* keep this */
ram_region_t ram_regions[10];
uint8_t ram_regions_num;
/* keep this */
ram_region_t ram_regions[10];
uint8_t ram_regions_num;
/* additional dirty stack to restore root snapshot */
uint64_t root_track_pages_num;
uint64_t root_track_pages_size;
uint64_t* root_track_pages_stack;
/* additional dirty stack to restore root snapshot */
uint64_t root_track_pages_num;
uint64_t root_track_pages_size;
uint64_t *root_track_pages_stack;
bool incremental_enabled;
}shadow_memory_t;
bool incremental_enabled;
} shadow_memory_t;
shadow_memory_t* shadow_memory_init(void);
shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot);
shadow_memory_t *shadow_memory_init(void);
shadow_memory_t *shadow_memory_init_from_snapshot(const char *snapshot_folder,
bool pre_snapshot);
void shadow_memory_prepare_incremental(shadow_memory_t* self);
void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental);
void shadow_memory_prepare_incremental(shadow_memory_t *self);
void shadow_memory_switch_snapshot(shadow_memory_t *self, bool incremental);
void shadow_memory_restore_memory(shadow_memory_t* self);
void shadow_memory_restore_memory(shadow_memory_t *self);
static inline void shadow_memory_track_dirty_root_pages(shadow_memory_t* self, uint64_t address, uint8_t slot){
if(unlikely(self->root_track_pages_num >= self->root_track_pages_size)){
self->root_track_pages_size <<= 2;
self->root_track_pages_stack = realloc(self->root_track_pages_stack, self->root_track_pages_size*sizeof(uint64_t));
static inline void shadow_memory_track_dirty_root_pages(shadow_memory_t *self,
uint64_t address,
uint8_t slot)
{
if (unlikely(self->root_track_pages_num >= self->root_track_pages_size)) {
self->root_track_pages_size <<= 2;
self->root_track_pages_stack =
realloc(self->root_track_pages_stack,
self->root_track_pages_size * sizeof(uint64_t));
}
self->root_track_pages_stack[self->root_track_pages_num] = (address & 0xFFFFFFFFFFFFF000) | slot;
self->root_track_pages_stack[self->root_track_pages_num] =
(address & 0xFFFFFFFFFFFFF000) | slot;
self->root_track_pages_num++;
}
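/*
 * Sketch of the packing used by the dirty-page stack above: 4 KiB page
 * alignment leaves the low 12 bits of every tracked address zero, so the
 * region slot index is stashed there and recovered with two masks (this is
 * exactly what shadow_memory_restore_memory undoes).
 */
static inline uint64_t pack_page_slot(uint64_t address, uint8_t slot)
{
    return (address & 0xFFFFFFFFFFFFF000ULL) | slot;
}

static inline uint64_t unpack_page(uint64_t value)
{
    return value & 0xFFFFFFFFFFFFF000ULL;
}

static inline uint8_t unpack_slot(uint64_t value)
{
    return (uint8_t)(value & 0xFFF);
}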
bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot);
bool shadow_memory_is_root_page_tracked(shadow_memory_t *self,
uint64_t address,
uint8_t slot);
void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder);
void shadow_memory_serialize(shadow_memory_t *self, const char *snapshot_folder);
bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size);
bool shadow_memory_read_physical_memory(shadow_memory_t *self,
uint64_t address,
void *ptr,
size_t size);


@@ -3,140 +3,152 @@
#include "sysemu/cpus.h"
#include "nyx/state/snapshot_state.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include "nyx/memory_access.h"
#include <stdio.h>
#include "nyx/state/state.h"
#include <stdint.h>
#include <stdio.h>
void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
nyx_trace();
void serialize_state(const char *filename_prefix, bool is_pre_snapshot)
{
nyx_trace();
char* tmp;
char *tmp;
assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1);
assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1);
FILE *fp = fopen(tmp, "wb");
if(fp == NULL) {
FILE *fp = fopen(tmp, "wb");
if (fp == NULL) {
nyx_error("[%s] Could not open file %s.\n", __func__, tmp);
assert(false);
}
serialized_state_header_t header = {0};
serialized_state_header_t header = { 0 };
header.magic = NYX_SERIALIZED_STATE_MAGIC;
header.magic = NYX_SERIALIZED_STATE_MAGIC;
header.version = NYX_SERIALIZED_STATE_VERSION;
if (is_pre_snapshot){
if (is_pre_snapshot) {
header.type = NYX_SERIALIZED_TYPE_PRE_SNAPSHOT;
fwrite(&header, sizeof(serialized_state_header_t), 1, fp);
}
else{
} else {
header.type = NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT;
fwrite(&header, sizeof(serialized_state_header_t), 1, fp);
qemu_nyx_state_t* nyx_global_state = GET_GLOBAL_STATE();
serialized_state_root_snapshot_t root_snapshot = {0};
qemu_nyx_state_t *nyx_global_state = GET_GLOBAL_STATE();
serialized_state_root_snapshot_t root_snapshot = { 0 };
for (uint8_t i = 0; i < 4; i++){
root_snapshot.pt_ip_filter_configured[i] = nyx_global_state->pt_ip_filter_configured[i];
for (uint8_t i = 0; i < 4; i++) {
root_snapshot.pt_ip_filter_configured[i] =
nyx_global_state->pt_ip_filter_configured[i];
root_snapshot.pt_ip_filter_a[i] = nyx_global_state->pt_ip_filter_a[i];
root_snapshot.pt_ip_filter_b[i] = nyx_global_state->pt_ip_filter_b[i];
}
root_snapshot.parent_cr3 = nyx_global_state->parent_cr3;
root_snapshot.disassembler_word_width = nyx_global_state->disassembler_word_width;
root_snapshot.disassembler_word_width =
nyx_global_state->disassembler_word_width;
root_snapshot.fast_reload_pre_image = nyx_global_state->fast_reload_pre_image;
root_snapshot.mem_mode = nyx_global_state->mem_mode;
root_snapshot.pt_trace_mode =nyx_global_state->pt_trace_mode;
root_snapshot.mem_mode = nyx_global_state->mem_mode;
root_snapshot.pt_trace_mode = nyx_global_state->pt_trace_mode;
root_snapshot.input_buffer_vaddr = nyx_global_state->payload_buffer;
root_snapshot.protect_input_buffer = nyx_global_state->protect_payload_buffer;
root_snapshot.input_buffer_size = nyx_global_state->input_buffer_size;
root_snapshot.cap_timeout_detection = nyx_global_state->cap_timeout_detection;
root_snapshot.cap_only_reload_mode = nyx_global_state->cap_only_reload_mode;
root_snapshot.cap_compile_time_tracing = nyx_global_state->cap_compile_time_tracing;
root_snapshot.cap_compile_time_tracing =
nyx_global_state->cap_compile_time_tracing;
root_snapshot.cap_ijon_tracing = nyx_global_state->cap_ijon_tracing;
root_snapshot.cap_cr3 = nyx_global_state->cap_cr3;
root_snapshot.cap_compile_time_tracing_buffer_vaddr = nyx_global_state->cap_compile_time_tracing_buffer_vaddr;
root_snapshot.cap_ijon_tracing_buffer_vaddr = nyx_global_state->cap_ijon_tracing_buffer_vaddr;
root_snapshot.cap_coverage_bitmap_size = nyx_global_state->cap_coverage_bitmap_size;
root_snapshot.cap_cr3 = nyx_global_state->cap_cr3;
root_snapshot.cap_compile_time_tracing_buffer_vaddr =
nyx_global_state->cap_compile_time_tracing_buffer_vaddr;
root_snapshot.cap_ijon_tracing_buffer_vaddr =
nyx_global_state->cap_ijon_tracing_buffer_vaddr;
root_snapshot.cap_coverage_bitmap_size =
nyx_global_state->cap_coverage_bitmap_size;
fwrite(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1, fp);
}
fclose(fp);
free(tmp);
free(tmp);
}
void deserialize_state(const char* filename_prefix){
nyx_trace();
void deserialize_state(const char *filename_prefix)
{
nyx_trace();
char* tmp;
char *tmp;
assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1);
assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1);
FILE *fp = fopen(tmp, "rb");
if(fp == NULL) {
FILE *fp = fopen(tmp, "rb");
if (fp == NULL) {
nyx_debug("[%s] Could not open file %s.\n", __func__, tmp);
assert(false);
}
serialized_state_header_t header = {0};
serialized_state_header_t header = { 0 };
assert(fread(&header, sizeof(serialized_state_header_t), 1, fp) == 1);
assert(header.magic == NYX_SERIALIZED_STATE_MAGIC);
assert(header.version == NYX_SERIALIZED_STATE_VERSION);
if(header.type == NYX_SERIALIZED_TYPE_PRE_SNAPSHOT){
if (header.type == NYX_SERIALIZED_TYPE_PRE_SNAPSHOT) {
/* we're done here */
}
else if (header.type == NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT){
qemu_nyx_state_t* nyx_global_state = GET_GLOBAL_STATE();
serialized_state_root_snapshot_t root_snapshot = {0};
assert(fread(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1, fp) == 1);
} else if (header.type == NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT) {
qemu_nyx_state_t *nyx_global_state = GET_GLOBAL_STATE();
serialized_state_root_snapshot_t root_snapshot = { 0 };
assert(fread(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1,
fp) == 1);
for (uint8_t i = 0; i < 4; i++){
nyx_global_state->pt_ip_filter_configured[i] = root_snapshot.pt_ip_filter_configured[i];
for (uint8_t i = 0; i < 4; i++) {
nyx_global_state->pt_ip_filter_configured[i] =
root_snapshot.pt_ip_filter_configured[i];
nyx_global_state->pt_ip_filter_a[i] = root_snapshot.pt_ip_filter_a[i];
nyx_global_state->pt_ip_filter_b[i] = root_snapshot.pt_ip_filter_b[i];
}
nyx_global_state->parent_cr3 = root_snapshot.parent_cr3;
nyx_global_state->disassembler_word_width = root_snapshot.disassembler_word_width;
nyx_global_state->disassembler_word_width =
root_snapshot.disassembler_word_width;
nyx_global_state->fast_reload_pre_image = root_snapshot.fast_reload_pre_image;
nyx_global_state->mem_mode = root_snapshot.mem_mode;
nyx_global_state->pt_trace_mode =root_snapshot.pt_trace_mode;
nyx_global_state->mem_mode = root_snapshot.mem_mode;
nyx_global_state->pt_trace_mode = root_snapshot.pt_trace_mode;
nyx_global_state->payload_buffer = root_snapshot.input_buffer_vaddr;
nyx_global_state->protect_payload_buffer = root_snapshot.protect_input_buffer;
nyx_global_state->input_buffer_size = root_snapshot.input_buffer_size;
nyx_global_state->cap_timeout_detection = root_snapshot.cap_timeout_detection;
nyx_global_state->cap_only_reload_mode = root_snapshot.cap_only_reload_mode;
nyx_global_state->cap_compile_time_tracing = root_snapshot.cap_compile_time_tracing;
nyx_global_state->cap_compile_time_tracing =
root_snapshot.cap_compile_time_tracing;
nyx_global_state->cap_ijon_tracing = root_snapshot.cap_ijon_tracing;
nyx_global_state->cap_cr3 = root_snapshot.cap_cr3;
nyx_global_state->cap_compile_time_tracing_buffer_vaddr = root_snapshot.cap_compile_time_tracing_buffer_vaddr;
nyx_global_state->cap_ijon_tracing_buffer_vaddr = root_snapshot.cap_ijon_tracing_buffer_vaddr;
nyx_global_state->cap_coverage_bitmap_size = root_snapshot.cap_coverage_bitmap_size;
nyx_global_state->cap_cr3 = root_snapshot.cap_cr3;
nyx_global_state->cap_compile_time_tracing_buffer_vaddr =
root_snapshot.cap_compile_time_tracing_buffer_vaddr;
nyx_global_state->cap_ijon_tracing_buffer_vaddr =
root_snapshot.cap_ijon_tracing_buffer_vaddr;
nyx_global_state->cap_coverage_bitmap_size =
root_snapshot.cap_coverage_bitmap_size;
assert(apply_capabilities(qemu_get_cpu(0)));
remap_payload_buffer(nyx_global_state->payload_buffer, ((CPUState *)qemu_get_cpu(0)) );
remap_payload_buffer(nyx_global_state->payload_buffer,
((CPUState *)qemu_get_cpu(0)));
/* makes sure that we are allowed to enter the fuzzing loop */
nyx_global_state->get_host_config_done = true;
nyx_global_state->get_host_config_done = true;
nyx_global_state->set_agent_config_done = true;
}
else{
} else {
fprintf(stderr, "[QEMU-Nyx]: this feature is currently missing\n");
abort();
}
fclose(fp);
free(tmp);
free(tmp);
}
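/*
 * Minimal consumer sketch for the global.state format handled above:
 * validate only the serialized_state_header_t and return its type field
 * (constants and struct as declared in nyx/state/snapshot_state.h in this
 * same commit).
 */
static uint32_t peek_snapshot_type(const char *path)
{
    serialized_state_header_t header = { 0 };
    FILE *fp = fopen(path, "rb");

    assert(fp != NULL);
    assert(fread(&header, sizeof(header), 1, fp) == 1);
    fclose(fp);
    assert(header.magic == NYX_SERIALIZED_STATE_MAGIC);
    assert(header.version == NYX_SERIALIZED_STATE_VERSION);
    return header.type; /* PRE_, ROOT_ or NESTED_SNAPSHOT */
}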


@@ -3,12 +3,12 @@
#include <stdbool.h>
#include <stdint.h>
#define NYX_SERIALIZED_STATE_MAGIC 0x58594E
#define NYX_SERIALIZED_STATE_VERSION 1
#define NYX_SERIALIZED_STATE_MAGIC 0x58594E
#define NYX_SERIALIZED_STATE_VERSION 1
#define NYX_SERIALIZED_TYPE_PRE_SNAPSHOT 0
#define NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT 1
#define NYX_SERIALIZED_TYPE_NESTED_SNAPSHOT 2
#define NYX_SERIALIZED_TYPE_PRE_SNAPSHOT 0
#define NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT 1
#define NYX_SERIALIZED_TYPE_NESTED_SNAPSHOT 2
typedef struct serialized_state_header_s {
uint32_t magic;
@@ -17,24 +17,24 @@ typedef struct serialized_state_header_s {
} serialized_state_header_t;
typedef struct serialized_state_root_snapshot_s {
bool pt_ip_filter_configured[4];
bool pt_ip_filter_configured[4];
uint64_t pt_ip_filter_a[4];
uint64_t pt_ip_filter_b[4];
uint64_t parent_cr3;
uint8_t disassembler_word_width;
bool fast_reload_pre_image;
uint8_t mem_mode;
bool pt_trace_mode;
uint8_t disassembler_word_width;
bool fast_reload_pre_image;
uint8_t mem_mode;
bool pt_trace_mode;
uint64_t input_buffer_vaddr;
bool protect_input_buffer;
bool protect_input_buffer;
uint32_t input_buffer_size;
uint8_t cap_timeout_detection;
uint8_t cap_only_reload_mode;
uint8_t cap_compile_time_tracing;
uint8_t cap_ijon_tracing;
uint64_t cap_cr3;
uint8_t cap_timeout_detection;
uint8_t cap_only_reload_mode;
uint8_t cap_compile_time_tracing;
uint8_t cap_ijon_tracing;
uint64_t cap_cr3;
uint64_t cap_compile_time_tracing_buffer_vaddr;
uint64_t cap_ijon_tracing_buffer_vaddr;
uint64_t cap_coverage_bitmap_size;
@@ -42,6 +42,5 @@ typedef struct serialized_state_root_snapshot_s {
} serialized_state_root_snapshot_t;
void serialize_state(const char* filename_prefix, bool is_pre_snapshot);
void deserialize_state(const char* filename_prefix);
void serialize_state(const char *filename_prefix, bool is_pre_snapshot);
void deserialize_state(const char *filename_prefix);


@@ -24,23 +24,24 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include <stdint.h>
#include <stdio.h>
#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/memory_access.h"
#include "sysemu/kvm.h"
#include "nyx/auxiliary_buffer.h"
#include "nyx/sharedir.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include "nyx/sharedir.h"
#include "nyx/state/state.h"
//#define STATE_VERBOSE
// #define STATE_VERBOSE
/* global singleton */
qemu_nyx_state_t global_state;
#define LIBXDC_RELEASE_VERSION_REQUIRED 2
void state_init_global(void){
void state_init_global(void)
{
#ifdef STATE_VERBOSE
fprintf(stderr, "--> %s <--\n", __func__);
#endif
@@ -50,194 +51,213 @@ void state_init_global(void){
global_state.nyx_fdl = false;
global_state.workdir_path = NULL;
global_state.worker_id = 0xffff;
global_state.worker_id = 0xffff;
global_state.fast_reload_enabled = false;
global_state.fast_reload_mode = false;
global_state.fast_reload_path = NULL;
global_state.fast_reload_pre_path = NULL;
global_state.fast_reload_enabled = false;
global_state.fast_reload_mode = false;
global_state.fast_reload_path = NULL;
global_state.fast_reload_pre_path = NULL;
global_state.fast_reload_pre_image = false;
global_state.fast_reload_snapshot = fast_reload_new();
global_state.reload_state = init_fast_vm_reload_sync();
global_state.reload_state = init_fast_vm_reload_sync();
global_state.decoder = NULL;
global_state.page_cache = NULL;
global_state.redqueen_enable_pending = false;
global_state.redqueen_disable_pending = false;
global_state.redqueen_enable_pending = false;
global_state.redqueen_disable_pending = false;
global_state.redqueen_instrumentation_mode = 0;
global_state.redqueen_update_blacklist = false;
global_state.patches_enable_pending = false;
global_state.patches_disable_pending = false;
global_state.redqueen_state = NULL;
global_state.redqueen_update_blacklist = false;
global_state.patches_enable_pending = false;
global_state.patches_disable_pending = false;
global_state.redqueen_state = NULL;
for(uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++){
for (uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++) {
global_state.pt_ip_filter_configured[i] = false;
global_state.pt_ip_filter_enabled[i] = false;
global_state.pt_ip_filter_a[i] = 0x0;
global_state.pt_ip_filter_b[i] = 0x0;
global_state.pt_ip_filter_enabled[i] = false;
global_state.pt_ip_filter_a[i] = 0x0;
global_state.pt_ip_filter_b[i] = 0x0;
}
global_state.pt_c3_filter = 0;
global_state.parent_cr3 = 0;
global_state.disassembler_word_width = 64;
global_state.nested = false;
global_state.payload_buffer = 0;
global_state.nested_payload_pages = NULL;
global_state.parent_cr3 = 0;
global_state.disassembler_word_width = 64;
global_state.nested = false;
global_state.payload_buffer = 0;
global_state.nested_payload_pages = NULL;
global_state.nested_payload_pages_num = 0;
global_state.protect_payload_buffer = 0;
global_state.discard_tmp_snapshot = 0;
global_state.mem_mode = mm_unkown;
global_state.protect_payload_buffer = 0;
global_state.discard_tmp_snapshot = 0;
global_state.mem_mode = mm_unkown;
init_timeout_detector(&(global_state.timeout_detector));
global_state.in_fuzzing_mode = false;
global_state.in_reload_mode = true;
global_state.starved = false;
global_state.trace_mode = false;
global_state.in_fuzzing_mode = false;
global_state.in_reload_mode = true;
global_state.starved = false;
global_state.trace_mode = false;
global_state.shutdown_requested = false;
global_state.cow_cache_full = false;
global_state.cow_cache_full = false;
global_state.auxilary_buffer = NULL;
memset(&global_state.shadow_config, 0x0, sizeof(auxilary_buffer_config_t));
global_state.decoder_page_fault = false;
global_state.decoder_page_fault = false;
global_state.decoder_page_fault_addr = 0x0;
global_state.dump_page = false;
global_state.dump_page = false;
global_state.dump_page_addr = 0x0;
global_state.in_redqueen_reload_mode = false;
global_state.pt_trace_mode = true;
global_state.pt_trace_mode = true;
global_state.pt_trace_mode_force = false;
global_state.num_dirty_pages = 0;
global_state.get_host_config_done = false;
global_state.get_host_config_done = false;
global_state.set_agent_config_done = false;
global_state.sharedir = sharedir_new();
global_state.shared_bitmap_fd = 0;
global_state.shared_bitmap_size = 0;
global_state.shared_bitmap_fd = 0;
global_state.shared_bitmap_size = 0;
global_state.shared_bitmap_real_size = 0;
global_state.shared_bitmap_ptr = NULL;
global_state.shared_bitmap_ptr = NULL;
global_state.shared_payload_buffer_fd = 0;
global_state.shared_payload_buffer_fd = 0;
global_state.shared_payload_buffer_size = 0;
global_state.shared_ijon_bitmap_fd = 0;
global_state.shared_ijon_bitmap_fd = 0;
global_state.shared_ijon_bitmap_size = 0;
global_state.shared_ijon_bitmap_ptr = NULL;
global_state.shared_ijon_bitmap_ptr = NULL;
global_state.pt_trace_size = 0;
global_state.bb_coverage = 0;
global_state.bb_coverage = 0;
global_state.cap_timeout_detection = 0;
global_state.cap_only_reload_mode = 0;
global_state.cap_compile_time_tracing = 0;
global_state.cap_ijon_tracing = 0;
global_state.cap_cr3 = 0;
global_state.cap_timeout_detection = 0;
global_state.cap_only_reload_mode = 0;
global_state.cap_compile_time_tracing = 0;
global_state.cap_ijon_tracing = 0;
global_state.cap_cr3 = 0;
global_state.cap_compile_time_tracing_buffer_vaddr = 0;
global_state.cap_ijon_tracing_buffer_vaddr = 0;
global_state.cap_ijon_tracing_buffer_vaddr = 0;
QTAILQ_INIT(&global_state.redqueen_breakpoints);
}
fast_reload_t* get_fast_reload_snapshot(void){
fast_reload_t *get_fast_reload_snapshot(void)
{
return global_state.fast_reload_snapshot;
}
void set_fast_reload_mode(bool mode){
void set_fast_reload_mode(bool mode)
{
global_state.fast_reload_mode = mode;
}
void set_fast_reload_path(const char* path){
void set_fast_reload_path(const char *path)
{
assert(global_state.fast_reload_path == NULL);
global_state.fast_reload_path = malloc(strlen(path)+1);
global_state.fast_reload_path = malloc(strlen(path) + 1);
strcpy(global_state.fast_reload_path, path);
}
void set_fast_reload_pre_path(const char* path){
void set_fast_reload_pre_path(const char *path)
{
assert(global_state.fast_reload_pre_path == NULL);
global_state.fast_reload_pre_path = malloc(strlen(path)+1);
global_state.fast_reload_pre_path = malloc(strlen(path) + 1);
strcpy(global_state.fast_reload_pre_path, path);
}
void set_fast_reload_pre_image(void){
void set_fast_reload_pre_image(void)
{
assert(global_state.fast_reload_pre_path != NULL);
global_state.fast_reload_pre_image = true;
}
void enable_fast_reloads(void){
void enable_fast_reloads(void)
{
assert(global_state.fast_reload_path != NULL);
global_state.fast_reload_enabled = true;
}
void init_page_cache(char* path){
void init_page_cache(char *path)
{
assert(global_state.page_cache == NULL);
global_state.page_cache = page_cache_new((CPUState *)qemu_get_cpu(0), path);
}
page_cache_t* get_page_cache(void){
page_cache_t *get_page_cache(void)
{
assert(global_state.page_cache);
return global_state.page_cache;
}
void init_redqueen_state(void){
global_state.redqueen_state = new_rq_state((CPUState *)qemu_get_cpu(0), get_page_cache());
void init_redqueen_state(void)
{
global_state.redqueen_state =
new_rq_state((CPUState *)qemu_get_cpu(0), get_page_cache());
}
redqueen_t* get_redqueen_state(void){
redqueen_t *get_redqueen_state(void)
{
assert(global_state.redqueen_state != NULL);
return global_state.redqueen_state;
}
static void* alloc_auxiliary_buffer(const char* file){
void* ptr;
struct stat st;
int fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
static void *alloc_auxiliary_buffer(const char *file)
{
void *ptr;
struct stat st;
int fd = open(file, O_CREAT | O_RDWR, S_IRWXU | S_IRWXG | S_IRWXO);
assert(ftruncate(fd, AUX_BUFFER_SIZE) == 0);
stat(file, &st);
nyx_debug_p(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx", AUX_BUFFER_SIZE, st.st_size);
assert(AUX_BUFFER_SIZE == st.st_size);
ptr = mmap(0, AUX_BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED) {
fprintf(stderr, "aux buffer allocation failed!\n");
return (void*)-1;
}
return ptr;
assert(ftruncate(fd, AUX_BUFFER_SIZE) == 0);
stat(file, &st);
nyx_debug_p(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx",
AUX_BUFFER_SIZE, st.st_size);
assert(AUX_BUFFER_SIZE == st.st_size);
ptr = mmap(0, AUX_BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED) {
fprintf(stderr, "aux buffer allocation failed!\n");
return (void *)-1;
}
return ptr;
}
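/*
 * Hypothetical consumer-side counterpart to alloc_auxiliary_buffer (not part
 * of this commit): a fuzzing frontend can map the same file with MAP_SHARED
 * to observe the auxiliary buffer QEMU-Nyx writes; AUX_BUFFER_SIZE must match
 * on both sides.
 */
static void *attach_aux_buffer(const char *file)
{
    int   fd = open(file, O_RDWR);
    void *ptr;

    if (fd == -1) {
        return NULL;
    }
    ptr = mmap(0, AUX_BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd); /* the mapping outlives the descriptor */
    return ptr == MAP_FAILED ? NULL : ptr;
}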
void init_aux_buffer(const char* filename){
global_state.auxilary_buffer = (auxilary_buffer_t*)alloc_auxiliary_buffer(filename);
void init_aux_buffer(const char *filename)
{
global_state.auxilary_buffer =
(auxilary_buffer_t *)alloc_auxiliary_buffer(filename);
init_auxiliary_buffer(global_state.auxilary_buffer);
}
void set_payload_buffer(uint64_t payload_buffer){
void set_payload_buffer(uint64_t payload_buffer)
{
assert(global_state.payload_buffer == 0 && global_state.nested == false);
global_state.payload_buffer = payload_buffer;
global_state.nested = false;
global_state.nested = false;
}
void set_payload_pages(uint64_t* payload_pages, uint32_t pages){
assert(global_state.nested_payload_pages == NULL && global_state.nested_payload_pages_num == 0);
global_state.nested_payload_pages = (uint64_t*)malloc(sizeof(uint64_t)*pages);
void set_payload_pages(uint64_t *payload_pages, uint32_t pages)
{
assert(global_state.nested_payload_pages == NULL &&
global_state.nested_payload_pages_num == 0);
global_state.nested_payload_pages = (uint64_t *)malloc(sizeof(uint64_t) * pages);
global_state.nested_payload_pages_num = pages;
memcpy(global_state.nested_payload_pages, payload_pages, sizeof(uint64_t)*pages);
memcpy(global_state.nested_payload_pages, payload_pages, sizeof(uint64_t) * pages);
global_state.nested = true;
}
void set_workdir_path(char* workdir){
void set_workdir_path(char *workdir)
{
assert(workdir && !global_state.workdir_path);
assert(asprintf(&global_state.workdir_path, "%s", workdir) != -1);
assert(asprintf(&global_state.workdir_path, "%s", workdir) != -1);
}


@@ -21,109 +21,109 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include "nyx/auxiliary_buffer.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/page_cache.h"
#include "nyx/redqueen.h"
#include "nyx/redqueen_patch.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/page_cache.h"
#include "nyx/synchronization.h"
#include "nyx/auxiliary_buffer.h"
#include "nyx/sharedir.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/synchronization.h"
#include "nyx/types.h"
#include <libxdc.h>
#define INTEL_PT_MAX_RANGES 4
typedef struct qemu_nyx_state_s{
#define INTEL_PT_MAX_RANGES 4
typedef struct qemu_nyx_state_s {
/* set if FDL backend is used (required to perform some additional runtime tests) */
bool nyx_fdl;
char* workdir_path;
char *workdir_path;
uint32_t worker_id;
/* FAST VM RELOAD */
bool fast_reload_enabled;
bool fast_reload_mode;
char* fast_reload_path;
char* fast_reload_pre_path;
bool fast_reload_pre_image;
fast_reload_t* fast_reload_snapshot;
fast_vm_reload_sync_t* reload_state;
bool fast_reload_enabled;
bool fast_reload_mode;
char *fast_reload_path;
char *fast_reload_pre_path;
bool fast_reload_pre_image;
fast_reload_t *fast_reload_snapshot;
fast_vm_reload_sync_t *reload_state;
/* PAGE CACHE */
page_cache_t* page_cache;
page_cache_t *page_cache;
/* Decoder */
libxdc_t* decoder;
libxdc_t *decoder;
/* REDQUEEN */
bool redqueen_enable_pending;
bool redqueen_disable_pending;
int redqueen_instrumentation_mode;
bool redqueen_update_blacklist;
bool patches_enable_pending;
bool patches_disable_pending;
redqueen_t* redqueen_state;
bool redqueen_enable_pending;
bool redqueen_disable_pending;
int redqueen_instrumentation_mode;
bool redqueen_update_blacklist;
bool patches_enable_pending;
bool patches_disable_pending;
redqueen_t *redqueen_state;
/* Intel PT Options (not migratable) */
uint64_t pt_c3_filter;
uint64_t pt_c3_filter;
volatile bool pt_ip_filter_enabled[4];
bool pt_trace_mode; // enabled by default; disabled if compile-time tracing is implemented by agent
/* disabled by default; enable to force usage of PT tracing
* (useful for targets that use compile-time tracing and redqueen at the same time (which obviously relies on PT traces))
* This mode is usually enabled by the fuzzing logic by enabling trace mode.
* *** THIS FEATURE IS STILL EXPERIMENTAL ***
/* disabled by default; enable to force usage of PT tracing
* (useful for targets that use compile-time tracing and redqueen at the same
* time (which obviously relies on PT traces)) This mode is usually enabled by
* the fuzzing logic by enabling trace mode.
* *** THIS FEATURE IS STILL EXPERIMENTAL ***
* */
bool pt_trace_mode_force;
bool pt_trace_mode_force;
uint32_t pt_trace_size; // trace size counter
uint32_t bb_coverage; // basic-block coverage counter
uint32_t pt_trace_size; // trace size counter
uint32_t bb_coverage; // basic-block coverage counter
/* mmap Options (not migratable) */
int shared_bitmap_fd;
uint32_t shared_bitmap_size; /* size of the shared memory file */
uint32_t shared_bitmap_real_size; /* actual size of the bitmap */
void* shared_bitmap_ptr;
int shared_bitmap_fd;
uint32_t shared_bitmap_size; /* size of the shared memory file */
uint32_t shared_bitmap_real_size; /* actual size of the bitmap */
void *shared_bitmap_ptr;
int shared_payload_buffer_fd;
int shared_payload_buffer_fd;
uint32_t shared_payload_buffer_size;
int shared_ijon_bitmap_fd;
int shared_ijon_bitmap_fd;
uint32_t shared_ijon_bitmap_size;
void* shared_ijon_bitmap_ptr;
void *shared_ijon_bitmap_ptr;
/* Intel PT Options (migratable) */
bool pt_ip_filter_configured[4];
bool pt_ip_filter_configured[4];
uint64_t pt_ip_filter_a[4];
uint64_t pt_ip_filter_b[4];
/* OPTIONS (MIGRATABLE VIA FAST SNAPSHOTS) */
uint64_t parent_cr3;
uint8_t disassembler_word_width;
bool nested;
uint64_t payload_buffer;
uint32_t nested_payload_pages_num;
uint64_t* nested_payload_pages;
bool protect_payload_buffer;
bool discard_tmp_snapshot;
mem_mode_t mem_mode;
uint32_t input_buffer_size;
uint64_t parent_cr3;
uint8_t disassembler_word_width;
bool nested;
uint64_t payload_buffer;
uint32_t nested_payload_pages_num;
uint64_t *nested_payload_pages;
bool protect_payload_buffer;
bool discard_tmp_snapshot;
mem_mode_t mem_mode;
uint32_t input_buffer_size;
/* NON MIGRATABLE OPTION */
timeout_detector_t timeout_detector;
bool decoder_page_fault;
bool decoder_page_fault;
uint64_t decoder_page_fault_addr;
bool dump_page;
bool dump_page;
uint64_t dump_page_addr;
bool in_fuzzing_mode;
bool in_reload_mode;
bool in_reload_mode;
bool starved;
bool trace_mode;
@@ -138,18 +138,18 @@ typedef struct qemu_nyx_state_s{
bool set_agent_config_done;
/* capabilities */
uint8_t cap_timeout_detection;
uint8_t cap_only_reload_mode;
uint8_t cap_compile_time_tracing;
uint8_t cap_ijon_tracing;
uint64_t cap_cr3;
uint8_t cap_timeout_detection;
uint8_t cap_only_reload_mode;
uint8_t cap_compile_time_tracing;
uint8_t cap_ijon_tracing;
uint64_t cap_cr3;
uint64_t cap_compile_time_tracing_buffer_vaddr;
uint64_t cap_ijon_tracing_buffer_vaddr;
uint64_t cap_coverage_bitmap_size;
auxilary_buffer_t* auxilary_buffer;
auxilary_buffer_t *auxilary_buffer;
auxilary_buffer_config_t shadow_config;
sharedir_t* sharedir;
sharedir_t *sharedir;
QTAILQ_HEAD(, kvm_sw_breakpoint) redqueen_breakpoints;
} qemu_nyx_state_t;
@ -158,27 +158,27 @@ extern qemu_nyx_state_t global_state;
#define GET_GLOBAL_STATE() (&global_state)
void state_init_global(void);
fast_reload_t* get_fast_reload_snapshot(void);
void set_fast_reload_mode(bool mode);
void set_fast_reload_path(const char* path);
void set_fast_reload_pre_image(void);
void state_init_global(void);
fast_reload_t *get_fast_reload_snapshot(void);
void set_fast_reload_mode(bool mode);
void set_fast_reload_path(const char *path);
void set_fast_reload_pre_image(void);
void enable_fast_reloads(void);
/* Page Cache */
void init_page_cache(char* path);
page_cache_t* get_page_cache(void);
void init_page_cache(char *path);
page_cache_t *get_page_cache(void);
void init_redqueen_state(void);
redqueen_t* get_redqueen_state(void);
redqueen_t *get_redqueen_state(void);
void init_aux_buffer(const char* filename);
void set_fast_reload_pre_path(const char* path);
void init_aux_buffer(const char *filename);
void set_fast_reload_pre_path(const char *path);
void set_payload_buffer(uint64_t payload_buffer);
void set_payload_pages(uint64_t* payload_pages, uint32_t pages);
void set_payload_pages(uint64_t *payload_pages, uint32_t pages);
void set_workdir_path(char* workdir);
void set_workdir_path(char *workdir);

View File

@ -1,409 +1,442 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu-common.h"
#include "nyx/synchronization.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/file_helper.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/fast_vm_reload.h"
#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include <sys/syscall.h>
#include <linux/kvm.h>
#include "qemu/main-loop.h"
#include "nyx/helpers.h"
#include "nyx/file_helper.h"
#include <sys/syscall.h>
#include "pt.h"
pthread_mutex_t synchronization_lock_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t synchronization_lock_condition = PTHREAD_COND_INITIALIZER;
pthread_mutex_t synchronization_lock_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t synchronization_lock_condition = PTHREAD_COND_INITIALIZER;
pthread_mutex_t synchronization_disable_pt_mutex = PTHREAD_MUTEX_INITIALIZER;
volatile bool synchronization_reload_pending = false;
volatile bool synchronization_reload_pending = false;
volatile bool synchronization_kvm_loop_waiting = false;
/* SIGALRM based timeout detection */
//#define DEBUG_TIMEOUT_DETECTOR
// #define DEBUG_TIMEOUT_DETECTOR
void init_timeout_detector(timeout_detector_t* timer){
timer->kvm_tid = 0;
timer->detection_enabled = false;
void init_timeout_detector(timeout_detector_t *timer)
{
timer->kvm_tid = 0;
timer->detection_enabled = false;
timer->config.tv_sec = 0;
timer->config.tv_usec = 0;
timer->alarm.it_interval.tv_sec = 0;
timer->alarm.it_interval.tv_usec = 0;
timer->alarm.it_value.tv_sec = 0;
timer->alarm.it_value.tv_usec = 0;
timer->config.tv_sec = 0;
timer->config.tv_usec = 0;
timer->alarm.it_interval.tv_sec = 0;
timer->alarm.it_interval.tv_usec = 0;
timer->alarm.it_value.tv_sec = 0;
timer->alarm.it_value.tv_usec = 0;
}
static void sigalarm_handler(int signum) {
/* ensure that SIGALARM is ALWAYS handled by kvm thread */
assert(GET_GLOBAL_STATE()->timeout_detector.kvm_tid == syscall(SYS_gettid));
static void sigalarm_handler(int signum)
{
/* ensure that SIGALRM is ALWAYS handled by the kvm thread */
assert(GET_GLOBAL_STATE()->timeout_detector.kvm_tid == syscall(SYS_gettid));
#ifdef DEBUG_TIMEOUT_DETECTOR
fprintf(stderr, "Handled! %d %ld\n", signum, syscall(SYS_gettid));
fprintf(stderr, "Handled! %d %ld\n", signum, syscall(SYS_gettid));
#endif
}
void install_timeout_detector(timeout_detector_t* timer){
timer->kvm_tid = syscall(SYS_gettid);
if (signal(SIGALRM, sigalarm_handler) == SIG_ERR) {
fprintf(stderr, "%s failed!\n", __func__);
assert(false);
}
void install_timeout_detector(timeout_detector_t *timer)
{
timer->kvm_tid = syscall(SYS_gettid);
if (signal(SIGALRM, sigalarm_handler) == SIG_ERR) {
fprintf(stderr, "%s failed!\n", __func__);
assert(false);
}
#ifdef DEBUG_TIMEOUT_DETECTOR
fprintf(stderr, "SIGALRM HANDLER INSTALLED! tid=%ld\n", syscall(SYS_gettid));
fprintf(stderr, "SIGALRM HANDLER INSTALLED! tid=%ld\n", syscall(SYS_gettid));
#endif
}
void reset_timeout_detector(timeout_detector_t* timer){
#ifdef DEBUG_TIMEOUT_DETECTOR
fprintf(stderr, "%s!\n", __func__);
#endif
if (timer->config.tv_sec || timer->config.tv_usec) {
timer->alarm.it_value.tv_sec = timer->config.tv_sec;
timer->alarm.it_value.tv_usec = timer->config.tv_usec;
timer->detection_enabled = true;
} else {
timer->detection_enabled = false;
}
}
void update_itimer(timeout_detector_t* timer, uint8_t sec, uint32_t usec)
void reset_timeout_detector(timeout_detector_t *timer)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
//fprintf(stderr, "%s: %x %x\n", __func__, sec, usec);
fprintf(stderr, "%s!\n", __func__);
#endif
if (sec || usec) {
timer->config.tv_sec = (time_t)sec;
timer->config.tv_usec = (suseconds_t)usec;
timer->detection_enabled = true;
} else {
timer->detection_enabled = false;
}
if (timer->config.tv_sec || timer->config.tv_usec) {
timer->alarm.it_value.tv_sec = timer->config.tv_sec;
timer->alarm.it_value.tv_usec = timer->config.tv_usec;
timer->detection_enabled = true;
} else {
timer->detection_enabled = false;
}
}
void arm_sigprof_timer(timeout_detector_t* timer){
void update_itimer(timeout_detector_t *timer, uint8_t sec, uint32_t usec)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec, timer->alarm.it_value.tv_usec);
// fprintf(stderr, "%s: %x %x\n", __func__, sec, usec);
#endif
if (timer->detection_enabled) {
if (timer->alarm.it_value.tv_usec == 0 && timer->alarm.it_value.tv_sec == 0) {
fprintf(stderr, "Attempting to re-arm an expired timer! => reset(%ld.%ld)\n",
timer->config.tv_sec, timer->config.tv_usec);
reset_timeout_detector(timer);
}
assert(setitimer(ITIMER_REAL, &timer->alarm, NULL) == 0);
}
if (sec || usec) {
timer->config.tv_sec = (time_t)sec;
timer->config.tv_usec = (suseconds_t)usec;
timer->detection_enabled = true;
} else {
timer->detection_enabled = false;
}
}
bool disarm_sigprof_timer(timeout_detector_t* timer){
void arm_sigprof_timer(timeout_detector_t *timer)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec, timer->alarm.it_value.tv_usec);
fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec,
timer->alarm.it_value.tv_usec);
#endif
if (timer->detection_enabled) {
struct itimerval disable = {0};
assert(setitimer(ITIMER_REAL, &disable, &timer->alarm) == 0);
assert(timer->alarm.it_interval.tv_usec == 0);
if (timer->alarm.it_value.tv_usec == 0 && timer->alarm.it_value.tv_sec == 0) {
reset_timeout_detector(timer);
return true;
}
}
return false;
if (timer->detection_enabled) {
if (timer->alarm.it_value.tv_usec == 0 && timer->alarm.it_value.tv_sec == 0) {
fprintf(stderr,
"Attempting to re-arm an expired timer! => reset(%ld.%ld)\n",
timer->config.tv_sec, timer->config.tv_usec);
reset_timeout_detector(timer);
}
assert(setitimer(ITIMER_REAL, &timer->alarm, NULL) == 0);
}
}
void block_signals(void){
sigset_t set;
bool disarm_sigprof_timer(timeout_detector_t *timer)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec,
timer->alarm.it_value.tv_usec);
#endif
sigemptyset(&set);
sigaddset(&set, SIGALRM);
sigaddset(&set, SIGABRT);
sigaddset(&set, SIGSEGV);
pthread_sigmask(SIG_BLOCK, &set, NULL);
//fprintf(stderr, "%s!\n", __func__);
if (timer->detection_enabled) {
struct itimerval disable = { 0 };
assert(setitimer(ITIMER_REAL, &disable, &timer->alarm) == 0);
assert(timer->alarm.it_interval.tv_usec == 0);
if (timer->alarm.it_value.tv_usec == 0 && timer->alarm.it_value.tv_sec == 0) {
reset_timeout_detector(timer);
return true;
}
}
return false;
}
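/*
 * A minimal standalone sketch (not Nyx code; all names below are
 * illustrative) of the SIGALRM pattern implemented above: a one-shot
 * ITIMER_REAL alarm is armed around a blocking operation, the empty
 * handler merely forces the blocking syscall -- in Nyx the
 * ioctl(vcpu_fd, KVM_RUN, 0) -- to return early with EINTR, and a
 * zeroed itimerval disarms the timer again.
 */
#include <assert.h>
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>
static void on_alarm(int signum)
{
    (void)signum; /* no work here; interrupting the syscall is the effect */
}
int main(void)
{
    struct itimerval alarm = { 0 };
    struct itimerval disable = { 0 };
    assert(signal(SIGALRM, on_alarm) != SIG_ERR);
    alarm.it_value.tv_usec = 500000; /* 500ms one-shot budget */
    assert(setitimer(ITIMER_REAL, &alarm, NULL) == 0);
    if (sleep(10) != 0) { /* stand-in for the blocking KVM_RUN ioctl */
        fprintf(stderr, "interrupted by SIGALRM -> timeout detected\n");
    }
    assert(setitimer(ITIMER_REAL, &disable, NULL) == 0); /* disarm */
    return 0;
}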
void unblock_signals(void){
sigset_t set;
void block_signals(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGABRT);
sigaddset(&set, SIGSEGV);
sigaddset(&set, SIGALRM);
sigprocmask(SIG_UNBLOCK, &set, NULL);
sigemptyset(&set);
sigaddset(&set, SIGALRM);
sigaddset(&set, SIGABRT);
sigaddset(&set, SIGSEGV);
pthread_sigmask(SIG_BLOCK, &set, NULL);
// fprintf(stderr, "%s!\n", __func__);
}
void unblock_signals(void)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGABRT);
sigaddset(&set, SIGSEGV);
sigaddset(&set, SIGALRM);
sigprocmask(SIG_UNBLOCK, &set, NULL);
}
/* -------------------- */
static inline void handle_tmp_snapshot_state(void){
if(GET_GLOBAL_STATE()->discard_tmp_snapshot){
if(fast_reload_tmp_created(get_fast_reload_snapshot())){
qemu_mutex_lock_iothread();
fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot()); /* bye bye */
qemu_mutex_unlock_iothread();
//fprintf(stderr, "======= SNAPSHOT REMOVED! =======\n");
}
GET_GLOBAL_STATE()->discard_tmp_snapshot = false;
set_tmp_snapshot_created(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
static inline void handle_tmp_snapshot_state(void)
{
if (GET_GLOBAL_STATE()->discard_tmp_snapshot) {
if (fast_reload_tmp_created(get_fast_reload_snapshot())) {
qemu_mutex_lock_iothread();
fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot()); /* bye bye */
qemu_mutex_unlock_iothread();
// fprintf(stderr, "======= SNAPSHOT REMOVED! =======\n");
}
GET_GLOBAL_STATE()->discard_tmp_snapshot = false;
set_tmp_snapshot_created(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
}
static inline bool synchronization_check_page_not_found(void){
bool failure = false;
static inline bool synchronization_check_page_not_found(void)
{
bool failure = false;
/* a page is missing in the current execution */
if(GET_GLOBAL_STATE()->decoder_page_fault){
set_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->decoder_page_fault_addr);
GET_GLOBAL_STATE()->decoder_page_fault = false;
GET_GLOBAL_STATE()->decoder_page_fault_addr = 0;
failure = true;
}
/* a page is missing in the current execution */
if (GET_GLOBAL_STATE()->decoder_page_fault) {
set_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
GET_GLOBAL_STATE()->decoder_page_fault_addr);
GET_GLOBAL_STATE()->decoder_page_fault = false;
GET_GLOBAL_STATE()->decoder_page_fault_addr = 0;
failure = true;
}
/* page was dumped during this execution */
if(GET_GLOBAL_STATE()->dump_page){
kvm_remove_all_breakpoints(qemu_get_cpu(0));
kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);
kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_MTF);
reset_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
failure = true;
}
/* page was dumped during this execution */
if (GET_GLOBAL_STATE()->dump_page) {
kvm_remove_all_breakpoints(qemu_get_cpu(0));
kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);
kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_MTF);
reset_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
failure = true;
}
return failure;
return failure;
}
void synchronization_unlock(void){
//fprintf(stderr, "%s\n", __func__);
void synchronization_unlock(void)
{
// fprintf(stderr, "%s\n", __func__);
pthread_mutex_lock(&synchronization_lock_mutex);
pthread_cond_signal(&synchronization_lock_condition);
pthread_mutex_unlock(&synchronization_lock_mutex);
pthread_mutex_lock(&synchronization_lock_mutex);
pthread_cond_signal(&synchronization_lock_condition);
pthread_mutex_unlock(&synchronization_lock_mutex);
}
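/*
 * Hedged sketch (standalone, not Nyx code) of the handshake that
 * synchronization_lock()/synchronization_unlock() implement above: the
 * vcpu thread publishes its result, pings the frontend and sleeps on a
 * condition variable; the frontend wakes it for the next execution. A
 * predicate flag guards against a lost wakeup.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int next_input_ready = 0;
static void *frontend(void *arg)
{
    (void)arg;
    sleep(1); /* pretend to prepare the next fuzzing input */
    pthread_mutex_lock(&lock);
    next_input_ready = 1;
    pthread_cond_signal(&cond); /* resume the vcpu thread */
    pthread_mutex_unlock(&lock);
    return NULL;
}
int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, frontend, NULL);
    pthread_mutex_lock(&lock);
    puts("exec done"); /* stands in for interface_send_char() */
    while (!next_input_ready) {
        pthread_cond_wait(&cond, &lock); /* atomically unlocks and sleeps */
    }
    pthread_mutex_unlock(&lock);
    puts("running next input");
    pthread_join(tid, NULL);
    return 0;
}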
uint64_t run_counter = 0;
bool in_fuzzing_loop = false;
uint64_t run_counter = 0;
bool in_fuzzing_loop = false;
void synchronization_lock_hprintf(void){
pthread_mutex_lock(&synchronization_lock_mutex);
interface_send_char(NYX_INTERFACE_PING);
void synchronization_lock_hprintf(void)
{
pthread_mutex_lock(&synchronization_lock_mutex);
interface_send_char(NYX_INTERFACE_PING);
pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex);
pthread_mutex_unlock(&synchronization_lock_mutex);
pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex);
pthread_mutex_unlock(&synchronization_lock_mutex);
}
void synchronization_lock(void){
void synchronization_lock(void)
{
timeout_detector_t timer = GET_GLOBAL_STATE()->timeout_detector;
pthread_mutex_lock(&synchronization_lock_mutex);
run_counter++;
timeout_detector_t timer = GET_GLOBAL_STATE()->timeout_detector;
pthread_mutex_lock(&synchronization_lock_mutex);
run_counter++;
if (qemu_get_cpu(0)->intel_pt_run_trashed) {
set_pt_overflow_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
}
if(qemu_get_cpu(0)->intel_pt_run_trashed){
set_pt_overflow_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
}
long runtime_sec = timer.config.tv_sec - timer.alarm.it_value.tv_sec;
long runtime_usec = timer.config.tv_usec - timer.alarm.it_value.tv_usec;
long runtime_sec = timer.config.tv_sec - timer.alarm.it_value.tv_sec;
long runtime_usec = timer.config.tv_usec - timer.alarm.it_value.tv_usec;
if (runtime_usec < 0) {
if (runtime_sec < 1) {
fprintf(stderr, "Error: negative payload runtime?!\n");
}
runtime_sec -= 1;
runtime_usec = timer.config.tv_usec - timer.alarm.it_value.tv_usec + 1000000;
}
set_exec_done_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
runtime_sec, runtime_usec,
GET_GLOBAL_STATE()->num_dirty_pages);
if (runtime_usec < 0) {
if (runtime_sec < 1) {
fprintf(stderr, "Error: negative payload runtime?!\n");
}
runtime_sec -= 1;
runtime_usec = timer.config.tv_usec - timer.alarm.it_value.tv_usec + 1000000;
}
set_exec_done_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
runtime_sec, runtime_usec,
GET_GLOBAL_STATE()->num_dirty_pages);
if (synchronization_check_page_not_found()) {
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
if(synchronization_check_page_not_found()){
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
if (GET_GLOBAL_STATE()->dump_page) {
GET_GLOBAL_STATE()->dump_page = false;
GET_GLOBAL_STATE()->dump_page_addr = 0x0;
kvm_remove_all_breakpoints(qemu_get_cpu(0));
kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);
}
if(GET_GLOBAL_STATE()->dump_page){
GET_GLOBAL_STATE()->dump_page = false;
GET_GLOBAL_STATE()->dump_page_addr = 0x0;
kvm_remove_all_breakpoints(qemu_get_cpu(0));
kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);
}
if (unlikely(GET_GLOBAL_STATE()->in_redqueen_reload_mode)) {
fsync_redqueen_files();
}
if(unlikely(GET_GLOBAL_STATE()->in_redqueen_reload_mode)) {
fsync_redqueen_files();
}
if (unlikely(GET_GLOBAL_STATE()->trace_mode)) {
redqueen_trace_flush();
}
if (unlikely(GET_GLOBAL_STATE()->trace_mode)) {
redqueen_trace_flush();
}
interface_send_char(NYX_INTERFACE_PING);
interface_send_char(NYX_INTERFACE_PING);
pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex);
pthread_mutex_unlock(&synchronization_lock_mutex);
pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex);
pthread_mutex_unlock(&synchronization_lock_mutex);
check_auxiliary_config_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
&GET_GLOBAL_STATE()->shadow_config);
check_auxiliary_config_buffer(GET_GLOBAL_STATE()->auxilary_buffer, &GET_GLOBAL_STATE()->shadow_config);
if (GET_GLOBAL_STATE()->starved == true)
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 2);
else
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 1);
if (GET_GLOBAL_STATE()->starved == true)
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 2);
else
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 1);
GET_GLOBAL_STATE()->pt_trace_size = 0;
GET_GLOBAL_STATE()->pt_trace_size = 0;
}
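/*
 * The runtime bookkeeping in synchronization_lock() above computes
 * elapsed = configured timeout - time left on the one-shot timer, with a
 * manual borrow when the microsecond part underflows. A standalone
 * illustration (the values are made up):
 */
#include <stdio.h>
#include <sys/time.h>
static struct timeval elapsed_time(struct timeval config, struct timeval left)
{
    struct timeval r;
    r.tv_sec = config.tv_sec - left.tv_sec;
    r.tv_usec = config.tv_usec - left.tv_usec;
    if (r.tv_usec < 0) { /* borrow one second */
        r.tv_sec -= 1;
        r.tv_usec += 1000000;
    }
    return r;
}
int main(void)
{
    struct timeval config = { 1, 200000 }; /* 1.2s budget configured */
    struct timeval left = { 0, 900000 };   /* 0.9s left at guest exit */
    struct timeval r = elapsed_time(config, left);
    /* 1.200000 - 0.900000 -> borrow: 0 sec, 300000 usec */
    printf("runtime: %ld.%06lds\n", (long)r.tv_sec, (long)r.tv_usec);
    return 0;
}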
static void perform_reload(void){
if(fast_reload_root_created(get_fast_reload_snapshot())){
qemu_mutex_lock_iothread();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
set_result_dirty_pages(GET_GLOBAL_STATE()->auxilary_buffer, get_dirty_page_num(get_fast_reload_snapshot()));
}
else{
fprintf(stderr, "WARNING: Root snapshot is not available yet!\n");
}
static void perform_reload(void)
{
if (fast_reload_root_created(get_fast_reload_snapshot())) {
qemu_mutex_lock_iothread();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
set_result_dirty_pages(GET_GLOBAL_STATE()->auxilary_buffer,
get_dirty_page_num(get_fast_reload_snapshot()));
} else {
fprintf(stderr, "WARNING: Root snapshot is not available yet!\n");
}
}
void synchronization_lock_crash_found(void){
if(!in_fuzzing_loop && GET_GLOBAL_STATE()->in_fuzzing_mode){
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP] at %lx\n", getpid(), run_counter, __func__, get_rip(qemu_get_cpu(0)));
//abort();
}
void synchronization_lock_crash_found(void)
{
if (!in_fuzzing_loop && GET_GLOBAL_STATE()->in_fuzzing_mode) {
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP] at %lx\n", getpid(),
run_counter, __func__, get_rip(qemu_get_cpu(0)));
// abort();
}
pt_disable(qemu_get_cpu(0), false);
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
handle_tmp_snapshot_state();
set_crash_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
perform_reload();
set_crash_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
in_fuzzing_loop = false;
perform_reload();
in_fuzzing_loop = false;
}
void synchronization_lock_asan_found(void){
if(!in_fuzzing_loop){
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
void synchronization_lock_asan_found(void)
{
if (!in_fuzzing_loop) {
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
pt_disable(qemu_get_cpu(0), false);
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
handle_tmp_snapshot_state();
set_asan_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
perform_reload();
set_asan_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
in_fuzzing_loop = false;
perform_reload();
in_fuzzing_loop = false;
}
void synchronization_lock_timeout_found(void){
//fprintf(stderr, "<%d>\t%s\n", getpid(), __func__);
void synchronization_lock_timeout_found(void)
{
// fprintf(stderr, "<%d>\t%s\n", getpid(), __func__);
if(!in_fuzzing_loop){
//fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
if (!in_fuzzing_loop) {
// fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
pt_disable(qemu_get_cpu(0), false);
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
handle_tmp_snapshot_state();
set_timeout_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
set_timeout_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
perform_reload();
perform_reload();
in_fuzzing_loop = false;
in_fuzzing_loop = false;
}
void synchronization_lock_shutdown_detected(void){
if(!in_fuzzing_loop){
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
void synchronization_lock_shutdown_detected(void)
{
if (!in_fuzzing_loop) {
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
pt_disable(qemu_get_cpu(0), false);
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
handle_tmp_snapshot_state();
perform_reload();
perform_reload();
in_fuzzing_loop = false;
in_fuzzing_loop = false;
}
void synchronization_payload_buffer_write_detected(void){
static char reason[1024];
void synchronization_payload_buffer_write_detected(void)
{
static char reason[1024];
if(!in_fuzzing_loop){
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
}
pt_disable(qemu_get_cpu(0), false);
if (!in_fuzzing_loop) {
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
run_counter, __func__);
}
handle_tmp_snapshot_state();
pt_disable(qemu_get_cpu(0), false);
int bytes = snprintf(reason, 1024, "Payload buffer write attempt at RIP: %lx\n", get_rip(qemu_get_cpu(0)));
set_payload_buffer_write_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, reason, bytes);
set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
handle_tmp_snapshot_state();
perform_reload();
int bytes = snprintf(reason, 1024, "Payload buffer write attempt at RIP: %lx\n",
get_rip(qemu_get_cpu(0)));
set_payload_buffer_write_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
reason, bytes);
set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
in_fuzzing_loop = false;
perform_reload();
in_fuzzing_loop = false;
}
void synchronization_cow_full_detected(void){
if(!in_fuzzing_loop){
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
}
void synchronization_cow_full_detected(void)
{
if (!in_fuzzing_loop) {
fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
run_counter, __func__);
}
pt_disable(qemu_get_cpu(0), false);
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
handle_tmp_snapshot_state();
perform_reload();
perform_reload();
in_fuzzing_loop = false;
in_fuzzing_loop = false;
}
void synchronization_disable_pt(CPUState *cpu){
// nyx_trace();
if(!in_fuzzing_loop){
//fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
void synchronization_disable_pt(CPUState *cpu)
{
// nyx_trace();
if (!in_fuzzing_loop) {
// fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
}
pt_disable(qemu_get_cpu(0), false);
pt_disable(qemu_get_cpu(0), false);
handle_tmp_snapshot_state();
handle_tmp_snapshot_state();
if(GET_GLOBAL_STATE()->in_reload_mode || GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->dump_page || fast_reload_tmp_created(get_fast_reload_snapshot())){
perform_reload();
}
if (GET_GLOBAL_STATE()->in_reload_mode ||
GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->dump_page ||
fast_reload_tmp_created(get_fast_reload_snapshot()))
{
perform_reload();
}
set_result_pt_trace_size(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->pt_trace_size);
set_result_bb_coverage(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->bb_coverage);
set_result_pt_trace_size(GET_GLOBAL_STATE()->auxilary_buffer,
GET_GLOBAL_STATE()->pt_trace_size);
set_result_bb_coverage(GET_GLOBAL_STATE()->auxilary_buffer,
GET_GLOBAL_STATE()->bb_coverage);
in_fuzzing_loop = false;
in_fuzzing_loop = false;
}
void synchronization_enter_fuzzing_loop(CPUState *cpu){
if (pt_enable(cpu, false) == 0){
cpu->pt_enabled = true;
}
in_fuzzing_loop = true;
void synchronization_enter_fuzzing_loop(CPUState *cpu)
{
if (pt_enable(cpu, false) == 0) {
cpu->pt_enabled = true;
}
in_fuzzing_loop = true;
reset_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector));
reset_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector));
}

View File

@ -6,19 +6,19 @@
#include <sys/time.h>
typedef struct timeout_detector_s {
int kvm_tid;
volatile bool detection_enabled;
int kvm_tid;
volatile bool detection_enabled;
struct timeval config;
struct itimerval alarm;
struct timeval config;
struct itimerval alarm;
} timeout_detector_t;
void init_timeout_detector(timeout_detector_t* timeout_detector);
void install_timeout_detector(timeout_detector_t* timeout_detector);
void reset_timeout_detector(timeout_detector_t* timeout_detector);
void arm_sigprof_timer(timeout_detector_t* timeout_detector);
bool disarm_sigprof_timer(timeout_detector_t* timeout_detector);
void update_itimer(timeout_detector_t* timeout_detector, uint8_t sec, uint32_t usec);
void init_timeout_detector(timeout_detector_t *timeout_detector);
void install_timeout_detector(timeout_detector_t *timeout_detector);
void reset_timeout_detector(timeout_detector_t *timeout_detector);
void arm_sigprof_timer(timeout_detector_t *timeout_detector);
bool disarm_sigprof_timer(timeout_detector_t *timeout_detector);
void update_itimer(timeout_detector_t *timeout_detector, uint8_t sec, uint32_t usec);
void block_signals(void);
void unblock_signals(void);
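/*
 * Putting the API above together, one plausible per-execution lifecycle
 * (a hedged sketch; the exact call sites in Nyx may differ):
 *
 *     timeout_detector_t td;
 *     init_timeout_detector(&td);
 *     install_timeout_detector(&td);  // on the kvm thread: SIGALRM handler
 *     update_itimer(&td, 0, 500000);  // e.g. 500ms budget per execution
 *
 *     for (;;) {
 *         arm_sigprof_timer(&td);     // start the one-shot alarm
 *         // ioctl(vcpu_fd, KVM_RUN, 0) ...
 *         if (disarm_sigprof_timer(&td)) {
 *             // timer already expired -> report a timeout
 *         }
 *     }
 */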

View File

@ -1,9 +1,9 @@
#include "qemu/osdep.h"
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include "nyx/debug.h"
#include "nyx/state/state.h"
@ -12,57 +12,61 @@
/* dump PT trace as returned from HW */
char *pt_trace_dump_filename;
bool pt_dump_initialized = false;
bool pt_dump_enabled = false;
bool pt_dump_initialized = false;
bool pt_dump_enabled = false;
void pt_trace_dump_enable(bool enable){
if (pt_dump_initialized)
pt_dump_enabled = enable;
}
void pt_trace_dump_init(char* filename)
void pt_trace_dump_enable(bool enable)
{
int test_fd;
nyx_debug("Enable pt trace dump at %s", filename);
pt_dump_initialized = true;
test_fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
if (test_fd < 0)
fprintf(stderr, "Error accessing pt_dump output path %s: %s", pt_trace_dump_filename, strerror(errno));
assert(test_fd >= 0);
pt_trace_dump_filename = strdup(filename);
assert(pt_trace_dump_filename);
if (pt_dump_initialized)
pt_dump_enabled = enable;
}
void pt_truncate_pt_dump_file(void) {
int fd;
void pt_trace_dump_init(char *filename)
{
int test_fd;
if (!pt_dump_enabled)
return;
nyx_debug("Enable pt trace dump at %s", filename);
pt_dump_initialized = true;
fd = open(pt_trace_dump_filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
if (fd < 0) {
fprintf(stderr, "Error truncating %s: %s\n", pt_trace_dump_filename, strerror(errno));
assert(0);
}
close(fd);
test_fd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (test_fd < 0)
fprintf(stderr, "Error accessing pt_dump output path %s: %s",
pt_trace_dump_filename, strerror(errno));
assert(test_fd >= 0);
pt_trace_dump_filename = strdup(filename);
assert(pt_trace_dump_filename);
}
void pt_truncate_pt_dump_file(void)
{
int fd;
if (!pt_dump_enabled)
return;
fd = open(pt_trace_dump_filename, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (fd < 0) {
fprintf(stderr, "Error truncating %s: %s\n", pt_trace_dump_filename,
strerror(errno));
assert(0);
}
close(fd);
}
void pt_write_pt_dump_file(uint8_t *data, size_t bytes)
{
int fd;
int fd;
if (!pt_dump_enabled)
return;
if (!pt_dump_enabled)
return;
fd = open(pt_trace_dump_filename, O_APPEND|O_WRONLY, 0644);
if (fd < 0) {
fprintf(stderr, "Error writing pt_trace_dump to %s: %s\n", pt_trace_dump_filename, strerror(errno));
assert(0);
}
fd = open(pt_trace_dump_filename, O_APPEND | O_WRONLY, 0644);
if (fd < 0) {
fprintf(stderr, "Error writing pt_trace_dump to %s: %s\n",
pt_trace_dump_filename, strerror(errno));
assert(0);
}
assert(bytes == write(fd, data, bytes));
close(fd);
close(fd);
}
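/*
 * Hedged usage sketch for the three helpers above (the call sites and the
 * path are assumptions, not from the source):
 *   pt_trace_dump_init("/tmp/pt_dump");   // validate the path once at startup
 *   pt_trace_dump_enable(true);
 *   pt_truncate_pt_dump_file();           // at the start of an execution
 *   pt_write_pt_dump_file(buf, len);      // per raw ToPA buffer chunk
 */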

View File

@ -2,7 +2,7 @@
#include <stdint.h>
void pt_trace_dump_init(char* filename);
void pt_trace_dump_init(char *filename);
void pt_trace_dump_enable(bool enable);
void pt_write_pt_dump_file(uint8_t *data, size_t bytes);
void pt_truncate_pt_dump_file(void);

View File

@ -1,12 +1,12 @@
#pragma once
enum mem_mode {
enum mem_mode {
mm_unkown,
mm_32_protected, /* 32 Bit / No MMU */
mm_32_paging, /* 32 Bit / L3 Paging */
mm_32_pae, /* 32 Bit / PAE Paging */
mm_64_l4_paging, /* 64 Bit / L4 Paging */
mm_64_l5_paging, /* 32 Bit / L5 Paging */
mm_32_protected, /* 32 Bit / No MMU */
mm_32_paging, /* 32 Bit / L3 Paging */
mm_32_pae, /* 32 Bit / PAE Paging */
mm_64_l4_paging, /* 64 Bit / L4 Paging */
mm_64_l5_paging, /* 64 Bit / L5 Paging */
};
typedef uint8_t mem_mode_t;
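/*
 * Hypothetical helper (not part of the source) showing how the modes above
 * map to the pointer width a disassembler would assume:
 */
static inline uint8_t word_width_for_mode(mem_mode_t m)
{
    switch (m) {
    case mm_32_protected:
    case mm_32_paging:
    case mm_32_pae:
        return 32;
    case mm_64_l4_paging:
    case mm_64_l5_paging:
        return 64;
    default: /* mm_unkown */
        return 0;
    }
}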

vl.c
View File

@ -137,12 +137,12 @@ int main(int argc, char **argv)
#ifdef QEMU_NYX
// clang-format on
#include "nyx/debug.h"
#include "nyx/pt.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/synchronization.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/state/state.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/pt.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
// clang-format off
#endif
@ -257,26 +257,23 @@ static struct {
#ifdef QEMU_NYX
// clang-format on
static QemuOptsList qemu_fast_vm_reloads_opts = {
.name = "fast_vm_reload-opts",
.name = "fast_vm_reload-opts",
.implied_opt_name = "order",
.head = QTAILQ_HEAD_INITIALIZER(qemu_fast_vm_reloads_opts.head),
.merge_lists = true,
.desc = {
{
.name = "path",
.type = QEMU_OPT_STRING,
},{
.name = "load",
.type = QEMU_OPT_BOOL,
},{
.name = "pre_path",
.type = QEMU_OPT_STRING,
},{
.name = "skip_serialization",
.type = QEMU_OPT_BOOL,
},
{ }
},
.head = QTAILQ_HEAD_INITIALIZER(qemu_fast_vm_reloads_opts.head),
.merge_lists = true,
.desc = {{
.name = "path",
.type = QEMU_OPT_STRING,
}, {
.name = "load",
.type = QEMU_OPT_BOOL,
}, {
.name = "pre_path",
.type = QEMU_OPT_STRING,
}, {
.name = "skip_serialization",
.type = QEMU_OPT_BOOL,
}, {}},
};
// clang-format off
#endif
@ -1481,7 +1478,7 @@ void vm_state_notify(int running, RunState state)
#ifdef QEMU_NYX
// clang-format on
char* loadvm_global = NULL;
char *loadvm_global = NULL;
// clang-format off
#endif
@ -1660,8 +1657,8 @@ void qemu_system_guest_panicked(GuestPanicInformation *info)
void qemu_system_reset_request(ShutdownCause reason)
{
#ifdef QEMU_NYX
// clang-format on
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
// clang-format on
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
nyx_trace();
GET_GLOBAL_STATE()->shutdown_requested = true;
return;
@ -1872,8 +1869,8 @@ static bool main_loop_should_exit(void)
}
if (qemu_vmstop_requested(&r)) {
#ifdef QEMU_NYX
// clang-format on
if(check_if_relood_request_exists_post(GET_GLOBAL_STATE()->reload_state)){
// clang-format on
if (check_if_relood_request_exists_post(GET_GLOBAL_STATE()->reload_state)) {
return false;
}
// clang-format off
@ -1902,9 +1899,9 @@ static void main_loop(void)
static void version(void)
{
#ifdef QEMU_NYX
// clang-format on
printf("QEMU-PT emulator version " QEMU_VERSION QEMU_PKGVERSION " (kAFL)\n"
QEMU_COPYRIGHT "\n");
// clang-format on
printf("QEMU-PT emulator version " QEMU_VERSION QEMU_PKGVERSION
" (kAFL)\n" QEMU_COPYRIGHT "\n");
// clang-format off
#else
printf("QEMU emulator version " QEMU_FULL_VERSION "\n"
@ -2809,17 +2806,17 @@ static bool object_create_delayed(const char *type, QemuOpts *opts)
#ifdef QEMU_NYX
// clang-format on
static bool verifiy_snapshot_folder(const char* folder){
static bool verifiy_snapshot_folder(const char *folder)
{
struct stat s;
if(!folder){
if (!folder) {
return false;
}
if(-1 != stat(folder, &s)) {
if(S_ISDIR(s.st_mode)) {
if (-1 != stat(folder, &s)) {
if (S_ISDIR(s.st_mode)) {
return true;
}
else{
} else {
error_report("fast_vm_reload: path is not a folder");
exit(1);
}
@ -2943,7 +2940,7 @@ int main(int argc, char **argv, char **envp)
{
#ifdef QEMU_NYX
// clang-format on
// clang-format on
bool fast_vm_reload = false;
state_init_global();
const char *fast_vm_reload_opt_arg = NULL;
@ -3011,7 +3008,7 @@ int main(int argc, char **argv, char **envp)
qemu_add_opts(&qemu_nic_opts);
qemu_add_opts(&qemu_net_opts);
#ifdef QEMU_NYX
// clang-format on
// clang-format on
qemu_add_opts(&qemu_fast_vm_reloads_opts);
// clang-format off
#endif
@ -3103,7 +3100,7 @@ int main(int argc, char **argv, char **envp)
}
switch(popt->index) {
#ifdef QEMU_NYX
// clang-format on
// clang-format on
case QEMU_OPTION_fast_vm_reload:
opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"),
optarg, true);
@ -3111,7 +3108,7 @@ int main(int argc, char **argv, char **envp)
exit(1);
}
fast_vm_reload_opt_arg = optarg;
fast_vm_reload = true;
fast_vm_reload = true;
break;
// clang-format off
#endif
@ -3571,8 +3568,8 @@ int main(int argc, char **argv, char **envp)
case QEMU_OPTION_loadvm:
loadvm = optarg;
#ifdef QEMU_NYX
// clang-format on
loadvm_global = (char*)optarg;
// clang-format on
loadvm_global = (char *)optarg;
// clang-format off
#endif
break;
@ -4015,7 +4012,7 @@ int main(int argc, char **argv, char **envp)
}
#ifdef QEMU_NYX
// clang-format on
// clang-format on
block_signals();
// clang-format off
#endif
@ -4590,99 +4587,109 @@ int main(int argc, char **argv, char **envp)
register_global_state();
#ifdef QEMU_NYX
// clang-format on
// clang-format on
fast_reload_init(GET_GLOBAL_STATE()->fast_reload_snapshot);
if (fast_vm_reload){
if(getenv("NYX_DISABLE_BLOCK_COW")){
nyx_error("Nyx block COW cache layer cannot be disabled while using fast snapshots\n");
if (fast_vm_reload) {
if (getenv("NYX_DISABLE_BLOCK_COW")) {
nyx_error("Nyx block COW cache layer cannot be disabled while using "
"fast snapshots\n");
exit(1);
}
QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"), fast_vm_reload_opt_arg, true);
const char* snapshot_path = qemu_opt_get(opts, "path");
const char* pre_snapshot_path = qemu_opt_get(opts, "pre_path");
QemuOpts *opts =
qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"),
fast_vm_reload_opt_arg, true);
const char *snapshot_path = qemu_opt_get(opts, "path");
const char *pre_snapshot_path = qemu_opt_get(opts, "pre_path");
/*
valid arguments:
/*
valid arguments:
// create root snapshot to path (load pre_snapshot first)
-> path=foo,pre_path=bar,load=off // ALLOWED
// create root snapshot im memory (load pre_snapshot first)
-> pre_path=bar,load=off,skip_serialization // ALLOWED
// create root snapshot to path
-> path=foo,load=off // ALLOWED
// load root snapshot from path
// load root snapshot from path
-> path=foo,load=on // ALLOWED
// create pre snapshot to pre_path
-> pre_path=bar,load=off // ALLOWED
invalid arguments:
invalid arguments:
-> load=off // ALLOWED but useless
-> path=foo,pre_path=bar,load=on // INVALID
-> pre_path=bar,load=on // INVALID
-> load=on // INVALID
*/
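/* Illustration only (the paths are placeholders, not from the source); the
 * allowed combinations above map to -fast_vm_reload arguments such as:
 *   -fast_vm_reload pre_path=/img/pre,load=off             (create pre image)
 *   -fast_vm_reload path=/img/root,pre_path=/img/pre,load=off
 *                                           (boot pre image, serialize root)
 *   -fast_vm_reload path=/img/root,load=on  (resume fuzzing from root) */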
bool snapshot_used = verifiy_snapshot_folder(snapshot_path);
bool pre_snapshot_used = verifiy_snapshot_folder(pre_snapshot_path);
bool load_mode = qemu_opt_get_bool(opts, "load", false);
bool snapshot_used = verifiy_snapshot_folder(snapshot_path);
bool pre_snapshot_used = verifiy_snapshot_folder(pre_snapshot_path);
bool load_mode = qemu_opt_get_bool(opts, "load", false);
bool skip_serialization = qemu_opt_get_bool(opts, "skip_serialization", false);
if((snapshot_used || load_mode || skip_serialization) && getenv("NYX_DISABLE_DIRTY_RING")){
error_report("NYX_DISABLE_DIRTY_RING is only allowed during pre-snapshot creation\n");
if ((snapshot_used || load_mode || skip_serialization) &&
getenv("NYX_DISABLE_DIRTY_RING"))
{
error_report("NYX_DISABLE_DIRTY_RING is only allowed during "
"pre-snapshot creation\n");
exit(1);
}
if((pre_snapshot_used && !snapshot_used && !load_mode) && !getenv("NYX_DISABLE_DIRTY_RING")){
error_report("NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n");
if ((pre_snapshot_used && !snapshot_used && !load_mode) &&
!getenv("NYX_DISABLE_DIRTY_RING"))
{
error_report(
"NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n");
exit(1);
}
if(pre_snapshot_used && load_mode){
if (pre_snapshot_used && load_mode) {
error_report("invalid argument (pre_snapshot_used && load_mode)!\n");
exit(1);
}
if((!snapshot_used && !pre_snapshot_used) && load_mode){
error_report("invalid argument ((!pre_snapshot_used && !pre_snapshot_used) && load_mode)!\n");
if ((!snapshot_used && !pre_snapshot_used) && load_mode) {
error_report("invalid argument ((!pre_snapshot_used && "
"!pre_snapshot_used) && load_mode)!\n");
exit(1);
}
if(pre_snapshot_used && snapshot_used){
if (pre_snapshot_used && snapshot_used) {
nyx_printf("[Qemu-Nyx]: loading pre image to start fuzzing...\n");
set_fast_reload_mode(false);
set_fast_reload_path(snapshot_path);
if(!skip_serialization){
if (!skip_serialization) {
enable_fast_reloads();
}
fast_reload_create_from_file_pre_image(get_fast_reload_snapshot(), pre_snapshot_path, false);
fast_reload_create_from_file_pre_image(get_fast_reload_snapshot(),
pre_snapshot_path, false);
fast_reload_destroy(get_fast_reload_snapshot());
GET_GLOBAL_STATE()->fast_reload_snapshot = fast_reload_new();
fast_reload_init(GET_GLOBAL_STATE()->fast_reload_snapshot);
}
else{
if(pre_snapshot_used){
} else {
if (pre_snapshot_used) {
nyx_printf("[Qemu-Nyx]: preparing to create pre image...\n");
set_fast_reload_pre_path(pre_snapshot_path);
set_fast_reload_pre_image();
}
else if(snapshot_used){
} else if (snapshot_used) {
set_fast_reload_path(snapshot_path);
if(!skip_serialization){
if (!skip_serialization) {
enable_fast_reloads();
}
if (load_mode){
if (load_mode) {
set_fast_reload_mode(true);
nyx_printf("[Qemu-Nyx]: waiting for snapshot to start fuzzing...\n");
fast_reload_create_from_file(get_fast_reload_snapshot(), snapshot_path, false);
//cpu_synchronize_all_post_reset();
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
nyx_printf(
"[Qemu-Nyx]: waiting for snapshot to start fuzzing...\n");
fast_reload_create_from_file(get_fast_reload_snapshot(),
snapshot_path, false);
// cpu_synchronize_all_post_reset();
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
3);
skip_init();
//GET_GLOBAL_STATE()->pt_trace_mode = false;
}
else{
// GET_GLOBAL_STATE()->pt_trace_mode = false;
} else {
nyx_printf("[Qemu-Nyx]: Booting VM to start fuzzing...\n");
set_fast_reload_mode(false);
}