diff --git a/.gitignore b/.gitignore index 7de868d1ea..9b8d3ea5d9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +/roms/vgabios/ /.doctrees /config-devices.* /config-all-devices.* @@ -162,3 +163,6 @@ trace-dtrace-root.dtrace trace-ust-all.h trace-ust-all.c /target/arm/decode-sve.inc.c + +/libxdc +/capstone_v4 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index be57c6a454..0000000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,83 +0,0 @@ -before_script: - - apt-get update -qq - - apt-get install -y -qq flex bison libglib2.0-dev libpixman-1-dev genisoimage - -build-system1: - script: - - apt-get install -y -qq libgtk-3-dev libvte-dev nettle-dev libcacard-dev - libusb-dev libvde-dev libspice-protocol-dev libgl1-mesa-dev libvdeplug-dev - - ./configure --enable-werror --target-list="aarch64-softmmu alpha-softmmu - cris-softmmu hppa-softmmu lm32-softmmu moxie-softmmu microblazeel-softmmu - mips64el-softmmu m68k-softmmu ppc-softmmu riscv64-softmmu sparc-softmmu" - - make -j2 - - make -j2 check - -build-system2: - script: - - apt-get install -y -qq libsdl2-dev libgcrypt-dev libbrlapi-dev libaio-dev - libfdt-dev liblzo2-dev librdmacm-dev libibverbs-dev libibumad-dev - - ./configure --enable-werror --target-list="tricore-softmmu unicore32-softmmu - microblaze-softmmu mips-softmmu riscv32-softmmu s390x-softmmu sh4-softmmu - sparc64-softmmu x86_64-softmmu xtensa-softmmu nios2-softmmu or1k-softmmu" - - make -j2 - - make -j2 check - -build-disabled: - script: - - ./configure --enable-werror --disable-rdma --disable-slirp --disable-curl - --disable-capstone --disable-live-block-migration --disable-glusterfs - --disable-replication --disable-coroutine-pool --disable-smartcard - --disable-guest-agent --disable-curses --disable-libxml2 --disable-tpm - --disable-qom-cast-debug --disable-spice --disable-vhost-vsock - --disable-vhost-net --disable-vhost-crypto --disable-vhost-user - --target-list="i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user" - - make -j2 - - make -j2 check-qtest SPEED=slow - -build-tcg-disabled: - script: - - apt-get install -y -qq clang libgtk-3-dev libbluetooth-dev libusb-dev - - ./configure --cc=clang --enable-werror --disable-tcg --audio-drv-list="" - - make -j2 - - make check-unit - - make check-qapi-schema - - cd tests/qemu-iotests/ - - ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048 - 052 063 077 086 101 104 106 113 147 148 150 151 152 157 159 160 - 163 170 171 183 184 192 194 197 205 208 215 221 222 226 227 236 - - ./check -qcow2 028 040 051 056 057 058 065 067 068 082 085 091 095 096 102 - 122 124 127 129 132 139 142 144 145 147 151 152 155 157 165 194 - 196 197 200 202 203 205 208 209 215 216 218 222 227 234 246 247 - 248 250 254 255 256 - -build-user: - script: - - ./configure --enable-werror --disable-system --disable-guest-agent - --disable-capstone --disable-slirp --disable-fdt - - make -j2 - - make run-tcg-tests-i386-linux-user run-tcg-tests-x86_64-linux-user - -build-clang: - script: - - apt-get install -y -qq clang libsdl2-dev libattr1-dev libcap-dev - xfslibs-dev libiscsi-dev libnfs-dev libseccomp-dev gnutls-dev librbd-dev - - ./configure --cc=clang --cxx=clang++ --enable-werror - --target-list="alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu - ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user" - - make -j2 - - make -j2 check - -build-tci: - script: - - TARGETS="aarch64 alpha arm hppa m68k microblaze moxie ppc64 s390x x86_64" - - ./configure --enable-tcg-interpreter - --target-list="$(for tg in 
$TARGETS; do echo -n ${tg}'-softmmu '; done)" - make -j2 - make tests/boot-serial-test tests/cdrom-test tests/pxe-test - for tg in $TARGETS ; do export QTEST_QEMU_BINARY="${tg}-softmmu/qemu-system-${tg}" ; ./tests/boot-serial-test || exit 1 ; ./tests/cdrom-test || exit 1 ; done - QTEST_QEMU_BINARY="x86_64-softmmu/qemu-system-x86_64" ./tests/pxe-test - QTEST_QEMU_BINARY="s390x-softmmu/qemu-system-s390x" ./tests/pxe-test -m slow diff --git a/Makefile.target b/Makefile.target index 24d79d26eb..c188d0a63a 100644 --- a/Makefile.target +++ b/Makefile.target @@ -157,6 +157,7 @@ obj-y += dump/ obj-y += hw/ obj-y += monitor/ obj-y += qapi/ +obj-$(CONFIG_QEMU_NYX) += nyx/ obj-y += memory.o obj-y += memory_mapping.o obj-y += migration/ram.o diff --git a/README.md b/README.md new file mode 100644 index 0000000000..87e3b51f17 --- /dev/null +++ b/README.md @@ -0,0 +1,29 @@ +# QEMU-NYX + +This repository contains Nyx's fork of QEMU. To enable hypervisor-based snapshots, Intel-PT-based tracing, and Redqueen-style magic-byte resolution, we made various extensions to QEMU. These include the ability to quickly reset memory and devices, to obtain precise disassembly of the running code (even when it is partially swapped out or unavailable) together with Intel-PT decoding, to instrument code running in the VM with breakpoint-based hooks, and to communicate with a fuzzing frontend (e.g. one based on libnyx). + +You can find more detailed information in our main repository. + +
+![QEMU-Nyx logo](logo.png)
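As a guest-side illustration of the hypercall interface this patch handles (a sketch under our own assumptions: the helper names and inline-asm wrappers are hypothetical, only the constants and the dispatch behavior come from the kvm-all.c and linux-headers changes further down in this diff):

```
#include <stdint.h>

#define HYPERCALL_KAFL_RAX_ID 0x01f /* rax magic, from linux-headers/linux/kvm.h below */

/* KVM-PT mode: a vmcall with rax = 0x1f; the KVM-PT kernel module turns
 * hypercall number N (rbx) into run->exit_reason = KVM_EXIT_KAFL_ACQUIRE + N
 * (KAFL_EXIT_OFFSET is 100), with the argument passed in rcx. */
static inline uint64_t kafl_hypercall(uint64_t num, uint64_t arg)
{
    uint64_t ret;
    asm volatile ("vmcall"
                  : "=a" (ret)
                  : "0" ((uint64_t)HYPERCALL_KAFL_RAX_ID), "b" (num), "c" (arg)
                  : "memory");
    return ret;
}

/* Fallback mode (vanilla KVM with enable_vmware_backdoor=y): kvm_cpu_exec()
 * below matches a 4-byte OUT of 0x8080801f to the VMware backdoor port 0x5658
 * and dispatches handle_kafl_hypercall(run, cpu, rbx + 100, rcx). */
static inline void kafl_hypercall_fallback(uint64_t num, uint64_t arg)
{
    asm volatile ("outl %0, %w1"
                  : /* no outputs */
                  : "a" ((uint32_t)0x8080801f), "Nd" ((uint16_t)0x5658),
                    "b" (num), "c" (arg)
                  : "memory");
}
```

In both cases the request surfaces on the host as one of the `KVM_EXIT_KAFL_*` exit reasons defined in the linux-headers hunk of this patch.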
+ +## Build + +``` +sh compile_qemu_nyx.sh +``` + +## Bug Reports and Contributions + +If you have found and fixed a bug on your own, we are very open to patches: please create a pull request! + +### License + +This tool is provided under the **AGPL license**. + +**Free Software Hell Yeah!** + +Proudly provided by: +* [Sergej Schumilo](http://schumilo.de) - sergej@schumilo.de / [@ms_s3c](https://twitter.com/ms_s3c) +* [Cornelius Aschermann](https://hexgolems.com) - cornelius@hexgolems.com / [@is_eqv](https://twitter.com/is_eqv) diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index ca00daa2f5..c530bbcac5 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -44,6 +44,18 @@ #include "hw/boards.h" +#ifdef QEMU_NYX +#include "nyx/pt.h" +#include "nyx/hypercall.h" +#include "nyx/synchronization.h" +#include "nyx/debug.h" +#include "nyx/state.h" +#include "nyx/interface.h" +#include "nyx/fast_vm_reload_sync.h" +#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h" +#include "nyx/helpers.h" +#endif + /* This check must be after config-host.h is included */ #ifdef CONFIG_EVENTFD #include <sys/eventfd.h> #endif @@ -76,6 +88,10 @@ struct KVMState { AccelState parent_obj; +#ifdef QEMU_NYX + bool nyx_no_pt_mode; +#endif + int nr_slots; int fd; int vmfd; @@ -363,6 +379,16 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id) return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id); } +#ifdef QEMU_NYX +int kvm_get_vm_fd(KVMState *s){ + return s->vmfd; +} + +KVMMemoryListener* kvm_get_kml(int as_id){ + return kvm_state->as[as_id].ml; +} +#endif + int kvm_init_vcpu(CPUState *cpu) { KVMState *s = kvm_state; @@ -381,6 +407,16 @@ int kvm_init_vcpu(CPUState *cpu) cpu->kvm_state = s; cpu->vcpu_dirty = true; +#ifdef QEMU_NYX + if(s->nyx_no_pt_mode){ + if(!getenv("NYX_DISABLE_DIRTY_RING")){ + nyx_dirty_ring_pre_init(cpu->kvm_fd, s->vmfd); + } + } + pt_kvm_init(cpu); + install_timeout_detector(&GET_GLOBAL_STATE()->timeout_detector); +#endif + mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { ret = mmap_size; @@ -402,6 +438,11 @@ int kvm_init_vcpu(CPUState *cpu) } ret = kvm_arch_init_vcpu(cpu); + +#ifdef QEMU_NYX + unblock_signals(); +#endif + err: return ret; } @@ -1874,7 +1915,62 @@ static int kvm_init(MachineState *ms) ret = -errno; goto err; } +#ifdef QEMU_NYX + if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 && ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1) { + fprintf(stderr, "[!] 
Could not access KVM-PT kernel module!\n [*] Trying vanilla KVM...\n"); + /* fallback -> use vanilla KVM module instead (no Intel-PT tracing or nested hypercalls at this point) */ + s->fd = qemu_open("/dev/kvm", O_RDWR); + if (s->fd == -1) { + fprintf(stderr, "Error: NYX fallback failed: Could not access vanilla KVM module!\n"); + ret = -errno; + goto err; + } + + int ret_val = ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING); + if(ret_val == -1 || ret_val == 0){ + fprintf(stderr, "Error: NYX requires support for KVM_CAP_DIRTY_LOG_RING in fallback mode!\n"); + ret = -errno; + goto err; + } + + /* check for vmware_backdoor support */ + int fd = open("/sys/module/kvm/parameters/enable_vmware_backdoor", O_RDONLY); + if(fd == -1){ + fprintf(stderr, "ERROR: /sys/module/kvm/parameters/enable_vmware_backdoor file not found...\n"); + ret = -errno; + goto err; + } + + char vmware_backdoor_option = 0; + assert(read(fd, &vmware_backdoor_option, 1) == 1); + close(fd); + + if(vmware_backdoor_option == 'N'){ + fprintf(stderr, "\nERROR: vmware backdoor is not enabled...\n"); + fprintf(stderr, "\n\tRun the following commands to fix the issue:\n"); + fprintf(stderr, "\t-----------------------------------------\n"); + fprintf(stderr, "\tsudo modprobe -r kvm-intel\n"); + fprintf(stderr, "\tsudo modprobe -r kvm\n"); + fprintf(stderr, "\tsudo modprobe kvm enable_vmware_backdoor=y\n"); + fprintf(stderr, "\tsudo modprobe kvm-intel\n"); + fprintf(stderr, "\tcat /sys/module/kvm/parameters/enable_vmware_backdoor\n"); + fprintf(stderr, "\t-----------------------------------------\n\n"); + ret = -errno; + goto err; + } + + fprintf(stderr, "NYX runs in fallback mode (no Intel-PT tracing or nested hypercall support)!\n"); + s->nyx_no_pt_mode = true; + GET_GLOBAL_STATE()->nyx_fdl = false; + fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DIRTY_RING); + } + else{ + s->nyx_no_pt_mode = false; + GET_GLOBAL_STATE()->nyx_fdl = true; + fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_FDL); + } +#endif ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); if (ret < KVM_API_VERSION) { if (ret >= 0) { @@ -1939,6 +2035,18 @@ static int kvm_init(MachineState *ms) s->vmfd = ret; +#ifdef QEMU_NYX + if(s->nyx_no_pt_mode){ + if(getenv("NYX_DISABLE_DIRTY_RING")){ + fprintf(stderr, "WARNING: Nyx has disabled KVM's dirty-ring (required to enable full VGA support during pre-snapshot creation procedure)\n"); + fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DEBUG_QUIET); /* required to create snapshot */ + } + else{ + nyx_dirty_ring_early_init(s->fd, s->vmfd); + } + } +#endif + /* check the vcpu limits */ soft_vcpus_limit = kvm_recommended_vcpus(s); hard_vcpus_limit = kvm_max_vcpus(s); @@ -1978,7 +2086,16 @@ static int kvm_init(MachineState *ms) s->manual_dirty_log_protect = kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); if (s->manual_dirty_log_protect) { +#ifndef QEMU_NYX ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1); +#else + if(s->nyx_no_pt_mode){ + ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1); + } + else{ + ret = 0; + } +#endif if (ret) { warn_report("Trying to enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 " "but failed. Falling back to the legacy mode. 
"); @@ -2268,6 +2385,17 @@ static void kvm_eat_signals(CPUState *cpu) } while (sigismember(&chkset, SIG_IPI)); } +#ifdef QEMU_NYX +static int handle_vmware_hypercall(struct kvm_run *run, CPUState *cpu){ + kvm_arch_get_registers_fast(cpu); + + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + return handle_kafl_hypercall(run, cpu, env->regs[R_EBX]+100, env->regs[R_ECX]); +} +#endif + int kvm_cpu_exec(CPUState *cpu) { struct kvm_run *run = cpu->kvm_run; @@ -2283,6 +2411,15 @@ int kvm_cpu_exec(CPUState *cpu) qemu_mutex_unlock_iothread(); cpu_exec_start(cpu); +#ifdef QEMU_NYX + static bool timeout_reload_pending = false; + if(timeout_reload_pending){ + synchronization_lock_timeout_found(); + } + timeout_reload_pending = false; +#endif + + do { MemTxAttrs attrs; @@ -2302,15 +2439,39 @@ int kvm_cpu_exec(CPUState *cpu) kvm_cpu_kick_self(); } +#ifdef QEMU_NYX + if(!kvm_state->nyx_no_pt_mode){ + pt_pre_kvm_run(cpu); + } +#endif + /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. * Matching barrier in kvm_eat_signals. */ smp_rmb(); +#ifdef QEMU_NYX + if(arm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector)){ + assert(false); + } +#endif + run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0); +#ifdef QEMU_NYX + if (disarm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector)){ + timeout_reload_pending = true; + } +#endif + attrs = kvm_arch_post_run(cpu, run); +#ifdef QEMU_NYX + if(!kvm_state->nyx_no_pt_mode){ + pt_post_kvm_run(cpu); + } +#endif + #ifdef KVM_HAVE_MCE_INJECTION if (unlikely(have_sigbus_pending)) { qemu_mutex_lock_iothread(); @@ -2328,8 +2489,25 @@ int kvm_cpu_exec(CPUState *cpu) ret = EXCP_INTERRUPT; break; } + +#ifndef QEMU_NYX fprintf(stderr, "error: kvm run failed %s\n", strerror(-run_ret)); +#else + if(run_ret == -EFAULT){ + if(GET_GLOBAL_STATE()->protect_payload_buffer && GET_GLOBAL_STATE()->in_fuzzing_mode){ + /* Fuzzing is enabled at this point -> don't exit */ + synchronization_payload_buffer_write_detected(); + ret = 0; + break; + } + } + + fprintf(stderr, "QEMU-PT: error: kvm run failed %s\n", + strerror(-run_ret)); + qemu_backtrace(); +#endif + #ifdef TARGET_PPC if (run_ret == -EBUSY) { fprintf(stderr, @@ -2346,6 +2524,15 @@ int kvm_cpu_exec(CPUState *cpu) switch (run->exit_reason) { case KVM_EXIT_IO: DPRINTF("handle_io\n"); + +#ifdef QEMU_NYX + if(run->io.port == 0x5658 && run->io.size == 4 && *((uint32_t*)((uint8_t *)run + run->io.data_offset)) == 0x8080801f) { + assert(kvm_state->nyx_no_pt_mode); + ret = handle_vmware_hypercall(run, cpu); + break; + } +#endif + /* Called outside BQL */ kvm_handle_io(run->io.port, attrs, (uint8_t *)run + run->io.data_offset, @@ -2370,33 +2557,116 @@ int kvm_cpu_exec(CPUState *cpu) break; case KVM_EXIT_SHUTDOWN: DPRINTF("shutdown\n"); +#ifndef QEMU_NYX qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ret = EXCP_INTERRUPT; +#else + fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_EXIT_SHUTDOWN)!\n"); + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + /* Fuzzing is enabled at this point -> don't exit */ + handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]); + ret = 0; + } + else{ + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + ret = EXCP_INTERRUPT; + } +#endif break; case KVM_EXIT_UNKNOWN: fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n", (uint64_t)run->hw.hardware_exit_reason); + +#ifdef QEMU_NYX + assert(false); +#endif + ret = -1; break; case KVM_EXIT_INTERNAL_ERROR: ret = kvm_handle_internal_error(cpu, run); break; + +#ifdef QEMU_NYX + case 
KVM_EXIT_DIRTY_RING_FULL: + //printf("[*] WARNING: KVM_EXIT_DIRTY_RING_FULL\n"); + fast_reload_handle_dirty_ring_full(get_fast_reload_snapshot()); + ret = 0; + break; + + case KVM_EXIT_KAFL_ACQUIRE ... (KVM_EXIT_KAFL_ACQUIRE+100): + ret = handle_kafl_hypercall(run, cpu, (uint64_t)run->exit_reason, (uint64_t)run->hypercall.args[0]); + break; + + case KVM_EXIT_DEBUG: + kvm_arch_get_registers(cpu); + if(!handle_hypercall_kafl_hook(run, cpu, (uint64_t)run->hypercall.args[0])){ + ret = kvm_arch_handle_exit(cpu, run); + } + else { + ret = 0; + } + break; +#endif + case KVM_EXIT_SYSTEM_EVENT: switch (run->system_event.type) { case KVM_SYSTEM_EVENT_SHUTDOWN: +#ifndef QEMU_NYX qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); ret = EXCP_INTERRUPT; +#else + fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_SHUTDOWN)!\n"); + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + /* Fuzzing is enabled at this point -> don't exit */ + handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]); + ret = 0; + } + else{ + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + ret = EXCP_INTERRUPT; + } +#endif break; case KVM_SYSTEM_EVENT_RESET: +#ifndef QEMU_NYX qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ret = EXCP_INTERRUPT; +#else + fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_RESET)!\n"); + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + /* Fuzzing is enabled at this point -> don't exit */ + handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]); + ret = 0; + } + else{ + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + ret = EXCP_INTERRUPT; + } +#endif break; case KVM_SYSTEM_EVENT_CRASH: +#ifndef QEMU_NYX kvm_cpu_synchronize_state(cpu); qemu_mutex_lock_iothread(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); qemu_mutex_unlock_iothread(); ret = 0; +#else + fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_CRASH)!\n"); + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + /* Fuzzing is enabled at this point -> don't exit */ + handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]); + ret = 0; + } + else{ + kvm_cpu_synchronize_state(cpu); + qemu_mutex_lock_iothread(); + qemu_system_guest_panicked(cpu_get_crash_info(cpu)); + qemu_mutex_unlock_iothread(); + ret = 0; + } +#endif break; default: DPRINTF("kvm_arch_handle_exit\n"); @@ -2405,21 +2675,65 @@ int kvm_cpu_exec(CPUState *cpu) } break; default: +#ifndef QEMU_NYX DPRINTF("kvm_arch_handle_exit\n"); +#else + printf("kvm_arch_handle_exit => %d\n", run->exit_reason); + assert(false); +#endif ret = kvm_arch_handle_exit(cpu, run); break; } + +#ifdef QEMU_NYX + if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->cow_cache_full){ + synchronization_cow_full_detected(); + GET_GLOBAL_STATE()->cow_cache_full = false; + ret = 0; + } + else{ + if(GET_GLOBAL_STATE()->in_fuzzing_mode && cpu->halted){ + fprintf(stderr, "%s: Attempt to halt CPU -> FUCK OFF!\n", __func__); + cpu->halted = 0; + GET_GLOBAL_STATE()->shutdown_requested = true; + } + + if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->shutdown_requested){ + /* Fuzzing is enabled at this point -> don't exit */ + fprintf(stderr, "shutdown_requested -> calling handle_hypercall_kafl_release\n"); + + //synchronization_lock_shutdown_detected(); + synchronization_lock_crash_found(); + GET_GLOBAL_STATE()->shutdown_requested = false; + ret = 0; + } + } + if(reload_request_exists(GET_GLOBAL_STATE()->reload_state)){ + break; + } +#endif + } while (ret == 0); cpu_exec_end(cpu); 
qemu_mutex_lock_iothread(); if (ret < 0) { +#ifdef QEMU_NYX + fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (ret < 0)!\n"); +#endif cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); vm_stop(RUN_STATE_INTERNAL_ERROR); } atomic_set(&cpu->exit_request, 0); + +#ifdef QEMU_NYX + if(check_if_relood_request_exists_pre(GET_GLOBAL_STATE()->reload_state)){ + pause_all_vcpus(); /* performance boost ??? */ + } +#endif + return ret; } @@ -2546,6 +2860,12 @@ int kvm_device_access(int fd, int group, uint64_t attr, return err; } +#ifdef QEMU_NYX +int kvm_has_vapic(void){ + return !kvm_check_extension(kvm_state, KVM_CAP_VAPIC); +} +#endif + bool kvm_has_sync_mmu(void) { return kvm_state->sync_mmu; @@ -2815,6 +3135,9 @@ void kvm_init_cpu_signals(CPUState *cpu) pthread_sigmask(SIG_SETMASK, &set, NULL); #endif sigdelset(&set, SIG_IPI); +#ifdef QEMU_NYX + sigdelset(&set, SIGALRM); +#endif if (kvm_immediate_exit) { r = pthread_sigmask(SIG_SETMASK, &set, NULL); } else { diff --git a/block/block-backend.c b/block/block-backend.c index 8b8f2a80a0..f344a68533 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -28,6 +28,10 @@ #include "trace.h" #include "migration/misc.h" +#ifdef QEMU_NYX +#include "nyx/snapshot/block/block_cow.h" +#endif + /* Number of coroutines to reserve per attached device model */ #define COROUTINE_POOL_RESERVATION 64 @@ -42,6 +46,7 @@ typedef struct BlockBackendAioNotifier { QLIST_ENTRY(BlockBackendAioNotifier) list; } BlockBackendAioNotifier; +#ifndef QEMU_NYX struct BlockBackend { char *name; int refcnt; @@ -96,6 +101,7 @@ struct BlockBackend { */ unsigned int in_flight; }; +#endif typedef struct BlockBackendAIOCB { BlockAIOCB common; @@ -335,6 +341,9 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) BlockBackend *blk; blk = g_new0(BlockBackend, 1); +#ifdef QEMU_NYX + blk->cow_cache = NULL; +#endif blk->refcnt = 1; blk->ctx = ctx; blk->perm = perm; @@ -407,6 +416,10 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, return NULL; } +#ifdef QEMU_NYX + blk->cow_cache = cow_cache_new(filename); +#endif + return blk; } @@ -1109,8 +1122,13 @@ void blk_set_disable_request_queuing(BlockBackend *blk, bool disable) blk->disable_request_queuing = disable; } +#ifndef QEMU_NYX static int blk_check_byte_request(BlockBackend *blk, int64_t offset, size_t size) +#else +int blk_check_byte_request(BlockBackend *blk, int64_t offset, + size_t size) +#endif { int64_t len; @@ -1333,7 +1351,18 @@ static const AIOCBInfo blk_aio_em_aiocb_info = { .aiocb_size = sizeof(BlkAioEmAIOCB), }; +#ifndef QEMU_NYX static void blk_aio_complete(BlkAioEmAIOCB *acb) +#else +void blk_aio_complete(BlkAioEmAIOCB *acb); +BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, + void *iobuf, CoroutineEntry co_entry, + BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque); +void blk_aio_write_entry(void *opaque); + +void blk_aio_complete(BlkAioEmAIOCB *acb) +#endif { if (acb->has_returned) { acb->common.cb(acb->common.opaque, acb->rwco.ret); @@ -1349,10 +1378,17 @@ static void blk_aio_complete_bh(void *opaque) blk_aio_complete(acb); } +#ifndef QEMU_NYX static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, void *iobuf, CoroutineEntry co_entry, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) +#else +BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, + void *iobuf, CoroutineEntry co_entry, + BdrvRequestFlags flags, + BlockCompletionFunc *cb, void *opaque) +#endif { BlkAioEmAIOCB *acb; Coroutine 
*co; @@ -1399,7 +1435,11 @@ static void blk_aio_read_entry(void *opaque) blk_aio_complete(acb); } +#ifndef QEMU_NYX static void blk_aio_write_entry(void *opaque) +#else +void blk_aio_write_entry(void *opaque) +#endif { BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; @@ -1476,16 +1516,34 @@ BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { +#ifndef QEMU_NYX return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_read_entry, flags, cb, opaque); +#else + if(blk->cow_cache->enabled){ + return blk_aio_prwv(blk, offset, qiov->size, qiov, cow_cache_read_entry, flags, cb, opaque); + } + else{ + return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_read_entry, flags, cb, opaque); + } +#endif } BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { +#ifndef QEMU_NYX return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_write_entry, flags, cb, opaque); +#else + if(blk->cow_cache->enabled){ + return blk_aio_prwv(blk, offset, qiov->size, qiov, cow_cache_write_entry, flags, cb, opaque); + } + else{ + return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_write_entry, flags, cb, opaque); + } +#endif } static void blk_aio_flush_entry(void *opaque) diff --git a/block/file-posix.c b/block/file-posix.c index 1b805bd938..5df0010455 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -781,13 +781,18 @@ static int raw_apply_lock_bytes(BDRVRawState *s, int fd, static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm, Error **errp) { +#ifndef QEMU_NYX int ret; +#endif int i; PERM_FOREACH(i) { +#ifndef QEMU_NYX int off = RAW_LOCK_SHARED_BASE + i; +#endif uint64_t p = 1ULL << i; if (perm & p) { +#ifndef QEMU_NYX ret = qemu_lock_fd_test(fd, off, 1, true); if (ret) { char *perm_name = bdrv_perm_names(p); @@ -797,12 +802,16 @@ static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm, g_free(perm_name); return ret; } +#endif } } PERM_FOREACH(i) { +#ifndef QEMU_NYX int off = RAW_LOCK_PERM_BASE + i; +#endif uint64_t p = 1ULL << i; if (!(shared_perm & p)) { +#ifndef QEMU_NYX ret = qemu_lock_fd_test(fd, off, 1, true); if (ret) { char *perm_name = bdrv_perm_names(p); @@ -812,6 +821,7 @@ static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm, g_free(perm_name); return ret; } +#endif } } return 0; diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 185fe38dda..cdc29933a2 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -951,8 +951,8 @@ static void tcp_chr_accept_server_sync(Chardev *chr) { SocketChardev *s = SOCKET_CHARDEV(chr); QIOChannelSocket *sioc; - info_report("QEMU waiting for connection on: %s", - chr->filename); + //info_report("QEMU waiting for connection on: %s", + // chr->filename); tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING); sioc = qio_net_listener_wait_client(s->listener); tcp_chr_set_client_ioc_name(chr, sioc); diff --git a/compile_qemu_nyx.sh b/compile_qemu_nyx.sh new file mode 100755 index 0000000000..7225af05ca --- /dev/null +++ b/compile_qemu_nyx.sh @@ -0,0 +1,52 @@ +#!/bin/bash +set -e + +# Copyright (C) 2021 Sergej Schumilo +# +# This file is part of NYX. 
+# +# QEMU-PT is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# QEMU-PT is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>. + + +if [ ! -f "/usr/lib/libxdc.so" ] || [ ! -f "/usr/include/libxdc.h" ]; then + echo "[!] libxdc not found! Installing..." + if [ -d "capstone_v4/" ]; then + rm -rf capstone_v4 + fi + + if [ -d "libxdc/" ]; then + rm -rf libxdc + fi + + git clone https://github.com/nyx-fuzz/libxdc.git + git clone https://github.com/aquynh/capstone.git capstone_v4 + cd capstone_v4 + git checkout v4 + make + sudo make install + cd .. + cd libxdc + sudo make install + cd .. +fi + +./configure --target-list=x86_64-softmmu --enable-gtk --disable-werror --disable-capstone --disable-libssh --enable-nyx --disable-tools +#--enable-sanitizers + +if [ -f GNUmakefile ]; then + rm GNUmakefile 2> /dev/null +fi + +make -j + diff --git a/configure b/configure index 6099be1d84..58b3a3533a 100755 --- a/configure +++ b/configure @@ -949,6 +949,8 @@ for opt do case "$opt" in --help|-h) show_help=yes ;; + --enable-nyx) nyx="yes" + ;; --version|-V) exec cat $source_path/VERSION ;; --prefix=*) prefix="$optarg" @@ -1726,6 +1728,7 @@ Advanced options (experts only): Optional features, enabled with --enable-FEATURE and disabled with --disable-FEATURE, default is enabled if available: + nyx build QEMU-NYX system all system emulation targets user supported user emulation targets linux-user all linux usermode emulation targets @@ -2045,7 +2048,7 @@ EOF # check we support --no-pie first... 
if compile_prog "-Werror -fno-pie" "-no-pie"; then CFLAGS_NOPIE="-fno-pie" - LDFLAGS_NOPIE="-nopie" + #LDFLAGS_NOPIE="-nopie" fi if compile_prog "-fPIE -DPIE" "-pie"; then @@ -6095,7 +6098,12 @@ if test "$gcov" = "yes" ; then CFLAGS="-fprofile-arcs -ftest-coverage -g $CFLAGS" LDFLAGS="-fprofile-arcs -ftest-coverage $LDFLAGS" elif test "$fortify_source" = "yes" ; then - CFLAGS="-O2 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $CFLAGS" + if test "$nyx" = "yes" ; then + CFLAGS="-DNESTED_PATCH -O3 -rdynamic -Wno-error=maybe-uninitialized -frename-registers -mtune=native -DQEMU_NYX -g -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $QEMU_CFLAGS" + LIBS="-lcapstone -lxdc $LIBS" + else + CFLAGS="-O2 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $CFLAGS" + fi elif test "$debug" = "no"; then CFLAGS="-O2 $CFLAGS" fi @@ -6849,6 +6857,11 @@ fi if test "$splice" = "yes" ; then echo "CONFIG_SPLICE=y" >> $config_host_mak fi + +if test "$nyx" = "yes" ; then + echo "CONFIG_QEMU_NYX=y" >> $config_host_mak +fi + if test "$eventfd" = "yes" ; then echo "CONFIG_EVENTFD=y" >> $config_host_mak fi @@ -7515,7 +7528,7 @@ if test "$sparse" = "yes" ; then echo "QEMU_CFLAGS += -Wbitwise -Wno-transparent-union -Wno-old-initializer -Wno-non-pointer-null" >> $config_host_mak fi echo "LDFLAGS=$LDFLAGS" >> $config_host_mak -echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak +#echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak echo "LD_REL_FLAGS=$LD_REL_FLAGS" >> $config_host_mak echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_host_mak diff --git a/cpus.c b/cpus.c index 63bda152f5..f6868894af 100644 --- a/cpus.c +++ b/cpus.c @@ -1351,6 +1351,9 @@ static void *qemu_dummy_cpu_thread_fn(void *arg) sigemptyset(&waitset); sigaddset(&waitset, SIG_IPI); +#ifdef QEMU_NYX + sigaddset(&waitset, SIGALRM); +#endif /* signal CPU creation */ cpu->created = true; diff --git a/exec.c b/exec.c index ffdb518535..7548497217 100644 --- a/exec.c +++ b/exec.c @@ -1355,6 +1355,52 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, return dirty; } +#ifdef QEMU_NYX +extern void fast_reload_qemu_user_fdl_set_dirty(void* self, MemoryRegion *mr, uint64_t addr, uint64_t length); +extern void* get_fast_reload_snapshot(void); + +/* Note: start and end must be within the same ram block. 
*/ +bool cpu_physical_memory_test_dirty(ram_addr_t start, + ram_addr_t length, + unsigned client) +{ + DirtyMemoryBlocks *blocks; + unsigned long end, page; + bool dirty = false; + RAMBlock *ramblock; + uint64_t mr_offset, mr_size; + + if (length == 0) { + return false; + } + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + + WITH_RCU_READ_LOCK_GUARD() { + blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); + ramblock = qemu_get_ram_block(start); + /* Range sanity check on the ramblock */ + assert(start >= ramblock->offset && + start + length <= ramblock->offset + ramblock->used_length); + + while (page < end) { + unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; + unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; + unsigned long num = MIN(end - page, + DIRTY_MEMORY_BLOCK_SIZE - offset); + + dirty |= bitmap_test_atomic(blocks->blocks[idx], + offset, num); + + page += num; + } + } + + return dirty; +} +#endif + DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) { @@ -3025,6 +3071,9 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr length) { +#ifdef QEMU_NYX + fast_reload_qemu_user_fdl_set_dirty(get_fast_reload_snapshot(), mr, addr & 0xFFFFFFFFFFFFF000, length); +#endif uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); addr += memory_region_get_ram_addr(mr); diff --git a/hw/char/serial.c b/hw/char/serial.c index b4aa250950..a5034ae4ef 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -34,6 +34,9 @@ #include "sysemu/runstate.h" #include "qemu/error-report.h" #include "trace.h" +#ifdef QEMU_NYX +#include "nyx/state.h" +#endif //#define DEBUG_SERIAL @@ -241,7 +244,9 @@ static gboolean serial_watch_cb(GIOChannel *chan, GIOCondition cond, static void serial_xmit(SerialState *s) { do { +#ifndef QEMU_NYX assert(!(s->lsr & UART_LSR_TEMT)); +#endif if (s->tsr_retry == 0) { assert(!(s->lsr & UART_LSR_THRE)); @@ -343,6 +348,12 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val, { SerialState *s = opaque; +#ifdef QEMU_NYX + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + return; + } +#endif + addr &= 7; trace_serial_ioport_write(addr, val); switch(addr) { diff --git a/hw/display/vga.c b/hw/display/vga.c index 82ebe53610..6b5b275c27 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -153,6 +153,10 @@ static inline uint8_t sr(VGACommonState *s, int idx) return vbe_enabled(s) ? 
s->sr_vbe[idx] : s->sr[idx]; } +#ifdef QEMU_NYX +bool dirty = false; +#endif + static void vga_update_memory_access(VGACommonState *s) { hwaddr base, offset, size; @@ -166,6 +170,9 @@ static void vga_update_memory_access(VGACommonState *s) object_unparent(OBJECT(&s->chain4_alias)); s->has_chain4_alias = false; s->plane_updated = 0xf; +#ifdef QEMU_NYX + dirty = true; +#endif } if ((sr(s, VGA_SEQ_PLANE_WRITE) & VGA_SR02_ALL_PLANES) == VGA_SR02_ALL_PLANES && sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) { @@ -2076,10 +2083,21 @@ static int vga_common_post_load(void *opaque, int version_id) { VGACommonState *s = opaque; +#ifndef QEMU_NYX /* force refresh */ s->graphic_mode = -1; vbe_update_vgaregs(s); vga_update_memory_access(s); +#else + if(dirty){ + /* force refresh */ + s->graphic_mode = -1; + vbe_update_vgaregs(s); + //fprintf(stderr, "VGA DIRTY!\n"); + vga_update_memory_access(s); + dirty = false; + } +#endif return 0; } diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c index 2c59b6894b..f85e0e6658 100644 --- a/hw/i386/kvm/clock.c +++ b/hw/i386/kvm/clock.c @@ -30,9 +30,21 @@ #include <linux/kvm.h> #include "standard-headers/asm-x86/kvm_para.h" +#ifdef QEMU_NYX +#include "nyx/snapshot/devices/vm_change_state_handlers.h" +#endif + #define TYPE_KVM_CLOCK "kvmclock" #define KVM_CLOCK(obj) OBJECT_CHECK(KVMClockState, (obj), TYPE_KVM_CLOCK) +#ifdef QEMU_NYX +bool fuzz_mode = false; + +void enable_fast_snapshot_kvm_clock(void){ + fuzz_mode = true; +} +#endif + typedef struct KVMClockState { /*< private >*/ SysBusDevice busdev; @@ -176,7 +188,11 @@ static void kvmclock_vm_state_change(void *opaque, int running, * If the host where s->clock was read did not support reliable * KVM_GET_CLOCK, read kvmclock value from memory. */ +#ifndef QEMU_NYX if (!s->clock_is_reliable) { +#else + if (!s->clock_is_reliable && !fuzz_mode) { +#endif uint64_t pvclock_via_mem = kvmclock_current_nsec(s); /* We can't rely on the saved clock value, just discard it */ if (pvclock_via_mem) { @@ -231,6 +247,9 @@ static void kvmclock_realize(DeviceState *dev, Error **errp) kvm_update_clock(s); qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s); +#ifdef QEMU_NYX + add_fast_reload_change_handler(kvmclock_vm_state_change, s, RELOAD_HANDLER_KVM_CLOCK); +#endif } static bool kvmclock_clock_is_reliable_needed(void *opaque) diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index 6a911e23f9..98a0a431c7 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -34,6 +34,10 @@ #include "hw/timer/i8254_internal.h" #include "sysemu/kvm.h" +#ifdef QEMU_NYX +#include "nyx/snapshot/devices/vm_change_state_handlers.h" +#endif + #define KVM_PIT_REINJECT_BIT 0 #define CALIBRATION_ROUNDS 3 @@ -300,6 +304,9 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, kvm_pit_irq_control, 1); qemu_add_vm_change_state_handler(kvm_pit_vm_state_change, s); +#ifdef QEMU_NYX + add_fast_reload_change_handler(kvm_pit_vm_state_change, s, RELOAD_HANDLER_KVM_PIT); +#endif kpc->parent_realize(dev, errp); } diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index 1bd70d1abb..b12bec3f55 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -1026,3 +1026,22 @@ static void xenfv_machine_options(MachineClass *m) DEFINE_PC_MACHINE(xenfv, "xenfv", pc_xen_hvm_init, xenfv_machine_options); #endif + +#ifdef QEMU_NYX + +static void pc_kAFL64_vmx_v1_0_machine_options(MachineClass *m) +{ + pc_i440fx_machine_options(m); + m->alias = "kAFL64"; + //m->is_default = 1; + m->desc = "kAFL64 PC (i440FX + PIIX, 1996)"; +} + +static void 
kAFL64_init(MachineState *machine) +{ + pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, TYPE_I440FX_PCI_DEVICE); +} + +DEFINE_PC_MACHINE(v1, "kAFL64-v1", kAFL64_init, pc_kAFL64_vmx_v1_0_machine_options); + +#endif diff --git a/hw/ide/core.c b/hw/ide/core.c index 754ff4dc34..e9feacc30a 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -41,6 +41,10 @@ #include "hw/ide/internal.h" #include "trace.h" +#ifdef QEMU_NYX +#include "nyx/snapshot/devices/vm_change_state_handlers.h" +#endif + /* These values were based on a Seagate ST3500418AS but have been modified to make more sense in QEMU */ static const int smart_attributes[][12] = { @@ -2654,6 +2658,9 @@ void ide_register_restart_cb(IDEBus *bus) { if (bus->dma->ops->restart_dma) { bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus); +#ifdef QEMU_NYX + add_fast_reload_change_handler(ide_restart_cb, bus, RELOAD_HANDLER_IDE_CORE); +#endif } } diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index 375cb6abe9..d1333dd3f6 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -274,14 +274,18 @@ static void apic_common_realize(DeviceState *dev, Error **errp) info->realize(dev, errp); /* Note: We need at least 1M to map the VAPIC option ROM */ +#ifndef QEMU_NYX if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK && !hax_enabled() && ram_size >= 1024 * 1024) { vapic = sysbus_create_simple("kvmvapic", -1, NULL); } +#endif s->vapic = vapic; +#ifndef QEMU_NYX if (apic_report_tpr_access && info->enable_tpr_reporting) { info->enable_tpr_reporting(s, true); } +#endif if (s->legacy_instance_id) { instance_id = -1; diff --git a/hw/pci/pci.c b/hw/pci/pci.c index cbc7a32568..29e308003e 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -548,6 +548,23 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size, return 0; } +#ifdef QEMU_NYX +void fast_get_pci_config_device(void* data, size_t size, void* opaque){ + PCIDevice *s = container_of(opaque, PCIDevice, config); + PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s); + uint8_t *config = (uint8_t *) data; + + memcpy(s->config, config, size); + + pci_update_mappings(s); + if (pc->is_bridge) { + PCIBridge *b = PCI_BRIDGE(s); + pci_bridge_update_mappings(b); + } +} +#endif + + /* just put buffer */ static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, const VMStateField *field, QJSON *vmdesc) @@ -587,6 +604,17 @@ static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size, return 0; } +#ifdef QEMU_NYX +void fast_get_pci_irq_state(void* data, size_t size, void* opaque){ + PCIDevice *s = container_of(opaque, PCIDevice, irq_state); + uint32_t* irq_state = (uint32_t*) data; + + for (int i = 0; i < PCI_NUM_PINS; ++i) { + pci_set_irq_state(s, i, irq_state[i]); + } +} +#endif + static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, const VMStateField *field, QJSON *vmdesc) { diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 74ae74bc5c..2e6de824f0 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -79,6 +79,14 @@ static void rtc_set_cmos(RTCState *s, const struct tm *tm); static inline int rtc_from_bcd(RTCState *s, int a); static uint64_t get_next_alarm(RTCState *s); +#ifdef QEMU_NYX +static bool fast_snapshot_rtc_enabled = false; + +void enable_fast_snapshot_rtc(void){ + fast_snapshot_rtc_enabled = true; +} +#endif + static inline bool rtc_running(RTCState *s) { return (!(s->cmos_data[RTC_REG_B] & REG_B_SET) && @@ -790,7 +798,11 @@ static int rtc_post_load(void *opaque, int version_id) { RTCState *s = opaque; +#ifndef QEMU_NYX if 
(version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME) { +#else + if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME || fast_snapshot_rtc_enabled) { +#endif rtc_set_time(s); s->offset = 0; check_update_timer(s); diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index bed0554f4d..70d90c8184 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -448,6 +448,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, } #endif /* not _WIN32 */ +#ifdef QEMU_NYX +bool cpu_physical_memory_test_dirty(ram_addr_t start, + ram_addr_t length, + unsigned client); +#endif + bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, ram_addr_t length, unsigned client); diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 77c6f05299..705f635d39 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -409,6 +409,21 @@ struct CPUState { */ uintptr_t mem_io_pc; +#ifdef QEMU_NYX + volatile int pt_cmd; + volatile int pt_ret; + volatile bool pt_enabled; + + int pt_fd; + void* pt_mmap; + + void* pt_decoder_state; + + bool reload_pending; + bool intel_pt_run_trashed; + +#endif + int kvm_fd; struct KVMState *kvm_state; struct kvm_run *kvm_run; diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index db75c6dfd0..fb9f3591b7 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -373,6 +373,10 @@ void pci_del_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size); uint8_t pci_find_capability(PCIDevice *pci_dev, uint8_t cap_id); +#ifdef QEMU_NYX +void fast_get_pci_config_device(void* data, size_t size, void* opaque); +void fast_get_pci_irq_state(void* data, size_t size, void* opaque); +#endif uint32_t pci_default_read_config(PCIDevice *d, uint32_t address, int len); diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h index 82a1d2f41f..961b1fd11d 100644 --- a/include/qemu/bitmap.h +++ b/include/qemu/bitmap.h @@ -253,6 +253,9 @@ void bitmap_set(unsigned long *map, long i, long len); void bitmap_set_atomic(unsigned long *map, long i, long len); void bitmap_clear(unsigned long *map, long start, long nr); bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr); +#ifdef QEMU_NYX +bool bitmap_test_atomic(unsigned long *map, long start, long nr); +#endif void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, long nr); unsigned long bitmap_find_next_zero_area(unsigned long *map, diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index b198deca0b..0011e01a8b 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -16,6 +16,11 @@ #include "qemu/iov.h" #include "block/throttle-groups.h" +#ifdef QEMU_NYX +#include "sysemu/sysemu.h" +#include "nyx/snapshot/block/block_cow.h" +#endif + /* * TODO Have to include block/block.h for a bunch of block layer * types. 
Unfortunately, this pulls in the whole BlockDriverState @@ -265,4 +270,62 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, const BdrvChild *blk_root(BlockBackend *blk); +#ifdef QEMU_NYX +struct BlockBackend { + cow_cache_t* cow_cache; + char *name; + int refcnt; + BdrvChild *root; + AioContext *ctx; + DriveInfo *legacy_dinfo; /* null unless created by drive_new() */ + QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */ + QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */ + BlockBackendPublic public; + + DeviceState *dev; /* attached device model, if any */ + const BlockDevOps *dev_ops; + void *dev_opaque; + + /* the block size for which the guest device expects atomicity */ + int guest_block_size; + + /* If the BDS tree is removed, some of its options are stored here (which + * can be used to restore those options in the new BDS on insert) */ + BlockBackendRootState root_state; + + bool enable_write_cache; + + /* I/O stats (display with "info blockstats"). */ + BlockAcctStats stats; + + BlockdevOnError on_read_error, on_write_error; + bool iostatus_enabled; + BlockDeviceIoStatus iostatus; + + uint64_t perm; + uint64_t shared_perm; + bool disable_perm; + + bool allow_aio_context_change; + bool allow_write_beyond_eof; + + NotifierList remove_bs_notifiers, insert_bs_notifiers; + QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers; + + int quiesce_counter; + CoQueue queued_requests; + bool disable_request_queuing; + + VMChangeStateEntry *vmsh; + bool force_allow_inactivate; + + /* Number of in-flight aio requests. BlockDriverState also counts + * in-flight requests but aio requests can exist even when blk->root is + * NULL, so we cannot rely on its counter for that case. + * Accessed with atomic ops. 
+ */ + unsigned int in_flight; +}; +#endif + #endif diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 9fe233b9bf..fa2312e953 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -216,6 +216,10 @@ int kvm_has_many_ioeventfds(void); int kvm_has_gsi_routing(void); int kvm_has_intx_set_mask(void); +#ifdef QEMU_NYX +int kvm_get_vm_fd(KVMState *s); +#endif + int kvm_init_vcpu(CPUState *cpu); int kvm_cpu_exec(CPUState *cpu); int kvm_destroy_vcpu(CPUState *cpu); @@ -367,6 +371,12 @@ int kvm_arch_get_registers(CPUState *cpu); /* full state set, modified during initialization or on vmload */ #define KVM_PUT_FULL_STATE 3 +#ifdef QEMU_NYX +#define KVM_PUT_FULL_STATE_FAST 4 + +int kvm_arch_get_registers_fast(CPUState *cpu); +#endif + int kvm_arch_put_registers(CPUState *cpu, int level); int kvm_arch_init(MachineState *ms, KVMState *s); diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index ac2d1f8b56..9135a95d60 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -41,5 +41,9 @@ typedef struct KVMMemoryListener { void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, AddressSpace *as, int as_id); +#ifdef QEMU_NYX +KVMMemoryListener* kvm_get_kml(int as_id); +#endif + void kvm_set_max_memslot_size(hwaddr max_slot_size); #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 3d9b18f7f8..f9e47d9131 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -236,6 +236,77 @@ struct kvm_hyperv_exit { #define KVM_EXIT_IOAPIC_EOI 26 #define KVM_EXIT_HYPERV 27 +#ifdef QEMU_NYX +#define HYPERCALL_KAFL_RAX_ID 0x01f +#define KAFL_EXIT_OFFSET 100 + +#define KVM_EXIT_KAFL_ACQUIRE 100 +#define KVM_EXIT_KAFL_GET_PAYLOAD 101 +#define KVM_EXIT_KAFL_GET_PROGRAM 102 +#define KVM_EXIT_KAFL_GET_ARGV 103 +#define KVM_EXIT_KAFL_RELEASE 104 +#define KVM_EXIT_KAFL_SUBMIT_CR3 105 +#define KVM_EXIT_KAFL_SUBMIT_PANIC 106 +#define KVM_EXIT_KAFL_SUBMIT_KASAN 107 +#define KVM_EXIT_KAFL_PANIC 108 +#define KVM_EXIT_KAFL_KASAN 109 +#define KVM_EXIT_KAFL_LOCK 110 +#define KVM_EXIT_KAFL_INFO 111 +#define KVM_EXIT_KAFL_NEXT_PAYLOAD 112 +#define KVM_EXIT_KAFL_PRINTF 113 + +/* Kernel Printf Debugger */ +#define KVM_EXIT_KAFL_PRINTK_ADDR 114 +#define KVM_EXIT_KAFL_PRINTK 115 + +/* user space only exit reasons */ +#define KVM_EXIT_KAFL_USER_RANGE_ADVISE 116 +#define KVM_EXIT_KAFL_USER_SUBMIT_MODE 117 +#define KVM_EXIT_KAFL_USER_FAST_ACQUIRE 118 +#define KVM_EXIT_KAFL_TOPA_MAIN_FULL 119 +#define KVM_EXIT_KAFL_USER_ABORT 120 + + +/* hypertrash only hypercalls */ +#define HYPERTRASH_HYPERCALL_MASK 0xAA000000 + +#define HYPERCALL_KAFL_NESTED_PREPARE (0 | HYPERTRASH_HYPERCALL_MASK) +#define HYPERCALL_KAFL_NESTED_CONFIG (1 | HYPERTRASH_HYPERCALL_MASK) +#define HYPERCALL_KAFL_NESTED_ACQUIRE (2 | HYPERTRASH_HYPERCALL_MASK) +#define HYPERCALL_KAFL_NESTED_RELEASE (3 | HYPERTRASH_HYPERCALL_MASK) + +#define KVM_EXIT_KAFL_NESTED_CONFIG 121 +#define KVM_EXIT_KAFL_NESTED_PREPARE 122 +#define KVM_EXIT_KAFL_NESTED_ACQUIRE 123 +#define KVM_EXIT_KAFL_NESTED_RELEASE 124 + +#define KVM_EXIT_KAFL_PAGE_DUMP_BP 125 +#define KVM_EXIT_KAFL_TIMEOUT 126 + +#define KVM_EXIT_KAFL_NESTED_HPRINTF 127 +#define KVM_EXIT_KAFL_MTF 128 + +#define KVM_EXIT_KAFL_RANGE_SUBMIT 129 +#define HYPERCALL_KAFL_REQ_STREAM_DATA 130 +#define KVM_EXIT_KAFL_NESTED_EARLY_RELEASE 131 +#define KVM_EXIT_KAFL_PANIC_EXTENDED 132 +#define KVM_EXIT_KAFL_CREATE_TMP_SNAPSHOT 133 + +#define KVM_EXIT_KAFL_DEBUG_TMP_SNAPSHOT 134 /* hypercall for debugging / development purposes */ + +#define 
KVM_EXIT_KAFL_GET_HOST_CONFIG 135 +#define KVM_EXIT_KAFL_SET_AGENT_CONFIG 136 + +#define KVM_EXIT_KAFL_DUMP_FILE 137 + +#define HYPERCALL_KAFL_REQ_STREAM_DATA_BULK 138 + + +#define KVM_CAP_NYX_PT 512 +#define KVM_CAP_NYX_FDL 513 + +#endif + /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ #define KVM_INTERNAL_ERROR_EMULATION 1 @@ -1611,4 +1682,62 @@ struct kvm_hyperv_eventfd { #define KVM_HYPERV_CONN_ID_MASK 0x00ffffff #define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) +#ifdef QEMU_NYX +/* + * ioctls for vmx_pt fds + */ +#define KVM_VMX_PT_SETUP_FD _IO(KVMIO, 0xd0) /* apply vmx_pt fd (via vcpu fd ioctl)*/ +#define KVM_VMX_PT_CONFIGURE_ADDR0 _IOW(KVMIO, 0xd1, __u64) /* configure IP-filtering for addr0_a & addr0_b */ +#define KVM_VMX_PT_CONFIGURE_ADDR1 _IOW(KVMIO, 0xd2, __u64) /* configure IP-filtering for addr1_a & addr1_b */ +#define KVM_VMX_PT_CONFIGURE_ADDR2 _IOW(KVMIO, 0xd3, __u64) /* configure IP-filtering for addr2_a & addr2_b */ +#define KVM_VMX_PT_CONFIGURE_ADDR3 _IOW(KVMIO, 0xd4, __u64) /* configure IP-filtering for addr3_a & addr3_b */ + +#define KVM_VMX_PT_CONFIGURE_CR3 _IOW(KVMIO, 0xd5, __u64) /* setup CR3 filtering value */ +#define KVM_VMX_PT_ENABLE _IO(KVMIO, 0xd6) /* enable and lock configuration */ +#define KVM_VMX_PT_GET_TOPA_SIZE _IOR(KVMIO, 0xd7, __u32) /* get defined ToPA size */ +#define KVM_VMX_PT_DISABLE _IO(KVMIO, 0xd8) /* disable and unlock configuration */ +#define KVM_VMX_PT_CHECK_TOPA_OVERFLOW _IO(KVMIO, 0xd9) /* check for ToPA overflow */ + +#define KVM_VMX_PT_ENABLE_ADDR0 _IO(KVMIO, 0xaa) /* enable IP-filtering for addr0 */ +#define KVM_VMX_PT_ENABLE_ADDR1 _IO(KVMIO, 0xab) /* enable IP-filtering for addr1 */ +#define KVM_VMX_PT_ENABLE_ADDR2 _IO(KVMIO, 0xac) /* enable IP-filtering for addr2 */ +#define KVM_VMX_PT_ENABLE_ADDR3 _IO(KVMIO, 0xad) /* enable IP-filtering for addr3 */ + +#define KVM_VMX_PT_DISABLE_ADDR0 _IO(KVMIO, 0xae) /* disable IP-filtering for addr0 */ +#define KVM_VMX_PT_DISABLE_ADDR1 _IO(KVMIO, 0xaf) /* disable IP-filtering for addr1 */ +#define KVM_VMX_PT_DISABLE_ADDR2 _IO(KVMIO, 0xe0) /* disable IP-filtering for addr2 */ +#define KVM_VMX_PT_DISABLE_ADDR3 _IO(KVMIO, 0xe1) /* disable IP-filtering for addr3 */ + +#define KVM_VMX_PT_ENABLE_CR3 _IO(KVMIO, 0xe2) /* enable CR3 filtering */ +#define KVM_VMX_PT_DISABLE_CR3 _IO(KVMIO, 0xe3) /* disable CR3 filtering */ + +#define KVM_VMX_PT_SUPPORTED _IO(KVMIO, 0xe4) + +#define KVM_VMX_FDL_SETUP_FD _IO(KVMIO, 0xe5) +#define KVM_VMX_FDL_SET _IOW(KVMIO, 0xe6, __u64) +#define KVM_VMX_FDL_FLUSH _IO(KVMIO, 0xe7) +#define KVM_VMX_FDL_GET_INDEX _IOR(KVMIO, 0xe8, __u64) + +#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9) + +/* Multi CR3 Support */ + +#define KVM_VMX_PT_CONFIGURE_MULTI_CR3 _IOW(KVMIO, 0xea, __u64) /* setup CR3 filtering value */ +#define KVM_VMX_PT_ENABLE_MULTI_CR3 _IO(KVMIO, 0xeb) /* enable CR3 filtering */ +#define KVM_VMX_PT_DISABLE_MULTI_CR3 _IO(KVMIO, 0xec) /* disable CR3 filtering */ + +/* Page Dump Support */ + +#define KVM_VMX_PT_SET_PAGE_DUMP_CR3 _IOW(KVMIO, 0xed, __u64) +#define KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3 _IO(KVMIO, 0xee) +#define KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3 _IO(KVMIO, 0xef) + +#define KVM_VMX_PT_ENABLE_MTF _IO(KVMIO, 0xf0) +#define KVM_VMX_PT_DISABLE_MTF _IO(KVMIO, 0xf1) + +/* KVM dirty-ring */ +#define KVM_CAP_DIRTY_LOG_RING 192 +#define KVM_EXIT_DIRTY_RING_FULL 31 +#endif + #endif /* __LINUX_KVM_H */ diff --git a/logo.png b/logo.png new file mode 100644 index 0000000000..2643cb58d2 Binary files /dev/null and b/logo.png differ diff --git a/memory.c b/memory.c index 
06484c2bff..f741b142a8 100644 --- a/memory.c +++ b/memory.c @@ -35,6 +35,11 @@ #include "hw/boards.h" #include "migration/vmstate.h" +#ifdef QEMU_NYX +#include "nyx/state.h" +#include "nyx/fast_vm_reload.h" +#endif + //#define DEBUG_UNASSIGNED static unsigned memory_region_transaction_depth; @@ -2011,6 +2016,9 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size) { assert(mr->ram_block); +#ifdef QEMU_NYX + fast_reload_qemu_user_fdl_set_dirty(get_fast_reload_snapshot(), mr, addr & 0xFFFFFFFFFFFFF000, size); +#endif cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, size, memory_region_get_dirty_log_mask(mr)); diff --git a/migration/savevm.c b/migration/savevm.c index a71b930b91..0b0b761920 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -252,14 +252,22 @@ typedef struct SaveState { QTAILQ_HEAD(, SaveStateEntry) handlers; int global_section_id; uint32_t len; +#ifndef QEMU_NYX const char *name; +#else + char *name; +#endif uint32_t target_page_bits; uint32_t caps_count; MigrationCapability *capabilities; QemuUUID uuid; } SaveState; +#ifndef QEMU_NYX static SaveState savevm_state = { +#else +SaveState savevm_state = { +#endif .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers), .global_section_id = 0, }; @@ -289,6 +297,18 @@ static uint32_t get_validatable_capabilities_count(void) return result; } +#ifdef QEMU_NYX +int vmstate_load(QEMUFile *f, SaveStateEntry *se); +int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc); +void save_section_header(QEMUFile *f, SaveStateEntry *se, uint8_t section_type); +void save_section_footer(QEMUFile *f, SaveStateEntry *se); +bool should_send_vmdesc(void); +int qemu_savevm_state(QEMUFile *f, Error **errp); +bool check_section_footer(QEMUFile *f, SaveStateEntry *se); +int qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis); +int qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis); +#endif + static int configuration_pre_save(void *opaque) { SaveState *state = opaque; @@ -297,7 +317,15 @@ static int configuration_pre_save(void *opaque) int i, j; state->len = strlen(current_name); +#ifndef QEMU_NYX state->name = current_name; +#else + if(state->name){ + free(state->name); + } + state->name = strdup(current_name); +#endif + state->target_page_bits = qemu_target_page_bits(); state->caps_count = get_validatable_capabilities_count(); @@ -508,7 +536,11 @@ static const VMStateDescription vmstate_uuid = { } }; +#ifndef QEMU_NYX static const VMStateDescription vmstate_configuration = { +#else +const VMStateDescription vmstate_configuration = { +#endif .name = "configuration", .version_id = 1, .pre_load = configuration_pre_load, @@ -848,7 +880,11 @@ void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd, } } +#ifndef QEMU_NYX static int vmstate_load(QEMUFile *f, SaveStateEntry *se) +#else +int vmstate_load(QEMUFile *f, SaveStateEntry *se) +#endif { trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); if (!se->vmsd) { /* Old style */ @@ -877,7 +913,11 @@ static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, QJSON *vmdes } } +#ifndef QEMU_NYX static int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc) +#else +int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc) +#endif { trace_vmstate_save(se->idstr, se->vmsd ? 
se->vmsd->name : "(old)"); if (!se->vmsd) { @@ -890,8 +930,13 @@ static int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc) /* * Write the header for device section (QEMU_VM_SECTION START/END/PART/FULL) */ +#ifndef QEMU_NYX static void save_section_header(QEMUFile *f, SaveStateEntry *se, uint8_t section_type) +#else +void save_section_header(QEMUFile *f, SaveStateEntry *se, + uint8_t section_type) +#endif { qemu_put_byte(f, section_type); qemu_put_be32(f, se->section_id); @@ -912,7 +957,11 @@ static void save_section_header(QEMUFile *f, SaveStateEntry *se, * Write a footer onto device sections that catches cases misformatted device * sections. */ +#ifndef QEMU_NYX static void save_section_footer(QEMUFile *f, SaveStateEntry *se) +#else +void save_section_footer(QEMUFile *f, SaveStateEntry *se) +#endif { if (migrate_get_current()->send_section_footer) { qemu_put_byte(f, QEMU_VM_SECTION_FOOTER); @@ -1262,7 +1311,11 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) return ret; } +#ifndef QEMU_NYX static bool should_send_vmdesc(void) +#else +bool should_send_vmdesc(void) +#endif { MachineState *machine = MACHINE(qdev_get_machine()); bool in_postcopy = migration_in_postcopy(); @@ -1498,7 +1551,11 @@ void qemu_savevm_state_cleanup(void) } } +#ifndef QEMU_NYX static int qemu_savevm_state(QEMUFile *f, Error **errp) +#else +int qemu_savevm_state(QEMUFile *f, Error **errp) +#endif { int ret; MigrationState *ms = migrate_get_current(); @@ -2200,7 +2257,11 @@ static int loadvm_process_command(QEMUFile *f) * Returns: true if the footer was good * false if there is a problem (and calls error_report to say why) */ +#ifndef QEMU_NYX static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) +#else +bool check_section_footer(QEMUFile *f, SaveStateEntry *se) +#endif { int ret; uint8_t read_mark; @@ -2237,8 +2298,13 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) return true; } +#ifndef QEMU_NYX static int qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis) +#else +int +qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis) +#endif { uint32_t instance_id, version_id, section_id; SaveStateEntry *se; @@ -2302,8 +2368,13 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis) return 0; } +#ifndef QEMU_NYX static int qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis) +#else +int +qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis) +#endif { uint32_t section_id; SaveStateEntry *se; diff --git a/migration/vmstate.c b/migration/vmstate.c index 7dd8ef66c6..8076cd92d1 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -22,10 +22,25 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, QJSON *vmdesc); + +#ifndef QEMU_NYX static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, void *opaque); +#else +int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, + void *opaque); +int vmstate_n_elems(void *opaque, const VMStateField *field); +int vmstate_size(void *opaque, const VMStateField *field); +void vmstate_handle_alloc(void *ptr, const VMStateField *field, void *opaque); +const VMStateDescription * vmstate_get_subsection(const VMStateDescription **sub, char *idstr); +#endif + +#ifndef QEMU_NYX static int vmstate_n_elems(void *opaque, const VMStateField *field) +#else +int vmstate_n_elems(void *opaque, const VMStateField *field) +#endif { int n_elems = 1; @@ -49,7 +64,11 @@ static 
int vmstate_n_elems(void *opaque, const VMStateField *field) return n_elems; } +#ifndef QEMU_NYX static int vmstate_size(void *opaque, const VMStateField *field) +#else +int vmstate_size(void *opaque, const VMStateField *field) +#endif { int size = field->size; @@ -63,8 +82,13 @@ static int vmstate_size(void *opaque, const VMStateField *field) return size; } +#ifndef QEMU_NYX static void vmstate_handle_alloc(void *ptr, const VMStateField *field, void *opaque) +#else +void vmstate_handle_alloc(void *ptr, const VMStateField *field, + void *opaque) +#endif { if (field->flags & VMS_POINTER && field->flags & VMS_ALLOC) { gsize size = vmstate_size(opaque, field); @@ -428,8 +452,13 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, return ret; } +#ifndef QEMU_NYX static const VMStateDescription * vmstate_get_subsection(const VMStateDescription **sub, char *idstr) +#else +const VMStateDescription * +vmstate_get_subsection(const VMStateDescription **sub, char *idstr) +#endif { while (sub && *sub) { if (strcmp(idstr, (*sub)->name) == 0) { @@ -440,8 +469,13 @@ vmstate_get_subsection(const VMStateDescription **sub, char *idstr) return NULL; } +#ifndef QEMU_NYX static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, void *opaque) +#else +int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, + void *opaque) +#endif { trace_vmstate_subsection_load(vmsd->name); diff --git a/nyx/Makefile.objs b/nyx/Makefile.objs new file mode 100644 index 0000000000..51e86689b2 --- /dev/null +++ b/nyx/Makefile.objs @@ -0,0 +1,33 @@ +obj-y += \ +hypercall.o \ +memory_access.o \ +interface.o \ +fast_vm_reload.o \ +fast_vm_reload_sync.o \ +printk.o synchronization.o \ +page_cache.o \ +kvm_nested.o \ +state.o \ +debug.o \ +auxiliary_buffer.o \ +mmh3.o \ +nested_hypercalls.o \ +sharedir.o \ +helpers.o \ +redqueen.o \ +file_helper.o \ +redqueen_trace.o \ +snapshot/helper.o \ +snapshot/devices/nyx_device_state.o \ +snapshot/devices/state_reallocation.o \ +snapshot/devices/vm_change_state_handlers.o \ +snapshot/block/nyx_block_snapshot.o \ +snapshot/block/block_cow.o \ +snapshot/memory/shadow_memory.o \ +snapshot/memory/nyx_fdl_user.o \ +snapshot/memory/block_list.o \ +snapshot/memory/backend/nyx_debug.o \ +snapshot/memory/backend/nyx_fdl.o \ +snapshot/memory/backend/nyx_dirty_ring.o \ +pt.o + diff --git a/nyx/auxiliary_buffer.c b/nyx/auxiliary_buffer.c new file mode 100644 index 0000000000..99a9750b1e --- /dev/null +++ b/nyx/auxiliary_buffer.c @@ -0,0 +1,287 @@ +/* + +Copyright (C) 2019 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . 
+ +*/ + +#include "nyx/auxiliary_buffer.h" +#include +#include +#include +#include "nyx/state.h" +#include "nyx/debug.h" + +/* experimental feature (currently broken) + * enabled via trace mode + */ +//#define SUPPORT_COMPILE_TIME_REDQUEEN + +#define VOLATILE_WRITE_64(dst, src) *((volatile uint64_t*)&dst) = (uint64_t)src +#define VOLATILE_WRITE_32(dst, src) *((volatile uint32_t*)&dst) = (uint32_t)src +#define VOLATILE_WRITE_16(dst, src) *((volatile uint16_t*)&dst) = (uint16_t)src +#define VOLATILE_WRITE_8(dst, src) *((volatile uint8_t*)&dst) = (uint8_t)src + +#define VOLATILE_READ_64(dst, src) dst = *((volatile uint64_t*)(&src)) +#define VOLATILE_READ_32(dst, src) dst = *((volatile uint32_t*)(&src)) +#define VOLATILE_READ_16(dst, src) dst = *((volatile uint16_t*)(&src)) +#define VOLATILE_READ_8(dst, src) dst = *((volatile uint8_t*)(&src)) + +static void volatile_memset(void* dst, uint8_t ch, size_t count){ + for (size_t i = 0; i < count; i++){ + VOLATILE_WRITE_8(((uint8_t*)dst)[i], ch); + } +} + +static void volatile_memcpy(void* dst, void* src, size_t size){ + for (size_t i = 0; i < size; i++){ + VOLATILE_WRITE_8(((uint8_t*)dst)[i], ((uint8_t*)src)[i]); + } +} + +void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){ + debug_fprintf(stderr, "%s\n", __func__); + volatile_memset((void*) auxilary_buffer, 0, sizeof(auxilary_buffer_t)); + + VOLATILE_WRITE_16(auxilary_buffer->header.version, QEMU_PT_VERSION); + + uint16_t hash = (sizeof(auxilary_buffer_header_t) + + sizeof(auxilary_buffer_cap_t) + + sizeof(auxilary_buffer_config_t) + + sizeof(auxilary_buffer_result_t) + + sizeof(auxilary_buffer_misc_t)) % 0xFFFF; + + VOLATILE_WRITE_16(auxilary_buffer->header.hash, hash); + + VOLATILE_WRITE_64(auxilary_buffer->header.magic, AUX_MAGIC); +} + +void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config){ + uint8_t changed = 0; + VOLATILE_READ_8(changed, auxilary_buffer->configuration.changed); + if (changed){ + + + uint8_t aux_byte; + + VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.redqueen_mode); + if(aux_byte){ + /* enable redqueen mode */ + if(aux_byte != shadow_config->redqueen_mode){ + GET_GLOBAL_STATE()->in_redqueen_reload_mode = true; + GET_GLOBAL_STATE()->redqueen_enable_pending = true; + GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_LIGHT_INSTRUMENTATION; + } + } + else{ + /* disable redqueen mode */ + if(aux_byte != shadow_config->redqueen_mode){ + GET_GLOBAL_STATE()->in_redqueen_reload_mode = false; + GET_GLOBAL_STATE()->redqueen_disable_pending = true; + GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION; + } + } + + VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.trace_mode); + if(aux_byte){ + /* enable trace mode */ + if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){ +#ifdef SUPPORT_COMPILE_TIME_REDQUEEN + GET_GLOBAL_STATE()->pt_trace_mode_force = true; +#endif + redqueen_set_trace_mode(GET_GLOBAL_STATE()->redqueen_state); + } + } + else { + /* disable trace mode */ + if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){ +#ifdef SUPPORT_COMPILE_TIME_REDQUEEN + GET_GLOBAL_STATE()->pt_trace_mode_force = false; +#endif + redqueen_unset_trace_mode(GET_GLOBAL_STATE()->redqueen_state); + } + } + + VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.page_dump_mode); + if(aux_byte){ + GET_GLOBAL_STATE()->dump_page = true; + uint64_t data; + VOLATILE_READ_64(data, auxilary_buffer->configuration.page_addr); + 
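/* latch the requested guest page; both trigger fields are cleared below to acknowledge the dump request to the frontend */ +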
GET_GLOBAL_STATE()->dump_page_addr = data; + //fprintf(stderr, "%s dump_page_addr => 0x%lx\n", __func__, GET_GLOBAL_STATE()->dump_page_addr); + VOLATILE_WRITE_8(auxilary_buffer->configuration.page_dump_mode, 0); + VOLATILE_WRITE_64(auxilary_buffer->configuration.page_addr, 0); + } + + /* modify reload mode */ + VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.reload_mode); + GET_GLOBAL_STATE()->in_reload_mode = aux_byte; + + /* modify protect_payload_buffer */ + VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.protect_payload_buffer); + GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte; + + /* modify discard_tmp_snapshot */ + VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.discard_tmp_snapshot); + GET_GLOBAL_STATE()->discard_tmp_snapshot = aux_byte; + VOLATILE_WRITE_8(auxilary_buffer->configuration.discard_tmp_snapshot, 0); + + /* copy to shadow */ + VOLATILE_READ_8(shadow_config->timeout_sec, auxilary_buffer->configuration.timeout_sec); + VOLATILE_READ_32(shadow_config->timeout_usec, auxilary_buffer->configuration.timeout_usec); + + //if(shadow_config->timeout_sec || shadow_config->timeout_usec){ + /* apply only non-zero values */ + update_itimer(&(GET_GLOBAL_STATE()->timeout_detector), shadow_config->timeout_sec, shadow_config->timeout_usec); + //} + + VOLATILE_READ_8(shadow_config->redqueen_mode, auxilary_buffer->configuration.redqueen_mode); + VOLATILE_READ_8(shadow_config->trace_mode, auxilary_buffer->configuration.trace_mode); + VOLATILE_READ_8(shadow_config->reload_mode, auxilary_buffer->configuration.reload_mode); + + VOLATILE_READ_8(shadow_config->verbose_level, auxilary_buffer->configuration.verbose_level); + + /* reset the 'changed' byte */ + VOLATILE_WRITE_8(auxilary_buffer->configuration.changed, 0); + } +} + +void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.crash_found, 1); +} + +void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.asan_found, 1); +} + +void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.timeout_found, 1); +} + +void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 1); +} + +void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 1); +} + +void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t sec, uint32_t usec){ + VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 1); + + VOLATILE_WRITE_8(auxilary_buffer->result.runtime_sec, sec); + VOLATILE_WRITE_32(auxilary_buffer->result.runtime_usec, usec); +} + +void flush_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){ + + memset(&auxilary_buffer->result.hprintf, 0x0, sizeof(auxilary_buffer_result_t)-2); + + + //memset(&(auxilary_buffer->result) + offsetof(auxilary_buffer_result_t, hprintf), 0x0, sizeof(auxilary_buffer_result_t) - offsetof(auxilary_buffer_result_t, hprintf)); + + /* + VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 0); + VOLATILE_WRITE_8(auxilary_buffer->result.hprintf, 0); + + + VOLATILE_WRITE_8(auxilary_buffer->result.crash_found, 0); + VOLATILE_WRITE_8(auxilary_buffer->result.asan_found, 0); + VOLATILE_WRITE_8(auxilary_buffer->result.timeout_found, 0); + VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 0); + VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 
0); + + VOLATILE_WRITE_8(auxilary_buffer->result.runtime_sec, 0); + VOLATILE_WRITE_32(auxilary_buffer->result.runtime_usec, 0); + + VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 0); + VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, 0); + + VOLATILE_WRITE_8(auxilary_buffer->result.payload_buffer_write_attempt_found, 0); + + VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, 0); + VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, 0); + */ + +} + +void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){ + VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2)); + volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t)MIN(len, MISC_SIZE-2)); + VOLATILE_WRITE_8(auxilary_buffer->result.hprintf, 1); +} + +void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){ + VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2)); + volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2)); + VOLATILE_WRITE_8(auxilary_buffer->result.crash_found, 1); +} + +void flush_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.hprintf, 0); +} + +void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state){ + if(auxilary_buffer){ + VOLATILE_WRITE_8(auxilary_buffer->result.state, state); + } + else{ + fprintf(stderr, "WARNING: auxilary_buffer pointer is zero\n"); + } +} + +void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr){ + VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 1); + VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, page_addr); +} + +void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success){ + VOLATILE_WRITE_8(auxilary_buffer->result.success, success); +} + +void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){ + VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2)); + volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2)); + VOLATILE_WRITE_8(auxilary_buffer->result.payload_buffer_write_attempt_found, 1); +} + + +void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value){ + VOLATILE_WRITE_8(auxilary_buffer->result.tmp_snapshot_created, value); +} + +void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){ + VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_trace_bitmap, value); +} + +void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){ + VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_ijon_trace_bitmap, value); +} + +void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value){ + VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, value); +} + +void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value){ + VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, value); +} + +void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value){ + if (value != auxilary_buffer->result.bb_coverage){ + VOLATILE_WRITE_32(auxilary_buffer->result.bb_coverage, value); + } +} \ No newline at end of file diff --git a/nyx/auxiliary_buffer.h b/nyx/auxiliary_buffer.h new file mode 100644 index 0000000000..c52f85f883 --- /dev/null +++ b/nyx/auxiliary_buffer.h @@ -0,0 +1,185 @@ +/* + +Copyright (C) 2019 Sergej 
Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>. + +*/ + +#pragma once +#include <stdint.h> +#include <stdbool.h> + +#define AUX_BUFFER_SIZE 4096 + +#define AUX_MAGIC 0x54502d554d4551 + +#define QEMU_PT_VERSION 1 /* let's start at 1 for the initial version using the aux buffer */ + +#define HEADER_SIZE 128 +#define CAP_SIZE 256 +#define CONFIG_SIZE 512 +#define STATE_SIZE 512 +#define MISC_SIZE (4096-(HEADER_SIZE+CAP_SIZE+CONFIG_SIZE+STATE_SIZE)) + +#define ADD_PADDING(max, type) uint8_t type ## _padding [max - sizeof(type)] + +typedef struct auxilary_buffer_header_s{ + uint64_t magic; /* 0x54502d554d4551 */ + uint16_t version; + uint16_t hash; + /* more to come */ +} __attribute__((packed)) auxilary_buffer_header_t; + +typedef struct auxilary_buffer_cap_s{ + uint8_t redqueen; + uint8_t agent_timeout_detection; /* agent implements its own timeout detection; host timeout detection is still in use, but the threshold is increased by 2x */ + uint8_t agent_trace_bitmap; /* agent implements its own tracing mechanism; PT tracing is disabled */ + uint8_t agent_ijon_trace_bitmap; /* agent uses the ijon shm buffer */ + + /* more to come */ +} __attribute__((packed)) auxilary_buffer_cap_t; + +typedef struct auxilary_buffer_config_s{ + uint8_t changed; /* set this byte to trigger a rescan of this buffer */ + + uint8_t timeout_sec; + uint32_t timeout_usec; + + /* trigger to enable / disable different QEMU-PT modes */ + uint8_t redqueen_mode; + uint8_t trace_mode; + uint8_t reload_mode; + + uint8_t verbose_level; + + uint8_t page_dump_mode; + uint64_t page_addr; + + /* nested mode only */ + uint8_t protect_payload_buffer; + + /* 0 -> disabled + 1 -> decoding + 2 -> decoding + full disassembling + */ + //uint8_t pt_processing_mode; + + /* snapshot extension */ + uint8_t discard_tmp_snapshot; + + /* more to come */ +} __attribute__((packed)) auxilary_buffer_config_t; + +typedef struct auxilary_buffer_result_s{ + /* 0 -> booting, + 1 -> loader level 1, + 2 -> loader level 2, + 3 -> ready to fuzz + */ + uint8_t state; + /* snapshot extension */ + uint8_t tmp_snapshot_created; + + /* FML */ + uint8_t padding_1; + uint8_t padding_2; + + uint32_t bb_coverage; + + uint8_t padding_3; + uint8_t padding_4; + + uint8_t hprintf; + uint8_t exec_done; + + uint8_t crash_found; + uint8_t asan_found; + + uint8_t timeout_found; + uint8_t reloaded; + + uint8_t pt_overflow; + uint8_t runtime_sec; + + uint8_t page_not_found; + uint8_t success; + + uint32_t runtime_usec; + uint64_t page_addr; + uint32_t dirty_pages; + uint32_t pt_trace_size; + + uint8_t payload_buffer_write_attempt_found; + + /* more to come */ +} __attribute__((packed)) auxilary_buffer_result_t; + +typedef struct auxilary_buffer_misc_s{ + uint16_t len; + uint8_t data; + /* none yet */ +} __attribute__((packed)) auxilary_buffer_misc_t; + +typedef struct auxilary_buffer_s{ + auxilary_buffer_header_t header; + ADD_PADDING(HEADER_SIZE, auxilary_buffer_header_t); + + auxilary_buffer_cap_t capabilites; + 
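/* the ADD_PADDING entries pin every section to a fixed offset (capabilites @ 128, configuration @ 384, result @ 896, misc @ 1408), so the frontend can locate them without sharing the struct definitions */ +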
ADD_PADDING(CAP_SIZE, auxilary_buffer_cap_t); + + auxilary_buffer_config_t configuration; + ADD_PADDING(CONFIG_SIZE, auxilary_buffer_config_t); + + auxilary_buffer_result_t result; + ADD_PADDING(STATE_SIZE, auxilary_buffer_result_t); + + auxilary_buffer_misc_t misc; + ADD_PADDING(MISC_SIZE, auxilary_buffer_misc_t); + +} __attribute__((packed)) auxilary_buffer_t; + +void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer); +void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config); + +void flush_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer); + +void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer); +void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer); +void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer); +void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer); +void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer); +void flush_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer); +void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t sec, uint32_t usec); +void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state); +void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len); + +void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr); +void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success); +void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len); + +void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value); + +void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value); +void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value); + + +void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value); +void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value); + +void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value); + +void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len); \ No newline at end of file diff --git a/nyx/debug.c b/nyx/debug.c new file mode 100644 index 0000000000..77416f2fdc --- /dev/null +++ b/nyx/debug.c @@ -0,0 +1,135 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <execinfo.h> +#include "nyx/debug.h" +#include "signal.h" + +#ifdef ENABLE_BACKTRACES +#define BT_BUF_SIZE 100 + +void qemu_backtrace(void){ + void *buffer[BT_BUF_SIZE]; + int nptrs = 0; + int j; + + nptrs = backtrace(buffer, BT_BUF_SIZE); + fprintf(stderr, "backtrace() returned %d addresses\n", nptrs); + + + char **strings = backtrace_symbols(buffer, nptrs); + if (strings == NULL) { + //perror("backtrace_symbols"); + fprintf(stderr, "backtrace_symbols failed!\n"); + return; + //exit(EXIT_FAILURE); + } + + for (j = 0; j < nptrs; j++) + fprintf(stderr, "%s\n", strings[j]); + + free(strings); +} + +static void sigsegfault_handler(int signo, siginfo_t *info, void *extra) { + fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo); + qemu_backtrace(); + fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d)...\n", getpid()); + while(1){ + sleep(1); + } +} + +static void sigabrt_handler(int signo, siginfo_t *info, void *extra) { + fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), 
signo); + qemu_backtrace(); + fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d)...\n", getpid()); + while(1){ + sleep(1); + } +} + +static void sigint_handler(int signo, siginfo_t *info, void *extra) { + fprintf(stderr, "[qemu-nyx] bye! (pid: %d / signal: %d)\n", getpid(), signo); + exit(0); +} + +/* +static void aexit_handler(void) { + fprintf(stderr, "ATTEMPT TO CALL EXIT (PID: %d)\n", getpid()); + qemu_backtrace(); + fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d)...\n", getpid()); + while(1){ + sleep(1); + } +} +*/ + +void init_crash_handler(void){ + + //qemu_backtrace(); + + struct sigaction action; + action.sa_flags = SA_SIGINFO; + action.sa_sigaction = sigsegfault_handler; + + if (sigaction(SIGSEGV, &action, NULL) == -1) { + fprintf(stderr, "SIGSEGV: sigaction failed\n"); + _exit(1); + } + + + + action.sa_sigaction = sigabrt_handler; + + if (sigaction(SIGABRT, &action, NULL) == -1) { + fprintf(stderr, "SIGABRT: sigaction failed\n"); + _exit(1); + } + + /* don't install a SIGINT handler if the nyx block cow cache layer is disabled */ + if(!getenv("NYX_DISABLE_BLOCK_COW")){ + action.sa_sigaction = sigint_handler; + if (sigaction(SIGINT, &action, NULL) == -1) { + fprintf(stderr, "SIGINT: sigaction failed\n"); + _exit(1); + } + } + //atexit(aexit_handler); + + /* test */ + //int i = 0; + //((char*)i)[3] = 0; + +} + +void hexdump_kafl(const void* data, size_t size) { + char ascii[17]; + size_t i, j; + ascii[16] = '\0'; + for (i = 0; i < size; ++i) { + printf("%02X ", ((unsigned char*)data)[i]); + if (((unsigned char*)data)[i] >= ' ' && ((unsigned char*)data)[i] <= '~') { + ascii[i % 16] = ((unsigned char*)data)[i]; + } else { + ascii[i % 16] = '.'; + } + if ((i+1) % 8 == 0 || i+1 == size) { + printf(" "); + if ((i+1) % 16 == 0) { + printf("| %s \n", ascii); + } else if (i+1 == size) { + ascii[(i+1) % 16] = '\0'; + if ((i+1) % 16 <= 8) { + printf(" "); + } + for (j = (i+1) % 16; j < 16; ++j) { + printf(" "); + } + printf("| %s \n", ascii); + } + } + } +} + +#endif diff --git a/nyx/debug.h b/nyx/debug.h new file mode 100644 index 0000000000..4553792941 --- /dev/null +++ b/nyx/debug.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#include +#include + +#define ENABLE_BACKTRACES + +#define QEMU_PT_PRINT_PREFIX "[QEMU-PT]\t" +#define CORE_PREFIX "Core: " +#define MEM_PREFIX "Memory: " +#define RELOAD_PREFIX "Reload: " +#define PT_PREFIX "PT: " +#define INTERFACE_PREFIX "Interface: " +#define REDQUEEN_PREFIX "Redqueen: " +#define DISASM_PREFIX "Disasm: " +#define PAGE_CACHE_PREFIX "PageCache: " +#define NESTED_VM_PREFIX "Nested: " + + +#define DEBUG_VM_PREFIX "Debug: " + +#define COLOR "\033[1;35m" +#define ENDC "\033[0m" + + +//#define debug_printf(format, ...) printf (format, ##__VA_ARGS__) +//#define debug_fprintf(fd, format, ...) fprintf (fd, format, ##__VA_ARGS__) +//#define QEMU_PT_PRINTF(PREFIX, format, ...) printf (QEMU_PT_PRINT_PREFIX COLOR PREFIX format ENDC "\n", ##__VA_ARGS__) +//#define QEMU_PT_PRINTF_DBG(PREFIX, format, ...) printf (QEMU_PT_PRINT_PREFIX PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__) +//#define QEMU_PT_PRINTF_DEBUG(format, ...) fprintf (stderr, QEMU_PT_PRINT_PREFIX DEBUG_VM_PREFIX "(%s#:%d)\t"format "\n", __BASE_FILE__, __LINE__, ##__VA_ARGS__) + +#define debug_printf(format, ...) +#define debug_fprintf(fd, format, ...) +#define QEMU_PT_PRINTF(PREFIX, format, ...) +#define QEMU_PT_PRINTF_DBG(PREFIX, format, ...) +#define QEMU_PT_PRINTF_DEBUG(format, ...) 
+ + + +#ifdef ENABLE_BACKTRACES + +void qemu_backtrace(void); +void init_crash_handler(void); +void hexdump_kafl(const void* data, size_t size); + +#endif \ No newline at end of file diff --git a/nyx/fast_vm_reload.c b/nyx/fast_vm_reload.c new file mode 100644 index 0000000000..de3d231ef2 --- /dev/null +++ b/nyx/fast_vm_reload.c @@ -0,0 +1,613 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" +#include "migration/register.h" +#include "migration/savevm.h" +#include "migration/qemu-file.h" +#include "migration/global_state.h" + + +#include +#include +#include +#include +#include +#include +#include + +#include "sysemu/kvm_int.h" +#include "sysemu/cpus.h" +#include "sysemu/reset.h" + +#include "nyx/fast_vm_reload.h" +#include "nyx/debug.h" +#include "nyx/state.h" + +#include "sysemu/block-backend.h" +#include "block/qapi.h" +#include "sysemu/runstate.h" +#include "migration/vmstate.h" + +#include "nyx/memory_access.h" + +#include "nyx/helpers.h" + +#include "nyx/snapshot/helper.h" +#include "nyx/snapshot/memory/block_list.h" +#include "nyx/snapshot/memory/shadow_memory.h" + +#include "nyx/snapshot/memory/backend/nyx_debug.h" +#include "nyx/snapshot/memory/backend/nyx_fdl.h" +#include "nyx/snapshot/memory/nyx_fdl_user.h" +#include "nyx/snapshot/devices/nyx_device_state.h" +#include "nyx/snapshot/block/nyx_block_snapshot.h" + +FastReloadMemoryMode mode = RELOAD_MEMORY_MODE_DEBUG; + +/* basic operations */ + +static void fast_snapshot_init_operation(fast_reload_t* self, const char* snapshot_folder, bool pre_snapshot){ + + assert((snapshot_folder == NULL && pre_snapshot == false) || snapshot_folder); + + if (snapshot_folder){ + self->device_state = nyx_device_state_init_from_snapshot(snapshot_folder, pre_snapshot); + self->shadow_memory_state = shadow_memory_init_from_snapshot(snapshot_folder, pre_snapshot); + } + else{ + self->device_state = nyx_device_state_init(); + self->shadow_memory_state = shadow_memory_init(); + } + + if(!pre_snapshot){ + switch(mode){ + case RELOAD_MEMORY_MODE_DEBUG: + break; + case RELOAD_MEMORY_MODE_DEBUG_QUIET: + break; + case RELOAD_MEMORY_MODE_FDL: + self->fdl_state = nyx_fdl_init(self->shadow_memory_state); + break; + case RELOAD_MEMORY_MODE_FDL_DEBUG: + self->fdl_state = nyx_fdl_init(self->shadow_memory_state); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING: + self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG: + self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state); + break; + } + + self->fdl_user_state = nyx_fdl_user_init(self->shadow_memory_state); + + nyx_fdl_user_enable(self->fdl_user_state); + } + + if (snapshot_folder){ + self->block_state = 
nyx_block_snapshot_init_from_file(snapshot_folder, pre_snapshot); + } + else{ + self->block_state = nyx_block_snapshot_init(); + } + + memory_global_dirty_log_start(); + if(!pre_snapshot){ + self->root_snapshot_created = true; + } +} + +static void fast_snapshot_init_from_snapshot_operation(fast_reload_t* self, const char* folder){ + + self->device_state = nyx_device_state_init(); + + self->shadow_memory_state = shadow_memory_init(); + + switch(mode){ + case RELOAD_MEMORY_MODE_DEBUG: + break; + case RELOAD_MEMORY_MODE_DEBUG_QUIET: + break; + case RELOAD_MEMORY_MODE_FDL: + self->fdl_state = nyx_fdl_init(self->shadow_memory_state); + break; + case RELOAD_MEMORY_MODE_FDL_DEBUG: + self->fdl_state = nyx_fdl_init(self->shadow_memory_state); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING: + self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG: + self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state); + break; + } + + self->fdl_user_state = nyx_fdl_user_init(self->shadow_memory_state); + + nyx_fdl_user_enable(self->fdl_user_state); + + self->block_state = nyx_block_snapshot_init(); + + memory_global_dirty_log_start(); + self->root_snapshot_created = true; +} + +static void fast_snapshot_restore_operation(fast_reload_t* self){ + + switch(mode){ + case RELOAD_MEMORY_MODE_DEBUG: + nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true); + break; + case RELOAD_MEMORY_MODE_DEBUG_QUIET: + nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, false); + break; + case RELOAD_MEMORY_MODE_FDL: + nyx_snapshot_nyx_fdl_restore(self->fdl_state, self->shadow_memory_state, self->blocklist); + break; + case RELOAD_MEMORY_MODE_FDL_DEBUG: + nyx_snapshot_nyx_fdl_restore(self->fdl_state, self->shadow_memory_state, self->blocklist); + nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING: + nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state, self->shadow_memory_state, self->blocklist); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG: + nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state, self->shadow_memory_state, self->blocklist); + nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true); + //assert(false); + //sleep(1); + break; + } + + nyx_snapshot_user_fdl_restore(self->fdl_user_state, self->shadow_memory_state, self->blocklist); + //nyx_device_state_post_restore(self->device_state); +} + +static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t* self){ + /* flush all pending block writes */ + bdrv_drain_all(); + + memory_global_dirty_log_sync(); + + nyx_device_state_switch_incremental(self->device_state); + nyx_block_snapshot_switch_incremental(self->block_state); +} + +static inline void fast_snapshot_create_incremental_operation(fast_reload_t* self){ + shadow_memory_prepare_incremental(self->shadow_memory_state); + nyx_device_state_save_tsc_incremental(self->device_state); + + switch(mode){ + case RELOAD_MEMORY_MODE_DEBUG: + nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true); + break; + case RELOAD_MEMORY_MODE_DEBUG_QUIET: + nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, false); + break; + case RELOAD_MEMORY_MODE_FDL: + nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state, self->shadow_memory_state, self->blocklist); + break; + case RELOAD_MEMORY_MODE_FDL_DEBUG: + 
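/* debug variant: run the fast FDL tracker first, then cross-check the result via the memcmp-based page walk */ +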
nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state, self->shadow_memory_state, self->blocklist); + nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING: + nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state, self->shadow_memory_state, self->blocklist); + break; + case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG: + nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state, self->shadow_memory_state, self->blocklist); + nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true); + break; + } + + nyx_snapshot_nyx_fdl_user_save_root_pages(self->fdl_user_state, self->shadow_memory_state, self->blocklist); + shadow_memory_switch_snapshot(self->shadow_memory_state, true); + + kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE_FAST); + qemu_get_cpu(0)->vcpu_dirty = false; +} + + +fast_reload_t* fast_reload_new(void){ + fast_reload_t* self = malloc(sizeof(fast_reload_t)); + memset(self, 0x0, sizeof(fast_reload_t)); + + self->root_snapshot_created = false; + self->incremental_snapshot_enabled = false; + + self->bitmap_copy = NULL; + + return self; +} + +void fast_reload_set_mode(fast_reload_t* self, FastReloadMemoryMode m){ + assert(!self->root_snapshot_created); + mode = m; +} + +FastReloadMemoryMode fast_reload_get_mode(fast_reload_t* self){ + return mode; +} + +void fast_reload_init(fast_reload_t* self){ + self->blocklist = snapshot_page_blocklist_init(); +} + +/* fix this */ +void fast_reload_destroy(fast_reload_t* self){ + + /* complete me */ + + //close(self->vmx_fdl_fd); + //munmap(self->fdl_data, (self->guest_ram_size/0x1000)*8); + +/* + munmap(self->ptr, self->guest_ram_size); + + free(self->black_list_pages); + + free(self); +*/ +} + +inline static void unlock_snapshot(const char* folder){ + char* info_file; + char* lock_file; + + assert(asprintf(&info_file, "%s/INFO.txt", folder) != -1); + + /* info file */ + FILE* f_info = fopen(info_file, "w+b"); + if(GET_GLOBAL_STATE()->fast_reload_pre_image){ + const char* msg = "THIS IS A NYX PRE IMAGE SNAPSHOT FOLDER!\n"; + fwrite(msg, strlen(msg), 1, f_info); + } + else{ + const char* msg = "THIS IS A NYX SNAPSHOT FOLDER!\n"; + fwrite(msg, strlen(msg), 1, f_info); + } + fclose(f_info); + + assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1); + + int fd = open(lock_file, O_WRONLY | O_CREAT, S_IRWXU); + close(fd); + + free(lock_file); +} + +inline static void wait_for_snapshot(const char* folder){ + char* lock_file; + + assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1); + + while( access(lock_file, F_OK ) == -1 ) { + sleep(1); + + } + free(lock_file); +} + +void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder){ + + //printf("================ %s => %s =============\n", __func__, folder); + + /* sanity check */ + if(!folder_exits(folder)){ + QEMU_PT_PRINTF(RELOAD_PREFIX,"Folder %s does not exist...failed!", folder); + assert(0); + } + + /* shadow memory state */ + shadow_memory_serialize(self->shadow_memory_state, folder); + + /* device state */ + nyx_device_state_serialize(self->device_state, folder); + + /* block device state */ + nyx_block_snapshot_serialize(self->block_state, folder); + + /* NYX's state */ + dump_global_state(folder); + + /* finalize snapshot */ + unlock_snapshot(folder); +} + + + +static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* folder, bool lock_iothread, bool pre_snapshot){ + //printf("%s called\n", __func__); + + assert(self != 
NULL); + wait_for_snapshot(folder); + + QEMU_PT_PRINTF(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder); + + rcu_read_lock(); + + bdrv_drain_all(); + bdrv_flush_all(); + + cpu_synchronize_all_pre_loadvm(); + + if(!pre_snapshot){ + memory_global_dirty_log_stop(); + memory_global_dirty_log_sync(); + } + + fast_snapshot_init_operation(self, folder, pre_snapshot); + + rcu_read_unlock(); + + if(!pre_snapshot){ + load_global_state(folder); + } + + cpu_synchronize_all_post_init(); + qemu_get_cpu(0)->vcpu_dirty = true; + kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE); + if(!pre_snapshot){ + nyx_device_state_save_tsc(self->device_state); + } + + //fast_reload_restore(self); + vm_start(); +} + +void fast_reload_create_from_file(fast_reload_t* self, const char* folder, bool lock_iothread){ + //printf("CALL: %s\n", __func__); + fast_reload_create_from_snapshot(self, folder, lock_iothread, false); +} + +void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* folder, bool lock_iothread){ + //printf("CALL: %s\n", __func__); + fast_reload_create_from_snapshot(self, folder, lock_iothread, true); +} + +void fast_reload_create_in_memory(fast_reload_t* self){ + + assert(self != NULL); + debug_fprintf(stderr, "===>%s\n", __func__); + QEMU_PT_PRINTF(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM CURRENT VM STATE"); + + rcu_read_lock(); + + bdrv_drain_all(); + bdrv_flush_all(); + + cpu_synchronize_all_pre_loadvm(); + + memory_global_dirty_log_stop(); + memory_global_dirty_log_sync(); + + fast_snapshot_init_operation(self, NULL, false); + + rcu_read_unlock(); + cpu_synchronize_all_post_init(); + +} + + + +void fast_reload_restore(fast_reload_t* self){ + assert(self != NULL); + self->dirty_pages = 0; + + //rcu_read_lock(); + //cpu_synchronize_all_states(); + //bdrv_drain_all_begin(); + + /* flush all pending block writes */ + bdrv_drain_all(); + //bdrv_flush_all(); + + memory_global_dirty_log_sync(); + //unset_black_list_pages(self); + + nyx_block_snapshot_reset(self->block_state); + /* + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + //if(!self->tmp_snapshot.enabled) + cow_cache_reset(self->cow_cache_array[i]); + } + */ + + + + nyx_device_state_restore(self->device_state); + //fdl_fast_reload(self->qemu_state); + //fdl_fast_reload(self->device_state->qemu_state); + + nyx_block_snapshot_flush(self->block_state); + //GET_GLOBAL_STATE()->cow_cache_full = false; + //call_fast_change_handlers(); + + + fast_snapshot_restore_operation(self); + + //find_dirty_pages_fdl(self); + //fast_reload_qemu_user_fdl_restore(self); + + + //set_tsc_value(self, self->tmp_snapshot.enabled); + nyx_device_state_post_restore(self->device_state); + kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE_FAST); + qemu_get_cpu(0)->vcpu_dirty = false; + + //bdrv_drain_all_end(); + //rcu_read_unlock(); + + + //printf("========================= NEXT\n\n"); + + return; +} + + +bool read_snapshot_memory(fast_reload_t* self, uint64_t address, void* ptr, size_t size){ + return shadow_memory_read_physical_memory(self->shadow_memory_state, address, ptr, size); +} + +/* fix this */ +void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr){ + + abort(); /* fix this function first -> pc_piix memory split issue */ + + /* + assert(self != NULL); + assert(!(physaddr&0xFFF)); // physaddr must be 4kb align ! 
+ if (self->shadow_memory_regions){ + for(uint64_t j = 0; j < self->shadow_memory_regions; j++){ + if(physaddr >= self->ram_block_array[j]->offset && physaddr < (self->ram_block_array[j]->offset+self->ram_block_array[j]->used_length)){ + return self->shadow_memory[j]+(physaddr-self->ram_block_array[j]->offset); + } + } + } + */ + return NULL; // not found ... sorry :( +} + +void fast_reload_blacklist_page(fast_reload_t* self, uint64_t physaddr){ + + assert(self->blocklist); + snapshot_page_blocklist_add(self->blocklist, physaddr); + return; +} + +bool fast_reload_snapshot_exists(fast_reload_t* self){ + if(!self){ // || !self->qemu_state){ + return false; + } + return true; +} + +void fast_reload_create_tmp_snapshot(fast_reload_t* self){ + assert(self); // && self->qemu_state); + + self->dirty_pages = 0; + + fast_snapshot_pre_create_incremental_operation(self); + + if(!self->bitmap_copy){ + if(GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size){ + assert(GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size); + self->bitmap_copy = malloc(GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size); + } + } + fuzz_bitmap_copy_to_buffer(self->bitmap_copy); + + //GET_GLOBAL_STATE()->cow_cache_full = false; + + //self->tmp_snapshot.root_dirty_pages_num = 0; + + + fast_snapshot_create_incremental_operation(self); + self->incremental_snapshot_enabled = true; +} + +void fast_reload_discard_tmp_snapshot(fast_reload_t* self){ + assert(self && self->incremental_snapshot_enabled); + + self->dirty_pages = 0; + + /* flush all pending block writes */ + bdrv_drain_all(); + + memory_global_dirty_log_sync(); + //unset_black_list_pages(self); + + fast_snapshot_restore_operation(self); + + //find_dirty_pages_fdl(self); + //fast_reload_qemu_user_fdl_restore(self); + + shadow_memory_restore_memory(self->shadow_memory_state); + shadow_memory_switch_snapshot(self->shadow_memory_state, false); + //restore_root_memory(self); + + + + nyx_device_state_disable_incremental(self->device_state); + //fdl_fast_disable_tmp(self->qemu_state); + //fdl_fast_disable_tmp(self->device_state->qemu_state); + + nyx_block_snapshot_disable_incremental(self->block_state); + + /* + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + cow_cache_disable_tmp_mode(self->cow_cache_array[i]); + } + */ + self->incremental_snapshot_enabled = false; + +} + +bool fast_reload_root_created(fast_reload_t* self){ + return self->root_snapshot_created; +} + +bool fast_reload_tmp_created(fast_reload_t* self){ + return self->incremental_snapshot_enabled; +} + +uint32_t get_dirty_page_num(fast_reload_t* self){ + if(self){ + return self->dirty_pages; + } + else{ + + return 0; + } +} + +bool fast_reload_set_bitmap(fast_reload_t* self){ + if(self->incremental_snapshot_enabled){ + fuzz_bitmap_copy_from_buffer(self->bitmap_copy); + return true; + } + return false; +} + +void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t* self, MemoryRegion *mr, uint64_t addr, uint64_t length){ + /* works only with PC.RAM's memory region */ + assert(mr->alias_offset == 0); + + nyx_fdl_user_set(self->fdl_user_state, self->shadow_memory_state, self->fdl_state, addr, length); +} + +void fast_reload_handle_dirty_ring_full(fast_reload_t* self){ + if(self->dirty_ring_state){ + nyx_snapshot_nyx_dirty_ring_flush_and_collect(self->dirty_ring_state, self->shadow_memory_state, self->blocklist); + } + else{ + nyx_snapshot_nyx_dirty_ring_flush(); + } + +} \ No newline at end of file diff --git 
a/nyx/fast_vm_reload.h b/nyx/fast_vm_reload.h new file mode 100644 index 0000000000..0c829377e4 --- /dev/null +++ b/nyx/fast_vm_reload.h @@ -0,0 +1,136 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>. + +*/ + +#pragma once + +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "qemu-common.h" +#include "sysemu/runstate.h" + +#include "nyx/snapshot/memory/block_list.h" +#include "nyx/snapshot/memory/shadow_memory.h" +#include "nyx/snapshot/memory/backend/nyx_fdl.h" +#include "nyx/snapshot/memory/nyx_fdl_user.h" +#include "nyx/snapshot/devices/nyx_device_state.h" + +#include "nyx/snapshot/block/nyx_block_snapshot.h" + +#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h" + + +typedef enum FastReloadMemoryMode { + RELOAD_MEMORY_MODE_DEBUG, /* memcmp-based dirty tracing - it's super slow - only for debug purposes */ + RELOAD_MEMORY_MODE_DEBUG_QUIET, /* debug mode in non-verbose mode */ + RELOAD_MEMORY_MODE_FDL, /* super fast page tracker built around KVM-PT's dirty tracker (FDL = fast dirty log) */ + RELOAD_MEMORY_MODE_FDL_DEBUG, /* FDL + debug mode */ + RELOAD_MEMORY_MODE_DIRTY_RING, /* fast page tracker built around KVM's dirty ring API */ + RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG, /* dirty ring + debug mode */ +} FastReloadMemoryMode; + + + +typedef struct fast_reload_dump_head_s{ + uint32_t shadow_memory_regions; + uint32_t ram_region_index; +} fast_reload_dump_head_t; + + +typedef struct fast_reload_s{ + + FastReloadMemoryMode mode; + + /* memory snapshot */ + shadow_memory_t* shadow_memory_state; + + /* state of page frame blocklist */ + snapshot_page_blocklist_t* blocklist; + + /* state of FDL */ + nyx_fdl_t* fdl_state; + + /* dirty ring state */ + nyx_dirty_ring_t* dirty_ring_state; + + /* state of user-level FDL */ + nyx_fdl_user_t* fdl_user_state; + + /* nyx's serialized device state */ + nyx_device_state_t* device_state; + + nyx_block_t* block_state; + + bool root_snapshot_created; + bool incremental_snapshot_enabled; + + /* copy of the fuzzing bitmap & ijon state buffer */ + void* bitmap_copy; + + + + uint32_t dirty_pages; + +} fast_reload_t; + + +fast_reload_t* fast_reload_new(void); + + +/* get rid of this */ +void fast_reload_create_to_file(fast_reload_t* self, const char* folder, bool lock_iothread); +void fast_reload_create_from_file(fast_reload_t* self, const char* folder, bool lock_iothread); +void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* folder, bool lock_iothread); + + +/* keep this */ +void fast_reload_create_in_memory(fast_reload_t* self); + + +void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder); + + +void fast_reload_restore(fast_reload_t* self); +void fast_reload_blacklist_page(fast_reload_t* self, uint64_t physaddr); +void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr); +bool fast_reload_snapshot_exists(fast_reload_t* self); + +bool 
read_snapshot_memory(fast_reload_t* self, uint64_t address, void* ptr, size_t size); + +void fast_reload_destroy(fast_reload_t* self); + +void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t* self, MemoryRegion *mr, uint64_t addr, uint64_t length); + +void fast_reload_create_tmp_snapshot(fast_reload_t* self); +void fast_reload_discard_tmp_snapshot(fast_reload_t* self); + +bool fast_reload_root_created(fast_reload_t* self); +bool fast_reload_tmp_created(fast_reload_t* self); + +bool fast_reload_set_bitmap(fast_reload_t* self); + +uint32_t get_dirty_page_num(fast_reload_t* self); + +void fast_reload_init(fast_reload_t* self); + +void fast_reload_set_mode(fast_reload_t* self, FastReloadMemoryMode m); + +void fast_reload_handle_dirty_ring_full(fast_reload_t* self); +FastReloadMemoryMode fast_reload_get_mode(fast_reload_t* self); diff --git a/nyx/fast_vm_reload_sync.c b/nyx/fast_vm_reload_sync.c new file mode 100644 index 0000000000..02c93c5201 --- /dev/null +++ b/nyx/fast_vm_reload_sync.c @@ -0,0 +1,369 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "fast_vm_reload_sync.h" +#include +#include +#include +#include "qapi/qapi-types-run-state.h" +#include "qemu-common.h" +#include "exec/memory.h" +#include "qemu/main-loop.h" +#include "sysemu/kvm_int.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "nyx/state.h" +#include "nyx/fast_vm_reload.h" +#include "nyx/debug.h" +#include "nyx/kvm_nested.h" + +extern int save_snapshot(const char *name, Error **errp); +extern int load_snapshot(const char *name, Error **errp); + +static void adjust_rip(CPUX86State *env, fast_reload_t* snapshot){ + switch(fast_reload_get_mode(snapshot)){ + case RELOAD_MEMORY_MODE_DEBUG: + case RELOAD_MEMORY_MODE_DEBUG_QUIET: + env->eip -= 1; /* out */ + break; + case RELOAD_MEMORY_MODE_FDL: + case RELOAD_MEMORY_MODE_FDL_DEBUG: + env->eip -= 3; /* vmcall */ + break; + case RELOAD_MEMORY_MODE_DIRTY_RING: + case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG: + env->eip -= 1; /* out */ + break; + } +} + +fast_vm_reload_sync_t* init_fast_vm_reload_sync(void){ + fast_vm_reload_sync_t* self = malloc(sizeof(fast_vm_reload_sync_t)); + memset(self, 0, sizeof(fast_vm_reload_sync_t)); + + self->request_exists = false; + self->request_exists_pre = false; + self->current_request = REQUEST_VOID; + self->debug_mode = false; + + /* TODO: only RELOAD_MODE_NO_BLOCK is supported for actual fuzzing */ + self->mode = RELOAD_MODE_NO_BLOCK; + + return self; +} + +bool fast_snapshot_exists(fast_vm_reload_sync_t* self, FastReloadRequest type){ + + assert(self->mode != RELOAD_MODE_DEBUG); + + switch(type){ + case REQUEST_PRE_EXISTS: + abort(); + case REQUEST_ROOT_EXISTS: + return fast_reload_root_created(get_fast_reload_snapshot()); + case REQUEST_TMP_EXISTS: + return fast_reload_tmp_created(get_fast_reload_snapshot()); + default: + abort(); + } +} + + + +static inline void perform_task_debug_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){ + struct Error* errp = NULL; + + switch(request){ + case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP: + abort(); + case REQUEST_SAVE_SNAPSHOT_PRE: + vm_stop(RUN_STATE_SAVE_VM); + save_snapshot("pre_root", &errp); + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return; /* return here to skip the vm_start call */ + case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP: + abort(); + case REQUEST_SAVE_SNAPSHOT_ROOT: + vm_stop(RUN_STATE_SAVE_VM); + save_snapshot("root", &errp); + break; + case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP: + abort(); + case REQUEST_SAVE_SNAPSHOT_TMP: + 
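/* debug mode: the tmp snapshot goes through QEMU's regular qcow2-backed savevm path */ +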
vm_stop(RUN_STATE_SAVE_VM); + save_snapshot("tmp", &errp); + break; + case REQUEST_LOAD_SNAPSHOT_PRE: + /* probably never called */ + abort(); + break; + case REQUEST_LOAD_SNAPSHOT_ROOT: + vm_stop(RUN_STATE_RESTORE_VM); + load_snapshot("root", &errp); + break; + case REQUEST_LOAD_SNAPSHOT_TMP: + vm_stop(RUN_STATE_RESTORE_VM); + load_snapshot("tmp", &errp); + break; + + default: + abort(); + } + if (errp) { + error_reportf_err(errp, "Error: "); + errp = NULL; + abort(); + } + vm_start(); +} + +static inline void create_root_snapshot(void){ + if (GET_GLOBAL_STATE()->fast_reload_enabled){ + debug_printf("===> GET_GLOBAL_STATE()->fast_reload_enabled: TRUE\n"); + if (GET_GLOBAL_STATE()->fast_reload_mode){ + debug_printf("===> GET_GLOBAL_STATE()->fast_reload_mode: TRUE\n"); + /* we've loaded an external snapshot folder - so do nothing and don't create any new snapshot files */ + } + else{ + debug_printf("===> GET_GLOBAL_STATE()->fast_reload_mode: FALSE\n"); + /* store the current state as a snapshot folder */ + fast_reload_create_in_memory(get_fast_reload_snapshot()); + fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_path); + } + } + else{ + debug_printf("===> GET_GLOBAL_STATE()->fast_reload_enabled: FALSE\n"); + /* so we haven't set a path for our snapshot files - just store everything in memory */ + fast_reload_create_in_memory(get_fast_reload_snapshot()); + } +} + +static inline void perform_task_no_block_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){ + CPUState* cpu = qemu_get_cpu(0); + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + qemu_mutex_lock_iothread(); + + switch(request){ + case REQUEST_SAVE_SNAPSHOT_PRE: + vm_stop(RUN_STATE_SAVE_VM); + //fast_reload_create_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path, true); + fast_reload_create_in_memory(get_fast_reload_snapshot()); + fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path); + + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + qemu_mutex_unlock_iothread(); + return; /* return here to skip the vm_start call */ + case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP: + adjust_rip(env, get_fast_reload_snapshot()); + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + case REQUEST_SAVE_SNAPSHOT_ROOT: + + kvm_arch_get_registers(cpu); + vm_stop(RUN_STATE_SAVE_VM); + create_root_snapshot(); + + fast_reload_restore(get_fast_reload_snapshot()); + //call_fast_change_handlers(); + break; + + case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP: + adjust_rip(env, get_fast_reload_snapshot()); + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + case REQUEST_SAVE_SNAPSHOT_TMP: + fast_reload_create_tmp_snapshot(get_fast_reload_snapshot()); + fast_reload_restore(get_fast_reload_snapshot()); + + break; + case REQUEST_LOAD_SNAPSHOT_PRE: + abort(); + break; + case REQUEST_LOAD_SNAPSHOT_ROOT: + case REQUEST_LOAD_SNAPSHOT_TMP: + + //vm_stop(RUN_STATE_RESTORE_VM); + fast_reload_restore(get_fast_reload_snapshot()); + //call_fast_change_handlers(); + break; + + case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP: + kvm_arch_get_registers(cpu); + + adjust_rip(env, get_fast_reload_snapshot()); + set_nested_rip(cpu, env->eip); + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + + //case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED: + kvm_arch_get_registers(cpu); + vm_stop(RUN_STATE_SAVE_VM); + create_root_snapshot(); + + fast_reload_restore(get_fast_reload_snapshot()); + break; + + default: + abort(); + } + + vm_start(); + 
//call_fast_change_handlers(); + cpu_resume(cpu); + qemu_mutex_unlock_iothread(); +} + +static inline void perform_task_block_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){ + switch(request){ + case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP: + case REQUEST_SAVE_SNAPSHOT_PRE: + vm_stop(RUN_STATE_SAVE_VM); + //fast_reload_create_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path, true); + fast_reload_create_in_memory(get_fast_reload_snapshot()); + fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path); + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + return; /* return here to skip the vm_start call */ + case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP: + case REQUEST_SAVE_SNAPSHOT_ROOT: + /* TODO: fix this */ + vm_stop(RUN_STATE_SAVE_VM); + create_root_snapshot(); /* TODO: Fix this -> fucky in ahci mode */ + //fast_reload_create_in_memory(get_fast_reload_snapshot()); + break; + case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP: + case REQUEST_SAVE_SNAPSHOT_TMP: + vm_stop(RUN_STATE_SAVE_VM); + fast_reload_create_tmp_snapshot(get_fast_reload_snapshot()); + break; + case REQUEST_LOAD_SNAPSHOT_PRE: + abort(); + break; + case REQUEST_LOAD_SNAPSHOT_ROOT: + case REQUEST_LOAD_SNAPSHOT_TMP: + vm_stop(RUN_STATE_RESTORE_VM); + fast_reload_restore(get_fast_reload_snapshot()); + break; + + default: + abort(); + } + vm_start(); +} + +static inline void perform_task(fast_vm_reload_sync_t* self, FastReloadRequest request){ + switch(self->mode){ + case RELOAD_MODE_DEBUG: + abort(); + perform_task_debug_mode(self, request); + break; + case RELOAD_MODE_NO_BLOCK: + perform_task_no_block_mode(self, request); + break; + case RELOAD_MODE_BLOCK: + perform_task_block_mode(self, request); + break; + } +} + +void request_fast_vm_reload(fast_vm_reload_sync_t* self, FastReloadRequest request){ + assert(!self->request_exists); + assert(self->current_request == REQUEST_VOID); + + if(self->mode == RELOAD_MODE_NO_BLOCK){ + CPUState* cpu = qemu_get_cpu(0); + kvm_arch_get_registers(cpu); + //perform_task(self, request); + perform_task_no_block_mode(self, request); + } + else{ + self->current_request = request; + self->request_exists = true; + self->request_exists_pre = true; + } +} + +bool reload_request_exists(fast_vm_reload_sync_t* self){ + return self->request_exists_pre; +} + +void reload_request_discard_tmp(fast_vm_reload_sync_t* self){ + fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot()); +} + +bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self){ + if(self->request_exists_pre){ + self->request_exists_pre = false; + abort(); + +/* + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false); + qemu_clock_enable(QEMU_CLOCK_VIRTUAL_RT, false); + qemu_clock_enable(QEMU_CLOCK_HOST, false); +*/ + + //printf("%s: task found: %d\n", __func__, self->current_request); + + CPUState* cpu = qemu_get_cpu(0); + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + kvm_arch_get_registers(cpu); + + switch(self->current_request){ + case REQUEST_VOID: + fprintf(stderr, "%s: REQUEST_VOID requested!\n", __func__); + abort(); + + case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP: + case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP: + case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP: + adjust_rip(env, get_fast_reload_snapshot()); + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + qemu_system_vmstop_request(RUN_STATE_SAVE_VM); + break; + + case REQUEST_SAVE_SNAPSHOT_PRE: + case REQUEST_SAVE_SNAPSHOT_ROOT: + case REQUEST_SAVE_SNAPSHOT_TMP: + 
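/* plain save requests: the guest RIP is already correct, no adjustment needed */ +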
qemu_system_vmstop_request(RUN_STATE_SAVE_VM); + break; + + case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP: + case REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP: + adjust_rip(env, get_fast_reload_snapshot()); + set_nested_rip(cpu, env->eip); + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + qemu_system_vmstop_request(RUN_STATE_SAVE_VM); + break; + + case REQUEST_LOAD_SNAPSHOT_PRE: + case REQUEST_LOAD_SNAPSHOT_ROOT: + case REQUEST_LOAD_SNAPSHOT_TMP: + qemu_system_vmstop_request(RUN_STATE_RESTORE_VM); + break; + + default: + fprintf(stderr, "%s: Unknown request: %d\n", __func__, self->current_request); + abort(); + } + return true; + } + return false; +} + +bool check_if_relood_request_exists_post(fast_vm_reload_sync_t* self){ + if(self->request_exists){ + FastReloadRequest request = self->current_request; + self->request_exists = false; + + assert(self->current_request != REQUEST_VOID); + self->current_request = REQUEST_VOID; + perform_task(self, request); + +/* + qemu_clock_enable(QEMU_CLOCK_HOST, true); + qemu_clock_enable(QEMU_CLOCK_VIRTUAL_RT, true); + qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true); +*/ + + return true; + } + return false; +} diff --git a/nyx/fast_vm_reload_sync.h b/nyx/fast_vm_reload_sync.h new file mode 100644 index 0000000000..d11bffe472 --- /dev/null +++ b/nyx/fast_vm_reload_sync.h @@ -0,0 +1,64 @@ +#pragma once + +#include <stdbool.h> + +typedef enum FastReloadRequest { + REQUEST_VOID, + + /* create snapshots */ + REQUEST_SAVE_SNAPSHOT_PRE, + REQUEST_SAVE_SNAPSHOT_ROOT, + REQUEST_SAVE_SNAPSHOT_TMP, + + /* create snapshot and fix RIP (- sizeof(vmcall)) */ + REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP, + REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP, + REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP, + + /* create nested snapshots */ + REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP, + REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP, + + /* load snapshots */ + REQUEST_LOAD_SNAPSHOT_PRE, + REQUEST_LOAD_SNAPSHOT_ROOT, + REQUEST_LOAD_SNAPSHOT_TMP, + + /* check if snapshot exists */ + REQUEST_PRE_EXISTS, + REQUEST_ROOT_EXISTS, + REQUEST_TMP_EXISTS, + + //REQUEST_DISCARD_SNAPSHOT_TMP, +} FastReloadRequest; + +typedef enum FastReloadMode { + RELOAD_MODE_DEBUG, /* savevm / loadvm based on QEMU's qcow2 storage - only for debug purposes */ + RELOAD_MODE_NO_BLOCK, /* fastest mode - works only if no active block device is attached (e.g. 
diff --git a/nyx/fast_vm_reload_sync.h b/nyx/fast_vm_reload_sync.h
new file mode 100644
index 0000000000..d11bffe472
--- /dev/null
+++ b/nyx/fast_vm_reload_sync.h
@@ -0,0 +1,64 @@
+#pragma once
+
+#include <stdbool.h>
+
+typedef enum FastReloadRequest {
+    REQUEST_VOID,
+
+    /* create snapshots */
+    REQUEST_SAVE_SNAPSHOT_PRE,
+    REQUEST_SAVE_SNAPSHOT_ROOT,
+    REQUEST_SAVE_SNAPSHOT_TMP,
+
+    /* create snapshot and fix RIP (- sizeof(vmcall)) */
+    REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP,
+    REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP,
+    REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP,
+
+    /* create nested snapshots */
+    REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP,
+    REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP,
+
+    /* load snapshots */
+    REQUEST_LOAD_SNAPSHOT_PRE,
+    REQUEST_LOAD_SNAPSHOT_ROOT,
+    REQUEST_LOAD_SNAPSHOT_TMP,
+
+    /* check if snapshot exists */
+    REQUEST_PRE_EXISTS,
+    REQUEST_ROOT_EXISTS,
+    REQUEST_TMP_EXISTS,
+
+    //REQUEST_DISCARD_SNAPSHOT_TMP,
+} FastReloadRequest;
+
+typedef enum FastReloadMode {
+    RELOAD_MODE_DEBUG,    /* savevm / loadvm based on QEMU's qcow2 storage - only for debug purposes */
+    RELOAD_MODE_NO_BLOCK, /* fastest mode - works only if no active block device is attached (e.g. initramfs mode) */
+    RELOAD_MODE_BLOCK,
+} FastReloadMode;
+
+
+
+typedef struct fast_vm_reload_sync_s{
+
+    bool request_exists;
+    bool request_exists_pre;
+    FastReloadRequest current_request;
+
+    bool debug_mode;
+
+    FastReloadMode mode;
+
+} fast_vm_reload_sync_t;
+
+
+fast_vm_reload_sync_t* init_fast_vm_reload_sync(void);
+void request_fast_vm_reload(fast_vm_reload_sync_t* self, FastReloadRequest request);
+bool reload_request_exists(fast_vm_reload_sync_t* self);
+bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self);
+bool check_if_relood_request_exists_post(fast_vm_reload_sync_t* self);
+
+
+bool fast_snapshot_exists(fast_vm_reload_sync_t* self, FastReloadRequest type);
+void reload_request_discard_tmp(fast_vm_reload_sync_t* self);
\ No newline at end of file
diff --git a/nyx/file_helper.c b/nyx/file_helper.c
new file mode 100644
index 0000000000..5ab7153b49
--- /dev/null
+++ b/nyx/file_helper.c
@@ -0,0 +1,141 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <assert.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "redqueen.h"
+#include "debug.h"
+#include "file_helper.h"
+
+
+///////////////////////////////////////////////////////////////////////////////////
+// Private Helper Functions Declarations
+///////////////////////////////////////////////////////////////////////////////////
+
+size_t _count_lines_in_file(FILE* fp);
+
+void _parse_addresses_in_file(FILE* fp, size_t num_addrs, uint64_t* addrs);
+
+///////////////////////////////////////////////////////////////////////////////////
+// Public Functions
+///////////////////////////////////////////////////////////////////////////////////
+
+void write_debug_result(char* buf){
+    int unused __attribute__((unused));
+    int fd = open("/tmp/qemu_debug.txt", O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    assert(fd > 0);
+    unused = write(fd, buf, strlen(buf));
+    close(fd);
+}
+
+void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs){
+    FILE* fp = fopen(path, "r");
+    if(!fp){
+        *num_addrs = 0;
+        *addrs = NULL;
+        return;
+    }
+
+    *num_addrs = _count_lines_in_file(fp);
+    if(*num_addrs == 0){
+        *addrs = NULL;
+        goto exit_function;
+    }
+
+    assert(*num_addrs < 0xffff);
+    *addrs = malloc(sizeof(uint64_t)*(*num_addrs));
+    _parse_addresses_in_file(fp, *num_addrs, *addrs);
+
+exit_function:
+    fclose(fp);
+}
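parse_address_file() makes two passes over a text file that holds one hexadecimal address per line: _count_lines_in_file() first counts (and validates) the entries, then _parse_addresses_in_file() fills a freshly allocated array that the caller owns. A usage sketch; the path and file contents are examples, not artifacts this patch creates:

    size_t num_addrs = 0;
    uint64_t *addrs = NULL;
    parse_address_file("/tmp/breakpoints.txt", &num_addrs, &addrs); /* lines like "0xdeadbeef" */
    for (size_t i = 0; i < num_addrs; i++)
        printf("address %zu: 0x%lx\n", i, addrs[i]);
    free(addrs); /* caller owns the buffer */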
+
+
+int re_fd = 0;
+int se_fd = 0;
+int trace_fd = 0;
+
+void write_re_result(char* buf){
+    int unused __attribute__((unused));
+    if (!re_fd)
+        re_fd = open(redqueen_workdir.redqueen_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    unused = write(re_fd, buf, strlen(buf));
+}
+
+void write_trace_result(redqueen_trace_t* trace_state){
+    //int fd;
+    int unused __attribute__((unused));
+    if (!trace_fd)
+        trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    redqueen_trace_write_file(trace_state, trace_fd);
+    //unused = write(trace_fd, buf, strlen(buf));
+    //close(fd);
+}
+
+void fsync_all_traces(void){
+    if (trace_fd){
+        fsync(trace_fd);
+    }
+    if (se_fd){
+        fsync(se_fd);
+    }
+    if (re_fd){
+        fsync(re_fd);
+    }
+}
+
+void write_se_result(char* buf){
+    //int fd;
+    int unused __attribute__((unused));
+    if (!se_fd)
+        se_fd = open(redqueen_workdir.symbolic_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    unused = write(se_fd, buf, strlen(buf));
+    //close(fd);
+}
+
+void delete_trace_files(void){
+    int unused __attribute__((unused));
+    if (!trace_fd)
+        trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    unused = ftruncate(trace_fd, 0);
+}
+
+void delete_redqueen_files(void){
+    int unused __attribute__((unused));
+    if (!re_fd)
+        re_fd = open(redqueen_workdir.redqueen_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    if (!se_fd)
+        se_fd = open(redqueen_workdir.symbolic_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
+    unused = ftruncate(re_fd, 0);
+    unused = ftruncate(se_fd, 0);
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+// Private Helper Functions Definitions
+///////////////////////////////////////////////////////////////////////////////////
+
+size_t _count_lines_in_file(FILE* fp){
+    size_t val = 0;
+    size_t count = 0;
+    while(1){
+        int scanres = fscanf(fp, "%lx", &val);
+        if(scanres == 0){
+            printf("WARNING: invalid line in address file\n");
+            assert(scanres != 0);
+        }
+        if(scanres == -1){break;}
+        count += 1;
+    }
+    rewind(fp);
+    return count;
+}
+
+void _parse_addresses_in_file(FILE* fp, size_t num_addrs, uint64_t* addrs){
+    for(size_t i = 0; i < num_addrs; i++){
+        assert(fscanf(fp, "%lx", &addrs[i]) == 1);
+    }
+}
+
diff --git a/nyx/file_helper.h b/nyx/file_helper.h
new file mode 100644
index 0000000000..485863e8b8
--- /dev/null
+++ b/nyx/file_helper.h
@@ -0,0 +1,25 @@
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include "redqueen_trace.h"
+
+//doesn't take ownership of path, num_addrs or addrs
+void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs);
+
+//doesn't take ownership of buf
+void write_re_result(char* buf);
+
+//doesn't take ownership of buf
+void write_se_result(char* buf);
+
+//doesn't take ownership of trace_state
+void write_trace_result(redqueen_trace_t* trace_state);
+
+//doesn't take ownership of buf
+void write_debug_result(char* buf);
+
+void delete_redqueen_files(void);
+
+void delete_trace_files(void);
+
+void fsync_all_traces(void);
diff --git a/nyx/helpers.c b/nyx/helpers.c
new file mode 100644
index 0000000000..84548da91d
--- /dev/null
+++ b/nyx/helpers.c
@@ -0,0 +1,101 @@
+#include <stdint.h>
+#include <stdio.h>
+#include "nyx/helpers.h"
+#include "qemu/osdep.h"
+#include <assert.h>
+#include <sys/stat.h>
+#include <capstone.h>
+#include "qemu-common.h"
+#include "exec/memory.h"
+#include "qemu/main-loop.h"
+#include "sysemu/kvm_int.h"
+#include "sysemu/kvm.h"
+#include "nyx/state.h"
+#include "nyx/memory_access.h"
+#include "nyx/debug.h"
+
+uint64_t get_rip(CPUState *cpu){
+    kvm_arch_get_registers(cpu);
+    X86CPU *x86_cpu = X86_CPU(cpu);
+    CPUX86State *env = &x86_cpu->env;
+    kvm_cpu_synchronize_state(cpu);
+    return env->eip;
+}
+
+int get_capstone_mode(int word_width_in_bits){
+    switch(word_width_in_bits){
+        case 64:
+            return CS_MODE_64;
+        case 32:
+            return CS_MODE_32;
+        default:
+            assert(false);
+    }
+}
+
+void fuzz_bitmap_reset(void){
+    if(GET_GLOBAL_STATE()->shared_bitmap_ptr){
+        //fprintf(stderr, "%s: %lx %lx\n", __func__, fuzz_bitmap, fuzz_bitmap_size);
+        memset(GET_GLOBAL_STATE()->shared_bitmap_ptr, 0x00, GET_GLOBAL_STATE()->shared_bitmap_size + GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
+    }
+}
+
+void fuzz_bitmap_copy_to_buffer(void* buffer){
+    if(GET_GLOBAL_STATE()->shared_bitmap_ptr){
+        memcpy(buffer, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_size + GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
+    }
+}
+
+void fuzz_bitmap_copy_from_buffer(void* buffer){
+    if(GET_GLOBAL_STATE()->shared_bitmap_ptr){
+        memcpy(GET_GLOBAL_STATE()->shared_bitmap_ptr, buffer, GET_GLOBAL_STATE()->shared_bitmap_size + GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
+    }
+}
+
+void apply_capabilities(CPUState *cpu){
+    //X86CPU *cpux86 = X86_CPU(cpu);
+    //CPUX86State *env = &cpux86->env;
+
+    
debug_fprintf(stderr, "%s: agent supports timeout detection: %d\n", __func__, GET_GLOBAL_STATE()->cap_timeout_detection); + debug_fprintf(stderr, "%s: agent supports only-reload mode: %d\n", __func__, GET_GLOBAL_STATE()->cap_only_reload_mode); + debug_fprintf(stderr, "%s: agent supports compile-time tracing: %d\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing ); + + if(GET_GLOBAL_STATE()->cap_compile_time_tracing){ + GET_GLOBAL_STATE()->pt_trace_mode = false; + + debug_fprintf(stderr, "%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr); + kvm_arch_get_registers_fast(cpu); + + debug_printf("--------------------------\n"); + debug_printf("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr); + debug_printf("GET_GLOBAL_STATE()->shared_bitmap_fd: %lx\n", GET_GLOBAL_STATE()->shared_bitmap_fd); + debug_printf("GET_GLOBAL_STATE()->shared_bitmap_size: %lx\n", GET_GLOBAL_STATE()->shared_bitmap_size); + debug_printf("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3); + debug_printf("--------------------------\n"); + + for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000){ + assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr+ i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3)); + } + set_cap_agent_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true); + } + + if(GET_GLOBAL_STATE()->cap_ijon_tracing){ + debug_fprintf(stderr, "%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr); + + kvm_arch_get_registers_fast(cpu); + for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_ijon_bitmap_size; i += 0x1000){ + assert(remap_slot(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr + i, (GET_GLOBAL_STATE()->shared_bitmap_size+i)/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3)); + } + set_cap_agent_ijon_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true); + } +} + +bool folder_exits(const char* path){ + struct stat sb; + return (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode)); +} + +bool file_exits(const char* path){ + struct stat sb; + return (stat (path, &sb) == 0); +} diff --git a/nyx/helpers.h b/nyx/helpers.h new file mode 100644 index 0000000000..88335a37ea --- /dev/null +++ b/nyx/helpers.h @@ -0,0 +1,15 @@ +#pragma once + +#include "qemu/osdep.h" + +uint64_t get_rip(CPUState *cpu); +void fuzz_bitmap_reset(void); +void fuzz_bitmap_copy_to_buffer(void* buffer); +void fuzz_bitmap_copy_from_buffer(void* buffer); + +int get_capstone_mode(int word_width_in_bits); + +void apply_capabilities(CPUState *cpu); + +bool folder_exits(const char* path); +bool file_exits(const char* path); diff --git a/nyx/hypercall.c b/nyx/hypercall.c new file mode 100644 index 0000000000..d11ad05383 --- /dev/null +++ b/nyx/hypercall.c @@ -0,0 +1,1299 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. 
+ +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#include "qemu/osdep.h" +#include +#include +#include +#include "qemu-common.h" +#include "exec/memory.h" +#include "qemu/main-loop.h" + + +#include "sysemu/kvm_int.h" +#include "sysemu/runstate.h" +#include "sysemu/cpus.h" + +#include "sysemu/kvm_int.h" +#include "sysemu/kvm.h" +#include "sysemu/cpus.h" + +#include "sysemu/hw_accel.h" + + +#include "nyx/pt.h" +#include "nyx/hypercall.h" +#include "nyx/memory_access.h" +#include "nyx/interface.h" +#include "nyx/printk.h" +#include "nyx/debug.h" +#include "nyx/synchronization.h" +#include "nyx/fast_vm_reload.h" +#include "nyx/kvm_nested.h" +#include "nyx/state.h" +#include "sysemu/runstate.h" +#include "nyx/helpers.h" +#include "nyx/nested_hypercalls.h" +#include "nyx/fast_vm_reload_sync.h" + +#include "nyx/redqueen.h" + +//#define DEBUG_HPRINTF + +bool reload_mode_temp = false; +bool notifiers_enabled = false; +//uint32_t hprintf_counter = 0; + +bool hypercall_enabled = false; +void* program_buffer = NULL; +char info_buffer[INFO_SIZE]; +char hprintf_buffer[HPRINTF_SIZE]; + +static bool init_state = true; + +void skip_init(void){ + init_state = false; +} + +bool pt_hypercalls_enabled(void){ + return hypercall_enabled; +} + +void pt_setup_enable_hypercalls(void){ + hypercall_enabled = true; +} + +void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end){ + debug_fprintf(stderr, "--> %s\n", __func__); + if (filter_id < INTEL_PT_MAX_RANGES){ + + GET_GLOBAL_STATE()->pt_ip_filter_configured[filter_id] = true; + GET_GLOBAL_STATE()->pt_ip_filter_a[filter_id] = start; + GET_GLOBAL_STATE()->pt_ip_filter_b[filter_id] = end; + + } +} + +void hypercall_commit_filter(void){ +} + +bool setup_snapshot_once = false; + + +void pt_setup_program(void* ptr){ + program_buffer = ptr; +} + + +bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //fprintf(stderr, "%s\n", __func__); +/* + kvm_arch_get_registers(cpu); + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + printf("%s: exception_injected: %d\n", __func__, env->exception_injected); +*/ + if(hypercall_enabled){ + if (init_state){ + set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 2); + //fprintf(stderr, "--------------------\n"); + synchronization_lock(); + + } else { + if(!setup_snapshot_once){ + //pt_reset_bitmap(); + fuzz_bitmap_reset(); + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP); + setup_snapshot_once = true; + + for(int i = 0; i < INTEL_PT_MAX_RANGES; i++){ + //printf("=> %d\n", i); + //if(filter_enabled[i]){ + if(GET_GLOBAL_STATE()->pt_ip_filter_configured[i]){ + pt_enable_ip_filtering(cpu, i, true, false); + } + } + pt_init_decoder(cpu); + + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT); + + //printf("DONE!\n"); + /* + qemu_mutex_lock_iothread(); + QEMU_PT_PRINTF(CORE_PREFIX, "...GOOOOOO!!!!"); + fast_reload_restore(get_fast_reload_snapshot()); + QEMU_PT_PRINTF(CORE_PREFIX, "...DONE!!!!"); + qemu_mutex_unlock_iothread(); + */ + GET_GLOBAL_STATE()->in_fuzzing_mode = true; + set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3); + + 
//sigprof_enabled = true; + //reset_timeout_detector(&GET_GLOBAL_STATE()->timeout_detector); + } + else{ + //set_illegal_payload(); + synchronization_lock(); + reset_timeout_detector(&GET_GLOBAL_STATE()->timeout_detector); + GET_GLOBAL_STATE()->in_fuzzing_mode = true; + + + //printf("RIP => %lx\n", get_rip(cpu)); + return true; + } + } + } + return false; +} + +bool acquire_print_once_bool = true; +bool release_print_once_bool = true; + +static void acquire_print_once(CPUState *cpu){ + if(acquire_print_once_bool){ + acquire_print_once_bool = false; + kvm_arch_get_registers(cpu); + //X86CPU *x86_cpu = X86_CPU(cpu); + //CPUX86State *env = &x86_cpu->env; + debug_fprintf(stderr, "handle_hypercall_kafl_acquire at:%lx\n", get_rip(cpu)); + //disassemble_at_rip(STDERR_FILENO, get_rip(cpu), cpu, env->cr[3]); + } +} + +void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //return; + if(hypercall_enabled){ + if (!init_state){ + acquire_print_once(cpu); + //init_det_filter(); + synchronization_enter_fuzzing_loop(cpu); + /* + if (pt_enable(cpu, false) == 0){ + cpu->pt_enabled = true; + } + */ + } + } +} + +static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + debug_printf("------------ %s\n", __func__); + if(hypercall_enabled && !setup_snapshot_once){ + QEMU_PT_PRINTF(CORE_PREFIX, "Payload Address:\t%lx", hypercall_arg); + kvm_arch_get_registers(cpu); + CPUX86State *env = &(X86_CPU(cpu))->env; + GET_GLOBAL_STATE()->parent_cr3 = env->cr[3] & 0xFFFFFFFFFFFFF000ULL; + QEMU_PT_PRINTF(CORE_PREFIX, "Payload CR3:\t%lx", (uint64_t)GET_GLOBAL_STATE()->parent_cr3 ); + //print_48_paging2(GET_GLOBAL_STATE()->parent_cr3); + + if(hypercall_arg&0xFFF){ + fprintf(stderr, "Error: Payload buffer is not page-aligned! 
(0x%lx)\n", hypercall_arg); + abort(); + } + + remap_payload_buffer(hypercall_arg, cpu); + set_payload_buffer(hypercall_arg); + } +} + +static void set_return_value(CPUState *cpu, uint64_t return_value){ + kvm_arch_get_registers(cpu); + CPUX86State *env = &(X86_CPU(cpu))->env; + env->regs[R_EAX] = return_value; + kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE); +} + +static void handle_hypercall_kafl_req_stream_data(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + static uint8_t req_stream_buffer[0x1000]; + + kvm_arch_get_registers(cpu); + /* address has to be page aligned */ + if((hypercall_arg&0xFFF) != 0){ + debug_fprintf(stderr, "%s: ERROR -> address is not page aligned!\n", __func__); + set_return_value(cpu, 0xFFFFFFFFFFFFFFFFUL); + } + else{ + read_virtual_memory(hypercall_arg, (uint8_t*)req_stream_buffer, 0x100, cpu); + uint64_t bytes = sharedir_request_file(GET_GLOBAL_STATE()->sharedir, (const char *)req_stream_buffer, req_stream_buffer); + if(bytes != 0xFFFFFFFFFFFFFFFFUL){ + write_virtual_memory(hypercall_arg, (uint8_t*)req_stream_buffer, bytes, cpu); + } + set_return_value(cpu, bytes); + } +} + +typedef struct req_data_bulk_s{ + char file_name[256]; + uint64_t num_addresses; + uint64_t addresses[479]; +} req_data_bulk_t; + +static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + static uint8_t req_stream_buffer[0x1000]; + //static uint64_t addresses[512]; + req_data_bulk_t req_data_bulk_data; + + kvm_arch_get_registers(cpu); + /* address has to be page aligned */ + if((hypercall_arg&0xFFF) != 0){ + debug_fprintf(stderr, "%s: ERROR -> address is not page aligned!\n", __func__); + set_return_value(cpu, 0xFFFFFFFFFFFFFFFFUL); + } + else{ + uint64_t bytes = 0; + read_virtual_memory(hypercall_arg, (uint8_t*)&req_data_bulk_data, 0x1000, cpu); + + assert(req_data_bulk_data.num_addresses <= 479); + for(int i = 0; i < req_data_bulk_data.num_addresses; i++){ + uint64_t ret_val = sharedir_request_file(GET_GLOBAL_STATE()->sharedir, (const char *)req_data_bulk_data.file_name, req_stream_buffer); + if(ret_val != 0xFFFFFFFFFFFFFFFFUL){ + bytes += ret_val; + write_virtual_memory((uint64_t)req_data_bulk_data.addresses[i], (uint8_t*)req_stream_buffer, ret_val, cpu); + } + else if(ret_val == 0){ + break; + } + else{ + bytes = 0xFFFFFFFFFFFFFFFFUL; + break; + } + + } + + //fprintf(stderr, "%s -> %d\n", __func__, bytes); + set_return_value(cpu, bytes); + } +} + + +static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + uint64_t buffer[3]; + read_virtual_memory(hypercall_arg, (uint8_t*)&buffer, sizeof(buffer), cpu); + + if(buffer[2] >= 2){ + QEMU_PT_PRINTF(CORE_PREFIX, "%s: illegal range=%ld\n", __func__, buffer[2]); + return; + } + + if(GET_GLOBAL_STATE()->pt_ip_filter_configured[buffer[2]]){ + QEMU_PT_PRINTF(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 1)"); + return; + } + + if (buffer[0] != 0 && buffer[1] != 0 ){ + GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]] = buffer[0]; + GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]] = buffer[1]; + GET_GLOBAL_STATE()->pt_ip_filter_configured[buffer[2]] = true; + QEMU_PT_PRINTF(CORE_PREFIX, "Configuring agent-provided address ranges:"); + QEMU_PT_PRINTF(CORE_PREFIX, "\tIP0: %lx-%lx [ENABLED]", GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]], GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]]); + } + else{ + QEMU_PT_PRINTF(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 2)"); + } + +} + +static 
void handle_hypercall_get_program(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + + //fprintf(stderr, "%s\n", __func__); + /* + return; + + if(!get_fast_reload_snapshot()->qemu_state){ + fast_reload_create_in_memory(get_fast_reload_snapshot(), true); + } + */ +/* + qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + + qemu_mutex_unlock_iothread(); + return; + */ + kvm_arch_get_registers(cpu); + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + if(hypercall_enabled){ + if(program_buffer){ + + if (env->cr[4] & CR4_PAE_MASK) { + if (env->hflags & HF_LMA_MASK) { + //fprintf(stderr, "IN 64Bit MODE\n"); + } + else{ + debug_fprintf(stderr, "IN 32Bit PAE MODE\n"); + abort(); + } + } + else{ + debug_fprintf(stderr, "IN 32Bit MODE\n"); + abort(); + } + + //print_48_paging2(env->cr[3]); + write_virtual_memory(hypercall_arg, program_buffer, PROGRAM_SIZE, cpu); + } + } +} + + +static void release_print_once(CPUState *cpu){ + if(release_print_once_bool){ + release_print_once_bool = false; + kvm_arch_get_registers(cpu); + //X86CPU *x86_cpu = X86_CPU(cpu); + //CPUX86State *env = &x86_cpu->env; + debug_fprintf(stderr, "handle_hypercall_kafl_release at:%lx\n", get_rip(cpu)); + //disassemble_at_rip(STDERR_FILENO, get_rip(cpu), cpu, env->cr[3]); + } +} + +void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //fprintf(stderr, "%s\n", __func__); + if(hypercall_enabled){ + if (init_state){ + init_state = false; + + + + //hypercall_snd_char(KAFL_PROTO_RELEASE); + //QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_RELEASE"); + + } else { + + + synchronization_disable_pt(cpu); + release_print_once(cpu); + /* + if(reload_mode || reload_mode_temp){ + qemu_mutex_lock_iothread(); + //QEMU_PT_PRINTF(CORE_PREFIX, "...GOOOOOO 2 !!!!"); + fast_reload_restore(get_fast_reload_snapshot()); + //QEMU_PT_PRINTF(CORE_PREFIX, "...DONE 2 !!!!"); + qemu_mutex_unlock_iothread(); + } + */ + } + } +} + +struct kvm_set_guest_debug_data { + struct kvm_guest_debug dbg; + int err; +}; + +void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //assert(false); + kvm_arch_get_registers_fast(cpu); + + debug_fprintf(stderr, "%s --> %lx\n", __func__, get_rip(cpu)); + + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_MTF); + + kvm_remove_all_breakpoints(cpu); + kvm_insert_breakpoint(cpu, GET_GLOBAL_STATE()->dump_page_addr, 1, 1); + kvm_update_guest_debug(cpu, 0); + + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, GET_GLOBAL_STATE()->pt_c3_filter); + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3); +} + +void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page){ + //fprintf(stderr, "--> %s\n", __func__); + kvm_arch_get_registers_fast(cpu); + + debug_fprintf(stderr, "%s --> %lx\n", __func__, get_rip(cpu)); + + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_MTF); + + bool success = false; + //fprintf(stderr, "page_cache_fetch = %lx\n", page_cache_fetch(GET_GLOBAL_STATE()->page_cache, page, &success, false)); + page_cache_fetch(GET_GLOBAL_STATE()->page_cache, page, &success, false); + if(success){ + + debug_fprintf(stderr, "%s: SUCCESS: %d\n", __func__, success); + kvm_remove_all_breakpoints(cpu); + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3); + + } + else{ + debug_fprintf(stderr, "%s: FAIL: %d\n", __func__, success); + //assert(false); + + kvm_remove_all_breakpoints(cpu); + + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3); + 
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_MTF); + } + +} + +static inline void set_page_dump_bp(CPUState *cpu, uint64_t cr3, uint64_t addr){ + + debug_fprintf(stderr, "\n\n%s %lx %lx\n\n", __func__, cr3, addr); + kvm_remove_all_breakpoints(cpu); + kvm_insert_breakpoint(cpu, addr, 1, 1); + kvm_update_guest_debug(cpu, 0); + + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, cr3); + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3); +} + +static void handle_hypercall_kafl_cr3(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(hypercall_enabled){ + //QEMU_PT_PRINTF(CORE_PREFIX, "CR3 address:\t\t%lx", hypercall_arg); + pt_set_cr3(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL, false); + if(GET_GLOBAL_STATE()->dump_page){ + set_page_dump_bp(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL, GET_GLOBAL_STATE()->dump_page_addr); + } + } +} + +static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(hypercall_enabled){ + QEMU_PT_PRINTF(CORE_PREFIX, "Panic address:\t%lx", hypercall_arg); + if(notifiers_enabled){ + write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD, PAYLOAD_BUFFER_SIZE, cpu); + } + } +} + +static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(hypercall_enabled){ + QEMU_PT_PRINTF(CORE_PREFIX, "kASAN address:\t%lx", hypercall_arg); + if(notifiers_enabled){ + write_virtual_memory(hypercall_arg, (uint8_t*)KASAN_PAYLOAD, PAYLOAD_BUFFER_SIZE, cpu); + } + } +} + +//#define PANIC_DEBUG + +static void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + static char reason[1024]; + if(hypercall_enabled){ +#ifdef PANIC_DEBUG + if(hypercall_arg){ + //fprintf(stderr, "Panic in user mode!\n"); + //QEMU_PT_PRINTF(CORE_PREFIX, "Panic in user mode!"); + } else{ + debug_fprintf(stderr, "Panic in kernel mode!\n"); + QEMU_PT_PRINTF(CORE_PREFIX, "Panic in kernel mode!"); + //assert(0); + } +#endif + if(fast_reload_snapshot_exists(get_fast_reload_snapshot())){ + + if(hypercall_arg & 0x8000000000000000ULL){ + + reason[0] = '\x00'; + + uint64_t address = hypercall_arg & 0x7FFFFFFFFFFFULL; + uint64_t signal = (hypercall_arg & 0x7800000000000ULL) >> 47; + + snprintf(reason, 1024, "PANIC IN USER MODE (SIG: %d\tat 0x%lx)\n", (uint8_t)signal, address); + set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, reason, strlen(reason)); + } + else{ + switch(hypercall_arg){ + case 0: + set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, (char*)"PANIC IN KERNEL MODE!\n", strlen("PANIC IN KERNEL MODE!\n")); + break; + case 1: + set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, (char*)"PANIC IN USER MODE!\n", strlen("PANIC IN USER MODE!\n")); + break; + default: + set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, (char*)"???\n", strlen("???\n")); + break; + } + + } + synchronization_lock_crash_found(); + //synchronization_stop_vm_crash(cpu); + } else{ + fprintf(stderr, "Panic detected during initialization of stage 1 or stage 2 loader (%lx)\n", hypercall_arg); + abort(); + //hypercall_snd_char(KAFL_PROTO_CRASH); + QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_CRASH"); + + } + } +} + +static double get_time(void){ + struct timeval t; + struct timezone tzp; + gettimeofday(&t, &tzp); + return t.tv_sec + t.tv_usec*1e-6; +} + +static void print_time_diff(int iterations){ + + static bool init = true; + static double start_time = 0.0; + static double end_time = 0.0; + + if(init){ + 
init = false; + printf("start time is zero!\n"); + start_time = get_time(); + } + else{ + end_time = get_time(); + double elapsed_time = end_time - start_time; + printf("Done in %f seconds\n", elapsed_time); + printf("Performance: %f\n", iterations/elapsed_time); + start_time = get_time(); + } +} + +static void meassure_performance(void){ + static int perf_counter = 0; + if ((perf_counter%1000) == 0){ + //printf("perf_counter -> %d \n", perf_counter); + print_time_diff(1000); + } + perf_counter++; +} +static void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //X86CPU *x86_cpu = X86_CPU(cpu); + //CPUX86State *env = &x86_cpu->env; + static bool first = true; + + //printf("CALLED %s: %lx\n", __func__, hypercall_arg); + switch(hypercall_arg&0xFFF){ + case 0: /* create root snapshot */ + if(!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_ROOT_EXISTS)){ + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT); + } + break; + case 1: /* create tmp snapshot */ + //printf("%s: create tmp...(RIP: %lx)\n", __func__, get_rip(cpu)); + if(!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){ + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP); + } + break; + case 2: /* load root snapshot (+ discard tmp snapshot) */ + //printf("%s: load root...(RIP: %lx)\n", __func__, get_rip(cpu)); + if(fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){ + reload_request_discard_tmp(GET_GLOBAL_STATE()->reload_state); + } + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT); + //meassure_performance(); + break; + case 3: /* load tmp snapshot */ + if(fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){ + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_TMP); + //meassure_performance(); + } + break; + case 5: // firefox debug hypercall + if(first){ + first = false; + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT); + //request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP); + + break; + } + else{ + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT); + break; + } + /* + case 6: + printf("%s: -> request to add 0x%lx to block-list\n", __func__, hypercall_arg&(~0xFFF)); + CPUX86State *env = &(X86_CPU(cpu))->env; + kvm_arch_get_registers_fast(cpu); + hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cpu, env->cr[3], hypercall_arg&(~0xFFF)); + fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr); + + break; + */ + default: + abort(); + } +} + +static void handle_hypercall_kafl_create_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //X86CPU *x86_cpu = X86_CPU(cpu); + //CPUX86State *env = &x86_cpu->env; + if(!fast_reload_tmp_created(get_fast_reload_snapshot())){ + + /* decode PT data */ + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + /* + kvm_arch_get_registers(cpu); + kvm_cpu_synchronize_state(cpu); + //fprintf(stderr, "%s: CREATE at %lx\n", __func__, get_rip(cpu)); + + //env->eip -= 3; // vmcall size + //kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + fast_reload_create_tmp_snapshot(get_fast_reload_snapshot()); + //kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + + qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); + + */ + + + + 
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP); //_TMP_FIX_RIP); + + set_tmp_snapshot_created(GET_GLOBAL_STATE()->auxilary_buffer, 1); + //handle_hypercall_kafl_acquire(run, cpu); + //fprintf(stderr, "%s: CREATE DONE at %lx\n", __func__, get_rip(cpu)); + + handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]); + } + else{ + //fprintf(stderr, "%s: LOAD Continue at %lx\n", __func__, get_rip(cpu)); + //fprintf(stderr, "%s: LOAD at %lx\n", __func__, get_rip(cpu)); + + /* + qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); + + fprintf(stderr, "%s: LOAD Continue at %lx\n", __func__, get_rip(cpu)); + */ + + //handle_hypercall_kafl_acquire(run, cpu); + } +} + +static void handle_hypercall_kafl_panic_extended(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(fast_reload_snapshot_exists(get_fast_reload_snapshot())){ + read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu); + set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strlen(hprintf_buffer)); + synchronization_lock_crash_found(); + } else{ + read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu); + fprintf(stderr, "Panic detected during initialization of stage 1 or stage 2 loader\n"); + fprintf(stderr, "REASON:\n%s\n", hprintf_buffer); + abort(); + QEMU_PT_PRINTF(CORE_PREFIX, "Panic detected during initialization of stage 1 or stage 2 loader"); + //hypercall_snd_char(KAFL_PROTO_CRASH); + QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_CRASH"); + //read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu); + //fprintf(stderr, "-> %s\n", hprintf_buffer); + } +} + + +static void handle_hypercall_kafl_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(hypercall_enabled){ +#ifdef PANIC_DEBUG + if(hypercall_arg){ + QEMU_PT_PRINTF(CORE_PREFIX, "ASan notification in user mode!"); + } else{ + QEMU_PT_PRINTF(CORE_PREFIX, "ASan notification in kernel mode!"); + } +#endif + if(fast_reload_snapshot_exists(get_fast_reload_snapshot())){ + synchronization_lock_asan_found(); + //synchronization_stop_vm_kasan(cpu); + } else{ + QEMU_PT_PRINTF(CORE_PREFIX, "KASAN detected during initialization of stage 1 or stage 2 loader"); + //hypercall_snd_char(KAFL_PROTO_KASAN); + QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_KASAN"); + + } + } +} + + +/* +static uint64_t get_rsp(CPUState *cpu){ + kvm_arch_get_registers(cpu); + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + kvm_cpu_synchronize_state(cpu); + return env->regs[4]; +} +*/ + +static void handle_hypercall_kafl_lock(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + + if(!GET_GLOBAL_STATE()->fast_reload_pre_image){ + QEMU_PT_PRINTF(CORE_PREFIX, "Skipping pre image creation (hint: set pre=on) ..."); + return; + +/* + + fast_reload_create_in_memory(get_fast_reload_snapshot(), true); + +qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + + qemu_mutex_unlock_iothread(); + */ + //return; + } + + QEMU_PT_PRINTF(CORE_PREFIX, "Creating pre image snapshot <%s> ...", GET_GLOBAL_STATE()->fast_reload_pre_path); + + printf("Creating pre image snapshot"); + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_PRE); +} + +static void handle_hypercall_kafl_info(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(setup_snapshot_once) + return; + + 
debug_printf("%s\n", __func__); +/* + printf("[*] EXEC: %s\t%lx %lx\n", __func__, get_rip(cpu), get_rsp(cpu)); + hexdump_virtual_memory(get_rsp(cpu), 0x100, cpu); + + kvm_arch_get_registers(cpu); + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); +*/ + /* + qemu_mutex_lock_iothread(); + //fast_reload_restore((fast_reload_t*)cpu->fast_reload_snapshot); + fast_reload_restore(get_fast_reload_snapshot()); + //kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + qemu_mutex_unlock_iothread(); + return; + */ +/* + kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); + + + printf("[*] EXIT: %s\t%lx %lx\n", __func__, get_rip(cpu), get_rsp(cpu)); + hexdump_virtual_memory(get_rsp(cpu), 0x100, cpu); +*/ +// return; + + read_virtual_memory(hypercall_arg, (uint8_t*)info_buffer, INFO_SIZE, cpu); + FILE* info_file_fd = fopen(INFO_FILE, "w"); + fprintf(info_file_fd, "%s\n", info_buffer); + fclose(info_file_fd); + if(hypercall_enabled){ + //hypercall_snd_char(KAFL_PROTO_INFO); + QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_INFO"); + abort(); + + } + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); +} + +void enable_notifies(void){ + notifiers_enabled = true; +} + + +/* +void hprintf(char* msg){ + char file_name[256]; + if(!(hprintf_counter >= HPRINTF_LIMIT) && GET_GLOBAL_STATE()->enable_hprintf){ + if(hypercall_enabled){ + snprintf(file_name, 256, "%s.%d", HPRINTF_FILE, hprintf_counter); + //printf("%s: %s\n", __func__, msg); + FILE* printf_file_fd = fopen(file_name, "w"); + fprintf(printf_file_fd, "%s", msg); + fclose(printf_file_fd); + //hypercall_snd_char(KAFL_PROTO_PRINTF); + QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_PRINTF"); + + } + hprintf_counter++; + + } +} +*/ + +static void handle_hypercall_kafl_printf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //fprintf(stderr, "%s\n", __func__); + //if( /* !(hprintf_counter >= HPRINTF_LIMIT) && */ GET_GLOBAL_STATE()->enable_hprintf){ // && !GET_GLOBAL_STATE()->in_fuzzing_mode){ + read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu); + //hprintf(hprintf_buffer); +#ifdef DEBUG_HPRINTF + fprintf(stderr, "%s %s\n", __func__, hprintf_buffer); +#else + set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, HPRINTF_SIZE)+1); + synchronization_lock(); +#endif + //} +} + + +static void handle_hypercall_kafl_printk(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(!notifiers_enabled){ + if (hypercall_enabled && GET_GLOBAL_STATE()->enable_hprintf){ + if(kafl_linux_printk(cpu)){ + handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]); + } + } + } +} + +static void handle_hypercall_kafl_printk_addr(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(!notifiers_enabled){ + debug_printf("%s\n", __func__); + debug_printf("%lx\n", hypercall_arg); + write_virtual_memory(hypercall_arg, (uint8_t*)PRINTK_PAYLOAD, PRINTK_PAYLOAD_SIZE, cpu); + debug_printf("Done\n"); + } +} + +static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + kAFL_ranges* buf = malloc(sizeof(kAFL_ranges)); + + for(int i = 0; i < INTEL_PT_MAX_RANGES; i++){ + buf->ip[i] = GET_GLOBAL_STATE()->pt_ip_filter_a[i]; + buf->size[i] = (GET_GLOBAL_STATE()->pt_ip_filter_b[i]-GET_GLOBAL_STATE()->pt_ip_filter_a[i]); + buf->enabled[i] = (uint8_t)GET_GLOBAL_STATE()->pt_ip_filter_configured[i]; + } + + write_virtual_memory(hypercall_arg, (uint8_t *)buf, sizeof(kAFL_ranges), cpu); + free(buf); +} + +static 
void handle_hypercall_kafl_user_submit_mode(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
+    //printf("%s\n", __func__);
+    switch(hypercall_arg){
+        case KAFL_MODE_64:
+            QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_64 ...");
+            GET_GLOBAL_STATE()->disassembler_word_width = 64;
+            break;
+        case KAFL_MODE_32:
+            QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_32 ...");
+            GET_GLOBAL_STATE()->disassembler_word_width = 32;
+            break;
+        case KAFL_MODE_16:
+            QEMU_PT_PRINTF(CORE_PREFIX, "target runs in KAFL_MODE_16 ...");
+            GET_GLOBAL_STATE()->disassembler_word_width = 16;
+            abort(); /* not implemented in this version (due to hypertrash hacks) */
+            break;
+        default:
+            QEMU_PT_PRINTF(CORE_PREFIX, "target runs in unknown mode...");
+            GET_GLOBAL_STATE()->disassembler_word_width = 0;
+            abort(); /* not implemented in this version (due to hypertrash hacks) */
+            break;
+    }
+}
+
+bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
+    X86CPU *cpux86 = X86_CPU(cpu);
+    CPUX86State *env = &cpux86->env;
+
+    for(uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++){
+        if (GET_GLOBAL_STATE()->redqueen_state && (env->eip >= GET_GLOBAL_STATE()->pt_ip_filter_a[i]) && (env->eip <= GET_GLOBAL_STATE()->pt_ip_filter_b[i])){
+            handle_hook(GET_GLOBAL_STATE()->redqueen_state);
+            return true;
+        }else if (cpu->singlestep_enabled && (GET_GLOBAL_STATE()->redqueen_state)->singlestep_enabled){
+            handle_hook(GET_GLOBAL_STATE()->redqueen_state);
+            return true;
+        }
+    }
+    return false;
+}
+
+static void handle_hypercall_kafl_user_abort(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
+    read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu);
+
+    fprintf(stderr, "%s: %s\n", __func__, hprintf_buffer);
+
+    abort();
+
+    if(hypercall_enabled){
+        //hypercall_snd_char(KAFL_PROTO_PT_ABORT);
+        QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_PT_ABORT");
+    }
+    debug_fprintf(stderr, "USER ABORT!\n");
+    qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+}
+
+void pt_enable_rqi(CPUState *cpu){
+    reload_mode_temp = true;
+    //cpu->redqueen_enable_pending = true;
+    GET_GLOBAL_STATE()->redqueen_enable_pending = true;
+}
+
+void pt_disable_rqi(CPUState *cpu){
+    reload_mode_temp = false;
+    //cpu->redqueen_disable_pending = true;
+    GET_GLOBAL_STATE()->redqueen_disable_pending = true;
+    GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION;
+    //cpu->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION;
+}
+
+void pt_set_enable_patches_pending(CPUState *cpu){
+    GET_GLOBAL_STATE()->patches_enable_pending = true;
+}
+
+void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_mode){
+    //cpu->redqueen_instrumentation_mode = redqueen_mode;
+    GET_GLOBAL_STATE()->redqueen_instrumentation_mode = redqueen_mode;
+}
+
+void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval){
+    assert(!newval || !GET_GLOBAL_STATE()->redqueen_update_blacklist);
+    //cpu->redqueen_update_blacklist = newval;
+    GET_GLOBAL_STATE()->redqueen_update_blacklist = newval;
+}
+
+void pt_set_disable_patches_pending(CPUState *cpu){
+    GET_GLOBAL_STATE()->patches_disable_pending = true;
+}
+
+void pt_enable_rqi_trace(CPUState *cpu){
+    if (GET_GLOBAL_STATE()->redqueen_state){
+        redqueen_set_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
+    }
+}
+
+void pt_disable_rqi_trace(CPUState *cpu){
+    if (GET_GLOBAL_STATE()->redqueen_state){
+        redqueen_unset_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
+        return;
+    }
+}
+
+static void
handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + uint64_t vaddr = hypercall_arg; + host_config_t config; + memset((void*)&config, 0, sizeof(host_config_t)); + + config.bitmap_size = GET_GLOBAL_STATE()->shared_bitmap_size; + config.ijon_bitmap_size = GET_GLOBAL_STATE()->shared_ijon_bitmap_size; + config.payload_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size; + + write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(host_config_t), cpu); +} + +static void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + uint64_t vaddr = hypercall_arg; + agent_config_t config; + + X86CPU *cpux86 = X86_CPU(cpu); + CPUX86State *env = &cpux86->env; + + if(read_virtual_memory(vaddr, (uint8_t*)&config, sizeof(agent_config_t), cpu)){ + + GET_GLOBAL_STATE()->cap_timeout_detection = config.agent_timeout_detection; + GET_GLOBAL_STATE()->cap_only_reload_mode = !!!config.agent_non_reload_mode; /* fix this */ + GET_GLOBAL_STATE()->cap_compile_time_tracing = config.agent_tracing; + + if(!GET_GLOBAL_STATE()->cap_compile_time_tracing && !GET_GLOBAL_STATE()->nyx_fdl){ + fprintf(stderr, "[!] Error: Attempt to fuzz target without compile-time instrumentation - Intel PT is not supported on this KVM build!\n"); + exit(1); + } + + GET_GLOBAL_STATE()->cap_ijon_tracing = config.agent_ijon_tracing; + + if(config.agent_tracing){ + GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr = config.trace_buffer_vaddr; + } + if(config.agent_ijon_tracing){ + GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr = config.ijon_trace_buffer_vaddr; + } + + GET_GLOBAL_STATE()->cap_cr3 = env->cr[3]; + + apply_capabilities(cpu); + + if(getenv("DUMP_PAYLOAD_MODE")){ + config.dump_payloads = 1; + write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(agent_config_t), cpu); + } + } + else{ + fprintf(stderr, "%s: failed (vaddr: 0x%lx)!\n", __func__, vaddr); + } +} + +static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + + /* TODO: check via aux buffer if we should allow this hypercall during fuzzing */ + /* + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + return; + } + */ + + char filename[256] = {0}; + + uint64_t vaddr = hypercall_arg; + kafl_dump_file_t file_obj; + memset((void*)&file_obj, 0, sizeof(kafl_dump_file_t)); + + + if(read_virtual_memory(vaddr, (uint8_t*)&file_obj, sizeof(kafl_dump_file_t), cpu)){ + + void* page = malloc(0x1000); + + read_virtual_memory(file_obj.file_name_str_ptr, (uint8_t*)&filename, sizeof(char)*256, cpu); + filename[255] = 0; + + char* base_name = basename(filename); + char* host_path = NULL; + + assert(asprintf(&host_path, "%s/dump/%s", GET_GLOBAL_STATE()->workdir_path , base_name) != -1); + //fprintf(stderr, "dumping file %s -> %s (bytes %ld) in append_mode=%d\n", base_name, host_path, file_obj.bytes, file_obj.append); + + FILE* f = NULL; + + if(file_obj.append){ + f = fopen(host_path, "a+"); + } + else{ + f = fopen(host_path, "w+"); + } + + int32_t bytes = file_obj.bytes; + uint32_t pos = 0; + + while(bytes > 0){ + + if(bytes >= 0x1000){ + read_virtual_memory(file_obj.data_ptr+pos, (uint8_t*)page, 0x1000, cpu); + fwrite(page, 1, 0x1000, f); + } + else{ + read_virtual_memory(file_obj.data_ptr+pos, (uint8_t*)page, bytes, cpu); + fwrite(page, 1, bytes, f); + } + + bytes -= 0x1000; + pos += 0x1000; + } + + + fclose(f); + free(host_path); + free(page); + + } +} + +int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall, uint64_t arg){ 
+ int ret = -1; + //fprintf(stderr, "%s -> %ld\n", __func__, hypercall); + switch(hypercall){ + case KVM_EXIT_KAFL_ACQUIRE: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_ACQUIRE\n"); + handle_hypercall_kafl_acquire(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_GET_PAYLOAD: + // = false; + //fprintf(stderr, "KVM_EXIT_KAFL_GET_PAYLOAD\n"); + handle_hypercall_get_payload(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_GET_PROGRAM: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_GET_PROGRAM\n"); + handle_hypercall_get_program(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_RELEASE: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_RELEASE\n"); + handle_hypercall_kafl_release(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_SUBMIT_CR3: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_SUBMIT_CR3\n"); + handle_hypercall_kafl_cr3(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_SUBMIT_PANIC: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_SUBMIT_PANIC\n"); + handle_hypercall_kafl_submit_panic(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_SUBMIT_KASAN: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_SUBMIT_KASAN\n"); + handle_hypercall_kafl_submit_kasan(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_PANIC: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_PANIC\n"); + handle_hypercall_kafl_panic(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_KASAN: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_KASAN\n"); + handle_hypercall_kafl_kasan(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_LOCK: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_LOCK\n"); + handle_hypercall_kafl_lock(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_INFO: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_INFO\n"); + handle_hypercall_kafl_info(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_NEXT_PAYLOAD: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_NEXT_PAYLOAD\n"); + handle_hypercall_kafl_next_payload(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_PRINTF: + //timeout_reload_pending = false; + //fprintf(stderr, "KVM_EXIT_KAFL_PRINTF\n"); + handle_hypercall_kafl_printf(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_PRINTK_ADDR: + //timeout_reload_pending = false; + handle_hypercall_kafl_printk_addr(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_PRINTK: + //timeout_reload_pending = false; + handle_hypercall_kafl_printk(run, cpu, arg); + ret = 0; + break; + + /* user space only exit reasons */ + case KVM_EXIT_KAFL_USER_RANGE_ADVISE: + //timeout_reload_pending = false; + handle_hypercall_kafl_user_range_advise(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_USER_SUBMIT_MODE: + //timeout_reload_pending = false; + handle_hypercall_kafl_user_submit_mode(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_USER_FAST_ACQUIRE: + //timeout_reload_pending = false; + if(handle_hypercall_kafl_next_payload(run, cpu, arg)){ + handle_hypercall_kafl_cr3(run, cpu, arg); + handle_hypercall_kafl_acquire(run, cpu, arg); + } + ret = 0; + break; + case KVM_EXIT_KAFL_TOPA_MAIN_FULL: + //timeout_reload_pending = false; + //fprintf(stderr, "pt_handle_overflow\n"); + pt_handle_overflow(cpu); + ret = 0; + break; + case KVM_EXIT_KAFL_USER_ABORT: + 
//timeout_reload_pending = false; + handle_hypercall_kafl_user_abort(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_NESTED_CONFIG: + //timeout_reload_pending = false; + handle_hypercall_kafl_nested_config(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_NESTED_PREPARE: + //timeout_reload_pending = false; + handle_hypercall_kafl_nested_prepare(run, cpu, arg); + ret = 0; + break; + + case KVM_EXIT_KAFL_NESTED_ACQUIRE: + //timeout_reload_pending = false; + handle_hypercall_kafl_nested_acquire(run, cpu, arg); + ret = 0; + break; + + case KVM_EXIT_KAFL_NESTED_RELEASE: + //timeout_reload_pending = false; + //KVM_EXIT_KAFL_NESTED_RELEASE_GOTO: + handle_hypercall_kafl_nested_release(run, cpu, arg); + //unlock_reload_pending(cpu); + ret = 0; + break; + + case KVM_EXIT_KAFL_NESTED_HPRINTF: + handle_hypercall_kafl_nested_hprintf(run, cpu, arg); + ret = 0; + break; + + case KVM_EXIT_KAFL_PAGE_DUMP_BP: + handle_hypercall_kafl_page_dump_bp(run, cpu, arg, run->debug.arch.pc); + ret = 0; + break; + case KVM_EXIT_KAFL_MTF: + handle_hypercall_kafl_mtf(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_RANGE_SUBMIT: + handle_hypercall_kafl_range_submit(run, cpu, arg); + ret = 0; + break; + case HYPERCALL_KAFL_REQ_STREAM_DATA: + handle_hypercall_kafl_req_stream_data(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_NESTED_EARLY_RELEASE: + handle_hypercall_kafl_nested_early_release(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_PANIC_EXTENDED: + handle_hypercall_kafl_panic_extended(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_CREATE_TMP_SNAPSHOT: + handle_hypercall_kafl_create_tmp_snapshot(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_DEBUG_TMP_SNAPSHOT: + handle_hypercall_kafl_debug_tmp_snapshot(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_GET_HOST_CONFIG: + handle_hypercall_kafl_get_host_config(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_SET_AGENT_CONFIG: + handle_hypercall_kafl_set_agent_config(run, cpu, arg); + ret = 0; + break; + case KVM_EXIT_KAFL_DUMP_FILE: + handle_hypercall_kafl_dump_file(run, cpu, arg); + ret = 0; + break; + case HYPERCALL_KAFL_REQ_STREAM_DATA_BULK: + handle_hypercall_kafl_req_stream_data_bulk(run, cpu, arg); + ret = 0; + break; + } + return ret; +} + diff --git a/nyx/hypercall.h b/nyx/hypercall.h new file mode 100644 index 0000000000..4ba05db749 --- /dev/null +++ b/nyx/hypercall.h @@ -0,0 +1,152 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . 
+ +*/ + +#pragma once + +#define PAYLOAD_BUFFER_SIZE 26 +#define PRINTK_PAYLOAD_SIZE 4 + +#define KAFL_MODE_64 0 +#define KAFL_MODE_32 1 +#define KAFL_MODE_16 2 + +typedef struct{ + uint64_t ip[4]; + uint64_t size[4]; + uint8_t enabled[4]; +} kAFL_ranges; + +bool check_bitmap_byte(uint32_t value); + +//#define PANIC_DEBUG + +/* + * Panic Notifier Payload (x86-64) + * fa cli + * 48 c7 c0 1f 00 00 00 mov rax,0x1f + * 48 c7 c3 08 00 00 00 mov rbx,0x8 + * 48 c7 c1 00 00 00 00 mov rcx,0x0 + * 0f 01 c1 vmcall + * f4 hlt + */ +#define PANIC_PAYLOAD "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4" + +/* + * KASAN Notifier Payload (x86-64) + * fa cli + * 48 c7 c0 1f 00 00 00 mov rax,0x1f + * 48 c7 c3 08 00 00 00 mov rbx,0x9 + * 48 c7 c1 00 00 00 00 mov rcx,0x0 + * 0f 01 c1 vmcall + * f4 hlt + */ +#define KASAN_PAYLOAD "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4" + +/* + * printk Notifier Payload (x86-64) + * 0f 01 c1 vmcall + * c3 retn + */ +#define PRINTK_PAYLOAD "\x0F\x01\xC1\xC3" + +void pt_setup_program(void* ptr); +void pt_setup_snd_handler(void (*tmp)(char, void*), void* tmp_s); +void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end); +void pt_setup_enable_hypercalls(void); + +void pt_disable_wrapper(CPUState *cpu); + +void hypercall_submit_address(uint64_t address); +bool hypercall_check_tuple(uint64_t current_addr, uint64_t prev_addr); +//void hypercall_check_in_range(uint64_t* addr); + + +bool hypercall_check_transition(uint64_t value); +void hypercall_submit_transition(uint32_t value); + +void hypercall_enable_filter(void); +void hypercall_disable_filter(void); +void hypercall_commit_filter(void); + +bool pt_hypercalls_enabled(void); + +void hypercall_unlock(void); +void hypercall_reload(void); + +void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); + + + +void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page); + + +void hprintf(char* msg); +void enable_notifies(void); + +bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void hypercall_reset_hprintf_counter(void); + +bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void pt_enable_rqo(CPUState *cpu); +void pt_disable_rqo(CPUState *cpu); +void pt_enable_rqi(CPUState *cpu); +void pt_disable_rqi(CPUState *cpu); +void pt_enable_rqi_trace(CPUState *cpu); +void pt_disable_rqi_trace(CPUState *cpu); +void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_instruction_mode); +void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval); +void pt_set_enable_patches_pending(CPUState *cpu); +void pt_set_disable_patches_pending(CPUState *cpu); + +void create_fast_snapshot(CPUState *cpu, bool nested); +int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall, uint64_t arg); + +void skip_init(void); + + +typedef struct host_config_s{ + uint32_t bitmap_size; + uint32_t ijon_bitmap_size; + uint32_t payload_buffer_size; + /* more to come */ +} __attribute__((packed)) host_config_t; + +typedef struct agent_config_s{ + uint8_t agent_timeout_detection; + uint8_t agent_tracing; + uint8_t 
agent_ijon_tracing; + uint8_t agent_non_reload_mode; + uint64_t trace_buffer_vaddr; + uint64_t ijon_trace_buffer_vaddr; + + uint8_t dump_payloads; /* set by hypervisor */ + /* more to come */ +} __attribute__((packed)) agent_config_t; + +typedef struct kafl_dump_file_s{ + uint64_t file_name_str_ptr; + uint64_t data_ptr; + uint64_t bytes; + uint8_t append; +} __attribute__((packed)) kafl_dump_file_t; + diff --git a/nyx/interface.c b/nyx/interface.c new file mode 100644 index 0000000000..629a1a774b --- /dev/null +++ b/nyx/interface.c @@ -0,0 +1,458 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/cutils.h" +#include "hw/qdev-properties.h" +#include "hw/hw.h" +#include "hw/i386/pc.h" +#include "hw/pci/pci.h" +#include "hw/pci/msi.h" +#include "hw/pci/msix.h" +#include "sysemu/kvm.h" +#include "migration/migration.h" +#include "qemu/error-report.h" +#include "qemu/event_notifier.h" +#include "qom/object_interfaces.h" +#include "chardev/char-fe.h" +#include "sysemu/hostmem.h" +#include "sysemu/qtest.h" +#include "qapi/visitor.h" +#include "exec/ram_addr.h" +#include +#include +#include "pt.h" +#include "nyx/hypercall.h" +#include "nyx/interface.h" +#include "nyx/debug.h" +#include "nyx/synchronization.h" +#include "nyx/snapshot/devices/state_reallocation.h" +#include "nyx/memory_access.h" +#include +#include "nyx/state.h" +#include "nyx/sharedir.h" +#include "nyx/helpers.h" + +#include + +#include "redqueen.h" + +#define CONVERT_UINT64(x) (uint64_t)(strtoull(x, NULL, 16)) + +#define TYPE_KAFLMEM "kafl" +#define KAFLMEM(obj) \ + OBJECT_CHECK(kafl_mem_state, (obj), TYPE_KAFLMEM) + +uint32_t kafl_bitmap_size = DEFAULT_KAFL_BITMAP_SIZE; + +static void pci_kafl_guest_realize(DeviceState *dev, Error **errp); + +typedef struct kafl_mem_state { + DeviceState parent_obj; + + Chardev *kafl_chr_drv_state; + CharBackend chr; + + char* sharedir; + + char* workdir; + uint32_t worker_id; + + char* redqueen_workdir; + char* data_bar_fd_0; + char* data_bar_fd_1; + char* data_bar_fd_2; + char* bitmap_file; + + char* filter_bitmap[4]; + char* ip_filter[4][2]; + + uint64_t bitmap_size; + + bool debug_mode; /* support for hprintf */ + bool notifier; + bool dump_pt_trace; + + bool redqueen; + +} kafl_mem_state; + +static void kafl_guest_event(void *opaque, QEMUChrEvent event){ +} + +static void send_char(char val, void* tmp_s){ + kafl_mem_state *s = tmp_s; + + assert(val == KAFL_PING); + __sync_synchronize(); + + qemu_chr_fe_write(&s->chr, (const uint8_t *) &val, 1); +} + +static int kafl_guest_can_receive(void * opaque){ + return sizeof(int64_t); +} + +static kafl_mem_state* state = NULL; + +static void init_send_char(kafl_mem_state* s){ + state = s; +} + +bool interface_send_char(char val){ + + if(state){ + send_char(val, state); + return true; + } + return false; +} + +static void kafl_guest_receive(void *opaque, const uint8_t * buf, int 
+static void kafl_guest_receive(void *opaque, const uint8_t * buf, int size){
+    int i;
+    for(i = 0; i < size; i++){
+        switch(buf[i]){
+            case KAFL_PING:
+                //fprintf(stderr, "Protocol - RECV: KAFL_PING\n");
+                synchronization_unlock();
+                break;
+            case '\n':
+                break;
+            case 'E':
+                exit(0);
+            default:
+                /* unknown protocol bytes are ignored */
+                break;
+        }
+    }
+}
+
+static int kafl_guest_create_memory_bar(kafl_mem_state *s, int region_num, uint64_t bar_size, const char* file, Error **errp){
+    void * ptr;
+    int fd;
+    struct stat st;
+
+    fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
+    assert(fd != -1);
+    assert(ftruncate(fd, bar_size) == 0);
+    stat(file, &st);
+    QEMU_PT_PRINTF(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", bar_size, st.st_size);
+
+    assert(bar_size == st.st_size);
+    ptr = mmap(0, bar_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    if (ptr == MAP_FAILED) {
+        error_setg_errno(errp, errno, "Failed to mmap memory");
+        return -1;
+    }
+
+    switch(region_num){
+        case 1: pt_setup_program((void*)ptr);
+                break;
+        case 2:
+                GET_GLOBAL_STATE()->shared_payload_buffer_fd = fd;
+                GET_GLOBAL_STATE()->shared_payload_buffer_size = bar_size;
+                break;
+    }
+
+    init_send_char(s);
+
+    return 0;
+}
+
+static void kafl_guest_setup_bitmap(kafl_mem_state *s, char* filename, uint32_t bitmap_size){
+    void * ptr;
+    int fd;
+    struct stat st;
+
+    fd = open(filename, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
+    assert(fd != -1);
+    assert(ftruncate(fd, bitmap_size) == 0);
+    stat(filename, &st);
+    assert(bitmap_size == st.st_size);
+    ptr = mmap(0, bitmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    assert(ptr != MAP_FAILED);
+    GET_GLOBAL_STATE()->shared_bitmap_ptr = (void*)ptr;
+    GET_GLOBAL_STATE()->shared_bitmap_fd = fd;
+    GET_GLOBAL_STATE()->shared_bitmap_size = bitmap_size-DEFAULT_KAFL_IJON_BITMAP_SIZE;
+    GET_GLOBAL_STATE()->shared_ijon_bitmap_size = DEFAULT_KAFL_IJON_BITMAP_SIZE;
+}
+
+static bool verify_workdir_state(kafl_mem_state *s, Error **errp){
+
+    char* workdir = s->workdir;
+    uint32_t id = s->worker_id;
+    char* tmp;
+
+    if (!folder_exits(workdir)){
+        fprintf(stderr, "%s does not exist...\n", workdir);
+        return false;
+    }
+
+    set_workdir_path(workdir);
+
+    assert(asprintf(&tmp, "%s/dump/", workdir) != -1);
+    if (!folder_exits(tmp)){
+        mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/interface_%d", workdir, id) != -1);
+    if (!file_exits(tmp)){
+        fprintf(stderr, "%s does not exist...\n", tmp);
+        free(tmp);
+        return false;
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/payload_%d", workdir, id) != -1);
+    if (!file_exits(tmp)){
+        fprintf(stderr, "%s does not exist...\n", tmp);
+        free(tmp);
+        return false;
+    }
+    else {
+        kafl_guest_create_memory_bar(s, 2, PAYLOAD_SIZE, tmp, errp);
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/bitmap_%d", workdir, id) != -1);
+    if (!file_exits(tmp)){
+        fprintf(stderr, "%s does not exist...\n", tmp);
+        free(tmp);
+        return false;
+    } else {
+        kafl_guest_setup_bitmap(s, tmp, s->bitmap_size);
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/page_cache.lock", workdir) != -1);
+    if (!file_exits(tmp)){
+        fprintf(stderr, "%s does not exist...\n", tmp);
+        free(tmp);
+        return false;
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/page_cache.addr", workdir) != -1);
+    if (!file_exits(tmp)){
+        fprintf(stderr, "%s does not exist...\n", tmp);
+        free(tmp);
+        return false;
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/page_cache.dump", workdir) != -1);
+    if (!file_exits(tmp)){
+        fprintf(stderr, "%s does not exist...\n", tmp);
+        free(tmp);
+        return false;
+    }
+    free(tmp);
+
+    assert(asprintf(&tmp, "%s/page_cache", workdir) != -1);
+    init_page_cache(tmp);
+
"%s/redqueen_workdir_%d/", workdir, id) != -1); + if (!folder_exits(tmp)){ + fprintf(stderr, "%s does not exist...\n", tmp); + free(tmp); + return false; + } + else { + setup_redqueen_workdir(tmp); + } + free(tmp); + + init_redqueen_state(); + + if(s->dump_pt_trace){ + assert(asprintf(&tmp, "%s/pt_trace_dump_%d", workdir, id) != -1); + pt_open_pt_trace_file(tmp); + free(tmp); + } + + + assert(asprintf(&tmp, "%s/aux_buffer_%d", workdir, id) != -1); + /* + if (file_exits(tmp)){ + QEMU_PT_PRINTF(INTERFACE_PREFIX, "%s does not already exists...", tmp); + free(tmp); + return false; + } + else { + init_aux_buffer(tmp); + } + */ + init_aux_buffer(tmp); + free(tmp); + + + return true; +} + +#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9) + +static void check_range(uint8_t i){ + int ret = 0; + int kvm = open("/dev/dell", O_RDWR | O_CLOEXEC); + ret = ioctl(kvm, KVM_VMX_PT_GET_ADDRN, NULL); + + if(ret == -1){ + QEMU_PT_PRINTF(INTERFACE_PREFIX, "ERROR: Multi range tracing is not supported! Please upgrade your kernel to 4.20-rc4!\n"); + abort(); + } + + if(ret < (i+1)){ + QEMU_PT_PRINTF(INTERFACE_PREFIX, "ERROR: CPU supports only %d IP filters!\n", ret); + abort(); + } + close(kvm); +} + +static bool verify_sharedir_state(kafl_mem_state *s, Error **errp){ + + char* sharedir = s->sharedir; + + if (!folder_exits(sharedir)){ + QEMU_PT_PRINTF(INTERFACE_PREFIX, "%s does not exist...", sharedir); + return false; + } + return true; +} + + +static void pci_kafl_guest_realize(DeviceState *dev, Error **errp){ + uint64_t tmp0, tmp1; + kafl_mem_state *s = KAFLMEM(dev); + + if(s->bitmap_size <= 0){ + s->bitmap_size = DEFAULT_KAFL_BITMAP_SIZE; + } + + assert((uint32_t)s->bitmap_size > (0x1000 + DEFAULT_KAFL_IJON_BITMAP_SIZE)); + assert((((uint32_t)s->bitmap_size-DEFAULT_KAFL_IJON_BITMAP_SIZE) & (((uint32_t)s->bitmap_size-DEFAULT_KAFL_IJON_BITMAP_SIZE) - 1)) == 0 ); + + if(s->worker_id == 0xFFFF){ + fprintf(stderr, "Invalid worker id...\n"); + abort(); + } + + if (!s->workdir || !verify_workdir_state(s, errp)){ + fprintf(stderr, "Invalid work dir...\n"); + abort(); + } + + if (!s->sharedir || !verify_sharedir_state(s, errp)){ + fprintf(stderr, "Invalid sharedir...\n"); + //abort(); + } + else{ + sharedir_set_dir(GET_GLOBAL_STATE()->sharedir, s->sharedir); + } + + if(&s->chr) + qemu_chr_fe_set_handlers(&s->chr, kafl_guest_can_receive, kafl_guest_receive, kafl_guest_event, NULL, s, NULL, true); + + for(uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++){ + if(s->ip_filter[i][0] && s->ip_filter[i][1]){ + if(i >= 1){ + check_range(i); + } + tmp0 = CONVERT_UINT64(s->ip_filter[i][0]); + tmp1 = CONVERT_UINT64(s->ip_filter[i][1]); + if (tmp0 < tmp1){ + //if(s->filter_bitmap[i]){ + // tmp = kafl_guest_setup_filter_bitmap(s, s->filter_bitmap[i], (uint64_t)(s->bitmap_size)); + //} + pt_setup_ip_filters(i, tmp0, tmp1); + } + } + } + + if(s->debug_mode){ + GET_GLOBAL_STATE()->enable_hprintf = true; + } + + if(s->notifier){ + enable_notifies(); + } + + pt_setup_enable_hypercalls(); + init_crash_handler(); +} + +static Property kafl_guest_properties[] = { + DEFINE_PROP_CHR("chardev", kafl_mem_state, chr), + + DEFINE_PROP_STRING("sharedir", kafl_mem_state, sharedir), + + + DEFINE_PROP_STRING("workdir", kafl_mem_state, workdir), + DEFINE_PROP_UINT32("worker_id", kafl_mem_state, worker_id, 0xFFFF), + + /* + * Since DEFINE_PROP_UINT64 is somehow broken (signed/unsigned madness), + * let's use DEFINE_PROP_STRING and post-process all values by strtol... 
+     */
+    DEFINE_PROP_STRING("ip0_a", kafl_mem_state, ip_filter[0][0]),
+    DEFINE_PROP_STRING("ip0_b", kafl_mem_state, ip_filter[0][1]),
+    DEFINE_PROP_STRING("ip1_a", kafl_mem_state, ip_filter[1][0]),
+    DEFINE_PROP_STRING("ip1_b", kafl_mem_state, ip_filter[1][1]),
+    DEFINE_PROP_STRING("ip2_a", kafl_mem_state, ip_filter[2][0]),
+    DEFINE_PROP_STRING("ip2_b", kafl_mem_state, ip_filter[2][1]),
+    DEFINE_PROP_STRING("ip3_a", kafl_mem_state, ip_filter[3][0]),
+    DEFINE_PROP_STRING("ip3_b", kafl_mem_state, ip_filter[3][1]),
+
+    DEFINE_PROP_UINT64("bitmap_size", kafl_mem_state, bitmap_size, DEFAULT_KAFL_BITMAP_SIZE),
+    DEFINE_PROP_BOOL("debug_mode", kafl_mem_state, debug_mode, false),
+    DEFINE_PROP_BOOL("crash_notifier", kafl_mem_state, notifier, true),
+    DEFINE_PROP_BOOL("dump_pt_trace", kafl_mem_state, dump_pt_trace, false),
+
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void kafl_guest_class_init(ObjectClass *klass, void *data){
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    //PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+    dc->realize = pci_kafl_guest_realize;
+    //k->class_id = PCI_CLASS_MEMORY_RAM;
+    dc->props = kafl_guest_properties;
+    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+    dc->desc = "KAFL Inter-VM shared memory";
+}
+
+static void kafl_guest_init(Object *obj){
+}
+
+static const TypeInfo kafl_guest_info = {
+    .name = TYPE_KAFLMEM,
+    .parent = TYPE_DEVICE,
+    .instance_size = sizeof(kafl_mem_state),
+    .instance_init = kafl_guest_init,
+    .class_init = kafl_guest_class_init,
+};
+
+static void kafl_guest_register_types(void){
+    type_register_static(&kafl_guest_info);
+}
+
+type_init(kafl_guest_register_types)
diff --git a/nyx/interface.h b/nyx/interface.h
new file mode 100644
index 0000000000..726f2d5085
--- /dev/null
+++ b/nyx/interface.h
@@ -0,0 +1,44 @@
+/*
+
+Copyright (C) 2017 Sergej Schumilo
+
+This file is part of QEMU-PT (kAFL).
+
+QEMU-PT is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+QEMU-PT is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef INTERFACE_H
+#define INTERFACE_H
+
+/* 64k bitmap + 4k ijon buffer */
+#define DEFAULT_KAFL_IJON_BITMAP_SIZE 0x1000
+#define DEFAULT_KAFL_BITMAP_SIZE (0x10000 + DEFAULT_KAFL_IJON_BITMAP_SIZE)
+#define DEFAULT_EDGE_FILTER_SIZE 0x1000000
+
+#define PROGRAM_SIZE (128 << 20) /* 128MB Application Data */
+#define PAYLOAD_SIZE (128 << 10) /* 128KB Payload Data */
+#define INFO_SIZE (128 << 10) /* 128KB Info Data */
+#define HPRINTF_SIZE 0x1000 /* 4KB hprintf Data */
+
+#define INFO_FILE "/tmp/kAFL_info.txt"
+#define HPRINTF_FILE "/tmp/kAFL_printf.txt"
+
+#define HPRINTF_LIMIT 512
+
+#define KAFL_PING 'x'
+
+bool interface_send_char(char val);
+
+#endif
diff --git a/nyx/khash.h b/nyx/khash.h
new file mode 100644
index 0000000000..fdabc18a73
--- /dev/null
+++ b/nyx/khash.h
@@ -0,0 +1,677 @@
+/* The MIT License
+
+   Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+/*
+  An example:
+
+#include "khash.h"
+KHASH_MAP_INIT_INT(32, char)
+int main() {
+    int ret, is_missing;
+    khiter_t k;
+    khash_t(32) *h = kh_init(32);
+    k = kh_put(32, h, 5, &ret);
+    kh_value(h, k) = 10;
+    k = kh_get(32, h, 10);
+    is_missing = (k == kh_end(h));
+    k = kh_get(32, h, 5);
+    kh_del(32, h, k);
+    for (k = kh_begin(h); k != kh_end(h); ++k)
+        if (kh_exist(h, k)) kh_value(h, k) = 1;
+    kh_destroy(32, h);
+    return 0;
+}
+*/
+
+/*
+  2013-05-02 (0.2.8):
+
+    * Use quadratic probing. When the capacity is a power of 2, the stepping
+      function i*(i+1)/2 is guaranteed to traverse each bucket. It is better
+      than double hashing on cache performance and is more robust than
+      linear probing.
+
+      In theory, double hashing should be more robust than quadratic
+      probing. However, my implementation is probably not suited for large
+      hash tables, because the second hash function is closely tied to the
+      first hash function, which reduces the effectiveness of double hashing.
+
+      Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php
+
+  2011-12-29 (0.2.7):
+
+    * Minor code clean up; no actual effect.
+
+  2011-09-16 (0.2.6):
+
+    * The capacity is a power of 2. This seems to dramatically improve the
+      speed for simple keys. Thanks to Zilong Tan for the suggestion.
+      Reference:
+
+       - http://code.google.com/p/ulib/
+       - http://nothings.org/computer/judy/
+
+    * Allow optional use of linear probing, which usually has better
+      performance for random input. Double hashing is still the default as
+      it is more robust to certain non-random input.
+
+    * Added Wang's integer hash function (not used by default). This hash
+      function is more robust to certain non-random input.
+
+  2011-02-14 (0.2.5):
+
+    * Allow global functions to be declared.
+
+  2009-09-26 (0.2.4):
+
+    * Improved portability
+
+  2008-09-19 (0.2.3):
+
+    * Corrected the example
+    * Improved interfaces
+
+  2008-09-11 (0.2.2):
+
+    * Improved speed a little in kh_put()
+
+  2008-09-10 (0.2.1):
+
+    * Added kh_clear()
+    * Fixed a compiling error
+
+  2008-09-02 (0.2.0):
+
+    * Changed to token concatenation, which increases flexibility.
+
+  2008-08-31 (0.1.2):
+
+    * Fixed a bug in kh_get(), which had not been tested previously.
+
+  2008-08-31 (0.1.1):
+
+    * Added destructor
+*/
+
+
+#ifndef __AC_KHASH_H
+#define __AC_KHASH_H
+
+/*!
+  @header
+
+  Generic hash table library.
+ */
+
+#define AC_VERSION_KHASH_H "0.2.8"
+
+/* standard headers required below; <stdio.h> and <assert.h> are needed by
+   the kh_write()/kh_load() serialization helpers added in this copy */
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <stdio.h>
+#include <assert.h>
+
+/* compiler specific configuration */
+
+#if UINT_MAX == 0xffffffffu
+typedef unsigned int khint32_t;
+#elif ULONG_MAX == 0xffffffffu
+typedef unsigned long khint32_t;
+#endif
+
+#if ULONG_MAX == ULLONG_MAX
+typedef unsigned long khint64_t;
+#else
+typedef unsigned long long khint64_t;
+#endif
+
+#ifndef kh_inline
+#ifdef _MSC_VER
+#define kh_inline __inline
+#else
+#define kh_inline inline
+#endif
+#endif /* kh_inline */
+
+#ifndef klib_unused
+#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
+#define klib_unused __attribute__ ((__unused__))
+#else
+#define klib_unused
+#endif
+#endif /* klib_unused */
+
+typedef khint64_t khint_t;
+typedef khint_t khiter_t;
+
+#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
+#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
+#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
+#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
+#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
+#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
+#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))
+#define __ac_fw(item, fp) (fwrite(&(item), 1, sizeof(item), fp))
+
+#define __ac_fsize(m) ((m) < 16?
1 : (m)>>4) + +#ifndef kroundup32 +#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) +#endif + +#ifndef kcalloc +#define kcalloc(N,Z) calloc(N,Z) +#endif +#ifndef kmalloc +#define kmalloc(Z) malloc(Z) +#endif +#ifndef krealloc +#define krealloc(P,Z) realloc(P,Z) +#endif +#ifndef kfree +#define kfree(P) free(P) +#endif + +static const double __ac_HASH_UPPER = 0.77; + +#define __KHASH_TYPE(name, khkey_t, khval_t) \ + typedef struct kh_##name##_s { \ + khint_t n_buckets, size, n_occupied, upper_bound; \ + khint32_t *flags; \ + khkey_t *keys; \ + khval_t *vals; \ + } kh_##name##_t; + +#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \ + extern kh_##name##_t *kh_init_##name(void); \ + extern void kh_destroy_##name(kh_##name##_t *h); \ + extern void kh_clear_##name(kh_##name##_t *h); \ + extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \ + extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \ + extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \ + extern void kh_del_##name(kh_##name##_t *h, khint_t x); + +#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ + SCOPE kh_##name##_t *kh_init_##name(void) { \ + return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \ + } \ + SCOPE void kh_destroy_##name(kh_##name##_t *h) \ + { \ + if (h) { \ + kfree((void *)h->keys); kfree(h->flags); \ + kfree((void *)h->vals); \ + kfree(h); \ + } \ + } \ + SCOPE void kh_clear_##name(kh_##name##_t *h) \ + { \ + if (h && h->flags) { \ + memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \ + h->size = h->n_occupied = 0; \ + } \ + } \ + SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \ + { \ + if (h->n_buckets) { \ + khint_t k, i, last, mask, step = 0; \ + mask = h->n_buckets - 1; \ + k = __hash_func(key); i = k & mask; \ + last = i; \ + while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ + i = (i + (++step)) & mask; \ + if (i == last) return h->n_buckets; \ + } \ + return __ac_iseither(h->flags, i)? h->n_buckets : i; \ + } else return 0; \ + } \ + SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \ + { /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. 
*/ \ + khint32_t *new_flags = 0; \ + khint_t j = 1; \ + { \ + kroundup32(new_n_buckets); \ + if (new_n_buckets < 4) new_n_buckets = 4; \ + if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \ + else { /* hash table size to be changed (shrink or expand); rehash */ \ + new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ + if (!new_flags) return -1; \ + memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ + if (h->n_buckets < new_n_buckets) { /* expand */ \ + khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \ + if (!new_keys) { kfree(new_flags); return -1; } \ + h->keys = new_keys; \ + if (kh_is_map) { \ + khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \ + if (!new_vals) { kfree(new_flags); return -1; } \ + h->vals = new_vals; \ + } \ + } /* otherwise shrink */ \ + } \ + } \ + if (j) { /* rehashing is needed */ \ + for (j = 0; j != h->n_buckets; ++j) { \ + if (__ac_iseither(h->flags, j) == 0) { \ + khkey_t key = h->keys[j]; \ + khval_t val; \ + khint_t new_mask; \ + new_mask = new_n_buckets - 1; \ + if (kh_is_map) val = h->vals[j]; \ + __ac_set_isdel_true(h->flags, j); \ + while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \ + khint_t k, i, step = 0; \ + k = __hash_func(key); \ + i = k & new_mask; \ + while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \ + __ac_set_isempty_false(new_flags, i); \ + if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \ + { khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \ + if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \ + __ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \ + } else { /* write the element and jump out of the loop */ \ + h->keys[i] = key; \ + if (kh_is_map) h->vals[i] = val; \ + break; \ + } \ + } \ + } \ + } \ + if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \ + h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \ + if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \ + } \ + kfree(h->flags); /* free the working space */ \ + h->flags = new_flags; \ + h->n_buckets = new_n_buckets; \ + h->n_occupied = h->size; \ + h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \ + } \ + return 0; \ + } \ + SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \ + { \ + khint_t x; \ + if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \ + if (h->n_buckets > (h->size<<1)) { \ + if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \ + *ret = -1; return h->n_buckets; \ + } \ + } else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \ + *ret = -1; return h->n_buckets; \ + } \ + } /* TODO: to implement automatically shrinking; resize() already support shrinking */ \ + { \ + khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \ + x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \ + if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \ + else { \ + last = i; \ + while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ + if (__ac_isdel(h->flags, i)) site = i; \ + i = (i + (++step)) & mask; \ + if (i == last) { x = site; break; } \ + } \ + if (x == h->n_buckets) { \ + if 
(__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \ + else x = i; \ + } \ + } \ + } \ + if (__ac_isempty(h->flags, x)) { /* not present at all */ \ + h->keys[x] = key; \ + __ac_set_isboth_false(h->flags, x); \ + ++h->size; ++h->n_occupied; \ + *ret = 1; \ + } else if (__ac_isdel(h->flags, x)) { /* deleted */ \ + h->keys[x] = key; \ + __ac_set_isboth_false(h->flags, x); \ + ++h->size; \ + *ret = 2; \ + } else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \ + return x; \ + } \ + SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \ + { \ + if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \ + __ac_set_isdel_true(h->flags, x); \ + --h->size; \ + } \ + } \ + SCOPE void kh_write_##name(kh_##name##_t *map, const char *path) { \ + FILE *fp = fopen(path, "wb"); \ + if(fp == NULL) { \ + fprintf(stderr, "[%s] Could not open file %s.\n", __func__, path);\ + assert(0); \ + /*exit(EXIT_FAILURE);*/ \ + } \ + __ac_fw(map->n_buckets, fp); \ + __ac_fw(map->n_occupied, fp); \ + __ac_fw(map->size, fp); \ + __ac_fw(map->upper_bound, fp); \ + fwrite(map->flags, __ac_fsize(map->n_buckets), sizeof(khint32_t), fp);\ + fwrite(map->keys, map->n_buckets, sizeof(*map->keys), fp); \ + fwrite(map->vals, map->n_buckets, sizeof(*map->vals), fp); \ + fclose(fp); \ + } \ + SCOPE kh_##name##_t *khash_load_##name(const char *path) \ + { \ + kh_##name##_t *ret = calloc(1, sizeof(kh_##name##_t)); \ + FILE *fp = fopen(path, "rb"); \ + assert(sizeof(ret->n_buckets) == fread(&ret->n_buckets, 1, sizeof(ret->n_buckets), fp)); \ + assert(sizeof(ret->n_occupied) == fread(&ret->n_occupied, 1, sizeof(ret->n_occupied), fp)); \ + assert(sizeof(ret->size) == fread(&ret->size, 1, sizeof(ret->size), fp)); \ + assert(sizeof(ret->upper_bound) == fread(&ret->upper_bound, 1, sizeof(ret->upper_bound), fp)); \ + ret->flags = malloc(sizeof(*ret->flags) * __ac_fsize(ret->n_buckets));\ + ret->keys = malloc(sizeof(khkey_t) * ret->n_buckets); \ + ret->vals = malloc(sizeof(khval_t) * ret->n_buckets); \ + assert(sizeof(*ret->flags) == fread(ret->flags, __ac_fsize(ret->n_buckets), sizeof(*ret->flags), fp));\ + assert(ret->n_buckets * sizeof(*ret->keys) == fread(ret->keys, 1, ret->n_buckets * sizeof(*ret->keys), fp)); \ + assert(ret->n_buckets * sizeof(*ret->vals) == fread(ret->vals, 1, ret->n_buckets * sizeof(*ret->vals), fp)); \ + fclose(fp); \ + return ret; \ + } + +#define KHASH_DECLARE(name, khkey_t, khval_t) \ + __KHASH_TYPE(name, khkey_t, khval_t) \ + __KHASH_PROTOTYPES(name, khkey_t, khval_t) + +#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ + __KHASH_TYPE(name, khkey_t, khval_t) \ + __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) + +#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ + KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) + +/* --- BEGIN OF HASH FUNCTIONS --- */ + +/*! @function + @abstract Integer hash function + @param key The integer [khint32_t] + @return The hash value [khint_t] + */ +#define kh_int_hash_func(key) (khint32_t)(key) +/*! @function + @abstract Integer comparison function + */ +#define kh_int_hash_equal(a, b) ((a) == (b)) +/*! @function + @abstract 64-bit integer hash function + @param key The integer [khint64_t] + @return The hash value [khint_t] + */ +#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11) +/*! 
@function + @abstract 64-bit integer comparison function + */ +#define kh_int64_hash_equal(a, b) ((a) == (b)) +/*! @function + @abstract const char* hash function + @param s Pointer to a null terminated string + @return The hash value + */ +static kh_inline khint_t __ac_X31_hash_string(const char *s) +{ + khint_t h = (khint_t)*s; + if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s; + return h; +} +/*! @function + @abstract Another interface to const char* hash function + @param key Pointer to a null terminated string [const char*] + @return The hash value [khint_t] + */ +#define kh_str_hash_func(key) __ac_X31_hash_string(key) +/*! @function + @abstract Const char* comparison function + */ +#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0) + +static kh_inline khint_t __ac_Wang_hash(khint_t key) +{ + key += ~(key << 15); + key ^= (key >> 10); + key += (key << 3); + key ^= (key >> 6); + key += ~(key << 11); + key ^= (key >> 16); + return key; +} +#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key) + +/* --- END OF HASH FUNCTIONS --- */ + +/* Other convenient macros... */ + +/*! + @abstract Type of the hash table. + @param name Name of the hash table [symbol] + */ +#define khash_t(name) kh_##name##_t + +/*! @function + @abstract Initiate a hash table. + @param name Name of the hash table [symbol] + @return Pointer to the hash table [khash_t(name)*] + */ +#define kh_init(name) kh_init_##name() + +/*! @function + @abstract Destroy a hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + */ +#define kh_destroy(name, h) kh_destroy_##name(h) + +/*! @function + @abstract Reset a hash table without deallocating memory. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + */ +#define kh_clear(name, h) kh_clear_##name(h) + +/*! @function + @abstract Resize a hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param s New size [khint_t] + */ +#define kh_resize(name, h, s) kh_resize_##name(h, s) + +/*! @function + @abstract Insert a key to the hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param k Key [type of keys] + @param r Extra return code: -1 if the operation failed; + 0 if the key is present in the hash table; + 1 if the bucket is empty (never used); 2 if the element in + the bucket has been deleted [int*] + @return Iterator to the inserted element [khint_t] + */ +#define kh_put(name, h, k, r) kh_put_##name(h, k, r) + +/*! @function + @abstract Retrieve a key from the hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param k Key [type of keys] + @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t] + */ +#define kh_get(name, h, k) kh_get_##name(h, k) + +/*! @function + @abstract Remove a key from the hash table. + @param name Name of the hash table [symbol] + @param h Pointer to the hash table [khash_t(name)*] + @param k Iterator to the element to be deleted [khint_t] + */ +#define kh_del(name, h, k) kh_del_##name(h, k) + +/*! @function + @abstract Write a hash map to disk. + @param h Pointer to the hash table [khash_t(name)*] + @param path Path to which to write. [const char *] + */ +#define kh_write(name, h, path) kh_write_##name(h, path) + +/*! 
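+  A short usage sketch for the serialization pair kh_write()/kh_load() that
+  this copy of khash adds (assumes a table created with KHASH_MAP_INIT_INT64;
+  the file path is a placeholder):
+
+    KHASH_MAP_INIT_INT64(64, uint64_t)
+    ...
+    int ret;
+    khash_t(64) *h = kh_init(64);
+    khiter_t k = kh_put(64, h, 0x401000ULL, &ret);
+    kh_value(h, k) = 42;
+    kh_write(64, h, "/tmp/map.bin");    -- dumps header, flags, keys, vals
+    khash_t(64) *h2 = kh_load(64, "/tmp/map.bin");
+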
@function
+  @abstract     Load a hash table from disk.
+  @param  name  Name of the hash table [symbol]
+  @param  path  Path to the file from which to load [const char *]
+  @return       Pointer to the loaded hash table [khash_t(name)*]
+ */
+#define kh_load(name, path) khash_load_##name(path)
+
+/*! @function
+  @abstract     Test whether a bucket contains data.
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  x     Iterator to the bucket [khint_t]
+  @return       1 if containing data; 0 otherwise [int]
+ */
+#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
+
+/*! @function
+  @abstract     Get key given an iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  x     Iterator to the bucket [khint_t]
+  @return       Key [type of keys]
+ */
+#define kh_key(h, x) ((h)->keys[x])
+
+/*! @function
+  @abstract     Get value given an iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  x     Iterator to the bucket [khint_t]
+  @return       Value [type of values]
+  @discussion   For hash sets, calling this results in a segfault.
+ */
+#define kh_val(h, x) ((h)->vals[x])
+
+/*! @function
+  @abstract     Alias of kh_val()
+ */
+#define kh_value(h, x) ((h)->vals[x])
+
+/*! @function
+  @abstract     Get the start iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       The start iterator [khint_t]
+ */
+#define kh_begin(h) (khint_t)(0)
+
+/*! @function
+  @abstract     Get the end iterator
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       The end iterator [khint_t]
+ */
+#define kh_end(h) ((h)->n_buckets)
+
+/*! @function
+  @abstract     Get the number of elements in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       Number of elements in the hash table [khint_t]
+ */
+#define kh_size(h) ((h)->size)
+
+/*! @function
+  @abstract     Get the number of buckets in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @return       Number of buckets in the hash table [khint_t]
+ */
+#define kh_n_buckets(h) ((h)->n_buckets)
+
+/*! @function
+  @abstract     Iterate over the entries in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  kvar  Variable to which key will be assigned
+  @param  vvar  Variable to which value will be assigned
+  @param  code  Block of code to execute
+ */
+#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
+    for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+        if (!kh_exist(h,__i)) continue; \
+        (kvar) = kh_key(h,__i); \
+        (vvar) = kh_val(h,__i); \
+        code; \
+    } }
+
+/*! @function
+  @abstract     Iterate over the values in the hash table
+  @param  h     Pointer to the hash table [khash_t(name)*]
+  @param  vvar  Variable to which value will be assigned
+  @param  code  Block of code to execute
+ */
+#define kh_foreach_value(h, vvar, code) { khint_t __i; \
+    for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
+        if (!kh_exist(h,__i)) continue; \
+        (vvar) = kh_val(h,__i); \
+        code; \
+    } }
+
+/* More convenient interfaces */
+
+/*! @function
+  @abstract     Instantiate a hash set containing integer keys
+  @param  name  Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT(name) \
+    KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+  @abstract     Instantiate a hash map containing integer keys
+  @param  name  Name of the hash table [symbol]
+  @param  khval_t  Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT(name, khval_t) \
+    KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
+
+/*! @function
+  @abstract     Instantiate a hash set containing 64-bit integer keys
+  @param  name  Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_INT64(name) \
+    KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
+
+/*! @function
+  @abstract     Instantiate a hash map containing 64-bit integer keys
+  @param  name  Name of the hash table [symbol]
+  @param  khval_t  Type of values [type]
+ */
+#define KHASH_MAP_INIT_INT64(name, khval_t) \
+    KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
+
+typedef const char *kh_cstr_t;
+/*! @function
+  @abstract     Instantiate a hash set containing const char* keys
+  @param  name  Name of the hash table [symbol]
+ */
+#define KHASH_SET_INIT_STR(name) \
+    KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
+
+/*! @function
+  @abstract     Instantiate a hash map containing const char* keys
+  @param  name  Name of the hash table [symbol]
+  @param  khval_t  Type of values [type]
+ */
+#define KHASH_MAP_INIT_STR(name, khval_t) \
+    KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
+
+#endif /* __AC_KHASH_H */
\ No newline at end of file
diff --git a/nyx/kvm_nested.c b/nyx/kvm_nested.c
new file mode 100644
index 0000000000..521c44f234
--- /dev/null
+++ b/nyx/kvm_nested.c
@@ -0,0 +1,457 @@
+#include "nyx/kvm_nested.h"
+#include "cpu.h"
+#include <linux/kvm.h> /* KVM_GET_NESTED_STATE */
+#include "nyx/debug.h"
+#include "exec/ram_addr.h"
+#include "qemu/rcu_queue.h"
+#include "nyx/state.h"
+#include "sysemu/kvm.h"
+#include "pt.h"
+
+#define PPAGE_SIZE 0x1000
+#define PENTRIES 0x200
+#define PLEVEL_4_SHIFT 12
+#define PLEVEL_3_SHIFT 21
+#define PLEVEL_2_SHIFT 30
+#define PLEVEL_1_SHIFT 39
+#define SIGN_EXTEND_TRESHOLD 0x100
+#define SIGN_EXTEND 0xFFFF000000000000ULL
+#define PAGETABLE_MASK 0xFFFFFFFFFF000ULL
+#define CHECK_BIT(var,pos) !!(((var) & (1ULL<<(pos))))
+
+
+struct vmcs_hdr {
+    uint32_t revision_id:31;
+    uint32_t shadow_vmcs:1;
+};
+
+struct __attribute__((__packed__)) vmcs12 {
+    /* According to the Intel spec, a VMCS region must start with the
+     * following two fields. Then follow implementation-specific data.
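+     *
+     * Note: this struct mirrors struct vmcs12 from Linux KVM's VMX code;
+     * it is cast directly onto the opaque payload returned by
+     * KVM_GET_NESTED_STATE below, so field order and sizes must match the
+     * running kernel.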
+ */ + struct vmcs_hdr hdr; + uint32_t abort; + + uint32_t launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ + uint32_t padding[7]; /* room for future expansion */ + + uint64_t io_bitmap_a; + uint64_t io_bitmap_b; + uint64_t msr_bitmap; + uint64_t vm_exit_msr_store_addr; + uint64_t vm_exit_msr_load_addr; + uint64_t vm_entry_msr_load_addr; + uint64_t tsc_offset; + uint64_t virtual_apic_page_addr; + uint64_t apic_access_addr; + uint64_t posted_intr_desc_addr; + uint64_t ept_pointer; + uint64_t eoi_exit_bitmap0; + uint64_t eoi_exit_bitmap1; + uint64_t eoi_exit_bitmap2; + uint64_t eoi_exit_bitmap3; + uint64_t xss_exit_bitmap; + uint64_t guest_physical_address; + uint64_t vmcs_link_pointer; + uint64_t guest_ia32_debugctl; + uint64_t guest_ia32_pat; + uint64_t guest_ia32_efer; + uint64_t guest_ia32_perf_global_ctrl; + uint64_t guest_pdptr0; + uint64_t guest_pdptr1; + uint64_t guest_pdptr2; + uint64_t guest_pdptr3; + uint64_t guest_bndcfgs; + uint64_t host_ia32_pat; + uint64_t host_ia32_efer; + uint64_t host_ia32_perf_global_ctrl; + uint64_t vmread_bitmap; + uint64_t vmwrite_bitmap; + uint64_t vm_function_control; + uint64_t eptp_list_address; + uint64_t pml_address; + uint64_t padding64[3]; /* room for future expansion */ + /* + * To allow migration of L1 (complete with its L2 guests) between + * machines of different natural widths (32 or 64 bit), we cannot have + * unsigned long fields with no explict size. We use uint64_t (aliased + * uint64_t) instead. Luckily, x86 is little-endian. + */ + uint64_t cr0_guest_host_mask; + uint64_t cr4_guest_host_mask; + uint64_t cr0_read_shadow; + uint64_t cr4_read_shadow; + uint64_t cr3_target_value0; + uint64_t cr3_target_value1; + uint64_t cr3_target_value2; + uint64_t cr3_target_value3; + uint64_t exit_qualification; + uint64_t guest_linear_address; + uint64_t guest_cr0; + uint64_t guest_cr3; + uint64_t guest_cr4; + uint64_t guest_es_base; + uint64_t guest_cs_base; + uint64_t guest_ss_base; + uint64_t guest_ds_base; + uint64_t guest_fs_base; + uint64_t guest_gs_base; + uint64_t guest_ldtr_base; + uint64_t guest_tr_base; + uint64_t guest_gdtr_base; + uint64_t guest_idtr_base; + uint64_t guest_dr7; + uint64_t guest_rsp; + uint64_t guest_rip; + uint64_t guest_rflags; + uint64_t guest_pending_dbg_exceptions; + uint64_t guest_sysenter_esp; + uint64_t guest_sysenter_eip; + uint64_t host_cr0; + uint64_t host_cr3; + uint64_t host_cr4; + uint64_t host_fs_base; + uint64_t host_gs_base; + uint64_t host_tr_base; + uint64_t host_gdtr_base; + uint64_t host_idtr_base; + uint64_t host_ia32_sysenter_esp; + uint64_t host_ia32_sysenter_eip; + uint64_t host_rsp; + uint64_t host_rip; + uint64_t paddingl[8]; /* room for future expansion */ + uint32_t pin_based_vm_exec_control; + uint32_t cpu_based_vm_exec_control; + uint32_t exception_bitmap; + uint32_t page_fault_error_code_mask; + uint32_t page_fault_error_code_match; + uint32_t cr3_target_count; + uint32_t vm_exit_controls; + uint32_t vm_exit_msr_store_count; + uint32_t vm_exit_msr_load_count; + uint32_t vm_entry_controls; + uint32_t vm_entry_msr_load_count; + uint32_t vm_entry_intr_info_field; + uint32_t vm_entry_exception_error_code; + uint32_t vm_entry_instruction_len; + uint32_t tpr_threshold; + uint32_t secondary_vm_exec_control; + uint32_t vm_instruction_error; + uint32_t vm_exit_reason; + uint32_t vm_exit_intr_info; + uint32_t vm_exit_intr_error_code; + uint32_t idt_vectoring_info_field; + uint32_t idt_vectoring_error_code; + uint32_t vm_exit_instruction_len; + uint32_t vmx_instruction_info; + uint32_t 
guest_es_limit; + uint32_t guest_cs_limit; + uint32_t guest_ss_limit; + uint32_t guest_ds_limit; + uint32_t guest_fs_limit; + uint32_t guest_gs_limit; + uint32_t guest_ldtr_limit; + uint32_t guest_tr_limit; + uint32_t guest_gdtr_limit; + uint32_t guest_idtr_limit; + uint32_t guest_es_ar_bytes; + uint32_t guest_cs_ar_bytes; + uint32_t guest_ss_ar_bytes; + uint32_t guest_ds_ar_bytes; + uint32_t guest_fs_ar_bytes; + uint32_t guest_gs_ar_bytes; + uint32_t guest_ldtr_ar_bytes; + uint32_t guest_tr_ar_bytes; + uint32_t guest_interruptibility_info; + uint32_t guest_activity_state; + uint32_t guest_sysenter_cs; + uint32_t host_ia32_sysenter_cs; + uint32_t vmx_preemption_timer_value; + uint32_t padding32[7]; /* room for future expansion */ + uint16_t virtual_processor_id; + uint16_t posted_intr_nv; + uint16_t guest_es_selector; + uint16_t guest_cs_selector; + uint16_t guest_ss_selector; + uint16_t guest_ds_selector; + uint16_t guest_fs_selector; + uint16_t guest_gs_selector; + uint16_t guest_ldtr_selector; + uint16_t guest_tr_selector; + uint16_t guest_intr_status; + uint16_t host_es_selector; + uint16_t host_cs_selector; + uint16_t host_ss_selector; + uint16_t host_ds_selector; + uint16_t host_fs_selector; + uint16_t host_gs_selector; + uint16_t host_tr_selector; + uint16_t guest_pml_index; +}; + + +static void write_address(uint64_t address, uint64_t size, uint64_t prot){ + static uint64_t next_address = PAGETABLE_MASK; + static uint64_t last_address = 0x0; + static uint64_t last_prot = 0; + if(address != next_address || prot != last_prot){ + /* do not print guard pages or empty pages without any permissions */ + if(last_address && (CHECK_BIT(last_prot, 1) || !CHECK_BIT(last_prot, 63))){ + if(CHECK_BIT(last_prot, 1) && !CHECK_BIT(last_prot, 63)){ + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c [WARNING]", + last_address, next_address, + CHECK_BIT(last_prot, 1) ? 'W' : '-', + CHECK_BIT(last_prot, 2) ? 'U' : 'K', + !CHECK_BIT(last_prot, 63)? 'X' : '-'); + } + else{ + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c", + last_address, next_address, + CHECK_BIT(last_prot, 1) ? 'W' : '-', + CHECK_BIT(last_prot, 2) ? 'U' : 'K', + !CHECK_BIT(last_prot, 63)? 
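/* prot bit encoding used here: bit 1 = writable (W), bit 2 = user (U/K),
   bit 63 = NX; in the walk below, bit 7 (PS) of a level-2/level-3 entry
   marks a 1 GiB / 2 MiB large page. */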
'X' : '-'); + } + } + last_address = address; + } + next_address = address+size; + last_prot = prot; + +} + +void print_48_paging(uint64_t cr3){ + uint64_t paging_entries_level_1[PENTRIES]; + uint64_t paging_entries_level_2[PENTRIES]; + uint64_t paging_entries_level_3[PENTRIES]; + uint64_t paging_entries_level_4[PENTRIES]; + + uint64_t address_identifier_1, address_identifier_2, address_identifier_3, address_identifier_4; + uint32_t i1, i2, i3,i4; + + cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_level_1, PPAGE_SIZE, false); + for(i1 = 0; i1 < 512; i1++){ + if(paging_entries_level_1[i1]){ + address_identifier_1 = ((uint64_t)i1) << PLEVEL_1_SHIFT; + if (i1 & SIGN_EXTEND_TRESHOLD){ + address_identifier_1 |= SIGN_EXTEND; + } + if(CHECK_BIT(paging_entries_level_1[i1], 0)){ /* otherwise swapped out */ + cpu_physical_memory_rw((paging_entries_level_1[i1]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_2, PPAGE_SIZE, false); + for(i2 = 0; i2 < PENTRIES; i2++){ + if(paging_entries_level_2[i2]){ + address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) + address_identifier_1; + if (CHECK_BIT(paging_entries_level_2[i2], 0)){ /* otherwise swapped out */ + if((paging_entries_level_2[i2]&PAGETABLE_MASK) == (paging_entries_level_1[i1]&PAGETABLE_MASK)){ + /* loop */ + continue; + } + + if (CHECK_BIT(paging_entries_level_2[i2], 7)){ + write_address(address_identifier_2, 0x40000000, (uint64_t)paging_entries_level_2[i2] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1))); + } + else{ + /* otherwise this PDPE references a 1GB page */ + cpu_physical_memory_rw((paging_entries_level_2[i2]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_3, PPAGE_SIZE, false); + for(i3 = 0; i3 < PENTRIES; i3++){ + if(paging_entries_level_3[i3]){ + address_identifier_3 = (((uint64_t)i3) << PLEVEL_3_SHIFT) + address_identifier_2; + if (CHECK_BIT(paging_entries_level_3[i3], 0)){ /* otherwise swapped out */ + if (CHECK_BIT(paging_entries_level_3[i3], 7)){ + write_address(address_identifier_3, 0x200000, (uint64_t)paging_entries_level_3[i3] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1))); + } + else{ + cpu_physical_memory_rw((paging_entries_level_3[i3]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_4, PPAGE_SIZE, false); + for(i4 = 0; i4 < PENTRIES; i4++){ + if(paging_entries_level_4[i4]){ + address_identifier_4 = (((uint64_t)i4) << PLEVEL_4_SHIFT) + address_identifier_3; + if (CHECK_BIT(paging_entries_level_4[i4], 0)){ + write_address(address_identifier_4, 0x1000, (uint64_t)paging_entries_level_4[i4] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1))); + } + } + } + } + } + } + } + + } + } + } + } + } + } + } + write_address(0, 0x1000, 0); +} + +/* +static bool change_page_permissions(uint64_t phys_addr, CPUState *cpu){ + RAMBlock *block; + + //MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!memcmp(block->idstr, "pc.ram", 6)){ + printf("FOUND AND MODIFIED! 
%lx\n", mprotect((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_NONE)); + break; + } + } + + return true; +} +*/ + +uint64_t get_nested_guest_rip(CPUState *cpu){ + + X86CPU *cpux86 = X86_CPU(cpu); + CPUX86State *env = &cpux86->env; + + kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state); + + struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data); + + return saved_vmcs->guest_rip; +} + +uint64_t get_nested_host_rip(CPUState *cpu){ + + X86CPU *cpux86 = X86_CPU(cpu); + CPUX86State *env = &cpux86->env; + + kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state); + + struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data); + + return saved_vmcs->host_rip; +} + +uint64_t get_nested_host_cr3(CPUState *cpu){ + + X86CPU *cpux86 = X86_CPU(cpu); + CPUX86State *env = &cpux86->env; + + kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state); + + struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data); + + return saved_vmcs->host_cr3; +} + +void set_nested_rip(CPUState *cpu, uint64_t rip){ + + X86CPU *cpux86 = X86_CPU(cpu); + CPUX86State *env = &cpux86->env; + + //kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state); + + struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data); + + saved_vmcs->guest_rip = rip; + + //return saved_vmcs->guest_rip; +} + +void kvm_nested_get_info(CPUState *cpu){ + + X86CPU *cpux86 = X86_CPU(cpu); + CPUX86State *env = &cpux86->env; + + kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state); + + struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data); + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3); + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4); + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx", saved_vmcs->host_ia32_efer); + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0); + + return; + + //cpu->parent_cr3 = saved_vmcs->host_cr3+0x1000; + GET_GLOBAL_STATE()->parent_cr3 = saved_vmcs->host_cr3+0x1000; + fprintf(stderr, "saved_vmcs->guest_cr3: %lx %lx %lx\n", saved_vmcs->guest_cr3, saved_vmcs->host_cr3, env->cr[3]); + pt_set_cr3(cpu, saved_vmcs->host_cr3+0x1000, false); /* USERSPACE */ + //pt_set_cr3(cpu, saved_vmcs->host_cr3+0x1000, false); /* KERNELSPACE QEMU fuzzing fix...fucking kpti (https://gruss.cc/files/kaiser.pdf)!!! 
*/ + + /* let's modify page permissions of our CR3 referencing PTs */ + //change_page_permissions(cpu->parent_cr3, cpu); + + + if (!(saved_vmcs->host_cr0 & CR0_PG_MASK)) { + printf("PG disabled\n"); + } + else{ + if (saved_vmcs->host_cr4 & CR4_PAE_MASK) { + if (saved_vmcs->host_ia32_efer & (1 << 10)) { + if (saved_vmcs->host_cr0 & CR4_LA57_MASK) { + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_la57"); + abort(); + //mem_info_la57(mon, env); + } else { + QEMU_PT_PRINTF(NESTED_VM_PREFIX, " ==== L1 Page Tables ===="); + print_48_paging(saved_vmcs->host_cr3); + + if(saved_vmcs->ept_pointer){ + QEMU_PT_PRINTF(NESTED_VM_PREFIX, " ==== L2 Page Tables ===="); + print_48_paging(saved_vmcs->ept_pointer); + } + //mem_info_la48(mon, env); + } + } + else{ + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_pae32"); + abort(); + //mem_info_pae32(mon, env); + } + } + else { + QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_32"); + abort(); + //mem_info_32(mon, env); + } + } +} + +#define AREA_DESC_LEN 256 +#define MAGIC_NUMBER 0x41584548U + +typedef struct { + uint32_t base; + uint32_t size; + uint32_t virtual_base; + char desc[AREA_DESC_LEN]; +}area_t_export_t; + +typedef struct { + uint32_t magic; + uint8_t num_mmio_areas; + uint8_t num_io_areas; + uint8_t num_alloc_areas; + uint8_t padding; +}config_t; + +void print_configuration(FILE *stream, void* configuration, size_t size){ +//void print_configuration(void* configuration, size_t size){ + + fprintf(stream, "%s: size: %lx\n", __func__, size); + assert((size-sizeof(config_t))%sizeof(area_t_export_t) == 0); + + assert(((config_t*)configuration)->magic == MAGIC_NUMBER); + + fprintf(stream, "%s: num_mmio_areas: %x\n", __func__, ((config_t*)configuration)->num_mmio_areas); + fprintf(stream, "%s: num_io_areas: %x\n", __func__, ((config_t*)configuration)->num_io_areas); + fprintf(stream, "%s: num_alloc_areas: %x\n", __func__, ((config_t*)configuration)->num_alloc_areas); + + + for(int i = 0; i < ((config_t*)configuration)->num_mmio_areas; i++){ + fprintf(stream, "\t-> MMIO: 0x%x (V: 0x%x) [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base, + ((area_t_export_t*)(configuration+sizeof(config_t)))[i].virtual_base, + ((area_t_export_t*)(configuration+sizeof(config_t)))[i].size, + ((area_t_export_t*)(configuration+sizeof(config_t)))[i].desc ); + } + + for(int i = ((config_t*)configuration)->num_mmio_areas; i < (((config_t*)configuration)->num_mmio_areas+((config_t*)configuration)->num_io_areas); i++){ + fprintf(stream, "\t-> IO: 0x%x [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base, + ((area_t_export_t*)(configuration+sizeof(config_t)))[i].size, + ((area_t_export_t*)(configuration+sizeof(config_t)))[i].desc ); + } +} \ No newline at end of file diff --git a/nyx/kvm_nested.h b/nyx/kvm_nested.h new file mode 100644 index 0000000000..bf284a7154 --- /dev/null +++ b/nyx/kvm_nested.h @@ -0,0 +1,13 @@ +#pragma once +#include "qemu/osdep.h" + +void print_48_paging(uint64_t cr3); +void kvm_nested_get_info(CPUState *cpu); +uint64_t get_nested_guest_rip(CPUState *cpu); +uint64_t get_nested_host_rip(CPUState *cpu); + + +uint64_t get_nested_host_cr3(CPUState *cpu); + +void set_nested_rip(CPUState *cpu, uint64_t rip); +void print_configuration(FILE *stream, void* configuration, size_t size); \ No newline at end of file diff --git a/nyx/memory_access.c b/nyx/memory_access.c new file mode 100644 index 0000000000..f1660b3e7e --- /dev/null +++ b/nyx/memory_access.c @@ -0,0 +1,1388 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is 
part of QEMU-PT (kAFL).
+
+QEMU-PT is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+QEMU-PT is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+#include <sys/mman.h> /* mmap()/munmap()/mprotect() used throughout this file */
+#include "qemu/osdep.h"
+#include "sysemu/sysemu.h"
+#include "cpu.h"
+#include "exec/ram_addr.h"
+#include "qemu/rcu_queue.h"
+
+#include "memory_access.h"
+#include "hypercall.h"
+#include "debug.h"
+#include "nyx/fast_vm_reload.h"
+#include "exec/gdbstub.h"
+#include "nyx/state.h"
+#include "sysemu/kvm.h"
+#include "nyx/helpers.h"
+
+static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr);
+static uint64_t get_48_paging_phys_addr_snapshot(uint64_t cr3, uint64_t addr);
+
+#define x86_64_PAGE_SIZE 0x1000
+#define x86_64_PAGE_MASK ~(x86_64_PAGE_SIZE - 1)
+
+static void set_mem_mode(CPUState *cpu){
+    kvm_arch_get_registers(cpu);
+
+    X86CPU *cpux86 = X86_CPU(cpu);
+    CPUX86State *env = &cpux86->env;
+
+    if (!(env->cr[0] & CR0_PG_MASK)) {
+        GET_GLOBAL_STATE()->mem_mode = mm_32_protected;
+        return;
+    }
+    else{
+        if (env->cr[4] & CR4_PAE_MASK) {
+            if (env->hflags & HF_LMA_MASK) {
+                if (env->cr[4] & CR4_LA57_MASK) {
+                    GET_GLOBAL_STATE()->mem_mode = mm_64_l5_paging;
+                    return;
+                } else {
+                    GET_GLOBAL_STATE()->mem_mode = mm_64_l4_paging;
+                    return;
+                }
+            }
+            else{
+                GET_GLOBAL_STATE()->mem_mode = mm_32_pae;
+                return;
+            }
+        }
+        else {
+            GET_GLOBAL_STATE()->mem_mode = mm_32_paging;
+            return;
+        }
+    }
+}
+
+/* Warning: This might break memory handling for hypervisor fuzzing => FIXME LATER */
+static uint64_t get_paging_phys_addr(CPUState *cpu, uint64_t cr3, uint64_t addr){
+    if(GET_GLOBAL_STATE()->mem_mode == mm_unkown){
+        set_mem_mode(cpu);
+    }
+
+    switch(GET_GLOBAL_STATE()->mem_mode){
+        case mm_32_protected:
+            return addr & 0xFFFFFFFFULL;
+        case mm_32_paging:
+            fprintf(stderr, "mem_mode: mm_32_paging not implemented!\n");
+            abort();
+        case mm_32_pae:
+            fprintf(stderr, "mem_mode: mm_32_pae not implemented!\n");
+            abort();
+        case mm_64_l4_paging:
+            return get_48_paging_phys_addr(cr3, addr);
+        case mm_64_l5_paging:
+            fprintf(stderr, "mem_mode: mm_64_l5_paging not implemented!\n");
+            abort();
+        case mm_unkown:
+            fprintf(stderr, "mem_mode: unknown!\n");
+            abort();
+    }
+    return 0;
+}
+
+static uint64_t get_paging_phys_addr_snapshot(CPUState *cpu, uint64_t cr3, uint64_t addr){
+    if(GET_GLOBAL_STATE()->mem_mode == mm_unkown){
+        set_mem_mode(cpu);
+    }
+
+    switch(GET_GLOBAL_STATE()->mem_mode){
+        case mm_32_protected:
+            return addr & 0xFFFFFFFFULL;
+        case mm_32_paging:
+            fprintf(stderr, "mem_mode: mm_32_paging not implemented!\n");
+            abort();
+        case mm_32_pae:
+            fprintf(stderr, "mem_mode: mm_32_pae not implemented!\n");
+            abort();
+        case mm_64_l4_paging:
+            return get_48_paging_phys_addr_snapshot(cr3, addr);
+        case mm_64_l5_paging:
+            fprintf(stderr, "mem_mode: mm_64_l5_paging not implemented!\n");
+            abort();
+        case mm_unkown:
+            fprintf(stderr, "mem_mode: unknown!\n");
+            abort();
+    }
+    return 0;
+}
+
+
+//bool is_addr_mapped_ht(uint64_t address, CPUState *cpu, uint64_t cr3, bool host);
+
+bool read_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){
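+    /* Physical accesses operate on QEMU's flat physical address space and
+     * do not consult the guest's page tables; the register sync below only
+     * ensures callers observe a consistent vCPU state. */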
kvm_arch_get_registers(cpu); + cpu_physical_memory_read(address, data, size); + return true; +} + +bool write_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){ + kvm_arch_get_registers(cpu); + cpu_physical_memory_write(address, data, size); + return true; +} + +static void refresh_kvm(CPUState *cpu){ + //int ret = 0; + if (!cpu->vcpu_dirty) { + //kvm_arch_get_registers_fast(cpu); + kvm_arch_get_registers(cpu); + + //cpu->vcpu_dirty = true; + } +} + +static void refresh_kvm_non_dirty(CPUState *cpu){ + if (!cpu->vcpu_dirty) { + kvm_arch_get_registers_fast(cpu); + //kvm_arch_get_registers(cpu); + } +} + +//uint8_t* buffer = NULL; +/* +void set_illegal_payload(void){ + printf("%s\n", __func__); + if(buffer){ + memset(buffer, 0xff, 4); + } + else{ + abort(); + } +} +*/ + +bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu){ + //assert(0); /* nested code -> test me later */ + + assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size); + RAMBlock *block; + refresh_kvm_non_dirty(cpu); + + uint32_t i = slot; + + phys_addr = address_to_ram_offset(phys_addr); + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!memcmp(block->idstr, "pc.ram", 6)){ + /* TODO: put assert calls here */ + munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE); + mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE)); + + //printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE)); + //printf("MMAP: %p\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE))); + + fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr); + break; + } + } + + return true; +} + +bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3){ + + assert(fd && shm_size); + assert((slot*x86_64_PAGE_SIZE) < shm_size); + + RAMBlock *block; + refresh_kvm_non_dirty(cpu); + + uint32_t i = slot; + + uint64_t phys_addr = addr; + if(virtual){ + phys_addr = get_paging_phys_addr(cpu, cr3, (addr & x86_64_PAGE_MASK)); + + phys_addr = address_to_ram_offset(phys_addr); + } + + debug_fprintf(stderr, "%s: addr => %lx phys_addr => %lx\n", __func__, addr, phys_addr); + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!memcmp(block->idstr, "pc.ram", 6)){ + /* TODO: put assert calls here */ + munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE); + mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE)); + + //printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE)); + //printf("MMAP: %p\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE))); + + fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr); + break; + } + } + + return true; +} + + + +bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu){ + //assert(0); /* nested code -> test me later */ + + assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size); + RAMBlock *block; + refresh_kvm_non_dirty(cpu); + + uint32_t i = slot; + + phys_addr = 
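/* Remap trick (same pattern as remap_payload_slot() above): translate to an
   offset within the "pc.ram" RAMBlock, munmap() the host page backing this
   guest-physical page, and mmap() the shared payload buffer over it with
   MAP_FIXED (read-only in this protected variant), so the fuzzer and the
   guest share the page without copies; the page is afterwards excluded from
   fast-reload snapshot restore. */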
address_to_ram_offset(phys_addr); + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!memcmp(block->idstr, "pc.ram", 6)){ + + /* TODO: put assert calls here */ + munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE); + mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ , MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE)); + + //printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE)); + //printf("MMAP: %p\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ , MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE))); + + fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr); + break; + } + } + + return true; +} + +bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu){ + assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size); + RAMBlock *block; + refresh_kvm_non_dirty(cpu); + + + for(uint32_t i = 0; i < (GET_GLOBAL_STATE()->shared_payload_buffer_size/x86_64_PAGE_SIZE); i++){ + //MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + //hwaddr phys_addr = cpu_get_phys_page_attrs_debug(cpu, ((virt_guest_addr+(i*x86_64_PAGE_SIZE)) & x86_64_PAGE_MASK), &attrs); + uint64_t phys_addr = get_paging_phys_addr(cpu, GET_GLOBAL_STATE()->parent_cr3, ((virt_guest_addr+(i*x86_64_PAGE_SIZE)) & x86_64_PAGE_MASK)); + + assert(phys_addr != 0xFFFFFFFFFFFFFFFFULL); + + phys_addr = address_to_ram_offset(phys_addr); + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!memcmp(block->idstr, "pc.ram", 6)){ + //printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE)); + if(munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE) == -1){ + fprintf(stderr, "munmap failed!\n"); + //exit(1); + assert(false); + } + //printf("MMAP: %lx\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE))); + + if(mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE)) == MAP_FAILED){ + fprintf(stderr, "mmap failed!\n"); + //exit(1); + assert(false); + } + + memset((block->host) + phys_addr, 0xab, 0x1000); + + fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr); + break; + } + } + } + return true; +} + +/* +bool set_guest_pages_readonly(uint64_t virt_guest_addr, uint64_t to, CPUState *cpu){ + RAMBlock *block; + refresh_kvm_non_dirty(cpu); + + void* cp = malloc(0x1000); + + + for(uint32_t i = 0; i < ((to-virt_guest_addr)/x86_64_PAGE_SIZE); i++){ + printf("%s -> %lx %lx\n", __func__, virt_guest_addr, virt_guest_addr+(i*0x1000)); + MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + //hwaddr phys_addr = cpu_get_phys_page_attrs_debug(cpu, ((virt_guest_addr+(i*x86_64_PAGE_SIZE)) & x86_64_PAGE_MASK), &attrs); + uint64_t phys_addr = get_48_paging_phys_addr(GET_GLOBAL_STATE()->parent_cr3, ((virt_guest_addr+(i*x86_64_PAGE_SIZE)) & x86_64_PAGE_MASK)); + + assert(phys_addr != 0xFFFFFFFFFFFFFFFFULL); + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!memcmp(block->idstr, "pc.ram", 6)){ + + if(mprotect((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ)){ + fprintf(stderr, "mprotect failed!\n"); + //exit(1); + assert(false); + } +*/ + /* + + //printf("MMUNMAP: %d\n", munmap((void*)(((uint64_t)block->host) + 
phys_addr), x86_64_PAGE_SIZE)); + memcpy(cp, (void*)(((uint64_t)block->host) + phys_addr), 0x1000); + if(munmap((void*)(((uint64_t)block->host) + phys_addr), x86_64_PAGE_SIZE) == -1){ + fprintf(stderr, "munmap failed!\n"); + //exit(1); + assert(false); + } + //printf("MMAP: %lx\n", mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE))); + + if(mmap((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_READ , MAP_ANONYMOUS | MAP_FIXED, 0, 0) == MAP_FAILED){ + fprintf(stderr, "mmap failed!\n"); + //exit(1); + assert(false); + } + memcpy((void*)(((uint64_t)block->host) + phys_addr), cp, 0x1000); + + + if(i == 0){ + buffer = (uint8_t*)(((uint64_t)block->host) + phys_addr); + } + //fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr); + break; + */ + /* + break; + } + } + } + free(cp); + return true; +} +*/ + +/* +bool read_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3){ + fprintf(stderr, "%s -> %lx\n", __func__, address); + CPUX86State *env = &(X86_CPU(cpu))->env; + uint64_t old_cr3 = 0; + bool return_value = false; + + + uint64_t old_cr4 = 0; + uint64_t old_hflags = 0; + + refresh_kvm(cpu); + + //refresh_kvm(cpu); + //old_cr3 = env->cr[3]; + //env->cr[3] = cr3; + //return_value = read_virtual_memory(address, data, size, cpu); + //env->cr[3] = old_cr3; + + + + old_cr3 = env->cr[3]; + env->cr[3] = cr3; + + old_cr4 = env->cr[4]; + env->cr[4] = CR4_PAE_MASK | old_cr4; + + old_hflags = env->hflags; + env->hflags = HF_LMA_MASK | old_hflags; + + return_value = read_virtual_memory(address, data, size, cpu); + env->cr[3] = old_cr3; + env->cr[4] = old_cr4; + env->hflags = old_hflags; + + return return_value; +} +*/ + +bool write_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3){ + CPUX86State *env = &(X86_CPU(cpu))->env; + uint64_t old_cr3 = 0; + bool return_value = false; + + + uint64_t old_cr4 = 0; + uint64_t old_hflags = 0; + + refresh_kvm(cpu); + + old_cr3 = env->cr[3]; + env->cr[3] = cr3; + + old_cr4 = env->cr[4]; + env->cr[4] = CR4_PAE_MASK | old_cr4; + + old_hflags = env->hflags; + env->hflags = HF_LMA_MASK | old_hflags; + return_value = write_virtual_memory(address, data, size, cpu); + env->cr[3] = old_cr3; + env->cr[4] = old_cr4; + env->hflags = old_hflags; + + + return return_value; +} + +bool write_virtual_shadow_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3){ + debug_fprintf(stderr, "%s\n", __func__); + CPUX86State *env = &(X86_CPU(cpu))->env; + uint64_t old_cr3 = 0; + bool return_value = false; + uint64_t old_cr4 = 0; + uint64_t old_hflags = 0; + + refresh_kvm(cpu); + old_cr3 = env->cr[3]; + env->cr[3] = cr3; + + old_cr4 = env->cr[4]; + env->cr[4] = CR4_PAE_MASK | old_cr4; + + old_hflags = env->hflags; + env->hflags = HF_LMA_MASK | old_hflags; + return_value = write_virtual_shadow_memory(address, data, size, cpu); + env->cr[3] = old_cr3; + env->cr[4] = old_cr4; + env->hflags = old_hflags; + + return return_value; +} + +/* +bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){ + uint8_t tmp_buf[x86_64_PAGE_SIZE]; + MemTxAttrs attrs; + hwaddr phys_addr; + int asidx; + + uint64_t amount_copied = 0; + + refresh_kvm(cpu); + + // copy per page + while(amount_copied < size){ + uint64_t len_to_copy = (size - amount_copied); + if(len_to_copy > x86_64_PAGE_SIZE) + len_to_copy = x86_64_PAGE_SIZE; + + asidx = 
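+
+ // [editor's note] The *_cr3 helpers above all follow one pattern: to
+ // access memory of a process other than the one currently scheduled,
+ // they swap CR3 to the target's page-table root and force CR4.PAE plus
+ // HF_LMA so the softmmu walk uses 64-bit 4-level paging, then restore
+ // the saved state. The pattern, condensed (names as in this file):
+ //
+ //   old_cr3 = env->cr[3];      env->cr[3]  = cr3;
+ //   old_cr4 = env->cr[4];      env->cr[4] |= CR4_PAE_MASK;
+ //   old_hflags = env->hflags;  env->hflags |= HF_LMA_MASK;
+ //   ok = write_virtual_memory(address, data, size, cpu);
+ //   env->cr[3] = old_cr3; env->cr[4] = old_cr4; env->hflags = old_hflags;
+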
cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED); + attrs = MEMTXATTRS_UNSPECIFIED; + phys_addr = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs); + + if (phys_addr == -1){ + uint64_t next_page = (address & x86_64_PAGE_MASK) + x86_64_PAGE_SIZE; + uint64_t len_skipped =next_page-address; + if(len_skipped > size-amount_copied){ + len_skipped = size-amount_copied; + } + + fprintf(stderr, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page); + QEMU_PT_PRINTF(MEM_PREFIX, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page); + memset( data+amount_copied, ' ', len_skipped); + address += len_skipped; + amount_copied += len_skipped; + continue; + } + + phys_addr += (address & ~x86_64_PAGE_MASK); + uint64_t remaining_on_page = x86_64_PAGE_SIZE - (address & ~x86_64_PAGE_MASK); + if(len_to_copy > remaining_on_page){ + len_to_copy = remaining_on_page; + } + + MemTxResult txt = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, tmp_buf, len_to_copy, 0); + if(txt){ + QEMU_PT_PRINTF(MEM_PREFIX, "Warning, read failed:\t%lx", address); + } + + memcpy(data+amount_copied, tmp_buf, len_to_copy); + + address += len_to_copy; + amount_copied += len_to_copy; + } + + return true; +} +*/ + +/* +bool is_addr_mapped2(uint64_t address, CPUState *cpu){ + MemTxAttrs attrs; + hwaddr phys_addr; + refresh_kvm(cpu); + attrs = MEMTXATTRS_UNSPECIFIED; + phys_addr = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs); + return phys_addr != -1; +} + + +bool is_addr_mapped(uint64_t address, CPUState *cpu){ + //fprintf(stderr, "%s -> %lx\n", __func__, address); + + CPUX86State *env = &(X86_CPU(cpu))->env; + + return is_addr_mapped_ht(address, cpu, env->cr[3], true); + + + + uint64_t old_cr4 = 0; + uint64_t old_hflags = 0; + bool return_value = false; + + refresh_kvm(cpu); + + old_cr4 = env->cr[4]; + env->cr[4] = CR4_PAE_MASK | old_cr4; + + old_hflags = env->hflags; + env->hflags = HF_LMA_MASK | old_hflags; + + return_value = is_addr_mapped2(address, cpu); + env->cr[4] = old_cr4; + env->hflags = old_hflags; + + assert(return_value == is_addr_mapped_ht(address, cpu, env->cr[3], true)); + + return return_value; +} + +bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3){ + return is_addr_mapped_ht(address, cpu, cr3, true); + fprintf(stderr, "%s -> %lx\n", __func__, address); + + CPUX86State *env = &(X86_CPU(cpu))->env; + uint64_t old_cr3 = 0; + uint64_t old_cr4 = 0; + uint64_t old_hflags = 0; + bool return_value = false; + bool return_value2 = false; + + fprintf(stderr, "%s: TRY TO REFRESH KVM\n", __func__); + refresh_kvm(cpu); + fprintf(stderr, "%s: TRY TO REFRESH KVM DONE\n", __func__); + + old_cr3 = env->cr[3]; + env->cr[3] = cr3; + + old_cr4 = env->cr[4]; + env->cr[4] = CR4_PAE_MASK | old_cr4; + + old_hflags = env->hflags; + env->hflags = HF_LMA_MASK | old_hflags; + + fprintf(stderr, "%s: TRY TO CALL is_addr_mapped2\n", __func__); + + return_value = is_addr_mapped2(address, cpu); + + fprintf(stderr, "%s: TRY TO CALL is_addr_mapped2 DONE\n", __func__); + + env->cr[3] = old_cr3; + env->cr[4] = old_cr4; + env->hflags = old_hflags; + + return_value2 = is_addr_mapped_ht(address, cpu, cr3, true); + + printf("%s: %d %d\n", __func__, return_value, return_value2); + assert(return_value == return_value2); + + return return_value; +} +*/ + +bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu) +{ + /* Todo: later &address_space_memory + phys_addr -> 
mmap SHARED */ + int asidx; + MemTxAttrs attrs; + hwaddr phys_addr; + MemTxResult res; + + uint64_t counter, l, i; + + counter = size; + while(counter != 0){ + l = x86_64_PAGE_SIZE; + if (l > counter) + l = counter; + + refresh_kvm(cpu); + //cpu_synchronize_state(cpu); + asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED); + attrs = MEMTXATTRS_UNSPECIFIED; + phys_addr = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs); + + if (phys_addr == -1){ + QEMU_PT_PRINTF(MEM_PREFIX, "phys_addr == -1:\t%lx", address); + return false; + } + + phys_addr += (address & ~x86_64_PAGE_MASK); + res = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, l, true); + if (res != MEMTX_OK){ + QEMU_PT_PRINTF(MEM_PREFIX, "!MEMTX_OK:\t%lx", address); + return false; + } + + i++; + data += l; + address += l; + counter -= l; + } + + return true; +} + + +void hexdump_virtual_memory(uint64_t address, uint32_t size, CPUState *cpu){ + assert(size < 0x100000); // 1MB max + uint64_t i = 0; + uint8_t tmp[17]; + uint8_t* data = malloc(size); + bool success = read_virtual_memory(address, data, size, cpu); + + if(success){ + for (i = 0; i < size; i++){ + if(!(i % 16)){ + if (i != 0){ + printf (" %s\n", tmp); + } + printf (" %04lx ", i); + } + printf (" %02x", data[i]); + + if ((data[i] < 0x20) || (data[i] > 0x7e)) + tmp[i % 16] = '.'; + else + tmp[i % 16] = data[i]; + tmp[(i % 16) + 1] = '\0'; + } + + while ((i % 16) != 0) { + printf (" "); + i++; + } + printf (" %s\n", tmp); + } + + free(data); +} + + +bool write_virtual_shadow_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu) +{ + debug_fprintf(stderr, "%s\n", __func__); + /* Todo: later &address_space_memory + phys_addr -> mmap SHARED */ + int asidx; + MemTxAttrs attrs; + hwaddr phys_addr; + MemTxResult res; + + uint64_t counter, l, i; + + void* shadow_memory = NULL; + + counter = size; + while(counter != 0){ + l = x86_64_PAGE_SIZE; + if (l > counter) + l = counter; + + refresh_kvm(cpu); + kvm_cpu_synchronize_state(cpu); + asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED); + attrs = MEMTXATTRS_UNSPECIFIED; + phys_addr = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs); + + if (phys_addr == -1){ + QEMU_PT_PRINTF(MEM_PREFIX, "phys_addr == -1:\t%lx", address); + return false; + } + + res = address_space_rw(cpu_get_address_space(cpu, asidx), (phys_addr + (address & ~x86_64_PAGE_MASK)), MEMTXATTRS_UNSPECIFIED, data, l, true); + if (res != MEMTX_OK){ + QEMU_PT_PRINTF(MEM_PREFIX, "!MEMTX_OK:\t%lx", address); + return false; + } + + shadow_memory = fast_reload_get_physmem_shadow_ptr(get_fast_reload_snapshot(), phys_addr); + if (shadow_memory){ + memcpy(shadow_memory + (address & ~x86_64_PAGE_MASK), data, l); + } + else{ + QEMU_PT_PRINTF(MEM_PREFIX, "get_physmem_shadow_ptr(%lx) == NULL", phys_addr); + assert(false); + return false; + } + + phys_addr += (address & ~x86_64_PAGE_MASK); + + + i++; + data += l; + address += l; + counter -= l; + } + + return true; +} + +static int redqueen_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + static const uint8_t int3 = 0xcc; + + hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cs, GET_GLOBAL_STATE()->parent_cr3, bp->pc); + int asidx = cpu_asidx_from_attrs(cs, MEMTXATTRS_UNSPECIFIED); + + if (address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&bp->saved_insn, 1, 0) || + address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, 
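+/* [editor's note] Classic software-breakpoint insertion: the breakpoint's
+ * virtual address is translated through the parent CR3, the original byte
+ * at that physical address is saved in bp->saved_insn, and 0xCC (int3) is
+ * written in its place. The removal path below re-reads the byte and
+ * refuses to restore unless it is still 0xCC, so a guest that modified
+ * the page in the meantime surfaces as -EINVAL instead of being silently
+ * corrupted. */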
(uint8_t *)&int3, 1, 1)) { + //fprintf(stderr, "%s WRITTE AT %lx %lx failed!\n", __func__, bp->pc, phys_addr); + return -EINVAL; + } + + return 0; +} + +static int redqueen_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + uint8_t int3; + + hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cs, GET_GLOBAL_STATE()->parent_cr3, bp->pc); + int asidx = cpu_asidx_from_attrs(cs, MEMTXATTRS_UNSPECIFIED); + + if (address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&int3, 1, 0) || int3 != 0xcc || + address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&bp->saved_insn, 1, 1)) { + //fprintf(stderr, "%s failed\n", __func__); + return -EINVAL; + } + + return 0; +} + +static struct kvm_sw_breakpoint *redqueen_find_breakpoint(CPUState *cpu, target_ulong pc){ + struct kvm_sw_breakpoint *bp; + + QTAILQ_FOREACH(bp, &GET_GLOBAL_STATE()->redqueen_breakpoints, entry) { + if (bp->pc == pc) { + return bp; + } + } + return NULL; +} + +static int redqueen_breakpoints_active(CPUState *cpu){ + return !QTAILQ_EMPTY(&GET_GLOBAL_STATE()->redqueen_breakpoints); +} + +struct kvm_set_guest_debug_data { + struct kvm_guest_debug dbg; + int err; +}; + +static int redqueen_update_guest_debug(CPUState *cpu) { + struct kvm_set_guest_debug_data data; + + data.dbg.control = 0; + + if (redqueen_breakpoints_active(cpu)) { + data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; + } + + return kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, &data.dbg); + + return 0; +} + +static void redqueen_remove_all_breakpoints(CPUState *cpu) { + struct kvm_sw_breakpoint *bp, *next; + + QTAILQ_FOREACH_SAFE(bp, &GET_GLOBAL_STATE()->redqueen_breakpoints, entry, next) { + redqueen_remove_sw_breakpoint(cpu, bp); + QTAILQ_REMOVE(&GET_GLOBAL_STATE()->redqueen_breakpoints, bp, entry); + g_free(bp); + } + + redqueen_update_guest_debug(cpu); +} + +static int redqueen_insert_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len){ + struct kvm_sw_breakpoint *bp; + int err; + + bp = redqueen_find_breakpoint(cpu, addr); + if (bp) { + bp->use_count++; + return 0; + } + + bp = g_malloc(sizeof(struct kvm_sw_breakpoint)); + bp->pc = addr; + bp->use_count = 1; + + err = redqueen_insert_sw_breakpoint(cpu, bp); + if (err) { + g_free(bp); + return err; + } + + QTAILQ_INSERT_HEAD(&GET_GLOBAL_STATE()->redqueen_breakpoints, bp, entry); + + err = redqueen_update_guest_debug(cpu); + if(err){ + return err; + } + + return 0; +} + +static int redqueen_remove_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len){ + struct kvm_sw_breakpoint *bp; + int err; + + bp = redqueen_find_breakpoint(cpu, addr); + if (!bp) { + return -ENOENT; + } + + if (bp->use_count > 1) { + bp->use_count--; + return 0; + } + + err = redqueen_remove_sw_breakpoint(cpu, bp); + if (err) { + return err; + } + + QTAILQ_REMOVE(&GET_GLOBAL_STATE()->redqueen_breakpoints, bp, entry); + g_free(bp); + + err = redqueen_update_guest_debug(cpu); + if(err){ + return err; + } + + return 0; +} + +int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len){ + redqueen_insert_breakpoint(cpu, addr, len); + redqueen_update_guest_debug(cpu); + return 0; +} + + +int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len){ + //fprintf(stderr, "%s %lx\n", __func__, addr); + redqueen_remove_breakpoint(cpu, addr, len); + redqueen_update_guest_debug(cpu); + return 0; +} + +void remove_all_breakpoints(CPUState *cpu){ + redqueen_remove_all_breakpoints(cpu); +} + + + + + + + + + + + + + +#define 
PPAGE_SIZE 0x1000 +#define PENTRIES 0x200 +#define PLEVEL_4_SHIFT 12 +#define PLEVEL_3_SHIFT 21 +#define PLEVEL_2_SHIFT 30 +#define PLEVEL_1_SHIFT 39 +#define SIGN_EXTEND_TRESHOLD 0x100 +#define SIGN_EXTEND 0xFFFF000000000000ULL +#define PAGETABLE_MASK 0x1FFFFFFFFF000ULL +#define PML4_ENTRY_MASK 0x1FFFFFFFFF000ULL +#define PML3_ENTRY_MASK 0x1FFFFC0000000ULL +#define PML2_ENTRY_MASK 0x1FFFFFFE00000ULL + +#define CHECK_BIT(var,pos) !!(((var) & (1ULL<<(pos)))) + + +static void write_address(uint64_t address, uint64_t size, uint64_t prot){ + //fprintf(stderr, "%s %lx\n", __func__, address); + static uint64_t next_address = PAGETABLE_MASK; + static uint64_t last_address = 0x0; + static uint64_t last_prot = 0; + if(address != next_address || prot != last_prot){ + /* do not print guard pages or empty pages without any permissions */ + if(last_address && (CHECK_BIT(last_prot, 1) || !CHECK_BIT(last_prot, 63))){ + if(CHECK_BIT(last_prot, 1) && !CHECK_BIT(last_prot, 63)){ + fprintf(stderr, "%016lx - %016lx %c%c%c [WARNING]\n", + last_address, next_address, + CHECK_BIT(last_prot, 1) ? 'W' : '-', + CHECK_BIT(last_prot, 2) ? 'U' : 'K', + !CHECK_BIT(last_prot, 63)? 'X' : '-'); + } + else{ + fprintf(stderr, "%016lx - %016lx %c%c%c\n", + last_address, next_address, + CHECK_BIT(last_prot, 1) ? 'W' : '-', + CHECK_BIT(last_prot, 2) ? 'U' : 'K', + !CHECK_BIT(last_prot, 63)? 'X' : '-'); + } + } + last_address = address; + } + next_address = address+size; + last_prot = prot; + +} + +void print_48_paging2(uint64_t cr3){ + uint64_t paging_entries_level_1[PENTRIES]; + uint64_t paging_entries_level_2[PENTRIES]; + uint64_t paging_entries_level_3[PENTRIES]; + uint64_t paging_entries_level_4[PENTRIES]; + + uint64_t address_identifier_1, address_identifier_2, address_identifier_3, address_identifier_4; + uint32_t i1, i2, i3,i4; + + cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_level_1, PPAGE_SIZE, false); + for(i1 = 0; i1 < 512; i1++){ + if(paging_entries_level_1[i1]){ + address_identifier_1 = ((uint64_t)i1) << PLEVEL_1_SHIFT; + if (i1 & SIGN_EXTEND_TRESHOLD){ + address_identifier_1 |= SIGN_EXTEND; + } + if(CHECK_BIT(paging_entries_level_1[i1], 0)){ /* otherwise swapped out */ + cpu_physical_memory_rw((paging_entries_level_1[i1]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_2, PPAGE_SIZE, false); + for(i2 = 0; i2 < PENTRIES; i2++){ + if(paging_entries_level_2[i2]){ + address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) + address_identifier_1; + if (CHECK_BIT(paging_entries_level_2[i2], 0)){ /* otherwise swapped out */ + if((paging_entries_level_2[i2]&PAGETABLE_MASK) == (paging_entries_level_1[i1]&PAGETABLE_MASK)){ + /* loop */ + continue; + } + + if (CHECK_BIT(paging_entries_level_2[i2], 7)){ + write_address(address_identifier_2, 0x40000000, (uint64_t)paging_entries_level_2[i2] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1))); + } + else{ + /* otherwise this PDPE references a 1GB page */ + cpu_physical_memory_rw((paging_entries_level_2[i2]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_3, PPAGE_SIZE, false); + for(i3 = 0; i3 < PENTRIES; i3++){ + if(paging_entries_level_3[i3]){ + address_identifier_3 = (((uint64_t)i3) << PLEVEL_3_SHIFT) + address_identifier_2; + if (CHECK_BIT(paging_entries_level_3[i3], 0)){ /* otherwise swapped out */ + if (CHECK_BIT(paging_entries_level_3[i3], 7)){ + write_address(address_identifier_3, 0x200000, (uint64_t)paging_entries_level_3[i3] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1))); + } + else{ + cpu_physical_memory_rw((paging_entries_level_3[i3]&PAGETABLE_MASK), 
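+/* [editor's note] The bit tests in this walker follow the x86-64 PTE
+ * layout: bit 0 = present, bit 1 = writable, bit 2 = user-accessible,
+ * bit 7 = PS (page size: 1 GiB at the PDPT level, 2 MiB at the PD level),
+ * bit 63 = NX. write_address() above keeps only bits 1, 2 and 63, i.e.
+ *
+ *   prot = entry & ((1ULL << 63) | (1ULL << 2) | (1ULL << 1));
+ *
+ * which is why it prints W/U-or-K/X flags. Note that the in-line comment
+ * "otherwise this PDPE references a 1GB page" sits on the wrong branch:
+ * the then-branch (PS set) handles the 1 GiB mapping, while the
+ * else-branch descends to the next table level. */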
(uint8_t *) paging_entries_level_4, PPAGE_SIZE, false); + for(i4 = 0; i4 < PENTRIES; i4++){ + if(paging_entries_level_4[i4]){ + address_identifier_4 = (((uint64_t)i4) << PLEVEL_4_SHIFT) + address_identifier_3; + if (CHECK_BIT(paging_entries_level_4[i4], 0)){ + write_address(address_identifier_4, 0x1000, (uint64_t)paging_entries_level_4[i4] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1))); + } + } + } + } + } + } + } + + } + } + } + } + } + } + } + write_address(0, 0x1000, 0); +} + + + + +/* FIX ME */ +static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr){ + static int once = 0; + if(once){ + print_48_paging2(cr3); + once = 0; + } + + //if(addr == 0x7ffff7f4e000){ + //fprintf(stderr, "GDB ME NOW\n"); + //while(true){} + // print_48_paging2(cr3); + //} + + //fprintf(stderr, "CALLING: %s (%lx) %lx\n", __func__, cr3, addr); + + /* signedness broken af -> fix me! */ + uint16_t pml_4_index = (addr & 0xFF8000000000ULL) >> 39; + uint16_t pml_3_index = (addr & 0x0007FC0000000UL) >> 30; + uint16_t pml_2_index = (addr & 0x000003FE00000UL) >> 21; + uint16_t pml_1_index = (addr & 0x00000001FF000UL) >> 12; + + //if(addr == 0x7ffff7f4e000){ + // printf("pml_4_index: %lx\n", pml_4_index); + // printf("pml_3_index: %lx\n", pml_3_index); + // printf("pml_2_index: %lx\n", pml_2_index); + // printf("pml_1_index: %lx\n", pml_1_index); + // + //} + + uint64_t address_identifier_4; + uint64_t paging_entries_buffer[PENTRIES]; + + cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_4_index]){ + address_identifier_4 = ((uint64_t)pml_4_index) << PLEVEL_1_SHIFT; + if (pml_4_index & SIGN_EXTEND_TRESHOLD){ + address_identifier_4 |= SIGN_EXTEND; + } + if(CHECK_BIT(paging_entries_buffer[pml_4_index], 0)){ /* otherwise swapped out */ + cpu_physical_memory_rw((paging_entries_buffer[pml_4_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_3_index]){ + + //address_identifier_3 = (((uint64_t)pml_3_index) << PLEVEL_2_SHIFT) + address_identifier_4; + if (CHECK_BIT(paging_entries_buffer[pml_3_index], 0)){ /* otherwise swapped out */ + + if (CHECK_BIT(paging_entries_buffer[pml_3_index], 7)){ + /* 1GB PAGE */ + return (paging_entries_buffer[pml_3_index] & PML3_ENTRY_MASK) | (0x7FFFFFFF & addr); + } + else{ + cpu_physical_memory_rw((paging_entries_buffer[pml_3_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_2_index]){ + //address_identifier_2 = (((uint64_t)pml_2_index) << PLEVEL_3_SHIFT) + address_identifier_3; + if (CHECK_BIT(paging_entries_buffer[pml_2_index], 0)){ /* otherwise swapped out */ + if (CHECK_BIT(paging_entries_buffer[pml_2_index], 7)){ + /* 2MB PAGE */ + return (paging_entries_buffer[pml_2_index] & PML2_ENTRY_MASK) | (0x3FFFFF & addr); + } + else{ + cpu_physical_memory_rw((paging_entries_buffer[pml_2_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_1_index]){ + //uint64_t address_identifier_1 = (((uint64_t)pml_1_index) << PLEVEL_4_SHIFT) + address_identifier_2; + if (CHECK_BIT(paging_entries_buffer[pml_1_index], 0)){ + /* 4 KB PAGE */ + return (paging_entries_buffer[pml_1_index] & PML4_ENTRY_MASK) | (0xFFF & addr); + } + } + } + } + } + } + } + } + } + } + + //fprintf(stderr, "FAILED: %s %lx\n", __func__, addr); + //qemu_backtrace(); + //print_48_paging2(cr3); + return 0xFFFFFFFFFFFFFFFFULL; /* invalid */ +} + +/* FIX ME */ +static uint64_t 
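+/* [editor's note] get_48_paging_phys_addr() above resolves a guest-virtual
+ * address purely in software: a 48-bit VA splits into four 9-bit table
+ * indices plus a 12-bit page offset, and each level's entry is fetched
+ * with cpu_physical_memory_rw(). The index math, spelled out:
+ *
+ *   pml4 = (addr >> 39) & 0x1FF;   pdpt = (addr >> 30) & 0x1FF;
+ *   pd   = (addr >> 21) & 0x1FF;   pt   = (addr >> 12) & 0x1FF;
+ *
+ * Entries with PS set short-circuit the walk (1 GiB at PDPT, 2 MiB at PD),
+ * which is why those paths return early with a wider offset mask. */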
get_48_paging_phys_addr_snapshot(uint64_t cr3, uint64_t addr){ + //if(addr == 0x7ffff7f4e000){ + //fprintf(stderr, "GDB ME NOW\n"); + //while(true){} + // print_48_paging2(cr3); + //} + + //fprintf(stderr, "CALLING: %s (%lx) %lx\n", __func__, cr3, addr); + + /* signedness broken af -> fix me! */ + uint16_t pml_4_index = (addr & 0xFF8000000000ULL) >> 39; + uint16_t pml_3_index = (addr & 0x0007FC0000000UL) >> 30; + uint16_t pml_2_index = (addr & 0x000003FE00000UL) >> 21; + uint16_t pml_1_index = (addr & 0x00000001FF000UL) >> 12; + + //if(addr == 0x7ffff7f4e000){ + // printf("pml_4_index: %lx\n", pml_4_index); + // printf("pml_3_index: %lx\n", pml_3_index); + // printf("pml_2_index: %lx\n", pml_2_index); + // printf("pml_1_index: %lx\n", pml_1_index); + // + //} + + /* + printf("pml_4_index: %lx\n", pml_4_index); + printf("pml_3_index: %lx\n", pml_3_index); + printf("pml_2_index: %lx\n", pml_2_index); + printf("pml_1_index: %lx\n", pml_1_index); + */ + + fast_reload_t* snapshot = get_fast_reload_snapshot(); + + uint64_t address_identifier_4; + uint64_t paging_entries_buffer[PENTRIES]; + + read_snapshot_memory(snapshot, (cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE); + //cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_4_index]){ + address_identifier_4 = ((uint64_t)pml_4_index) << PLEVEL_1_SHIFT; + if (pml_4_index & SIGN_EXTEND_TRESHOLD){ + address_identifier_4 |= SIGN_EXTEND; + } + if(CHECK_BIT(paging_entries_buffer[pml_4_index], 0)){ /* otherwise swapped out */ + read_snapshot_memory(snapshot, (paging_entries_buffer[pml_4_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE); + //cpu_physical_memory_rw((paging_entries_buffer[pml_4_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_3_index]){ + + //address_identifier_3 = (((uint64_t)pml_3_index) << PLEVEL_2_SHIFT) + address_identifier_4; + if (CHECK_BIT(paging_entries_buffer[pml_3_index], 0)){ /* otherwise swapped out */ + + if (CHECK_BIT(paging_entries_buffer[pml_3_index], 7)){ + /* 1GB PAGE */ + return (paging_entries_buffer[pml_3_index] & PML3_ENTRY_MASK) | (0x7FFFFFFF & addr); + } + else{ + read_snapshot_memory(snapshot, (paging_entries_buffer[pml_3_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE); + //cpu_physical_memory_rw((paging_entries_buffer[pml_3_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_2_index]){ + //address_identifier_2 = (((uint64_t)pml_2_index) << PLEVEL_3_SHIFT) + address_identifier_3; + if (CHECK_BIT(paging_entries_buffer[pml_2_index], 0)){ /* otherwise swapped out */ + if (CHECK_BIT(paging_entries_buffer[pml_2_index], 7)){ + /* 2MB PAGE */ + return (paging_entries_buffer[pml_2_index] & PML2_ENTRY_MASK) | (0x3FFFFF & addr); + } + else{ + read_snapshot_memory(snapshot, (paging_entries_buffer[pml_2_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE); + //cpu_physical_memory_rw((paging_entries_buffer[pml_2_index]&PAGETABLE_MASK), (uint8_t *) paging_entries_buffer, PPAGE_SIZE, false); + if(paging_entries_buffer[pml_1_index]){ + //address_identifier_1 = (((uint64_t)pml_1_index) << PLEVEL_4_SHIFT) + address_identifier_2; + if (CHECK_BIT(paging_entries_buffer[pml_1_index], 0)){ + /* 4 KB PAGE */ + return (paging_entries_buffer[pml_1_index] & PML4_ENTRY_MASK) | (0xFFF & addr); + + } + } + } + } + } + } + } + } + } + } + + debug_fprintf(stderr, 
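+/* [editor's note] This _snapshot variant performs the same 4-level walk,
+ * but reads every table through read_snapshot_memory() rather than
+ * cpu_physical_memory_rw(), i.e. it translates against the page tables
+ * as they looked when the fast-reload snapshot was taken. The page cache
+ * relies on this: a page dumped from the snapshot view stays meaningful
+ * even after the live guest has remapped or freed it. */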
"FAILED: %s %lx\n", __func__, addr); + //qemu_backtrace(); + //print_48_paging2(cr3); + return 0xFFFFFFFFFFFFFFFFULL; /* invalid */ +} + +/* +bool is_addr_mapped_ht(uint64_t address, CPUState *cpu, uint64_t cr3, bool host){ + return (get_48_paging_phys_addr(cr3, address) != 0xFFFFFFFFFFFFFFFFULL); + + fprintf(stderr, "CALLING: %s\n", __func__); + kvm_arch_get_registers_fast(cpu); + fprintf(stderr, "CALLING: 2 %s\n", __func__); + + CPUX86State *env = &(X86_CPU(cpu))->env; + + fprintf(stderr, "CALLING: 3 %s\n", __func__); + + + if (!(env->cr[0] & CR0_PG_MASK)) { + fprintf(stderr, "PG disabled\n"); + abort(); + } + else{ + if (env->cr[4] & CR4_PAE_MASK) { + if (env->efer & (1 << 10)) { + if (env->cr[0] & CR4_LA57_MASK) { + fprintf(stderr, "mem_info_la57\n"); + abort(); + //mem_info_la57(mon, env); + } else { + return (get_48_paging_phys_addr(cr3, address) != 0xFFFFFFFFFFFFFFFFULL); + } + } + else{ + fprintf(stderr, "mem_info_pae32\n"); + abort(); + //mem_info_pae32(mon, env); + } + } + else { + fprintf(stderr, "mem_info_32\n"); + abort(); + //mem_info_32(mon, env); + } + } + return false; +} +*/ + +//#define DEBUG_48BIT_WALK + +bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){ + uint8_t tmp_buf[x86_64_PAGE_SIZE]; + //MemTxAttrs attrs; + hwaddr phys_addr; + int asidx; + + uint64_t amount_copied = 0; + + kvm_arch_get_registers_fast(cpu); + CPUX86State *env = &(X86_CPU(cpu))->env; + + // copy per page + while(amount_copied < size){ + uint64_t len_to_copy = (size - amount_copied); + if(len_to_copy > x86_64_PAGE_SIZE) + len_to_copy = x86_64_PAGE_SIZE; + + asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED); + //MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; +#ifdef DEBUG_48BIT_WALK + phys_addr_2 = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs); +#endif + phys_addr = (hwaddr)get_paging_phys_addr(cpu, env->cr[3], address) & 0xFFFFFFFFFFFFF000ULL;// != 0xFFFFFFFFFFFFFFFFULL) + //QEMU_PT_PRINTF(MEM_PREFIX, "TRANSLATE: %lx -> %lx == %lx", address, phys_addr, phys_addr_2); + +#ifdef DEBUG_48BIT_WALK + assert(phys_addr == phys_addr_2); +#endif + + if (phys_addr == 0xFFFFFFFFFFFFFFFFULL){ + uint64_t next_page = (address & x86_64_PAGE_MASK) + x86_64_PAGE_SIZE; + uint64_t len_skipped =next_page-address; + if(len_skipped > size-amount_copied){ + len_skipped = size-amount_copied; + } + + fprintf(stderr, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page); + QEMU_PT_PRINTF(MEM_PREFIX, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page); + memset( data+amount_copied, ' ', len_skipped); + address += len_skipped; + amount_copied += len_skipped; + continue; + } + + phys_addr += (address & ~x86_64_PAGE_MASK); + uint64_t remaining_on_page = x86_64_PAGE_SIZE - (address & ~x86_64_PAGE_MASK); + if(len_to_copy > remaining_on_page){ + len_to_copy = remaining_on_page; + } + + MemTxResult txt = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, tmp_buf, len_to_copy, 0); + if(txt){ + QEMU_PT_PRINTF(MEM_PREFIX, "Warning, read failed:\t%lx (%lx)", address, phys_addr); + } + + memcpy(data+amount_copied, tmp_buf, len_to_copy); + + address += len_to_copy; + amount_copied += len_to_copy; + } + + return true; +} + +bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3){ + return (get_paging_phys_addr(cpu, cr3, address) != 0xFFFFFFFFFFFFFFFFULL); +} + +bool is_addr_mapped(uint64_t address, CPUState *cpu){ + CPUX86State *env = &(X86_CPU(cpu))->env; + 
kvm_arch_get_registers_fast(cpu);
+    return (get_paging_phys_addr(cpu, env->cr[3], address) != 0xFFFFFFFFFFFFFFFFULL);
+}
+
+bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3){
+    return (get_paging_phys_addr_snapshot(cpu, cr3, address) != 0xFFFFFFFFFFFFFFFFULL);
+}
+
+bool dump_page_cr3_snapshot(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3){
+    fast_reload_t* snapshot = get_fast_reload_snapshot();
+    return read_snapshot_memory(snapshot, get_paging_phys_addr_snapshot(cpu, cr3, address), data, PPAGE_SIZE);
+}
+
+bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3){
+    hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cpu, cr3, address);
+    int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
+    if(phys_addr == 0xffffffffffffffffULL || address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0)){
+        if(phys_addr != 0xffffffffffffffffULL){
+            fprintf(stderr, "%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
+        }
+        return false;
+    }
+    return true;
+}
+
+bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu){
+    CPUX86State *env = &(X86_CPU(cpu))->env;
+    kvm_arch_get_registers_fast(cpu);
+    hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cpu, env->cr[3], address);
+    int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
+    if(phys_addr == 0xffffffffffffffffULL || address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0)){
+        if(phys_addr != 0xffffffffffffffffULL){
+            fprintf(stderr, "%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
+        }
+        /* report the failure instead of silently returning true */
+        return false;
+    }
+    return true;
+}
+
+uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr3){
+    csh handle;
+    size_t code_size = 256;
+    uint8_t code_ptr[256];
+
+    /* don't => GET_GLOBAL_STATE()->disassembler_word_width */
+    if (cs_open(CS_ARCH_X86, get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width), &handle) != CS_ERR_OK)
+        assert(false);
+
+    cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);
+
+    read_virtual_memory(address, code_ptr, code_size, cpu);
+
+    /* cs_disasm() allocates the insn array itself; cs_malloc() is only
+     * needed for cs_disasm_iter() and is therefore not used here */
+    cs_insn* insn = NULL;
+    size_t count = cs_disasm(handle, code_ptr, code_size, address, 5, &insn);
+    if(count > 0){
+        for(size_t i = 0; i < count; i++){
+            fprintf(stderr, "=> 0x%"PRIx64":\t%s\t\t%s\n", insn[i].address, insn[i].mnemonic, insn[i].op_str);
+        }
+        cs_free(insn, count);
+    }
+    else{
+        fprintf(stderr, "ERROR in %s at %lx (cr3: %lx)\n", __func__, address, cr3);
+    }
+
+    cs_close(&handle);
+    return 0;
+}
diff --git a/nyx/memory_access.h b/nyx/memory_access.h
new file mode 100644
index 0000000000..eec745f10e
--- /dev/null
+++ b/nyx/memory_access.h
@@ -0,0 +1,70 @@
+/*
+
+Copyright (C) 2017 Sergej Schumilo
+
+This file is part of QEMU-PT (kAFL).
+
+QEMU-PT is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 2 of the License, or
+(at your option) any later version.
+
+QEMU-PT is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
+ +*/ + +#ifndef MEMORY_ACCESS_H +#define MEMORY_ACCESS_H + +#include "qemu/osdep.h" +#include +#include "qemu-common.h" +#include "sysemu/kvm_int.h" + +#define MEM_SPLIT_START 0x0C0000000 +#define MEM_SPLIT_END 0x100000000 + +/* i386 pc_piix low_mem address translation */ +#define address_to_ram_offset(offset) (offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset) +#define ram_offset_to_address(offset) (offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset) + +bool read_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu); +bool write_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu); + +bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu); +bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu); +bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu); +bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3); + +bool read_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3); +bool write_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3); +bool write_virtual_shadow_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3); + +bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu); +bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu); +void hexdump_virtual_memory(uint64_t address, uint32_t size, CPUState *cpu); +bool write_virtual_shadow_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu); +bool is_addr_mapped(uint64_t address, CPUState *cpu); +bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3); + +int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len); +int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len); +void remove_all_breakpoints(CPUState *cpu); + +uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr3); +bool dump_page_cr3_snapshot(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3); +bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3); +bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3); + +void print_48_paging2(uint64_t cr3); + +bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu); + + +#endif diff --git a/nyx/mmh3.c b/nyx/mmh3.c new file mode 100644 index 0000000000..89ab1096e9 --- /dev/null +++ b/nyx/mmh3.c @@ -0,0 +1,303 @@ +#include +#include +#include "mmh3.h" + +#define FORCE_INLINE inline __attribute__((always_inline)) + +FORCE_INLINE uint32_t rotl32(uint32_t x, int8_t r) { + return (x << r) | (x >> (32 - r)); +} + +FORCE_INLINE uint64_t rotl64(uint64_t x, int8_t r) { + return (x << r) | (x >> (64 - r)); +} + +#define ROTL32(x, y) rotl32(x, y) +#define ROTL64(x, y) rotl64(x, y) +#define BIG_CONSTANT(x) (x##LLU) + +/** + * Block read -- endian swapping, if required, or handle aligned reads + */ +FORCE_INLINE uint32_t getblock32(const uint32_t *p, int i) { + return p[i]; +} + +FORCE_INLINE uint64_t getblock64(const uint64_t *p, int i) { + return p[i]; +} + +/** + * Force all bits of a hash block to avalanche + */ +FORCE_INLINE uint32_t fmix32(uint32_t h) { + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + return h; +} + +FORCE_INLINE uint64_t fmix64(uint64_t k) 
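+/* [editor's note] fmix32()/fmix64() are MurmurHash3's finalizers: an
+ * xorshift-multiply chain that makes every input bit affect every output
+ * bit (full avalanche). They also work standalone as cheap integer
+ * mixers, e.g. for bucketing pointers (sketch, assuming nbuckets is a
+ * power of two):
+ *
+ *   uint32_t bucket = fmix32((uint32_t)(uintptr_t)ptr) & (nbuckets - 1);
+ */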
{ + k ^= k >> 33; + k *= BIG_CONSTANT(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + return k; +} + +void mmh3_x86_32(const void *key, int len, uint32_t seed, void *out) { + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len/4; + uint32_t h1 = seed; + const uint32_t c1 = 0xcc9e2d51; + const uint32_t c2 = 0x1b873593; + + // Hashing -- body of the function + const uint32_t *blocks = (const uint32_t *) (data + 4*nblocks); + for (int i = -nblocks; i; i++) { + uint32_t k1 = getblock32(blocks, i); + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ROTL32(h1, 13); + h1 = 5*h1 + 0xe6546b64; + } + + const uint8_t *tail = (const uint8_t *) (data + 4*nblocks); + uint32_t k1 = 0; + + switch (len & 3) { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + h1 ^= k1; + }; + + // Finalize + h1 ^= len; + h1 = fmix32(h1); + *(uint32_t *) out = h1; +} + +void mmh3_x86_128(const void *key, const int len, uint32_t seed, void *out) { + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len/16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + const uint32_t c1 = 0x239b961b; + const uint32_t c2 = 0xab0e9789; + const uint32_t c3 = 0x38b34ae5; + const uint32_t c4 = 0xa1e38b93; + + const uint32_t *blocks = (const uint32_t *)(data + 16*nblocks); + + for (int i = -nblocks; i; i++) { + uint32_t k1 = getblock32(blocks, i*4 + 0); + uint32_t k2 = getblock32(blocks, i*4 + 1); + uint32_t k3 = getblock32(blocks, i*4 + 2); + uint32_t k4 = getblock32(blocks, i*4 + 3); + + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + h1 ^= k1; + + h1 = ROTL32(h1, 19); + h1 += h2; + h1 = 5*h1 + 0x561ccd1b; + + k2 *= c2; + k2 = ROTL32(k2, 16); + k2 *= c3; + h2 ^= k2; + + h2 = ROTL32(h2, 17); + h2 += h3; + h2 = 5*h2 + 0x0bcaa747; + + k3 *= c3; + k3 = ROTL32(k3, 17); + k3 *= c4; + h3 ^= k3; + + h3 = ROTL32(h3, 15); + h3 += h4; + h3 = 5*h3 + 0x96cd1c35; + + k4 *= c4; + k4 = ROTL32(k4, 18); + k4 *= c1; + h4 ^= k4; + + h4 = ROTL32(h4, 13); + h4 += h1; + h4 = 5*h4 + 0x32ac3b17; + } + + // Tail + const uint8_t *tail = (const uint8_t *) (data + 16*nblocks); + + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) { + case 15: k4 ^= tail[14] << 16; + case 14: k4 ^= tail[13] << 8; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; + k4 = ROTL32(k4, 18); + k4 *= c1; + h4 ^= k4; + case 12: k3 ^= tail[11] << 24; + case 11: k3 ^= tail[10] << 16; + case 10: k3 ^= tail[9] << 8; + case 9: k3 ^= tail[8] << 0; + k3 *= c3; + k3 = ROTL32(k3, 17); + k3 *= c4; + h3 ^= k3; + case 8: k2 ^= tail[7] << 24; + case 7: k2 ^= tail[6] << 16; + case 6: k2 ^= tail[5] << 8; + case 5: k2 ^= tail[4] << 0; + k2 *= c2; + k2 = ROTL32(k2, 16); + k2 *= c3; + h2 ^= k2; + case 4: k1 ^= tail[3] << 24; + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0] << 0; + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + h1 ^= k1; + }; + + // Finalize + h1 ^= len; + h2 ^= len; + h3 ^= len; + h4 ^= len; + + h1 += h2; + h1 += h3; + h1 += h4; + h2 += h1; + h3 += h1; + h4 += h1; + + h1 = fmix32(h1); + h2 = fmix32(h2); + h3 = fmix32(h3); + h4 = fmix32(h4); + + h1 += h2; + h1 += h3; + h1 += h4; + h2 += h1; + h3 += h1; + h4 += h1; + + ((uint32_t *) out)[0] = h1; + ((uint32_t *) out)[1] = h2; + ((uint32_t *) out)[2] = h3; + ((uint32_t *) out)[3] = h4; +} + +void mmh3_x64_128(const void *key, const int len, const 
uint32_t seed, void *out) { + const uint8_t *data = (const uint8_t *) key; + const int nblocks = len/16; + uint64_t h1 = seed; + uint64_t h2 = seed; + + const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); + const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); + + // Body + const uint64_t *blocks = (const uint64_t *) (data); + + for (int i = 0; i < nblocks; i++) { + uint64_t k1 = getblock64(blocks, i*2 + 0); + uint64_t k2 = getblock64(blocks, i*2 + 1); + + k1 *= c1; + k1 = ROTL64(k1, 31); + k1 *= c2; + h1 ^= k1; + + h1 = ROTL64(h1, 27); + h1 += h2; + h1 = 5*h1 + 0x52dce729; + + k2 *= c2; + k2 = ROTL64(k2, 33); + k2 *= c1; + h2 ^= k2; + + h2 = ROTL64(h2, 31); + h2 += h1; + h2 = 5*h2 + 0x38495ab5; + } + + // tail + const uint8_t *tail = (const uint8_t *) (data + 16*nblocks); + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) { + case 15: k2 ^= ((uint64_t) tail[14]) << 48; + case 14: k2 ^= ((uint64_t) tail[13]) << 40; + case 13: k2 ^= ((uint64_t) tail[12]) << 32; + case 12: k2 ^= ((uint64_t) tail[11]) << 24; + case 11: k2 ^= ((uint64_t) tail[10]) << 16; + case 10: k2 ^= ((uint64_t) tail[9]) << 8; + case 9: k2 ^= ((uint64_t) tail[8]) << 0; + k2 *= c2; + k2 = ROTL64(k2, 33); + k2 *= c1; + h2 ^= k2; + case 8: k1 ^= ((uint64_t) tail[7]) << 56; + case 7: k1 ^= ((uint64_t) tail[6]) << 48; + case 6: k1 ^= ((uint64_t) tail[5]) << 40; + case 5: k1 ^= ((uint64_t) tail[4]) << 32; + case 4: k1 ^= ((uint64_t) tail[3]) << 24; + case 3: k1 ^= ((uint64_t) tail[2]) << 16; + case 2: k1 ^= ((uint64_t) tail[1]) << 8; + case 1: k1 ^= ((uint64_t) tail[0]) << 0; + k1 *= c1; + k1 = ROTL64(k1, 31); + k1 *= c2; + h1 ^= k1; + }; + + // finalize + h1 ^= len; + h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = fmix64(h1); + h2 = fmix64(h2); + + h1 += h2; + h2 += h1; + + ((uint64_t *) out)[0] = h1; + ((uint64_t *) out)[1] = h2; +} diff --git a/nyx/mmh3.h b/nyx/mmh3.h new file mode 100644 index 0000000000..a46a0bff1a --- /dev/null +++ b/nyx/mmh3.h @@ -0,0 +1,12 @@ +#ifndef _MMH3_H +#define _MMH3_H + +#include + +typedef unsigned __int128 uint128_t; + +void mmh3_x86_32(const void *key, int len, uint32_t seed, void *out); +void mmh3_x86_128(const void *key, int len, uint32_t seed, void *out); +void mmh3_x64_128(const void *key, int len, uint32_t seed, void *out); + +#endif diff --git a/nyx/nested_hypercalls.c b/nyx/nested_hypercalls.c new file mode 100644 index 0000000000..0600ab832b --- /dev/null +++ b/nyx/nested_hypercalls.c @@ -0,0 +1,306 @@ +#include +#include +#include "kvm_nested.h" +#include "memory_access.h" +#include "debug.h" +#include "nested_hypercalls.h" +#include "interface.h" +#include "state.h" +#include "pt.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "qemu/main-loop.h" +#include "nyx/helpers.h" + +//#define DEBUG_NESTED_HYPERCALLS + + +bool hypercalls_enabled = false; + +bool create_snapshot = false; + +uint64_t htos_cr3 = 0; +uint64_t htos_config = 0; + +static bool init_state = true; + +int nested_once = 0; + +bool nested_setup_snapshot_once = false; + + + +void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + /* magic */ +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + uint32_t size = 0; + read_physical_memory(htos_config, (uint8_t*) &size, sizeof(uint32_t), cpu); + fprintf(stderr, "--> %x\n", size); + void* buffer = malloc(size); + + read_physical_memory(htos_config+sizeof(uint32_t), buffer, size, cpu); + /* + hexdump_kafl(buffer, size); + + FILE *f = fopen("/tmp/htos_configuration", "w"); + 
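+
+  [editor's note] The live code above implements a simple length-prefixed
+  transfer: the guest places a u32 size followed by the configuration
+  blob at the physical address announced earlier (htos_config), and the
+  host reads it in two steps:
+
+    uint32_t size = 0;
+    read_physical_memory(htos_config, (uint8_t*)&size, sizeof(uint32_t), cpu);
+    void* buffer = malloc(size);
+    read_physical_memory(htos_config + sizeof(uint32_t), buffer, size, cpu);
+
+  No upper bound is enforced on `size` here, so a hostile guest could make
+  QEMU allocate an arbitrary amount.
+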
fwrite(buffer, size, 1, f); + fclose(f); + + */ + print_configuration(stderr, buffer, size); + + FILE* f = fopen("/tmp/hypertrash_configration", "w"); + print_configuration(f, buffer, size); + fclose(f); + + free(buffer); + /* + hexdump_virtual_memory() + _memory(0x38d31000, 0x2000, cpu); + */ +} + +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_RESET "\x1b[0m" + +void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + char hprintf_buffer[0x1000]; +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + read_physical_memory((uint64_t)run->hypercall.args[0], (uint8_t*)hprintf_buffer, 0x1000, cpu); + + //fprintf(stderr, ANSI_COLOR_YELLOW "%s" ANSI_COLOR_RESET, hprintf_buffer); + + set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, 0x1000)+1); + synchronization_lock_hprintf(); + //hexdump_kafl(hprintf_buffer, 0x200); +} + +void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + //cpu->fast_reload_snapshot = (void*)fast_reload_new(); +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + kvm_arch_get_registers(cpu); + + if((uint64_t)run->hypercall.args[0]){ + QEMU_PT_PRINTF(CORE_PREFIX, "handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t ADDRESS:\t%lx\t CR3:\t%lx", (uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1], (uint64_t)run->hypercall.args[2]); + } + else{ + abort(); + } + size_t buffer_size = (size_t)((uint64_t)run->hypercall.args[0] * sizeof(uint64_t)); + uint64_t* buffer = malloc(buffer_size); + memset(buffer, 0x0, buffer_size); + + read_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu); + htos_cr3 = (uint64_t)run->hypercall.args[0]; + + for(uint64_t i = 0; i < (uint64_t)run->hypercall.args[0]; i++){ + if(i == 0){ + htos_config = buffer[i]; + } + QEMU_PT_PRINTF(CORE_PREFIX, "ADDRESS: %lx", buffer[i]); + remap_payload_slot(buffer[i], i, cpu); + } + + set_payload_pages(buffer, (uint32_t)run->hypercall.args[0]); + + // wipe memory + memset(buffer, 0x00, buffer_size); + write_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu); + + free(buffer); +} + +bool acquired = false; + +void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + if(!hypercalls_enabled){ + return; + } +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + bool state = GET_GLOBAL_STATE()->in_reload_mode; + if(!state){ + GET_GLOBAL_STATE()->in_reload_mode = true; + synchronization_disable_pt(cpu); + GET_GLOBAL_STATE()->in_reload_mode = false; + } + else{ + synchronization_disable_pt(cpu); + } +} + +void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ + hypercalls_enabled = true; + static int rcount = 0; +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + + if((rcount%100) == 0){ + + kvm_arch_get_registers(cpu); + //printf("TRY %s %lx %lx %lx (%d)\n", __func__, get_rip(cpu), get_nested_guest_rip(cpu), get_nested_host_rip(cpu), rcount); + + // sleep(rand()%4); + } + rcount++; + synchronization_disable_pt(cpu); + /* + //vm_stop(RUN_STATE_RESTORE_VM); + qemu_mutex_lock_iothread(); + //load_snapshot("kafl", NULL); + //vm_start(); + fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); +*/ + //kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, 
&data); + + // printf("DONE %s\n", __func__); + + /* + kvm_arch_get_registers(cpu); + fprintf(stderr, "RELOADING DUDE %d!\n", rcount); + qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); + */ + //} + //sleep(1); + + + + return; + //assert(false); + QEMU_PT_PRINTF_DEBUG("%s %d", __func__, init_state); + //sleep(10); + + /* magic */ + + //X86CPU *x86_cpu = X86_CPU(cpu); + //CPUX86State *env = &x86_cpu->env; + + + if (init_state){ + printf("INIT STATE\n"); + init_state = false; + + //synchronization_disable_pt(cpu); + + QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_RELEASE"); + + } else { + + + + //if(reload_mode || reload_mode_temp){ + + //} + + //synchronization_disable_pt(cpu); + + + QEMU_PT_PRINTF_DEBUG("%s UNLOCKED", __func__); + + // printf("INTEL PT is disabled!\n"); + + } + + + qemu_mutex_lock_iothread(); + //fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); + + QEMU_PT_PRINTF_DEBUG("%s UNLOCKED 2", __func__); + + + //kvm_cpu_synchronize_state(cpu); + + acquired = false; + +} + +static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t addr){ +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + kvm_remove_all_breakpoints(cpu); + kvm_insert_breakpoint(cpu, addr, 1, 1); + kvm_update_guest_debug(cpu, 0); + + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, cr3); + kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3); +} + +void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){ +#ifdef DEBUG_NESTED_HYPERCALLS + printf("============> %s\n", __func__); +#endif + if (!acquired){ + printf("TRY %s\n", __func__); + + + printf("DONE %s\n", __func__); + + acquired = true; + + //create_fast_snapshot(cpu, true); + request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP); + + for(int i = 0; i < INTEL_PT_MAX_RANGES; i++){ + if(GET_GLOBAL_STATE()->pt_ip_filter_configured[i]){ + pt_enable_ip_filtering(cpu, i, true, false); + } + } + pt_init_decoder(cpu); + + + qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); + + kvm_arch_get_registers(cpu); + + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + printf("IN FUZZING LOOP! %lx\n", env->eip); + GET_GLOBAL_STATE()->in_fuzzing_mode = true; + set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3); + + /* + if(GET_GLOBAL_STATE()->protect_payload_buffer){ + for(int i = 0; i < GET_GLOBAL_STATE()->nested_payload_pages_num; i++){ + remap_payload_slot_protected(GET_GLOBAL_STATE()->nested_payload_pages[i], i, cpu); + } + } + */ + + } + + synchronization_lock(); + + + kvm_arch_get_registers(cpu); + + uint64_t cr3 = get_nested_host_cr3(cpu) & 0xFFFFFFFFFFFFF000ULL; + //fprintf(stderr, "CR3 -> 0x%lx\n", cr3); + pt_set_cr3(cpu, cr3, false); + GET_GLOBAL_STATE()->parent_cr3 = cr3; + + if(GET_GLOBAL_STATE()->dump_page){ + set_page_dump_bp_nested(cpu, cr3, GET_GLOBAL_STATE()->dump_page_addr); + } + + kvm_nested_get_info(cpu); + + synchronization_enter_fuzzing_loop(cpu); + + return; +} \ No newline at end of file diff --git a/nyx/nested_hypercalls.h b/nyx/nested_hypercalls.h new file mode 100644 index 0000000000..e8e01d7fea --- /dev/null +++ b/nyx/nested_hypercalls.h @@ -0,0 +1,9 @@ +#pragma once + +/* HyperTrash! 
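+
+   [editor's note] Rough lifecycle of these nested ("HyperTrash")
+   hypercalls, as implemented in nested_hypercalls.c: the guest issues
+   _prepare first (announcing payload page addresses, which get remapped
+   onto the shared buffer), then _config (length-prefixed configuration
+   blob), then _acquire (root snapshot is requested, PT filters and the
+   parent CR3 are configured, fuzzing mode is entered). _release and
+   _early_release end an execution and trigger the fast reload, and
+   _hprintf forwards guest debug output through the auxiliary buffer.
+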
*/ +void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); +void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg); \ No newline at end of file diff --git a/nyx/page_cache.c b/nyx/page_cache.c new file mode 100644 index 0000000000..dacfd09edb --- /dev/null +++ b/nyx/page_cache.c @@ -0,0 +1,543 @@ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "page_cache.h" +#include "debug.h" +#ifndef STANDALONE_DECODER +#include "cpu.h" +#include "memory_access.h" +#include "fast_vm_reload.h" +#include "kvm_nested.h" +#include "nyx/state.h" +#endif + + +#define PAGE_SIZE 0x1000UL +#define PAGE_CACHE_ADDR_LINE_SIZE sizeof(uint64_t) + +#define UNMAPPED_PAGE 0xFFFFFFFFFFFFFFFFULL + +#ifndef STANDALONE_DECODER +static bool reload_addresses(page_cache_t* self){ +#else +bool reload_addresses(page_cache_t* self){ +#endif + khiter_t k; + int ret; + uint64_t addr, offset; + uint64_t value = 0; + + size_t self_offset = lseek(self->fd_address_file, 0, SEEK_END); + + if(self_offset != self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE){ + //fprintf(stderr, "Reloading files ...\n"); + + lseek(self->fd_address_file, self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE, SEEK_SET); + offset = self->num_pages; + while(read(self->fd_address_file, &value, PAGE_CACHE_ADDR_LINE_SIZE)){ + addr = value & 0xFFFFFFFFFFFFF000ULL; + offset++; + + /* put new addresses and offsets into the hash map */ + k = kh_get(PC_CACHE, self->lookup, addr); + if(k == kh_end(self->lookup)){ + + if(value & 0xFFF){ + fprintf(stderr, "Load page: %lx (UMAPPED)\n", addr); + //k = kh_put(PC_CACHE, self->lookup, addr, &ret); + //kh_value(self->lookup, k) = UNMAPPED_PAGE; + } + else{ + //fprintf(stderr, "Load page: %lx\n", addr); + k = kh_put(PC_CACHE, self->lookup, addr, &ret); + kh_value(self->lookup, k) = (offset-1)*PAGE_SIZE; + } + + /* + k = kh_put(PC_CACHE, self->lookup, addr, &ret); + kh_value(self->lookup, k) = (offset-1)*PAGE_SIZE; + */ + } + else{ + fprintf(stderr, "----------> Page duplicate found ...skipping! %lx\n", addr); + /* should not be possible ... 
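+
+   [editor's note] On-disk layout assumed by reload_addresses(): the cache
+   is a pair of append-only files shared between fuzzer instances.
+   "<name>.addr" holds one u64 per page (the page-aligned address; non-zero
+   low 12 bits mark a failed/unmapped dump), and "<name>.dump" holds the
+   corresponding 4 KiB page bodies at matching indices, so
+
+     offset_in_dump_file(page_i) = i * PAGE_SIZE
+
+   The khash map rebuilt here simply maps address to an offset into the
+   mmap()ed dump file.
+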
*/ + //abort(); + } + } + + //fprintf(stderr, "Old Value: %d - New Value: %ld\n", self->num_pages, (uint32_t)self_offset/PAGE_CACHE_ADDR_LINE_SIZE); + + /* reload page dump file */ + munmap(self->page_data, self->num_pages*PAGE_SIZE); + self->num_pages = self_offset/PAGE_CACHE_ADDR_LINE_SIZE; + self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0); + + return true; + } + + return false; +} + +#ifndef STANDALONE_DECODER +static bool append_page(page_cache_t* self, uint64_t page, uint64_t cr3){ + bool success = true; + if(!self->num_pages){ + assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE)); + self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0); + } + else{ + munmap(self->page_data, self->num_pages*PAGE_SIZE); + assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE)); + self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0); + } + + + //if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){ + // if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){ + if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){ + if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){ + if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){ + + fprintf(stderr, "FAILED DUMP PROCESS of PAGE %lx\n", page); + //memset(self->page_data+(PAGE_SIZE*self->num_pages), 0xff, PAGE_SIZE); + + munmap(self->page_data, (self->num_pages+1)*PAGE_SIZE); + assert(!ftruncate(self->fd_page_file, (self->num_pages)*PAGE_SIZE)); + self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0); + + //qemu_backtrace(); + success = false; + return success; + //assert(false); + } + } + } + //} + +/* + if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){ + fprintf(stderr, "FAILED DUMP PROCESS of PAGE %lx\n", page); + assert(false); + } +*/ + + /* + //fast_loadvm(); + if(cr3){ + dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3); //self->cpu->parent_cr3); + //assert(dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)); //self->cpu->parent_cr3); + + //read_virtual_memory_cr3(page, self->page_data+(PAGE_SIZE*self->num_pages), PAGE_SIZE, self->cpu, self->cpu->parent_cr3); + } + else{ + dump_page_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu); + //read_virtual_memory(page, self->page_data+(PAGE_SIZE*self->num_pages), PAGE_SIZE, self->cpu); + } + */ + fsync(self->fd_page_file); + self->num_pages++; + return success; +} +#else +bool append_page(page_cache_t* self, uint64_t page, uint8_t* ptr){ + self->last_page = 0xFFFFFFFFFFFFFFFF; + self->last_addr = 0xFFFFFFFFFFFFFFFF; + page &= 0xFFFFFFFFFFFFF000ULL; + bool success = true; + if(!self->num_pages){ + assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE)); + self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0); + } 
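+    /* [editor's note] Growth pattern used by both append_page() variants:
+     * the dump file is extended by one page with ftruncate(), the old
+     * mapping is dropped, and the whole file is re-mmap()ed at its new
+     * size, i.e.
+     *
+     *   munmap(old, n * PAGE_SIZE);
+     *   ftruncate(fd, (n + 1) * PAGE_SIZE);
+     *   new = mmap(NULL, (n + 1) * PAGE_SIZE, PROT_READ | PROT_WRITE,
+     *              MAP_SHARED, fd, 0);
+     *
+     * Any cached pointer into the old mapping must be invalidated
+     * afterwards, which is what the last_page/last_addr resets above do. */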
+ else{ + munmap(self->page_data, self->num_pages*PAGE_SIZE); + assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE)); + self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0); + } + + memcpy(self->page_data+(PAGE_SIZE*self->num_pages), ptr, PAGE_SIZE); + + fsync(self->fd_page_file); + + int ret; + khiter_t k; + k = kh_put(PC_CACHE, self->lookup, page, &ret); + kh_value(self->lookup, k) = self->num_pages*PAGE_SIZE; + assert(write(self->fd_address_file, &page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE); + + self->num_pages++; + + return success; +} +#endif + +static void page_cache_lock(page_cache_t* self){ +#ifndef STANDALONE_DECODER + int ret = 0; + while (true){ + ret = flock(self->fd_lock, LOCK_EX); + if (ret == 0){ + return; + } + else if (ret == EINTR){ + /* try again if acquiring this lock has failed */ + fprintf(stderr, "%s: interrupted by signal...\n", __func__); + } + else{ + assert(false); + } + } +#endif +} + +static void page_cache_unlock(page_cache_t* self){ +#ifndef STANDALONE_DECODER + int ret = 0; + while (true){ + ret = flock(self->fd_lock, LOCK_UN); + if (ret == 0){ + return; + } + else if (ret == EINTR){ + /* try again if releasing this lock has failed */ + fprintf(stderr, "%s: interrupted by signal...\n", __func__); + } + else{ + assert(false); + } + } +#endif +} + +static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){ + + //#define DEBUG_PAGE_CACHE_LOCK + + page_cache_lock(self); +#ifdef DEBUG_PAGE_CACHE_LOCK + fprintf(stderr, "%d: LOCKING PAGE CACHE\n", getpid()); +#endif + + if(reload_addresses(self)){ + *k = kh_get(PC_CACHE, self->lookup, page); + } + + + if(*k == kh_end(self->lookup)){ +#ifndef STANDALONE_DECODER + int ret; + + uint64_t cr3 = GET_GLOBAL_STATE()->parent_cr3; //self->cpu->parent_cr3; + if(!is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->parent_cr3) && !is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){ //self->cpu->parent_cr3)){ + //fprintf(stderr, "PAGE NOT FOUND in SNAPSHOT %lx\n", page); + //assert(false); + } + + *k = kh_get(PC_CACHE, self->lookup, page); + + if(*k == kh_end(self->lookup) && reload_addresses(self)){ + /* reload sucessful */ + *k = kh_get(PC_CACHE, self->lookup, page); + } + else{ + + + if(append_page(self, page, cr3)){ + *k = kh_put(PC_CACHE, self->lookup, page, &ret); + assert(write(self->fd_address_file, &page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE); + kh_value(self->lookup, *k) = (self->num_pages-1)*PAGE_SIZE; + } + else{ + fprintf(stderr, "Fail!!!!\n"); + page_cache_unlock(self); + return false; + /* + uint64_t new_page = page | 0xFFF; + assert(write(self->fd_address_file, &new_page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE); + kh_value(self->lookup, *k) = UNMAPPED_PAGE; + fprintf(stderr, "APPEND UNMAPPED PAGE %lx!\n", page); + */ + } + + *k = kh_get(PC_CACHE, self->lookup, page); + } +#else + //printf("PAGE NOT FOUND: %lx! 
ABORTING\n", page); + page_cache_unlock(self); + return false; + abort(); +#endif + } + +#ifdef DEBUG_PAGE_CACHE_LOCK + fprintf(stderr, "%d: UNLOCKING PAGE CACHE\n", getpid()); +#endif + + page_cache_unlock(self); + return true; +} + +uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode){ + page &= 0xFFFFFFFFFFFFF000ULL; + + /* + if(test_mode){ + *success = false; + return 0; + } + */ + + //if(page == 0x7ffca45b5000) + // return UNMAPPED_PAGE; + //printf("%s %lx\n", __func__, page); + + //if (page == 0x0434000) + // return 0; + + if (self->last_page == page){ + *success = true; + return self->last_addr; + } + + //QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "page_cache_fetch %lx", page); + + khiter_t k; + k = kh_get(PC_CACHE, self->lookup, page); + if(k == kh_end(self->lookup)){ + if(test_mode || update_page_cache(self, page, &k) == false){ + //fprintf(stderr, "%s: fail!\n", __func__); + *success = false; + //abort(); + return 0; + } + } + + self->last_page = page; + //fprintf(stderr, "[%d]\tkh_n_buckets: %d %d\n", getpid(), kh_n_buckets(self->lookup), k); + + if(kh_value(self->lookup, k) == UNMAPPED_PAGE){ + self->last_addr = UNMAPPED_PAGE; + } + else{ + self->last_addr = (uint64_t)self->page_data+kh_value(self->lookup, k); + } + + + + //fprintf(stderr, "try to unlock flock!\n"); + //fprintf(stderr, "flock unlocked!\n"); + + *success = true; + return self->last_addr; +} + +/* fix this */ +uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success){ + return page_cache_fetch(self, page, success, false); +} + +#ifndef STANDALONE_DECODER +page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file){ +#else +page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_width){ +#endif + page_cache_t* self = malloc(sizeof(page_cache_t)); + + char* tmp1; + char* tmp2; + char* tmp3; + assert(asprintf(&tmp1, "%s.dump", cache_file) != -1); + assert(asprintf(&tmp2, "%s.addr", cache_file) != -1); + assert(asprintf(&tmp3, "%s.lock", cache_file) != -1); + + + self->lookup = kh_init(PC_CACHE); + self->fd_page_file = open(tmp1, O_CLOEXEC | O_RDWR, S_IRWXU); + self->fd_address_file = open(tmp2, O_CLOEXEC | O_RDWR, S_IRWXU); + +#ifndef STANDALONE_DECODER + self->cpu = cpu; + self->fd_lock = open(tmp3, O_CLOEXEC); + assert(self->fd_lock > 0); +#else + if(self->fd_page_file == -1 || self->fd_address_file == -1){ + printf("[ ] Page cache files not found...\n"); + exit(1); + } +#endif + + memset(self->disassemble_cache, 0x0, 16); + + self->page_data = NULL; + self->num_pages = 0; + + self->last_page = 0xFFFFFFFFFFFFFFFF; + self->last_addr = 0xFFFFFFFFFFFFFFFF; + + QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "%s (%s - %s) WORD_WIDTH: %d", __func__, tmp1, tmp2, disassembler_word_width); + + free(tmp3); + free(tmp2); + free(tmp1); + + if (cs_open(CS_ARCH_X86, CS_MODE_16, &self->handle_16) != CS_ERR_OK) + assert(false); + + if (cs_open(CS_ARCH_X86, CS_MODE_32, &self->handle_32) != CS_ERR_OK) + assert(false); + + if (cs_open(CS_ARCH_X86, CS_MODE_64, &self->handle_64) != CS_ERR_OK) + assert(false); + + cs_option(self->handle_16, CS_OPT_DETAIL, CS_OPT_ON); + cs_option(self->handle_32, CS_OPT_DETAIL, CS_OPT_ON); + cs_option(self->handle_64, CS_OPT_DETAIL, CS_OPT_ON); + + return self; +} + +#ifdef STANDALONE_DECODER +void page_cache_destroy(page_cache_t* self){ + munmap(self->page_data, self->num_pages * 0x1000); + kh_destroy(PC_CACHE, self->lookup); + + cs_close(&self->handle_16); + cs_close(&self->handle_32); + cs_close(&self->handle_64); + free(self); +} 
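The standalone decoder build consumes the two files created above: `<prefix>.dump` holds the raw page data and `<prefix>.addr` the page-address index that reload_addresses() walks. A minimal usage sketch of that build, assuming a hypothetical `/tmp/pc` prefix left behind by an earlier run; the prefix, the probed address, and the availability of `UNMAPPED_PAGE` through the included header are assumptions, not part of the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "page_cache.h"

int main(void){
	/* opens /tmp/pc.dump and /tmp/pc.addr; the standalone build exits if they are missing */
	page_cache_t* pc = page_cache_new("/tmp/pc", 64);

	bool success = false;
	/* page_cache_fetch masks the address down to its 4 KiB page internally */
	uint64_t data = page_cache_fetch(pc, 0x401234, &success, false);
	if (success && data != UNMAPPED_PAGE){
		printf("page is cached at host address %p\n", (void*)data);
	}

	page_cache_destroy(pc);
	return 0;
}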
+#endif + + +/* +static bool page_cache_load(uint64_t virtual_addr){ + + + return true; +} +*/ + +/* + +static bool page_cache_load_cr3(uint64_t virtual_addr, uint64_t cr3){ + return true; +} + +*/ + +bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn){ + return true; +} + +cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode){ + switch(mode){ + case mode_16: + return cs_malloc(self->handle_16); + case mode_32: + return cs_malloc(self->handle_32); + case mode_64: + return cs_malloc(self->handle_64); + default: + assert(false); + } + return NULL; +} + +//#define EXPERIMENTAL_PAGE_FETCH + +bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode){ + + //printf("%s %lx\n", __func__, *address); + + *failed_page = 0xFFFFFFFFFFFFFFFFULL; + + bool success = true; + size_t code_size = 16; + +#if defined(STANDALONE_DECODER) || !defined(EXPERIMENTAL_PAGE_FETCH) + uint8_t* code = (uint8_t*)page_cache_fetch(self, *address, &success, false); +#else + uint8_t* code = (uint8_t*)page_cache_fetch(self, *address, &success, true); +#endif + uint8_t* code_ptr = 0; + + + //disassembler_mode_t mode = mode_16; + csh* current_handle = NULL; + + switch(mode){ + case mode_16: + current_handle = &self->handle_16; + break; + case mode_32: + current_handle = &self->handle_32; + break; + case mode_64: + current_handle = &self->handle_64; + break; + default: + assert(false); + } + + if (code == (void*)UNMAPPED_PAGE || success == false){ + *failed_page = *address;// & 0xFFFFFFFFFFFFF000ULL; + //printf("FAIL???? (0x%lx) %lx %d\n", *address, code, success); + return false; + } + + if ((*address & 0xFFF) >= (0x1000-16)){ + //printf("-------------> Disassemble between pages...%lx (%lx %lx %lx)\n", *address, (*address&0xFFF), (0x1000-16), 0xf-(0xfff-(*address&0xfff))); + memcpy((void*)self->disassemble_cache, (void*)((uint64_t)code+(0x1000-16)), 16); + code_ptr = self->disassemble_cache + 0xf-(0xfff-(*address&0xfff)); + +#if defined(STANDALONE_DECODER) || !defined(EXPERIMENTAL_PAGE_FETCH) + code = (uint8_t*)page_cache_fetch(self, *address+0x1000, &success, false); +#else + code = (uint8_t*)page_cache_fetch(self, *address+0x1000, &success, true); +#endif + + /* broken AF */ + if(success == true){ + //printf("=> A\n"); + //*failed_page = (*address+0x1000) & 0xFFFFFFFFFFFFF000ULL; + //return false; + //printf("=> %lx %lx\n", (0xfff-(*address&0xfff)), *address); + memcpy((void*)(self->disassemble_cache+16), (void*)code, 16); + //code_size = 16; + return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn); + } + else{ + //printf("=> B\n"); + code_size = (0xfff-(*address&0xfff)); + //printf("%lx\n", code_size); + //abort(); + //*failed_page = *address; + if(!cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn)){ + *failed_page = (*address+0x1000) & 0xFFFFFFFFFFFFF000ULL; + //fprintf(stderr, "%s FAIL: %lx %lx\n", __func__, *address, *failed_page); + //if(*address != 0x555555554ffe && *address != 0x7ffff7478ffc && *address != 0x7ffff7820ff6 && *address != 0x7ffff7822ffa) + // abort(); + return false; + } + return true; + //return cs_disasm_iter(self->handle, (const uint8_t**) &code_ptr, &code_size, address, insn); + } + } + else { + //printf("=> C\n"); + code_ptr = code + (*address&0xFFF); + + //printf("Disassemble...(%lx %x)\n", code_ptr, *code_ptr); + return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, 
address, insn); + } +} + + diff --git a/nyx/page_cache.h b/nyx/page_cache.h new file mode 100644 index 0000000000..861522ee74 --- /dev/null +++ b/nyx/page_cache.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include +#ifndef STANDALONE_DECODER +#include "qemu/osdep.h" +#endif +#include "khash.h" +#include + +KHASH_MAP_INIT_INT64(PC_CACHE, uint64_t) + +typedef struct page_cache_s{ +#ifndef STANDALONE_DECODER + CPUState *cpu; +#endif + khash_t(PC_CACHE) *lookup; + int fd_page_file; + int fd_address_file; + int fd_lock; + uint8_t disassemble_cache[32]; + void* page_data; + uint32_t num_pages; + + csh handle_16; + csh handle_32; + csh handle_64; + + uint64_t last_page; + uint64_t last_addr; +} page_cache_t; + +#ifndef STANDALONE_DECODER +page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file); +#else +page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_width); +void page_cache_destroy(page_cache_t* self); +bool append_page(page_cache_t* self, uint64_t page, uint8_t* ptr); +#endif + +uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode); + +bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn); +bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode); + +cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode); + + +uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success); \ No newline at end of file diff --git a/nyx/patcher.c b/nyx/patcher.c new file mode 100644 index 0000000000..123d98d508 --- /dev/null +++ b/nyx/patcher.c @@ -0,0 +1,184 @@ +#include "patcher.h" +#include "nyx/memory_access.h" +#include "nyx/disassembler.h" +#include "debug.h" +#include "nyx/state.h" + +uint8_t cmp_patch_data[] = { 0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE]=0x90 }; // CMP AL,AL; NOP, NOP ... 
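The cmp_patch_data initializer above relies on a GNU C range designator that is easy to misread: elements 0 and 1 hold the two opcode bytes of `cmp al, al` (0x38 0xC0), and every element from index 2 through MAX_INSTRUCTION_SIZE is NOP filler (0x90), so the patch can fully overwrite any instruction the patcher handles. A self-contained sketch of the same idiom (`demo_patch` is illustrative only, not part of the patchset):

#include <stdint.h>
#include <stdio.h>

#define MAX_INSTRUCTION_SIZE 64

/* same idiom as cmp_patch_data: range designators are inclusive,
 * so the array covers indices 0..MAX_INSTRUCTION_SIZE */
static const uint8_t demo_patch[] = { 0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE] = 0x90 };

int main(void){
	/* prints "65 bytes, last byte 0x90" */
	printf("%zu bytes, last byte 0x%02x\n", sizeof(demo_patch), demo_patch[sizeof(demo_patch) - 1]);
	return 0;
}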
+const uint8_t *cmp_patch = &cmp_patch_data[0];
+
+///////////////////////////////////////////////////////////////////////////////////
+// Private Helper Function Declarations
+///////////////////////////////////////////////////////////////////////////////////
+
+static void _patcher_apply_patch(patcher_t *self, size_t index);
+
+static void _patcher_restore_patch(patcher_t *self, size_t index);
+
+static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr);
+
+static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn id);
+
+static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches);
+
+static void _patcher_free_patch_infos(patcher_t *self);
+
+static redqueen_t* _redq_ptr(patcher_t *self);
+
+///////////////////////////////////////////////////////////////////////////////////
+// Public Functions
+///////////////////////////////////////////////////////////////////////////////////
+
+patcher_t* patcher_new(CPUState *cpu){
+	patcher_t *res = malloc(sizeof(patcher_t));
+	res->cpu = cpu;
+	res->num_patches = 0;
+	res->patches = NULL;
+	res->is_currently_applied = false;
+	return res;
+}
+
+void patcher_free(patcher_t* self){
+	assert(!self->is_currently_applied);
+	_patcher_free_patch_infos(self);
+	free(self);
+}
+
+void patcher_apply_all(patcher_t *self){
+	assert(!self->is_currently_applied);
+	assert(!_redq_ptr(self)->hooks_applied);
+	//assert(patcher_validate_patches(self));
+	for(size_t i = 0; i < self->num_patches; i++){
+		_patcher_apply_patch(self, i);
+	}
+	self->is_currently_applied = true;
+}
+
+void patcher_restore_all(patcher_t *self){
+	assert(self->is_currently_applied);
+	assert(!_redq_ptr(self)->hooks_applied);
+	//assert(patcher_validate_patches(self));
+	for(size_t i = 0; i < self->num_patches; i++){
+		_patcher_restore_patch(self, i);
+	}
+	self->is_currently_applied = false;
+}
+
+void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs){
+	_patcher_free_patch_infos(self);
+	_patcher_alloc_patch_infos(self, num_addrs);
+	uint8_t curr_instruction_code[MAX_INSTRUCTION_SIZE];
+	memset(&curr_instruction_code[0], 0, MAX_INSTRUCTION_SIZE);
+
+	for(size_t i = 0; i < self->num_patches; i++){
+		//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
+		if( read_virtual_memory(addrs[i], &curr_instruction_code[0], MAX_INSTRUCTION_SIZE, self->cpu) ) {
+			size_t size = _patcher_disassemble_size(self, &curr_instruction_code[0], addrs[i], X86_INS_CMP);
+			assert(size != 0); // cs_open or decoding failed, shouldn't happen
+			_patcher_save_patch(self, i, &curr_instruction_code[0], size, addrs[i]);
+		}
+	}
+}
+
+static void print_hexdump(const uint8_t* addr, size_t size){
+	for(size_t i = 0; i < size; i++){
+		printf(" %02x", addr[i]);
+	}
+	printf("\n");
+}
+
+bool patcher_validate_patches(patcher_t *self){
+	bool was_rq = _redq_ptr(self)->hooks_applied;
+	if(was_rq)
+		redqueen_remove_hooks(_redq_ptr(self));
+	if(!self->patches){return true;}
+	for(size_t i = 0; i < self->num_patches; i++){
+		uint8_t buf[MAX_INSTRUCTION_SIZE];
+		read_virtual_memory(self->patches[i].addr, &buf[0], MAX_INSTRUCTION_SIZE, self->cpu);
+		const uint8_t* should_value = NULL;
+		if(self->is_currently_applied){
+			should_value = cmp_patch;
+		} else {
+			should_value = &self->patches[i].orig_bytes[0];
+		}
+
+		QEMU_PT_PRINTF(REDQUEEN_PREFIX, "Validating, mem:");
+		print_hexdump(&buf[0], self->patches[i].size);
+		QEMU_PT_PRINTF(REDQUEEN_PREFIX, "should be:");
+		print_hexdump(should_value, self->patches[i].size);
+		if(0 != memcmp(&buf[0], should_value, self->patches[i].size)){
+			QEMU_PT_PRINTF(REDQUEEN_PREFIX, "validating patches failed, is_currently_applied=%d", self->is_currently_applied);
+			return false;
+		}
+	}
+	if(was_rq)
+		redqueen_insert_hooks(_redq_ptr(self));
+	return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+// Private Helper Function Definitions
+///////////////////////////////////////////////////////////////////////////////////
+
+static void _patcher_apply_patch(patcher_t *self, size_t index) {
+	patch_info_t *info = &self->patches[index];
+	write_virtual_shadow_memory_cr3(info->addr, (uint8_t*)cmp_patch, info->size, self->cpu, GET_GLOBAL_STATE()->parent_cr3);
+}
+
+static void _patcher_restore_patch(patcher_t *self, size_t index){
+	patch_info_t *info = &self->patches[index];
+	write_virtual_shadow_memory_cr3(info->addr, (uint8_t*)&info->orig_bytes[0], info->size, self->cpu, GET_GLOBAL_STATE()->parent_cr3);
+}
+
+static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr) {
+	assert(instruction_size >= 2);
+	assert(instruction_size < MAX_INSTRUCTION_SIZE);
+	patch_info_t *info = &self->patches[index];
+	memset(&info->orig_bytes[0], 0, MAX_INSTRUCTION_SIZE);
+	memcpy(&info->orig_bytes[0], data, instruction_size);
+	info->addr = addr;
+	info->size = instruction_size;
+}
+
+static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn type){
+	csh handle;
+	if (cs_open(CS_ARCH_X86, get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width), &handle) == CS_ERR_OK){
+		cs_insn *insn = cs_malloc(handle);
+		uint8_t* cur_offset = data;
+		uint64_t cs_address = addr;
+		uint64_t code_size = MAX_INSTRUCTION_SIZE;
+		size_t size = 0;
+		/* decode exactly one instruction; size stays 0 if decoding fails */
+		if (cs_disasm_iter(handle, (const uint8_t **) &cur_offset, &code_size, &cs_address, insn)){
+			size = insn->size;
+			if(type != X86_INS_INVALID){
+				assert(insn->id == type);
+			}
+		}
+		cs_free(insn, 1);
+		cs_close(&handle);
+		return size;
+	}
+	return 0;
+}
+
+static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches){
+	assert(self->num_patches == 0);
+	assert(self->patches == NULL);
+	assert(num_patches < 10000);
+	self->num_patches = num_patches;
+	self->patches = malloc(sizeof(patch_info_t)*num_patches);
+}
+
+static void _patcher_free_patch_infos(patcher_t *self){
+	assert(!self->is_currently_applied);
+	free(self->patches);
+	self->patches = NULL;
+	self->num_patches = 0;
+}
+
+static redqueen_t* _redq_ptr(patcher_t *self){
+	redqueen_t* res = GET_GLOBAL_STATE()->redqueen_state;
+	return res;
+}
diff --git a/nyx/patcher.h b/nyx/patcher.h
new file mode 100644
index 0000000000..5fc6c24207
--- /dev/null
+++ b/nyx/patcher.h
@@ -0,0 +1,45 @@
+#ifndef __GUARD_REDQUEEN_PATCHER_STRUCT__
+#define __GUARD_REDQUEEN_PATCHER_STRUCT__
+
+#include <stdint.h>
+#include <stddef.h>
+
+#include <stdbool.h>
+#include <assert.h>
+
+#include "qemu/osdep.h"
+
+#define MAX_INSTRUCTION_SIZE 64
+//Patch used to replace cmp instructions. It encodes CMP AL, AL, a comparison which always evaluates
+//to true. This can be used to remove hash checks that we suspect can later on be patched.
+extern const uint8_t* cmp_patch; + +typedef struct patch_info_s{ + uint64_t addr; + size_t size; + uint8_t orig_bytes[MAX_INSTRUCTION_SIZE]; +} patch_info_t; + +typedef struct patcher_s{ + + CPUState *cpu; + + patch_info_t *patches; + size_t num_patches; + bool is_currently_applied; +} patcher_t; + +patcher_t* patcher_new(CPUState *cpu); + +void patcher_free(patcher_t *self); + +void patcher_apply_all(patcher_t *self); + +void patcher_restore_all(patcher_t *self); + +//Doesn't take ownership of addrs +void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs); + +bool patcher_validate_patches(patcher_t *self); + +#endif diff --git a/nyx/printk.c b/nyx/printk.c new file mode 100644 index 0000000000..c27ebab902 --- /dev/null +++ b/nyx/printk.c @@ -0,0 +1,104 @@ +#include "qemu/osdep.h" +#include +#include "qemu-common.h" +#include "nyx/memory_access.h" +#include "nyx/hypercall.h" +#include "nyx/printk.h" + +enum reg_types{RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15, RIP}; + +uint8_t types[] = {RSI, RDX, RCX, R8, R9} ; +/* calling convention: RDI, RSI, RDX, RCX, R8, R9 */ + +/* https://www.kernel.org/doc/Documentation/printk-formats.txt :-( */ + +bool kafl_linux_printk(CPUState *cpu){ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + char printk_buf[0x1000]; + + uint8_t rsp_buf[0x1000]; + uint8_t rdi_buf[0x1000]; + uint8_t rsi_buf[0x1000]; + uint8_t rdx_buf[0x1000]; + uint8_t rcx_buf[0x1000]; + uint8_t r8_buf[0x1000]; + uint8_t r9_buf[0x1000]; + + read_virtual_memory((uint64_t)env->regs[RSP], (uint8_t*)rsp_buf, 0x1000, cpu); + read_virtual_memory((uint64_t)env->regs[RDI], (uint8_t*)rdi_buf, 0x1000, cpu); + + uint8_t* buf[] = {rsi_buf, rdx_buf, rcx_buf, r8_buf, r9_buf}; + + + + for(uint16_t i = 0, type = 0; i < 0x1000 && rdi_buf[i] != '\x00'; i++){ + + if(i > 1 && rdi_buf[i-2] == '%' && rdi_buf[i-1] != '%'){ + + if(rdi_buf[i-1] == 's' || rdi_buf[i-1] == 'p' || rdi_buf[i-1] == '.'){ + + + if(rdi_buf[i] == 'B'){ + rdi_buf[i-1] = 'l'; + rdi_buf[i] = 'x'; + buf[type] = (uint8_t*)env->regs[types[type]]; + } + + else if(rdi_buf[i-1] == 'p' && rdi_buf[i] == 'V'){ + rdi_buf[i-1] = 's'; + rdi_buf[i] = ' '; + read_virtual_memory((uint64_t)env->regs[types[type]], (uint8_t*)buf[type], 0x1000, cpu); + uint64_t tmp = *((uint64_t*)buf[type]); + read_virtual_memory(tmp, (uint8_t*)buf[type], 0x1000, cpu); + + } + else if(rdi_buf[i-1] == 'p'){ + rdi_buf[i-1] = 'l'; + memmove(rdi_buf+i+1, rdi_buf+i, 0x1000-i-1); + rdi_buf[i] = 'x'; + buf[type] = (uint8_t*)env->regs[types[type]]; + + } + else { + read_virtual_memory((uint64_t)env->regs[types[type]], (uint8_t*)buf[type], 0x1000, cpu); + } + } + else{ + buf[type] = (uint8_t*)env->regs[types[type]]; + } + + type++; + + + if(type > 4){ + rdi_buf[i] = '\n'; + rdi_buf[i+1] = '\x00'; + break; + } + } + + } + + snprintf(printk_buf, 0x1000, (char*)rdi_buf, buf[0], buf[1], buf[2], buf[3], buf[4]); + + if(printk_buf[0] == 0x1){ + //printf("%s", rdi_buf+2); + //hprintf(printk_buf+2); + //printf("%s", printk_buf+2); + if(!strncmp(printk_buf+2, "---[ end Kernel panic", 21)){ + return true; + } + } + else { + //printf("%s", rdi_buf); + //hprintf(printk_buf); + //printf("%s", printk_buf); + if(!strncmp(printk_buf, "---[ end Kernel panic", 21)){ + return true; + } + } + return false; + +} \ No newline at end of file diff --git a/nyx/printk.h b/nyx/printk.h new file mode 100644 index 0000000000..9fac4721db --- /dev/null +++ b/nyx/printk.h @@ -0,0 +1,28 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This 
file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#ifndef PRINTK_H +#define PRINTK_H + +bool kafl_linux_printk(CPUState *cpu); + + +#endif \ No newline at end of file diff --git a/nyx/pt.c b/nyx/pt.c new file mode 100644 index 0000000000..3ef3c63c9d --- /dev/null +++ b/nyx/pt.c @@ -0,0 +1,509 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#include "qemu/osdep.h" +#include +#include +#include +#include "qemu-common.h" +#include "target/i386/cpu.h" +#include "nyx/pt.h" +#include "exec/memory.h" +#include "sysemu/kvm_int.h" +#include "sysemu/kvm.h" +#include "sysemu/cpus.h" +#include "nyx/hypercall.h" +#include "nyx/memory_access.h" +#include "nyx/interface.h" +#include "nyx/debug.h" +#include "nyx/redqueen.h" +#include "nyx/redqueen_patch.h" +#include "nyx/patcher.h" +#include "nyx/page_cache.h" +#include "nyx/state.h" +#include +#include "nyx/helpers.h" + +#define PT_BUFFER_MMAP_ADDR 0x3ffff0000000 + +/* +extern uint32_t kafl_bitmap_size; +uint8_t* bitmap = NULL; +*/ + +uint32_t state_byte = 0; +uint32_t last = 0; + +int pt_trace_dump_fd = 0; +bool should_dump_pt_trace= false; + +void pt_open_pt_trace_file(char* filename){ + printf("using pt trace at %s",filename); + pt_trace_dump_fd = open(filename, O_WRONLY); + should_dump_pt_trace = true; + assert(pt_trace_dump_fd >= 0); +} + +void pt_trucate_pt_trace_file(void){ + if(should_dump_pt_trace){ + assert(lseek(pt_trace_dump_fd, 0, SEEK_SET) == 0); + assert(ftruncate(pt_trace_dump_fd, 0)==0); + } +} + +void pt_sync(void){ + /* + if(bitmap){ + msync(bitmap, kafl_bitmap_size, MS_SYNC); + } + */ +} + +static void pt_set(CPUState *cpu, run_on_cpu_data arg){ + asm volatile("" ::: "memory"); +} + +static inline int pt_cmd_hmp_context(CPUState *cpu, uint64_t cmd){ + cpu->pt_ret = -1; + if(pt_hypercalls_enabled()){ + QEMU_PT_PRINTF(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing mode is enabled (-kafl)!"); + } + else{ + cpu->pt_cmd = cmd; + run_on_cpu(cpu, pt_set, RUN_ON_CPU_NULL); + } + return cpu->pt_ret; +} + +static int pt_cmd(CPUState *cpu, uint64_t cmd, bool hmp_mode){ + if (hmp_mode){ + return pt_cmd_hmp_context(cpu, cmd); + } + else { + cpu->pt_cmd = cmd; + pt_pre_kvm_run(cpu); + return cpu->pt_ret; + } +} + +static inline int pt_ioctl(int fd, unsigned long request, unsigned long arg){ + if (!fd){ + return -EINVAL; + } + return ioctl(fd, request, arg); +} + 
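The helpers above funnel every PT state change through `cpu->pt_cmd`, so the actual ioctl is issued on the vCPU thread: the HMP path queues the command via run_on_cpu(), while the fuzzing path calls pt_pre_kvm_run() directly. A reduced model of that handshake, using a hypothetical `vcpu_t` struct and a stub in place of the real `KVM_VMX_PT_*` ioctls (a sketch of the control flow only, not the driver interface):

#include <stdio.h>

/* hypothetical stand-in for CPUState; only the handshake fields matter here */
typedef struct { unsigned long pt_cmd; int pt_ret; } vcpu_t;

/* stub for the KVM_VMX_PT_* ioctl that pt_pre_kvm_run() would issue */
static int issue_pt_ioctl(unsigned long request){
	printf("ioctl(pt_fd, %lu, 0)\n", request);
	return 0;
}

/* models pt_pre_kvm_run(): consume the pending command on the vCPU thread */
static void pre_kvm_run_model(vcpu_t* cpu){
	if (cpu->pt_cmd){
		cpu->pt_ret = issue_pt_ioctl(cpu->pt_cmd);
		cpu->pt_cmd = 0;
	}
}

/* models pt_cmd() in the non-HMP case: queue the command, then run it inline */
static int pt_cmd_model(vcpu_t* cpu, unsigned long cmd){
	cpu->pt_cmd = cmd;
	pre_kvm_run_model(cpu);
	return cpu->pt_ret;
}

int main(void){
	vcpu_t cpu = {0, 0};
	return pt_cmd_model(&cpu, 1 /* e.g. KVM_VMX_PT_ENABLE */);
}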
+/*
+void pt_setup_bitmap(void* ptr){
+	bitmap = (uint8_t*)ptr;
+}
+
+void pt_reset_bitmap(void){
+	if(bitmap){
+		state_byte = 0;
+		last = 0;
+		memset(bitmap, 0x00, kafl_bitmap_size);
+	}
+}
+*/
+
+static inline uint64_t mix_bits(uint64_t v) {
+	v ^= (v >> 31);
+	v *= 0x7fb5d329728ea185;
+	/*
+	v ^= (v >> 27);
+	v *= 0x81dadef4bc2dd44d;
+	v ^= (v >> 33);
+	*/
+	return v;
+}
+
+/*
+void pt_bitmap(uint64_t from, uint64_t to){
+	//if(to == 0x400965 || (last == 0x400965 && to == 0x40087A)){
+	//	last = to;
+	//	state_byte = mix_bits(state_byte)^to;
+	//	bitmap[state_byte & (kafl_bitmap_size-1)]++;
+	//}
+
+	//printf("from: %lx\tto: %lx\n", from, to);
+
+	uint32_t transition_value = 0;
+	#ifdef SAMPLE_DECODED
+	sample_decoded(from,to);
+	#endif
+	if(bitmap){
+		transition_value = mix_bits(to)^(mix_bits(from)>>1);
+
+		//if ((from == 0x7ffff7884e8f && to == 0x7ffff7884eff) || (from == 0x7ffff7884f10 && to == 0x7ffff7884f12) || (from == 0x7ffff7884f14 && to == 0x7ffff7884e80)){
+		//	return;
+		//}
+		//fprintf(stderr, "%lx %lx %x\n", from, to, check_bitmap_byte(transition_value & (kafl_bitmap_size-1)));
+		if (check_bitmap_byte(transition_value & (kafl_bitmap_size-1)) == 0)
+			bitmap[transition_value & (kafl_bitmap_size-1)]++;
+	}
+}
+*/
+
+#ifdef DUMP_AND_DEBUG_PT
+void dump_pt_trace(void* buffer, int bytes){
+	static FILE* f = NULL;
+	static int fcounter = 0;
+	static size_t size = 0;
+	char filename[256];
+
+	if(!f){
+		snprintf(filename, 256, "/tmp/trace_data/data_%d", fcounter);
+		f = fopen(filename, "wb");
+	}
+
+	size += fwrite(buffer, bytes, 1, f) * bytes;
+
+	if(size >= 0x80000000){ /* rotate the dump file every 2GB */
+		fclose(f);
+		fcounter++;
+		size = 0;
+		snprintf(filename, 256, "/tmp/trace_data/data_%d", fcounter);
+		f = fopen(filename, "wb");
+	}
+}
+#endif
+
+void pt_dump(CPUState *cpu, int bytes){
+	if(should_dump_pt_trace){
+		assert(bytes == write(pt_trace_dump_fd, cpu->pt_mmap, bytes));
+	}
+	if(!(GET_GLOBAL_STATE()->redqueen_state && GET_GLOBAL_STATE()->redqueen_state->intercept_mode)){
+		if (GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->decoder_page_fault == false && GET_GLOBAL_STATE()->decoder && !GET_GLOBAL_STATE()->dump_page){
+			GET_GLOBAL_STATE()->pt_trace_size += bytes;
+			decoder_result_t result = libxdc_decode(GET_GLOBAL_STATE()->decoder, cpu->pt_mmap, bytes);
+			switch(result){
+				case decoder_success:
+					break;
+				case decoder_success_pt_overflow:
+					cpu->intel_pt_run_trashed = true;
+					break;
+				case decoder_page_fault:
+					fprintf(stderr, "Page not found => 0x%lx\n", libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder));
+					GET_GLOBAL_STATE()->decoder_page_fault = true;
+					GET_GLOBAL_STATE()->decoder_page_fault_addr = libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder);
+					break;
+				case decoder_unkown_packet:
+					fprintf(stderr, "WARNING: libxdc_decode returned decoder_unkown_packet\n");
+					break;
+				case decoder_error:
+					fprintf(stderr, "WARNING: libxdc_decode returned decoder_error\n");
+					break;
+			}
+		}
+	}
+}
+
+int pt_enable(CPUState *cpu, bool hmp_mode){
+	if(!fast_reload_set_bitmap(get_fast_reload_snapshot())){
+		fuzz_bitmap_reset();
+	}
+	//pt_reset_bitmap();
+	pt_trucate_pt_trace_file();
+	return pt_cmd(cpu, KVM_VMX_PT_ENABLE, hmp_mode);
+}
+
+int pt_disable(CPUState *cpu, bool hmp_mode){
+	int r = pt_cmd(cpu, KVM_VMX_PT_DISABLE, hmp_mode);
+	return r;
+}
+
+int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode){
+	if (val == GET_GLOBAL_STATE()->pt_c3_filter){
+		return 0; // nothing changed
+	}
+	//fprintf(stderr, "=========== %s %lx ============\n",
__func__, val); + int r = 0; + + if (cpu->pt_enabled){ + return -EINVAL; + } + if (GET_GLOBAL_STATE()->pt_c3_filter && GET_GLOBAL_STATE()->pt_c3_filter != val){ + //QEMU_PT_PRINTF(PT_PREFIX, "Reconfigure CR3-Filtering!"); + GET_GLOBAL_STATE()->pt_c3_filter = val; + r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_CR3, hmp_mode); + r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_CR3, hmp_mode); + return r; + } + GET_GLOBAL_STATE()->pt_c3_filter = val; + r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_CR3, hmp_mode); + r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_CR3, hmp_mode); + return r; +} + +int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode){ + int r = 0; + + if(addrn > 3){ + return -1; + } + + if (cpu->pt_enabled){ + return -EINVAL; + } + + if(GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] > GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]){ + QEMU_PT_PRINTF(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx", GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]); + return -EINVAL; + } + + if(GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn]){ + pt_disable_ip_filtering(cpu, addrn, hmp_mode); + } + + QEMU_PT_PRINTF(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)", addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]); + + if(GET_GLOBAL_STATE()->pt_ip_filter_configured[addrn] && GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] != 0 && GET_GLOBAL_STATE()->pt_ip_filter_b[addrn] != 0){ + r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_ADDR0+addrn, hmp_mode); + r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_ADDR0+addrn, hmp_mode); + GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = true; + } + return r; +} + +void pt_init_decoder(CPUState *cpu){ + uint64_t filters[4][2] = {0}; + + /* it's time to clean up this code -.- */ + filters[0][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[0]; + filters[0][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[0]; + filters[1][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[1]; + filters[1][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[1]; + filters[2][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[2]; + filters[2][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[2]; + filters[3][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[3]; + filters[3][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[3]; + + assert(GET_GLOBAL_STATE()->decoder == NULL); + assert(GET_GLOBAL_STATE()->shared_bitmap_ptr != NULL); + assert(GET_GLOBAL_STATE()->shared_bitmap_size != 0); + GET_GLOBAL_STATE()->decoder = libxdc_init(filters, (void* (*)(void*, uint64_t, bool*))page_cache_fetch2, GET_GLOBAL_STATE()->page_cache, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_size); + + libxdc_register_bb_callback(GET_GLOBAL_STATE()->decoder, (void (*)(void*, uint64_t, uint64_t))redqueen_callback, GET_GLOBAL_STATE()->redqueen_state); +} + +int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode){ + int r = 0; + switch(addrn){ + case 0: + case 1: + case 2: + case 3: + r = pt_cmd(cpu, KVM_VMX_PT_DISABLE_ADDR0+addrn, hmp_mode); + if(GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn]){ + GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = false; + } + break; + default: + r = -EINVAL; + } + return r; +} + +void pt_kvm_init(CPUState *cpu){ + cpu->pt_cmd = 0; + cpu->pt_enabled = false; + cpu->pt_fd = 0; + + cpu->pt_decoder_state = NULL; + //cpu->redqueen_state=NULL; + //cpu->redqueen_patch_state = patcher_new(cpu); + + //init_redqueen_patch_state(); + + cpu->reload_pending = false; + cpu->intel_pt_run_trashed = false; +} + +struct vmx_pt_filter_iprs { + __u64 a; + __u64 b; +}; + +pthread_mutex_t 
pt_dump_mutex = PTHREAD_MUTEX_INITIALIZER; + +void pt_pre_kvm_run(CPUState *cpu){ + pthread_mutex_lock(&pt_dump_mutex); + int ret; + struct vmx_pt_filter_iprs filter_iprs; + + if(GET_GLOBAL_STATE()->patches_disable_pending){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patches disable"); + assert(false); /* remove this branch */ + GET_GLOBAL_STATE()->patches_disable_pending = false; + } + + if(GET_GLOBAL_STATE()->patches_enable_pending){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patches enable"); + assert(false); /* remove this branch */ + GET_GLOBAL_STATE()->patches_enable_pending = false; + } + + + //if(cpu->redqueen_enable_pending){ + if(GET_GLOBAL_STATE()->redqueen_enable_pending){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "rq enable"); + if (GET_GLOBAL_STATE()->redqueen_state){ + enable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state); + } + //cpu->redqueen_enable_pending = false; + GET_GLOBAL_STATE()->redqueen_enable_pending = false; + //qemu_cpu_kick_self(); + } + + //if(cpu->redqueen_disable_pending){ + if(GET_GLOBAL_STATE()->redqueen_disable_pending){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "rq disable"); + if (GET_GLOBAL_STATE()->redqueen_state){ + disable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state); + } + //cpu->redqueen_disable_pending = false; + GET_GLOBAL_STATE()->redqueen_disable_pending = false; + //qemu_cpu_kick_self(); + } + if(GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force){ + if (!cpu->pt_fd) { + cpu->pt_fd = kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SETUP_FD, (unsigned long)0); + assert(cpu->pt_fd != -1); + ret = ioctl(cpu->pt_fd, KVM_VMX_PT_GET_TOPA_SIZE, (unsigned long)0x0); + + cpu->pt_mmap = mmap((void*)PT_BUFFER_MMAP_ADDR, ret, PROT_READ|PROT_WRITE, MAP_SHARED, cpu->pt_fd, 0); + assert(cpu->pt_mmap != (void*)0xFFFFFFFFFFFFFFFF); + assert(mmap(cpu->pt_mmap+ret, 0x1000, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, -1, 0) == (void*)(cpu->pt_mmap+ret)); //;!= (void*)0xFFFFFFFFFFFFFFFF); // add an extra page to have enough space for an additional PT_TRACE_END byte + + debug_printf("\t\t============> pt_mmap:%p - %p\n", cpu->pt_mmap, cpu->pt_mmap+ret); + + memset(cpu->pt_mmap+ret, 0x55, 0x1000); + } + + if (cpu->pt_cmd){ + switch(cpu->pt_cmd){ + case KVM_VMX_PT_ENABLE: + //fprintf(stderr, "=========== KVM_VMX_PT_ENABLE ============\n"); + + if (cpu->pt_fd){ + /* dump for the very last time before enabling VMX_PT ... 
just in case */ + ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0); + + if (!ioctl(cpu->pt_fd, cpu->pt_cmd, 0)){ + cpu->pt_enabled = true; + } + } + break; + case KVM_VMX_PT_DISABLE: + //fprintf(stderr, "=========== KVM_VMX_PT_DISABLE ============\n"); + + if (cpu->pt_fd){ + ret = ioctl(cpu->pt_fd, cpu->pt_cmd, 0); + if (ret > 0){ + //QEMU_PT_PRINTF(PT_PREFIX, "KVM_VMX_PT_DISABLE %d", ret); + pt_dump(cpu, ret); + cpu->pt_enabled = false; + } + } + break; + + /* ip filtering configuration */ + case KVM_VMX_PT_CONFIGURE_ADDR0: + case KVM_VMX_PT_CONFIGURE_ADDR1: + case KVM_VMX_PT_CONFIGURE_ADDR2: + case KVM_VMX_PT_CONFIGURE_ADDR3: + filter_iprs.a = GET_GLOBAL_STATE()->pt_ip_filter_a[(cpu->pt_cmd)-KVM_VMX_PT_CONFIGURE_ADDR0]; + filter_iprs.b = GET_GLOBAL_STATE()->pt_ip_filter_b[(cpu->pt_cmd)-KVM_VMX_PT_CONFIGURE_ADDR0]; + ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)&filter_iprs); + break; + case KVM_VMX_PT_ENABLE_ADDR0: + case KVM_VMX_PT_ENABLE_ADDR1: + case KVM_VMX_PT_ENABLE_ADDR2: + case KVM_VMX_PT_ENABLE_ADDR3: + ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0); + break; + case KVM_VMX_PT_CONFIGURE_CR3: + //fprintf(stderr, "=========== KVM_VMX_PT_CONFIGURE_CR3 ============\n"); + + ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, GET_GLOBAL_STATE()->pt_c3_filter); + break; + case KVM_VMX_PT_ENABLE_CR3: + //fprintf(stderr, "=========== KVM_VMX_PT_ENABLE_CR3 ============\n"); + + ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0); + break; + default: + if (cpu->pt_fd){ + ioctl(cpu->pt_fd, cpu->pt_cmd, 0); + } + break; + } + cpu->pt_cmd = 0; + cpu->pt_ret = 0; + //kvm_cpu_synchronize_state(cpu); + } + } + pthread_mutex_unlock(&pt_dump_mutex); +} + +void pt_handle_overflow(CPUState *cpu){ + pthread_mutex_lock(&pt_dump_mutex); + int overflow = ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0); + if (overflow > 0){ + //cpu->overflow_counter++; + pt_dump(cpu, overflow); + } + + /*else{ + printf("CPU NOT ENABLED?!\n"); + assert(false); + } + */ + pthread_mutex_unlock(&pt_dump_mutex); +} + +void pt_post_kvm_run(CPUState *cpu){ + if(GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force){ + + //printf("%s\n", __func__); + //int overflow; + //if (cpu->pt_enabled){ + pt_handle_overflow(cpu); + //unlock_reload_pending(cpu); + //} + } +} + +/* +void pt_sync_kvm_run_lock(void){ + pthread_mutex_lock(&pt_dump_mutex); +} + +void pt_sync_kvm_run_unlock(void){ + pthread_mutex_unlock(&pt_dump_mutex); +} +*/ diff --git a/nyx/pt.h b/nyx/pt.h new file mode 100644 index 0000000000..9badf1d96e --- /dev/null +++ b/nyx/pt.h @@ -0,0 +1,48 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . 
+ +*/ + +#ifndef PT_H +#define PT_H + +void pt_init_decoder(CPUState *cpu); + +void pt_sync(void); +void pt_reset_bitmap(void); +void pt_setup_bitmap(void* ptr); + +int pt_enable(CPUState *cpu, bool hmp_mode); +int pt_disable(CPUState *cpu, bool hmp_mode); +int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode); +int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode); +int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode); + +void pt_kvm_init(CPUState *cpu); +void pt_pre_kvm_run(CPUState *cpu); +void pt_post_kvm_run(CPUState *cpu); + +void pt_handle_overflow(CPUState *cpu); +void pt_dump(CPUState *cpu, int bytes); +void pt_bitmap(uint64_t from, uint64_t to); + +void pt_open_pt_trace_file(char* filename); +void pt_trucate_pt_trace_file(void); +#endif + diff --git a/nyx/redqueen.c b/nyx/redqueen.c new file mode 100644 index 0000000000..72b9478680 --- /dev/null +++ b/nyx/redqueen.c @@ -0,0 +1,1041 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#include +#include "nyx/redqueen.h" +#include "nyx/memory_access.h" +#include "nyx/interface.h" +#include +#include "file_helper.h" +#include "patcher.h" +#include "debug.h" +#include "redqueen_trace.h" +#include "nyx/state.h" +#include +#include + +redqueen_workdir_t redqueen_workdir = {0}; + +void setup_redqueen_workdir(char* workdir){ + assert(asprintf(&redqueen_workdir.redqueen_results,"%s/redqueen_results.txt", workdir)>0); + assert(asprintf(&redqueen_workdir.symbolic_results,"%s/symbolic_results.txt", workdir)>0); + assert(asprintf(&redqueen_workdir.pt_trace_results,"%s/pt_trace_results.txt", workdir)>0); + assert(asprintf(&redqueen_workdir.redqueen_patches,"%s/redqueen_patches.txt", workdir)>0); + assert(asprintf(&redqueen_workdir.breakpoint_white,"%s/breakpoint_white.txt", workdir)>0); + assert(asprintf(&redqueen_workdir.breakpoint_black,"%s/breakpoint_black.txt", workdir)>0); + assert(asprintf(&redqueen_workdir.target_code_dump,"%s/target_code_dump.img", workdir)>0); +} + +redqueen_t* new_rq_state(CPUState *cpu, page_cache_t* page_cache){ + redqueen_t* res = malloc(sizeof(redqueen_t)); + + res->cpu = cpu; + res->intercept_mode = false; + res->trace_mode = false; + res->singlestep_enabled = false; + res->hooks_applied = 0; + res->page_cache = page_cache; + + res->lookup = kh_init(RQ); + res->last_rip = 0x0; + res->next_rip = 0x0; + res->num_breakpoint_whitelist=0; + res->breakpoint_whitelist=NULL; + + res->trace_state=redqueen_trace_new(); + + //FILE* pt_file = fopen("/tmp/redqueen_vm.img", "wb"); + //delete_redqueen_files(); + //fwrite(&start_range, sizeof(uint64_t), 1, pt_file); + //fwrite(code, sizeof(uint8_t), end_range-start_range, pt_file); + //fclose(pt_file); + return res; +} + + +static bool is_interessting_lea_at(redqueen_t* self, cs_insn *ins){ + bool res = false; + + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op2 = 
&(x86->operands[1]); + + assert(op2->type == X86_OP_MEM); + + x86_reg reg = op2->mem.index; + int64_t disp = (int64_t)op2->mem.disp; + res = disp < 0 && (-disp) > 0xff && op2->mem.scale == 1 && op2->mem.base == X86_REG_INVALID && reg != X86_REG_INVALID; + + if(res){ + x86_reg reg = op2->mem.index; + if(reg == X86_REG_EIP || reg == X86_REG_RIP || reg == X86_REG_EBP || reg == X86_REG_RBP){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "got boring index"); + res = false; + } //don't instrument local stack offset computations + } + return res; +} + +static bool uses_register(cs_x86_op* op, x86_reg reg){ + + if (op->type == X86_OP_REG && op->reg == reg){ + return true; + } + + if (op->type == X86_OP_MEM && op->mem.base == reg){ + return true; + } + + return false; +} + +static bool uses_stack_access(cs_x86_op* op){ + if (uses_register(op, X86_REG_RBP) || uses_register(op, X86_REG_EBP)){ + return true; + } + + if (uses_register(op, X86_REG_RSP) || uses_register(op, X86_REG_ESP)){ + return true; + } + + return false; +} + +static bool is_interessting_add_at(redqueen_t* self, cs_insn *ins){ + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op1 = &(x86->operands[0]); + cs_x86_op *op2 = &(x86->operands[1]); + + if(op2->type == X86_OP_IMM && (op1->type == X86_OP_REG || op1->type == X86_OP_MEM)){ + //offsets needs to be negative, < -0xff to ensure we only look at multi byte substractions + if((op2->imm > 0x7fff && (((op2->imm>>8)&0xff) != 0xff))){ + if (!uses_stack_access(op1)){ + return true; + } + } + } + return false; +} + +static bool is_interessting_sub_at(redqueen_t* self, cs_insn *ins){ + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op1 = &(x86->operands[0]); + cs_x86_op *op2 = &(x86->operands[1]); + + if(op2->type == X86_OP_IMM && (op1->type == X86_OP_REG || op1->type == X86_OP_MEM)){ + if(op2->imm > 0xFF){ + if (!uses_stack_access(op1)){ + return true; + } + } + } + return false; +} + +static bool is_interessting_xor_at(redqueen_t* self, cs_insn *ins){ + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op1 = &(x86->operands[0]); + cs_x86_op *op2 = &(x86->operands[1]); + + if(op1->type == X86_OP_REG && op2->type == X86_OP_REG){ + if(op1->reg != op2->reg){ + return true; + } + } + return false; +} + +static void opcode_analyzer(redqueen_t* self, cs_insn *ins){ + //uint8_t i, j; + //cs_x86 details = ins->detail->x86; + //printf("SELF %p\n", self->redqueen_state); + //printf("INS %lx\n", ins->address); + if(ins->id == X86_INS_CMP){ + set_rq_instruction(self, ins->address); + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking cmp %lx %s %s", ins->address, ins->mnemonic, ins->op_str); + } + if(ins->id == X86_INS_LEA && is_interessting_lea_at(self, ins)){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking lea %lx", ins->address); + set_rq_instruction(self, ins->address); + } + if(ins->id == X86_INS_SUB && is_interessting_sub_at(self, ins)){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking sub %lx", ins->address); + set_rq_instruction(self, ins->address); + } + if(ins->id == X86_INS_ADD && is_interessting_add_at(self, ins)){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking add %lx", ins->address); + set_rq_instruction(self, ins->address); + } + if(ins->id == X86_INS_XOR && is_interessting_xor_at(self, ins)){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking xor %lx %s %s", ins->address, ins->mnemonic, ins->op_str); + set_rq_instruction(self, ins->address); + } + if(ins->id ==X86_INS_CALL || 
ins->id == X86_INS_LCALL){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "hooking call %lx %s %s", ins->address, ins->mnemonic, ins->op_str); + set_rq_instruction(self, ins->address); + } +} + +void redqueen_callback(void* opaque, disassembler_mode_t mode, uint64_t start_addr, uint64_t end_addr){ + GET_GLOBAL_STATE()->bb_coverage++; + redqueen_t* self = (redqueen_t*) opaque; + + if(start_addr != end_addr){ + uint64_t failed_page = 0; + uint64_t code = start_addr; + + cs_insn *insn = page_cache_cs_malloc(self->page_cache, mode); + + while(page_cache_disassemble_iter(self->page_cache, &code, insn, &failed_page, mode)){ + if (insn->address > end_addr){ + break; + } + opcode_analyzer(self, insn); + } + cs_free(insn, 1); + } +} + + + +static void redqueen_trace_enabled(redqueen_t* self){ + int unused __attribute__((unused)); + if(self->trace_mode){ + + //libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder); + libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder); + libxdc_register_edge_callback(GET_GLOBAL_STATE()->decoder, (void (*)(void*, uint64_t, uint64_t))&redqueen_trace_register_transition, self->trace_state); + //redqueen_trace_register_transition(self->trace_state, INIT_TRACE_IP, ip); + //last_ip = ip; + } +} + +static void redqueen_trace_disabled(redqueen_t* self){ + int unused __attribute__((unused)); + if(self->trace_mode){ + libxdc_disable_tracing(GET_GLOBAL_STATE()->decoder); + + //redqueen_trace_register_transition(self->trace_state, last_ip, ip); + //edqueen_trace_register_transition(self->trace_state, ip, INIT_TRACE_IP); + } +} + +void redqueen_set_trace_mode(redqueen_t* self){ + delete_trace_files(); + self->trace_mode = true; + redqueen_trace_enabled(self); +} + +void redqueen_unset_trace_mode(redqueen_t* self){ + //write_trace_result(self->trace_state); + //redqueen_trace_reset(self->trace_state); + redqueen_trace_disabled(self); + + self->trace_mode = false; +} + +void destroy_rq_state(redqueen_t* self){ + redqueen_trace_free(self->trace_state); + kh_destroy(RQ, self->lookup); + free(self); +} + +static void redqueen_set_addr_flags(redqueen_t* self, uint64_t addr, uint32_t flags){ + int unused = 0; + //fprintf(stderr, "%s\n", __func__); + khiter_t k = kh_get(RQ, self->lookup, addr); + if(k == kh_end(self->lookup)){ + k = kh_put(RQ, self->lookup, addr, &unused); + kh_value(self->lookup, k) = 0; + } + kh_value(self->lookup, k) |= flags; +} + +static bool redqueen_check_addr_flags(redqueen_t* self, uint64_t addr, uint32_t flags){ + khiter_t k = kh_get(RQ, self->lookup, addr); + if(k != kh_end(self->lookup)){ + return !!(kh_value(self->lookup, k) & flags); + } + else{ + return false; + } +} + +static bool redqueen_check_addr(redqueen_t* self, uint64_t addr){ + khiter_t k = kh_get(RQ, self->lookup, addr); + if(k != kh_end(self->lookup)){ + return true; + } + else{ + return false; + } +} + +static uint32_t redqueen_update_addr_count(redqueen_t* self, uint64_t addr){ + int unused __attribute__((unused)); + uint32_t value = 0; + khiter_t k = kh_get(RQ, self->lookup, addr); + if(k != kh_end(self->lookup)){ + value = kh_value(self->lookup, k); + } + else{ + k = kh_put(RQ, self->lookup, addr, &unused); + } + value++; + kh_value(self->lookup, k) = value; + return value & 0xFF000000UL; +} + +/* +static void set_rq_trace_enabled_bp(redqueen_t* self, uint64_t addr){ + redqueen_set_addr_flags(self, addr, CMP_BITMAP_TRACE_ENABLED); +} +*/ + +void set_rq_instruction(redqueen_t* self, uint64_t addr){ + //fprintf(stderr, "%s\n", __func__); + if( !redqueen_check_addr_flags(self, addr, CMP_BITMAP_BLACKLISTED)){ 
+ //fprintf(stderr, "%s +2\n", __func__); + + redqueen_set_addr_flags(self, addr, CMP_BITMAP_RQ_INSTRUCTION); + } +} + +void set_rq_blacklist(redqueen_t* self, uint64_t addr){ + redqueen_set_addr_flags(self, addr, CMP_BITMAP_BLACKLISTED); +} + +static void insert_hooks_whitelist(redqueen_t* self){ + fprintf(stderr, "%s\n", __func__); + for(size_t i = 0; i < self->num_breakpoint_whitelist; i++){ + insert_breakpoint(self->cpu, self->breakpoint_whitelist[i], 1); + } +} + +static void insert_hooks_bitmap(redqueen_t* self){ + uint64_t c = 0; + //fprintf(stderr, "%s\n", __func__); + + uint64_t addr; + uint32_t value __attribute__((unused)); + uint32_t mode = GET_GLOBAL_STATE()->redqueen_instrumentation_mode; + //uint32_t mode = self->cpu->redqueen_instrumentation_mode; + kh_foreach(self->lookup, addr, value, { + //fprintf(stderr, "%s %lx %x\n", __func__, addr, value); + if(redqueen_check_addr_flags(self, addr, CMP_BITMAP_BLACKLISTED)){ continue; } + + //bool should_hook_se = (mode == REDQUEEN_SE_INSTRUMENTATION) && redqueen_check_addr_flags(self, addr, CMP_BITMAP_SHOULD_HOOK_SE); + bool should_hook_rq = (mode == REDQUEEN_LIGHT_INSTRUMENTATION ) && redqueen_check_addr_flags(self, addr, CMP_BITMAP_SHOULD_HOOK_RQ); + + if( should_hook_rq ){ + insert_breakpoint(self->cpu, addr, 1); + c++; + } + }); +} + +void redqueen_insert_hooks(redqueen_t* self){ + // fprintf(stderr, "%s %x\n", __func__, self->cpu->redqueen_instrumentation_mode); + + QEMU_PT_PRINTF(REDQUEEN_PREFIX, "insert hooks"); + assert(!self->hooks_applied); + //switch(self->cpu->redqueen_instrumentation_mode){ + switch(GET_GLOBAL_STATE()->redqueen_instrumentation_mode){ + case(REDQUEEN_LIGHT_INSTRUMENTATION): + insert_hooks_bitmap(self); + break; + case(REDQUEEN_WHITELIST_INSTRUMENTATION): + insert_hooks_whitelist(self); + break; + case(REDQUEEN_NO_INSTRUMENTATION): + break; + default: + assert(false); + } + self->hooks_applied = 1; +} + +void redqueen_remove_hooks(redqueen_t* self){ + QEMU_PT_PRINTF(REDQUEEN_PREFIX, "remove hooks"); + // fprintf(stderr, "remove hooks\n"); + assert(self->hooks_applied); + remove_all_breakpoints(self->cpu); + + for (khiter_t i = kh_begin(self->lookup); i != kh_end(self->lookup); ++i) { + if (!kh_exist(self->lookup,i)) continue; + kh_val(self->lookup,i) &= 0xFF000000UL; + } + self->hooks_applied = 0; + return; +} +static uint64_t get_segment_register(x86_reg reg) { + X86CPU *cpu = X86_CPU(qemu_get_cpu(0)); + CPUX86State *env = &cpu->env; + switch(reg){ + case X86_REG_GS: return env->segs[R_GS].base; + case X86_REG_FS: return env->segs[R_FS].base; + case X86_REG_CS: return env->segs[R_CS].base; + case X86_REG_DS: return env->segs[R_DS].base; + case X86_REG_SS: return env->segs[R_SS].base; + default: + break; + } + assert(false); +} + +static inline uint64_t sign_extend_from_size(uint64_t value, uint8_t size){ + switch(size){ + case 64: return value; + case 32: return ((int32_t)(value)<0) ? 0xffffffff00000000 | value : value; + case 16: return ((int16_t)(value)<0) ? 0xffffffffffff0000 | value : value; + case 8: return (( int8_t)(value)<0) ? 
0xffffffffffffff00 | value : value; + } + assert(false); +} + +static uint64_t eval_reg(x86_reg reg, uint8_t *size){ + uint64_t value = 0; + CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env; + + switch(reg) { + case X86_REG_RAX: value=env->regs[RAX]; *size=64; break; + case X86_REG_RCX: value=env->regs[RCX]; *size=64; break; + case X86_REG_RDX: value=env->regs[RDX]; *size=64; break; + case X86_REG_RBX: value=env->regs[RBX]; *size=64; break; + case X86_REG_RSP: value=env->regs[RSP]; *size=64; break; + case X86_REG_RBP: value=env->regs[RBP]; *size=64; break; + case X86_REG_RSI: value=env->regs[RSI]; *size=64; break; + case X86_REG_RDI: value=env->regs[RDI]; *size=64; break; + case X86_REG_R8: value=env->regs[R8]; *size=64; break; + case X86_REG_R9: value=env->regs[R9]; *size=64; break; + case X86_REG_R10: value=env->regs[R10]; *size=64; break; + case X86_REG_R11: value=env->regs[R11]; *size=64; break; + case X86_REG_R12: value=env->regs[R12]; *size=64; break; + case X86_REG_R13: value=env->regs[R13]; *size=64; break; + case X86_REG_R14: value=env->regs[R14]; *size=64; break; + case X86_REG_R15: value=env->regs[R15]; *size=64; break; + case X86_REG_EAX: value=env->regs[RAX]&0xffffffff; *size=32; break; + case X86_REG_ECX: value=env->regs[RCX]&0xffffffff; *size=32; break; + case X86_REG_EDX: value=env->regs[RDX]&0xffffffff; *size=32; break; + case X86_REG_EBX: value=env->regs[RBX]&0xffffffff; *size=32; break; + case X86_REG_ESP: value=env->regs[RSP]&0xffffffff; *size=32; break; + case X86_REG_EBP: value=env->regs[RBP]&0xffffffff; *size=32; break; + case X86_REG_ESI: value=env->regs[RSI]&0xffffffff; *size=32; break; + case X86_REG_EDI: value=env->regs[RDI]&0xffffffff; *size=32; break; + case X86_REG_R8D: value=env->regs[R8]&0xffffffff; *size=32; break; + case X86_REG_R9D: value=env->regs[R9]&0xffffffff; *size=32; break; + case X86_REG_R10D: value=env->regs[R10]&0xffffffff; *size=32; break; + case X86_REG_R11D: value=env->regs[R11]&0xffffffff; *size=32; break; + case X86_REG_R12D: value=env->regs[R12]&0xffffffff; *size=32; break; + case X86_REG_R13D: value=env->regs[R13]&0xffffffff; *size=32; break; + case X86_REG_R14D: value=env->regs[R14]&0xffffffff; *size=32; break; + case X86_REG_R15D: value=env->regs[R15]&0xffffffff; *size=32; break; + case X86_REG_AX: value=env->regs[RAX]&0xffff; *size=16; break; + case X86_REG_CX: value=env->regs[RCX]&0xffff; *size=16; break; + case X86_REG_DX: value=env->regs[RDX]&0xffff; *size=16; break; + case X86_REG_BX: value=env->regs[RBX]&0xffff; *size=16; break; + case X86_REG_SP: value=env->regs[RSP]&0xffff; *size=16; break; + case X86_REG_BP: value=env->regs[RBP]&0xffff; *size=16; break; + case X86_REG_SI: value=env->regs[RSI]&0xffff; *size=16; break; + case X86_REG_DI: value=env->regs[RDI]&0xffff; *size=16; break; + case X86_REG_R8W: value=env->regs[R8]&0xffff; *size=16; break; + case X86_REG_R9W: value=env->regs[R9]&0xffff; *size=16; break; + case X86_REG_R10W: value=env->regs[R10]&0xffff; *size=16; break; + case X86_REG_R11W: value=env->regs[R11]&0xffff; *size=16; break; + case X86_REG_R12W: value=env->regs[R12]&0xffff; *size=16; break; + case X86_REG_R13W: value=env->regs[R13]&0xffff; *size=16; break; + case X86_REG_R14W: value=env->regs[R14]&0xffff; *size=16; break; + case X86_REG_R15W: value=env->regs[R15]&0xffff; *size=16; break; + case X86_REG_AL: value=env->regs[RAX]&0xff; *size=8; break; + case X86_REG_CL: value=env->regs[RCX]&0xff; *size=8; break; + case X86_REG_DL: value=env->regs[RDX]&0xff; *size=8; break; + case X86_REG_BL: 
value=env->regs[RBX]&0xff; *size=8; break; + case X86_REG_SPL: value=env->regs[RSP]&0xff; *size=8; break; + case X86_REG_BPL: value=env->regs[RBP]&0xff; *size=8; break; + case X86_REG_SIL: value=env->regs[RSI]&0xff; *size=8; break; + case X86_REG_DIL: value=env->regs[RDI]&0xff; *size=8; break; + case X86_REG_R8B: value=env->regs[R8]&0xff; *size=8; break; + case X86_REG_R9B: value=env->regs[R9]&0xff; *size=8; break; + case X86_REG_R10B: value=env->regs[R10]&0xff; *size=8; break; + case X86_REG_R11B: value=env->regs[R11]&0xff; *size=8; break; + case X86_REG_R12B: value=env->regs[R12]&0xff; *size=8; break; + case X86_REG_R13B: value=env->regs[R13]&0xff; *size=8; break; + case X86_REG_R14B: value=env->regs[R14]&0xff; *size=8; break; + case X86_REG_R15B: value=env->regs[R15]&0xff; *size=8; break; + case X86_REG_AH: value=(env->regs[RAX]>>8)&0xff; *size=8; break; + case X86_REG_CH: value=(env->regs[RCX]>>8)&0xff; *size=8; break; + case X86_REG_DH: value=(env->regs[RDX]>>8)&0xff; *size=8; break; + case X86_REG_BH: value=(env->regs[RBX]>>8)&0xff; *size=8; break; + case X86_REG_RIP: value=env->eip; *size=64; break; + case X86_REG_EIP: value=env->eip&0xffffffff; *size=32; break; + case X86_REG_IP: value=env->eip&0xfffff; *size=16; break; + default: + assert(false); + } + return value; +} + +static uint64_t eval_addr(cs_x86_op* op){ + uint8_t size=0; + uint64_t base = 0; + uint64_t index = 0; + uint64_t segment = 0; + + assert(op->type == X86_OP_MEM); + + if(op->mem.base != X86_REG_INVALID){ + base = eval_reg(op->mem.base, &size); + } + if(op->mem.index != X86_REG_INVALID){ + index = eval_reg(op->mem.index, &size); + } + + if(op->mem.segment != X86_REG_INVALID){ + segment = get_segment_register(op->mem.segment); + } + + uint64_t addr = segment + base + index*op->mem.scale + op->mem.disp; + return addr; +} + +static uint64_t eval_mem(cs_x86_op* op){ + + uint64_t val = 0; + assert(op->size == 1 || op->size == 2 || op->size == 4 || op->size == 8); + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "EVAL MEM FOR OP:"); + + /* TODO @ sergej: replace me later */ + read_virtual_memory(eval_addr(op), (uint8_t*) &val, op->size, qemu_get_cpu(0)); + return val; +} + +static uint64_t eval(cs_x86_op* op, uint8_t* size){ + switch((int)op->type) { + case X86_OP_REG: + return eval_reg(op->reg, size); + case X86_OP_IMM: + *size=0; + return op->imm; + case X86_OP_MEM: + switch(op->size){ + case 1: *size =8; return eval_mem(op)&0xff; + case 2: *size =16; return eval_mem(op)&0xffff; + case 4: *size =32; return eval_mem(op)&0xffffffff; + case 8: *size =64; return eval_mem(op); + } + } + + /* unreachable, dude! 
*/ + assert(false); + return 0; +} + +static void print_comp_result(uint64_t addr, const char* type, uint64_t val1, uint64_t val2, uint8_t size, bool is_imm){ + + char result_buf[256]; + const char *format = NULL; + uint8_t pos = 0; + pos += snprintf(result_buf+pos, 256-pos, "%lx\t\t %s", addr, type); + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "got size: %ld", size); + uint64_t mask = 0; + switch(size){ + case 64: format = " 64\t%016lX-%016lX"; mask = 0xffffffffffffffff; break; + case 32: format = " 32\t%08X-%08X"; mask = 0xffffffff; break; + case 16: format = " 16\t%04X-%04X"; mask = 0xffff; break; + case 8: format = " 8\t%02X-%02X"; mask = 0xff; break; + default: + assert(false); + } + pos += snprintf(result_buf+pos, 256-pos, format, val1 & mask, val2 & mask); + if(is_imm){ + pos += snprintf(result_buf+pos, 256-pos, " IMM"); + } + pos += snprintf(result_buf+pos, 256-pos, "\n"); + write_re_result(result_buf); +} + +static void get_cmp_value(cs_insn *ins, const char* type){ + uint8_t size_1=0; + uint8_t size_2=0; + + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op1 = &(x86->operands[0]); + cs_x86_op *op2 = &(x86->operands[1]); + + uint64_t v1 = eval(op1, &size_1); + uint64_t v2 = eval(op2, &size_2); + + if(GET_GLOBAL_STATE()->redqueen_instrumentation_mode == REDQUEEN_WHITELIST_INSTRUMENTATION || v1 != v2){ + print_comp_result(ins->address, type, v1, v2, (size_1 ? size_1 : size_2), op2->type == X86_OP_IMM); + } +} + +static void get_cmp_value_add(cs_insn *ins){ + uint8_t size_1=0; + uint8_t size_2=0; + + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op1 = &(x86->operands[0]); + cs_x86_op *op2 = &(x86->operands[1]); + + uint64_t v1 = eval(op1, &size_1); + uint64_t v2 = -sign_extend_from_size(eval(op2, &size_2), size_1); + + if(op2->type != X86_OP_IMM){ + return; + } + + if(GET_GLOBAL_STATE()->redqueen_instrumentation_mode == REDQUEEN_WHITELIST_INSTRUMENTATION || v1 != v2){ + bool is_imm = true; + print_comp_result(ins->address, "SUB", v1, v2, size_1, is_imm); + } +} + +static void get_cmp_value_lea(cs_insn *ins){ + uint64_t index_val = 0; + + assert(ins); + cs_x86 *x86 = &(ins->detail->x86); + + assert(x86->op_count == 2); + cs_x86_op *op2 = &(x86->operands[1]); + + assert(op2->type == X86_OP_MEM); + + uint8_t size=0; + if(op2->mem.base != X86_REG_INVALID && op2->mem.index != X86_REG_INVALID){ + return; + } + + if(op2->mem.base == X86_REG_INVALID && op2->mem.index == X86_REG_INVALID){ + return; + } + + if(op2->mem.base != X86_REG_INVALID ){ + index_val = eval_reg(op2->mem.base, &size); + } + + if(op2->mem.index != X86_REG_INVALID ){ + index_val = eval_reg(op2->mem.index, &size); + } + + if(GET_GLOBAL_STATE()->redqueen_instrumentation_mode == REDQUEEN_WHITELIST_INSTRUMENTATION || index_val != -op2->mem.disp){ + bool is_imm = false; + print_comp_result(ins->address, "LEA", index_val, -op2->mem.disp, op2->size*8, is_imm); + } +} + + +static uint64_t limit_to_word_width(uint64_t val){ + switch(GET_GLOBAL_STATE()->disassembler_word_width){ + case 64: + return val; + case 32: + return val & 0xffffffff; + default: + assert(false); + } +} + +static uint64_t word_width_to_bytes(void){ + switch(GET_GLOBAL_STATE()->disassembler_word_width){ + case 64: + return 8; + case 32: + return 4; + default: + assert(false); + } +} + +static uint64_t read_stack(uint64_t word_index){ + CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env; + uint64_t rsp = env->regs[RSP]; + rsp = limit_to_word_width(rsp); + uint64_t res = 0; + 
uint64_t stack_ptr = rsp + word_index * word_width_to_bytes(); + /* todo @ sergej */ + assert(read_virtual_memory(stack_ptr, (uint8_t*)(&res), 8, qemu_get_cpu(0))); + return limit_to_word_width(res); +} + +static void format_strcmp(uint8_t* buf1, uint8_t* buf2){ + char out_buf[REDQUEEN_MAX_STRCMP_LEN*4 + 2]; + char* tmp_hex_buf = &out_buf[0]; + for(int i = 0; i < REDQUEEN_MAX_STRCMP_LEN; i++){ + tmp_hex_buf += sprintf(tmp_hex_buf, "%02X", (uint8_t)buf1[i]); + } + *tmp_hex_buf++ = '-'; + for(int i = 0; i < REDQUEEN_MAX_STRCMP_LEN; i++){ + tmp_hex_buf += sprintf(tmp_hex_buf, "%02X", (uint8_t)buf2[i]); + } + char *res=0; + CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env; + uint64_t rip = env->eip; + assert(asprintf( &res, "%lx\t\tSTR %d\t%s\n", rip, REDQUEEN_MAX_STRCMP_LEN*8, out_buf ) != -1); + write_re_result(res); + free(res); +} + +static bool test_strchr(uint64_t arg1, uint64_t arg2){ + CPUState *cpu = qemu_get_cpu(0); + + /* todo @ sergej */ + if(!is_addr_mapped(arg1, cpu) || arg2 & (~0xff)){ + return false; + } + uint8_t buf1[REDQUEEN_MAX_STRCMP_LEN]; + uint8_t buf2[REDQUEEN_MAX_STRCMP_LEN]; + + /* todo @ sergej */ + assert(read_virtual_memory(arg1, &buf1[0], REDQUEEN_MAX_STRCMP_LEN, cpu)); + if(!memchr(buf1,'\0',REDQUEEN_MAX_STRCMP_LEN) ){return false;} + memset(buf2,'\0',REDQUEEN_MAX_STRCMP_LEN); + buf2[0]= (uint8_t)(arg2); + format_strcmp(buf1, buf2); + return true; +} + +static bool test_strcmp(uint64_t arg1, uint64_t arg2){ + CPUState *cpu = qemu_get_cpu(0); + if(!is_addr_mapped(arg1, cpu) || ! is_addr_mapped(arg2, cpu)){ + return false; + } + //QEMU_PT_PRINTF(REDQUEEN_PREFIX,"valid ptrs"); + uint8_t buf1[REDQUEEN_MAX_STRCMP_LEN]; + uint8_t buf2[REDQUEEN_MAX_STRCMP_LEN]; + /* todo @ sergej */ + assert(read_virtual_memory(arg1, &buf1[0], REDQUEEN_MAX_STRCMP_LEN, cpu)); + assert(read_virtual_memory(arg2, &buf2[0], REDQUEEN_MAX_STRCMP_LEN, cpu)); + format_strcmp(buf1,buf2); + return true; +} + +static bool test_strcmp_cdecl(void){ + uint64_t arg1 = read_stack(0); + uint64_t arg2 = read_stack(1); + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call params cdecl %lx %lx", arg1, arg2); + test_strchr(arg1, arg2); + return test_strcmp(arg1, arg2); + +} + +static bool test_strcmp_fastcall(void){ + CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env; + uint64_t arg1 = env->regs[RCX]; //rcx + uint64_t arg2 = env->regs[RDX]; //rdx + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call params fastcall %lx %lx", arg1, arg2); + test_strchr(arg1, arg2); + return test_strcmp(arg1, arg2); +} + +static bool test_strcmp_sys_v(void){ + if(GET_GLOBAL_STATE()->disassembler_word_width != 64 ){return false;} + CPUX86State *env = &(X86_CPU(qemu_get_cpu(0)))->env; + uint64_t arg1 = env->regs[RDI]; //rdi + uint64_t arg2 = env->regs[RSI]; //rsi + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call params sysv %lx %lx", arg1, arg2); + test_strchr(arg1, arg2); + return test_strcmp(arg1, arg2); +} + +static void extract_call_params(void){ + //QEMU_PT_PRINTF(REDQUEEN_PREFIX, "extract call at %lx", ip); + test_strcmp_cdecl(); + test_strcmp_fastcall(); + test_strcmp_sys_v(); +} + +/* +static bool is_memory_access(redqueen_t* self, cs_insn* insn){ + return insn->id != X86_INS_LEA && strstr(insn->op_str,"["); +} + +static bool is_trace_entry_point(redqueen_t* self, uint64_t addr){ + //if(addr >= self->address_range_start && addr <= self->address_range_end){ + return redqueen_check_addr_flags(self, addr, CMP_BITMAP_TRACE_ENABLED); + //} + return false; +} +*/ + +static void handle_hook_redqueen_light(redqueen_t* self, 
uint64_t ip, cs_insn *insn){ + if(insn->id == X86_INS_CMP || insn->id == X86_INS_XOR){ //handle original redqueen case + get_cmp_value(insn, "CMP"); + } else if(insn->id == X86_INS_SUB){ //handle original redqueen case + get_cmp_value(insn, "SUB"); + } else if(insn->id == X86_INS_LEA){ //handle original redqueen case + get_cmp_value_lea(insn); + } else if(insn->id == X86_INS_ADD){ //handle original redqueen case + get_cmp_value_add(insn); + } else if (insn->id == X86_INS_CALL || insn->id == X86_INS_LCALL){ + extract_call_params(); + } +} + +static uint8_t handle_hook_breakpoint(redqueen_t* self, bool write_data){ + //fprintf(stderr, "%s\n", __func__); + //printf("%s\n", __func__); + + X86CPU *cpu = X86_CPU(self->cpu); + CPUX86State *env = &cpu->env; + + cs_insn *insn = NULL; + switch(GET_GLOBAL_STATE()->disassembler_word_width){ + case 64: + insn = page_cache_cs_malloc(self->page_cache, mode_64); + break; + case 32: + insn = page_cache_cs_malloc(self->page_cache, mode_32); + break; + default: + abort(); + } + uint8_t ins_size = 0; + uint64_t ip = env->eip; + uint64_t code = ip; + uint64_t failed_page = 0; + + switch(GET_GLOBAL_STATE()->disassembler_word_width){ + case 64: + assert(page_cache_disassemble_iter(self->page_cache, &code, insn, &failed_page, mode_64)); + break; + case 32: + assert(page_cache_disassemble_iter(self->page_cache, &code, insn, &failed_page, mode_32)); + break; + default: + abort(); + } + + ins_size = insn->size; + + if(write_data){ + //int mode = self->cpu->redqueen_instrumentation_mode; + int mode = GET_GLOBAL_STATE()->redqueen_instrumentation_mode; + if(mode == REDQUEEN_LIGHT_INSTRUMENTATION || mode == REDQUEEN_WHITELIST_INSTRUMENTATION || mode == REDQUEEN_SE_INSTRUMENTATION){ + handle_hook_redqueen_light(self, ip, insn); + } + if(mode == REDQUEEN_SE_INSTRUMENTATION){ + assert(false); + } + } + cs_free(insn, 1); + + assert(ins_size != 0); + return ins_size; +} + +/* +static void debug_print_disasm(char* desc, uint64_t ip, CPUState* cpu_state){ + //uint64_t cs_address = ip; + uint8_t code[64]; + csh handle; + cs_insn *insn; + read_virtual_memory(ip, &code[0], 64, cpu_state); + if (cs_open(CS_ARCH_X86, get_capstone_mode(cpu_state->disassembler_word_width), &handle) == CS_ERR_OK){ + cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON); + size_t count = cs_disasm(handle, &code[0], 64, ip, 1, &insn); + if(count > 0){ + QEMU_PT_PRINTF(REDQUEEN_PREFIX,"%s\t %lx: %s %s",desc, ip, insn->mnemonic, insn->op_str); + } else { + QEMU_PT_PRINTF(REDQUEEN_PREFIX,"%s\t Failed to disassemble at: %lx",desc, ip); + } + cs_close(&handle); + cs_free(insn, count); + } else { + QEMU_PT_PRINTF(REDQUEEN_PREFIX,"%s\t Failed to create capstone instance at: %lx",desc, ip); + } +} +*/ + +/* +static void debug_print_state(char* desc, CPUState* cpu_state){ + X86CPU *cpu = X86_CPU(cpu_state); + CPUX86State *env = &cpu->env; + debug_print_disasm(desc, env->eip, cpu_state); + QEMU_PT_PRINTF(REDQUEEN_PREFIX,"ECX: %lx", get_reg_cpu(cpu_state, (char*)"rcx")); +} +*/ + +/* +int trace_debug = false; + +void handle_hook(redqueen_t* self){ + X86CPU *cpu = X86_CPU(self->cpu); + CPUX86State *env = &cpu->env; + + uint8_t ins; + + read_virtual_memory(env->eip, (uint8_t*)&ins, 1, self->cpu); + + if(ins == 0xcc && self->cpu->singlestep_enabled){ + fprintf(stderr, "fix... 
%lx\n", env->eip); + self->cpu->singlestep_enabled = false; + self->singlestep_enabled = false; + //kvm_insert_breakpoint(self->cpu, self->last_rip, 1, 0); + kvm_update_guest_debug(self->cpu, 0); + self->last_rip = 0; + return; + } + + if(!self->cpu->singlestep_enabled){ + fprintf(stderr, "HOOK %lx\n", env->eip); + + if(self->last_rip != 0) abort(); + self->last_rip = env->eip; + + read_virtual_memory(env->eip, (uint8_t*)&ins, 1, self->cpu); + if(ins != 0xcc) abort(); + kvm_remove_breakpoint(self->cpu, env->eip, 1, 0); + self->cpu->singlestep_enabled = true; + self->singlestep_enabled = true; + if(self->cpu->pt_enabled && self->cpu->pt_c3_filter == env->cr[3]){ + handle_hook_breakpoint(self); + } + kvm_update_guest_debug(self->cpu, 0); + + } else{ + fprintf(stderr, "HOOK %lx SINGLETEP\n", env->eip); + + + if(self->last_rip == 0) abort(); + + + + + + self->cpu->singlestep_enabled = false; + self->singlestep_enabled = false; + if(self->counter_bitmap[self->last_rip-self->address_range_start]++ < REDQUEEN_TRAP_LIMIT){ + fprintf(stderr, "TRAP INSTALLED\n"); + read_virtual_memory(env->eip, (uint8_t*)&ins, 1, self->cpu); + if(ins == 0xcc) abort(); + + if(ins != 0xcc) + kvm_insert_breakpoint(self->cpu, self->last_rip, 1, 0); + } + else { + fprintf(stderr, "TRAP INSTALLED nOPE %lx %lx\n", self->counter_bitmap[self->last_rip-self->address_range_start], self->counter_bitmap); + } + kvm_update_guest_debug(self->cpu, 0); + self->last_rip = 0; + } +} +*/ + + +void handle_hook(redqueen_t* self){ + X86CPU *cpu = X86_CPU(self->cpu); + CPUX86State *env = &cpu->env; + + if (self->next_rip){ + + //fprintf(stderr, "REMOVE %lx at %lx\n", self->next_rip, env->eip); + remove_breakpoint(self->cpu, self->next_rip, 1); + + if(self->last_rip && redqueen_update_addr_count(self, self->last_rip) < REDQUEEN_TRAP_LIMIT){ + insert_breakpoint(self->cpu, self->last_rip, 1); + } + + kvm_update_guest_debug(self->cpu, 0); + + self->last_rip = 0; + self->next_rip = 0; + } + + if(redqueen_check_addr(self, env->eip)){ + + //fprintf(stderr, "INSERT %lx\n", env->eip); + self->last_rip = env->eip; + remove_breakpoint(self->cpu, env->eip, 1); + + //if(self->cpu->pt_enabled && self->cpu->pt_c3_filter == env->cr[3]){ + if(self->cpu->pt_enabled && GET_GLOBAL_STATE()->pt_c3_filter == env->cr[3]){ + self->next_rip = handle_hook_breakpoint(self, true); + } + else{ + self->next_rip = handle_hook_breakpoint(self, true); + } + } + else { + //fprintf(stderr, "NOPE %lx\n", env->eip); + } +} + + + +static void _redqueen_update_whitelist(redqueen_t* self){ + //if(self->cpu->redqueen_instrumentation_mode == REDQUEEN_WHITELIST_INSTRUMENTATION){ + if(GET_GLOBAL_STATE()->redqueen_instrumentation_mode == REDQUEEN_WHITELIST_INSTRUMENTATION){ + //size_t num_addrs = 0; + //uint64_t *addrs; + free(self->breakpoint_whitelist); + parse_address_file(redqueen_workdir.breakpoint_white, &self->num_breakpoint_whitelist, &self->breakpoint_whitelist); + } +} + +static void _redqueen_update_blacklist(redqueen_t* self){ + //if(self->cpu->redqueen_update_blacklist){ + if(GET_GLOBAL_STATE()->redqueen_update_blacklist){ + size_t num_addrs = 0; + uint64_t *addrs; + parse_address_file(redqueen_workdir.breakpoint_black, &num_addrs, &addrs); + for(size_t i = 0; i< num_addrs; i++){ + set_rq_blacklist(self, addrs[i]); + } + free(addrs); + } +} + +void enable_rq_intercept_mode(redqueen_t* self){ + if(!self->intercept_mode){ + delete_redqueen_files(); + //unlink("/tmp/redqueen_result.txt"); + _redqueen_update_whitelist(self); + _redqueen_update_blacklist(self); + 
redqueen_insert_hooks(self); + self->intercept_mode = true; + } +} + +void disable_rq_intercept_mode(redqueen_t* self){ + if(self->intercept_mode){ + redqueen_remove_hooks(self); + self->intercept_mode = false; + } +} diff --git a/nyx/redqueen.h b/nyx/redqueen.h new file mode 100644 index 0000000000..ce0d03a788 --- /dev/null +++ b/nyx/redqueen.h @@ -0,0 +1,128 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifndef REDQUEEN_H +#define REDQUEEN_H + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <stdint.h> +#include "qemu/osdep.h" +#include <linux/kvm.h> +#include <capstone/capstone.h> +#include <capstone/x86.h> +#include "redqueen_trace.h" +#include "khash.h" +#include "page_cache.h" + +//#define RQ_DEBUG + +#define REDQUEEN_MAX_STRCMP_LEN 64 +#define REDQUEEN_TRAP_LIMIT 16 + +#define REG64_NUM 16 +#define REG32_NUM 16 +//rip is deliberately not included, since this index is used to access the qemu cpu structure +#define REG16_NUM 16 +#define REG8L_NUM 16 +#define REG8H_NUM 8 + +#define EXTRA_REG_RIP 16 +#define EXTRA_REG_NOP 17 + +#define REDQUEEN_NO_INSTRUMENTATION 0 +#define REDQUEEN_LIGHT_INSTRUMENTATION 1 +#define REDQUEEN_SE_INSTRUMENTATION 2 +#define REDQUEEN_WHITELIST_INSTRUMENTATION 3 + +enum reg_types{RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15}; + +#define CMP_BITMAP_NOP 0x0000000UL +#define CMP_BITMAP_RQ_INSTRUCTION 0x1000000UL +#define CMP_BITMAP_SE_INSTRUCTION 0x2000000UL +#define CMP_BITMAP_BLACKLISTED 0x4000000UL +#define CMP_BITMAP_TRACE_ENABLED 0x8000000UL +#define CMP_BITMAP_SHOULD_HOOK_SE (CMP_BITMAP_SE_INSTRUCTION|CMP_BITMAP_TRACE_ENABLED) +#define CMP_BITMAP_SHOULD_HOOK_RQ (CMP_BITMAP_RQ_INSTRUCTION) + +KHASH_MAP_INIT_INT64(RQ, uint32_t) + +typedef struct redqueen_s{ + khash_t(RQ) *lookup; + bool intercept_mode; + bool trace_mode; + bool singlestep_enabled; + int hooks_applied; + CPUState *cpu; + uint64_t last_rip; + uint64_t next_rip; + uint64_t *breakpoint_whitelist; + uint64_t num_breakpoint_whitelist; + redqueen_trace_t* trace_state; + page_cache_t* page_cache; +} redqueen_t; + +typedef struct redqueen_workdir_s{ + char* redqueen_results; + char* symbolic_results; + char* pt_trace_results; + char* redqueen_patches; + char* breakpoint_white; + char* breakpoint_black; + char* target_code_dump; +} redqueen_workdir_t; + +extern redqueen_workdir_t redqueen_workdir; + +void setup_redqueen_workdir(char* workdir); + +redqueen_t* new_rq_state(CPUState *cpu, page_cache_t* page_cache); +void destroy_rq_state(redqueen_t* self); + +void set_rq_instruction(redqueen_t* self, uint64_t addr); +void set_rq_blacklist(redqueen_t* self, uint64_t addr); + +void handle_hook(redqueen_t* self); +void handel_se_hook(redqueen_t* self); + +void enable_rq_intercept_mode(redqueen_t* self); +void disable_rq_intercept_mode(redqueen_t* self); + + +void redqueen_register_transition(redqueen_t* self, uint64_t ip, uint64_t transition_val); +void redqueen_set_trace_mode(redqueen_t* self); +void 
redqueen_unset_trace_mode(redqueen_t* self); + +void set_se_instruction(redqueen_t* self, uint64_t addr); + +void dump_se_registers(redqueen_t* self); +void dump_se_memory_access(redqueen_t* self, cs_insn* insn); +void dump_se_return_access(redqueen_t* self, cs_insn* insn); +void dump_se_memory_access_at(redqueen_t* self, uint64_t instr_addr, uint64_t mem_addr); + +void redqueen_insert_hooks(redqueen_t* self); +void redqueen_remove_hooks(redqueen_t* self); + +void redqueen_callback(void* opaque, disassembler_mode_t mode, uint64_t start_addr, uint64_t end_addr); + +#endif diff --git a/nyx/redqueen_patch.c b/nyx/redqueen_patch.c new file mode 100644 index 0000000000..b0908af4ab --- /dev/null +++ b/nyx/redqueen_patch.c @@ -0,0 +1,40 @@ +#include "redqueen_patch.h" +#include "redqueen.h" +#include "patcher.h" +#include "file_helper.h" +#include "debug.h" + +/////////////////////////////////////////////////////////////////////////////////// +// Private Helper Functions Declarations +/////////////////////////////////////////////////////////////////////////////////// + +void _load_and_set_patches(patcher_t* self); + +/////////////////////////////////////////////////////////////////////////////////// +// Public Functions +/////////////////////////////////////////////////////////////////////////////////// + +void pt_enable_patches(patcher_t *self){ + _load_and_set_patches(self); + patcher_apply_all(self); +} + +void pt_disable_patches(patcher_t *self){ + patcher_restore_all(self); +} + + +/////////////////////////////////////////////////////////////////////////////////// +// Private Helper Functions Definitions +/////////////////////////////////////////////////////////////////////////////////// + + +void _load_and_set_patches(patcher_t* self){ + size_t num_addrs = 0; + uint64_t *addrs = NULL; + parse_address_file(redqueen_workdir.redqueen_patches, &num_addrs, &addrs); + if(num_addrs){ + patcher_set_addrs(self, addrs, num_addrs); + free(addrs); + } +} diff --git a/nyx/redqueen_patch.h b/nyx/redqueen_patch.h new file mode 100644 index 0000000000..07c8849fc9 --- /dev/null +++ b/nyx/redqueen_patch.h @@ -0,0 +1,11 @@ +#ifndef __GUARD_REDQUEEN_PATCH__ +#define __GUARD_REDQUEEN_PATCH__ + +#include "qemu/osdep.h" +#include +#include "nyx/patcher.h" + +void pt_enable_patches(patcher_t *self); + +void pt_disable_patches(patcher_t *self); +#endif diff --git a/nyx/redqueen_trace.c b/nyx/redqueen_trace.c new file mode 100644 index 0000000000..b3702994f2 --- /dev/null +++ b/nyx/redqueen_trace.c @@ -0,0 +1,73 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <assert.h> +#include "redqueen_trace.h" + +redqueen_trace_t* redqueen_trace_new(void){ + redqueen_trace_t* self = malloc(sizeof(redqueen_trace_t)); + self->lookup = kh_init(RQ_TRACE); + self->num_ordered_transitions = 0; + self->max_ordered_transitions = INIT_NUM_OF_STORED_TRANSITIONS; + self->ordered_transitions = malloc(INIT_NUM_OF_STORED_TRANSITIONS*sizeof(uint128_t)); + return self; +} + +void redqueen_trace_reset(redqueen_trace_t* self){ + kh_destroy(RQ_TRACE, self->lookup); + self->lookup = kh_init(RQ_TRACE); + self->num_ordered_transitions = 0; +} + +void redqueen_trace_free(redqueen_trace_t* self){ + kh_destroy(RQ_TRACE, self->lookup); + free(self->ordered_transitions); + free(self); +} + +void redqueen_trace_register_transition(redqueen_trace_t* self, uint64_t from, uint64_t to){ + khiter_t k; + int ret; + uint128_t key = (((uint128_t)from)<<64) | ((uint128_t)to); + k = kh_get(RQ_TRACE, self->lookup, key); + if(k != kh_end(self->lookup)){ + kh_value(self->lookup, k) 
+= 1; + } else{ + k = kh_put(RQ_TRACE, self->lookup, key, &ret); + kh_value(self->lookup, k) = 1; + self->ordered_transitions[self->num_ordered_transitions] = key; + self->num_ordered_transitions++; + assert(self->num_ordered_transitions < self->max_ordered_transitions); + } +} + +void redqueen_trace_write_file(redqueen_trace_t* self, int fd){ + for(size_t i = 0; i < self->num_ordered_transitions; i++){ + khiter_t k; + uint128_t key = self->ordered_transitions[i]; + k = kh_get(RQ_TRACE, self->lookup, key); + assert(k != kh_end(self->lookup)); + dprintf(fd, "%lx,%lx,%lx\n", (uint64_t)(key>>64), (uint64_t)key, kh_value(self->lookup, k) ); + } +} + + +#ifdef DEBUG_MAIN +int main(int argc, char** argv){ + + redqueen_trace_t* rq_obj = redqueen_trace_new(); + + for (uint64_t j = 0; j < 0x5; j++){ + redqueen_trace_register_transition(rq_obj, 0xBADF, 0xC0FFEE); + redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE); + for (uint64_t i = 0; i < 0x10000; i++){ + redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE); + } + redqueen_trace_write_file(rq_obj, STDOUT_FILENO); + redqueen_trace_reset(rq_obj); + } + + redqueen_trace_free(rq_obj); + return 0; +} +#endif diff --git a/nyx/redqueen_trace.h b/nyx/redqueen_trace.h new file mode 100644 index 0000000000..35ead73cf8 --- /dev/null +++ b/nyx/redqueen_trace.h @@ -0,0 +1,42 @@ +#pragma once +#include "khash.h" + +typedef unsigned __int128 uint128_t; +typedef uint128_t khint128_t; + +#define INIT_NUM_OF_STORED_TRANSITIONS 0xfffff + +/*! @function + @abstract 128-bit integer hash function + @param key The integer [khint128_t] + @return The hash value [khint_t] + */ +#define kh_int128_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11) ^ (((key>>64))>>33^((key>>64))^((key>>64))<<11) +/*! + @function + @abstract 128-bit integer comparison function + */ +#define kh_int128_hash_equal(a, b) ((a) == (b)) +/*! 
@function + @abstract Instantiate a hash map containing 128-bit integer keys + @param name Name of the hash table [symbol] + @param khval_t Type of values [type] + */ +#define KHASH_MAP_INIT_INT128(name, khval_t) \ + KHASH_INIT(name, khint128_t, khval_t, 1, kh_int128_hash_func, kh_int128_hash_equal) + +KHASH_MAP_INIT_INT128(RQ_TRACE, uint64_t) + +#define INIT_TRACE_IP 0xFFFFFFFFFFFFFFFFULL + +typedef struct redqueen_trace_s{ + khash_t(RQ_TRACE) *lookup; + size_t num_ordered_transitions; + size_t max_ordered_transitions; + uint128_t* ordered_transitions; +} redqueen_trace_t; + +redqueen_trace_t* redqueen_trace_new(void); +void redqueen_trace_reset(redqueen_trace_t* self); +void redqueen_trace_free(redqueen_trace_t* self); +void redqueen_trace_register_transition(redqueen_trace_t* self, uint64_t from, uint64_t to); +void redqueen_trace_write_file(redqueen_trace_t* self, int fd); diff --git a/nyx/sharedir.c b/nyx/sharedir.c new file mode 100644 index 0000000000..5647dc234c --- /dev/null +++ b/nyx/sharedir.c @@ -0,0 +1,172 @@ +#include "sharedir.h" +#include +#include +#include +#include +#include +#include +#include + +//#define SHAREDIR_DEBUG + +sharedir_t* sharedir_new(void){ + sharedir_t* self = malloc(sizeof(sharedir_t)); + self->dir = NULL; + self->lookup = kh_init(SHAREDIR_LOOKUP); + self->last_file_f = NULL; + self->last_file_obj_ptr = NULL; + return self; +} + +void sharedir_set_dir(sharedir_t* self, const char* dir){ + assert(!self->dir); + assert(asprintf(&self->dir, "%s", dir) != -1); +} + +static bool file_exits(const char* file){ + struct stat sb; + return (stat (file, &sb) == 0); +} + +static time_t get_file_mod_time(char *file){ + struct stat attr; + stat(file, &attr); + return attr.st_mtime; +} + +static size_t get_file_size(const char* file){ + struct stat st; + stat(file, &st); + return st.st_size; +} + +static char* sharedir_scan(sharedir_t* self, const char* file){ + + char* path = NULL; + assert(asprintf(&path, "%s/%s", self->dir, file) != -1); + + char* real_path = realpath(path, NULL); + + free(path); + if(real_path && !strncmp(self->dir, real_path, strlen(self->dir)) && file_exits(real_path)){ + return real_path; + } + + if(real_path){ + free(real_path); + } + return NULL; +} + +static sharedir_file_t* sharedir_get_object(sharedir_t* self, const char* file){ + khiter_t k; + int ret; + sharedir_file_t* obj = NULL; + + k = kh_get(SHAREDIR_LOOKUP, self->lookup, file); + + if(k != kh_end(self->lookup)){ + /* file already exists in our hash map */ + obj = kh_value(self->lookup, k); + + /* check if file still exists */ + assert(file_exits(obj->path)); + + /* check if mod time matches */ + assert(get_file_mod_time(obj->path) == obj->mod_time); + + /* check if file size matches */ + assert(get_file_size(obj->path) == obj->size); + + return obj; + } + else{ + /* nope ! 
*/ + char* realpath = sharedir_scan(self, file); + struct stat sb; + if(realpath != NULL){ + if (stat(realpath, &sb) == 0 && S_ISDIR(sb.st_mode)){ + return NULL; // is dir + } + obj = malloc(sizeof(sharedir_file_t)); + memset(obj, 0x0, sizeof(sharedir_file_t)); + assert(asprintf(&obj->file, "%s", basename(realpath)) != -1); + obj->path = realpath; + obj->size = get_file_size(obj->path); + obj->bytes_left = (uint64_t) obj->size; + obj->mod_time = get_file_mod_time(obj->path); + + /* put into hash_list */ + + char* new_file = NULL; + assert(asprintf(&new_file, "%s", file) != -1); + k = kh_put(SHAREDIR_LOOKUP, self->lookup, new_file, &ret); + kh_value(self->lookup, k) = obj; + + return obj; + } + + /* file not found */ + return NULL; + } +} + +static FILE* get_file_ptr(sharedir_t* self, sharedir_file_t* obj){ + if(obj == self->last_file_obj_ptr && self->last_file_f){ + return self->last_file_f; + } + else{ + if(self->last_file_f){ + fclose(self->last_file_f); + } + FILE* f = fopen(obj->path, "r"); + self->last_file_f = f; + self->last_file_obj_ptr = obj; + return f; + } +} + +uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page_buffer){ + if(!self->dir){ + fprintf(stderr, "WARNING: New file request received, but no share dir configured! [FILE: %s]\n", file); + return 0xFFFFFFFFFFFFFFFFUL; + } + + FILE* f = NULL; + + sharedir_file_t* obj = sharedir_get_object(self, file); + if(obj != NULL){ +#ifdef SHAREDIR_DEBUG + printf("sharedir_get_object->file: %s\n", obj->file); + printf("sharedir_get_object->path: %s\n", obj->path); + printf("sharedir_get_object->size: %ld\n", obj->size); + printf("sharedir_get_object->bytes_left: %ld\n", obj->bytes_left); +#endif + if(obj->bytes_left >= 0x1000){ + f = get_file_ptr(self, obj); + fseek(f, obj->size-obj->bytes_left, SEEK_SET); + assert(fread(page_buffer, 1, 0x1000, f) == 0x1000); + obj->bytes_left -= 0x1000; + return 0x1000; + } + else { + if (obj->bytes_left != 0){ + f = get_file_ptr(self, obj); + fseek(f, obj->size-obj->bytes_left, SEEK_SET); + assert(fread(page_buffer, 1, obj->bytes_left, f) == obj->bytes_left); + + uint64_t ret_value = obj->bytes_left; + obj->bytes_left = 0; + + return ret_value; + } + else { + obj->bytes_left = (uint_fast64_t)obj->size; + return 0; + } + } + } + else{ + return 0xFFFFFFFFFFFFFFFFUL; + } +} \ No newline at end of file diff --git a/nyx/sharedir.h b/nyx/sharedir.h new file mode 100644 index 0000000000..f6a0d20a5f --- /dev/null +++ b/nyx/sharedir.h @@ -0,0 +1,26 @@ +#pragma once +#include +#include "khash.h" +#include + + +typedef struct sharedir_file_s{ + char* file; + char* path; + size_t size; + uint64_t bytes_left; + time_t mod_time; +} sharedir_file_t; + +KHASH_MAP_INIT_STR(SHAREDIR_LOOKUP, sharedir_file_t*) + +typedef struct sharedir_s{ + char* dir; + khash_t(SHAREDIR_LOOKUP) *lookup; + FILE* last_file_f; + sharedir_file_t* last_file_obj_ptr; +} sharedir_t; + +sharedir_t* sharedir_new(void); +void sharedir_set_dir(sharedir_t* self, const char* dir); +uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page_buffer); diff --git a/nyx/snapshot/block/block_cow.c b/nyx/snapshot/block/block_cow.c new file mode 100644 index 0000000000..dcba6de1d8 --- /dev/null +++ b/nyx/snapshot/block/block_cow.c @@ -0,0 +1,552 @@ +#include +#include +#include +#include "nyx/snapshot/block/block_cow.h" +#include "sysemu/block-backend.h" +#include "nyx/state.h" + + +//#define COW_CACHE_DEBUG + +//#define COW_CACHE_VERBOSE + +#define CHUNK_SIZE 0x1000 +//0x200 +#define PAGE_MASK 
0xFFFFFFFFFFFFF000 + +cow_cache_t* cow_cache_new(const char* filename){ + + //printf("%s: \"%s\"\n", __func__, filename); + + cow_cache_t* self = malloc(sizeof(cow_cache_t)); + self->lookup_primary = kh_init(COW_CACHE); + self->lookup_secondary = kh_init(COW_CACHE); + self->lookup_secondary_tmp = kh_init(COW_CACHE); + + self->data_primary = mmap(NULL, COW_CACHE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + assert(self->data_primary != MAP_FAILED); + //memset(self->data_primary, COW_CACHE_SIZE/CHUNK_SIZE, CHUNK_SIZE); + + self->data_secondary = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + assert(self->data_secondary != MAP_FAILED); + + self->data_secondary_tmp = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); + assert(self->data_secondary_tmp != MAP_FAILED); + + self->filename = strdup(basename(filename)); + self->offset_primary = 0; + self->offset_secondary = 0; + self->offset_secondary_tmp = 0; + + if(getenv("NYX_DISABLE_BLOCK_COW")){ + fprintf(stderr, "WARNING: Nyx block COW layer disabled for %s (** write operations are not cached **)\n", filename); + self->enabled = false; + } + else{ + self->enabled = true; + } + self->enabled_fuzz = false; + self->enabled_fuzz_tmp = false; + + +#ifdef DEBUG_COW_LAYER + self->read_calls = 0; + self->write_calls = 0; + self->read_calls_tmp = 0; + self->write_calls_tmp = 0; +#endif + + return self; +} + +static char* gen_file_name(cow_cache_t* self, const char* filename_prefix, const char* filename_postfix){ + char* tmp1; + char* tmp2; + + assert(asprintf(&tmp2, "%s", self->filename) != -1); + + for(int i = 0; i < strlen(tmp2); i++){ + if(tmp2[i] == '/'){ + tmp2[i] = '_'; + } + } + + assert(asprintf(&tmp1, "%s_%s.%s", filename_prefix, tmp2, filename_postfix) != -1); + + free(tmp2); + + return tmp1; +} + +void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool switch_mode){ + assert(!self->enabled_fuzz); + + //printf("%s: %s\n", __func__, self->filename); + + char* tmp1; + char* tmp2; + + //assert(asprintf(&tmp1, "%s_%s.khash", filename_prefix, self->filename) != -1); + //assert(asprintf(&tmp2, "%s_%s.pcow", filename_prefix, self->filename) != -1); + + tmp1 = gen_file_name(self, filename_prefix, "khash"); + tmp2 = gen_file_name(self, filename_prefix, "pcow"); + + //printf("%s\n", tmp1); + kh_destroy(COW_CACHE, self->lookup_primary); + + struct stat buffer; + assert(stat (tmp2, &buffer) == 0); + + if(buffer.st_size){ + self->lookup_primary = kh_load(COW_CACHE, tmp1); + } + else { + self->lookup_primary = kh_init(COW_CACHE); + } + + int fd = open(tmp2, O_RDONLY); + + //printf("TRY TO MMAP : %lx\n", buffer.st_size); + if(switch_mode){ + self->data_primary = mmap(0, COW_CACHE_SIZE, PROT_READ, MAP_SHARED, fd, 0); + assert(self->data_primary); + } + else{ + void* ptr = mmap(0, COW_CACHE_SIZE, PROT_READ , MAP_SHARED, fd, 0); + assert(ptr); + memcpy(self->data_primary, ptr, buffer.st_size); + munmap(ptr, COW_CACHE_SIZE); + } + //printf("self->data_primary -> %p\n", self->data_primary ); + close(fd); + + self->offset_primary = buffer.st_size; + //fprintf(stderr, "self->offset_primary: %lx\n", self->offset_primary); + + if(switch_mode){ + switch_to_fuzz_mode(self); + } + + free(tmp1); + free(tmp2); + + //printf("DONE!\n"); + +} + +void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix){ + assert(self->enabled_fuzz); + + //printf("%s: %s\n", __func__, self->filename); + + + char* tmp1; + char* tmp2; + + 
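/*
 * Aside: dump_primary_buffer()/read_primary_buffer() persist the CoW layer as
 * a file pair: the khash offset map goes to <prefix>_<disk>.khash (via the
 * kh_write()/kh_load() serialization helpers, which are not part of stock
 * klib khash) and the raw 4 KiB chunks go to <prefix>_<disk>.pcow. The same
 * two-file idea, reduced to plain stdio and a flat mapping table instead of
 * khash (all names here are illustrative):
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

typedef struct { uint64_t disk_offset; uint64_t cache_offset; } cow_map_entry_t;

static void dump_cow_sketch(const char* meta_path, const char* data_path,
                            const cow_map_entry_t* map, size_t entries,
                            const void* chunks, size_t chunk_bytes){
    FILE* fm = fopen(meta_path, "wb");
    FILE* fd = fopen(data_path, "wb");
    assert(fm && fd);
    assert(fwrite(&entries, sizeof(entries), 1, fm) == 1);      /* mapping count  */
    assert(fwrite(map, sizeof(*map), entries, fm) == entries);  /* offset mapping */
    assert(fwrite(chunks, 1, chunk_bytes, fd) == chunk_bytes);  /* raw chunk data */
    fclose(fm);
    fclose(fd);
}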
//assert(asprintf(&tmp1, "%s_%s.khash", filename_prefix, self->filename) != -1); + //assert(asprintf(&tmp2, "%s_%s.pcow", filename_prefix, self->filename) != -1); + + tmp1 = gen_file_name(self, filename_prefix, "khash"); + tmp2 = gen_file_name(self, filename_prefix, "pcow"); + + //printf("%s\n", tmp1); + if(self->offset_primary){ + kh_write(COW_CACHE, self->lookup_primary, tmp1); + } + else{ + fclose(fopen(tmp1, "wb")); + } + + FILE *fp = fopen(tmp2, "wb"); + if(fp == NULL) { + fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp2); + assert(false); + //exit(EXIT_FAILURE); + } + + if(self->offset_primary){ + fwrite(self->data_primary, CHUNK_SIZE, self->offset_primary/CHUNK_SIZE, fp); + } + //fprintf(stderr, "self->offset_primary: %lx\n", self->offset_primary); + + + fclose(fp); + + free(tmp1); + free(tmp2); + + //printf("DONE!\n"); + + +/* + + qemu_mutex_unlock_iothread(); + fast_reload_t* snapshot = fast_reload_new(); + fast_reload_create(snapshot); + qemu_mutex_lock_iothread(); + + printf("CREATED!\n"); +*/ + +} + +void cow_cache_reset(cow_cache_t* self){ + if(!self->enabled_fuzz) + return; + /* TODO */ + assert(self->enabled_fuzz); + + //fprintf(stderr, "RESETING COW STUFF YO %s (%lx)\n", self->filename, self->offset_secondary); + + + if(self->enabled_fuzz){ + +#ifdef DEBUG_COW_LAYER + printf("%s: read_calls =>\t%ld\n", __func__, self->read_calls); + printf("%s: write_calls =>\t%ld\n", __func__, self->write_calls); + printf("%s: read_calls_tmp =>\t%ld\n", __func__, self->read_calls_tmp); + printf("%s: write_calls_tmp =>\t%ld\n", __func__, self->write_calls_tmp); +#endif + + if(!self->enabled_fuzz_tmp){ + self->offset_secondary = 0; + kh_clear(COW_CACHE, self->lookup_secondary); + +#ifdef DEBUG_COW_LAYER + self->read_calls = 0; + self->write_calls = 0; +#endif + } + else { + self->offset_secondary_tmp = 0; + kh_clear(COW_CACHE, self->lookup_secondary_tmp); + +#ifdef DEBUG_COW_LAYER + printf("CLEAR lookup_secondary_tmp\n"); + self->read_calls_tmp = 0; + self->write_calls_tmp = 0; +#endif + } + } +} + + +void cow_cache_enable_tmp_mode(cow_cache_t* self){ + assert(self->enabled_fuzz); + self->enabled_fuzz_tmp = true; +} + +void cow_cache_disable_tmp_mode(cow_cache_t* self){ + assert(self->enabled_fuzz); + assert(self->enabled_fuzz_tmp); + cow_cache_reset(self); + self->enabled_fuzz_tmp = false; +} + +void cow_cache_enable(cow_cache_t* self){ + cow_cache_reset(self); + self->enabled = true; +} + + +void cow_cache_disable(cow_cache_t* self){ + cow_cache_reset(self); + self->enabled = false; +} + +typedef struct BlkRwCo { + BlockBackend *blk; + int64_t offset; + QEMUIOVector *qiov; + int ret; + BdrvRequestFlags flags; +} BlkRwCo; + +typedef struct BlkAioEmAIOCB { + BlockAIOCB common; + BlkRwCo rwco; + int bytes; + bool has_returned; +} BlkAioEmAIOCB; + +extern void blk_aio_write_entry(void *opaque); +extern int blk_check_byte_request(BlockBackend *blk, int64_t offset, size_t size); +extern void blk_aio_complete(BlkAioEmAIOCB *acb); + +/* read from primary buffer */ +static inline void read_from_primary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){ + khiter_t k; + + k = kh_get(COW_CACHE, self->lookup_primary, offset_addr); + if(k != kh_end(self->lookup_primary)){ + #ifdef COW_CACHE_DEBUG + printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary); + #endif + 
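/*
 * Aside: the read path here is read-through -- blk_co_preadv() fetches the
 * original data first, then every 4 KiB chunk that has a dirty copy in one of
 * the CoW layers is patched over the result (tmp -> secondary -> primary
 * lookup order). The same overlay idea on a plain memory buffer, assuming a
 * hypothetical lookup_cached_chunk() helper that returns the cached copy or
 * NULL:
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_CHUNK_SIZE 0x1000

const void* lookup_cached_chunk(uint64_t disk_offset); /* hypothetical */

static void overlay_cached_chunks(uint8_t* buf, uint64_t disk_offset, size_t len){
    /* the caller guarantees chunk alignment, as cow_cache_read() asserts */
    for(size_t done = 0; done < len; done += SKETCH_CHUNK_SIZE){
        const void* cached = lookup_cached_chunk(disk_offset + done);
        if(cached){
            memcpy(buf + done, cached, SKETCH_CHUNK_SIZE); /* dirty chunk wins over disk */
        }
    }
}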
//iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE); + qemu_iovec_from_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE); + } + return; +} + +/* try to read from secondary buffer + * read from primary buffer if the data is not available yet */ +static inline void read_from_secondary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){ + /* read from L2 TMP buffer */ + khiter_t k; + if(self->enabled_fuzz_tmp){ + k = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr); + if(k != kh_end(self->lookup_secondary_tmp)){ + #ifdef COW_CACHE_DEBUG + printf("[FTMP] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_secondary); + #endif + //iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k), CHUNK_SIZE); + qemu_iovec_from_buf(qiov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k), CHUNK_SIZE); + return; + } + } + + /* read from L2 buffer */ + k = kh_get(COW_CACHE, self->lookup_secondary, offset_addr); + if(k != kh_end(self->lookup_secondary)){ + #ifdef COW_CACHE_DEBUG + printf("[FUZZ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_secondary); + #endif + //iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k), CHUNK_SIZE); + qemu_iovec_from_buf(qiov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k), CHUNK_SIZE); + return; + } + + /* read from L1 buffer */ + k = kh_get(COW_CACHE, self->lookup_primary, offset_addr); + if(k != kh_end(self->lookup_primary)){ + #ifdef COW_CACHE_DEBUG + printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary); + #endif + //iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE); + qemu_iovec_from_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE); + } +} + +/* read data from cow cache */ +static int cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags){ + +#ifdef DEBUG_COW_LAYER + if(self->enabled_fuzz){ + if(!self->enabled_fuzz_tmp){ + self->read_calls++; + } + else{ + self->read_calls_tmp++; + } + } +#endif + + //iov_from_buf_full_register(qiov->iov, qiov->niov, offset, NULL, bytes); + + blk_co_preadv(blk, offset, bytes, qiov, flags); + + if ((qiov->size%CHUNK_SIZE)){ +#ifdef COW_CACHE_DEBUG + fprintf(stderr, "%s: FAILED %lx!\n", __func__, qiov->size); +#endif + return 0; + } + assert(!(qiov->size%CHUNK_SIZE)); + + uint64_t iov_offset = 0; + for(uint64_t offset_addr = offset; offset_addr < (offset+(qiov->size)); offset_addr+= CHUNK_SIZE){ + + if(self->enabled_fuzz){ + read_from_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset); + } + else{ + read_from_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset); + } + + iov_offset+= CHUNK_SIZE; + } + + return 0; +} + + +/* write to primary buffer */ +static inline void write_to_primary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned 
int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){ + int ret; + khiter_t k; + + k = kh_get(COW_CACHE, self->lookup_primary, offset_addr); + if(unlikely(k == kh_end(self->lookup_primary))){ + /* create page */ + + k = kh_put(COW_CACHE, self->lookup_primary, offset_addr, &ret); + #ifdef COW_CACHE_DEBUG + printf("ADD NEW COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary); + #endif + + + kh_value(self->lookup_primary, k) = self->offset_primary; + + self->offset_primary += CHUNK_SIZE; + + #ifdef COW_CACHE_VERBOSE + printf("COW CACHE IS 0x%lx BYTES (KB: %ld / MB: %ld / GB: %ld) IN SIZE!\n", self->offset, self->offset >> 10, self->offset >> 20, self->offset >> 30); + #endif + + /* IN CASE THE BUFFER IS FULL -> ABORT! */ + assert(self->offset_primary < COW_CACHE_SIZE); + } + + #ifdef COW_CACHE_DEBUG + printf("LOAD COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx (%s)\n", offset_addr, iov_offset, kh_value(self->lookup_primary, k), self->filename); + #endif + + /* write to cached page */ + qemu_iovec_to_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE); + + + /* + if(self->offset_primary >= 0xA00000){ + printf("SWITCH TO SECONDARY\n"); + switch_to_fuzz_mode(self); + dump_primary_buffer(self, "/tmp/cow_dump"); + } + */ + +} + +static inline void write_to_secondary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){ + int ret; + + //assert((offset_addr&(CHUNK_SIZE-1)) == 0); + + if(!self->enabled_fuzz_tmp){ + /* L2 mode */ + + /* IN CASE THE BUFFER IS FULL -> ABORT! */ + if(self->offset_secondary >= COW_CACHE_SECONDARY_SIZE){ + GET_GLOBAL_STATE()->cow_cache_full = true; + abort(); + return; + } + + khiter_t k_secondary = kh_get(COW_CACHE, self->lookup_secondary, offset_addr); + if(unlikely(k_secondary == kh_end(self->lookup_secondary))){ + /* if page is not cached in secondary buffer yet */ + k_secondary = kh_put(COW_CACHE, self->lookup_secondary, offset_addr, &ret); + kh_value(self->lookup_secondary, k_secondary) = self->offset_secondary; + self->offset_secondary += CHUNK_SIZE; + + } + //printf("WRITE -> %lx\n", kh_value(self->lookup_secondary, k_secondary)); + /* write to cache */ + qemu_iovec_to_buf(qiov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k_secondary), CHUNK_SIZE); + } + else{ + /* L2 TMP mode */ + + /* IN CASE THE BUFFER IS FULL -> ABORT! 
*/ + if(self->offset_secondary_tmp >= COW_CACHE_SECONDARY_SIZE){ + GET_GLOBAL_STATE()->cow_cache_full = true; + abort(); + return; + } + + khiter_t k_secondary_tmp = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr); + if(unlikely(k_secondary_tmp == kh_end(self->lookup_secondary_tmp))){ + /* if page is not cached in secondary tmp buffer yet */ + k_secondary_tmp = kh_put(COW_CACHE, self->lookup_secondary_tmp, offset_addr, &ret); + kh_value(self->lookup_secondary_tmp, k_secondary_tmp) = self->offset_secondary_tmp; + self->offset_secondary_tmp += CHUNK_SIZE; + } + + /* write to cache */ + //printf("WRITE TO L2 TMP -> %lx\n", self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k_secondary_tmp)); + qemu_iovec_to_buf(qiov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k_secondary_tmp), CHUNK_SIZE); + } +} + +/* write data to cow cache */ +static int cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags){ + //khiter_t k; + +#ifdef DEBUG_COW_LAYER + if(self->enabled_fuzz){ + if(!self->enabled_fuzz_tmp){ + self->write_calls++; + } + else{ + self->write_calls_tmp++; + } + } +#endif + + if ((qiov->size%CHUNK_SIZE)){ +#ifdef COW_CACHE_DEBUG + fprintf(stderr, "%s: FAILED %lx!\n", __func__, qiov->size); +#endif + return 0; + } + //printf("qiov->size: %lx %lx\n", qiov->size, CHUNK_SIZE); + if((qiov->size%CHUNK_SIZE) && GET_GLOBAL_STATE()->in_fuzzing_mode){ + GET_GLOBAL_STATE()->cow_cache_full = true; + fprintf(stderr, "WARNING: %s write in %lx CHUNKSIZE\n", __func__, qiov->size); + return 0; + } + else{ + assert(!(qiov->size%CHUNK_SIZE)); + } + + uint64_t iov_offset = 0; + for(uint64_t offset_addr = offset; offset_addr < (offset+(qiov->size)); offset_addr+= CHUNK_SIZE){ + if(self->enabled_fuzz){ + write_to_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset); + } + else{ + write_to_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset); + } + + iov_offset+= CHUNK_SIZE; + } + + return 0; +} + +void switch_to_fuzz_mode(cow_cache_t* self){ + self->enabled_fuzz = true; + assert(!mprotect(self->data_primary, COW_CACHE_SIZE, PROT_READ)); + printf("[qemu-nyx] switch to secondary CoW buffer\n"); +} + +void cow_cache_read_entry(void* opaque){ + + BlkAioEmAIOCB *acb = opaque; + BlkRwCo *rwco = &acb->rwco; + +#ifdef COW_CACHE_DEBUG + printf("%s %lx %lx\n", __func__, rwco->offset, acb->bytes); +#endif + + + //printf("rwco->ret: %lx %lx\n", rwco->ret, acb->bytes); + rwco->ret = cow_cache_read( *((cow_cache_t**)(rwco->blk)), rwco->blk, rwco->offset, acb->bytes, rwco->qiov, rwco->flags); + + //last_read = PAGE_MASK; + + blk_aio_complete(acb); +} + + +void cow_cache_write_entry(void* opaque){ + BlkAioEmAIOCB *acb = opaque; + BlkRwCo *rwco = &acb->rwco; + +#ifdef COW_CACHE_DEBUG + printf("%s\n", __func__); +#endif + + rwco->ret = cow_cache_write( *((cow_cache_t**)(rwco->blk)), rwco->blk, rwco->offset, acb->bytes, rwco->qiov, rwco->flags); + + blk_aio_complete(acb); +} diff --git a/nyx/snapshot/block/block_cow.h b/nyx/snapshot/block/block_cow.h new file mode 100644 index 0000000000..28d6fea0bc --- /dev/null +++ b/nyx/snapshot/block/block_cow.h @@ -0,0 +1,72 @@ +#pragma once + + +#include +#include +#include "nyx/khash.h" + +#include "qemu/osdep.h" +#include "block/block.h" + +#include "nyx/redqueen_trace.h" + +//#define DEBUG_COW_LAYER + +/* 2GB Cache */ +//#define COW_CACHE_SIZE 0x80000000 + +// 3GB +#define COW_CACHE_SIZE 
0xC0000000 + +// 512MB +//#define COW_CACHE_SECONDARY_SIZE 0x20000000 +#define COW_CACHE_SECONDARY_SIZE 0xC0000000 + + +KHASH_MAP_INIT_INT64(COW_CACHE, uint64_t) + +typedef struct cow_cache_s{ + khash_t(COW_CACHE) *lookup_primary; + khash_t(COW_CACHE) *lookup_secondary; + khash_t(COW_CACHE) *lookup_secondary_tmp; + + void* data_primary; + void* data_secondary; + void* data_secondary_tmp; + + char* filename; + uint64_t offset_primary; + uint64_t offset_secondary; + uint64_t offset_secondary_tmp; + + bool enabled; + bool enabled_fuzz; + bool enabled_fuzz_tmp; + +#ifdef DEBUG_COW_LAYER + uint64_t read_calls; + uint64_t write_calls; + uint64_t read_calls_tmp; + uint64_t write_calls_tmp; +#endif +} cow_cache_t; + +cow_cache_t* cow_cache_new(const char* filename); +void cow_cache_reset(cow_cache_t* self); +//int coroutine_fn cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); +//int coroutine_fn cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); + + +void switch_to_fuzz_mode(cow_cache_t* self); + +void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool switch_mode); +void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix); + +void cow_cache_read_entry(void* opaque); +void cow_cache_write_entry(void* opaque); + +void cow_cache_enable(cow_cache_t* self); +void cow_cache_disable(cow_cache_t* self); + +void cow_cache_enable_tmp_mode(cow_cache_t* self); +void cow_cache_disable_tmp_mode(cow_cache_t* self); diff --git a/nyx/snapshot/block/nyx_block_snapshot.c b/nyx/snapshot/block/nyx_block_snapshot.c new file mode 100644 index 0000000000..e2a0fd897c --- /dev/null +++ b/nyx/snapshot/block/nyx_block_snapshot.c @@ -0,0 +1,194 @@ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "sysemu/block-backend.h" +#include "block/qapi.h" +#include "sysemu/runstate.h" +#include "migration/vmstate.h" + +#include "nyx/snapshot/block/nyx_block_snapshot.h" +#include "nyx/debug.h" +#include "nyx/state.h" + +typedef struct fast_reload_cow_entry_s{ + uint32_t id; + char idstr[256]; +} fast_reload_cow_entry_t; + + +nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snapshot){ + + nyx_block_t* self = malloc(sizeof(nyx_block_t)); + memset(self, 0, sizeof(nyx_block_t)); + + BlockBackend *blk; + fast_reload_cow_entry_t entry; + + char* tmp1; + char* tmp2; + + assert(asprintf(&tmp1, "%s/fs_cache.meta", folder) != -1); + assert(asprintf(&tmp2, "%s/fs_drv", folder) != -1); + + + self->cow_cache_array_size = 0; + + FILE* f = fopen (tmp1, "r"); + assert(f != NULL); + + for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { + if(blk && blk->cow_cache){ + debug_printf("%p %s\n", blk->cow_cache, blk->cow_cache->filename); + self->cow_cache_array_size++; + } + } + + uint32_t temp_cow_cache_array_size; + + assert(fread(&temp_cow_cache_array_size, sizeof(uint32_t), 1, f) == 1); + + debug_printf("%d vs %x\n", temp_cow_cache_array_size, self->cow_cache_array_size); + assert(self->cow_cache_array_size == temp_cow_cache_array_size); + + self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size); + + uint32_t i = 0; + uint32_t id = 0; + for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { + if(blk && blk->cow_cache){ + self->cow_cache_array[i++] = blk->cow_cache; + assert(fread(&entry, sizeof(fast_reload_cow_entry_t), 
1, f) == 1); + + assert(!strcmp(entry.idstr, blk->cow_cache->filename)); + assert(entry.id == id); + } + id++; + } + + + fclose(f); + + for(i = 0; i < self->cow_cache_array_size; i++){ + read_primary_buffer(self->cow_cache_array[i], tmp2, !pre_snapshot); + } + + free(tmp1); + free(tmp2); + return self; +} + +nyx_block_t* nyx_block_snapshot_init(void){ + + nyx_block_t* self = malloc(sizeof(nyx_block_t)); + memset(self, 0, sizeof(nyx_block_t)); + + BlockBackend *blk; + for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { + if(blk && blk->cow_cache){ + debug_printf("%p %s\n", blk->cow_cache, blk->cow_cache->filename); + self->cow_cache_array_size++; + } + } + + self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size); + + uint32_t i = 0; + for (blk = blk_next(NULL); blk; blk = blk_next(blk)) { + if(blk && blk->cow_cache){ + self->cow_cache_array[i++] = blk->cow_cache; + } + } + + + for(i = 0; i < self->cow_cache_array_size; i++){ + switch_to_fuzz_mode(self->cow_cache_array[i]); + } + return self; +} + +/* + + +static void fast_reload_serialize_cow(fast_reload_t* self, const char* folder){ + fast_reload_cow_entry_t entry; + + char* tmp1; + char* tmp2; + + assert(asprintf(&tmp1, "%s/fs_cache.meta", folder) != -1); + assert(asprintf(&tmp2, "%s/fs_drv", folder) != -1); + + + FILE* f = fopen (tmp1, "w"); + + fwrite(&(self->cow_cache_array_size), sizeof(uint32_t), 1, f); + + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + entry.id = i; + printf("%d -> %s\n", i, (const char*)self->cow_cache_array[i]->filename); + strncpy((char*)&entry.idstr, (const char*)self->cow_cache_array[i]->filename, 256); + fwrite(&entry, sizeof(fast_reload_cow_entry_t), 1, f); + + dump_primary_buffer(self->cow_cache_array[i], tmp2); + } + fclose(f); + + free(tmp1); + free(tmp2); +} + +*/ + +void nyx_block_snapshot_flush(nyx_block_t* self){ + GET_GLOBAL_STATE()->cow_cache_full = false; +} + +void nyx_block_snapshot_switch_incremental(nyx_block_t* self){ + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + cow_cache_enable_tmp_mode(self->cow_cache_array[i]); + } + nyx_block_snapshot_flush(self); +} + +void nyx_block_snapshot_disable_incremental(nyx_block_t* self){ + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + cow_cache_disable_tmp_mode(self->cow_cache_array[i]); + } +} + +void nyx_block_snapshot_reset(nyx_block_t* self){ + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + cow_cache_reset(self->cow_cache_array[i]); + } +} + +void nyx_block_snapshot_serialize(nyx_block_t* self, const char* snapshot_folder){ + fast_reload_cow_entry_t entry; + + char* tmp1; + char* tmp2; + + assert(asprintf(&tmp1, "%s/fs_cache.meta", snapshot_folder) != -1); + assert(asprintf(&tmp2, "%s/fs_drv", snapshot_folder) != -1); + + + FILE* f = fopen (tmp1, "w"); + + fwrite(&(self->cow_cache_array_size), sizeof(uint32_t), 1, f); + + for(uint32_t i = 0; i < self->cow_cache_array_size; i++){ + entry.id = i; + //printf("%d -> %s\n", i, (const char*)self->cow_cache_array[i]->filename); + strncpy((char*)&entry.idstr, (const char*)self->cow_cache_array[i]->filename, 255); + fwrite(&entry, sizeof(fast_reload_cow_entry_t), 1, f); + + dump_primary_buffer(self->cow_cache_array[i], tmp2); + } + fclose(f); + + free(tmp1); + free(tmp2); +} diff --git a/nyx/snapshot/block/nyx_block_snapshot.h b/nyx/snapshot/block/nyx_block_snapshot.h new file mode 100644 index 0000000000..afeb232cb9 --- /dev/null +++ b/nyx/snapshot/block/nyx_block_snapshot.h @@ -0,0 +1,21 @@ +#pragma once + +#include 
+#include "nyx/snapshot/block/block_cow.h" + +typedef struct nyx_block_s{ + cow_cache_t **cow_cache_array; + uint32_t cow_cache_array_size; + +} nyx_block_t; + +nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snapshot); +nyx_block_t* nyx_block_snapshot_init(void); +void nyx_block_snapshot_switch_to_incremental(nyx_block_t*); + +void nyx_block_snapshot_flush(nyx_block_t* self); +void nyx_block_snapshot_switch_incremental(nyx_block_t* self); +void nyx_block_snapshot_disable_incremental(nyx_block_t* self); +void nyx_block_snapshot_reset(nyx_block_t* self); + +void nyx_block_snapshot_serialize(nyx_block_t* self, const char* snapshot_folder); \ No newline at end of file diff --git a/nyx/snapshot/devices/nyx_device_state.c b/nyx/snapshot/devices/nyx_device_state.c new file mode 100644 index 0000000000..3911deb12c --- /dev/null +++ b/nyx/snapshot/devices/nyx_device_state.c @@ -0,0 +1,470 @@ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" +#include "migration/register.h" +#include "migration/savevm.h" +#include "migration/qemu-file.h" +#include "migration/qjson.h" +#include "migration/global_state.h" + +#include "nyx/snapshot/devices/nyx_device_state.h" +#include "nyx/debug.h" + +#include "sysemu/block-backend.h" +#include "block/qapi.h" +#include "sysemu/runstate.h" +#include "migration/vmstate.h" + +#include "nyx/snapshot/devices/state_reallocation.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "sysemu/kvm_int.h" +#include "sysemu/cpus.h" +#include "sysemu/reset.h" + +#include "nyx/snapshot/devices/vm_change_state_handlers.h" + + + +#define STATE_BUFFER 0x8000000 /* up to 128MB */ + +extern void enable_fast_snapshot_rtc(void); +extern void enable_fast_snapshot_kvm_clock(void); + +static void enable_fast_snapshot_mode(void){ + enable_fast_snapshot_rtc(); + enable_fast_snapshot_kvm_clock(); +} + +extern int kvm_nyx_put_tsc_value(CPUState *cs, uint64_t data); + +static void set_tsc_value(nyx_device_state_t* self, bool tmp_snapshot){ + if(self->incremental_mode){ + assert(self->tsc_value_incremental); + assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value_incremental) == 0); + } + else{ + assert(self->tsc_value); + assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value) == 0); + } +} + +static void save_tsc_value(nyx_device_state_t* self, bool incremental_mode){ + X86CPU *cpu = X86_CPU(qemu_get_cpu(0)); + CPUX86State *env = &cpu->env; + + if(incremental_mode){ + self->tsc_value_incremental = env->tsc; // - 0x200000; /* fml */ + } + else{ + self->tsc_value = env->tsc; + } +} + +extern int qemu_savevm_state(QEMUFile *f, Error **errp); + +/* new savevm routine */ +typedef struct SaveStateEntry { + QTAILQ_ENTRY(SaveStateEntry) entry; + char idstr[256]; + int instance_id; + int alias_id; + int version_id; + int load_version_id; + int section_id; + int load_section_id; + SaveVMHandlers *ops; + const VMStateDescription *vmsd; + void *opaque; + void *compat; + int is_ram; +} SaveStateEntry; + + +typedef struct SaveState { + QTAILQ_HEAD(, SaveStateEntry) handlers; + int global_section_id; + bool skip_configuration; + uint32_t len; + const char *name; + uint32_t target_page_bits; +} SaveState; + +extern SaveState savevm_state; + +extern void vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc); +extern bool should_send_vmdesc(void); + +extern bool skip_section_footers; + + 
+extern void save_section_footer(QEMUFile *f, SaveStateEntry *se); +extern void save_section_header(QEMUFile *f, SaveStateEntry *se, uint8_t section_type); + +/* skip block ram */ +static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only) +{ + QJSON *vmdesc; + int vmdesc_len; + SaveStateEntry *se; + int ret; + bool in_postcopy = migration_in_postcopy(); + + cpu_synchronize_all_states(); + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){ + if (!se->ops || + (in_postcopy && se->ops->save_live_complete_postcopy) || + (in_postcopy && !iterable_only) || + !se->ops->save_live_complete_precopy) { + continue; + } + + if (se->ops && se->ops->is_active) { + if (!se->ops->is_active(se->opaque)) { + continue; + } + } + + save_section_header(f, se, QEMU_VM_SECTION_END); + + ret = se->ops->save_live_complete_precopy(f, se->opaque); + save_section_footer(f, se); + if (ret < 0) { + qemu_file_set_error(f, ret); + return; + } + } + } + + if (iterable_only) { + return; + } + + vmdesc = qjson_new(); + json_prop_int(vmdesc, "page_size", TARGET_PAGE_SIZE); + json_start_array(vmdesc, "devices"); + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){ + if ((!se->ops || !se->ops->save_state) && !se->vmsd) { + continue; + } + if (se->vmsd && !vmstate_save_needed(se->vmsd, se->opaque)) { + continue; + } + + json_start_object(vmdesc, NULL); + json_prop_str(vmdesc, "name", se->idstr); + json_prop_int(vmdesc, "instance_id", se->instance_id); + + save_section_header(f, se, QEMU_VM_SECTION_FULL); + vmstate_save(f, se, vmdesc); + save_section_footer(f, se); + + json_end_object(vmdesc); + } + } + + if (!in_postcopy) { + /* Postcopy stream will still be going */ + qemu_put_byte(f, QEMU_VM_EOF); + } + + json_end_array(vmdesc); + qjson_finish(vmdesc); + vmdesc_len = strlen(qjson_get_str(vmdesc)); + + if (should_send_vmdesc()) { + qemu_put_byte(f, QEMU_VM_VMDESCRIPTION); + qemu_put_be32(f, vmdesc_len); + qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len); + } + qjson_destroy(vmdesc); + + qemu_fflush(f); +} + + +static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) { + SaveStateEntry *se; + int ret = 1; + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){ + if (!se->ops || !se->ops->save_live_iterate) { + continue; + } + if (se->ops && se->ops->is_active) { + if (!se->ops->is_active(se->opaque)) { + continue; + } + } + /* + * In the postcopy phase, any device that doesn't know how to + * do postcopy should have saved it's state in the _complete + * call that's already run, it might get confused if we call + * iterate afterwards. + */ + if (postcopy && !se->ops->save_live_complete_postcopy) { + continue; + } + if (qemu_file_rate_limit(f)) { + return 0; + } + + save_section_header(f, se, QEMU_VM_SECTION_PART); + + ret = se->ops->save_live_iterate(f, se->opaque); + save_section_footer(f, se); + + if (ret < 0) { + qemu_file_set_error(f, ret); + } + if (ret <= 0) { + /* Do not proceed to the next vmstate before this one reported + completion of the current stage. This serializes the migration + and reduces the probability that a faster changing state is + synchronized over and over again. 
*/ + break; + } + } + } + return ret; +} + +static void fast_qemu_savevm_state_setup(QEMUFile *f){ + SaveStateEntry *se; + int ret; + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){ + if (!se->ops || !se->ops->save_setup) { + continue; + } + if (se->ops && se->ops->is_active) { + if (!se->ops->is_active(se->opaque)) { + continue; + } + } + save_section_header(f, se, QEMU_VM_SECTION_START); + + ret = se->ops->save_setup(f, se->opaque); + save_section_footer(f, se); + if (ret < 0) { + qemu_file_set_error(f, ret); + break; + } + } + } +} + + +static int fast_qemu_savevm_state(QEMUFile *f, Error **errp) { + qemu_savevm_state_header(f); + fast_qemu_savevm_state_setup(f); + + while (qemu_file_get_error(f) == 0) { + if (fast_qemu_savevm_state_iterate(f, false) > 0) { + fast_qemu_savevm_state_complete_precopy(f, false); + break; + } + } + + return 0; +} + +/* QEMUFile RAM Emulation */ +static ssize_t fast_savevm_writev_buffer(void *opaque, struct iovec *iov, int iovcnt, int64_t pos){ + ssize_t retval = 0; + for(uint32_t i = 0; i < iovcnt; i++){ + memcpy((void*)(((struct fast_savevm_opaque_t*)(opaque))->buf + ((struct fast_savevm_opaque_t*)(opaque))->pos), iov[i].iov_base, iov[i].iov_len); + ((struct fast_savevm_opaque_t*)(opaque))->pos += iov[i].iov_len; + retval += iov[i].iov_len; + } + return retval; +} + + +static int fast_savevm_fclose_save_to_buffer(void *opaque){ + memcpy(((struct fast_savevm_opaque_t*)(opaque))->output_buffer, ((struct fast_savevm_opaque_t*)(opaque))->buf, ((struct fast_savevm_opaque_t*)(opaque))->pos); + *((struct fast_savevm_opaque_t*)(opaque))->output_buffer_size = ((struct fast_savevm_opaque_t*)(opaque))->pos; + //printf("DUMPED: %d\n", *((struct fast_savevm_opaque_t*)(opaque))->output_buffer_size); + return 0; +} + +static int fast_loadvm_fclose(void *opaque){ + return 0; +} + +static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size){ + memcpy(buf, (void*)(((struct fast_savevm_opaque_t*)(opaque))->buf + pos), size); + return size; +} + +static const QEMUFileOps fast_loadvm_ops = { + .get_buffer = (QEMUFileGetBufferFunc*)fast_loadvm_get_buffer, + .close = (QEMUFileCloseFunc*)fast_loadvm_fclose +}; + +static const QEMUFileOps fast_savevm_ops_to_buffer = { + .writev_buffer = (QEMUFileWritevBufferFunc*)fast_savevm_writev_buffer, + .close = (QEMUFileCloseFunc*)fast_savevm_fclose_save_to_buffer +}; + + +nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){ + nyx_device_state_t* self = malloc(sizeof(nyx_device_state_t)); + memset(self, 0, sizeof(nyx_device_state_t)); + + self->state_buf = malloc(STATE_BUFFER); + self->state_buf_size = 0; + + char* qemu_state_file; + assert(asprintf(&qemu_state_file, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1); + + struct fast_savevm_opaque_t fast_savevm_opaque; + FILE* f; + + uint8_t ret = global_state_store(); + assert(!ret); + + /* Testing Stuff */ + struct stat buffer; + assert(stat (qemu_state_file, &buffer) == 0); + + debug_printf("FILE EXISTS...\n"); + + void* state_buf2 = malloc(STATE_BUFFER); + + f = fopen(qemu_state_file, "r"); + assert(fread(state_buf2, buffer.st_size, 1, f) == 1); + fclose(f); + + fast_savevm_opaque.buf = state_buf2; + fast_savevm_opaque.f = NULL;//fopen("/tmp/qemu_state", "w"); + fast_savevm_opaque.pos = 0; + QEMUFile* file_dump = qemu_fopen_ops(&fast_savevm_opaque, &fast_loadvm_ops); + + qemu_devices_reset(); + qemu_loadvm_state(file_dump); + + 
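/*
 * Aside: fast_savevm_ops_to_buffer/fast_loadvm_ops above emulate a QEMUFile
 * on top of a plain memory buffer, so the stock qemu_savevm/qemu_loadvm
 * machinery can serialize and replay device state without touching disk. The
 * underlying pattern, reduced to a self-contained cursor-over-buffer reader
 * (illustrative only, not the Nyx code itself):
 */
#include <stdint.h>
#include <string.h>

typedef struct { const uint8_t* buf; size_t pos; size_t size; } membuf_t;

static size_t membuf_get(membuf_t* m, void* out, size_t want){
    size_t left = m->size - m->pos;
    size_t n = want < left ? want : left;
    memcpy(out, m->buf + m->pos, n); /* serve reads straight from RAM */
    m->pos += n;
    return n;
}
/*
 * qemu_fopen_ops() binds callbacks of this shape (get_buffer/writev_buffer/
 * close) to a QEMUFile handle; qemu_loadvm_state() then consumes the buffered
 * device state exactly as if it arrived over a migration stream.
 */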
if(!pre_snapshot){ + self->qemu_state = state_reallocation_new(file_dump); + } + + free(state_buf2); + + if(!pre_snapshot){ + enable_fast_snapshot_mode(); + save_tsc_value(self, false); + } + + return self; +} + +nyx_device_state_t* nyx_device_state_init(void){ + + nyx_device_state_t* self = malloc(sizeof(nyx_device_state_t)); + memset(self, 0, sizeof(nyx_device_state_t)); + + self->state_buf = malloc(STATE_BUFFER); + self->state_buf_size = 0; + + Error *local_err = NULL; + struct fast_savevm_opaque_t fast_savevm_opaque, fast_loadvm_opaque; + //state_reallocation_t* qemu_state; + + void* tmp_buf = malloc(1024*1024*16); + //memset(self->state_buf, 0, STATE_BUFFER); + + fast_savevm_opaque.output_buffer = self->state_buf; + fast_savevm_opaque.output_buffer_size = &self->state_buf_size; + + fast_savevm_opaque.buf = tmp_buf;//self->state_buf; + fast_savevm_opaque.f = NULL; //fopen("/tmp/delta", "w"); + fast_savevm_opaque.pos = 0; + + uint8_t ret = global_state_store(); + assert(!ret); + + QEMUFile* f = qemu_fopen_ops(&fast_savevm_opaque, &fast_savevm_ops_to_buffer); + ret = fast_qemu_savevm_state(f, &local_err); + //qemu_fflush(f); + + + fast_loadvm_opaque.buf = tmp_buf; //self->state_buf; + fast_loadvm_opaque.f = NULL; + fast_loadvm_opaque.pos = 0; + QEMUFile* file_dump = qemu_fopen_ops(&fast_loadvm_opaque, &fast_loadvm_ops); + + //qemu_mutex_lock_iothread(); + //qemu_devices_reset(); + self->qemu_state = state_reallocation_new(file_dump); + //qemu_mutex_unlock_iothread(); + qemu_fclose(file_dump); + + + //sleep(1); + qemu_fclose(f); + free(tmp_buf); + + + enable_fast_snapshot_mode(); + save_tsc_value(self, false); + return self; + + //return qemu_state; +} + +void nyx_device_state_switch_incremental(nyx_device_state_t* self){ + self->incremental_mode = true; + fdl_fast_create_tmp(self->qemu_state); + fdl_fast_enable_tmp(self->qemu_state); +} + +void nyx_device_state_disable_incremental(nyx_device_state_t* self){ + fdl_fast_disable_tmp(self->qemu_state); + self->incremental_mode = false; +} + +void nyx_device_state_restore(nyx_device_state_t* self){ + fdl_fast_reload(self->qemu_state); + call_fast_change_handlers(); +} + +void nyx_device_state_post_restore(nyx_device_state_t* self){ + set_tsc_value(self, self->incremental_mode); +} + + +void nyx_device_state_save_tsc(nyx_device_state_t* self){ + save_tsc_value(self, false); +} + + +void nyx_device_state_save_tsc_incremental(nyx_device_state_t* self){ + save_tsc_value(self, true); +} + +void nyx_device_state_serialize(nyx_device_state_t* self, const char* snapshot_folder){ + char* tmp; + assert(asprintf(&tmp, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1); + + FILE* f_qemu_state = fopen(tmp, "w+b"); + assert(fwrite(self->state_buf, 1, self->state_buf_size, f_qemu_state) == self->state_buf_size); + fclose(f_qemu_state); +} diff --git a/nyx/snapshot/devices/nyx_device_state.h b/nyx/snapshot/devices/nyx_device_state.h new file mode 100644 index 0000000000..666330567d --- /dev/null +++ b/nyx/snapshot/devices/nyx_device_state.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include +#include "nyx/snapshot/devices/state_reallocation.h" + +typedef struct nyx_device_state_s{ + state_reallocation_t* qemu_state; + + uint64_t tsc_value; + uint64_t tsc_value_incremental; + + bool incremental_mode; + + void* state_buf; /* QEMU's serialized state */ + uint32_t state_buf_size; + +} nyx_device_state_t; + + +nyx_device_state_t* nyx_device_state_init(void); +nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_folder, bool 
pre_snapshot); + +void nyx_device_state_restore(nyx_device_state_t* self); +void nyx_device_state_post_restore(nyx_device_state_t* self); + +void nyx_device_state_switch_incremental(nyx_device_state_t* self); +void nyx_device_state_disable_incremental(nyx_device_state_t* self); + +void nyx_device_state_save_tsc(nyx_device_state_t* self); +void nyx_device_state_save_tsc_incremental(nyx_device_state_t* self); + +void nyx_device_state_serialize(nyx_device_state_t* self, const char* snapshot_folder); diff --git a/nyx/snapshot/devices/state_reallocation.c b/nyx/snapshot/devices/state_reallocation.c new file mode 100644 index 0000000000..666a207160 --- /dev/null +++ b/nyx/snapshot/devices/state_reallocation.c @@ -0,0 +1,1000 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "target/i386/cpu.h" +#include "qemu/main-loop.h" + +#include "sysemu/kvm_int.h" +#include "migration/vmstate.h" +#include "migration/register.h" +#include "migration/savevm.h" +#include "migration/qemu-file.h" +#include "nyx/debug.h" +#include "nyx/snapshot/devices/state_reallocation.h" +#include "nyx/snapshot/devices/nyx_device_state.h" + + +//uint32_t fpos = 0; +#define QEMU_VM_SUBSECTION 0x05 + +typedef struct CompatEntry { + char idstr[256]; + int instance_id; +} CompatEntry; + +typedef struct SaveStateEntry { + QTAILQ_ENTRY(SaveStateEntry) entry; + char idstr[256]; + int instance_id; + int alias_id; + int version_id; + int load_version_id; + int section_id; + int load_section_id; + SaveVMHandlers *ops; + const VMStateDescription *vmsd; + CompatEntry *opaque; + CompatEntry *compat; + int is_ram; +} SaveStateEntry; + +struct LoadStateEntry { + QLIST_ENTRY(LoadStateEntry) entry; + SaveStateEntry *se; + int section_id; + int version_id; +}; + +typedef struct SaveState { + QTAILQ_HEAD(, SaveStateEntry) handlers; + int global_section_id; + bool skip_configuration; + uint32_t len; + const char *name; + uint32_t target_page_bits; +} SaveState; + +extern void* vmstate_configuration; +extern SaveState savevm_state; + +extern int vmstate_n_elems(void *opaque, VMStateField *field); +extern int vmstate_size(void *opaque, VMStateField *field); +extern void vmstate_handle_alloc(void *ptr, VMStateField *field, void *opaque); +extern int vmstate_load(QEMUFile *f, SaveStateEntry *se); + +static void fast_timer_get(void* data, size_t size, void* opaque) +{ + QEMUTimer *ts = (QEMUTimer*) opaque; + uint64_t expire_time = *((uint64_t*)data); + //fprintf(stderr, "%s: VALUE IS: %lx\n", __func__, expire_time); + + if (expire_time != -1) { + timer_mod_ns(ts, expire_time); + } else { + timer_del(ts); + } + //fprintf(stderr, "%s: DONE!\n", __func__); + +} + + + + +static SaveStateEntry *fdl_find_se(const char *idstr, int instance_id) +{ + SaveStateEntry *se; + + QTAILQ_FOREACH(se, &savevm_state.handlers, entry) { + if (!strcmp(se->idstr, idstr) && + (instance_id == 
se->instance_id ||
+             instance_id == se->alias_id)){
+            //printf("FOUND 1\n");
+            return se;
+        }
+        /* Migrating from an older version? */
+        if (strstr(se->idstr, idstr) && se->compat) {
+            if (!strcmp(se->compat->idstr, idstr) &&
+                (instance_id == se->compat->instance_id ||
+                 instance_id == se->alias_id)){
+                //printf("FOUND 2\n");
+                return se;
+            }
+        }
+    }
+    printf("NOPE\n");
+    return NULL;
+}
+
+static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const VMStateDescription *vmsd, void *opaque, int version_id, uintptr_t* opaque_ptr);
+
+
+static inline VMStateDescription* fdl_vmstate_get_subsection(VMStateDescription **sub, char *idstr)
+{
+    while (sub && *sub && (*sub)->needed) {
+        if (strcmp(idstr, (*sub)->name) == 0) {
+            //printf("SUB %p\n", &sub);
+            //sub_vmsd_ptr = &sub;
+            return *sub; /* don't dereference...return ptr */
+        }
+        sub++;
+    }
+    return NULL;
+}
+
+static int fdl_vmstate_subsection_load(state_reallocation_t* self, QEMUFile *f, const VMStateDescription *vmsd, void *opaque)
+{
+    while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) {
+        char idstr[256], *idstr_ret;
+        int ret;
+        uint8_t version_id, len, size;
+        const VMStateDescription *sub_vmsd;
+
+        len = qemu_peek_byte(f, 1);
+        if (len < strlen(vmsd->name) + 1) {
+            /* subsection name has to be "section_name/a" */
+            //fprintf(stderr, "%s: exit\n", __func__);
+
+            return 0;
+        }
+        size = qemu_peek_buffer(f, (uint8_t **)&idstr_ret, len, 2);
+        if (size != len) {
+            return 0;
+        }
+        memcpy(idstr, idstr_ret, size);
+        idstr[size] = 0;
+
+        if (strncmp(vmsd->name, idstr, strlen(vmsd->name)) != 0) {
+            /* it doesn't have a valid subsection name */
+            //fprintf(stderr, "%s: exit\n", __func__);
+            return 0;
+        }
+        sub_vmsd = fdl_vmstate_get_subsection((VMStateDescription **)vmsd->subsections, idstr);
+        if (sub_vmsd == NULL) {
+            return -ENOENT;
+        }
+        qemu_file_skip(f, 1); /* subsection */
+        qemu_file_skip(f, 1); /* len */
+        qemu_file_skip(f, len); /* idstr */
+        version_id = qemu_get_be32(f);
+
+        ret = fdl_vmstate_load_state(self, f, sub_vmsd, opaque, version_id, NULL);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+uint32_t post_counter = 0;
+void* post_fptr_array[256];
+uint32_t post_version_id_array[256];
+void* post_opaque_array[256];
+
+
+static void add_post_fptr(state_reallocation_t* self, void* fptr, uint32_t version_id, void* opaque, const char* name){
+
+    if(!self){
+        return;
+    }
+    //printf("%s: %s\n", __func__, name);
+
+    if(!strcmp("I440FX", name)){
+        return;
+    }
+
+    if(1){
+        /*
+        if( !strcmp("cpu_common", name) ||
+            !strcmp("cpu/nested_state", name) ||
+            !strcmp("cpu", name) ||
+            //!strcmp("kvm-tpr-opt", name) ||
+
+            !strcmp("globalstate", name) ||
+            !strcmp("serial", name) ||
+
+            !strcmp("i8259", name) ||
+            !strcmp("i8254", name) ||
+
+            !strcmp("apic", name) ||
+            //!strcmp("vga", name) ||
+            //!strcmp("ide", name) ||
+            //!strcmp("ide_drive", name) ||
+            !strcmp("ioapic", name)){
+        */
+
+        /*
+        --> cpu_common
+        --> cpu/nested_state
+        --> cpu
+        --> apic
+        --> I440FX
+        --> PIIX3
+        --> i8259
+        --> i8259
+        --> ioapic
+        --> vga
+        --> hpet
+        --> mc146818rtc
+        --> i8254
+        --> dma
+        --> dma
+        --> serial
+        --> fdrive
+        --> fdrive
+        --> fdc
+        --> ps2kbd
+        --> ps2mouse
+        --> pckbd
+        --> vmmouse
+        --> e1000/tx_tso_state
+        --> e1000
+        --> ide_drive
+        --> ide_drive
+        --> ide_drive
+        --> ide_drive
+        --> ide
+        --> piix4_pm
+        --> globalstate
+        */
+
+        //fprintf(stderr, "--> %s\n", name);
+
+        self->fptr[self->fast_state_fptr_pos] = fptr;
+        self->opaque[self->fast_state_fptr_pos] = opaque;
+        self->version[self->fast_state_fptr_pos] = version_id;
+        self->fast_state_fptr_pos++;
+
+        if(self->fast_state_fptr_pos >= self->fast_state_fptr_size){
+            printf("RESIZE %s\n", __func__);
+            self->fast_state_fptr_size += REALLOC_SIZE;
+            self->fptr = realloc(self->fptr, self->fast_state_fptr_size * sizeof(void*));
+            self->opaque = realloc(self->opaque, self->fast_state_fptr_size * sizeof(void*));
+            self->version = realloc(self->version, self->fast_state_fptr_size * sizeof(uint32_t));
+        }
+    }
+}
+
+extern void fast_get_pci_config_device(void* data, size_t size, void* opaque);
+void fast_get_pci_irq_state(void* data, size_t size, void* opaque);
+
+static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t size, void* field, QEMUFile* f, const char* name){
+    if(!self){
+        return;
+    }
+
+    void (*handler)(void*, size_t, void*) = NULL;
+    void* data = NULL;
+
+    if(!strcmp(name, "timer")){
+        debug_fprintf(stderr, "SKIPPING: %ld\n", size*-1);
+        qemu_file_skip(f, size * -1);
+        handler = fast_timer_get;
+        data = malloc(sizeof(uint64_t));
+        *((uint64_t*)data) = qemu_get_be64(f);
+    }
+    else if(!strcmp(name, "pci irq state")){
+        qemu_file_skip(f, size * -1);
+        handler = fast_get_pci_irq_state;
+        data = malloc(sizeof(uint8_t)*size);
+
+        ((uint32_t*)data)[0] = qemu_get_be32(f);
+        ((uint32_t*)data)[1] = qemu_get_be32(f);
+        ((uint32_t*)data)[2] = qemu_get_be32(f);
+        ((uint32_t*)data)[3] = qemu_get_be32(f);
+    }
+    else if(!strcmp(name, "pci config")){
+        qemu_file_skip(f, size * -1);
+        handler = fast_get_pci_config_device;
+        data = malloc(sizeof(uint8_t)*size);
+        qemu_get_buffer(f, (uint8_t*)data, size);
+    }
+    else{
+        fprintf(stderr, "WARNING: NOT IMPLEMENTED FAST GET ROUTINE for %s\n", name);
+        abort();
+        return;
+    }
+
+    self->get_fptr[self->fast_state_get_fptr_pos] = handler;
+    self->get_opaque[self->fast_state_get_fptr_pos] = opaque;
+    self->get_size[self->fast_state_get_fptr_pos] = size;
+    self->get_data[self->fast_state_get_fptr_pos] = data;
+
+    self->fast_state_get_fptr_pos++;
+
+    if(self->fast_state_get_fptr_pos >= self->fast_state_get_fptr_size){
+        debug_printf("RESIZE %s\n", __func__);
+        self->fast_state_get_fptr_size += REALLOC_SIZE;
+        self->get_fptr = realloc(self->get_fptr, self->fast_state_get_fptr_size * sizeof(void*));
+        self->get_opaque = realloc(self->get_opaque, self->fast_state_get_fptr_size * sizeof(void*));
+        self->get_size = realloc(self->get_size, self->fast_state_get_fptr_size * sizeof(size_t));
+        self->get_data = realloc(self->get_data, self->fast_state_get_fptr_size * sizeof(void*));
+    }
+}
+
+static void add_mblock(state_reallocation_t* self, char* foo, const char* bar, size_t offset, uint64_t start, uint64_t size){
+
+    if(!self){
+        return;
+    }
+
+    if(self->fast_state_pos && (uint64_t)(self->ptr[self->fast_state_pos-1]+self->size[self->fast_state_pos-1]) == start){
+        void* new = (void*)(self->pre_alloc_block+self->pre_alloc_block_offset);
+        self->pre_alloc_block_offset += size;
+        memcpy(new, (void*)start, size);
+
+        self->size[self->fast_state_pos-1] = size + self->size[self->fast_state_pos-1];
+    }
+    else{
+        self->ptr[self->fast_state_pos] = (void*)start;
+        self->copy[self->fast_state_pos] = (void*)(self->pre_alloc_block+self->pre_alloc_block_offset);
+        self->pre_alloc_block_offset += size;
+
+        memcpy(self->copy[self->fast_state_pos], (void*)start, size);
+        self->size[self->fast_state_pos] = size;
+        self->fast_state_pos++;
+        if(self->fast_state_pos >= self->fast_state_size){
+            self->fast_state_size += REALLOC_SIZE;
+            self->ptr = realloc(self->ptr, self->fast_state_size * sizeof(void*));
+            self->copy = realloc(self->copy, self->fast_state_size * sizeof(void*));
+            self->size = realloc(self->size, self->fast_state_size * sizeof(size_t));
+        }
+    }
+}
+
+static inline int get_handler(state_reallocation_t* self, QEMUFile* f, void* curr_elem, size_t size, VMStateField *field, char* vmsd_name){
+
+    int ret;
+    //printf("%s\n", vmsd_name);
+
+    ret = field->info->get(f, curr_elem, size, field);
+
+    if (!strcmp(field->info->name, "bool")){
+        assert(size == 1);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
+    }
+    else if(!strcmp(field->info->name, "int8")){
+        assert(size == 1);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
+    }
+    else if(!strcmp(field->info->name, "int16")){
+        assert(size == 2);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 2);
+    }
+    else if(!strcmp(field->info->name, "int32")){
+        assert(size == 4);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
+    }
+    else if(!strcmp(field->info->name, "int32 equal")){
+        assert(size == 4);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
+    }
+    else if(!strcmp(field->info->name, "int32 le")){
+        assert(size == 4);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
+    }
+    else if(!strcmp(field->info->name, "int64")){
+        assert(size == 8);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
+    }
+    else if(!strcmp(field->info->name, "uint8")){
+        assert(size == 1);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
+    }
+    else if(!strcmp(field->info->name, "uint16")){
+        assert(size == 2);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 2);
+    }
+    else if(!strcmp(field->info->name, "uint32")){
+        assert(size == 4);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
+    }
+    else if(!strcmp(field->info->name, "uint32 equal")){
+        assert(size == 4);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
+    }
+    else if(!strcmp(field->info->name, "uint64")){
+        assert(size == 8);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
+    }
+    else if(!strcmp(field->info->name, "int64 equal")){
+        assert(size == 8);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
+    }
+    else if(!strcmp(field->info->name, "uint8 equal")){
+        assert(size == 1);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
+    }
+    else if(!strcmp(field->info->name, "uint16 equal")){
+        assert(size == 2);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 2);
+    }
+    else if(!strcmp(field->info->name, "float64")){
+        assert(size == 8);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
+    }
+    else if(!strcmp(field->info->name, "CPU_Double_U")){
+        //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size);
+        assert(0);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
+    }
+    else if(!strcmp(field->info->name, "buffer")){
+        //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size);
+        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, size);
+    }
+    else if(!strcmp(field->info->name, "unused_buffer")){
+        //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size);
+        /* save
nothing */ + } + else if(!strcmp(field->info->name, "tmp")){ + //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size); + add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, size); + + /* save nothing */ + } + else if(!strcmp(field->info->name, "bitmap")){ + //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size); + assert(0); + } + else if(!strcmp(field->info->name, "qtailq")){ + //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size); + assert(0); + } + else if(!strcmp(field->info->name, "timer")){ + //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size); + //printf("%s time\n", vmsd_name); + //add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, sizeof(QEMUTimer)); + add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name); + } + else if(!strcmp(field->info->name, "fpreg")){ + debug_fprintf(stderr, "type: %s (size: %lx)\n", field->info->name, size); + assert(0); + add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name); + } + else if(!strcmp(field->info->name, "pci config")){ + //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size); + add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name); + } + else if(!strcmp(field->info->name, "pci irq state")){ + //fprintf(stderr, "type: %s (size: %x)\n", field->info->name, size); + add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name); + } + else if(!strcmp(field->info->name, "virtio")){ + fprintf(stderr, "type: %s (size: %lx)\n", field->info->name, size); + abort(); /* not yet implemented */ + } + else{ + fprintf(stderr, "FAIL field->info->name: %s\n", field->info->name); + assert(0); + } + + return ret; +} + + + +//migration_obj_t* obj; +//void* base_opaque; + +//#define VERBOSE_DEBUG + +/* todo: modify opaque_ptr */ +static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const VMStateDescription *vmsd, void *opaque, int version_id, uintptr_t* opaque_ptr) +{ +#ifdef VERBOSE_DEBUG + printf("---------------------------------\nVMSD: %p\t%s\n", opaque, vmsd->name); +#endif + + //fprintf(stderr, "---------------------------------\nVMSD: %p\t%s\n", opaque, vmsd->name); + + VMStateField *field = (VMStateField *)vmsd->fields; + int ret = 0; + + /* + bool alloc_later = false; + if(alloc_block){ + base_opaque = opaque; + alloc_block = false; + alloc_later = true; + obj = alloc_migration_obj(); + } + */ + + uint64_t total_size = 0; + + if (version_id > vmsd->version_id) { + return -EINVAL; + } + if (version_id < vmsd->minimum_version_id) { +#ifdef VERBOSE_DEBUG + printf("OLD LOAD\n"); +#endif + + if (vmsd->load_state_old && + version_id >= vmsd->minimum_version_id_old) { + fprintf(stderr, "OLDSTATE\n"); + assert(0); + ret = vmsd->load_state_old(f, opaque, version_id); + return ret; + } + return -EINVAL; + } + if (vmsd->pre_load) { +#ifdef VERBOSE_DEBUG + printf("\tPRELOAD Function\n"); +#endif + /* TODO ADD PRE FPTR FOR SERIAL */ + //add_pre_fptr(self, vmsd->pre_load, opaque, vmsd->name); + //fprintf(stderr, "PRELOAD RUN: %s\n", vmsd->name); + //add_pre_fptr(self, vmsd->pre_load, opaque, vmsd->name); + add_post_fptr(self, vmsd->pre_load, 1337, opaque, vmsd->name); + //int ret = 0; + //return; + + + + /* + int ret = vmsd->pre_load(opaque); + if (ret) { + return ret; + } + */ + + + } + while (field->name) { +#ifdef VERBOSE_DEBUG + printf("Field: %s %s %s\n", __func__, vmsd->name, 
field->name); +#endif + //fprintf(stderr, "Field: %s %s %s\n", __func__, vmsd->name, field->name); + + //printf("Field: %s %s %s\n", __func__, vmsd->name, field->name); + if ((field->field_exists && + field->field_exists(opaque, version_id)) || + (!field->field_exists && + field->version_id <= version_id)) { + void *first_elem = opaque + field->offset; + int i, n_elems = vmstate_n_elems(opaque, field); + int size = vmstate_size(opaque, field); + + //printf("\t\t%s %d\n", field->name, size); + +#ifdef VERBOSE_DEBUG + printf("-----------------> vmstate_handle_alloc\n"); +#endif + //fprintf(stderr, "-----------------> vmstate_handle_alloc\n"); + vmstate_handle_alloc(first_elem, field, opaque); + if (field->flags & VMS_POINTER) { +#ifdef VERBOSE_DEBUG + printf("FIX ME VMS_POINTER\n"); +#endif + // printf("Field-Offset 0x%lx-0x%lx\n", opaque+field->offset, opaque+field->offset+(size*n_elems)); + + /* fix me */ + /* broken af */ + //printf("add_translatable_block: %lx %lx %ld\n", *(void **)first_elem, first_elem, n_elems*size); + /* + if((n_elems*size)){ + add_translatable_block((void*)(*(void **)first_elem), (void*)first_elem, (uint64_t)(n_elems*size), field->name, 0, (void*) NULL, (void*) NULL); + } + */ + + //fprintf(stderr, "FIX ME VMS_POINTER\n"); + first_elem = *(void **)first_elem; + assert(first_elem || !n_elems || !size); + } + for (i = 0; i < n_elems; i++) { + uint64_t* tmp_opaque_ptr = 0; + total_size += size; + void *curr_elem = first_elem + size * i; + + //if (!(field->flags & VMS_POINTER)) { + // tmp_opaque_ptr = 0; + //} + //assert(!(field->flags & VMS_POINTER) || n_elems == 1); + + if (field->flags & VMS_ARRAY_OF_POINTER) { + //printf("VMS_ARRAY_OF_POINTER\n"); + //add_mblock((uint64_t)(curr_elem), (uint64_t)(size)); + //add_mblock((uint64_t)(field->offset + (opaque)), (uint64_t)(size*n_elems)); +#ifdef VERBOSE_DEBUG + printf("Field-Offset 1 0x%lx-0x%lx\n", (uint64_t)(field->offset + (opaque)), (uint64_t)(field->offset+(size*n_elems) + (opaque))); + printf("=VMS_ARRAY_OF_POINTER 1= %lx %x\n", *((uint64_t*)curr_elem), size); + //hexDump((void*)field->name, curr_elem, size); +#endif + + tmp_opaque_ptr = curr_elem; + curr_elem = *(void **)curr_elem; + add_mblock(self, (char*)vmsd->name, (const char*)field->name, field->offset, (uint64_t)(curr_elem), (uint64_t)(size)); +#ifdef VERBOSE_DEBUG + //hexDump((void*)field->name, curr_elem, size); +#endif + } + + if (!curr_elem && size) { + // if null pointer check placeholder and do not follow + assert(field->flags & VMS_ARRAY_OF_POINTER); + //printf("=================vmstate_info_nullptr\n");# + //add_mblock((uint64_t)(curr_elem), (uint64_t)(size)); + //add_mblock((uint64_t)(field->offset + (opaque)), (uint64_t)(size*n_elems)); +#ifdef VERBOSE_DEBUG + printf("Field-Offset 2 0x%lx-0x%lx\n", (uint64_t)(field->offset + (opaque)), (uint64_t)(field->offset+(size*n_elems) + (opaque))); + printf("=VMS_ARRAY_OF_POINTER 2= %lx %x\n", *((uint64_t*)curr_elem), size); + //hexDump((void*)field->name, curr_elem, size); +#endif + + debug_printf("*** vmstate_info_nullptr.get ***\n"); + ret = vmstate_info_nullptr.get(f, curr_elem, size, NULL); + add_mblock(self, (char*)vmsd->name, (const char*)field->name, field->offset, (uint64_t)(curr_elem), (uint64_t)(size)); +#ifdef VERBOSE_DEBUG + //hexDump((void*)field->name, curr_elem, size); +#endif + + } else if (field->flags & VMS_STRUCT) { + //printf("Field-Offset 0x%lx-0x%lx\n", field->offset + (opaque-base_opaque), field->offset+(size*n_elems) + (opaque-base_opaque)); +#ifdef VERBOSE_DEBUG + 
printf("=VMS_STRUCT= %lx %x\n", *((uint64_t*)curr_elem), size); + //hexDump((void*)field->name, curr_elem, size); +#endif + /* fix me */ + //ret = vmstate_load_state(f, field->vmsd, curr_elem, field->vmsd->version_id); + ret = fdl_vmstate_load_state(self, f, field->vmsd, curr_elem, field->vmsd->version_id, tmp_opaque_ptr); +#ifdef VERBOSE_DEBUG + //hexDump((void*)field->name, curr_elem, size); +#endif + + } else { + + + ret = get_handler(self, f, curr_elem, size, field, (char*)vmsd->name); + } + if (ret >= 0) { + //printf("FILE ERROR\n"); + //fprintf(stderr, "FILE ERROR\n"); + //assert(0); + ret = qemu_file_get_error(f); + } + if (ret < 0) { + debug_fprintf(stderr, "RETURNING!\n"); + return ret; + } + } + } else if (field->flags & VMS_MUST_EXIST) { + debug_printf("Input validation failed: %s/%s", vmsd->name, field->name); + return -1; + } + else { + //printf("Field does not exist...\n"); + } + field++; + } + + /* fix me */ + ret = fdl_vmstate_subsection_load(self, f, vmsd, opaque); + //ret = fdl_vmstate_subsection_load(f, vmsd, opaque, opaque_ptr); + + if (ret != 0) { + return ret; + } + + /* + if(alloc_later){ + add_opaque_block(obj, opaque, total_size); + } + */ + + //printf("------\n"); + + if (vmsd->post_load) { +#ifdef VERBOSE_DEBUG + printf("\tPOSTLOAD Function\n"); +#endif + add_post_fptr(self, vmsd->post_load, version_id, opaque, vmsd->name); + //ret = 0; + ret = vmsd->post_load(opaque, version_id); + } +#ifdef VERBOSE_DEBUG + printf("\tTotal Size:%ld\n", total_size); +#endif + return ret; +} + + +static int fdl_vmstate_load(state_reallocation_t* self, QEMUFile *f, SaveStateEntry *se, int version_id) +{ + //trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); + if (!se->vmsd) { /* Old style */ + //fprintf(stderr, "\t<<>>\n"); + return se->ops->load_state(f, se->opaque, version_id); + } + //fprintf(stderr, "NEW Style\n"); + uintptr_t* t = (uintptr_t*)&(se->opaque); + //printf("------>\n"); + //printf("VMSD1: %s\n", (VMStateDescription *)(se->vmsd)->name); + + //printf("SE:\t%p %p %p %p\n", se, se->opaque, &(se->opaque) ,t); + return fdl_vmstate_load_state(self, f, se->vmsd, se->opaque, version_id, (uintptr_t*)t); +} + +static int fdl_enumerate_section(state_reallocation_t* self, QEMUFile *f, MigrationIncomingState *mis){ + uint32_t instance_id, version_id, section_id; + SaveStateEntry *se; + //LoadStateEntry *le = NULL; + + char idstr[256]; + int ret; + + /* Read section start */ + section_id = qemu_get_be32(f); + if (!qemu_get_counted_string(f, idstr)) { + printf("Unable to read ID string for section %u", section_id); + return -EINVAL; + } + instance_id = qemu_get_be32(f); + version_id = qemu_get_be32(f); + + //printf("%s %s %d\n", __func__, idstr, instance_id); + + /* Find savevm section */ + se = fdl_find_se(idstr, instance_id); + //printf("se %p\n", se); + if (se == NULL) { + printf("Unknown savevm section or instance '%s' %d", idstr, instance_id); + return -EINVAL; + } + + /* Validate version */ + if (version_id > se->version_id) { + printf("savevm: unsupported version %d for '%s' v%d", version_id, idstr, se->version_id); + return -EINVAL; + } + /* Add entry */ + /* + le = g_malloc0(sizeof(*le)); + le->se = se; + //printf("\tSE:%s\n", se); + le->section_id = section_id; + le->version_id = version_id; + QLIST_INSERT_HEAD(&mis->loadvm_handlers, le, entry); + */ + + se->load_version_id = version_id; + se->load_section_id = section_id; + + if(se->vmsd && ((strcmp("tiMer", (const char*)(VMStateDescription *)(se->vmsd)->name)) + /* + && (strcmp("cpu_common", 
(VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("cpu", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("kvm-tpr-opt", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("apic", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("kvmclock", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("fw_cfg", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("PCIBUS", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("I440FX", (VMStateDescription *)(se->vmsd)->name))
+
+        && (strcmp("PIIX3", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("i8259", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("ioapic", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("vga", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("hpet", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("mc146818rtc", (VMStateDescription *)(se->vmsd)->name))
+
+        && (strcmp("i8254", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("pcspk", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("dma", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("serial", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("parallel_isa", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("fdc", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("ps2kbd", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("ps2mouse", (VMStateDescription *)(se->vmsd)->name))
+
+        && (strcmp("pckbd", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("vmmouse", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("port92", (VMStateDescription *)(se->vmsd)->name))
+        */
+        //&& (strcmp("e1000", (VMStateDescription *)(se->vmsd)->name))
+        /*
+        && (strcmp("ide", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("i2c_bus", (VMStateDescription *)(se->vmsd)->name))
+
+        && (strcmp("piix4_pm", (VMStateDescription *)(se->vmsd)->name))
+        && (strcmp("acpi_build", (VMStateDescription *)(se->vmsd)->name))
+        */
+
+        )){
+        ret = fdl_vmstate_load(self, f, se, version_id);
+    }
+    else{
+        debug_fprintf(stderr, "---------------------------------\nVMSD2: %x\n", se->vmsd);
+        //abort();
+        //fprintf(stderr, "---------------------------------\nVMSD2: %s\n", (VMStateDescription *)(se->vmsd)->name);
+        ret = vmstate_load(f, se);
+    }
+
+    //ret = vmstate_load(f, se);
+    if (ret < 0) {
+        printf("error while loading state for instance 0x%x of device '%s'", instance_id, idstr);
+        return ret;
+    }
+
+    qemu_get_byte(f);
+    qemu_get_be32(f);
+
+    return 0;
+}
+
+static void fdl_enumerate_global_states(state_reallocation_t* self, QEMUFile *f){
+    ((struct QEMUFile_tmp*)f)->pos = 0;
+    ((struct QEMUFile_tmp*)f)->buf_index = 0;
+    ((struct QEMUFile_tmp*)f)->buf_size = 0;
+
+    uint8_t section_type;
+
+    MigrationIncomingState *mis = migration_incoming_get_current();
+
+    qemu_get_be32(f);
+    qemu_get_be32(f);
+    qemu_get_byte(f);
+
+    /* migration state */
+    vmstate_load_state(f, (VMStateDescription*) &vmstate_configuration, (void*)&savevm_state, 0);
+
+    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
+        switch (section_type) {
+        case QEMU_VM_SECTION_START:
+        case QEMU_VM_SECTION_FULL:
+            //if(!fpos){
+            //  fpos = qemu_ftell(f);
+            //}
+            fdl_enumerate_section(self, f, mis);
+            break;
+        default:
+            /* oops */
+            fprintf(stderr, "==> ERROR: unknown section_type: %x\n", section_type);
+            //abort();
+            break;
+        }
+    }
+}
+
+state_reallocation_t* state_reallocation_new(QEMUFile *f){
+    state_reallocation_t* self = malloc(sizeof(state_reallocation_t));
+    self->fast_state_pos = 0;
+    self->fast_state_size = REALLOC_SIZE;
+    self->ptr =
malloc(sizeof(void*) * REALLOC_SIZE); + self->copy = malloc(sizeof(void*) * REALLOC_SIZE); + self->size = malloc(sizeof(size_t) * REALLOC_SIZE); + + self->fast_state_fptr_pos = 0; + self->fast_state_fptr_size = REALLOC_SIZE; + + self->fptr = malloc(sizeof(void*) * REALLOC_SIZE); + self->opaque = malloc(sizeof(void*) * REALLOC_SIZE); + self->version = malloc(sizeof(uint32_t) * REALLOC_SIZE); + + self->fast_state_get_fptr_pos = 0; + self->fast_state_get_fptr_size = REALLOC_SIZE; + + self->get_fptr = malloc(sizeof(void*) * REALLOC_SIZE); + self->get_opaque = malloc(sizeof(void*) * REALLOC_SIZE); + self->get_size = malloc(sizeof(size_t) * REALLOC_SIZE); + self->get_data = malloc(sizeof(void*) * REALLOC_SIZE); + + self->pre_alloc_block = (uint32_t*)mmap(NULL, PRE_ALLOC_BLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0); + assert(self->pre_alloc_block != (void *) -1); + self->pre_alloc_block_offset = 0; + + self->tmp_snapshot.enabled = false; + self->tmp_snapshot.fast_state_size = 0; + + fdl_enumerate_global_states(self, f); + + self->tmp_snapshot.copy = malloc(sizeof(void*) * self->fast_state_pos); + self->tmp_snapshot.fast_state_size = self->fast_state_pos; + + for(uint32_t i = 0; i < self->fast_state_pos; i++){ + self->tmp_snapshot.copy[i] = malloc(self->size[i]); + } + return self; +} + +/* +void state_reallocation_new_no_fdl(QEMUFile *f){ + fdl_enumerate_global_states(NULL, f); +} +*/ + +void fdl_fast_reload(state_reallocation_t* self){ + //uint64_t count = 0; + + + for(uint32_t i = 0; i < self->fast_state_fptr_pos; i++){ + if((self->version[i]) == 1337){ + ((int (*)(void *opaque))self->fptr[i])(self->opaque[i]); + } + } + + if(!self->tmp_snapshot.enabled){ + for(uint32_t i = 0; i < self->fast_state_pos; i++){ + //count += self->size[i]; + memcpy(self->ptr[i], self->copy[i], self->size[i]); + } + } + else{ + //fprintf(stderr, "====== %s TMP MODE ====== \n", __func__); + for(uint32_t i = 0; i < self->fast_state_pos; i++){ + //count += self->size[i]; + memcpy(self->ptr[i], self->tmp_snapshot.copy[i], self->size[i]); + } + } + + for(uint32_t i = 0; i < self->fast_state_fptr_pos; i++){ + if((self->version[i]) != 1337){ + ((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i], self->version[i]); + } + } +} + +void fdl_fast_create_tmp(state_reallocation_t* self){ + for(uint32_t i = 0; i < self->fast_state_fptr_pos; i++){ + if((self->version[i]) == 1337){ + ((int (*)(void *opaque))self->fptr[i])(self->opaque[i]); + } + else{ + //((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i], self->version[i]); + } + } + + for(uint32_t i = 0; i < self->fast_state_pos; i++){ + memcpy(self->tmp_snapshot.copy[i], self->ptr[i], self->size[i]); + } + + + for(uint32_t i = 0; i < self->fast_state_fptr_pos; i++){ + if((self->version[i]) == 1337){ + //((int (*)(void *opaque))self->fptr[i])(self->opaque[i]); + } + else{ + ((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i], self->version[i]); + } + } +} + +void fdl_fast_enable_tmp(state_reallocation_t* self){ + self->tmp_snapshot.enabled = true; +} + +void fdl_fast_disable_tmp(state_reallocation_t* self){ + self->tmp_snapshot.enabled = false; +} diff --git a/nyx/snapshot/devices/state_reallocation.h b/nyx/snapshot/devices/state_reallocation.h new file mode 100644 index 0000000000..4d594d1fe2 --- /dev/null +++ b/nyx/snapshot/devices/state_reallocation.h @@ -0,0 +1,112 @@ +/* + +Copyright (C) 2017 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). 
+ +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . + +*/ + +#ifndef STATE_REALLOCATION +#define STATE_REALLOCATION + +#include "qemu/osdep.h" +#include "monitor/monitor.h" +//#include "qemu-common.h" +#include "migration/migration.h" +#include "nyx/khash.h" + + + +#define IO_BUF_SIZE 32768 + +struct QEMUFile_tmp { + void *ops; + void *hooks; + void *opaque; + + int64_t bytes_xfer; + int64_t xfer_limit; + + int64_t pos; /* start of buffer when writing, end of buffer + when reading */ + volatile int buf_index; + int buf_size; /* 0 when writing */ + uint8_t buf[IO_BUF_SIZE]; +}; + +struct fast_savevm_opaque_t{ + FILE* f; + uint8_t* buf; + uint64_t pos; + void* output_buffer; + uint32_t* output_buffer_size; +}; + +#define REALLOC_SIZE 0x8000 + +#define PRE_ALLOC_BLOCK_SIZE 0x8000000 /* 128 MB */ + +typedef struct state_reallocation_tmp_s{ + void **copy; + uint32_t fast_state_size; + bool enabled; +} state_reallocation_tmp_t; + +typedef struct state_reallocation_s{ + void **ptr; + void **copy; + size_t *size; + + uint32_t fast_state_size; + uint32_t fast_state_pos; + + + void **fptr; + void **opaque; + uint32_t *version; + + uint32_t fast_state_fptr_size; + uint32_t fast_state_fptr_pos; + + + void **get_fptr; + void **get_opaque; + size_t *get_size; + void **get_data; + + //QEMUFile** file; + + uint32_t fast_state_get_fptr_size; + uint32_t fast_state_get_fptr_pos; + + /* prevents heap fragmentation and additional 2GB mem usage */ + void* pre_alloc_block; + uint32_t pre_alloc_block_offset; + + state_reallocation_tmp_t tmp_snapshot; + +} state_reallocation_t; + +state_reallocation_t* state_reallocation_new(QEMUFile *f); + +//void fdl_enumerate_global_states(QEMUFile *f); +void fdl_fast_reload(state_reallocation_t* self); + +void fdl_fast_create_tmp(state_reallocation_t* self); +void fdl_fast_enable_tmp(state_reallocation_t* self); +void fdl_fast_disable_tmp(state_reallocation_t* self); + +#endif \ No newline at end of file diff --git a/nyx/snapshot/devices/vm_change_state_handlers.c b/nyx/snapshot/devices/vm_change_state_handlers.c new file mode 100644 index 0000000000..87de854da1 --- /dev/null +++ b/nyx/snapshot/devices/vm_change_state_handlers.c @@ -0,0 +1,59 @@ + +#include +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" +#include "nyx/snapshot/devices/vm_change_state_handlers.h" + + +VMChangeStateHandler* change_kvm_clock_handler = NULL; +VMChangeStateHandler* change_kvm_pit_handler = NULL; +VMChangeStateHandler* change_cpu_handler = NULL; +void* change_kvm_clock_opaque = NULL; +void* change_kvm_pit_opaque = NULL; +void* change_cpu_opaque = NULL; + +VMChangeStateHandler* change_ide_core_handler = NULL; +uint8_t change_ide_core_opaque_num = 0; +void* change_ide_core_opaque[32] = {NULL}; + +void call_fast_change_handlers(void){ + assert(change_kvm_clock_handler && change_kvm_pit_handler && change_cpu_handler); + + change_kvm_clock_handler(change_kvm_clock_opaque, 1, RUN_STATE_RUNNING); + 
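/*
 * For context: devices normally register run-state callbacks via
 * qemu_add_vm_change_state_handler() when they are realized. A fast reload
 * never re-plugs the devices, so the snapshot layer mirrors the handful of
 * handlers it cares about into the table above and replays them here in a
 * fixed order (clock, PIT, CPU). A hypothetical device hook wired into this
 * table would look like the sketch below (my_dev_cb and my_dev_state are
 * illustrative names, not part of the patch):
 *
 *   static void my_dev_cb(void *opaque, int running, RunState state)
 *   {
 *       if (running) {
 *           // e.g. re-arm timers or resync the device with the guest clock
 *       }
 *   }
 *
 *   add_fast_reload_change_handler(my_dev_cb, my_dev_state,
 *                                  RELOAD_HANDLER_KVM_CLOCK);
 */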
change_kvm_pit_handler(change_kvm_pit_opaque, 1, RUN_STATE_RUNNING); + change_cpu_handler(change_cpu_opaque, 1, RUN_STATE_RUNNING); + + return; + /* check if necessary */ + if(change_ide_core_handler){ + for(uint8_t i = 0; i < change_ide_core_opaque_num; i++){ + change_ide_core_handler(change_ide_core_opaque[i], 1, RUN_STATE_RUNNING); + } + } +} + +void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id){ + switch(id){ + case RELOAD_HANDLER_KVM_CLOCK: + change_kvm_clock_handler = cb; + change_kvm_clock_opaque = opaque; + return; + case RELOAD_HANDLER_KVM_PIT: + change_kvm_pit_handler = cb; + change_kvm_pit_opaque = opaque; + return; + case RELOAD_HANDLER_KVM_CPU: + change_cpu_handler = cb; + change_cpu_opaque = opaque; + return; + case RELOAD_HANDLER_IDE_CORE: + change_ide_core_handler = cb; + change_ide_core_opaque[change_ide_core_opaque_num] = opaque; + change_ide_core_opaque_num++; + return; + default: + abort(); + } +} diff --git a/nyx/snapshot/devices/vm_change_state_handlers.h b/nyx/snapshot/devices/vm_change_state_handlers.h new file mode 100644 index 0000000000..88692e6ee3 --- /dev/null +++ b/nyx/snapshot/devices/vm_change_state_handlers.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +#include "sysemu/runstate.h" + +#define RELOAD_HANDLER_KVM_CLOCK 0 +#define RELOAD_HANDLER_KVM_PIT 1 +#define RELOAD_HANDLER_KVM_CPU 2 +#define RELOAD_HANDLER_IDE_CORE 3 + +void call_fast_change_handlers(void); +void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id); diff --git a/nyx/snapshot/helper.c b/nyx/snapshot/helper.c new file mode 100644 index 0000000000..714b10da51 --- /dev/null +++ b/nyx/snapshot/helper.c @@ -0,0 +1,31 @@ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "sysemu/cpus.h" +#include "qemu/main-loop.h" + +#include "qemu/bitmap.h" +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" + +#include "nyx/memory_access.h" + +#include "nyx/snapshot/helper.h" +#include "nyx/fast_vm_reload.h" + +//#define DEBUG_NYX_SNAPSHOT_HELPER + +uint64_t get_ram_size(void){ + RAMBlock *block; + uint64_t guest_ram_size = 0; + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + guest_ram_size += block->used_length; +#ifdef DEBUG_NYX_SNAPSHOT_HELPER + printf("Block: %s (%lx)\n", block->idstr, block->used_length); +#endif + } +#ifdef DEBUG_NYX_SNAPSHOT_HELPER + printf("%s - guest_ram_size: %lx\n", __func__, guest_ram_size); +#endif + return guest_ram_size; +} diff --git a/nyx/snapshot/helper.h b/nyx/snapshot/helper.h new file mode 100644 index 0000000000..870291b2f0 --- /dev/null +++ b/nyx/snapshot/helper.h @@ -0,0 +1,16 @@ +#pragma once + +#include + +/* don't! 
*/ +#define MAX_REGIONS 8 + +#ifndef PAGE_SIZE +#define PAGE_SIZE 0x1000 +#endif + +#define BITMAP_SIZE(x) ((x/PAGE_SIZE)/8) +#define DIRTY_STACK_SIZE(x) ((x/PAGE_SIZE)*sizeof(uint64_t)) + + +uint64_t get_ram_size(void); diff --git a/nyx/snapshot/memory/backend/nyx_debug.c b/nyx/snapshot/memory/backend/nyx_debug.c new file mode 100644 index 0000000000..726b011755 --- /dev/null +++ b/nyx/snapshot/memory/backend/nyx_debug.c @@ -0,0 +1,112 @@ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" + +#include "nyx/memory_access.h" + +#include "nyx/snapshot/memory/backend/nyx_debug.h" +#include "nyx/fast_vm_reload.h" + + +/* init operation */ +void nyx_snapshot_debug_pre_init(void){ + /* TODO */ +} + +/* init operation */ +void nyx_snapshot_debug_init(fast_reload_t* self){ + /* TODO */ +} + +/* enable operation */ +void nyx_snapshot_debug_enable(fast_reload_t* self){ + /* TODO */ +} + +/* restore operation */ +void nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){ + void* current_region = NULL; + int counter = 0; + for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){ + + if(shadow_memory_state->incremental_enabled){ + current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr; + } + else{ + current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr; + } + + for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){ + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr; + void* snapshot_addr = current_region + addr; + uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base; + + /* check first if the page is dirty (this is super slow, but quite useful for debugging) */ + if(memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)){ + /* check if page is not on the block list */ + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){ + //fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr); + + if(verbose){ + printf("%s -> (phys: 0x%lx) %p <-- %p [%d]\n", __func__, physical_addr, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled); + counter++; + } + + memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE); + } + } + } + } + + if(verbose){ + printf("TOTAL: %d\n", counter); + } +} + +void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){ + void* current_region = NULL; + + for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){ + + if(shadow_memory_state->incremental_enabled){ + current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr; + } + else{ + current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr; + } + + for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){ + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr; + void* snapshot_addr = current_region + addr; + uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base; + void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + addr; + + /* check first if the page is dirty (this is super slow, but quite useful for debugging) */ + if(memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)){ + /* check if page is not on the block list */ + 
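/*
 * Aside: unlike the FDL and dirty-ring backends, this debug backend has no
 * dirty log at all -- it detects dirty pages by brute force, comparing every
 * guest page against its snapshot copy before restoring it (O(guest RAM) per
 * reset, hence "super slow"). The per-page predicate boils down to the
 * following (a minimal sketch using the same TARGET_PAGE_SIZE granularity
 * as the loop below):
 */
#define PAGE_IS_DIRTY(live, snapshot) \
    (memcmp((live), (snapshot), TARGET_PAGE_SIZE) != 0) /* any differing byte -> dirty */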
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){ + //fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr); + + if(verbose && !shadow_memory_is_root_page_tracked(shadow_memory_state, addr, i)){ + printf("%s -> %p <-- %p [%d]\n", __func__, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled); + } + + shadow_memory_track_dirty_root_pages(shadow_memory_state, addr, i); + memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE); + } + } + } + } +} + +/* set operation */ +void nyx_snapshot_debug_set(fast_reload_t* self){ + /* TODO */ +} \ No newline at end of file diff --git a/nyx/snapshot/memory/backend/nyx_debug.h b/nyx/snapshot/memory/backend/nyx_debug.h new file mode 100644 index 0000000000..de4682be42 --- /dev/null +++ b/nyx/snapshot/memory/backend/nyx_debug.h @@ -0,0 +1,10 @@ +#pragma once + +#include "nyx/fast_vm_reload.h" + +void nyx_snapshot_debug_pre_init(void); +void nyx_snapshot_debug_init(fast_reload_t* self); +void nyx_snapshot_debug_enable(fast_reload_t* self); +void nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose); +void nyx_snapshot_debug_set(fast_reload_t* self); +void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose); diff --git a/nyx/snapshot/memory/backend/nyx_dirty_ring.c b/nyx/snapshot/memory/backend/nyx_dirty_ring.c new file mode 100644 index 0000000000..27aa3888f6 --- /dev/null +++ b/nyx/snapshot/memory/backend/nyx_dirty_ring.c @@ -0,0 +1,369 @@ +#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h" +#include "nyx/snapshot/helper.h" + +#include "sysemu/kvm.h" +#include "sysemu/kvm_int.h" + +#include +#include + +#define FAST_IN_RANGE(address, start, end) (address < end && address >= start) + +/* dirty ring specific defines */ +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 +#define KVM_EXIT_DIRTY_RING_FULL 31 +#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7) +#define KVM_CAP_DIRTY_LOG_RING 192 + +/* global vars */ +int dirty_ring_size = 0; +int dirty_ring_max_size_global = 0; +struct kvm_dirty_gfn *kvm_dirty_gfns = NULL; /* dirty ring mmap ptr */ +uint32_t kvm_dirty_gfns_index = 0; +uint32_t kvm_dirty_gfns_index_mask = 0; + + +static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size){ + struct kvm_enable_cap cap = { 0 }; + + cap.cap = KVM_CAP_DIRTY_LOG_RING; + cap.args[0] = ring_size; + + int ret = ioctl(vm_fd, KVM_ENABLE_CAP, &cap); + if(ret != 0){ + printf("[ ] KVM_ENABLE_CAP ioctl failed\n"); + } + + return ring_size; +} + +static int check_dirty_ring_size(int kvm_fd, int vm_fd){ + int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING); + if(ret < 0 ){ + printf("[ ] KVM_CAP_DIRTY_LOG_RING failed (dirty ring not supported?)\n"); + exit(1); + } + + printf("[*] Max Dirty Ring Size -> %d (Entries: %d)\n", ret, ret/(int)sizeof(struct kvm_dirty_gfn)); + + uint64_t dirty_ring_max_size = ret; //kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn); + + /* DIRTY RING -> 1MB in size results in 256M trackable memory */ + ret = vm_enable_dirty_ring(vm_fd, dirty_ring_max_size); + + if(ret < 0 ){ + printf("[ ] Enabling dirty ring (size: %ld) failed\n", dirty_ring_max_size); + exit(1); + } + + dirty_ring_max_size_global = dirty_ring_max_size; + return ret; +} + +static void allocate_dirty_ring(int kvm_vcpu, int vm_fd){ + assert(dirty_ring_size); + + if (dirty_ring_size) { + kvm_dirty_gfns = mmap(NULL, dirty_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, 
kvm_vcpu, PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET); + if (kvm_dirty_gfns == MAP_FAILED) { + printf("[ ] Dirty ring mmap failed!\n"); + exit(1); + } + } + printf("[*] Dirty ring mmap region located at %p\n", kvm_dirty_gfns); + + int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0); + assert(ret == 0); +} + +/* pre_init operation */ +void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd){ + dirty_ring_size = check_dirty_ring_size(kvm_fd, vm_fd); +} + +void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd){ + allocate_dirty_ring(kvm_fd, vm_fd); + + kvm_dirty_gfns_index = 0; + kvm_dirty_gfns_index_mask = ((dirty_ring_max_size_global/sizeof(struct kvm_dirty_gfn)) - 1); + +} + +static inline void dirty_ring_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, uint64_t slot, uint64_t gfn){ + + /* sanity check */ + assert((slot&0xFFFF0000) == 0); + + slot_t* kvm_region_slot = &self->kvm_region_slots[slot&0xFFFF]; + + if(test_and_set_bit(gfn, (void*)kvm_region_slot->bitmap) == false){ + + kvm_region_slot->stack[kvm_region_slot->stack_ptr] = gfn; + kvm_region_slot->stack_ptr++; + } +} + +static void dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, int vm_fd){ + struct kvm_dirty_gfn *entry = NULL; + int cleared = 0; + + //fprintf(stderr, "self->kvm_dirty_gfns_index -> %lx\n", kvm_dirty_gfns_index); + while(true){ + + entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask]; + + if((entry->flags & 0x3) == 0){ + break; + } + + if((entry->flags & 0x1) == 1){ + dirty_ring_collect(self, shadow_memory_state, blocklist, entry->slot, entry->offset); + cleared++; + entry->flags |= 0x2; // reset dirty entry + } + else{ + printf("[%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset); + fflush(stdout); + exit(1); + } + + kvm_dirty_gfns_index++; + } + + int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0); + //printf("KVM_RESET_DIRTY_RINGS -> (%d vs %d)\n", ret, cleared); + assert(ret == cleared); +} + +static void dirty_ring_flush(int vm_fd){ + struct kvm_dirty_gfn *entry = NULL; + int cleared = 0; + + //printf("self->kvm_dirty_gfns_index -> %lx\n", self->kvm_dirty_gfns_index); + while(true){ + + entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask]; + + if((entry->flags & 0x3) == 0){ + break; + } + + if((entry->flags & 0x1) == 1){ + cleared++; + entry->flags |= 0x2; // reset dirty entry + } + else{ + printf("[%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset); + fflush(stdout); + exit(1); + } + + kvm_dirty_gfns_index++; + } + + int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0); + //printf("KVM_RESET_DIRTY_RINGS -> (%d vs %ld)\n", ret, cleared); + assert(ret == cleared); +} + +/* init operation */ +nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){ + nyx_dirty_ring_t* self = malloc(sizeof(nyx_dirty_ring_t)); + memset(self, 0, sizeof(nyx_dirty_ring_t)); + + assert(kvm_state); + + + KVMMemoryListener *kml = kvm_get_kml(0); + KVMSlot *mem; + + //printf("kml -> %p\n", kml); + //printf("MEM-SLOTS -> %d\n", kvm_get_max_memslots()); + for (int i = 0; i < kvm_get_max_memslots(); i++) { + mem = &kml->slots[i]; + + if(mem->start_addr == 0 && mem->memory_size == 0){ + break; + } + + //printf("[%p] SLOT: %d - start: %lx - size: %lx - flags: %x\n", mem, mem->slot, mem->start_addr, mem->memory_size, mem->flags); + + 
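/*
 * For context: each kvm_dirty_gfn entry in the mmap'ed ring cycles through
 * three states. KVM publishes a dirty page by setting bit 0; userspace
 * acknowledges the entry by setting bit 1 after consuming it; the
 * KVM_RESET_DIRTY_RINGS ioctl then reclaims all acknowledged entries and
 * re-enables write protection. (Upstream names these bits
 * KVM_DIRTY_GFN_F_DIRTY and KVM_DIRTY_GFN_F_RESET.) The flag tests used by
 * the flush loops above can be written compactly as a sketch:
 */
#define DIRTY_GFN_UNUSED(e)    (((e)->flags & 0x3) == 0x0) /* slot empty: stop draining  */
#define DIRTY_GFN_PUBLISHED(e) (((e)->flags & 0x1) == 0x1) /* KVM wrote a dirty gfn      */
#define DIRTY_GFN_HARVEST(e)   ((e)->flags |= 0x2)         /* mark consumed for the next reset */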
self->kvm_region_slots_num++; + } + + /* + for(int i = 0; i < shadow_memory->ram_regions_num; i++){ + printf("[%d] base: %lx - size: %lx\n", i, shadow_memory->ram_regions[i].base, shadow_memory->ram_regions[i].size); + } + */ + + self->kvm_region_slots = malloc(sizeof(slot_t) * self->kvm_region_slots_num); + memset(self->kvm_region_slots, 0, sizeof(slot_t) * self->kvm_region_slots_num); + + for (int i = 0; i < kvm_get_max_memslots(); i++) { + mem = &kml->slots[i]; + + if(mem->start_addr == 0 && mem->memory_size == 0){ + break; + } + + self->kvm_region_slots[i].enabled = (mem->flags&KVM_MEM_READONLY) == 0; + self->kvm_region_slots[i].bitmap = malloc(BITMAP_SIZE(mem->memory_size)); + self->kvm_region_slots[i].stack = malloc(DIRTY_STACK_SIZE(mem->memory_size)); + + memset(self->kvm_region_slots[i].bitmap, 0, BITMAP_SIZE(mem->memory_size)); + memset(self->kvm_region_slots[i].stack, 0, DIRTY_STACK_SIZE(mem->memory_size)); + + self->kvm_region_slots[i].bitmap_size = BITMAP_SIZE(mem->memory_size); + + self->kvm_region_slots[i].stack_ptr = 0; + + if(self->kvm_region_slots[i].enabled){ + bool ram_region_found = false; + //printf("SEARCHING %lx %lx\n", mem->start_addr, mem->memory_size); + for(int j = 0; j < shadow_memory->ram_regions_num; j++){ + + if(FAST_IN_RANGE(mem->start_addr, shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size))){ + assert(FAST_IN_RANGE((mem->start_addr+mem->memory_size-1), shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size))); + + self->kvm_region_slots[i].region_id = j; + self->kvm_region_slots[i].region_offset = mem->start_addr - shadow_memory->ram_regions[j].base; + ram_region_found = true; + break; + } + } + assert(ram_region_found); + } + } + + /* + for(int i = 0; i < self->kvm_region_slots_num; i++){ + printf("[%d].enabled = %d\n", i, self->kvm_region_slots[i].enabled); + printf("[%d].bitmap = %p\n", i, self->kvm_region_slots[i].bitmap); + printf("[%d].stack = %p\n", i, self->kvm_region_slots[i].stack); + printf("[%d].stack_ptr = %ld\n", i, self->kvm_region_slots[i].stack_ptr); + if(self->kvm_region_slots[i].enabled){ + printf("[%d].region_id = %d\n", i, self->kvm_region_slots[i].region_id); + printf("[%d].region_offset = 0x%lx\n", i, self->kvm_region_slots[i].region_offset); + } + else{ + printf("[%d].region_id = -\n", i); + printf("[%d].region_offset = -\n", i); + } + } + */ + + dirty_ring_flush(kvm_get_vm_fd(kvm_state)); + return self; +} + +static void restore_memory(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + void* host_addr = NULL; + void* snapshot_addr = NULL; + uint64_t physical_addr = 0; + uint64_t gfn = 0; + uint64_t entry_offset_addr = 0; + + for(uint8_t j = 0; j < self->kvm_region_slots_num; j++){ + slot_t* kvm_region_slot = &self->kvm_region_slots[j]; + if(kvm_region_slot->enabled && kvm_region_slot->stack_ptr){ + for(uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++){ + gfn = kvm_region_slot->stack[i]; + + entry_offset_addr = kvm_region_slot->region_offset + (gfn<<12); + + physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr; + + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ + continue; + } + + host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr; + + if(shadow_memory_state->incremental_enabled){ + snapshot_addr = 
shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr; + } + else{ + snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].snapshot_region_ptr + entry_offset_addr; + } + + memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE); + + clear_bit(gfn, (void*)kvm_region_slot->bitmap); + } + kvm_region_slot->stack_ptr = 0; + } + } +} + +static void save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + void* host_addr = NULL; + void* incremental_addr = NULL; + uint64_t physical_addr = 0; + uint64_t gfn = 0; + uint64_t entry_offset_addr = 0; + + for(uint8_t j = 0; j < self->kvm_region_slots_num; j++){ + slot_t* kvm_region_slot = &self->kvm_region_slots[j]; + if(kvm_region_slot->enabled && kvm_region_slot->stack_ptr){ + for(uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++){ + gfn = kvm_region_slot->stack[i]; + + entry_offset_addr = kvm_region_slot->region_offset + (gfn<<12); + + physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr; + + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ + continue; + } + + host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr; + incremental_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr; + + shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, kvm_region_slot->region_id); + memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE); + + clear_bit(gfn, (void*)kvm_region_slot->bitmap); + } + kvm_region_slot->stack_ptr = 0; + } + } +} + +//entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)]; + + +void nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + +/* + static int perf_counter = 0; + + if((perf_counter%1000) == 0){ + fprintf(stderr, "perf_counter -> %d\n", perf_counter); //, self->test_total, self->test); + } + + perf_counter++; +*/ + + dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state)); + restore_memory(self, shadow_memory_state, blocklist); +} + +void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + + dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state)); + save_root_pages(self, shadow_memory_state, blocklist); +} + +/* enable operation */ + +/* restore operation */ + + +void nyx_snapshot_nyx_dirty_ring_flush(void){ + dirty_ring_flush(kvm_get_vm_fd(kvm_state)); +} + +void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state)); +} diff --git a/nyx/snapshot/memory/backend/nyx_dirty_ring.h b/nyx/snapshot/memory/backend/nyx_dirty_ring.h new file mode 100644 index 0000000000..d0fb7c89bb --- /dev/null +++ b/nyx/snapshot/memory/backend/nyx_dirty_ring.h @@ -0,0 +1,43 @@ +#pragma once + +#include "nyx/snapshot/memory/block_list.h" +#include "nyx/snapshot/memory/shadow_memory.h" + +struct kvm_dirty_gfn { + uint32_t flags; + uint32_t slot; + uint64_t offset; +}; + +typedef struct slot_s{ + bool enabled; /* set if slot is not marked as read-only */ + + uint8_t region_id; /* 
shadow_memory region id */ + uint64_t region_offset; /* shadow_memory region offset*/ + + void* bitmap; + + uint64_t bitmap_size; // remove me later + uint64_t* stack; + uint64_t stack_ptr; +} slot_t; + +typedef struct nyx_dirty_ring_s{ + slot_t* kvm_region_slots; + uint8_t kvm_region_slots_num; + +} nyx_dirty_ring_t; + +/* must be called before KVM_SET_USER_MEMORY_REGION & KVM_CREATE_VCPU */ +void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd); + +/* must be called right after KVM_CREATE_VCPU */ +void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd); + +nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory); + +void nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); +void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); + +void nyx_snapshot_nyx_dirty_ring_flush(void); +void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); diff --git a/nyx/snapshot/memory/backend/nyx_fdl.c b/nyx/snapshot/memory/backend/nyx_fdl.c new file mode 100644 index 0000000000..169ac5923c --- /dev/null +++ b/nyx/snapshot/memory/backend/nyx_fdl.c @@ -0,0 +1,345 @@ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" + +#include "nyx/memory_access.h" + +#include +#include + +#include "nyx/snapshot/helper.h" +#include "nyx/snapshot/memory/backend/nyx_fdl.h" +#include "nyx/snapshot/memory/nyx_fdl_user.h" + +/* debug option for the FDL constructor */ +//#define DEBUG_VMX_FDL_ALLOC + +/* additional output to debug the FDL restore operation */ +//#define SHOW_NUM_DIRTY_PAGES + +/* option to include restore of VRAM memory */ +//#define RESET_VRAM +//#define DEBUG_FDL_VRAM + +nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){ + + static bool fdl_created = false; + assert(fdl_created == false); /* not sure if we're able to create another FDL instance -> probably not */ + fdl_created = true; + + nyx_fdl_t* self = malloc(sizeof(nyx_fdl_t)); + memset(self, 0, sizeof(nyx_fdl_t)); + + int ret; + CPUState* cpu = qemu_get_cpu(0); + kvm_cpu_synchronize_state(cpu); + + struct fdl_conf configuration; + + assert(kvm_state); + self->vmx_fdl_fd = kvm_vm_ioctl(kvm_state, KVM_VMX_FDL_SETUP_FD, (unsigned long)0); + + configuration.num = 0; + //memset(&self->fdl_data2, 0, sizeof(struct fdl_data_t2)); + + for(uint8_t i = 0; i < shadow_memory->ram_regions_num; i++){ + configuration.areas[configuration.num].base_address = shadow_memory->ram_regions[i].base; // block->mr->addr; + configuration.areas[configuration.num].size = shadow_memory->ram_regions[i].size; //MEM_SPLIT_START; //block->used_length; + configuration.num++; + } + + ret = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_SET, &configuration); + assert(ret == 0); + +#ifdef DEBUG_VMX_FDL_ALLOC + printf("KVM_VMX_FDL_SET: %d\n", ret); + printf("configuration.mmap_size = 0x%lx\n", configuration.mmap_size); + for(uint8_t i = 0; i < configuration.num; i++){ + printf("configuration.areas[%d].mmap_bitmap_offset = 0x%lx\n", i, configuration.areas[i].mmap_bitmap_offset); + printf("configuration.areas[%d].mmap_stack_offset = 0x%lx\n", i, configuration.areas[i].mmap_stack_offset); + } +#endif + + self->vmx_fdl_mmap = mmap(NULL, configuration.mmap_size, PROT_READ | PROT_WRITE, 
MAP_SHARED, self->vmx_fdl_fd, 0); + + assert(self->vmx_fdl_mmap != (void*)0xFFFFFFFFFFFFFFFF); + + for(uint8_t i = 0; i < configuration.num; i++){ + self->entry[i].stack = self->vmx_fdl_mmap + configuration.areas[i].mmap_stack_offset; + self->entry[i].bitmap = self->vmx_fdl_mmap + configuration.areas[i].mmap_bitmap_offset; + +#ifdef DEBUG_VMX_FDL_ALLOC + printf("fdl_stacks[%d] -> %p\n", i, self->entry[i].stack); + printf("fdl_bitmaps[%d] -> %p\n", i, self->entry[i].bitmap); +#endif + } + + self->num = configuration.num; + + struct fdl_result result; + memset(&result, 0, sizeof(struct fdl_result)); + ret = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result); + +#ifdef DEBUG_VMX_FDL_ALLOC + printf("result: %d\n", result.num); + for(uint8_t i = 0; i < result.num; i++){ + printf("result.values[%d]: %ld\n", i, result.values[i]); + } +#endif + + return self; +} + + +/* TODO? */ +static void nyx_snapshot_nyx_fdl_unset_blocklisted_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + + for(uint32_t i = 0; i < blocklist->pages_num; i++){ + //cpu_physical_memory_test_and_clear_dirty(base_offset+self->black_list_pages[i], TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION); + + if(blocklist->pages[i] >= MEM_SPLIT_START){ + + + uint64_t offset_addr = blocklist->pages[i]-MEM_SPLIT_START; + + //fprintf(stderr, "%s: %lx -> %lx\n", __func__, self->black_list_pages[i], offset_addr); + //abort(); + clear_bit((long)offset_addr>>12, (unsigned long *)self->entry[1].bitmap); + //clear_bit((long)offset_addr>>12, (unsigned long *)self->fdl_data2.entry[1].fdl_user_bitmap); + } + else{ + uint64_t offset_addr = blocklist->pages[i]; + + clear_bit((long)offset_addr>>12, (unsigned long *)self->entry[0].bitmap); + //clear_bit((long)offset_addr>>12, (unsigned long *)self->fdl_data2.entry[0].fdl_user_bitmap); + } + } +} + +#define MEMSET_BITMAP + +#ifdef MEMSET_BITMAP +static void nyx_snapshot_nyx_fdl_restore_new(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + + void* current_region = NULL; + + struct fdl_result result; + memset(&result, 0, sizeof(struct fdl_result)); + int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result); + assert(!res); + + //nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist); + + for(uint8_t i = 0; i < result.num; i++){ +#ifdef SHOW_NUM_DIRTY_PAGES + printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10); +#endif + + if(shadow_memory_state->incremental_enabled){ + current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr; + } + else{ + current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr; + } + + for(uint64_t j = 0; j < result.values[i]; j++){ + + uint64_t physical_addr = self->entry[i].stack[j]; + uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base; + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr; + void* snapshot_addr = current_region + entry_offset_addr; + + + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ +#ifdef DEBUG_VERFIY_BITMAP + if(!is_black_listed_addr(self, entry_offset_addr)){ + printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr); + abort(); + } +#endif + continue; // blacklisted page + } + + clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap); + memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE); + } + + } +#ifdef RESET_VRAM + 
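The restore loop above walks the kernel-filled dirty stack once per region and copies each page back from whichever snapshot layer is currently active. Condensed to a single page, with names mirroring the surrounding code (restore_one_page itself is hypothetical):

static void restore_one_page(nyx_fdl_t* self, shadow_memory_t* sm,
                             snapshot_page_blocklist_t* blocklist,
                             uint8_t region, uint64_t physical_addr)
{
    uint64_t offset = physical_addr - sm->ram_regions[region].base;

    /* pages that must survive a reset (e.g. the payload buffer) are skipped */
    if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr)) {
        return;
    }

    void* host = sm->ram_regions[region].host_region_ptr + offset;
    void* src  = sm->incremental_enabled
                   ? sm->ram_regions[region].incremental_region_ptr + offset
                   : sm->ram_regions[region].snapshot_region_ptr + offset;

    clear_bit(offset >> 12, (void*)self->entry[region].bitmap);
    memcpy(host, src, TARGET_PAGE_SIZE);
}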
//nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state); +#endif +} + +#endif + +/* restore operation */ +void nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + +/* not sure which one is faster -> benchmark ASAP */ +#ifdef MEMSET_BITMAP + nyx_snapshot_nyx_fdl_restore_new(self, shadow_memory_state, blocklist); +#else + nyx_snapshot_nyx_fdl_restore_old(self, shadow_memory_state, blocklist); +#endif + +} + +/* +void nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + + void* current_region = NULL; + + struct fdl_result result; + memset(&result, 0, sizeof(struct fdl_result)); + int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result); + assert(!res); + + //nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist); + + + for(uint8_t i = 0; i < result.num; i++){ +#ifdef SHOW_NUM_DIRTY_PAGES + printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10); +#endif + + if(shadow_memory_state->tmp_snapshot.enabled){ + current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr; + } + else{ + current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr; + } + + for(uint64_t j = 0; j < result.values[i]; j++){ + + uint64_t physical_addr = self->fdl_data2.entry[i].fdl_stack[j]; + uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base; + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr; + void* snapshot_addr = current_region + entry_offset_addr; + + + // optimize this + if(test_and_clear_bit((long)(entry_offset_addr>>12), (unsigned long*)self->fdl_data2.entry[i].fdl_bitmap) == 0 && snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ +#ifdef DEBUG_VERFIY_BITMAP + if(!is_black_listed_addr(self, entry_offset_addr)){ + printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr); + abort(); + } +#endif + printf("SKIP\n"); + continue; // blacklisted page + } + + memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE); + } + } +#ifdef RESET_VRAM + //nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state); +#endif +} +*/ + +/* +void nyx_snapshot_nyx_fdl_restore2(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist); + + struct fdl_result result; + memset(&result, 0, sizeof(struct fdl_result)); + int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result); + assert(!res); + + + for(uint8_t i = 0; i < result.num; i++){ +#ifdef SHOW_NUM_DIRTY_PAGES + printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10); +#endif + for(uint64_t j = 0; j < result.values[i]; j++){ + uint64_t addr = self->fdl_data2.entry[i].fdl_stack[j]; + uint64_t offset_addr = addr - self->shadow_memory_state[i].base; + + + if(test_and_clear_bit((long)(offset_addr>>12), (unsigned long*)self->fdl_data2.entry[i].fdl_bitmap) == 0){ +#ifdef DEBUG_VERFIY_BITMAP + if(!is_black_listed_addr(self, offset_addr)){ + printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, offset_addr); + abort(); + } +#endif + continue; // blacklisted page + } + + //assert(test_and_clear_bit(offset_addr>>12, fdl_data2.entry[i].fdl_bitmap)); + //fdl_data2.entry[i].fdl_bitmap[(offset_addr/0x1000)/8] = 0; + + //printf("DIRTY -> 0x%lx [BITMAP: %d] 
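The dispatcher above notes that the two restore variants still need to be benchmarked; a throwaway harness along these lines (hypothetical, debug builds only) would settle the question:

#include <time.h>

static uint64_t time_restore_ns(nyx_fdl_t* self, shadow_memory_t* sm,
                                snapshot_page_blocklist_t* blocklist)
{
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    nyx_snapshot_nyx_fdl_restore(self, sm, blocklist);
    clock_gettime(CLOCK_MONOTONIC, &t1);
    return (uint64_t)(t1.tv_sec - t0.tv_sec) * 1000000000ull +
           (uint64_t)(t1.tv_nsec - t0.tv_nsec);
}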
[%d]\n", addr, fdl_data2.entry[i].fdl_bitmap[(offset_addr/0x1000)/8], test_bit(offset_addr>>12, fdl_data2.entry[i].fdl_bitmap)); + + + if(shadow_memory_state->incremental_enabled){ + //memcpy((void*)(fdl_data2.entry[i].host_ptr+offset_addr), (void*)(self->tmp_snapshot.shadow_memory[i]+offset_addr), TARGET_PAGE_SIZE); + memcpy((void*)(self->fdl_data2.entry[i].host_ptr+offset_addr), (void*)(self->fdl_data2.entry[i].tmp_shadow_ptr+offset_addr), TARGET_PAGE_SIZE); + } + else{ + memcpy((void*)(self->fdl_data2.entry[i].host_ptr+offset_addr), (void*)(self->fdl_data2.entry[i].shadow_ptr+offset_addr), TARGET_PAGE_SIZE); + } + } + } +#ifdef RESET_VRAM + //nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state); +#endif +} +*/ + + + +void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + struct fdl_result result; + memset(&result, 0, sizeof(struct fdl_result)); + int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result); + assert(!res); + + //nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist); + + for(uint8_t i = 0; i < result.num; i++){ +#ifdef SHOW_NUM_DIRTY_PAGES + printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10); +#endif + + for(uint64_t j = 0; j < result.values[i]; j++){ + + uint64_t physical_addr = self->entry[i].stack[j]; + uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base; + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr; + void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr; + //void* snapshot_addr = shadow_memory_state->ram_regions[i].snapshot_region_ptr + entry_offset_addr; + + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ +#ifdef DEBUG_VERFIY_BITMAP + if(!is_black_listed_addr(self, entry_offset_addr)){ + printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr); + abort(); + } +#endif + //printf("SKIP\n"); + continue; // blacklisted page + } + //printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr); + + clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap); + shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i); + memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE); + } + + } +} + + + + + diff --git a/nyx/snapshot/memory/backend/nyx_fdl.h b/nyx/snapshot/memory/backend/nyx_fdl.h new file mode 100644 index 0000000000..fc9b7b55ba --- /dev/null +++ b/nyx/snapshot/memory/backend/nyx_fdl.h @@ -0,0 +1,59 @@ +#pragma once + +#include "nyx/snapshot/memory/block_list.h" +#include "nyx/snapshot/memory/shadow_memory.h" + +#define STATE_BUFFER 0x8000000 /* up to 128MB */ + +#define USER_FDL_SLOTS 0x400000 /* fix this later */ + +#define KVM_VMX_FDL_SETUP_FD _IO(KVMIO, 0xe5) +#define KVM_VMX_FDL_SET _IOW(KVMIO, 0xe6, __u64) +#define KVM_VMX_FDL_FLUSH _IO(KVMIO, 0xe7) +#define KVM_VMX_FDL_GET_INDEX _IOR(KVMIO, 0xe8, __u64) + + +#define FAST_IN_RANGE(address, start, end) (address < end && address >= start) + +#define FDL_MAX_AREAS 8 + +struct fdl_area{ + uint64_t base_address; + uint64_t size; + uint64_t mmap_bitmap_offset; + uint64_t mmap_stack_offset; + uint64_t mmap_bitmap_size; + uint64_t mmap_stack_size; +}; + +struct fdl_conf{ + uint8_t num; + uint64_t mmap_size; + struct fdl_area areas[FDL_MAX_AREAS]; +}; + +struct fdl_result{ + uint8_t num; + uint64_t values[FDL_MAX_AREAS]; +}; + +typedef 
struct nyx_fdl_s{ + /* vmx_fdl file descriptor */ + int vmx_fdl_fd; + + /* mmap mapping of fdl data -> might be useful for destructor */ + void* vmx_fdl_mmap; + + struct { + uint64_t* stack; + uint8_t* bitmap; + }entry[FDL_MAX_AREAS]; + + uint8_t num; + +}nyx_fdl_t; + +nyx_fdl_t* nyx_fdl_init(shadow_memory_t* self); +void nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); + +void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); diff --git a/nyx/snapshot/memory/block_list.c b/nyx/snapshot/memory/block_list.c new file mode 100644 index 0000000000..70e99a74e4 --- /dev/null +++ b/nyx/snapshot/memory/block_list.c @@ -0,0 +1,73 @@ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" + +#include "nyx/memory_access.h" + +#include "nyx/snapshot/memory/block_list.h" +#include "nyx/snapshot/helper.h" +#include "nyx/snapshot/memory/shadow_memory.h" + +#define REALLOC_SIZE 0x8000 + +//#define DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST + + +snapshot_page_blocklist_t* snapshot_page_blocklist_init(void){ + + snapshot_page_blocklist_t* self = malloc(sizeof(snapshot_page_blocklist_t)); + + uint64_t ram_size = get_ram_size(); + //printf("%s: ram_size: 0x%lx\n", __func__, ram_size); + + self->phys_area_size = ram_size <= MEM_SPLIT_START ? ram_size : ram_size + (MEM_SPLIT_END-MEM_SPLIT_START); + + //printf("%s: phys_area_size: 0x%lx\n", __func__, self->phys_area_size); + + self->phys_bitmap = malloc(BITMAP_SIZE(self->phys_area_size)); + memset(self->phys_bitmap, 0x0, BITMAP_SIZE(self->phys_area_size)); + + if(ram_size > MEM_SPLIT_START){ + memset(self->phys_bitmap+BITMAP_SIZE(MEM_SPLIT_START), 0xff, BITMAP_SIZE((MEM_SPLIT_END-MEM_SPLIT_START))); + } + + self->pages_num = 0; + self->pages_size = 0; + self->pages = malloc(sizeof(uint64_t) * REALLOC_SIZE); + + return self; +} + +void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr){ + if(phys_addr == -1){ + fprintf(stderr, "ERROR %s: phys_addr=%lx\n", __func__, phys_addr); + return; + } + assert(self != NULL); + + assert(phys_addr < self->phys_area_size); + + if(self->pages_num <= self->pages_size){ + self->pages_size += REALLOC_SIZE; + self->pages = realloc(self->pages, sizeof(uint64_t) * self->pages_size); + } + + self->pages[self->pages_num] = phys_addr; + self->pages_num++; + + /* check if bit is empty */ + assert(test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) == 0); + + /* set bit for lookup */ + set_bit(phys_addr>>12, (unsigned long *)self->phys_bitmap); + + +#ifdef DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST + printf("%s: %lx\n", __func__, phys_addr); +#endif +} diff --git a/nyx/snapshot/memory/block_list.h b/nyx/snapshot/memory/block_list.h new file mode 100644 index 0000000000..ad4de1bd72 --- /dev/null +++ b/nyx/snapshot/memory/block_list.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include "nyx/snapshot/memory/shadow_memory.h" + +typedef struct snapshot_page_blocklist_s{ + + /* total number of blocklisted page frames */ + uint64_t pages_num; + + /* lookup array */ + uint64_t* pages; + + /* current size of our array */ + uint64_t pages_size; + + /* lookup bitmap of guest's physical memory layout (PCI-area between 3GB-4GB is set by default) */ + uint8_t* phys_bitmap; + + /* area of guest's physical memory (including RAM + 
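The blocklist keeps two views of the same set: a flat array (pages) for iteration and a physical-memory bitmap for O(1) membership tests on the hot restore path. Usage in a nutshell (the addresses are illustrative):

snapshot_page_blocklist_t* bl = snapshot_page_blocklist_init();

/* register a page that must never be restored, e.g. a shared buffer */
snapshot_page_blocklist_add(bl, 0x7000);

assert(snapshot_page_blocklist_check_phys_addr(bl, 0x7000) == true);
assert(snapshot_page_blocklist_check_phys_addr(bl, 0x8000) == false);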
PCI-hole) */ + uint64_t phys_area_size; +}snapshot_page_blocklist_t; + + +//snapshot_page_blocklist_t* snapshot_page_blocklist_init(shadow_memory_t* snapshot); + +void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr); + +/* returns true if phys_addr is on the blocklis */ +static inline bool snapshot_page_blocklist_check_phys_addr(snapshot_page_blocklist_t* self, uint64_t phys_addr){ + return phys_addr < self->phys_area_size && test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) != 0; +} + +snapshot_page_blocklist_t* snapshot_page_blocklist_init(void); diff --git a/nyx/snapshot/memory/nyx_fdl_user.c b/nyx/snapshot/memory/nyx_fdl_user.c new file mode 100644 index 0000000000..fec247d1bc --- /dev/null +++ b/nyx/snapshot/memory/nyx_fdl_user.c @@ -0,0 +1,195 @@ +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "target/i386/cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" + +#include "nyx/memory_access.h" + +#include +#include + +#include "nyx/snapshot/helper.h" +#include "nyx/snapshot/memory/shadow_memory.h" +#include "nyx/snapshot/memory/nyx_fdl_user.h" + +/* debug option */ +//#define DEBUG_USER_FDL + +/* init operation */ +nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state){ + + nyx_fdl_user_t* self = malloc(sizeof(nyx_fdl_user_t)); + memset(self, 0, sizeof(nyx_fdl_user_t)); + + /* get rid of that? */ + self->num = shadow_memory_state->ram_regions_num; + + for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){ + self->entry[i].stack = malloc(DIRTY_STACK_SIZE(shadow_memory_state->ram_regions[i].size)); + self->entry[i].bitmap = malloc(BITMAP_SIZE(shadow_memory_state->ram_regions[i].size)); + } + //printf("%s -> %p\n", __func__, self); + return self; +} + +/* enable operation */ +void nyx_fdl_user_enable(nyx_fdl_user_t* self){ + assert(self); + self->enabled = true; +} + +static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t* self){ + if(self){ + for(uint8_t i = 0; i < self->num; i++){ + self->entry[i].pos = 0; + } + } +} + +/* reset operation */ +void nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + if(self){ + + void* current_region = NULL; + + + for(uint8_t i = 0; i < self->num; i++){ +#ifdef DEBUG_USER_FDL + printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10); +#endif + + if(shadow_memory_state->incremental_enabled){ + current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr; + } + else{ + current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr; + } + + for(uint64_t j = 0; j < self->entry[i].pos; j++){ + uint64_t physical_addr = self->entry[i].stack[j]; + uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base; + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr; + void* snapshot_addr = current_region + entry_offset_addr; + + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ + continue; + } + +#ifdef DEBUG_USER_FDL + printf("%s -> %p <-- %p\n", __func__, host_addr, snapshot_addr); +#endif + clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap); + memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE); + } + + } + + } + + nyx_snapshot_user_fdl_reset(self); +} + +/* set operation (mark pf as dirty) */ +void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* 
shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length){ + if(length < 0x1000){ + length = 0x1000; + } + + if(self && self->enabled && length >= 0x1000){ + + uint8_t ram_area = 0xff; + + /* optimize this? */ + addr = ram_offset_to_address(addr); + + + switch(MAX_REGIONS-shadow_memory_state->ram_regions_num){ + case 0: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[7].base, shadow_memory_state->ram_regions[7].base+(shadow_memory_state->ram_regions[7].size-1)) ? 7 : ram_area; + case 1: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[6].base, shadow_memory_state->ram_regions[6].base+(shadow_memory_state->ram_regions[6].size-1)) ? 6 : ram_area; + case 2: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[5].base, shadow_memory_state->ram_regions[5].base+(shadow_memory_state->ram_regions[5].size-1)) ? 5 : ram_area; + case 3: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[4].base, shadow_memory_state->ram_regions[4].base+(shadow_memory_state->ram_regions[4].size-1)) ? 4 : ram_area; + case 4: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[3].base, shadow_memory_state->ram_regions[3].base+(shadow_memory_state->ram_regions[3].size-1)) ? 3 : ram_area; + case 5: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[2].base, shadow_memory_state->ram_regions[2].base+(shadow_memory_state->ram_regions[2].size-1)) ? 2 : ram_area; + case 6: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[1].base, shadow_memory_state->ram_regions[1].base+(shadow_memory_state->ram_regions[1].size-1)) ? 1 : ram_area; + case 7: + ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[0].base, shadow_memory_state->ram_regions[0].base+(shadow_memory_state->ram_regions[0].size-1)) ? 0 : ram_area; + default: + break; + } + + //ram_area = FAST_IN_RANGE(addr, fdl_data2.entry[0].base, fdl_data2.entry[0].base+(fdl_data2.entry[0].size-1)) ? 
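The switch above enters at case MAX_REGIONS - ram_regions_num and falls through, so only the ranges that actually exist are tested; it is a manually unrolled version of the loop below. Note that the base+(size-1) upper bound combined with the exclusive '<' in FAST_IN_RANGE leaves the very last byte of each region unmatched, which is harmless for page-granular addresses but worth knowing. The plain form, for reference:

static uint8_t find_ram_region(shadow_memory_t* sm, uint64_t addr)
{
    for (uint8_t i = 0; i < sm->ram_regions_num; i++) {
        if (FAST_IN_RANGE(addr, sm->ram_regions[i].base,
                          sm->ram_regions[i].base + sm->ram_regions[i].size)) {
            return i;
        }
    }
    return 0xff; /* not found -> caller aborts */
}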
0 : ram_area; + + if(ram_area == 0xff){ + printf("ERROR: %s %lx [%d]\n", __func__, addr, ram_area); + abort(); + return; + } + + + for(uint64_t offset = 0; offset < length; offset+=0x1000){ + + uint64_t current_addr = (addr+offset) & 0xFFFFFFFFFFFFF000; + + long pfn = (long) ((current_addr-shadow_memory_state->ram_regions[ram_area].base)>>12); + + assert(self->entry[ram_area].bitmap); + + /* todo -> better handling of nyx_fdl_state */ + if(!test_bit(pfn, (const unsigned long*)self->entry[ram_area].bitmap)){ + set_bit(pfn, (unsigned long*)self->entry[ram_area].bitmap); + + self->entry[ram_area].stack[self->entry[ram_area].pos] = current_addr & 0xFFFFFFFFFFFFF000; + self->entry[ram_area].pos++; + +#ifdef DEBUG_USER_FDL + printf("USER DIRTY -> 0x%lx\n", current_addr & 0xFFFFFFFFFFFFF000); +#endif + } + } + } +} + +void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){ + + for(uint8_t i = 0; i < self->num; i++){ +#ifdef DEBUG_USER_FDL + printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10); +#endif + + for(uint64_t j = 0; j < self->entry[i].pos; j++){ + uint64_t physical_addr = self->entry[i].stack[j]; + uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base; + + void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr; + void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr; + + if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){ + printf("%s: 0x%lx is dirty\n", __func__, physical_addr); + continue; + } +#ifdef DEBUG_USER_FDL + printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr); +#endif + //printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr); + + clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap); + shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i); + memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE); + + } + } + + nyx_snapshot_user_fdl_reset(self); +} \ No newline at end of file diff --git a/nyx/snapshot/memory/nyx_fdl_user.h b/nyx/snapshot/memory/nyx_fdl_user.h new file mode 100644 index 0000000000..d20f51837d --- /dev/null +++ b/nyx/snapshot/memory/nyx_fdl_user.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include "nyx/snapshot/helper.h" +#include "nyx/snapshot/memory/block_list.h" +#include "nyx/snapshot/memory/shadow_memory.h" +#include "nyx/snapshot/memory/backend/nyx_fdl.h" + +typedef struct nyx_fdl_user_s{ + struct { + uint64_t* stack; + uint8_t* bitmap; + uint64_t pos; + }entry[MAX_REGIONS]; + + uint8_t num; + bool enabled; +}nyx_fdl_user_t; + +nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state); +void nyx_fdl_user_enable(nyx_fdl_user_t* self); +void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length); + +void nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); +void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist); diff --git a/nyx/snapshot/memory/shadow_memory.c b/nyx/snapshot/memory/shadow_memory.c new file mode 100644 index 0000000000..ec4fba9513 --- /dev/null +++ b/nyx/snapshot/memory/shadow_memory.c @@ -0,0 +1,414 @@ + +#include "qemu/osdep.h" +#include 
"sysemu/sysemu.h" +#include "cpu.h" +#include "qemu/main-loop.h" + +#include "exec/ram_addr.h" +#include "qemu/rcu_queue.h" +#include "migration/migration.h" + +#include "nyx/debug.h" +#include "nyx/memory_access.h" + +#include "nyx/snapshot/memory/shadow_memory.h" +#include "nyx/snapshot/helper.h" + +typedef struct fast_reload_dump_head_s{ + uint32_t shadow_memory_regions; + uint32_t ram_region_index; // remove +} fast_reload_dump_head_t; + +typedef struct fast_reload_dump_entry_s{ + uint64_t shadow_memory_offset; + char idstr[256]; +} fast_reload_dump_entry_t; + + +static void shadow_memory_set_incremental_ptrs(shadow_memory_t* self){ + for(uint8_t i = 0; i < self->ram_regions_num; i++){ + self->ram_regions[i].incremental_region_ptr = self->incremental_ptr + self->ram_regions[i].offset; + } +} + +static void shadow_memory_pre_alloc_incremental(shadow_memory_t* self){ + self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0); + shadow_memory_set_incremental_ptrs(self); +} + +static void shadow_memory_init_generic(shadow_memory_t* self){ + self->root_track_pages_num = 0; + self->root_track_pages_size = 32 << 10; + self->root_track_pages_stack = malloc(sizeof(uint64_t)*self->root_track_pages_size); + shadow_memory_pre_alloc_incremental(self); + + self->incremental_enabled = false; +} + +shadow_memory_t* shadow_memory_init(void){ + + RAMBlock *block; + RAMBlock* block_array[10]; + void* snapshot_ptr_offset_array[10]; + + shadow_memory_t* self = malloc(sizeof(shadow_memory_t)); + memset(self, 0x0, sizeof(shadow_memory_t)); + + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + self->memory_size += block->used_length; + } + + self->snapshot_ptr_fd = memfd_create("in_memory_root_snapshot", MFD_CLOEXEC | MFD_ALLOW_SEALING); + assert(!ftruncate(self->snapshot_ptr_fd, self->memory_size)); + fcntl(self->snapshot_ptr_fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL); + + //printf("MMAP -> 0x%lx\n", self->memory_size); + + self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE , MAP_SHARED , self->snapshot_ptr_fd, 0); + madvise(self->snapshot_ptr, self->memory_size, MADV_RANDOM | MADV_MERGEABLE); + + QEMU_PT_PRINTF(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx", self->snapshot_ptr, self->memory_size); + + + + uint64_t offset = 0; + uint8_t i = 0; + uint8_t regions_num = 0; + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + QEMU_PT_PRINTF(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host); + //printf("%lx %lx %lx\t%s\t%p\n", block->offset, block->used_length, block->max_length, block->idstr, block->host); + + block_array[i] = block; + + memcpy(self->snapshot_ptr+offset, block->host, block->used_length); + snapshot_ptr_offset_array[i++] = self->snapshot_ptr+offset; + offset += block->used_length; + regions_num++; + } + + for(uint8_t i = 0; i < regions_num; i++){ + block = block_array[i]; + if(!block->mr->readonly){ + + if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){ + + self->ram_regions[self->ram_regions_num].ram_region = i; + self->ram_regions[self->ram_regions_num].base = block->mr->addr; + self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START; + self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0]; + self->ram_regions[self->ram_regions_num].host_region_ptr = block->host; + self->ram_regions[self->ram_regions_num].snapshot_region_ptr = 
self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset; + self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1); + memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1); + strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr); + self->ram_regions_num++; + + self->ram_regions[self->ram_regions_num].ram_region = i; + self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END; + self->ram_regions[self->ram_regions_num].size = block->used_length-MEM_SPLIT_START; + self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0]; + self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START; + //self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->ptr+self->ram_regions[self->ram_regions_num].offset; + self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START; + self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1); + memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1); + strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr); + } + else{ + self->ram_regions[self->ram_regions_num].ram_region = i; + self->ram_regions[self->ram_regions_num].base = block->mr->addr; + self->ram_regions[self->ram_regions_num].size = block->used_length; + self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0]; + self->ram_regions[self->ram_regions_num].host_region_ptr = block->host; + self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset; + self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1); + memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1); + strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr); + } + + self->ram_regions_num++; + } + } + + + + shadow_memory_init_generic(self); + return self; +} + +shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){ + + RAMBlock *block; + RAMBlock* block_array[10]; + void* snapshot_ptr_offset_array[10]; + + shadow_memory_t* self = malloc(sizeof(shadow_memory_t)); + memset(self, 0x0, sizeof(shadow_memory_t)); + + /* count total memory size */ + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + self->memory_size += block->used_length; + } + + /* count number of ram regions */ + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + if(!block->mr->readonly){ + if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){ + self->ram_regions_num++; + } + self->ram_regions_num++; + } + } + + char* path_meta; + char* path_dump; + assert(asprintf(&path_meta, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1); + assert(asprintf(&path_dump, "%s/fast_snapshot.mem_dump", snapshot_folder) != -1); + + fast_reload_dump_head_t head; + + FILE* file_mem_meta = fopen (path_meta, "r"); + assert(file_mem_meta != NULL); + + assert(fread(&head, sizeof(fast_reload_dump_head_t), 1, file_mem_meta) == 1); + + fclose(file_mem_meta); + + if(self->ram_regions_num != head.shadow_memory_regions){ + fprintf(stderr, "Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n", self->ram_regions_num, head.shadow_memory_regions); + exit(1); + } + + //printf("LOAD -> self->ram_regions_num: %d\n", self->ram_regions_num); + + 
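Both init paths split a RAM block that crosses the PCI hole into two ram_region_t entries backed by one flat snapshot mapping. A worked example of the address arithmetic, assuming the conventional x86 values MEM_SPLIT_START = 3 GiB and MEM_SPLIT_END = 4 GiB (the actual constants come from nyx/snapshot/helper.h and are not shown in this hunk):

/*
 * block->used_length = 4 GiB yields:
 *
 *   region 0: base = 0x0,         size = 3 GiB, offset = 0
 *   region 1: base = 0x100000000, size = 1 GiB, offset = 3 GiB
 *
 * so a guest-physical address gpa >= MEM_SPLIT_END lands at file offset
 * (gpa - MEM_SPLIT_END) + MEM_SPLIT_START, while gpa < MEM_SPLIT_START
 * maps 1:1. The hole [3 GiB, 4 GiB) is not backed by RAM at all.
 */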
FILE* file_mem_dump = fopen (path_dump, "r"); + assert(file_mem_dump != NULL); + fseek(file_mem_dump, 0L, SEEK_END); + uint64_t file_mem_dump_size = ftell(file_mem_dump); + + debug_fprintf(stderr, "guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n", self->memory_size, file_mem_dump_size, dump_file); + + #define VGA_SIZE (16<<20) + + if(self->memory_size != file_mem_dump_size){ + if (file_mem_dump_size >= VGA_SIZE){ + fprintf(stderr, "ERROR: guest size should be %ld MB - set it to %ld MB\n", (file_mem_dump_size-VGA_SIZE)>>20, (self->memory_size-VGA_SIZE)>>20); + exit(1); + } + else{ + fprintf(stderr, "ERROR: guest size: %ld bytes\n", file_mem_dump_size); + exit(1); + } + } + assert(self->memory_size == ftell(file_mem_dump)); + fseek(file_mem_dump, 0L, SEEK_SET); + + fclose(file_mem_dump); + + self->snapshot_ptr_fd = open(path_dump, O_RDONLY); + //printf("self->snapshot_ptr_fd: %d\n", self->snapshot_ptr_fd); + self->snapshot_ptr = mmap(0, self->memory_size, PROT_READ, MAP_SHARED, self->snapshot_ptr_fd, 0); + //printf("TRY TO MMAP : %p\n", self->snapshot_ptr); + + assert(self->snapshot_ptr != (void*)-1); + madvise(self->snapshot_ptr, self->memory_size, MADV_MERGEABLE); + + + uint64_t offset = 0; + uint8_t i = 0; + uint8_t regions_num = 0; + QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { + QEMU_PT_PRINTF(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host); + //printf("%lx %lx %lx\t%s\t%p\n", block->offset, block->used_length, block->max_length, block->idstr, block->host); + + block_array[i] = block; + snapshot_ptr_offset_array[i++] = self->snapshot_ptr+offset; + offset += block->used_length; + regions_num++; + } + + self->ram_regions_num = 0; + for(uint8_t i = 0; i < regions_num; i++){ + block = block_array[i]; + if(!block->mr->readonly){ + + if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){ + + self->ram_regions[self->ram_regions_num].ram_region = i; + self->ram_regions[self->ram_regions_num].base = block->mr->addr; + self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START; + self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0]; + self->ram_regions[self->ram_regions_num].host_region_ptr = block->host; + self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset; + self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1); + memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1); + strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr); + self->ram_regions_num++; + + self->ram_regions[self->ram_regions_num].ram_region = i; + self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END; + self->ram_regions[self->ram_regions_num].size = block->used_length-MEM_SPLIT_START; + self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0]; + self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START; + //self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->ptr+self->ram_regions[self->ram_regions_num].offset; + self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START; + self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1); + memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1); + 
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr); + } + else{ + self->ram_regions[self->ram_regions_num].ram_region = i; + self->ram_regions[self->ram_regions_num].base = block->mr->addr; + self->ram_regions[self->ram_regions_num].size = block->used_length; + self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0]; + self->ram_regions[self->ram_regions_num].host_region_ptr = block->host; + self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset; + self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1); + memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1); + strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr); + } + + self->ram_regions_num++; + } + } + + /* memcpy version */ + /* + for(uint8_t i = 0; i < self->ram_regions_num; i++){ + void* host_addr = self->ram_regions[i].host_region_ptr + 0; + void* snapshot_addr = self->ram_regions[i].snapshot_region_ptr + 0; + memcpy(host_addr, snapshot_addr, self->ram_regions[i].size); + } + */ + + /* munmap + mmap version */ + for(uint8_t i = 0; i < self->ram_regions_num; i++){ + void* host_addr = self->ram_regions[i].host_region_ptr + 0; + assert(munmap(host_addr, self->ram_regions[i].size) != EINVAL); + assert(mmap(host_addr, self->ram_regions[i].size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_FIXED, self->snapshot_ptr_fd, self->ram_regions[i].offset) != MAP_FAILED); + } + + shadow_memory_init_generic(self); + return self; +} + + +void shadow_memory_prepare_incremental(shadow_memory_t* self){ + static int count = 0; + + if(count >= RESTORE_RATE){ + count = 0; + munmap(self->incremental_ptr, self->memory_size); + self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0); + shadow_memory_set_incremental_ptrs(self); + } + count++; +} + +void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental){ + self->incremental_enabled = incremental; +} + +void shadow_memory_restore_memory(shadow_memory_t* self){ + rcu_read_lock(); + + uint8_t slot = 0; + uint64_t addr = 0; + for(uint64_t i = 0; i < self->root_track_pages_num; i++){ + addr = self->root_track_pages_stack[i] & 0xFFFFFFFFFFFFF000; + slot = self->root_track_pages_stack[i] & 0xFFF; + + memcpy(self->ram_regions[slot].host_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE); + memcpy(self->ram_regions[slot].incremental_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE); + } + + self->root_track_pages_num = 0; + rcu_read_unlock(); +} + + +/* only used in debug mode -> no need to be fast */ +bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot){ + uint64_t value = (address & 0xFFFFFFFFFFFFF000) | slot; + + for(uint64_t i = 0; i < self->root_track_pages_num; i++){ + if(self->root_track_pages_stack[i] == value){ + return true; + } + } + return false; +} + +void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder){ + char* tmp1; + char* tmp2; + assert(asprintf(&tmp1, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1); + assert(asprintf(&tmp2, "%s/fast_snapshot.mem_dump", snapshot_folder) != -1); + + FILE* file_mem_meta = fopen(tmp1, "w+b"); + FILE* file_mem_data = fopen(tmp2, "w+b"); + + //} FILE* file_ptr_meta, FILE* file_ptr_data){ + + //assert(self); + 
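shadow_memory_restore_memory() above decodes each stack entry into a page-aligned offset plus a region slot: offsets are 4 KiB-aligned, so the low 12 bits are free to carry the slot index (the encoder is shadow_memory_track_dirty_root_pages() in the header further below). The packing, spelled out with hypothetical helper names:

static inline uint64_t root_track_pack(uint64_t addr, uint8_t slot)
{
    return (addr & 0xFFFFFFFFFFFFF000ull) | slot;
}

static inline void root_track_unpack(uint64_t entry, uint64_t* addr, uint8_t* slot)
{
    *addr = entry & 0xFFFFFFFFFFFFF000ull;
    *slot = (uint8_t)(entry & 0xFFF);
}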
//assert(file_ptr_meta); + //assert(file_ptr_data); + /* + debug_printf("black_list_pages_num: %lx\n", self->black_list_pages_num); + debug_printf("black_list_pages_size: %lx\n", self->black_list_pages_size); + debug_printf("black_list_pages ...\n"); + for (uint64_t i = 0; i < self->black_list_pages_num; i++ ){ + debug_printf("self->black_list_pages[%ld] = %lx\n", i, self->black_list_pages[i]); + } + */ + + //printf("shadow_memory_regions: %d\n", self->ram_regions_num); + //debug_printf("ram_region_index: %d\n", self->ram_region_index); + + /* + for (uint32_t i = 0; i < self->ram_regions_num; i++){ + printf("self->shadow_memory[%d] = %lx %s\n", i, self->ram_regions[i].base, self->ram_regions[i].idstr); + } + + printf("ram_size: %lx\n", self->memory_size); + */ + + fast_reload_dump_head_t head; + fast_reload_dump_entry_t entry; + + head.shadow_memory_regions = self->ram_regions_num; + head.ram_region_index = 0; /* due to legacy reasons */ + + fwrite(&head, sizeof(fast_reload_dump_head_t), 1, file_mem_meta); + + for (uint64_t i = 0; i < self->ram_regions_num; i++){ + memset(&entry, 0x0, sizeof(fast_reload_dump_entry_t)); + entry.shadow_memory_offset = (uint64_t)self->ram_regions[i].offset; + strncpy((char*)&entry.idstr, (const char*)self->ram_regions[i].idstr, 255); + fwrite(&entry, sizeof(fast_reload_dump_entry_t), 1, file_mem_meta); + } + + fwrite(self->snapshot_ptr, self->memory_size, 1, file_mem_data); + + fclose(file_mem_meta); + fclose(file_mem_data); +} + +bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size){ + + assert(size == 0x1000 && (address & 0xFFFULL) == 0); /* remove this limitation later */ + + if(address < self->memory_size){ + + assert(size <= 0x1000); /* remove this limitation later */ + + for(uint8_t i = 0; i < self->ram_regions_num; i++){ + if(address >= self->ram_regions[i].base && address < (self->ram_regions[i].base + self->ram_regions[i].size)){ + void* snapshot_ptr = self->ram_regions[i].snapshot_region_ptr + (address-self->ram_regions[i].base); + memcpy(ptr, snapshot_ptr, size); + return true; + } + } + } + return false; +} diff --git a/nyx/snapshot/memory/shadow_memory.h b/nyx/snapshot/memory/shadow_memory.h new file mode 100644 index 0000000000..f4f3e718cf --- /dev/null +++ b/nyx/snapshot/memory/shadow_memory.h @@ -0,0 +1,92 @@ +#pragma once + +#include +#include "nyx/snapshot/devices/state_reallocation.h" + +/* munmap & mmap incremental snapshot area after RESTORE_RATE restores to avoid high memory pressure */ +#define RESTORE_RATE 2000 + +typedef struct ram_region_s{ + + /* simple numeric identifier + * (can be the same for multiple regions if the memory is + * actually splitted across different bases in the guest's memory + * but related to the same mapping) + */ + uint8_t ram_region; + + /* base in the guest's physical address space */ + uint64_t base; + + /* size of this region */ + uint64_t size; + + /* mmap offset of this region (does not apply to the actual guest's memory) */ + uint64_t offset; + + /* pointer to the actual mmap region used by KVM */ + void* host_region_ptr; + + /* pointer to the snapshot mmap + offset */ + void* snapshot_region_ptr; + + /* pointer to the incremental CoW mmap + offset */ + void* incremental_region_ptr; + + char* idstr; + +} ram_region_t; + + +typedef struct shadow_memory_s{ + /* snapshot memory backup */ + void* snapshot_ptr; + + /* snapshot memory backup memfd */ + int snapshot_ptr_fd; + + /* incremental memory backup */ + void* incremental_ptr; + + 
//fast_reload_tmp_snapshot_t tmp_snapshot; + + /* total memory size */ + uint64_t memory_size; + + /* keep this */ + ram_region_t ram_regions[10]; + uint8_t ram_regions_num; + + /* additional dirty stack to restore root snapshot */ + uint64_t root_track_pages_num; + uint64_t root_track_pages_size; + uint64_t* root_track_pages_stack; + + bool incremental_enabled; +}shadow_memory_t; + +shadow_memory_t* shadow_memory_init(void); +shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot); + +void shadow_memory_prepare_incremental(shadow_memory_t* self); +void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental); + +void shadow_memory_restore_memory(shadow_memory_t* self); + +//void shadow_memory_prepare_incremental_snapshot(shadow_memory_t* self); + +static inline void shadow_memory_track_dirty_root_pages(shadow_memory_t* self, uint64_t address, uint8_t slot){ + if(unlikely(self->root_track_pages_num >= self->root_track_pages_size)){ + self->root_track_pages_size <<= 2; + self->root_track_pages_stack = realloc(self->root_track_pages_stack, self->root_track_pages_size*sizeof(uint64_t)); + } + + self->root_track_pages_stack[self->root_track_pages_num] = (address & 0xFFFFFFFFFFFFF000) | slot; + self->root_track_pages_num++; +} + +bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot); + +void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder); + +bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size); diff --git a/nyx/snapshot/misc/nyx_bitmap.c b/nyx/snapshot/misc/nyx_bitmap.c new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nyx/snapshot/misc/nyx_bitmap.h b/nyx/snapshot/misc/nyx_bitmap.h new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nyx/state.c b/nyx/state.c new file mode 100644 index 0000000000..3527469ad8 --- /dev/null +++ b/nyx/state.c @@ -0,0 +1,405 @@ +/* + +Copyright (C) 2019 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see . 
+ +*/ + +#include "nyx/state.h" +#include "nyx/debug.h" +#include "nyx/memory_access.h" +#include "sysemu/kvm.h" +#include "nyx/auxiliary_buffer.h" +#include "nyx/sharedir.h" +#include "nyx/fast_vm_reload_sync.h" +#include "nyx/helpers.h" + +//#define STATE_VERBOSE + +/* global singleton */ +struct state_qemu_pt global_state; + +void state_init_global(void){ +#ifdef STATE_VERBOSE + fprintf(stderr, "--> %s <--\n", __func__); +#endif + /* safety first */ + assert(libxdc_get_release_version() == LIBXDC_RELEASE_VERSION); + + global_state.nyx_fdl = false; + + global_state.workdir_path = NULL; + + global_state.fast_reload_enabled = false; + global_state.fast_reload_mode = false; + global_state.fast_reload_path = NULL; + global_state.fast_reload_pre_path = NULL; + global_state.fast_reload_pre_image = false; + + global_state.fast_reload_snapshot = fast_reload_new(); + global_state.reload_state = init_fast_vm_reload_sync(); + + global_state.decoder = NULL; + + global_state.page_cache = NULL; + + global_state.redqueen_enable_pending = false; + global_state.redqueen_disable_pending = false; + global_state.redqueen_instrumentation_mode = 0; + global_state.redqueen_update_blacklist = false; + global_state.patches_enable_pending = false; + global_state.patches_disable_pending = false; + global_state.redqueen_state = NULL; + + for(uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++){ + global_state.pt_ip_filter_configured[i] = false; + global_state.pt_ip_filter_enabled[i] = false; + global_state.pt_ip_filter_a[i] = 0x0; + global_state.pt_ip_filter_b[i] = 0x0; + } + global_state.pt_c3_filter = 0; + + global_state.enable_hprintf = false; + global_state.parent_cr3 = 0; + global_state.disassembler_word_width = 64; + global_state.nested = false; + global_state.payload_buffer = 0; + global_state.nested_payload_pages = NULL; + global_state.nested_payload_pages_num = 0; + global_state.protect_payload_buffer = 1; + global_state.discard_tmp_snapshot = 0; + global_state.mem_mode = mm_unkown; + + init_timeout_detector(&(global_state.timeout_detector)); + + global_state.in_fuzzing_mode = false; + global_state.in_reload_mode = true; + global_state.shutdown_requested = false; + global_state.cow_cache_full = false; + + global_state.auxilary_buffer = NULL; + memset(&global_state.shadow_config, 0x0, sizeof(auxilary_buffer_config_t)); + + global_state.decoder_page_fault = false; + global_state.decoder_page_fault_addr = 0x0; + + global_state.dump_page = false; + global_state.dump_page_addr = 0x0; + + global_state.in_redqueen_reload_mode = false; + + global_state.pt_trace_mode = true; + global_state.pt_trace_mode_force = false; + + global_state.sharedir = sharedir_new(); + + + global_state.shared_bitmap_fd = 0; + global_state.shared_bitmap_size = 0; + global_state.shared_ijon_bitmap_size = 0; + global_state.shared_payload_buffer_fd = 0; + global_state.shared_payload_buffer_size = 0; + global_state.shared_bitmap_ptr = NULL; + + global_state.pt_trace_size = 0; + global_state.bb_coverage = 0; + + global_state.cap_timeout_detection = 0; + global_state.cap_only_reload_mode = 0; + global_state.cap_compile_time_tracing = 0; + global_state.cap_ijon_tracing = 0; + global_state.cap_cr3 = 0; + global_state.cap_compile_time_tracing_buffer_vaddr = 0; + global_state.cap_ijon_tracing_buffer_vaddr = 0; + + QTAILQ_INIT(&global_state.redqueen_breakpoints); +} + + +fast_reload_t* get_fast_reload_snapshot(void){ + return global_state.fast_reload_snapshot; +} + +void set_fast_reload_mode(bool mode){ + global_state.fast_reload_mode = mode; +} + +void 
set_fast_reload_path(const char* path){ + assert(global_state.fast_reload_path == NULL); + global_state.fast_reload_path = malloc(strlen(path)+1); + strcpy(global_state.fast_reload_path, path); +} + +void set_fast_reload_pre_path(const char* path){ + assert(global_state.fast_reload_pre_path == NULL); + global_state.fast_reload_pre_path = malloc(strlen(path)+1); + strcpy(global_state.fast_reload_pre_path, path); +} + +void set_fast_reload_pre_image(void){ + assert(global_state.fast_reload_pre_path != NULL); + global_state.fast_reload_pre_image = true; +} + +void enable_fast_reloads(void){ + assert(global_state.fast_reload_path != NULL); + global_state.fast_reload_enabled = true; +} + +void init_page_cache(char* path){ + assert(global_state.page_cache == NULL); + global_state.page_cache = page_cache_new((CPUState *)qemu_get_cpu(0), path); + #ifdef STATE_VERBOSE + debug_printf("\n\nINIT PAGE_CACHE => %s\n", path); + #endif +} + +page_cache_t* get_page_cache(void){ + assert(global_state.page_cache); + return global_state.page_cache; +} + +void init_redqueen_state(void){ + global_state.redqueen_state = new_rq_state((CPUState *)qemu_get_cpu(0), get_page_cache()); +} + + +redqueen_t* get_redqueen_state(void){ + assert(global_state.redqueen_state != NULL); + return global_state.redqueen_state; +} + + +void dump_global_state(const char* filename_prefix){ + debug_printf("%s\n", __func__); + + char* tmp; + + assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1); + debug_printf("%s\n", tmp); + + FILE *fp = fopen(tmp, "wb"); + if(fp == NULL) { + debug_fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp); + assert(false); + //exit(EXIT_FAILURE); + } + + + debug_printf("DUMPING global_state.pt_ip_filter_configured: -\n"); + fwrite(&global_state.pt_ip_filter_configured, sizeof(bool)*4, 1, fp); + + debug_printf("DUMPING global_state.pt_ip_filter_a: -\n"); + fwrite(&global_state.pt_ip_filter_a, sizeof(uint64_t)*4, 1, fp); + + debug_printf("DUMPING global_state.pt_ip_filter_b: -\n"); + fwrite(&global_state.pt_ip_filter_b, sizeof(uint64_t)*4, 1, fp); + + debug_printf("DUMPING global_state.enable_hprintf: %x\n", global_state.enable_hprintf); + fwrite(&global_state.enable_hprintf, sizeof(bool), 1, fp); + debug_printf("DUMPING global_state.parent_cr3: %lx\n", global_state.parent_cr3); + fwrite(&global_state.parent_cr3, sizeof(uint64_t), 1, fp); + + debug_printf("DUMPING global_state.disassembler_word_width: %x\n", global_state.disassembler_word_width); + fwrite(&global_state.disassembler_word_width, sizeof(uint8_t), 1, fp); + debug_printf("DUMPING global_state.fast_reload_pre_image: %x\n", global_state.fast_reload_pre_image); + fwrite(&global_state.fast_reload_pre_image, sizeof(bool), 1, fp); + + debug_printf("DUMPING global_state.mem_mode: %x\n", global_state.mem_mode); + fwrite(&global_state.mem_mode, sizeof(uint8_t), 1, fp); + + debug_printf("DUMPING global_state.pt_trace_mode: %x\n", global_state.pt_trace_mode); + fwrite(&global_state.pt_trace_mode, sizeof(bool), 1, fp); + + debug_printf("DUMPING global_state.nested: %x\n", global_state.nested); + fwrite(&global_state.nested, sizeof(bool), 1, fp); + + if(!global_state.nested){ + debug_printf("DUMPING global_state.payload_buffer: %lx\n", global_state.payload_buffer); + fwrite(&global_state.payload_buffer, sizeof(uint64_t), 1, fp); + + fwrite(&global_state.cap_timeout_detection, sizeof(global_state.cap_timeout_detection), 1, fp); + fwrite(&global_state.cap_only_reload_mode, sizeof(global_state.cap_only_reload_mode), 1, fp); + 
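dump_global_state() here and load_global_state() below form a hand-rolled serializer: every fwrite() must be matched by an fread() of the same size, in the same order, on the load side. A pair of hypothetical wrapper macros (not part of the patch) that would make the pairing harder to break:

#define DUMP_FIELD(fp, field) \
    assert(fwrite(&(field), sizeof(field), 1, (fp)) == 1)
#define LOAD_FIELD(fp, field) \
    assert(fread(&(field), sizeof(field), 1, (fp)) == 1)

/* e.g. DUMP_FIELD(fp, global_state.parent_cr3);
 *      LOAD_FIELD(fp, global_state.parent_cr3); */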
fwrite(&global_state.cap_compile_time_tracing, sizeof(global_state.cap_compile_time_tracing), 1, fp); + fwrite(&global_state.cap_ijon_tracing, sizeof(global_state.cap_ijon_tracing), 1, fp); + fwrite(&global_state.cap_cr3, sizeof(global_state.cap_cr3), 1, fp); + fwrite(&global_state.cap_compile_time_tracing_buffer_vaddr, sizeof(global_state.cap_compile_time_tracing_buffer_vaddr), 1, fp); + fwrite(&global_state.cap_ijon_tracing_buffer_vaddr, sizeof(global_state.cap_ijon_tracing_buffer_vaddr), 1, fp); + } + else{ + assert(global_state.nested_payload_pages != NULL && global_state.nested_payload_pages_num != 0); + debug_printf("DUMPING global_state.nested_payload_pages_num: %x\n", global_state.nested_payload_pages_num); + fwrite(&global_state.nested_payload_pages_num, sizeof(uint32_t), 1, fp); + + if(global_state.nested_payload_pages_num != 0){ + debug_printf("DUMPING global_state.protect_payload_buffer: %x\n", global_state.protect_payload_buffer); + fwrite(&global_state.protect_payload_buffer, sizeof(bool), 1, fp); + } + + for(uint32_t i = 0; i < global_state.nested_payload_pages_num; i++){ + debug_printf("DUMPING global_state.nested_payload_pages[%d]: %lx\n", i, global_state.nested_payload_pages[i]); + fwrite(&global_state.nested_payload_pages[i], sizeof(uint64_t), 1, fp); + } + } + + + fclose(fp); + + free(tmp); +} + +void load_global_state(const char* filename_prefix){ + debug_printf("%s\n", __func__); + + char* tmp; + + assert(asprintf(&tmp, "%s/global.state", filename_prefix) != -1); + debug_printf("%s\n", tmp); + + FILE *fp = fopen(tmp, "rb"); + if(fp == NULL) { + debug_fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp); + assert(false); + //exit(EXIT_FAILURE); + } + + + assert(fread(&global_state.pt_ip_filter_configured, sizeof(bool)*4, 1, fp) == 1); + debug_printf("LOADING global_state.pt_ip_filter_configured: -\n"); + + assert(fread(&global_state.pt_ip_filter_a, sizeof(uint64_t)*4, 1, fp) == 1); + debug_printf("LOADING global_state.pt_ip_filter_a: -\n"); + + assert(fread(&global_state.pt_ip_filter_b, sizeof(uint64_t)*4, 1, fp) == 1); + debug_printf("LOADING global_state.pt_ip_filter_b: -\n"); + + assert(fread(&global_state.enable_hprintf, sizeof(bool), 1, fp) == 1); + debug_printf("LOADING global_state.enable_hprintf: %x\n", global_state.enable_hprintf); + + assert(fread(&global_state.parent_cr3, sizeof(uint64_t), 1, fp) == 1); + debug_printf("LOADING global_state.parent_cr3: %lx\n", global_state.parent_cr3); + + assert(fread(&global_state.disassembler_word_width, sizeof(uint8_t), 1, fp) == 1); + debug_printf("LOADING global_state.disassembler_word_width: %x\n", global_state.disassembler_word_width); + + assert(fread(&global_state.fast_reload_pre_image, sizeof(bool), 1, fp) == 1); + debug_printf("LOADING global_state.fast_reload_pre_image: %x\n", global_state.fast_reload_pre_image); + + assert(fread(&global_state.mem_mode, sizeof(uint8_t), 1, fp) == 1); + debug_printf("LOADING global_state.mem_mode: %x\n", global_state.mem_mode); + + assert(fread(&global_state.pt_trace_mode, sizeof(bool), 1, fp) == 1); + debug_printf("LOADING global_state.pt_trace_mode: %x\n", global_state.pt_trace_mode); + + assert(fread(&global_state.nested, sizeof(bool), 1, fp) == 1); + debug_printf("LOADING global_state.nested: %x\n", global_state.nested); + + if(!global_state.nested){ + assert(fread(&global_state.payload_buffer, sizeof(uint64_t), 1, fp) == 1); + debug_printf("LOADING global_state.payload_buffer: %lx\n", global_state.payload_buffer); + + if(!global_state.fast_reload_pre_image){ + 
if(global_state.payload_buffer != 0){ + debug_printf("REMAP PAYLOAD BUFFER!\n"); + remap_payload_buffer(global_state.payload_buffer, ((CPUState *)qemu_get_cpu(0)) ); + } + else{ + fprintf(stderr, "WARNING: address of payload buffer in snapshot file is zero!\n"); + } + } + + assert(fread(&global_state.cap_timeout_detection, sizeof(global_state.cap_timeout_detection), 1, fp) == 1); + assert(fread(&global_state.cap_only_reload_mode, sizeof(global_state.cap_only_reload_mode), 1, fp) == 1); + assert(fread(&global_state.cap_compile_time_tracing, sizeof(global_state.cap_compile_time_tracing), 1, fp) == 1); + assert(fread(&global_state.cap_ijon_tracing, sizeof(global_state.cap_ijon_tracing), 1, fp) == 1); + assert(fread(&global_state.cap_cr3, sizeof(global_state.cap_cr3), 1, fp) == 1); + assert(fread(&global_state.cap_compile_time_tracing_buffer_vaddr, sizeof(global_state.cap_compile_time_tracing_buffer_vaddr), 1, fp) == 1); + assert(fread(&global_state.cap_ijon_tracing_buffer_vaddr, sizeof(global_state.cap_ijon_tracing_buffer_vaddr), 1, fp) == 1); + + apply_capabilities(qemu_get_cpu(0)); + } + else{ + assert(fread(&global_state.nested_payload_pages_num, sizeof(uint32_t), 1, fp) == 1); + debug_printf("LOADING global_state.nested_payload_pages_num: %x\n", global_state.nested_payload_pages_num); + + global_state.in_fuzzing_mode = true; /* huh? why is fuzzing mode already forced on here? */ + if(!global_state.fast_reload_pre_image){ + + assert(fread(&global_state.protect_payload_buffer, sizeof(bool), 1, fp) == 1); + debug_printf("LOADING global_state.protect_payload_buffer: %x\n", global_state.protect_payload_buffer); + + global_state.nested_payload_pages = (uint64_t*)malloc(sizeof(uint64_t)*global_state.nested_payload_pages_num); + + for(uint32_t i = 0; i < global_state.nested_payload_pages_num; i++){ + assert(fread(&global_state.nested_payload_pages[i], sizeof(uint64_t), 1, fp) == 1); + debug_printf("LOADED global_state.nested_payload_pages[%d]: %lx\n", i, global_state.nested_payload_pages[i]); + if(global_state.protect_payload_buffer){ + assert(remap_payload_slot_protected(GET_GLOBAL_STATE()->nested_payload_pages[i], i, ((CPUState *)qemu_get_cpu(0))) == true); + } + else{ + remap_payload_slot(global_state.nested_payload_pages[i], i, ((CPUState *)qemu_get_cpu(0))); + } + } + + } + } + + fclose(fp); + + free(tmp); +} + +static void* alloc_auxiliary_buffer(const char* file){ + void* ptr; + struct stat st; + int fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO); + assert(fd >= 0); + assert(ftruncate(fd, AUX_BUFFER_SIZE) == 0); + stat(file, &st); + QEMU_PT_PRINTF(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx", AUX_BUFFER_SIZE, st.st_size); + + assert(AUX_BUFFER_SIZE == st.st_size); + ptr = mmap(0, AUX_BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (ptr == MAP_FAILED) { + fprintf(stderr, "aux buffer allocation failed!\n"); + return (void*)-1; + } + return ptr; +} + +void init_aux_buffer(const char* filename){ + global_state.auxilary_buffer = (auxilary_buffer_t*)alloc_auxiliary_buffer(filename); + init_auxiliary_buffer(global_state.auxilary_buffer); +} + +void set_payload_buffer(uint64_t payload_buffer){ + assert(global_state.payload_buffer == 0 && global_state.nested == false); + global_state.payload_buffer = payload_buffer; + global_state.nested = false; +} + +void set_payload_pages(uint64_t* payload_pages, uint32_t pages){ + assert(global_state.nested_payload_pages == NULL && global_state.nested_payload_pages_num == 0); + global_state.nested_payload_pages = (uint64_t*)malloc(sizeof(uint64_t)*pages); + 
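/* nested mode: the payload is scattered across multiple guest-physical pages instead of a single contiguous buffer */ + 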
global_state.nested_payload_pages_num = pages; + memcpy(global_state.nested_payload_pages, payload_pages, sizeof(uint64_t)*pages); + global_state.nested = true; +} + +void set_workdir_path(char* workdir){ + assert(workdir && !global_state.workdir_path); + assert(asprintf(&global_state.workdir_path, "%s", workdir) != -1); +} \ No newline at end of file diff --git a/nyx/state.h b/nyx/state.h new file mode 100644 index 0000000000..e7a8571fc7 --- /dev/null +++ b/nyx/state.h @@ -0,0 +1,182 @@ +/* + +Copyright (C) 2019 Sergej Schumilo + +This file is part of QEMU-PT (HyperTrash / kAFL). + +QEMU-PT is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +QEMU-PT is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>. + +*/ + +#pragma once + +#include "nyx/redqueen.h" +#include "nyx/redqueen_patch.h" +#include "nyx/fast_vm_reload.h" +#include "nyx/page_cache.h" +#include "nyx/synchronization.h" +#include "nyx/auxiliary_buffer.h" +#include "nyx/sharedir.h" +#include "nyx/fast_vm_reload_sync.h" + +#include <libxdc.h> + +#define INTEL_PT_MAX_RANGES 4 + +enum mem_mode { + mm_unkown, + mm_32_protected, /* 32 Bit / No MMU */ + mm_32_paging, /* 32 Bit / L3 Paging */ + mm_32_pae, /* 32 Bit / PAE Paging */ + mm_64_l4_paging, /* 64 Bit / L4 Paging */ + mm_64_l5_paging, /* 64 Bit / L5 Paging */ +}; + +struct state_qemu_pt{ + + /* set if FDL backend is used (required to perform some additional runtime tests) */ + bool nyx_fdl; + + char* workdir_path; + + /* FAST VM RELOAD */ + bool fast_reload_enabled; + bool fast_reload_mode; + char* fast_reload_path; + char* fast_reload_pre_path; + bool fast_reload_pre_image; + fast_reload_t* fast_reload_snapshot; + fast_vm_reload_sync_t* reload_state; + + /* PAGE CACHE */ + page_cache_t* page_cache; + + /* Decoder */ + libxdc_t* decoder; + + /* REDQUEEN */ + bool redqueen_enable_pending; + bool redqueen_disable_pending; + int redqueen_instrumentation_mode; + bool redqueen_update_blacklist; + bool patches_enable_pending; + bool patches_disable_pending; + redqueen_t* redqueen_state; + + /* Intel PT Options (not migratable) */ + uint64_t pt_c3_filter; + volatile bool pt_ip_filter_enabled[4]; + bool pt_trace_mode; // enabled by default; disabled if the agent implements compile-time tracing + + /* disabled by default; enable to force usage of PT tracing + * (useful for targets that use compile-time tracing and redqueen at the same time (which obviously relies on PT traces)) + * This mode is usually enabled by the fuzzing logic by enabling trace mode. + * *** THIS FEATURE IS STILL EXPERIMENTAL *** + * */ + bool pt_trace_mode_force; + +
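/* run-time statistics, reported back to the fuzzer through the auxiliary result buffer */ + 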
uint32_t pt_trace_size; // trace size counter + uint32_t bb_coverage; // basic block coverage counter + + /* mmap Options (not migratable) */ + int shared_bitmap_fd; + uint32_t shared_bitmap_size; + uint32_t shared_ijon_bitmap_size; + int shared_payload_buffer_fd; + uint32_t shared_payload_buffer_size; + void* shared_bitmap_ptr; + + /* Intel PT Options (migratable) */ + bool pt_ip_filter_configured[4]; + uint64_t pt_ip_filter_a[4]; + uint64_t pt_ip_filter_b[4]; + + /* OPTIONS (MIGRATABLE VIA FAST SNAPSHOTS) */ + bool enable_hprintf; + uint64_t parent_cr3; + uint8_t disassembler_word_width; + bool nested; + uint64_t payload_buffer; + uint32_t nested_payload_pages_num; + uint64_t* nested_payload_pages; + bool protect_payload_buffer; + bool discard_tmp_snapshot; + uint8_t mem_mode; + + + /* NON-MIGRATABLE OPTIONS */ + timeout_detector_t timeout_detector; + + bool decoder_page_fault; + uint64_t decoder_page_fault_addr; + + bool dump_page; + uint64_t dump_page_addr; + + bool in_fuzzing_mode; + bool in_reload_mode; + + bool shutdown_requested; + bool cow_cache_full; + + bool in_redqueen_reload_mode; + + /* capabilities */ + uint8_t cap_timeout_detection; + uint8_t cap_only_reload_mode; + uint8_t cap_compile_time_tracing; + uint8_t cap_ijon_tracing; + uint64_t cap_cr3; + uint64_t cap_compile_time_tracing_buffer_vaddr; + uint64_t cap_ijon_tracing_buffer_vaddr; + + auxilary_buffer_t* auxilary_buffer; + auxilary_buffer_config_t shadow_config; + sharedir_t* sharedir; + + QTAILQ_HEAD(, kvm_sw_breakpoint) redqueen_breakpoints; +}; + +extern struct state_qemu_pt global_state; + +#define GET_GLOBAL_STATE() (&global_state) + +void state_init_global(void); +fast_reload_t* get_fast_reload_snapshot(void); +void set_fast_reload_mode(bool mode); +void set_fast_reload_path(const char* path); +void set_fast_reload_pre_image(void); + + +void enable_fast_reloads(void); + +/* Page Cache */ +void init_page_cache(char* path); +page_cache_t* get_page_cache(void); + +void init_redqueen_state(void); + +redqueen_t* get_redqueen_state(void); + +void dump_global_state(const char* filename_prefix); +void load_global_state(const char* filename_prefix); + +void init_aux_buffer(const char* filename); +void set_fast_reload_pre_path(const char* path); + +void set_payload_buffer(uint64_t payload_buffer); +void set_payload_pages(uint64_t* payload_pages, uint32_t pages); + +void set_workdir_path(char* workdir); diff --git a/nyx/synchronization.c b/nyx/synchronization.c new file mode 100644 index 0000000000..584aa4e648 --- /dev/null +++ b/nyx/synchronization.c @@ -0,0 +1,481 @@ +#include "nyx/synchronization.h" +#include "nyx/hypercall.h" +#include "nyx/interface.h" +#include "nyx/fast_vm_reload.h" +#include "qemu-common.h" +#include "qemu/osdep.h" +#include "target/i386/cpu.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "nyx/debug.h" +#include "nyx/state.h" +#include <sys/syscall.h> +#include <signal.h> +#include "qemu/main-loop.h" +#include "nyx/helpers.h" +#include "nyx/file_helper.h" + + +#include "pt.h" + +pthread_mutex_t synchronization_lock_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t synchronization_lock_condition = PTHREAD_COND_INITIALIZER; +pthread_mutex_t synchronization_disable_pt_mutex = PTHREAD_MUTEX_INITIALIZER; + +volatile bool synchronization_reload_pending = false; +volatile bool synchronization_kvm_loop_waiting = false; + + +/* new SIGALRM based timeout detection */ + +//#define DEBUG_TIMEOUT_DETECTOR + +
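/* rough sketch of the mechanism: install_timeout_detector() pins the SIGALRM handler to the vCPU thread, arm_sigprof_timer() programs a one-shot ITIMER_REAL right before the guest resumes, and disarm_sigprof_timer() stops it on the next VM exit; if the remaining time reads as zero, the expired SIGALRM has interrupted the blocking KVM_RUN ioctl and the run is treated as a timeout */ +void 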
init_timeout_detector(timeout_detector_t* timeout_detector){ + timeout_detector->kvm_tid = 0; + timeout_detector->reload_pending = false; + timeout_detector->detection_enabled = false; + + timeout_detector->timeout_sec = 0; + timeout_detector->timeout_usec = 0; /* default: disabled */ + + timeout_detector->arm_timeout.it_interval.tv_sec = 0; + timeout_detector->arm_timeout.it_interval.tv_usec = 0; + timeout_detector->arm_timeout.it_value.tv_sec = 0; + timeout_detector->arm_timeout.it_value.tv_usec = 0; + + timeout_detector->disarm_timeout.it_interval.tv_sec = 0; + timeout_detector->disarm_timeout.it_interval.tv_usec = 0; + timeout_detector->disarm_timeout.it_value.tv_sec = 0; + timeout_detector->disarm_timeout.it_value.tv_usec = 0; + +} + +static void sigalarm_handler(int signum) { + /* ensure that SIGALRM is ALWAYS handled by kvm thread */ + assert(GET_GLOBAL_STATE()->timeout_detector.kvm_tid == syscall(SYS_gettid)); + //GET_GLOBAL_STATE()->timeout_detector.reload_pending = true; +#ifdef DEBUG_TIMEOUT_DETECTOR + fprintf(stderr, "Handled! %d %ld\n", signum, syscall(SYS_gettid)); +#endif +} + +void install_timeout_detector(timeout_detector_t* timeout_detector){ + timeout_detector->kvm_tid = syscall(SYS_gettid); + if(signal(SIGALRM, sigalarm_handler) == SIG_ERR) { + fprintf(stderr, "%s failed!\n", __func__); + assert(false); + } + //fprintf(stderr, "SIGALRM HANDLER INSTALLED! %ld\n", syscall(SYS_gettid)); +} + +void reset_timeout_detector(timeout_detector_t* timeout_detector){ +#ifdef DEBUG_TIMEOUT_DETECTOR + fprintf(stderr, "%s!\n", __func__); +#endif + timeout_detector->reload_pending = false; + if(timeout_detector->timeout_sec || timeout_detector->timeout_usec){ + timeout_detector->arm_timeout.it_value.tv_sec = timeout_detector->timeout_sec; + timeout_detector->arm_timeout.it_value.tv_usec = timeout_detector->timeout_usec; + timeout_detector->detection_enabled = true; + } + else{ + timeout_detector->detection_enabled = false; + } +} + +void enable_timeout_detector(timeout_detector_t* timeout_detector){ + timeout_detector->detection_enabled = true; +} + +/* +static void disable_timeout_detector(timeout_detector_t* timeout_detector){ + timeout_detector->detection_enabled = false; + + struct itimerval tmp; + + timeout_detector->disarm_timeout.it_interval.tv_sec = 0; + timeout_detector->disarm_timeout.it_interval.tv_usec = 0; + assert(setitimer(ITIMER_REAL, &timeout_detector->disarm_timeout, &tmp) == 0); +} +*/ + + +void update_itimer(timeout_detector_t* timeout_detector, uint8_t sec, uint32_t usec){ + //fprintf(stderr, "%s: %x %x\n", __func__, sec, usec); + if(sec || usec){ + timeout_detector->timeout_sec = (time_t) sec; + timeout_detector->timeout_usec = (suseconds_t) usec; + timeout_detector->detection_enabled = true; + } + else{ + timeout_detector->detection_enabled = false; + } +} + +bool arm_sigprof_timer(timeout_detector_t* timeout_detector){ + //return false; + if(timeout_detector->detection_enabled){ + if(timeout_detector->reload_pending || (!timeout_detector->arm_timeout.it_value.tv_sec && !timeout_detector->arm_timeout.it_value.tv_usec)){ + //assert(false); + fprintf(stderr, "TIMER EXPIRED 1! 
%d %ld %ld\n", timeout_detector->reload_pending, timeout_detector->arm_timeout.it_value.tv_sec, timeout_detector->arm_timeout.it_value.tv_usec); + reset_timeout_detector(timeout_detector); + /* TODO: check if this function still works as expected even if we don't return at this point */ + //return true; + } +#ifdef DEBUG_TIMEOUT_DETECTOR + fprintf(stderr, "%s (%ld %ld)\n", __func__, timeout_detector->arm_timeout.it_value.tv_sec, timeout_detector->arm_timeout.it_value.tv_usec); +#endif + timeout_detector->arm_timeout.it_interval.tv_sec = 0; + timeout_detector->arm_timeout.it_interval.tv_usec = 0; + + + assert(setitimer(ITIMER_REAL, &timeout_detector->arm_timeout, 0) == 0); + } + return false; +} + +bool disarm_sigprof_timer(timeout_detector_t* timeout_detector){ + //return false; + struct itimerval tmp; + + if(timeout_detector->detection_enabled){ + timeout_detector->disarm_timeout.it_interval.tv_sec = 0; + timeout_detector->disarm_timeout.it_interval.tv_usec = 0; + assert(setitimer(ITIMER_REAL, &timeout_detector->disarm_timeout, &tmp) == 0); + + timeout_detector->arm_timeout.it_value.tv_sec = tmp.it_value.tv_sec; + timeout_detector->arm_timeout.it_value.tv_usec = tmp.it_value.tv_usec; + +#ifdef DEBUG_TIMEOUT_DETECTOR + fprintf(stderr, "%s (%ld %ld)\n", __func__, timeout_detector->arm_timeout.it_value.tv_sec, timeout_detector->arm_timeout.it_value.tv_usec); +#endif + if(timeout_detector->reload_pending || (!timeout_detector->arm_timeout.it_value.tv_sec && !timeout_detector->arm_timeout.it_value.tv_usec)){ + //fprintf(stderr, "TIMER EXPIRED 2! %d %d %d\n", timeout_detector->reload_pending, timeout_detector->arm_timeout.it_value.tv_sec, timeout_detector->arm_timeout.it_value.tv_usec); + + reset_timeout_detector(timeout_detector); + //timeout_detector->detection_enabled = false; + return true; + } + } + return false; +} + +void block_signals(void){ + sigset_t set; + + sigemptyset(&set); + sigaddset(&set, SIGALRM); + sigaddset(&set, SIGABRT); + sigaddset(&set, SIGSEGV); + pthread_sigmask(SIG_BLOCK, &set, NULL); + //fprintf(stderr, "%s!\n", __func__); + +} + +void unblock_signals(void){ + sigset_t set; + + sigemptyset(&set); + sigaddset(&set, SIGABRT); + sigaddset(&set, SIGSEGV); + sigaddset(&set, SIGALRM); + sigprocmask(SIG_UNBLOCK, &set, NULL); + //fprintf(stderr, "%s!\n", __func__); +} + +/* -------------------- */ + +static inline void handle_tmp_snapshot_state(void){ + if(GET_GLOBAL_STATE()->discard_tmp_snapshot){ + if(fast_reload_tmp_created(get_fast_reload_snapshot())){ + qemu_mutex_lock_iothread(); + fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot()); /* bye bye */ + qemu_mutex_unlock_iothread(); + //fprintf(stderr, "======= SNAPSHOT REMOVED! 
=======\n"); + } + GET_GLOBAL_STATE()->discard_tmp_snapshot = false; + set_tmp_snapshot_created(GET_GLOBAL_STATE()->auxilary_buffer, 0); + } +} + +static inline bool synchronization_check_page_not_found(void){ + bool failure = false; + + /* a page is missing in the current execution */ + if(GET_GLOBAL_STATE()->decoder_page_fault){ + set_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->decoder_page_fault_addr); + GET_GLOBAL_STATE()->decoder_page_fault = false; + GET_GLOBAL_STATE()->decoder_page_fault_addr = 0; + failure = true; + } + + /* page was dumped during this execution */ + if(GET_GLOBAL_STATE()->dump_page){ + kvm_remove_all_breakpoints(qemu_get_cpu(0)); + kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3); + kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_MTF); + failure = true; + } + + return failure; +} + +void synchronization_unlock(void){ + //fprintf(stderr, "%s\n", __func__); + + pthread_mutex_lock(&synchronization_lock_mutex); + pthread_cond_signal(&synchronization_lock_condition); + //hypercall_reset_hprintf_counter(); + pthread_mutex_unlock(&synchronization_lock_mutex); +} + + +uint64_t run_counter = 0; +bool in_fuzzing_loop = false; + +//bool last_timeout = false; + +void synchronization_lock_hprintf(void){ + pthread_mutex_lock(&synchronization_lock_mutex); + interface_send_char(KAFL_PING); + + pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex); + pthread_mutex_unlock(&synchronization_lock_mutex); + + flush_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + +} +void synchronization_lock(void){ + + pthread_mutex_lock(&synchronization_lock_mutex); + run_counter++; + + if(qemu_get_cpu(0)->intel_pt_run_trashed){ + set_pt_overflow_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + } + set_exec_done_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, + GET_GLOBAL_STATE()->timeout_detector.timeout_sec - GET_GLOBAL_STATE()->timeout_detector.arm_timeout.it_value.tv_sec, + GET_GLOBAL_STATE()->timeout_detector.timeout_usec - (uint32_t)GET_GLOBAL_STATE()->timeout_detector.arm_timeout.it_value.tv_usec); + /* + if(last_timeout){ + reset_timeout_detector_timeout(&(GET_GLOBAL_STATE()->timeout_detector)); + } + else{ + */ + reset_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector)); + //} + + if(synchronization_check_page_not_found()){ + set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0); + } + + if(GET_GLOBAL_STATE()->dump_page){ + GET_GLOBAL_STATE()->dump_page = false; + GET_GLOBAL_STATE()->dump_page_addr = 0x0; + kvm_remove_all_breakpoints(qemu_get_cpu(0)); + kvm_vcpu_ioctl(qemu_get_cpu(0), KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3); + } + + //last_timeout = false; + + if(unlikely(GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->redqueen_state->trace_mode)){ + if(GET_GLOBAL_STATE()->redqueen_state->trace_mode){ + write_trace_result(GET_GLOBAL_STATE()->redqueen_state->trace_state); + redqueen_trace_reset(GET_GLOBAL_STATE()->redqueen_state->trace_state); + } + fsync_all_traces(); + } + + interface_send_char(KAFL_PING); + //QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PING"); + + pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex); + pthread_mutex_unlock(&synchronization_lock_mutex); + + flush_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + check_auxiliary_config_buffer(GET_GLOBAL_STATE()->auxilary_buffer, &GET_GLOBAL_STATE()->shadow_config); + 
set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 1); + + GET_GLOBAL_STATE()->pt_trace_size = 0; + /* + if(GET_GLOBAL_STATE()->dump_page){ + fprintf(stderr, "DISABLING TIMEOUT DETECTION\n"); + disable_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector)); + } + */ + +} + +static void perform_reload(void){ + if(fast_reload_root_created(get_fast_reload_snapshot())){ + qemu_mutex_lock_iothread(); + fast_reload_restore(get_fast_reload_snapshot()); + qemu_mutex_unlock_iothread(); + set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + set_result_dirty_pages(GET_GLOBAL_STATE()->auxilary_buffer, get_dirty_page_num(get_fast_reload_snapshot())); + } + else{ + fprintf(stderr, "WARNING: Root snapshot is not available yet!\n"); + } +} + +void synchronization_lock_crash_found(void){ + if(!in_fuzzing_loop && GET_GLOBAL_STATE()->in_fuzzing_mode){ + fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP] at %lx\n", getpid(), run_counter, __func__, get_rip(qemu_get_cpu(0))); + //abort(); + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + set_crash_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + + perform_reload(); + + //synchronization_lock(); + + in_fuzzing_loop = false; +} + +void synchronization_lock_asan_found(void){ + if(!in_fuzzing_loop){ + fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__); + set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0); + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + set_asan_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + + perform_reload(); + + //synchronization_lock(); + + in_fuzzing_loop = false; +} + +void synchronization_lock_timeout_found(void){ + + //fprintf(stderr, "<%d>\t%s\n", getpid(), __func__); + + if(!in_fuzzing_loop){ + //fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__); + set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0); + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + set_timeout_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + reset_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector)); + + perform_reload(); + + in_fuzzing_loop = false; +} + +void synchronization_lock_shutdown_detected(void){ + if(!in_fuzzing_loop){ + fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__); + set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0); + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + perform_reload(); + + in_fuzzing_loop = false; + //synchronization_lock(); +} + +void synchronization_payload_buffer_write_detected(void){ + static char reason[1024]; + + if(!in_fuzzing_loop){ + fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__); + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + int bytes = snprintf(reason, 1024, "Payload buffer write attempt at RIP: %lx\n", get_rip(qemu_get_cpu(0))); + set_payload_buffer_write_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, reason, bytes); + set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer); + + perform_reload(); + + in_fuzzing_loop = false; + //synchronization_lock(); +} + +void synchronization_cow_full_detected(void){ + if(!in_fuzzing_loop){ + fprintf(stderr, 
"<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__); + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + perform_reload(); + + in_fuzzing_loop = false; + //synchronization_lock(); +} + +void synchronization_disable_pt(CPUState *cpu){ + //fprintf(stderr, "==============> %s\n", __func__); + if(!in_fuzzing_loop){ + fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__); + set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0); + /* + qemu_backtrace(); + while(1){ + + } + */ + } + + pt_disable(qemu_get_cpu(0), false); + pt_sync(); + + handle_tmp_snapshot_state(); + + if(GET_GLOBAL_STATE()->in_reload_mode || GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->dump_page || fast_reload_tmp_created(get_fast_reload_snapshot())){ + perform_reload(); + } + + set_result_pt_trace_size(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->pt_trace_size); + set_result_bb_coverage(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->bb_coverage); + + + + + in_fuzzing_loop = false; +} + +void synchronization_enter_fuzzing_loop(CPUState *cpu){ + if (pt_enable(cpu, false) == 0){ + cpu->pt_enabled = true; + } + in_fuzzing_loop = true; + + reset_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector)); + //enable_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector)); +} + diff --git a/nyx/synchronization.h b/nyx/synchronization.h new file mode 100644 index 0000000000..8f378a8c69 --- /dev/null +++ b/nyx/synchronization.h @@ -0,0 +1,49 @@ +#pragma once + +#include "qemu/osdep.h" +#include + +typedef struct timeout_detector_s{ + int kvm_tid; + volatile bool reload_pending; + volatile bool detection_enabled; + + time_t timeout_sec; + suseconds_t timeout_usec; + + struct itimerval arm_timeout; + struct itimerval disarm_timeout; + + struct timespec start_time; + struct timespec end_time; +} timeout_detector_t; + +void init_timeout_detector(timeout_detector_t* timeout_detector); +void install_timeout_detector(timeout_detector_t* timeout_detector); +void reset_timeout_detector(timeout_detector_t* timeout_detector); +bool arm_sigprof_timer(timeout_detector_t* timeout_detector); +bool disarm_sigprof_timer(timeout_detector_t* timeout_detector); + +void update_itimer(timeout_detector_t* timeout_detector, uint8_t sec, uint32_t usec); + +void block_signals(void); +void unblock_signals(void); + + +void synchronization_unlock(void); + +void synchronization_lock_hprintf(void); + + +void synchronization_lock(void); +void synchronization_lock_crash_found(void); +void synchronization_lock_asan_found(void); +void synchronization_lock_timeout_found(void); +void synchronization_lock_shutdown_detected(void); +void synchronization_cow_full_detected(void); +void synchronization_disable_pt(CPUState *cpu); +void synchronization_enter_fuzzing_loop(CPUState *cpu); +void synchronization_payload_buffer_write_detected(void); + +void enable_timeout_detector(timeout_detector_t* timeout_detector); +void reset_timeout_detector_timeout(timeout_detector_t* timeout_detector); \ No newline at end of file diff --git a/os-posix.c b/os-posix.c index 86cffd2c7d..f874757935 100644 --- a/os-posix.c +++ b/os-posix.c @@ -75,7 +75,14 @@ void os_setup_signal_handling(void) memset(&act, 0, sizeof(act)); act.sa_sigaction = termsig_handler; act.sa_flags = SA_SIGINFO; +#ifndef QEMU_NYX sigaction(SIGINT, &act, NULL); +#else + /* don't install a special sighandler if the nyx block cow cache layer is disabled */ + 
if(getenv("NYX_DISABLE_BLOCK_COW")){ + sigaction(SIGINT, &act, NULL); + } +#endif sigaction(SIGHUP, &act, NULL); sigaction(SIGTERM, &act, NULL); } diff --git a/qemu-options.hx b/qemu-options.hx index 65c9473b73..3a6405513d 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -4251,6 +4251,16 @@ STEXI Enable FIPS 140-2 compliance mode. ETEXI +#ifdef QEMU_NYX +DEF("fast_vm_reload", HAS_ARG, QEMU_OPTION_fast_vm_reload, + "-fast_vm_reload snapshot-folder\n", QEMU_ARCH_ALL) +STEXI +@item -fast_vm_reload +@findex -fast_vm_reload +fast_vm_reload. +ETEXI +#endif + HXCOMM Deprecated by -accel tcg DEF("no-kvm", 0, QEMU_OPTION_no_kvm, "", QEMU_ARCH_I386) diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 69f518a21a..048c13d4f2 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -59,8 +59,17 @@ #include "hw/boards.h" #endif +#ifdef QEMU_NYX +#include "nyx/state.h" +#endif + #include "disas/capstone.h" +#ifdef QEMU_NYX +#define NYX_PT_CPU_MODEL "Intel Core (Haswell) NYX vCPU (PT)" +#define NYX_NO_PT_CPU_MODEL "Intel Core (Haswell) NYX vCPU (NO-PT)" +#endif + /* Helpers for building CPUID[2] descriptors: */ struct CPUID2CacheDescriptorInfo { @@ -1963,6 +1972,109 @@ static X86CPUDefinition builtin_x86_defs[] = { .xlevel = 0x80000008, .model_id = "Common KVM processor" }, +#ifdef QEMU_NYX + { + .name = "kAFL64-Hypervisor", /* CPU model used by KMV-PT / FDL */ + .level = 0xd, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 60, + .stepping = 4, + .features[FEAT_1_EDX] = + CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | + CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | + CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | + CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | + CPUID_DE | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | + CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | + CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | + CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | + CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | + CPUID_EXT_PCID | CPUID_EXT_F16C, // | CPUID_EXT_RDRAND, /* RDRAND breaks perl fuzzing (don't know why) */ + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | + CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | + CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | + CPUID_7_0_EBX_RTM, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | + MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PAT | + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | + MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | + 
VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | + VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | + VMX_PIN_BASED_VMX_PREEMPTION_TIMER , + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | + VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | + VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | + VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, + .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = NYX_PT_CPU_MODEL, /* don't touch */ + .versions = (X86CPUVersionDefinition[]) { + { .version = 1, + .props = (PropValue[]) { + { "vmx", "on" }, + { "vmx-ept", "on" }, + { /* end of list */ } + }, + }, + { + .version = 2, + .alias = "NYX64-Hypervisor", + .props = (PropValue[]) { + { "model-id", + NYX_NO_PT_CPU_MODEL, /* don't touch */ }, + { /* end of list */ } + } + }, + { /* end of list */ } + } + + }, +#endif { .name = "qemu32", .level = 4, @@ -4371,6 +4483,16 @@ static void x86_cpuid_set_model_id(Object *obj, const char *model_id, if (model_id == NULL) { model_id = ""; } +#ifdef QEMU_NYX + if(strncmp(model_id, NYX_PT_CPU_MODEL, strlen(NYX_PT_CPU_MODEL)) == 0 && GET_GLOBAL_STATE()->nyx_fdl == false){ + fprintf(stderr, "Warning: Attempt to use unsupported CPU model (PT) without KVM-PT (Hint: use '-cpu kAFL64-Hypervisor-v2' instead)\n"); + model_id = NYX_NO_PT_CPU_MODEL; + } + if(strncmp(model_id, NYX_NO_PT_CPU_MODEL, strlen(NYX_NO_PT_CPU_MODEL)) == 0 && GET_GLOBAL_STATE()->nyx_fdl == true){ + fprintf(stderr, "Error: Attempt to use unsupported CPU model (NO-PT) with KVM-PT (Hint: use '-cpu kAFL64-Hypervisor-v1' instead)\n"); + exit(1); + } +#endif len = strlen(model_id); memset(env->cpuid_model, 0, 48); for (i = 0; i < 48; i++) { diff --git a/target/i386/kvm.c b/target/i386/kvm.c index 1d10046a6c..d68196d8fb 100644 --- a/target/i386/kvm.c +++ b/target/i386/kvm.c @@ -50,6 +50,10 @@ #include "exec/memattrs.h" #include "trace.h" +#ifdef QEMU_NYX +#include "nyx/snapshot/devices/vm_change_state_handlers.h" +#endif + //#define DEBUG_KVM #ifdef DEBUG_KVM @@ -118,6 +122,10 @@ static bool has_msr_mcg_ext_ctl; static struct kvm_cpuid2 *cpuid_cache; static struct kvm_msr_list *kvm_feature_msrs; +#ifdef QEMU_NYX +int kvm_nyx_put_tsc_value(CPUState *cs, uint64_t data); +#endif + int kvm_has_pit_state2(void) { return has_pit_state2; @@ 
-183,6 +191,39 @@ bool kvm_hv_vpindex_settable(void) return hv_vpindex_settable; } +#ifdef QEMU_NYX +int kvm_nyx_put_tsc_value(CPUState *cs, uint64_t data){ + X86CPU *cpu = X86_CPU(cs); + CPUX86State *env = &cpu->env; + struct { + struct kvm_msrs info; + struct kvm_msr_entry entries[2]; + } msr_data = {}; + int ret; + + memset(&msr_data, 0, sizeof(msr_data)); + msr_data.info.nmsrs = 2; + msr_data.entries[0].index = MSR_IA32_TSC; + + /* NYX magic */ + msr_data.entries[0].data = 0x00004e59584e5958ULL; /* reset TSC */ + + msr_data.entries[1].index = MSR_IA32_TSC; + msr_data.entries[1].data = data; /* new TSC value */ + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data); + + if (ret < 0) { + printf("%s: failed\n", __func__); + return ret; + } + + env->tsc = data; + + return 0; +} +#endif + static int kvm_get_tsc(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); @@ -780,10 +821,12 @@ static int kvm_arch_set_tsc_khz(CPUState *cs) kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : -ENOTSUP; if (cur_freq <= 0 || cur_freq != env->tsc_khz) { +#ifndef QEMU_NYX warn_report("TSC frequency mismatch between " "VM (%" PRId64 " kHz) and host (%d kHz), " "and TSC scaling unavailable", env->tsc_khz, cur_freq); +#endif return r; } } @@ -1760,6 +1803,9 @@ int kvm_arch_init_vcpu(CPUState *cs) } qemu_add_vm_change_state_handler(cpu_update_state, env); +#ifdef QEMU_NYX + add_fast_reload_change_handler(cpu_update_state, env, RELOAD_HANDLER_KVM_CPU); +#endif c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0); if (c) { @@ -2938,13 +2984,15 @@ static int kvm_put_msrs(X86CPU *cpu, int level) return ret; } +#ifndef QEMU_NYX if (ret < cpu->kvm_msr_buf->nmsrs) { struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, (uint32_t)e->index, (uint64_t)e->data); } - assert(ret == cpu->kvm_msr_buf->nmsrs); +#endif + return 0; } @@ -3918,10 +3966,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level) if (ret < 0) { return ret; } +#ifndef QEMU_NYX ret = kvm_put_debugregs(x86_cpu); if (ret < 0) { return ret; } +#endif /* must be last */ ret = kvm_guest_debug_workarounds(x86_cpu); if (ret < 0) { @@ -3930,6 +3980,63 @@ int kvm_arch_put_registers(CPUState *cpu, int level) return 0; } +#ifdef QEMU_NYX +int kvm_arch_get_registers_fast(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + int ret = 0; + //fprintf(stderr, "%s - kvm_getput_regs\n", __func__); + ret = kvm_getput_regs(x86_cpu, 0); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_getput_regs failed!\n", __func__); + return ret; + } + + //fprintf(stderr, "%s - kvm_get_xsave\n", __func__); + ret = kvm_get_xsave(x86_cpu); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_get_xsave failed!\n", __func__); + return ret; + } + + //fprintf(stderr, "%s - kvm_get_xcrs\n", __func__); + ret = kvm_get_xcrs(x86_cpu); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_get_xcrs failed!\n", __func__); + return ret; + } + + //fprintf(stderr, "%s - kvm_get_sregs\n", __func__); + ret = kvm_get_sregs(x86_cpu); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_get_sregs failed!\n", __func__); + return ret; + } + + //fprintf(stderr, "%s - kvm_get_msrs\n", __func__); + ret = kvm_get_msrs(x86_cpu); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_get_msrs failed!\n", __func__); + return ret; + } + + //fprintf(stderr, "%s - kvm_get_mp_state\n", __func__); + ret = kvm_get_mp_state(x86_cpu); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_get_mp_state failed!\n", __func__); + return ret; + } + + //fprintf(stderr, "%s - 
kvm_get_apic\n", __func__); + ret = kvm_get_apic(x86_cpu); + if (ret < 0) { + fprintf(stderr, "%s - WARNING: kvm_get_apic failed!\n", __func__); + return ret; + } + return ret; +} +#endif + int kvm_arch_get_registers(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); diff --git a/util/bitmap.c b/util/bitmap.c index 1753ff7f5b..c20132ba37 100644 --- a/util/bitmap.c +++ b/util/bitmap.c @@ -287,6 +287,55 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr) return dirty != 0; } +#ifdef QEMU_NYX +bool bitmap_test_atomic(unsigned long *map, long start, long nr) +{ + unsigned long *p = map + BIT_WORD(start); + const long size = start + nr; + int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); + unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); + unsigned long dirty = 0; + unsigned long old_bits; + + assert(start >= 0 && nr >= 0); + + /* First word */ + if (nr - bits_to_clear > 0) { + old_bits = atomic_fetch_and(p, ULONG_MAX); + dirty |= old_bits & mask_to_clear; + nr -= bits_to_clear; + bits_to_clear = BITS_PER_LONG; + mask_to_clear = ~0UL; + p++; + } + + /* Full words */ + if (bits_to_clear == BITS_PER_LONG) { + while (nr >= BITS_PER_LONG) { + if (*p) { + old_bits = atomic_xchg(p, 0); + dirty |= old_bits; + } + nr -= BITS_PER_LONG; + p++; + } + } + + /* Last word */ + if (nr) { + mask_to_clear &= BITMAP_LAST_WORD_MASK(size); + old_bits = atomic_fetch_and(p, ULONG_MAX); + dirty |= old_bits & mask_to_clear; + } else { + if (!dirty) { + smp_mb(); + } + } + + return dirty != 0; +} +#endif + void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src, long nr) { diff --git a/util/main-loop.c b/util/main-loop.c index eda63fe4e0..df8e7320db 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -88,7 +88,9 @@ static int qemu_signal_init(Error **errp) sigemptyset(&set); sigaddset(&set, SIG_IPI); sigaddset(&set, SIGIO); +#ifndef QEMU_NYX sigaddset(&set, SIGALRM); +#endif sigaddset(&set, SIGBUS); /* SIGINT cannot be handled via signalfd, so that ^C can be used * to interrupt QEMU when it is being run under gdb. 
SIGHUP and diff --git a/vl.c b/vl.c index 6a65a64bfd..c6d0247da9 100644 --- a/vl.c +++ b/vl.c @@ -132,6 +132,15 @@ int main(int argc, char **argv) #include "sysemu/iothread.h" #include "qemu/guest-random.h" +#ifdef QEMU_NYX +#include "nyx/pt.h" +#include "nyx/hypercall.h" +#include "nyx/synchronization.h" +#include "nyx/fast_vm_reload.h" +#include "nyx/state.h" +#include "nyx/fast_vm_reload_sync.h" +#endif + #define MAX_VIRTIO_CONSOLES 1 static const char *data_dir[16]; @@ -240,6 +249,32 @@ static struct { { .driver = "vhost-user-vga", .flag = &default_vga }, }; +#ifdef QEMU_NYX +static QemuOptsList qemu_fast_vm_reloads_opts = { + .name = "fast_vm_reload-opts", + .implied_opt_name = "order", + .head = QTAILQ_HEAD_INITIALIZER(qemu_fast_vm_reloads_opts.head), + .merge_lists = true, + .desc = { + { + .name = "path", + .type = QEMU_OPT_STRING, + },{ + .name = "load", + .type = QEMU_OPT_BOOL, + },{ + .name = "pre_path", + .type = QEMU_OPT_STRING, + },{ + .name = "skip_serialization", + .type = QEMU_OPT_BOOL, + }, + { } + }, +}; +#endif + + static QemuOptsList qemu_rtc_opts = { .name = "rtc", .head = QTAILQ_HEAD_INITIALIZER(qemu_rtc_opts.head), @@ -1437,6 +1472,10 @@ void vm_state_notify(int running, RunState state) } } +#ifdef QEMU_NYX +char* loadvm_global = NULL; +#endif + static ShutdownCause reset_requested; static ShutdownCause shutdown_requested; static int shutdown_signal; @@ -1611,6 +1650,13 @@ void qemu_system_guest_panicked(GuestPanicInformation *info) void qemu_system_reset_request(ShutdownCause reason) { +#ifdef QEMU_NYX + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + fprintf(stderr, "%s!\n", __func__); + GET_GLOBAL_STATE()->shutdown_requested = true; + return; + } +#endif if (no_reboot && reason != SHUTDOWN_CAUSE_SUBSYSTEM_RESET) { shutdown_requested = reason; } else { @@ -1630,6 +1676,13 @@ static void qemu_system_suspend(void) void qemu_system_suspend_request(void) { +#ifdef CONFIG_PROCESSOR_TRACE + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + fprintf(stderr, "%s!\n", __func__); + GET_GLOBAL_STATE()->shutdown_requested = true; + return; + } +#endif if (runstate_check(RUN_STATE_SUSPENDED)) { return; } @@ -1699,6 +1752,13 @@ void qemu_system_killed(int signal, pid_t pid) void qemu_system_shutdown_request(ShutdownCause reason) { +#ifdef CONFIG_PROCESSOR_TRACE + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + fprintf(stderr, "%s!\n", __func__); + GET_GLOBAL_STATE()->shutdown_requested = true; + return; + } +#endif trace_qemu_system_shutdown_request(reason); replay_shutdown_request(reason); shutdown_requested = reason; @@ -1719,6 +1779,13 @@ static void qemu_system_shutdown(ShutdownCause cause) void qemu_system_powerdown_request(void) { +#ifdef CONFIG_PROCESSOR_TRACE + if(GET_GLOBAL_STATE()->in_fuzzing_mode){ + fprintf(stderr, "%s!\n", __func__); + GET_GLOBAL_STATE()->shutdown_requested = true; + return; + } +#endif trace_qemu_system_powerdown_request(); powerdown_requested = 1; qemu_notify_event(); @@ -1793,6 +1860,11 @@ static bool main_loop_should_exit(void) qemu_system_powerdown(); } if (qemu_vmstop_requested(&r)) { +#ifdef QEMU_NYX + if(check_if_relood_request_exists_post(GET_GLOBAL_STATE()->reload_state)){ + return false; + } +#endif vm_stop(r); } return false; @@ -1816,8 +1888,13 @@ static void main_loop(void) static void version(void) { +#ifdef QEMU_NYX + printf("QEMU-PT emulator version " QEMU_VERSION QEMU_PKGVERSION " (kAFL)\n" + QEMU_COPYRIGHT "\n"); +#else printf("QEMU emulator version " QEMU_FULL_VERSION "\n" QEMU_COPYRIGHT "\n"); +#endif } static void help(int exitcode) @@ -2715,6 
+2792,26 @@ static bool object_create_delayed(const char *type, QemuOpts *opts) return !object_create_initial(type, opts); } +#ifdef QEMU_NYX +static bool verify_snapshot_folder(const char* folder){ + struct stat s; + + if(!folder){ + return false; + } + if(-1 != stat(folder, &s)) { + if(S_ISDIR(s.st_mode)) { + return true; + } + else{ + error_report("fast_vm_reload: path is not a folder"); + exit(1); + } + } + error_report("fast_vm_reload: path does not exist"); + exit(1); +} +#endif static void set_memory_options(uint64_t *ram_slots, ram_addr_t *maxram_size, MachineClass *mc) @@ -2827,6 +2924,12 @@ static void user_register_global_props(void) int main(int argc, char **argv, char **envp) { + +#ifdef QEMU_NYX + bool fast_vm_reload = false; + state_init_global(); +#endif + int i; int snapshot, linux_boot; const char *initrd_filename; @@ -2887,6 +2990,9 @@ int main(int argc, char **argv, char **envp) qemu_add_opts(&qemu_netdev_opts); qemu_add_opts(&qemu_nic_opts); qemu_add_opts(&qemu_net_opts); +#ifdef QEMU_NYX + qemu_add_opts(&qemu_fast_vm_reloads_opts); +#endif qemu_add_opts(&qemu_rtc_opts); qemu_add_opts(&qemu_global_opts); qemu_add_opts(&qemu_mon_opts); @@ -2974,6 +3080,15 @@ int main(int argc, char **argv, char **envp) exit(1); } switch(popt->index) { +#ifdef QEMU_NYX + case QEMU_OPTION_fast_vm_reload: + opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"), + optarg, true); if (!opts) { + exit(1); + } + fast_vm_reload = true; + break; +#endif case QEMU_OPTION_cpu: /* hw initialization will check this */ cpu_option = optarg; @@ -3429,6 +3544,9 @@ int main(int argc, char **argv, char **envp) break; case QEMU_OPTION_loadvm: loadvm = optarg; +#ifdef QEMU_NYX + loadvm_global = (char*)optarg; +#endif break; case QEMU_OPTION_full_screen: dpy.has_full_screen = true; @@ -3868,6 +3986,11 @@ int main(int argc, char **argv, char **envp) exit(1); } +#ifdef QEMU_NYX + block_signals(); +#endif + + #ifdef CONFIG_SECCOMP olist = qemu_find_opts_err("sandbox", NULL); if (olist) { @@ -4435,6 +4558,112 @@ int main(int argc, char **argv, char **envp) replay_checkpoint(CHECKPOINT_RESET); qemu_system_reset(SHUTDOWN_CAUSE_NONE); register_global_state(); + +#ifdef QEMU_NYX + fast_reload_init(GET_GLOBAL_STATE()->fast_reload_snapshot); + + if (fast_vm_reload){ + + if(getenv("NYX_DISABLE_BLOCK_COW")){ + fprintf(stderr, "ERROR: Nyx block COW cache layer cannot be disabled while using fast snapshots\n"); + exit(1); + } + + QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"), optarg, true); + const char* snapshot_path = qemu_opt_get(opts, "path"); + const char* pre_snapshot_path = qemu_opt_get(opts, "pre_path"); + + /* + + valid arguments: + // create root snapshot to path (load pre_snapshot first) + -> path=foo,pre_path=bar,load=off // ALLOWED + // create root snapshot in memory (load pre_snapshot first) + -> pre_path=bar,load=off,skip_serialization // ALLOWED + // create root snapshot to path + -> path=foo,load=off // ALLOWED + // load root snapshot from path + -> path=foo,load=on // ALLOWED + // create pre snapshot to pre_path + -> pre_path=bar,load=off // ALLOWED + + invalid arguments: + -> load=off // allowed, but useless + -> path=foo,pre_path=bar,load=on // INVALID + -> pre_path=bar,load=on // INVALID + -> load=on // INVALID + */ + + bool snapshot_used = verify_snapshot_folder(snapshot_path); + bool pre_snapshot_used = verify_snapshot_folder(pre_snapshot_path); + bool load_mode = qemu_opt_get_bool(opts, "load", false); + bool skip_serialization = qemu_opt_get_bool(opts, "skip_serialization", false); + +
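/* example invocations (hypothetical paths): + * -fast_vm_reload pre_path=/tmp/pre,load=off -> boot the guest and create a pre snapshot + * -fast_vm_reload path=/tmp/snap,load=off -> boot the guest and serialize a root snapshot to disk + * -fast_vm_reload path=/tmp/snap,load=on -> resume fuzzing from an existing root snapshot */ + 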
if((snapshot_used || load_mode || skip_serialization) && getenv("NYX_DISABLE_DIRTY_RING")){ + fprintf(stderr, "ERROR: NYX_DISABLE_DIRTY_RING is only allowed during pre-snapshot creation\n"); + exit(1); + } + + if((pre_snapshot_used && !snapshot_used && !load_mode) && !getenv("NYX_DISABLE_DIRTY_RING")){ + fprintf(stderr, "ERROR: NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n"); + exit(1); + } + + if(pre_snapshot_used && load_mode){ + fprintf(stderr, "[!] qemu-nyx: invalid argument (pre_snapshot_used && load_mode)!\n"); + exit(1); + } + + if((!snapshot_used && !pre_snapshot_used) && load_mode){ + fprintf(stderr, "[!] qemu-nyx: invalid argument ((!snapshot_used && !pre_snapshot_used) && load_mode)!\n"); + exit(1); + } + + if(pre_snapshot_used && snapshot_used){ + fprintf(stderr, "[!] qemu-nyx: loading pre image to start fuzzing...\n"); + set_fast_reload_mode(false); + set_fast_reload_path(snapshot_path); + if(!skip_serialization){ + enable_fast_reloads(); + } + fast_reload_create_from_file_pre_image(get_fast_reload_snapshot(), pre_snapshot_path, false); + fast_reload_destroy(get_fast_reload_snapshot()); + GET_GLOBAL_STATE()->fast_reload_snapshot = fast_reload_new(); + fast_reload_init(GET_GLOBAL_STATE()->fast_reload_snapshot); + } + else{ + if(pre_snapshot_used){ + fprintf(stderr, "[!] qemu-nyx: preparing to create pre image...\n"); + set_fast_reload_pre_path(pre_snapshot_path); + set_fast_reload_pre_image(); + } + else if(snapshot_used){ + set_fast_reload_path(snapshot_path); + if(!skip_serialization){ + enable_fast_reloads(); + } + if (load_mode){ + set_fast_reload_mode(true); + fprintf(stderr, "[!] qemu-nyx: waiting for snapshot to start fuzzing...\n"); + fast_reload_create_from_file(get_fast_reload_snapshot(), snapshot_path, false); + //cpu_synchronize_all_post_reset(); + set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3); + skip_init(); + //GET_GLOBAL_STATE()->pt_trace_mode = false; + } + else{ + fprintf(stderr, "[!] qemu-nyx: Booting to start fuzzing...\n"); + set_fast_reload_mode(false); + } + } + } + } +#endif + if (loadvm) { Error *local_err = NULL; if (load_snapshot(loadvm, &local_err) < 0) {