Initial Release of Nyx

Co-authored-by: Cornelius Aschermann <cornelius@hexgolems.com>
Sergej Schumilo 2021-10-10 19:34:38 +00:00
parent b0ca999a43
commit cd702b528c
112 changed files with 17385 additions and 89 deletions

.gitignore vendored (4 changed lines)

@@ -1,3 +1,4 @@
/roms/vgabios/
/.doctrees
/config-devices.*
/config-all-devices.*
@@ -162,3 +163,6 @@ trace-dtrace-root.dtrace
trace-ust-all.h
trace-ust-all.c
/target/arm/decode-sve.inc.c
/libxdc
/capstone_v4


@@ -1,83 +0,0 @@
before_script:
- apt-get update -qq
- apt-get install -y -qq flex bison libglib2.0-dev libpixman-1-dev genisoimage
build-system1:
script:
- apt-get install -y -qq libgtk-3-dev libvte-dev nettle-dev libcacard-dev
libusb-dev libvde-dev libspice-protocol-dev libgl1-mesa-dev libvdeplug-dev
- ./configure --enable-werror --target-list="aarch64-softmmu alpha-softmmu
cris-softmmu hppa-softmmu lm32-softmmu moxie-softmmu microblazeel-softmmu
mips64el-softmmu m68k-softmmu ppc-softmmu riscv64-softmmu sparc-softmmu"
- make -j2
- make -j2 check
build-system2:
script:
- apt-get install -y -qq libsdl2-dev libgcrypt-dev libbrlapi-dev libaio-dev
libfdt-dev liblzo2-dev librdmacm-dev libibverbs-dev libibumad-dev
- ./configure --enable-werror --target-list="tricore-softmmu unicore32-softmmu
microblaze-softmmu mips-softmmu riscv32-softmmu s390x-softmmu sh4-softmmu
sparc64-softmmu x86_64-softmmu xtensa-softmmu nios2-softmmu or1k-softmmu"
- make -j2
- make -j2 check
build-disabled:
script:
- ./configure --enable-werror --disable-rdma --disable-slirp --disable-curl
--disable-capstone --disable-live-block-migration --disable-glusterfs
--disable-replication --disable-coroutine-pool --disable-smartcard
--disable-guest-agent --disable-curses --disable-libxml2 --disable-tpm
--disable-qom-cast-debug --disable-spice --disable-vhost-vsock
--disable-vhost-net --disable-vhost-crypto --disable-vhost-user
--target-list="i386-softmmu ppc64-softmmu mips64-softmmu i386-linux-user"
- make -j2
- make -j2 check-qtest SPEED=slow
build-tcg-disabled:
script:
- apt-get install -y -qq clang libgtk-3-dev libbluetooth-dev libusb-dev
- ./configure --cc=clang --enable-werror --disable-tcg --audio-drv-list=""
- make -j2
- make check-unit
- make check-qapi-schema
- cd tests/qemu-iotests/
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
052 063 077 086 101 104 106 113 147 148 150 151 152 157 159 160
163 170 171 183 184 192 194 197 205 208 215 221 222 226 227 236
- ./check -qcow2 028 040 051 056 057 058 065 067 068 082 085 091 095 096 102
122 124 127 129 132 139 142 144 145 147 151 152 155 157 165 194
196 197 200 202 203 205 208 209 215 216 218 222 227 234 246 247
248 250 254 255 256
build-user:
script:
- ./configure --enable-werror --disable-system --disable-guest-agent
--disable-capstone --disable-slirp --disable-fdt
- make -j2
- make run-tcg-tests-i386-linux-user run-tcg-tests-x86_64-linux-user
build-clang:
script:
- apt-get install -y -qq clang libsdl2-dev libattr1-dev libcap-dev
xfslibs-dev libiscsi-dev libnfs-dev libseccomp-dev gnutls-dev librbd-dev
- ./configure --cc=clang --cxx=clang++ --enable-werror
--target-list="alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu
ppc-softmmu s390x-softmmu x86_64-softmmu arm-linux-user"
- make -j2
- make -j2 check
build-tci:
script:
- TARGETS="aarch64 alpha arm hppa m68k microblaze moxie ppc64 s390x x86_64"
- ./configure --enable-tcg-interpreter
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
- make -j2
- make tests/boot-serial-test tests/cdrom-test tests/pxe-test
- for tg in $TARGETS ; do
export QTEST_QEMU_BINARY="${tg}-softmmu/qemu-system-${tg}" ;
./tests/boot-serial-test || exit 1 ;
./tests/cdrom-test || exit 1 ;
done
- QTEST_QEMU_BINARY="x86_64-softmmu/qemu-system-x86_64" ./tests/pxe-test
- QTEST_QEMU_BINARY="s390x-softmmu/qemu-system-s390x" ./tests/pxe-test -m slow


@@ -157,6 +157,7 @@ obj-y += dump/
obj-y += hw/
obj-y += monitor/
obj-y += qapi/
obj-$(CONFIG_QEMU_NYX) += nyx/
obj-y += memory.o
obj-y += memory_mapping.o
obj-y += migration/ram.o

README.md Normal file (29 changed lines)

@@ -0,0 +1,29 @@
# QEMU-NYX
This repository contains Nyx's fork of QEMU. To enable hypervisor-based snapshots, Intel-PT tracing, and Redqueen-style magic-byte resolution, we made various extensions to QEMU. These include the ability to quickly reset memory and devices, to obtain a precise disassembly of the running code (even when the code is partially swapped out or unavailable) combined with Intel-PT decoding, to instrument code running in the VM with breakpoint-based hooks, and to communicate with a fuzzing frontend (e.g., one based on libnyx).
You can find more detailed information in our main repository.
<p>
<img align="right" width="200" src="logo.png">
</p>
## Build
```
sh compile_qemu_nyx.sh
```
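The script installs libxdc and capstone (v4 branch) from source if they are missing, then configures and builds this QEMU fork with Nyx enabled. A rough sketch of the manual equivalent (the authoritative steps are in compile_qemu_nyx.sh, included in this commit):
```
./configure --target-list=x86_64-softmmu --enable-gtk --disable-werror \
    --disable-capstone --disable-libssh --enable-nyx --disable-tools
make -j
```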
## Bug Reports and Contributions
If you found and fixed a bug on your own: we are very open to patches, so please create a pull request!
### License
This tool is provided under the **AGPL license**.
**Free Software Hell Yeah!**
Proudly provided by:
* [Sergej Schumilo](http://schumilo.de) - sergej@schumilo.de / [@ms_s3c](https://twitter.com/ms_s3c)
* [Cornelius Aschermann](https://hexgolems.com) - cornelius@hexgolems.com / [@is_eqv](https://twitter.com/is_eqv)


@@ -44,6 +44,18 @@
#include "hw/boards.h"
#ifdef QEMU_NYX
#include "nyx/pt.h"
#include "nyx/hypercall.h"
#include "nyx/synchronization.h"
#include "nyx/debug.h"
#include "nyx/state.h"
#include "nyx/interface.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/helpers.h"
#endif
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
@@ -76,6 +88,10 @@ struct KVMState
{
AccelState parent_obj;
#ifdef QEMU_NYX
bool nyx_no_pt_mode;
#endif
int nr_slots;
int fd;
int vmfd;
@@ -363,6 +379,16 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}
#ifdef QEMU_NYX
int kvm_get_vm_fd(KVMState *s){
return s->vmfd;
}
KVMMemoryListener* kvm_get_kml(int as_id){
return kvm_state->as[as_id].ml;
}
#endif
int kvm_init_vcpu(CPUState *cpu)
{
KVMState *s = kvm_state;
@@ -381,6 +407,16 @@ int kvm_init_vcpu(CPUState *cpu)
cpu->kvm_state = s;
cpu->vcpu_dirty = true;
#ifdef QEMU_NYX
if(s->nyx_no_pt_mode){
if(!getenv("NYX_DISABLE_DIRTY_RING")){
nyx_dirty_ring_pre_init(cpu->kvm_fd, s->vmfd);
}
}
pt_kvm_init(cpu);
install_timeout_detector(&GET_GLOBAL_STATE()->timeout_detector);
#endif
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
ret = mmap_size;
@@ -402,6 +438,11 @@ int kvm_init_vcpu(CPUState *cpu)
}
ret = kvm_arch_init_vcpu(cpu);
#ifdef QEMU_NYX
unblock_signals();
#endif
err:
return ret;
}
@@ -1874,7 +1915,62 @@ static int kvm_init(MachineState *ms)
ret = -errno;
goto err;
}
#ifdef QEMU_NYX
if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 && ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1) {
fprintf(stderr, "[!] Could not access KVM-PT kernel module!\n [*] Trying vanilla KVM...\n");
/* fallback -> use vanilla KVM module instead (no Intel-PT tracing or nested hypercalls at this point) */
s->fd = qemu_open("/dev/kvm", O_RDWR);
if (s->fd == -1) {
fprintf(stderr, "Error: NYX fallback failed: Could not access vanilla KVM module!\n");
ret = -errno;
goto err;
}
int ret_val = ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if(ret_val == -1 || ret_val == 0){
fprintf(stderr, "Error: NYX requires support for KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
ret = -errno;
goto err;
}
/* check for vmware_backdoor support */
int fd = open("/sys/module/kvm/parameters/enable_vmware_backdoor", O_RDONLY);
if(fd == -1){
fprintf(stderr, "ERROR: /sys/module/kvm/parameters/enable_vmware_backdoor file not found...\n");
ret = -errno;
goto err;
}
char vmware_backdoor_option = 0;
assert(read(fd, &vmware_backdoor_option, 1) == 1);
close(fd);
if(vmware_backdoor_option == 'N'){
fprintf(stderr, "\nERROR: vmware backdoor is not enabled...\n");
fprintf(stderr, "\n\tRun the following commands to fix the issue:\n");
fprintf(stderr, "\t-----------------------------------------\n");
fprintf(stderr, "\tsudo modprobe -r kvm-intel\n");
fprintf(stderr, "\tsudo modprobe -r kvm\n");
fprintf(stderr, "\tsudo modprobe kvm enable_vmware_backdoor=y\n");
fprintf(stderr, "\tsudo modprobe kvm-intel\n");
fprintf(stderr, "\tcat /sys/module/kvm/parameters/enable_vmware_backdoor\n");
fprintf(stderr, "\t-----------------------------------------\n\n");
ret = -errno;
goto err;
}
fprintf(stderr, "NYX runs in fallback mode (no Intel-PT tracing or nested hypercall support)!\n");
s->nyx_no_pt_mode = true;
GET_GLOBAL_STATE()->nyx_fdl = false;
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DIRTY_RING);
}
else{
s->nyx_no_pt_mode = false;
GET_GLOBAL_STATE()->nyx_fdl = true;
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_FDL);
}
#endif
ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
if (ret < KVM_API_VERSION) {
if (ret >= 0) {
@@ -1939,6 +2035,18 @@ static int kvm_init(MachineState *ms)
s->vmfd = ret;
#ifdef QEMU_NYX
if(s->nyx_no_pt_mode){
if(getenv("NYX_DISABLE_DIRTY_RING")){
fprintf(stderr, "WARNING: Nyx has disabled KVM's dirty-ring (required to enable full VGA support during pre-snapshot creation procedure)\n");
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DEBUG_QUIET); /* required to create snapshot */
}
else{
nyx_dirty_ring_early_init(s->fd, s->vmfd);
}
}
#endif
/* check the vcpu limits */
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
@@ -1978,7 +2086,16 @@ static int kvm_init(MachineState *ms)
s->manual_dirty_log_protect =
kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
if (s->manual_dirty_log_protect) {
#ifndef QEMU_NYX
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
#else
if(s->nyx_no_pt_mode){
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
}
else{
ret = 0;
}
#endif
if (ret) {
warn_report("Trying to enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 "
"but failed. Falling back to the legacy mode. ");
@@ -2268,6 +2385,17 @@ static void kvm_eat_signals(CPUState *cpu)
} while (sigismember(&chkset, SIG_IPI));
}
#ifdef QEMU_NYX
static int handle_vmware_hypercall(struct kvm_run *run, CPUState *cpu){
kvm_arch_get_registers_fast(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
return handle_kafl_hypercall(run, cpu, env->regs[R_EBX]+100, env->regs[R_ECX]);
}
#endif
int kvm_cpu_exec(CPUState *cpu)
{
struct kvm_run *run = cpu->kvm_run;
@@ -2283,6 +2411,15 @@ int kvm_cpu_exec(CPUState *cpu)
qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
#ifdef QEMU_NYX
static bool timeout_reload_pending = false;
if(timeout_reload_pending){
synchronization_lock_timeout_found();
}
timeout_reload_pending = false;
#endif
do {
MemTxAttrs attrs;
@@ -2302,15 +2439,39 @@ int kvm_cpu_exec(CPUState *cpu)
kvm_cpu_kick_self();
}
#ifdef QEMU_NYX
if(!kvm_state->nyx_no_pt_mode){
pt_pre_kvm_run(cpu);
}
#endif
/* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
* Matching barrier in kvm_eat_signals.
*/
smp_rmb();
#ifdef QEMU_NYX
if(arm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector)){
assert(false);
}
#endif
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
#ifdef QEMU_NYX
if (disarm_sigprof_timer(&GET_GLOBAL_STATE()->timeout_detector)){
timeout_reload_pending = true;
}
#endif
attrs = kvm_arch_post_run(cpu, run);
#ifdef QEMU_NYX
if(!kvm_state->nyx_no_pt_mode){
pt_post_kvm_run(cpu);
}
#endif
#ifdef KVM_HAVE_MCE_INJECTION
if (unlikely(have_sigbus_pending)) {
qemu_mutex_lock_iothread();
@@ -2328,8 +2489,25 @@
ret = EXCP_INTERRUPT;
break;
}
#ifndef QEMU_NYX
fprintf(stderr, "error: kvm run failed %s\n",
strerror(-run_ret));
#else
if(run_ret == -EFAULT){
if(GET_GLOBAL_STATE()->protect_payload_buffer && GET_GLOBAL_STATE()->in_fuzzing_mode){
/* Fuzzing is enabled at this point -> don't exit */
synchronization_payload_buffer_write_detected();
ret = 0;
break;
}
}
fprintf(stderr, "QEMU-PT: error: kvm run failed %s\n",
strerror(-run_ret));
qemu_backtrace();
#endif
#ifdef TARGET_PPC
if (run_ret == -EBUSY) {
fprintf(stderr,
@@ -2346,6 +2524,15 @@ int kvm_cpu_exec(CPUState *cpu)
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
#ifdef QEMU_NYX
if(run->io.port == 0x5658 && run->io.size == 4 && *((uint32_t*)((uint8_t *)run + run->io.data_offset)) == 0x8080801f) {
assert(kvm_state->nyx_no_pt_mode);
ret = handle_vmware_hypercall(run, cpu);
break;
}
#endif
/* Called outside BQL */
kvm_handle_io(run->io.port, attrs,
(uint8_t *)run + run->io.data_offset,
@@ -2370,33 +2557,116 @@ int kvm_cpu_exec(CPUState *cpu)
break;
case KVM_EXIT_SHUTDOWN:
DPRINTF("shutdown\n");
#ifndef QEMU_NYX
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
#else
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_EXIT_SHUTDOWN)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
}
#endif
break;
case KVM_EXIT_UNKNOWN:
fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
(uint64_t)run->hw.hardware_exit_reason);
#ifdef QEMU_NYX
assert(false);
#endif
ret = -1;
break;
case KVM_EXIT_INTERNAL_ERROR:
ret = kvm_handle_internal_error(cpu, run);
break;
#ifdef QEMU_NYX
case KVM_EXIT_DIRTY_RING_FULL:
//printf("[*] WARNING: KVM_EXIT_DIRTY_RING_FULL\n");
fast_reload_handle_dirty_ring_full(get_fast_reload_snapshot());
ret = 0;
break;
case KVM_EXIT_KAFL_ACQUIRE ... (KVM_EXIT_KAFL_ACQUIRE+100):
ret = handle_kafl_hypercall(run, cpu, (uint64_t)run->exit_reason, (uint64_t)run->hypercall.args[0]);
break;
case KVM_EXIT_DEBUG:
kvm_arch_get_registers(cpu);
if(!handle_hypercall_kafl_hook(run, cpu, (uint64_t)run->hypercall.args[0])){
ret = kvm_arch_handle_exit(cpu, run);
}
else {
ret = 0;
}
break;
#endif
case KVM_EXIT_SYSTEM_EVENT:
switch (run->system_event.type) {
case KVM_SYSTEM_EVENT_SHUTDOWN:
#ifndef QEMU_NYX
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
ret = EXCP_INTERRUPT;
#else
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_SHUTDOWN)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
ret = EXCP_INTERRUPT;
}
#endif
break;
case KVM_SYSTEM_EVENT_RESET:
#ifndef QEMU_NYX
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
#else
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_RESET)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
ret = EXCP_INTERRUPT;
}
#endif
break;
case KVM_SYSTEM_EVENT_CRASH:
#ifndef QEMU_NYX
kvm_cpu_synchronize_state(cpu);
qemu_mutex_lock_iothread();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
qemu_mutex_unlock_iothread();
ret = 0;
#else
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_CRASH)!\n");
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
/* Fuzzing is enabled at this point -> don't exit */
handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
ret = 0;
}
else{
kvm_cpu_synchronize_state(cpu);
qemu_mutex_lock_iothread();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
qemu_mutex_unlock_iothread();
ret = 0;
}
#endif
break;
default:
DPRINTF("kvm_arch_handle_exit\n");
@@ -2405,21 +2675,65 @@ int kvm_cpu_exec(CPUState *cpu)
}
break;
default:
#ifndef QEMU_NYX
DPRINTF("kvm_arch_handle_exit\n");
#else
printf("kvm_arch_handle_exit => %d\n", run->exit_reason);
assert(false);
#endif
ret = kvm_arch_handle_exit(cpu, run);
break;
}
#ifdef QEMU_NYX
if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->cow_cache_full){
synchronization_cow_full_detected();
GET_GLOBAL_STATE()->cow_cache_full = false;
ret = 0;
}
else{
if(GET_GLOBAL_STATE()->in_fuzzing_mode && cpu->halted){
fprintf(stderr, "%s: Attempt to halt CPU -> FUCK OFF!\n", __func__);
cpu->halted = 0;
GET_GLOBAL_STATE()->shutdown_requested = true;
}
if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->shutdown_requested){
/* Fuzzing is enabled at this point -> don't exit */
fprintf(stderr, "shutdown_requested -> calling handle_hypercall_kafl_release\n");
//synchronization_lock_shutdown_detected();
synchronization_lock_crash_found();
GET_GLOBAL_STATE()->shutdown_requested = false;
ret = 0;
}
}
if(reload_request_exists(GET_GLOBAL_STATE()->reload_state)){
break;
}
#endif
} while (ret == 0);
cpu_exec_end(cpu);
qemu_mutex_lock_iothread();
if (ret < 0) {
#ifdef QEMU_NYX
fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (ret < 0)!\n");
#endif
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
}
atomic_set(&cpu->exit_request, 0);
#ifdef QEMU_NYX
if(check_if_relood_request_exists_pre(GET_GLOBAL_STATE()->reload_state)){
pause_all_vcpus(); /* performance boost ??? */
}
#endif
return ret;
}
@@ -2546,6 +2860,12 @@ int kvm_device_access(int fd, int group, uint64_t attr,
return err;
}
#ifdef QEMU_NYX
int kvm_has_vapic(void){
return !kvm_check_extension(kvm_state, KVM_CAP_VAPIC);
}
#endif
bool kvm_has_sync_mmu(void)
{
return kvm_state->sync_mmu;
@@ -2815,6 +3135,9 @@ void kvm_init_cpu_signals(CPUState *cpu)
pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
sigdelset(&set, SIG_IPI);
#ifdef QEMU_NYX
sigdelset(&set, SIGALRM);
#endif
if (kvm_immediate_exit) {
r = pthread_sigmask(SIG_SETMASK, &set, NULL);
} else {
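For reference, the KVM_EXIT_IO hunk earlier in this diff implements Nyx's no-PT fallback hypercall path: a 4-byte write of the magic value 0x8080801f to the VMware backdoor port 0x5658 is translated into handle_kafl_hypercall(), with the hypercall number taken from rBX (offset by 100 into the KVM_EXIT_KAFL_* range) and the argument from rCX. A minimal guest-side sketch assuming this ABI (the helper name is hypothetical):
```c
/* Hypothetical guest-side helper for the no-PT fallback mode: triggers
 * a VMware-backdoor I/O exit that the handler above dispatches as
 * handle_kafl_hypercall(run, cpu, rBX + 100, rCX). */
static inline void nyx_hypercall_no_pt(unsigned long id, unsigned long arg)
{
    asm volatile ("outl %%eax, %%dx"
                  :
                  : "a" (0x8080801f),  /* magic value checked by the handler */
                    "b" (id),          /* 0 maps to KVM_EXIT_KAFL_ACQUIRE (100) */
                    "c" (arg),
                    "d" ((unsigned short)0x5658) /* VMware backdoor port */
                  : "memory");
}
```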


@@ -28,6 +28,10 @@
#include "trace.h"
#include "migration/misc.h"
#ifdef QEMU_NYX
#include "nyx/snapshot/block/block_cow.h"
#endif
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64
@@ -42,6 +46,7 @@ typedef struct BlockBackendAioNotifier {
QLIST_ENTRY(BlockBackendAioNotifier) list;
} BlockBackendAioNotifier;
#ifndef QEMU_NYX
struct BlockBackend {
char *name;
int refcnt;
@@ -96,6 +101,7 @@ struct BlockBackend {
*/
unsigned int in_flight;
};
#endif
typedef struct BlockBackendAIOCB {
BlockAIOCB common;
@@ -335,6 +341,9 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
BlockBackend *blk;
blk = g_new0(BlockBackend, 1);
#ifdef QEMU_NYX
blk->cow_cache = NULL;
#endif
blk->refcnt = 1;
blk->ctx = ctx;
blk->perm = perm;
@@ -407,6 +416,10 @@ BlockBackend *blk_new_open(const char *filename, const char *reference,
return NULL;
}
#ifdef QEMU_NYX
blk->cow_cache = cow_cache_new(filename);
#endif
return blk;
}
@@ -1109,8 +1122,13 @@ void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
blk->disable_request_queuing = disable;
}
#ifndef QEMU_NYX
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
#else
int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
#endif
{
int64_t len;
@@ -1333,7 +1351,18 @@ static const AIOCBInfo blk_aio_em_aiocb_info = {
.aiocb_size = sizeof(BlkAioEmAIOCB),
};
#ifndef QEMU_NYX
static void blk_aio_complete(BlkAioEmAIOCB *acb)
#else
void blk_aio_complete(BlkAioEmAIOCB *acb);
BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
void *iobuf, CoroutineEntry co_entry,
BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
void blk_aio_write_entry(void *opaque);
void blk_aio_complete(BlkAioEmAIOCB *acb)
#endif
{
if (acb->has_returned) {
acb->common.cb(acb->common.opaque, acb->rwco.ret);
@@ -1349,10 +1378,17 @@ static void blk_aio_complete_bh(void *opaque)
blk_aio_complete(acb);
}
#ifndef QEMU_NYX
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
void *iobuf, CoroutineEntry co_entry,
BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
#else
BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
void *iobuf, CoroutineEntry co_entry,
BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
#endif
{
BlkAioEmAIOCB *acb;
Coroutine *co;
@@ -1399,7 +1435,11 @@ static void blk_aio_read_entry(void *opaque)
blk_aio_complete(acb);
}
#ifndef QEMU_NYX
static void blk_aio_write_entry(void *opaque)
#else
void blk_aio_write_entry(void *opaque)
#endif
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
@@ -1476,16 +1516,34 @@ BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
#ifndef QEMU_NYX
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_read_entry, flags, cb, opaque);
#else
if(blk->cow_cache->enabled){
return blk_aio_prwv(blk, offset, qiov->size, qiov, cow_cache_read_entry, flags, cb, opaque);
}
else{
return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_read_entry, flags, cb, opaque);
}
#endif
}
BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque)
{
#ifndef QEMU_NYX
return blk_aio_prwv(blk, offset, qiov->size, qiov,
blk_aio_write_entry, flags, cb, opaque);
#else
if(blk->cow_cache->enabled){
return blk_aio_prwv(blk, offset, qiov->size, qiov, cow_cache_write_entry, flags, cb, opaque);
}
else{
return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_write_entry, flags, cb, opaque);
}
#endif
}
static void blk_aio_flush_entry(void *opaque)


@@ -781,13 +781,18 @@ static int raw_apply_lock_bytes(BDRVRawState *s, int fd,
static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
Error **errp)
{
#ifndef QEMU_NYX
int ret;
#endif
int i;
PERM_FOREACH(i) {
#ifndef QEMU_NYX
int off = RAW_LOCK_SHARED_BASE + i;
#endif
uint64_t p = 1ULL << i;
if (perm & p) {
#ifndef QEMU_NYX
ret = qemu_lock_fd_test(fd, off, 1, true);
if (ret) {
char *perm_name = bdrv_perm_names(p);
@@ -797,12 +802,16 @@ static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
g_free(perm_name);
return ret;
}
#endif
}
}
PERM_FOREACH(i) {
#ifndef QEMU_NYX
int off = RAW_LOCK_PERM_BASE + i;
#endif
uint64_t p = 1ULL << i;
if (!(shared_perm & p)) {
#ifndef QEMU_NYX
ret = qemu_lock_fd_test(fd, off, 1, true);
if (ret) {
char *perm_name = bdrv_perm_names(p);
@@ -812,6 +821,7 @@ static int raw_check_lock_bytes(int fd, uint64_t perm, uint64_t shared_perm,
g_free(perm_name);
return ret;
}
#endif
}
}
return 0;


@@ -951,8 +951,8 @@ static void tcp_chr_accept_server_sync(Chardev *chr)
{
SocketChardev *s = SOCKET_CHARDEV(chr);
QIOChannelSocket *sioc;
//info_report("QEMU waiting for connection on: %s",
// chr->filename);
tcp_chr_change_state(s, TCP_CHARDEV_STATE_CONNECTING);
sioc = qio_net_listener_wait_client(s->listener);
tcp_chr_set_client_ioc_name(chr, sioc);

compile_qemu_nyx.sh Executable file (52 changed lines)

@@ -0,0 +1,52 @@
#!/bin/bash
set -e
# Copyright (C) 2021 Sergej Schumilo
#
# This file is part of NYX.
#
# QEMU-PT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# QEMU-PT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
if [ ! -f "/usr/lib/libxdc.so" ] || [ ! -f "/usr/include/libxdc.h" ]; then
echo "[!] libxdc not found! Installing..."
if [ -d "capstone_v4/" ]; then
rm -rf capstone_v4
fi
if [ -d "libxdc/" ]; then
rm -rf libxdc
fi
git clone https://github.com/nyx-fuzz/libxdc.git
git clone https://github.com/aquynh/capstone.git capstone_v4
cd capstone_v4
git checkout v4
make
sudo make install
cd ..
cd libxdc
sudo make install
cd ..
fi
./configure --target-list=x86_64-softmmu --enable-gtk --disable-werror --disable-capstone --disable-libssh --enable-nyx --disable-tools
#--enable-sanitizers
if [ -f GNUmakefile ]; then
rm GNUmakefile 2> /dev/null
fi
make -j

configure vendored (17 changed lines)

@@ -949,6 +949,8 @@ for opt do
case "$opt" in
--help|-h) show_help=yes
;;
--enable-nyx) nyx="yes"
;;
--version|-V) exec cat $source_path/VERSION
;;
--prefix=*) prefix="$optarg"
@@ -1726,6 +1728,7 @@ Advanced options (experts only):
Optional features, enabled with --enable-FEATURE and
disabled with --disable-FEATURE, default is enabled if available:
nyx build QEMU-NYX
system all system emulation targets
user supported user emulation targets
linux-user all linux usermode emulation targets
@@ -2045,7 +2048,7 @@ EOF
# check we support --no-pie first...
if compile_prog "-Werror -fno-pie" "-no-pie"; then
CFLAGS_NOPIE="-fno-pie"
#LDFLAGS_NOPIE="-nopie"
fi
if compile_prog "-fPIE -DPIE" "-pie"; then
@@ -6095,7 +6098,12 @@ if test "$gcov" = "yes" ; then
CFLAGS="-fprofile-arcs -ftest-coverage -g $CFLAGS"
LDFLAGS="-fprofile-arcs -ftest-coverage $LDFLAGS"
elif test "$fortify_source" = "yes" ; then
if test "$nyx" = "yes" ; then
CFLAGS="-DNESTED_PATCH -O3 -rdynamic -Wno-error=maybe-uninitialized -frename-registers -frename-registers -mtune=native -DQEMU_NYX -g -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $QEMU_CFLAGS"
LIBS="-lcapstone -lxdc $LIBS"
else
CFLAGS="-O2 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $CFLAGS"
fi
elif test "$debug" = "no"; then
CFLAGS="-O2 $CFLAGS"
fi
@@ -6849,6 +6857,11 @@ fi
if test "$splice" = "yes" ; then
echo "CONFIG_SPLICE=y" >> $config_host_mak
fi
if test "$nyx" = "yes" ; then
echo "CONFIG_QEMU_NYX=y" >> $config_host_mak
fi
if test "$eventfd" = "yes" ; then
echo "CONFIG_EVENTFD=y" >> $config_host_mak
fi
@@ -7515,7 +7528,7 @@ if test "$sparse" = "yes" ; then
echo "QEMU_CFLAGS += -Wbitwise -Wno-transparent-union -Wno-old-initializer -Wno-non-pointer-null" >> $config_host_mak
fi
echo "LDFLAGS=$LDFLAGS" >> $config_host_mak
#echo "LDFLAGS_NOPIE=$LDFLAGS_NOPIE" >> $config_host_mak
echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak
echo "LD_REL_FLAGS=$LD_REL_FLAGS" >> $config_host_mak
echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_host_mak

cpus.c (3 changed lines)

@@ -1351,6 +1351,9 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
sigemptyset(&waitset);
sigaddset(&waitset, SIG_IPI);
#ifdef QEMU_NYX
sigaddset(&waitset, SIGALRM);
#endif
/* signal CPU creation */
cpu->created = true;

exec.c (49 changed lines)

@@ -1355,6 +1355,52 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
return dirty;
}
#ifdef QEMU_NYX
extern void fast_reload_qemu_user_fdl_set_dirty(void* self, MemoryRegion *mr, uint64_t addr, uint64_t length);
extern void* get_fast_reload_snapshot(void);
/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
DirtyMemoryBlocks *blocks;
unsigned long end, page;
bool dirty = false;
RAMBlock *ramblock;
uint64_t mr_offset, mr_size;
if (length == 0) {
return false;
}
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
ramblock = qemu_get_ram_block(start);
/* Range sanity check on the ramblock */
assert(start >= ramblock->offset &&
start + length <= ramblock->offset + ramblock->used_length);
while (page < end) {
unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
unsigned long num = MIN(end - page,
DIRTY_MEMORY_BLOCK_SIZE - offset);
dirty |= bitmap_test_atomic(blocks->blocks[idx],
offset, num);
page += num;
}
}
return dirty;
}
#endif
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
@@ -3025,6 +3071,9 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr length)
{
#ifdef QEMU_NYX
fast_reload_qemu_user_fdl_set_dirty(get_fast_reload_snapshot(), mr, addr & 0xFFFFFFFFFFFFF000, length);
#endif
uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
addr += memory_region_get_ram_addr(mr);


@@ -34,6 +34,9 @@
#include "sysemu/runstate.h"
#include "qemu/error-report.h"
#include "trace.h"
#ifdef QEMU_NYX
#include "nyx/state.h"
#endif
//#define DEBUG_SERIAL
@@ -241,7 +244,9 @@ static gboolean serial_watch_cb(GIOChannel *chan, GIOCondition cond,
static void serial_xmit(SerialState *s)
{
do {
#ifndef QEMU_NYX
assert(!(s->lsr & UART_LSR_TEMT));
#endif
if (s->tsr_retry == 0) {
assert(!(s->lsr & UART_LSR_THRE));
@@ -343,6 +348,12 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
{
SerialState *s = opaque;
#ifdef QEMU_NYX
if(GET_GLOBAL_STATE()->in_fuzzing_mode){
return;
}
#endif
addr &= 7;
trace_serial_ioport_write(addr, val);
switch(addr) {


@@ -153,6 +153,10 @@ static inline uint8_t sr(VGACommonState *s, int idx)
return vbe_enabled(s) ? s->sr_vbe[idx] : s->sr[idx];
}
#ifdef QEMU_NYX
bool dirty = false;
#endif
static void vga_update_memory_access(VGACommonState *s)
{
hwaddr base, offset, size;
@@ -166,6 +170,9 @@ static void vga_update_memory_access(VGACommonState *s)
object_unparent(OBJECT(&s->chain4_alias));
s->has_chain4_alias = false;
s->plane_updated = 0xf;
#ifdef QEMU_NYX
dirty = true;
#endif
}
if ((sr(s, VGA_SEQ_PLANE_WRITE) & VGA_SR02_ALL_PLANES) ==
VGA_SR02_ALL_PLANES && sr(s, VGA_SEQ_MEMORY_MODE) & VGA_SR04_CHN_4M) {
@@ -2076,10 +2083,21 @@ static int vga_common_post_load(void *opaque, int version_id)
{
VGACommonState *s = opaque;
#ifndef QEMU_NYX
/* force refresh */
s->graphic_mode = -1;
vbe_update_vgaregs(s);
vga_update_memory_access(s);
#else
if(dirty){
/* force refresh */
s->graphic_mode = -1;
vbe_update_vgaregs(s);
//fprintf(stderr, "VGA DIRTY!\n");
vga_update_memory_access(s);
dirty = false;
}
#endif
return 0;
}


@@ -30,9 +30,21 @@
#include <linux/kvm.h>
#include "standard-headers/asm-x86/kvm_para.h"
#ifdef QEMU_NYX
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#endif
#define TYPE_KVM_CLOCK "kvmclock"
#define KVM_CLOCK(obj) OBJECT_CHECK(KVMClockState, (obj), TYPE_KVM_CLOCK)
#ifdef QEMU_NYX
bool fuzz_mode = false;
void enable_fast_snapshot_kvm_clock(void){
fuzz_mode = true;
}
#endif
typedef struct KVMClockState {
/*< private >*/
SysBusDevice busdev;
@@ -176,7 +188,11 @@ static void kvmclock_vm_state_change(void *opaque, int running,
* If the host where s->clock was read did not support reliable
* KVM_GET_CLOCK, read kvmclock value from memory.
*/
#ifndef QEMU_NYX
if (!s->clock_is_reliable) {
#else
if (!s->clock_is_reliable && !fuzz_mode) {
#endif
uint64_t pvclock_via_mem = kvmclock_current_nsec(s);
/* We can't rely on the saved clock value, just discard it */
if (pvclock_via_mem) {
@@ -231,6 +247,9 @@ static void kvmclock_realize(DeviceState *dev, Error **errp)
kvm_update_clock(s);
qemu_add_vm_change_state_handler(kvmclock_vm_state_change, s);
#ifdef QEMU_NYX
add_fast_reload_change_handler(kvmclock_vm_state_change, s, RELOAD_HANDLER_KVM_CLOCK);
#endif
}
static bool kvmclock_clock_is_reliable_needed(void *opaque)


@@ -34,6 +34,10 @@
#include "hw/timer/i8254_internal.h"
#include "sysemu/kvm.h"
#ifdef QEMU_NYX
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#endif
#define KVM_PIT_REINJECT_BIT 0
#define CALIBRATION_ROUNDS 3
@@ -300,6 +304,9 @@ static void kvm_pit_realizefn(DeviceState *dev, Error **errp)
qdev_init_gpio_in(dev, kvm_pit_irq_control, 1);
qemu_add_vm_change_state_handler(kvm_pit_vm_state_change, s);
#ifdef QEMU_NYX
add_fast_reload_change_handler(kvm_pit_vm_state_change, s, RELOAD_HANDLER_KVM_PIT);
#endif
kpc->parent_realize(dev, errp);
}


@@ -1026,3 +1026,22 @@ static void xenfv_machine_options(MachineClass *m)
DEFINE_PC_MACHINE(xenfv, "xenfv", pc_xen_hvm_init,
xenfv_machine_options);
#endif
#ifdef QEMU_NYX
static void pc_kAFL64_vmx_v1_0_machine_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
m->alias = "kAFL64";
//m->is_default = 1;
m->desc = "kAFL64 PC (i440FX + PIIX, 1996)";
}
static void kAFL64_init(MachineState *machine)
{
pc_init1(machine, TYPE_I440FX_PCI_HOST_BRIDGE, TYPE_I440FX_PCI_DEVICE);
}
DEFINE_PC_MACHINE(v1, "kAFL64-v1", kAFL64_init, pc_kAFL64_vmx_v1_0_machine_options);
#endif
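The new board definition registers "kAFL64-v1" with the alias "kAFL64" on top of the i440FX machine. For illustration, a hypothetical invocation of the freshly built binary that selects it via the alias (all Nyx-specific devices and options omitted):
```
./x86_64-softmmu/qemu-system-x86_64 -machine kAFL64 -enable-kvm
```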


@@ -41,6 +41,10 @@
#include "hw/ide/internal.h"
#include "trace.h"
#ifdef QEMU_NYX
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#endif
/* These values were based on a Seagate ST3500418AS but have been modified
to make more sense in QEMU */
static const int smart_attributes[][12] = {
@@ -2654,6 +2658,9 @@ void ide_register_restart_cb(IDEBus *bus)
{
if (bus->dma->ops->restart_dma) {
bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
#ifdef QEMU_NYX
add_fast_reload_change_handler(ide_restart_cb, bus, RELOAD_HANDLER_IDE_CORE);
#endif
}
}


@@ -274,14 +274,18 @@ static void apic_common_realize(DeviceState *dev, Error **errp)
info->realize(dev, errp);
/* Note: We need at least 1M to map the VAPIC option ROM */
#ifndef QEMU_NYX
if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK &&
!hax_enabled() && ram_size >= 1024 * 1024) {
vapic = sysbus_create_simple("kvmvapic", -1, NULL);
}
#endif
s->vapic = vapic;
#ifndef QEMU_NYX
if (apic_report_tpr_access && info->enable_tpr_reporting) {
info->enable_tpr_reporting(s, true);
}
#endif
if (s->legacy_instance_id) {
instance_id = -1;


@@ -548,6 +548,23 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
return 0;
}
#ifdef QEMU_NYX
void fast_get_pci_config_device(void* data, size_t size, void* opaque){
PCIDevice *s = container_of(opaque, PCIDevice, config);
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s);
uint8_t *config = (uint8_t *) data;
memcpy(s->config, config, size);
pci_update_mappings(s);
if (pc->is_bridge) {
PCIBridge *b = PCI_BRIDGE(s);
pci_bridge_update_mappings(b);
}
}
#endif
/* just put buffer */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
const VMStateField *field, QJSON *vmdesc)
@@ -587,6 +604,17 @@ static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
return 0;
}
#ifdef QEMU_NYX
void fast_get_pci_irq_state(void* data, size_t size, void* opaque){
PCIDevice *s = container_of(opaque, PCIDevice, irq_state);
uint32_t* irq_state = (uint32_t*) data;
for (int i = 0; i < PCI_NUM_PINS; ++i) {
pci_set_irq_state(s, i, irq_state[i]);
}
}
#endif
static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
const VMStateField *field, QJSON *vmdesc)
{


@@ -79,6 +79,14 @@ static void rtc_set_cmos(RTCState *s, const struct tm *tm);
static inline int rtc_from_bcd(RTCState *s, int a);
static uint64_t get_next_alarm(RTCState *s);
#ifdef QEMU_NYX
static bool fast_snapshot_rtc_enabled = false;
void enable_fast_snapshot_rtc(void){
fast_snapshot_rtc_enabled = true;
}
#endif
static inline bool rtc_running(RTCState *s)
{
return (!(s->cmos_data[RTC_REG_B] & REG_B_SET) &&
@@ -790,7 +798,11 @@ static int rtc_post_load(void *opaque, int version_id)
{
RTCState *s = opaque;
#ifndef QEMU_NYX
if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME) {
#else
if (version_id <= 2 || rtc_clock == QEMU_CLOCK_REALTIME || fast_snapshot_rtc_enabled) {
#endif
rtc_set_time(s);
s->offset = 0;
check_update_timer(s);


@@ -448,6 +448,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
}
#endif /* not _WIN32 */
#ifdef QEMU_NYX
bool cpu_physical_memory_test_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
#endif
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);


@@ -409,6 +409,21 @@ struct CPUState {
*/
uintptr_t mem_io_pc;
#ifdef QEMU_NYX
volatile int pt_cmd;
volatile int pt_ret;
volatile bool pt_enabled;
int pt_fd;
void* pt_mmap;
void* pt_decoder_state;
bool reload_pending;
bool intel_pt_run_trashed;
#endif
int kvm_fd;
struct KVMState *kvm_state;
struct kvm_run *kvm_run;


@@ -373,6 +373,10 @@ void pci_del_capability(PCIDevice *pci_dev, uint8_t cap_id, uint8_t cap_size);
uint8_t pci_find_capability(PCIDevice *pci_dev, uint8_t cap_id);
#ifdef QEMU_NYX
void fast_get_pci_config_device(void* data, size_t size, void* opaque);
void fast_get_pci_irq_state(void* data, size_t size, void* opaque);
#endif
uint32_t pci_default_read_config(PCIDevice *d,
uint32_t address, int len);


@@ -253,6 +253,9 @@ void bitmap_set(unsigned long *map, long i, long len);
void bitmap_set_atomic(unsigned long *map, long i, long len);
void bitmap_clear(unsigned long *map, long start, long nr);
bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
#ifdef QEMU_NYX
bool bitmap_test_atomic(unsigned long *map, long start, long nr);
#endif
void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr);
unsigned long bitmap_find_next_zero_area(unsigned long *map,


@@ -16,6 +16,11 @@
#include "qemu/iov.h"
#include "block/throttle-groups.h"
#ifdef QEMU_NYX
#include "sysemu/sysemu.h"
#include "nyx/snapshot/block/block_cow.h"
#endif
/*
* TODO Have to include block/block.h for a bunch of block layer
* types. Unfortunately, this pulls in the whole BlockDriverState
@@ -265,4 +270,62 @@ int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
const BdrvChild *blk_root(BlockBackend *blk);
#ifdef QEMU_NYX
struct BlockBackend {
cow_cache_t* cow_cache;
char *name;
int refcnt;
BdrvChild *root;
AioContext *ctx;
DriveInfo *legacy_dinfo; /* null unless created by drive_new() */
QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */
QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
BlockBackendPublic public;
DeviceState *dev; /* attached device model, if any */
const BlockDevOps *dev_ops;
void *dev_opaque;
/* the block size for which the guest device expects atomicity */
int guest_block_size;
/* If the BDS tree is removed, some of its options are stored here (which
* can be used to restore those options in the new BDS on insert) */
BlockBackendRootState root_state;
bool enable_write_cache;
/* I/O stats (display with "info blockstats"). */
BlockAcctStats stats;
BlockdevOnError on_read_error, on_write_error;
bool iostatus_enabled;
BlockDeviceIoStatus iostatus;
uint64_t perm;
uint64_t shared_perm;
bool disable_perm;
bool allow_aio_context_change;
bool allow_write_beyond_eof;
NotifierList remove_bs_notifiers, insert_bs_notifiers;
QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
int quiesce_counter;
CoQueue queued_requests;
bool disable_request_queuing;
VMChangeStateEntry *vmsh;
bool force_allow_inactivate;
/* Number of in-flight aio requests. BlockDriverState also counts
* in-flight requests but aio requests can exist even when blk->root is
* NULL, so we cannot rely on its counter for that case.
* Accessed with atomic ops.
*/
unsigned int in_flight;
};
#endif
#endif


@@ -216,6 +216,10 @@ int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
int kvm_has_intx_set_mask(void);
#ifdef QEMU_NYX
int kvm_get_vm_fd(KVMState *s);
#endif
int kvm_init_vcpu(CPUState *cpu);
int kvm_cpu_exec(CPUState *cpu);
int kvm_destroy_vcpu(CPUState *cpu);
@@ -367,6 +371,12 @@ int kvm_arch_get_registers(CPUState *cpu);
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3
#ifdef QEMU_NYX
#define KVM_PUT_FULL_STATE_FAST 4
int kvm_arch_get_registers_fast(CPUState *cpu);
#endif
int kvm_arch_put_registers(CPUState *cpu, int level);
int kvm_arch_init(MachineState *ms, KVMState *s);


@@ -41,5 +41,9 @@ typedef struct KVMMemoryListener {
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
AddressSpace *as, int as_id);
#ifdef QEMU_NYX
KVMMemoryListener* kvm_get_kml(int as_id);
#endif
void kvm_set_max_memslot_size(hwaddr max_slot_size);
#endif


@@ -236,6 +236,77 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
#ifdef QEMU_NYX
#define HYPERCALL_KAFL_RAX_ID 0x01f
#define KAFL_EXIT_OFFSET 100
#define KVM_EXIT_KAFL_ACQUIRE 100
#define KVM_EXIT_KAFL_GET_PAYLOAD 101
#define KVM_EXIT_KAFL_GET_PROGRAM 102
#define KVM_EXIT_KAFL_GET_ARGV 103
#define KVM_EXIT_KAFL_RELEASE 104
#define KVM_EXIT_KAFL_SUBMIT_CR3 105
#define KVM_EXIT_KAFL_SUBMIT_PANIC 106
#define KVM_EXIT_KAFL_SUBMIT_KASAN 107
#define KVM_EXIT_KAFL_PANIC 108
#define KVM_EXIT_KAFL_KASAN 109
#define KVM_EXIT_KAFL_LOCK 110
#define KVM_EXIT_KAFL_INFO 111
#define KVM_EXIT_KAFL_NEXT_PAYLOAD 112
#define KVM_EXIT_KAFL_PRINTF 113
/* Kernel Printf Debugger */
#define KVM_EXIT_KAFL_PRINTK_ADDR 114
#define KVM_EXIT_KAFL_PRINTK 115
/* user space only exit reasons */
#define KVM_EXIT_KAFL_USER_RANGE_ADVISE 116
#define KVM_EXIT_KAFL_USER_SUBMIT_MODE 117
#define KVM_EXIT_KAFL_USER_FAST_ACQUIRE 118
#define KVM_EXIT_KAFL_TOPA_MAIN_FULL 119
#define KVM_EXIT_KAFL_USER_ABORT 120
/* hypertrash only hypercalls */
#define HYPERTRASH_HYPERCALL_MASK 0xAA000000
#define HYPERCALL_KAFL_NESTED_PREPARE (0 | HYPERTRASH_HYPERCALL_MASK)
#define HYPERCALL_KAFL_NESTED_CONFIG (1 | HYPERTRASH_HYPERCALL_MASK)
#define HYPERCALL_KAFL_NESTED_ACQUIRE (2 | HYPERTRASH_HYPERCALL_MASK)
#define HYPERCALL_KAFL_NESTED_RELEASE (3 | HYPERTRASH_HYPERCALL_MASK)
#define KVM_EXIT_KAFL_NESTED_CONFIG 121
#define KVM_EXIT_KAFL_NESTED_PREPARE 122
#define KVM_EXIT_KAFL_NESTED_ACQUIRE 123
#define KVM_EXIT_KAFL_NESTED_RELEASE 124
#define KVM_EXIT_KAFL_PAGE_DUMP_BP 125
#define KVM_EXIT_KAFL_TIMEOUT 126
#define KVM_EXIT_KAFL_NESTED_HPRINTF 127
#define KVM_EXIT_KAFL_MTF 128
#define KVM_EXIT_KAFL_RANGE_SUBMIT 129
#define HYPERCALL_KAFL_REQ_STREAM_DATA 130
#define KVM_EXIT_KAFL_NESTED_EARLY_RELEASE 131
#define KVM_EXIT_KAFL_PANIC_EXTENDED 132
#define KVM_EXIT_KAFL_CREATE_TMP_SNAPSHOT 133
#define KVM_EXIT_KAFL_DEBUG_TMP_SNAPSHOT 134 /* hypercall for debugging / development purposes */
#define KVM_EXIT_KAFL_GET_HOST_CONFIG 135
#define KVM_EXIT_KAFL_SET_AGENT_CONFIG 136
#define KVM_EXIT_KAFL_DUMP_FILE 137
#define HYPERCALL_KAFL_REQ_STREAM_DATA_BULK 138
#define KVM_CAP_NYX_PT 512
#define KVM_CAP_NYX_FDL 513
#endif
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
#define KVM_INTERNAL_ERROR_EMULATION 1
@@ -1611,4 +1682,62 @@ struct kvm_hyperv_eventfd {
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
#ifdef QEMU_NYX
/*
* ioctls for vmx_pt fds
*/
#define KVM_VMX_PT_SETUP_FD _IO(KVMIO, 0xd0) /* apply vmx_pt fd (via vcpu fd ioctl)*/
#define KVM_VMX_PT_CONFIGURE_ADDR0 _IOW(KVMIO, 0xd1, __u64) /* configure IP-filtering for addr0_a & addr0_b */
#define KVM_VMX_PT_CONFIGURE_ADDR1 _IOW(KVMIO, 0xd2, __u64) /* configure IP-filtering for addr1_a & addr1_b */
#define KVM_VMX_PT_CONFIGURE_ADDR2 _IOW(KVMIO, 0xd3, __u64) /* configure IP-filtering for addr2_a & addr2_b */
#define KVM_VMX_PT_CONFIGURE_ADDR3 _IOW(KVMIO, 0xd4, __u64) /* configure IP-filtering for addr3_a & addr3_b */
#define KVM_VMX_PT_CONFIGURE_CR3 _IOW(KVMIO, 0xd5, __u64) /* setup CR3 filtering value */
#define KVM_VMX_PT_ENABLE _IO(KVMIO, 0xd6) /* enable and lock configuration */
#define KVM_VMX_PT_GET_TOPA_SIZE _IOR(KVMIO, 0xd7, __u32) /* get defined ToPA size */
#define KVM_VMX_PT_DISABLE _IO(KVMIO, 0xd8) /* disable and unlock configuration */
#define KVM_VMX_PT_CHECK_TOPA_OVERFLOW _IO(KVMIO, 0xd9) /* check for ToPA overflow */
#define KVM_VMX_PT_ENABLE_ADDR0 _IO(KVMIO, 0xaa) /* enable IP-filtering for addr0 */
#define KVM_VMX_PT_ENABLE_ADDR1 _IO(KVMIO, 0xab) /* enable IP-filtering for addr1 */
#define KVM_VMX_PT_ENABLE_ADDR2 _IO(KVMIO, 0xac) /* enable IP-filtering for addr2 */
#define KVM_VMX_PT_ENABLE_ADDR3 _IO(KVMIO, 0xad) /* enable IP-filtering for addr3 */
#define KVM_VMX_PT_DISABLE_ADDR0 _IO(KVMIO, 0xae) /* disable IP-filtering for addr0 */
#define KVM_VMX_PT_DISABLE_ADDR1 _IO(KVMIO, 0xaf) /* disable IP-filtering for addr1 */
#define KVM_VMX_PT_DISABLE_ADDR2 _IO(KVMIO, 0xe0) /* disable IP-filtering for addr2 */
#define KVM_VMX_PT_DISABLE_ADDR3 _IO(KVMIO, 0xe1) /* disable IP-filtering for addr3 */
#define KVM_VMX_PT_ENABLE_CR3 _IO(KVMIO, 0xe2) /* enable CR3 filtering */
#define KVM_VMX_PT_DISABLE_CR3 _IO(KVMIO, 0xe3) /* disable CR3 filtering */
#define KVM_VMX_PT_SUPPORTED _IO(KVMIO, 0xe4)
#define KVM_VMX_FDL_SETUP_FD _IO(KVMIO, 0xe5)
#define KVM_VMX_FDL_SET _IOW(KVMIO, 0xe6, __u64)
#define KVM_VMX_FDL_FLUSH _IO(KVMIO, 0xe7)
#define KVM_VMX_FDL_GET_INDEX _IOR(KVMIO, 0xe8, __u64)
#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9)
/* Multi CR3 Support */
#define KVM_VMX_PT_CONFIGURE_MULTI_CR3 _IOW(KVMIO, 0xea, __u64) /* setup multi-CR3 filtering values */
#define KVM_VMX_PT_ENABLE_MULTI_CR3 _IO(KVMIO, 0xeb) /* enable multi-CR3 filtering */
#define KVM_VMX_PT_DISABLE_MULTI_CR3 _IO(KVMIO, 0xec) /* disable multi-CR3 filtering */
/* Page Dump Support */
#define KVM_VMX_PT_SET_PAGE_DUMP_CR3 _IOW(KVMIO, 0xed, __u64)
#define KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3 _IO(KVMIO, 0xee)
#define KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3 _IO(KVMIO, 0xef)
#define KVM_VMX_PT_ENABLE_MTF _IO(KVMIO, 0xf0)
#define KVM_VMX_PT_DISABLE_MTF _IO(KVMIO, 0xf1)
/* KVM dirty-ring */
#define KVM_CAP_DIRTY_LOG_RING 192
#define KVM_EXIT_DIRTY_RING_FULL 31
#endif
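/*
 * Hedged sketch of the intended vmx_pt ioctl flow (QEMU-side, not part of
 * this header; needs <sys/ioctl.h>). That KVM_VMX_PT_SETUP_FD returns a
 * dedicated vmx_pt fd and that each CONFIGURE_ADDRn takes a {start, end}
 * pair are assumptions drawn from the comments above.
 */
static int enable_vmx_pt_sketch(int vcpu_fd, uint64_t code_start, uint64_t code_end)
{
    int pt_fd = ioctl(vcpu_fd, KVM_VMX_PT_SETUP_FD, 0); /* vcpu fd -> vmx_pt fd */
    uint64_t filter[2] = { code_start, code_end };      /* assumed filter layout */
    ioctl(pt_fd, KVM_VMX_PT_CONFIGURE_ADDR0, filter);
    ioctl(pt_fd, KVM_VMX_PT_ENABLE_ADDR0);              /* arm IP filter addr0 */
    ioctl(pt_fd, KVM_VMX_PT_ENABLE);                    /* enable and lock config */
    return pt_fd;                                       /* poll CHECK_TOPA_OVERFLOW later */
}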
#endif /* __LINUX_KVM_H */
BIN
logo.png Normal file
Binary file not shown. (new binary file; size: 247 KiB)

memory.c View File
@ -35,6 +35,11 @@
#include "hw/boards.h" #include "hw/boards.h"
#include "migration/vmstate.h" #include "migration/vmstate.h"
#ifdef QEMU_NYX
#include "nyx/state.h"
#include "nyx/fast_vm_reload.h"
#endif
//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
@ -2011,6 +2016,9 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size)
{
assert(mr->ram_block);
#ifdef QEMU_NYX
fast_reload_qemu_user_fdl_set_dirty(get_fast_reload_snapshot(), mr, addr & 0xFFFFFFFFFFFFF000, size);
#endif
cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
size,
memory_region_get_dirty_log_mask(mr));
migration/savevm.c View File
@ -252,14 +252,22 @@ typedef struct SaveState {
QTAILQ_HEAD(, SaveStateEntry) handlers;
int global_section_id;
uint32_t len;
#ifndef QEMU_NYX
const char *name;
#else
char *name;
#endif
uint32_t target_page_bits;
uint32_t caps_count;
MigrationCapability *capabilities;
QemuUUID uuid;
} SaveState;
#ifndef QEMU_NYX
static SaveState savevm_state = {
#else
SaveState savevm_state = {
#endif
.handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
.global_section_id = 0,
};
@ -289,6 +297,18 @@ static uint32_t get_validatable_capabilities_count(void)
return result;
}
#ifdef QEMU_NYX
int vmstate_load(QEMUFile *f, SaveStateEntry *se);
int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc);
void save_section_header(QEMUFile *f, SaveStateEntry *se, uint8_t section_type);
void save_section_footer(QEMUFile *f, SaveStateEntry *se);
bool should_send_vmdesc(void);
int qemu_savevm_state(QEMUFile *f, Error **errp);
bool check_section_footer(QEMUFile *f, SaveStateEntry *se);
int qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis);
int qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis);
#endif
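/*
 * The prototypes above un-static QEMU's vmstate internals under QEMU_NYX;
 * presumably so Nyx's own device-state serializer (see
 * nyx/snapshot/devices/state_reallocation.c in this commit) can drive them
 * directly instead of going through a full migration stream.
 */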
static int configuration_pre_save(void *opaque)
{
SaveState *state = opaque;
@ -297,7 +317,15 @@ static int configuration_pre_save(void *opaque)
int i, j;
state->len = strlen(current_name);
#ifndef QEMU_NYX
state->name = current_name;
#else
if(state->name){
free(state->name);
}
state->name = strdup(current_name);
#endif
state->target_page_bits = qemu_target_page_bits();
state->caps_count = get_validatable_capabilities_count();
@ -508,7 +536,11 @@ static const VMStateDescription vmstate_uuid = {
}
};
#ifndef QEMU_NYX
static const VMStateDescription vmstate_configuration = {
#else
const VMStateDescription vmstate_configuration = {
#endif
.name = "configuration",
.version_id = 1,
.pre_load = configuration_pre_load,
@ -848,7 +880,11 @@ void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd,
}
}
#ifndef QEMU_NYX
static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
#else
int vmstate_load(QEMUFile *f, SaveStateEntry *se)
#endif
{
trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
if (!se->vmsd) { /* Old style */
@ -877,7 +913,11 @@ static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, QJSON *vmdes
}
}
#ifndef QEMU_NYX
static int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
#else
int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
#endif
{
trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
if (!se->vmsd) {
@ -890,8 +930,13 @@ static int vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc)
/*
* Write the header for device section (QEMU_VM_SECTION START/END/PART/FULL)
*/
#ifndef QEMU_NYX
static void save_section_header(QEMUFile *f, SaveStateEntry *se,
uint8_t section_type)
#else
void save_section_header(QEMUFile *f, SaveStateEntry *se,
uint8_t section_type)
#endif
{
qemu_put_byte(f, section_type);
qemu_put_be32(f, se->section_id);
@ -912,7 +957,11 @@ static void save_section_header(QEMUFile *f, SaveStateEntry *se,
* Write a footer onto device sections that catches cases of misformatted device
* sections.
*/
#ifndef QEMU_NYX
static void save_section_footer(QEMUFile *f, SaveStateEntry *se)
#else
void save_section_footer(QEMUFile *f, SaveStateEntry *se)
#endif
{
if (migrate_get_current()->send_section_footer) {
qemu_put_byte(f, QEMU_VM_SECTION_FOOTER);
@ -1262,7 +1311,11 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
return ret;
}
#ifndef QEMU_NYX
static bool should_send_vmdesc(void)
#else
bool should_send_vmdesc(void)
#endif
{
MachineState *machine = MACHINE(qdev_get_machine());
bool in_postcopy = migration_in_postcopy();
@ -1498,7 +1551,11 @@ void qemu_savevm_state_cleanup(void)
}
}
#ifndef QEMU_NYX
static int qemu_savevm_state(QEMUFile *f, Error **errp)
#else
int qemu_savevm_state(QEMUFile *f, Error **errp)
#endif
{
int ret;
MigrationState *ms = migrate_get_current();
@ -2200,7 +2257,11 @@ static int loadvm_process_command(QEMUFile *f)
* Returns: true if the footer was good
* false if there is a problem (and calls error_report to say why)
*/
#ifndef QEMU_NYX
static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
#else
bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
#endif
{
int ret;
uint8_t read_mark;
@ -2237,8 +2298,13 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
return true;
}
#ifndef QEMU_NYX
static int
qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
#else
int
qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
#endif
{
uint32_t instance_id, version_id, section_id;
SaveStateEntry *se;
@ -2302,8 +2368,13 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
return 0;
}
#ifndef QEMU_NYX
static int
qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
#else
int
qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
#endif
{
uint32_t section_id;
SaveStateEntry *se;
migration/vmstate.c View File
@ -22,10 +22,25 @@
static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, QJSON *vmdesc);
#ifndef QEMU_NYX
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
#else
int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque);
int vmstate_n_elems(void *opaque, const VMStateField *field);
int vmstate_size(void *opaque, const VMStateField *field);
void vmstate_handle_alloc(void *ptr, const VMStateField *field, void *opaque);
const VMStateDescription * vmstate_get_subsection(const VMStateDescription **sub, char *idstr);
#endif
#ifndef QEMU_NYX
static int vmstate_n_elems(void *opaque, const VMStateField *field)
#else
int vmstate_n_elems(void *opaque, const VMStateField *field)
#endif
{
int n_elems = 1;
@ -49,7 +64,11 @@ static int vmstate_n_elems(void *opaque, const VMStateField *field)
return n_elems;
}
#ifndef QEMU_NYX
static int vmstate_size(void *opaque, const VMStateField *field)
#else
int vmstate_size(void *opaque, const VMStateField *field)
#endif
{
int size = field->size;
@ -63,8 +82,13 @@ static int vmstate_size(void *opaque, const VMStateField *field)
return size;
}
#ifndef QEMU_NYX
static void vmstate_handle_alloc(void *ptr, const VMStateField *field,
void *opaque)
#else
void vmstate_handle_alloc(void *ptr, const VMStateField *field,
void *opaque)
#endif
{
if (field->flags & VMS_POINTER && field->flags & VMS_ALLOC) {
gsize size = vmstate_size(opaque, field);
@ -428,8 +452,13 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
return ret;
}
#ifndef QEMU_NYX
static const VMStateDescription *
vmstate_get_subsection(const VMStateDescription **sub, char *idstr)
#else
const VMStateDescription *
vmstate_get_subsection(const VMStateDescription **sub, char *idstr)
#endif
{
while (sub && *sub) {
if (strcmp(idstr, (*sub)->name) == 0) {
@ -440,8 +469,13 @@ vmstate_get_subsection(const VMStateDescription **sub, char *idstr)
return NULL;
}
#ifndef QEMU_NYX
static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque)
#else
int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque)
#endif
{
trace_vmstate_subsection_load(vmsd->name);

33
nyx/Makefile.objs Normal file
View File
@ -0,0 +1,33 @@
obj-y += \
hypercall.o \
memory_access.o \
interface.o \
fast_vm_reload.o \
fast_vm_reload_sync.o \
printk.o \
synchronization.o \
page_cache.o \
kvm_nested.o \
state.o \
debug.o \
auxiliary_buffer.o \
mmh3.o \
nested_hypercalls.o \
sharedir.o \
helpers.o \
redqueen.o \
file_helper.o \
redqueen_trace.o \
snapshot/helper.o \
snapshot/devices/nyx_device_state.o \
snapshot/devices/state_reallocation.o \
snapshot/devices/vm_change_state_handlers.o \
snapshot/block/nyx_block_snapshot.o \
snapshot/block/block_cow.o \
snapshot/memory/shadow_memory.o \
snapshot/memory/nyx_fdl_user.o \
snapshot/memory/block_list.o \
snapshot/memory/backend/nyx_debug.o \
snapshot/memory/backend/nyx_fdl.o \
snapshot/memory/backend/nyx_dirty_ring.o \
pt.o
287
nyx/auxiliary_buffer.c Normal file
View File
@ -0,0 +1,287 @@
/*
Copyright (C) 2019 Sergej Schumilo
This file is part of QEMU-PT (HyperTrash / kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include "nyx/auxiliary_buffer.h"
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include "nyx/state.h"
#include "nyx/debug.h"
/* experimental feature (currently broken)
* enabled via trace mode
*/
//#define SUPPORT_COMPILE_TIME_REDQUEEN
#define VOLATILE_WRITE_64(dst, src) *((volatile uint64_t*)&dst) = (uint64_t)src
#define VOLATILE_WRITE_32(dst, src) *((volatile uint32_t*)&dst) = (uint32_t)src
#define VOLATILE_WRITE_16(dst, src) *((volatile uint16_t*)&dst) = (uint16_t)src
#define VOLATILE_WRITE_8(dst, src) *((volatile uint8_t*)&dst) = (uint8_t)src
#define VOLATILE_READ_64(dst, src) dst = *((volatile uint64_t*)(&src))
#define VOLATILE_READ_32(dst, src) dst = *((volatile uint32_t*)(&src))
#define VOLATILE_READ_16(dst, src) dst = *((volatile uint16_t*)(&src))
#define VOLATILE_READ_8(dst, src) dst = *((volatile uint8_t*)(&src))
static void volatile_memset(void* dst, uint8_t ch, size_t count){
for (size_t i = 0; i < count; i++){
VOLATILE_WRITE_8(((uint8_t*)dst)[i], ch);
}
}
static void volatile_memcpy(void* dst, void* src, size_t size){
for (size_t i = 0; i < size; i++){
VOLATILE_WRITE_8(((uint8_t*)dst)[i], ((uint8_t*)src)[i]);
}
}
void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){
debug_fprintf(stderr, "%s\n", __func__);
volatile_memset((void*) auxilary_buffer, 0, sizeof(auxilary_buffer_t));
VOLATILE_WRITE_16(auxilary_buffer->header.version, QEMU_PT_VERSION);
uint16_t hash = (sizeof(auxilary_buffer_header_t) +
sizeof(auxilary_buffer_cap_t) +
sizeof(auxilary_buffer_config_t) +
sizeof(auxilary_buffer_result_t) +
sizeof(auxilary_buffer_misc_t)) % 0xFFFF;
VOLATILE_WRITE_16(auxilary_buffer->header.hash, hash);
VOLATILE_WRITE_64(auxilary_buffer->header.magic, AUX_MAGIC);
}
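/*
 * Hedged sketch of the consumer side (fuzzer frontend, not part of this
 * file): the magic/version/hash triple written above lets the frontend
 * reject an aux buffer whose layout it was not compiled against. The
 * function name is illustrative.
 */
static bool aux_buffer_compatible(auxilary_buffer_t* aux, uint16_t expected_hash){
    return aux->header.magic == AUX_MAGIC &&
           aux->header.version == QEMU_PT_VERSION &&
           aux->header.hash == expected_hash;
}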
void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config){
uint8_t changed = 0;
VOLATILE_READ_8(changed, auxilary_buffer->configuration.changed);
if (changed){
uint8_t aux_byte;
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.redqueen_mode);
if(aux_byte){
/* enable redqueen mode */
if(aux_byte != shadow_config->redqueen_mode){
GET_GLOBAL_STATE()->in_redqueen_reload_mode = true;
GET_GLOBAL_STATE()->redqueen_enable_pending = true;
GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_LIGHT_INSTRUMENTATION;
}
}
else{
/* disable redqueen mode */
if(aux_byte != shadow_config->redqueen_mode){
GET_GLOBAL_STATE()->in_redqueen_reload_mode = false;
GET_GLOBAL_STATE()->redqueen_disable_pending = true;
GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION;
}
}
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.trace_mode);
if(aux_byte){
/* enable trace mode */
if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
GET_GLOBAL_STATE()->pt_trace_mode_force = true;
#endif
redqueen_set_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
}
}
else {
/* disable trace mode */
if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
GET_GLOBAL_STATE()->pt_trace_mode_force = false;
#endif
redqueen_unset_trace_mode(GET_GLOBAL_STATE()->redqueen_state);
}
}
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.page_dump_mode);
if(aux_byte){
GET_GLOBAL_STATE()->dump_page = true;
uint64_t data;
VOLATILE_READ_64(data, auxilary_buffer->configuration.page_addr);
GET_GLOBAL_STATE()->dump_page_addr = data;
//fprintf(stderr, "%s dump_page_addr => 0x%lx\n", __func__, GET_GLOBAL_STATE()->dump_page_addr);
VOLATILE_WRITE_8(auxilary_buffer->configuration.page_dump_mode, 0);
VOLATILE_WRITE_64(auxilary_buffer->configuration.page_addr, 0);
}
/* modify reload mode */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.reload_mode);
GET_GLOBAL_STATE()->in_reload_mode = aux_byte;
/* modify protect_payload_buffer */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.protect_payload_buffer);
GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte;
/* modify discard_tmp_snapshot */
VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.discard_tmp_snapshot);
GET_GLOBAL_STATE()->discard_tmp_snapshot = aux_byte;
VOLATILE_WRITE_8(auxilary_buffer->configuration.discard_tmp_snapshot, 0);
/* copy to shadow config */
VOLATILE_READ_8(shadow_config->timeout_sec, auxilary_buffer->configuration.timeout_sec);
VOLATILE_READ_32(shadow_config->timeout_usec, auxilary_buffer->configuration.timeout_usec);
//if(shadow_config->timeout_sec || shadow_config->timeout_usec){
/* apply only non-zero values */
update_itimer(&(GET_GLOBAL_STATE()->timeout_detector), shadow_config->timeout_sec, shadow_config->timeout_usec);
//}
VOLATILE_READ_8(shadow_config->redqueen_mode, auxilary_buffer->configuration.redqueen_mode);
VOLATILE_READ_8(shadow_config->trace_mode, auxilary_buffer->configuration.trace_mode);
VOLATILE_READ_8(shadow_config->reload_mode, auxilary_buffer->configuration.reload_mode);
VOLATILE_READ_8(shadow_config->verbose_level, auxilary_buffer->configuration.verbose_level);
/* reset the 'changed' byte */
VOLATILE_WRITE_8(auxilary_buffer->configuration.changed, 0);
}
}
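/*
 * Hedged sketch of the frontend side of this handshake (illustrative, not
 * part of this file): write the new settings first and set 'changed' last,
 * since the scan above resets 'changed' once the values are applied.
 */
static void frontend_request_reconfig(auxilary_buffer_t* aux){
    aux->configuration.timeout_sec   = 0;
    aux->configuration.timeout_usec  = 50000;   /* 50ms per execution */
    aux->configuration.redqueen_mode = 1;
    aux->configuration.changed       = 1;       /* kick: applied on the next scan */
}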
void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.crash_found, 1);
}
void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.asan_found, 1);
}
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.timeout_found, 1);
}
void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 1);
}
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 1);
}
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t sec, uint32_t usec){
VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 1);
VOLATILE_WRITE_8(auxilary_buffer->result.runtime_sec, sec);
VOLATILE_WRITE_32(auxilary_buffer->result.runtime_usec, usec);
}
void flush_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
memset(&auxilary_buffer->result.hprintf, 0x0, sizeof(auxilary_buffer_result_t)-2);
//memset(&(auxilary_buffer->result) + offsetof(auxilary_buffer_result_t, hprintf), 0x0, sizeof(auxilary_buffer_result_t) - offsetof(auxilary_buffer_result_t, hprintf));
/*
VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.hprintf, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.crash_found, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.asan_found, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.timeout_found, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.runtime_sec, 0);
VOLATILE_WRITE_32(auxilary_buffer->result.runtime_usec, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 0);
VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, 0);
VOLATILE_WRITE_8(auxilary_buffer->result.payload_buffer_write_attempt_found, 0);
VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, 0);
VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, 0);
*/
}
void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t)MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.hprintf, 1);
}
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.crash_found, 1);
}
void flush_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.hprintf, 0);
}
void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state){
if(auxilary_buffer){
VOLATILE_WRITE_8(auxilary_buffer->result.state, state);
}
else{
fprintf(stderr, "WARNING: auxilary_buffer pointer is zero\n");
}
}
void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr){
VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 1);
VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, page_addr);
}
void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success){
VOLATILE_WRITE_8(auxilary_buffer->result.success, success);
}
void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE-2));
volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
VOLATILE_WRITE_8(auxilary_buffer->result.payload_buffer_write_attempt_found, 1);
}
void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value){
VOLATILE_WRITE_8(auxilary_buffer->result.tmp_snapshot_created, value);
}
void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){
VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_trace_bitmap, value);
}
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){
VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_ijon_trace_bitmap, value);
}
void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value){
VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, value);
}
void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value){
VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, value);
}
void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value){
if (value != auxilary_buffer->result.bb_coverage){
VOLATILE_WRITE_32(auxilary_buffer->result.bb_coverage, value);
}
}
185
nyx/auxiliary_buffer.h Normal file
View File
@ -0,0 +1,185 @@
/*
Copyright (C) 2019 Sergej Schumilo
This file is part of QEMU-PT (HyperTrash / kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stdint.h>
#include <stdbool.h>
#define AUX_BUFFER_SIZE 4096
#define AUX_MAGIC 0x54502d554d4551
#define QEMU_PT_VERSION 1 /* let's start at 1 for the initial version using the aux buffer */
#define HEADER_SIZE 128
#define CAP_SIZE 256
#define CONFIG_SIZE 512
#define STATE_SIZE 512
#define MISC_SIZE (4096-(HEADER_SIZE+CAP_SIZE+CONFIG_SIZE+STATE_SIZE))
#define ADD_PADDING(max, type) uint8_t type ## _padding [max - sizeof(type)]
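/* e.g. ADD_PADDING(HEADER_SIZE, auxilary_buffer_header_t) expands to
 *   uint8_t auxilary_buffer_header_t_padding[128 - sizeof(auxilary_buffer_header_t)];
 * padding each section up to its fixed size within the 4 KiB buffer. */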
typedef struct auxilary_buffer_header_s{
uint64_t magic; /* 0x54502d554d4551 */
uint16_t version;
uint16_t hash;
/* more to come */
} __attribute__((packed)) auxilary_buffer_header_t;
typedef struct auxilary_buffer_cap_s{
uint8_t redqueen;
uint8_t agent_timeout_detection; /* agent implements its own timeout detection; host timeout detection is still in use, but the threshold is doubled; */
uint8_t agent_trace_bitmap; /* agent implements own tracing mechanism; PT tracing is disabled */
uint8_t agent_ijon_trace_bitmap; /* agent uses the ijon shm buffer */
/* more to come */
} __attribute__((packed)) auxilary_buffer_cap_t;
typedef struct auxilary_buffer_config_s{
uint8_t changed; /* set this byte to kick in a rescan of this buffer */
uint8_t timeout_sec;
uint32_t timeout_usec;
/* trigger to enable / disable different QEMU-PT modes */
uint8_t redqueen_mode;
uint8_t trace_mode;
uint8_t reload_mode;
uint8_t verbose_level;
uint8_t page_dump_mode;
uint64_t page_addr;
/* nested mode only */
uint8_t protect_payload_buffer;
/* 0 -> disabled
1 -> decoding
2 -> decoding + full disassembling
*/
//uint8_t pt_processing_mode;
/* snapshot extension */
uint8_t discard_tmp_snapshot;
/* more to come */
} __attribute__((packed)) auxilary_buffer_config_t;
typedef struct auxilary_buffer_result_s{
/* 0 -> booting,
1 -> loader level 1,
2 -> loader level 2,
3 -> ready to fuzz
*/
uint8_t state;
/* snapshot extension */
uint8_t tmp_snapshot_created;
/* FML */
uint8_t padding_1;
uint8_t padding_2;
uint32_t bb_coverage;
uint8_t padding_3;
uint8_t padding_4;
uint8_t hprintf;
uint8_t exec_done;
uint8_t crash_found;
uint8_t asan_found;
uint8_t timeout_found;
uint8_t reloaded;
uint8_t pt_overflow;
uint8_t runtime_sec;
uint8_t page_not_found;
uint8_t success;
uint32_t runtime_usec;
uint64_t page_addr;
uint32_t dirty_pages;
uint32_t pt_trace_size;
uint8_t payload_buffer_write_attempt_found;
/* more to come */
} __attribute__((packed)) auxilary_buffer_result_t;
typedef struct auxilary_buffer_misc_s{
uint16_t len;
uint8_t data;
/* none yet */
} __attribute__((packed)) auxilary_buffer_misc_t;
typedef struct auxilary_buffer_s{
auxilary_buffer_header_t header;
ADD_PADDING(HEADER_SIZE, auxilary_buffer_header_t);
auxilary_buffer_cap_t capabilites;
ADD_PADDING(CAP_SIZE, auxilary_buffer_cap_t);
auxilary_buffer_config_t configuration;
ADD_PADDING(CONFIG_SIZE, auxilary_buffer_config_t);
auxilary_buffer_result_t result;
ADD_PADDING(STATE_SIZE, auxilary_buffer_result_t);
auxilary_buffer_misc_t misc;
ADD_PADDING(MISC_SIZE, auxilary_buffer_misc_t);
} __attribute__((packed)) auxilary_buffer_t;
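/*
 * The SIZE macros above pin each section to a fixed offset: header @ 0,
 * capabilites @ 128, configuration @ 384, result @ 896, misc @ 1408,
 * 4096 bytes in total. A consumer could verify this at compile time
 * (sketch; requires <stddef.h> for offsetof):
 *
 *   _Static_assert(offsetof(auxilary_buffer_t, capabilites)   ==  128, "aux layout");
 *   _Static_assert(offsetof(auxilary_buffer_t, configuration) ==  384, "aux layout");
 *   _Static_assert(offsetof(auxilary_buffer_t, result)        ==  896, "aux layout");
 *   _Static_assert(offsetof(auxilary_buffer_t, misc)          == 1408, "aux layout");
 *   _Static_assert(sizeof(auxilary_buffer_t) == AUX_BUFFER_SIZE, "aux layout");
 */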
void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer);
void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config);
void flush_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer);
void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void flush_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer);
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t sec, uint32_t usec);
void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state);
void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr);
void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success);
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value);
void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value);
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value);
void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value);
void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value);
void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value);
void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
135
nyx/debug.c Normal file
View File
@ -0,0 +1,135 @@
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include "nyx/debug.h"
#include "signal.h"
#ifdef ENABLE_BACKTRACES
#define BT_BUF_SIZE 100
void qemu_backtrace(void){
void *buffer[BT_BUF_SIZE];
int nptrs = 0;
int j;
nptrs = backtrace(buffer, BT_BUF_SIZE);
fprintf(stderr, "backtrace() returned %d addresses\n", nptrs);
char **strings = backtrace_symbols(buffer, nptrs);
if (strings == NULL) {
//perror("backtrace_symbols");
fprintf(stderr, "backtrace_symbols failed!\n");
return;
//exit(EXIT_FAILURE);
}
for (j = 0; j < nptrs; j++)
fprintf(stderr, "%s\n", strings[j]);
free(strings);
}
static void sigsegfault_handler(int signo, siginfo_t *info, void *extra) {
fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo);
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while(1){
sleep(1);
}
}
static void sigabrt_handler(int signo, siginfo_t *info, void *extra) {
fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo);
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while(1){
sleep(1);
}
}
static void sigint_handler(int signo, siginfo_t *info, void *extra) {
fprintf(stderr, "[qemu-nyx] bye! (pid: %d / signal: %d)\n", getpid(), signo);
exit(0);
}
/*
static void aexit_handler(void) {
fprintf(stderr, "ATTEMPT TO CALL EXIT (PID: %d)\n", getpid());
qemu_backtrace();
fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
while(1){
sleep(1);
}
}
*/
void init_crash_handler(void){
//qemu_backtrace();
struct sigaction action;
action.sa_flags = SA_SIGINFO;
sigemptyset(&action.sa_mask); /* don't block additional signals in the handlers */
action.sa_sigaction = sigsegfault_handler;
if (sigaction(SIGSEGV, &action, NULL) == -1) {
fprintf(stderr, "SIGSEGV: sigaction failed");
_exit(1);
}
action.sa_sigaction = sigabrt_handler;
if (sigaction(SIGABRT, &action, NULL) == -1) {
fprintf(stderr, "SIGABRT: sigaction failed");
_exit(1);
}
/* don't install a SIGINT handler if the nyx block cow cache layer is disabled */
if(!getenv("NYX_DISABLE_BLOCK_COW")){
action.sa_sigaction = sigint_handler;
if (sigaction(SIGINT, &action, NULL) == -1) {
fprintf(stderr, "SIGINT: sigaction failed");
_exit(1);
}
}
//atexit(aexit_handler);
/* test */
//int i = 0;
//((char*)i)[3] = 0;
}
void hexdump_kafl(const void* data, size_t size) {
char ascii[17];
size_t i, j;
ascii[16] = '\0';
for (i = 0; i < size; ++i) {
printf("%02X ", ((unsigned char*)data)[i]);
if (((unsigned char*)data)[i] >= ' ' && ((unsigned char*)data)[i] <= '~') {
ascii[i % 16] = ((unsigned char*)data)[i];
} else {
ascii[i % 16] = '.';
}
if ((i+1) % 8 == 0 || i+1 == size) {
printf(" ");
if ((i+1) % 16 == 0) {
printf("| %s \n", ascii);
} else if (i+1 == size) {
ascii[(i+1) % 16] = '\0';
if ((i+1) % 16 <= 8) {
printf(" ");
}
for (j = (i+1) % 16; j < 16; ++j) {
printf(" ");
}
printf("| %s \n", ascii);
}
}
}
}
#endif
49
nyx/debug.h Normal file
View File
@ -0,0 +1,49 @@
#pragma once
#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define ENABLE_BACKTRACES
#define QEMU_PT_PRINT_PREFIX "[QEMU-PT]\t"
#define CORE_PREFIX "Core: "
#define MEM_PREFIX "Memory: "
#define RELOAD_PREFIX "Reload: "
#define PT_PREFIX "PT: "
#define INTERFACE_PREFIX "Interface: "
#define REDQUEEN_PREFIX "Redqueen: "
#define DISASM_PREFIX "Disasm: "
#define PAGE_CACHE_PREFIX "PageCache: "
#define NESTED_VM_PREFIX "Nested: "
#define DEBUG_VM_PREFIX "Debug: "
#define COLOR "\033[1;35m"
#define ENDC "\033[0m"
//#define debug_printf(format, ...) printf (format, ##__VA_ARGS__)
//#define debug_fprintf(fd, format, ...) fprintf (fd, format, ##__VA_ARGS__)
//#define QEMU_PT_PRINTF(PREFIX, format, ...) printf (QEMU_PT_PRINT_PREFIX COLOR PREFIX format ENDC "\n", ##__VA_ARGS__)
//#define QEMU_PT_PRINTF_DBG(PREFIX, format, ...) printf (QEMU_PT_PRINT_PREFIX PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
//#define QEMU_PT_PRINTF_DEBUG(format, ...) fprintf (stderr, QEMU_PT_PRINT_PREFIX DEBUG_VM_PREFIX "(%s#:%d)\t"format "\n", __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define debug_printf(format, ...)
#define debug_fprintf(fd, format, ...)
#define QEMU_PT_PRINTF(PREFIX, format, ...)
#define QEMU_PT_PRINTF_DBG(PREFIX, format, ...)
#define QEMU_PT_PRINTF_DEBUG(format, ...)
#ifdef ENABLE_BACKTRACES
void qemu_backtrace(void);
void init_crash_handler(void);
void hexdump_kafl(const void* data, size_t size);
#endif
613
nyx/fast_vm_reload.c Normal file
View File
@ -0,0 +1,613 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (HyperTrash / kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "migration/register.h"
#include "migration/savevm.h"
#include "migration/qemu-file.h"
#include "migration/global_state.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <immintrin.h>
#include <stdint.h>
#include "sysemu/kvm_int.h"
#include "sysemu/cpus.h"
#include "sysemu/reset.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/debug.h"
#include "nyx/state.h"
#include "sysemu/block-backend.h"
#include "block/qapi.h"
#include "sysemu/runstate.h"
#include "migration/vmstate.h"
#include "nyx/memory_access.h"
#include "nyx/helpers.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
#include "nyx/snapshot/devices/nyx_device_state.h"
#include "nyx/snapshot/block/nyx_block_snapshot.h"
FastReloadMemoryMode mode = RELOAD_MEMORY_MODE_DEBUG;
/* basic operations */
static void fast_snapshot_init_operation(fast_reload_t* self, const char* snapshot_folder, bool pre_snapshot){
assert((snapshot_folder == NULL && pre_snapshot == false) || snapshot_folder);
if (snapshot_folder){
self->device_state = nyx_device_state_init_from_snapshot(snapshot_folder, pre_snapshot);
self->shadow_memory_state = shadow_memory_init_from_snapshot(snapshot_folder, pre_snapshot);
}
else{
self->device_state = nyx_device_state_init();
self->shadow_memory_state = shadow_memory_init();
}
if(!pre_snapshot){
switch(mode){
case RELOAD_MEMORY_MODE_DEBUG:
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
break;
case RELOAD_MEMORY_MODE_FDL:
self->fdl_state = nyx_fdl_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
self->fdl_state = nyx_fdl_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state);
break;
}
self->fdl_user_state = nyx_fdl_user_init(self->shadow_memory_state);
nyx_fdl_user_enable(self->fdl_user_state);
}
if (snapshot_folder){
self->block_state = nyx_block_snapshot_init_from_file(snapshot_folder, pre_snapshot);
}
else{
self->block_state = nyx_block_snapshot_init();
}
memory_global_dirty_log_start();
if(!pre_snapshot){
self->root_snapshot_created = true;
}
}
static void fast_snapshot_init_from_snapshot_operation(fast_reload_t* self, const char* folder){
self->device_state = nyx_device_state_init();
self->shadow_memory_state = shadow_memory_init();
switch(mode){
case RELOAD_MEMORY_MODE_DEBUG:
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
break;
case RELOAD_MEMORY_MODE_FDL:
self->fdl_state = nyx_fdl_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
self->fdl_state = nyx_fdl_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
self->dirty_ring_state = nyx_dirty_ring_init(self->shadow_memory_state);
break;
}
self->fdl_user_state = nyx_fdl_user_init(self->shadow_memory_state);
nyx_fdl_user_enable(self->fdl_user_state);
self->block_state = nyx_block_snapshot_init();
memory_global_dirty_log_start();
self->root_snapshot_created = true;
}
static void fast_snapshot_restore_operation(fast_reload_t* self){
switch(mode){
case RELOAD_MEMORY_MODE_DEBUG:
nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, false);
break;
case RELOAD_MEMORY_MODE_FDL:
nyx_snapshot_nyx_fdl_restore(self->fdl_state, self->shadow_memory_state, self->blocklist);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
nyx_snapshot_nyx_fdl_restore(self->fdl_state, self->shadow_memory_state, self->blocklist);
nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true);
//assert(false);
//sleep(1);
break;
}
nyx_snapshot_user_fdl_restore(self->fdl_user_state, self->shadow_memory_state, self->blocklist);
//nyx_device_state_post_restore(self->device_state);
}
static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t* self){
/* flush all pending block writes */
bdrv_drain_all();
memory_global_dirty_log_sync();
nyx_device_state_switch_incremental(self->device_state);
nyx_block_snapshot_switch_incremental(self->block_state);
}
static inline void fast_snapshot_create_incremental_operation(fast_reload_t* self){
shadow_memory_prepare_incremental(self->shadow_memory_state);
nyx_device_state_save_tsc_incremental(self->device_state);
switch(mode){
case RELOAD_MEMORY_MODE_DEBUG:
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, false);
break;
case RELOAD_MEMORY_MODE_FDL:
nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state, self->shadow_memory_state, self->blocklist);
break;
case RELOAD_MEMORY_MODE_FDL_DEBUG:
nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state, self->shadow_memory_state, self->blocklist);
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
break;
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true);
break;
}
nyx_snapshot_nyx_fdl_user_save_root_pages(self->fdl_user_state, self->shadow_memory_state, self->blocklist);
shadow_memory_switch_snapshot(self->shadow_memory_state, true);
kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE_FAST);
qemu_get_cpu(0)->vcpu_dirty = false;
}
fast_reload_t* fast_reload_new(void){
fast_reload_t* self = malloc(sizeof(fast_reload_t));
memset(self, 0x0, sizeof(fast_reload_t));
self->root_snapshot_created = false;
self->incremental_snapshot_enabled = false;
self->bitmap_copy = NULL;
return self;
}
void fast_reload_set_mode(fast_reload_t* self, FastReloadMemoryMode m){
assert(!self->root_snapshot_created);
mode = m;
}
FastReloadMemoryMode fast_reload_get_mode(fast_reload_t* self){
return mode;
}
void fast_reload_init(fast_reload_t* self){
self->blocklist = snapshot_page_blocklist_init();
}
/* fix this */
void fast_reload_destroy(fast_reload_t* self){
/* complete me */
//close(self->vmx_fdl_fd);
//munmap(self->fdl_data, (self->guest_ram_size/0x1000)*8);
/*
munmap(self->ptr, self->guest_ram_size);
free(self->black_list_pages);
free(self);
*/
}
inline static void unlock_snapshot(const char* folder){
char* info_file;
char* lock_file;
assert(asprintf(&info_file, "%s/INFO.txt", folder) != -1);
/* info file */
FILE* f_info = fopen(info_file, "w+b");
if(GET_GLOBAL_STATE()->fast_reload_pre_image){
const char* msg = "THIS IS A NYX PRE IMAGE SNAPSHOT FOLDER!\n";
fwrite(msg, strlen(msg), 1, f_info);
}
else{
const char* msg = "THIS IS A NYX SNAPSHOT FOLDER!\n";
fwrite(msg, strlen(msg), 1, f_info);
}
fclose(f_info);
free(info_file);
assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1);
int fd = open(lock_file, O_WRONLY | O_CREAT, S_IRWXU);
close(fd);
free(lock_file);
}
inline static void wait_for_snapshot(const char* folder){
char* lock_file;
assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1);
while( access(lock_file, F_OK ) == -1 ) {
sleep(1);
}
free(lock_file);
}
void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder){
//printf("================ %s => %s =============\n", __func__, folder);
/* sanity check */
if(!folder_exits(folder)){
QEMU_PT_PRINTF(RELOAD_PREFIX,"Folder %s does not exist...failed!", folder);
assert(0);
}
/* shadow memory state */
shadow_memory_serialize(self->shadow_memory_state, folder);
/* device state */
nyx_device_state_serialize(self->device_state, folder);
/* block device state */
nyx_block_snapshot_serialize(self->block_state, folder);
/* NYX's state */
dump_global_state(folder);
/* finalize snapshot */
unlock_snapshot(folder);
}
static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* folder, bool lock_iothread, bool pre_snapshot){
//printf("%s called\n", __func__);
assert(self != NULL);
wait_for_snapshot(folder);
QEMU_PT_PRINTF(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);
rcu_read_lock();
bdrv_drain_all();
bdrv_flush_all();
cpu_synchronize_all_pre_loadvm();
if(!pre_snapshot){
memory_global_dirty_log_stop();
memory_global_dirty_log_sync();
}
fast_snapshot_init_operation(self, folder, pre_snapshot);
rcu_read_unlock();
if(!pre_snapshot){
load_global_state(folder);
}
cpu_synchronize_all_post_init();
qemu_get_cpu(0)->vcpu_dirty = true;
kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE);
if(!pre_snapshot){
nyx_device_state_save_tsc(self->device_state);
}
//fast_reload_restore(self);
vm_start();
}
void fast_reload_create_from_file(fast_reload_t* self, const char* folder, bool lock_iothread){
//printf("CALL: %s\n", __func__);
fast_reload_create_from_snapshot(self, folder, lock_iothread, false);
}
void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* folder, bool lock_iothread){
//printf("CALL: %s\n", __func__);
fast_reload_create_from_snapshot(self, folder, lock_iothread, true);
}
void fast_reload_create_in_memory(fast_reload_t* self){
assert(self != NULL);
debug_fprintf(stderr, "===>%s\n", __func__);
QEMU_PT_PRINTF(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM CURRENT VM STATE");
rcu_read_lock();
bdrv_drain_all();
bdrv_flush_all();
cpu_synchronize_all_pre_loadvm();
memory_global_dirty_log_stop();
memory_global_dirty_log_sync();
fast_snapshot_init_operation(self, NULL, false);
rcu_read_unlock();
cpu_synchronize_all_post_init();
}
void fast_reload_restore(fast_reload_t* self){
assert(self != NULL);
self->dirty_pages = 0;
//rcu_read_lock();
//cpu_synchronize_all_states();
//bdrv_drain_all_begin();
/* flush all pending block writes */
bdrv_drain_all();
//bdrv_flush_all();
memory_global_dirty_log_sync();
//unset_black_list_pages(self);
nyx_block_snapshot_reset(self->block_state);
/*
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
//if(!self->tmp_snapshot.enabled)
cow_cache_reset(self->cow_cache_array[i]);
}
*/
nyx_device_state_restore(self->device_state);
//fdl_fast_reload(self->qemu_state);
//fdl_fast_reload(self->device_state->qemu_state);
nyx_block_snapshot_flush(self->block_state);
//GET_GLOBAL_STATE()->cow_cache_full = false;
//call_fast_change_handlers();
fast_snapshot_restore_operation(self);
//find_dirty_pages_fdl(self);
//fast_reload_qemu_user_fdl_restore(self);
//set_tsc_value(self, self->tmp_snapshot.enabled);
nyx_device_state_post_restore(self->device_state);
kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE_FAST);
qemu_get_cpu(0)->vcpu_dirty = false;
//bdrv_drain_all_end();
//rcu_read_unlock();
//printf("========================= NEXT\n\n");
return;
}
bool read_snapshot_memory(fast_reload_t* self, uint64_t address, void* ptr, size_t size){
return shadow_memory_read_physical_memory(self->shadow_memory_state, address, ptr, size);
}
/* fix this */
void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr){
abort(); /* fix this function first -> pc_piix memory split issue */
/*
assert(self != NULL);
assert(!(physaddr&0xFFF)); // physaddr must be 4kb align !
if (self->shadow_memory_regions){
for(uint64_t j = 0; j < self->shadow_memory_regions; j++){
if(physaddr >= self->ram_block_array[j]->offset && physaddr < (self->ram_block_array[j]->offset+self->ram_block_array[j]->used_length)){
return self->shadow_memory[j]+(physaddr-self->ram_block_array[j]->offset);
}
}
}
*/
return NULL; // not found ... sorry :(
}
void fast_reload_blacklist_page(fast_reload_t* self, uint64_t physaddr){
assert(self->blocklist);
snapshot_page_blocklist_add(self->blocklist, physaddr);
return;
}
bool fast_reload_snapshot_exists(fast_reload_t* self){
if(!self){ // || !self->qemu_state){
return false;
}
return true;
}
void fast_reload_create_tmp_snapshot(fast_reload_t* self){
assert(self); // && self->qemu_state);
self->dirty_pages = 0;
fast_snapshot_pre_create_incremental_operation(self);
if(!self->bitmap_copy){
if(GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size){
assert(GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
self->bitmap_copy = malloc(GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
fuzz_bitmap_copy_to_buffer(self->bitmap_copy);
//GET_GLOBAL_STATE()->cow_cache_full = false;
//self->tmp_snapshot.root_dirty_pages_num = 0;
fast_snapshot_create_incremental_operation(self);
self->incremental_snapshot_enabled = true;
}
void fast_reload_discard_tmp_snapshot(fast_reload_t* self){
assert(self && self->incremental_snapshot_enabled);
self->dirty_pages = 0;
/* flush all pending block writes */
bdrv_drain_all();
memory_global_dirty_log_sync();
//unset_black_list_pages(self);
fast_snapshot_restore_operation(self);
//find_dirty_pages_fdl(self);
//fast_reload_qemu_user_fdl_restore(self);
shadow_memory_restore_memory(self->shadow_memory_state);
shadow_memory_switch_snapshot(self->shadow_memory_state, false);
//restore_root_memory(self);
nyx_device_state_disable_incremental(self->device_state);
//fdl_fast_disable_tmp(self->qemu_state);
//fdl_fast_disable_tmp(self->device_state->qemu_state);
nyx_block_snapshot_disable_incremental(self->block_state);
/*
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
cow_cache_disable_tmp_mode(self->cow_cache_array[i]);
}
*/
self->incremental_snapshot_enabled = false;
}
bool fast_reload_root_created(fast_reload_t* self){
return self->root_snapshot_created;
}
bool fast_reload_tmp_created(fast_reload_t* self){
return self->incremental_snapshot_enabled;
}
uint32_t get_dirty_page_num(fast_reload_t* self){
if(self){
return self->dirty_pages;
}
else{
return 0;
}
}
bool fast_reload_set_bitmap(fast_reload_t* self){
if(self->incremental_snapshot_enabled){
fuzz_bitmap_copy_from_buffer(self->bitmap_copy);
return true;
}
return false;
}
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t* self, MemoryRegion *mr, uint64_t addr, uint64_t length){
/* works only with PC.RAM's memory region */
assert(mr->alias_offset == 0);
nyx_fdl_user_set(self->fdl_user_state, self->shadow_memory_state, self->fdl_state, addr, length);
}
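/*
 * Presumably invoked on KVM_EXIT_DIRTY_RING_FULL (defined as 31 in the
 * kvm.h hunk above): drain the per-vcpu dirty ring into the snapshot's
 * dirty-page tracking so the vCPU can keep running.
 */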
void fast_reload_handle_dirty_ring_full(fast_reload_t* self){
if(self->dirty_ring_state){
nyx_snapshot_nyx_dirty_ring_flush_and_collect(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
}
else{
nyx_snapshot_nyx_dirty_ring_flush();
}
}
136
nyx/fast_vm_reload.h Normal file
View File
@ -0,0 +1,136 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (HyperTrash / kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "qemu-common.h"
#include "sysemu/runstate.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
#include "nyx/snapshot/devices/nyx_device_state.h"
#include "nyx/snapshot/block/nyx_block_snapshot.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
typedef enum FastReloadMemoryMode {
RELOAD_MEMORY_MODE_DEBUG, /* memcmp-based dirty tracing - it's super slow - only for debug purposes */
RELOAD_MEMORY_MODE_DEBUG_QUIET, /* debug mode in non-verbose mode */
RELOAD_MEMORY_MODE_FDL, /* super fast page tracker built around KVM-PT's dirty tracker (FDL = fast dirty log) */
RELOAD_MEMORY_MODE_FDL_DEBUG, /* FDL + debug mode */
RELOAD_MEMORY_MODE_DIRTY_RING, /* fast page tracker built around KVM's dirty ring API */
RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG, /* dirty ring + debug mode */
} FastReloadMemoryMode;
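/*
 * Assumption based on the comments above: the FDL modes depend on the
 * patched KVM-PT host kernel, while the dirty-ring modes only need KVM's
 * generic KVM_CAP_DIRTY_LOG_RING support; the *_DEBUG variants run the
 * fast tracker and the memcmp-based checker side by side to cross-check
 * missed pages.
 */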
typedef struct fast_reload_dump_head_s{
uint32_t shadow_memory_regions;
uint32_t ram_region_index;
} fast_reload_dump_head_t;
typedef struct fast_reload_s{
FastReloadMemoryMode mode;
/* memory snapshot */
shadow_memory_t* shadow_memory_state;
/* state of page frame blocklist */
snapshot_page_blocklist_t* blocklist;
/* state of FDL */
nyx_fdl_t* fdl_state;
/* dirty ring state */
nyx_dirty_ring_t* dirty_ring_state;
/* state of user-level FDL */
nyx_fdl_user_t* fdl_user_state;
/* nyx's serialized device state */
nyx_device_state_t* device_state;
nyx_block_t* block_state;
bool root_snapshot_created;
bool incremental_snapshot_enabled;
/* copy of the fuzzing bitmap & ijon state buffer */
void* bitmap_copy;
uint32_t dirty_pages;
} fast_reload_t;
fast_reload_t* fast_reload_new(void);
/* get rid of this */
void fast_reload_create_to_file(fast_reload_t* self, const char* folder, bool lock_iothread);
void fast_reload_create_from_file(fast_reload_t* self, const char* folder, bool lock_iothread);
void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* folder, bool lock_iothread);
/* keep this */
void fast_reload_create_in_memory(fast_reload_t* self);
void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder);
void fast_reload_restore(fast_reload_t* self);
void fast_reload_blacklist_page(fast_reload_t* self, uint64_t physaddr);
void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr);
bool fast_reload_snapshot_exists(fast_reload_t* self);
bool read_snapshot_memory(fast_reload_t* self, uint64_t address, void* ptr, size_t size);
void fast_reload_destroy(fast_reload_t* self);
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t* self, MemoryRegion *mr, uint64_t addr, uint64_t length);
void fast_reload_create_tmp_snapshot(fast_reload_t* self);
void fast_reload_discard_tmp_snapshot(fast_reload_t* self);
bool fast_reload_root_created(fast_reload_t* self);
bool fast_reload_tmp_created(fast_reload_t* self);
bool fast_reload_set_bitmap(fast_reload_t* self);
uint32_t get_dirty_page_num(fast_reload_t* self);
void fast_reload_init(fast_reload_t* self);
void fast_reload_set_mode(fast_reload_t* self, FastReloadMemoryMode m);
void fast_reload_handle_dirty_ring_full(fast_reload_t* self);
FastReloadMemoryMode fast_reload_get_mode(fast_reload_t* self);
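/*
 * Hedged usage sketch (the real driver lives in fast_vm_reload_sync.c;
 * run_next_input() is hypothetical): create the root snapshot once, then
 * restore once per execution.
 */
extern bool run_next_input(void); /* hypothetical fuzzing-loop hook */
static void fuzz_loop_sketch(void){
    fast_reload_t* snap = fast_reload_new();
    fast_reload_init(snap);
    fast_reload_set_mode(snap, RELOAD_MEMORY_MODE_FDL);
    fast_reload_create_in_memory(snap);  /* root snapshot of the running VM */
    while (run_next_input()) {
        fast_reload_restore(snap);       /* roll back RAM, device and disk state */
    }
}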
369
nyx/fast_vm_reload_sync.c Normal file
View File
@ -0,0 +1,369 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "fast_vm_reload_sync.h"
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include "qapi/qapi-types-run-state.h"
#include "qemu-common.h"
#include "exec/memory.h"
#include "qemu/main-loop.h"
#include "sysemu/kvm_int.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "nyx/state.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/debug.h"
#include "nyx/kvm_nested.h"
extern int save_snapshot(const char *name, Error **errp);
extern int load_snapshot(const char *name, Error **errp);
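/*
 * Presumably: at snapshot time RIP already points past the trapping
 * instruction (1-byte 'out' or 3-byte 'vmcall'), so rewinding by its length
 * makes every restore re-execute the hypercall site -- the guest re-enters
 * the fuzzing handshake at the same spot on each run.
 */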
static void adjust_rip(CPUX86State *env, fast_reload_t* snapshot){
switch(fast_reload_get_mode(snapshot)){
case RELOAD_MEMORY_MODE_DEBUG:
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
env->eip -= 1; /* out */
break;
case RELOAD_MEMORY_MODE_FDL:
case RELOAD_MEMORY_MODE_FDL_DEBUG:
env->eip -= 3; /* vmcall */
break;
case RELOAD_MEMORY_MODE_DIRTY_RING:
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
env->eip -= 1; /* out */
break;
}
}
fast_vm_reload_sync_t* init_fast_vm_reload_sync(void){
fast_vm_reload_sync_t* self = malloc(sizeof(fast_vm_reload_sync_t));
memset(self, 0, sizeof(fast_vm_reload_sync_t));
self->request_exists = false;
self->request_exists_pre = false;
self->current_request = REQUEST_VOID;
self->debug_mode = false;
/* TODO: only RELOAD_MODE_NO_BLOCK is supported for actual fuzzing */
self->mode = RELOAD_MODE_NO_BLOCK;
return self;
}
bool fast_snapshot_exists(fast_vm_reload_sync_t* self, FastReloadRequest type){
assert(self->mode != RELOAD_MODE_DEBUG);
switch(type){
case REQUEST_PRE_EXISTS:
abort();
case REQUEST_ROOT_EXISTS:
return fast_reload_root_created(get_fast_reload_snapshot());
case REQUEST_TMP_EXISTS:
return fast_reload_tmp_created(get_fast_reload_snapshot());
default:
abort();
}
}
static inline void perform_task_debug_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){
struct Error* errp = NULL;
switch(request){
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
abort();
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
save_snapshot("pre_root", &errp);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
abort();
case REQUEST_SAVE_SNAPSHOT_ROOT:
vm_stop(RUN_STATE_SAVE_VM);
save_snapshot("root", &errp);
break;
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
abort();
case REQUEST_SAVE_SNAPSHOT_TMP:
vm_stop(RUN_STATE_SAVE_VM);
save_snapshot("tmp", &errp);
break;
case REQUEST_LOAD_SNAPSHOT_PRE:
/* probably never called */
abort();
break;
case REQUEST_LOAD_SNAPSHOT_ROOT:
vm_stop(RUN_STATE_RESTORE_VM);
load_snapshot("root", &errp);
break;
case REQUEST_LOAD_SNAPSHOT_TMP:
vm_stop(RUN_STATE_RESTORE_VM);
load_snapshot("tmp", &errp);
break;
default:
abort();
}
if (errp) {
error_reportf_err(errp, "Error: ");
errp = NULL;
abort();
}
vm_start();
}
static inline void create_root_snapshot(void){
if (GET_GLOBAL_STATE()->fast_reload_enabled){
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_enabled: TRUE\n");
if (GET_GLOBAL_STATE()->fast_reload_mode){
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_mode: TRUE\n");
/* we've loaded an external snapshot folder - so do nothing and don't create any new snapshot files */
}
else{
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_mode: FALSE\n");
/* store the current state as a snapshot folder */
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_path);
}
}
else{
debug_printf("===> GET_GLOBAL_STATE()->fast_reload_enabled: FALSE\n");
/* so we haven't set a path for our snapshot files - just store everything in memory */
fast_reload_create_in_memory(get_fast_reload_snapshot());
}
}
static inline void perform_task_no_block_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){
CPUState* cpu = qemu_get_cpu(0);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
qemu_mutex_lock_iothread();
switch(request){
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
//fast_reload_create_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path, true);
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
qemu_mutex_unlock_iothread();
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
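/* fall through */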
case REQUEST_SAVE_SNAPSHOT_ROOT:
kvm_arch_get_registers(cpu);
vm_stop(RUN_STATE_SAVE_VM);
create_root_snapshot();
fast_reload_restore(get_fast_reload_snapshot());
//call_fast_change_handlers();
break;
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
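/* fall through */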
case REQUEST_SAVE_SNAPSHOT_TMP:
fast_reload_create_tmp_snapshot(get_fast_reload_snapshot());
fast_reload_restore(get_fast_reload_snapshot());
break;
case REQUEST_LOAD_SNAPSHOT_PRE:
abort();
break;
case REQUEST_LOAD_SNAPSHOT_ROOT:
case REQUEST_LOAD_SNAPSHOT_TMP:
//vm_stop(RUN_STATE_RESTORE_VM);
fast_reload_restore(get_fast_reload_snapshot());
//call_fast_change_handlers();
break;
case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP:
kvm_arch_get_registers(cpu);
adjust_rip(env, get_fast_reload_snapshot());
set_nested_rip(cpu, env->eip);
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
//case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED:
kvm_arch_get_registers(cpu);
vm_stop(RUN_STATE_SAVE_VM);
create_root_snapshot();
fast_reload_restore(get_fast_reload_snapshot());
break;
default:
abort();
}
vm_start();
//call_fast_change_handlers();
cpu_resume(cpu);
qemu_mutex_unlock_iothread();
}
static inline void perform_task_block_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){
switch(request){
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
//fast_reload_create_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path, true);
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_ROOT:
/* TODO: fix this */
vm_stop(RUN_STATE_SAVE_VM);
create_root_snapshot(); /* TODO: fix this - still broken in AHCI mode */
//fast_reload_create_in_memory(get_fast_reload_snapshot());
break;
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_TMP:
vm_stop(RUN_STATE_SAVE_VM);
fast_reload_create_tmp_snapshot(get_fast_reload_snapshot());
break;
case REQUEST_LOAD_SNAPSHOT_PRE:
abort();
break;
case REQUEST_LOAD_SNAPSHOT_ROOT:
case REQUEST_LOAD_SNAPSHOT_TMP:
vm_stop(RUN_STATE_RESTORE_VM);
fast_reload_restore(get_fast_reload_snapshot());
break;
default:
abort();
}
vm_start();
}
static inline void perform_task(fast_vm_reload_sync_t* self, FastReloadRequest request){
switch(self->mode){
case RELOAD_MODE_DEBUG:
/* RELOAD_MODE_DEBUG is not wired up yet (see the TODO in init_fast_vm_reload_sync) */
abort();
perform_task_debug_mode(self, request);
break;
case RELOAD_MODE_NO_BLOCK:
perform_task_no_block_mode(self, request);
break;
case RELOAD_MODE_BLOCK:
perform_task_block_mode(self, request);
break;
}
}
void request_fast_vm_reload(fast_vm_reload_sync_t* self, FastReloadRequest request){
assert(!self->request_exists);
assert(self->current_request == REQUEST_VOID);
if(self->mode == RELOAD_MODE_NO_BLOCK){
CPUState* cpu = qemu_get_cpu(0);
kvm_arch_get_registers(cpu);
//perform_task(self, request);
perform_task_no_block_mode(self, request);
}
else{
self->current_request = request;
self->request_exists = true;
self->request_exists_pre = true;
}
}
bool reload_request_exists(fast_vm_reload_sync_t* self){
return self->request_exists_pre;
}
void reload_request_discard_tmp(fast_vm_reload_sync_t* self){
fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot());
}
bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self){
if(self->request_exists_pre){
self->request_exists_pre = false;
abort();
/*
qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL_RT, false);
qemu_clock_enable(QEMU_CLOCK_HOST, false);
*/
//printf("%s: task found: %d\n", __func__, self->current_request);
CPUState* cpu = qemu_get_cpu(0);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
kvm_arch_get_registers(cpu);
switch(self->current_request){
case REQUEST_VOID:
fprintf(stderr, "%s: REQUEST_VOID requested!\n", __func__);
abort();
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
qemu_system_vmstop_request(RUN_STATE_SAVE_VM);
break;
case REQUEST_SAVE_SNAPSHOT_PRE:
case REQUEST_SAVE_SNAPSHOT_ROOT:
case REQUEST_SAVE_SNAPSHOT_TMP:
qemu_system_vmstop_request(RUN_STATE_SAVE_VM);
break;
case REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP:
adjust_rip(env, get_fast_reload_snapshot());
set_nested_rip(cpu, env->eip);
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
qemu_system_vmstop_request(RUN_STATE_SAVE_VM);
break; /* do not fall through into the load cases below */
case REQUEST_LOAD_SNAPSHOT_PRE:
case REQUEST_LOAD_SNAPSHOT_ROOT:
case REQUEST_LOAD_SNAPSHOT_TMP:
qemu_system_vmstop_request(RUN_STATE_RESTORE_VM);
break;
default:
fprintf(stderr, "%s: Unkown request: %d\n", __func__, self->current_request);
abort();
}
return true;
}
return false;
}
bool check_if_relood_request_exists_post(fast_vm_reload_sync_t* self){
if(self->request_exists){
FastReloadRequest request = self->current_request;
self->request_exists = false;
assert(self->current_request != REQUEST_VOID);
self->current_request = REQUEST_VOID;
perform_task(self, request);
/*
qemu_clock_enable(QEMU_CLOCK_HOST, true);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL_RT, true);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
*/
return true;
}
return false;
}

64
nyx/fast_vm_reload_sync.h Normal file
View File

@ -0,0 +1,64 @@
#pragma once
#include <stdbool.h>
typedef enum FastReloadRequest {
REQUEST_VOID,
/* create snapshots */
REQUEST_SAVE_SNAPSHOT_PRE,
REQUEST_SAVE_SNAPSHOT_ROOT,
REQUEST_SAVE_SNAPSHOT_TMP,
/* create snapshot and fix RIP (- sizeof(vmcall)) */
REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP,
REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP,
REQUEST_SAVE_SNAPSHOT_TMP_FIX_RIP,
/* create nested snapshots */
REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP,
REQUEST_SAVE_SNAPSHOT_TMP_NESTED_FIX_RIP,
/* load snapshots*/
REQUEST_LOAD_SNAPSHOT_PRE,
REQUEST_LOAD_SNAPSHOT_ROOT,
REQUEST_LOAD_SNAPSHOT_TMP,
/* check if snapshot exists */
REQUEST_PRE_EXISTS,
REQUEST_ROOT_EXISTS,
REQUEST_TMP_EXISTS,
//REQUEST_DISCARD_SNAPSHOT_TMP,
} FastReloadRequest;
typedef enum FastReloadMode {
RELOAD_MODE_DEBUG, /* savevm / loadvm based on QEMU's qcow2 storage - only for debug purposes */
RELOAD_MODE_NO_BLOCK, /* fastest mode - works only if no active block device is attached (e.g. initramfs mode) */
RELOAD_MODE_BLOCK,
} FastReloadMode;
typedef struct fast_vm_reload_sync_s{
bool request_exists;
bool request_exists_pre;
FastReloadRequest current_request;
bool debug_mode;
FastReloadMode mode;
} fast_vm_reload_sync_t;
fast_vm_reload_sync_t* init_fast_vm_reload_sync(void);
void request_fast_vm_reload(fast_vm_reload_sync_t* self, FastReloadRequest request);
bool reload_request_exists(fast_vm_reload_sync_t* self);
bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self);
bool check_if_relood_request_exists_post(fast_vm_reload_sync_t* self);
bool fast_snapshot_exists(fast_vm_reload_sync_t* self, FastReloadRequest type);
void reload_request_discard_tmp(fast_vm_reload_sync_t* self);
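/*
* Minimal usage sketch (illustrative only, not part of the original
* sources): a fuzzing frontend typically creates the root snapshot once
* and then restores it before every new input.
*
*   fast_vm_reload_sync_t* sync = init_fast_vm_reload_sync();
*   if (!fast_snapshot_exists(sync, REQUEST_ROOT_EXISTS))
*       request_fast_vm_reload(sync, REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP);
*   ... execute one input, then reset the VM state ...
*   request_fast_vm_reload(sync, REQUEST_LOAD_SNAPSHOT_ROOT);
*/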

141
nyx/file_helper.c Normal file
View File

@ -0,0 +1,141 @@
#include <assert.h>
#include <string.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include "redqueen.h"
#include "debug.h"
#include "file_helper.h"
///////////////////////////////////////////////////////////////////////////////////
// Private Helper Functions Declarations
///////////////////////////////////////////////////////////////////////////////////
size_t _count_lines_in_file(FILE* fp);
void _parse_addresses_in_file(FILE* fp, size_t num_addrs, uint64_t* addrs);
///////////////////////////////////////////////////////////////////////////////////
// Public Functions
///////////////////////////////////////////////////////////////////////////////////
void write_debug_result(char* buf){
int unused __attribute__((unused));
int fd = open("/tmp/qemu_debug.txt", O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
assert(fd >= 0);
unused = write(fd, buf, strlen(buf));
close(fd);
}
void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs){
FILE* fp = fopen(path,"r");
if(!fp){
*num_addrs = 0;
*addrs = NULL;
return;
}
*num_addrs = _count_lines_in_file(fp);
if(*num_addrs == 0){
*addrs = NULL;
goto exit_function;
}
assert(*num_addrs < 0xffff);
*addrs = malloc(sizeof(uint64_t)*(*num_addrs));
_parse_addresses_in_file(fp, *num_addrs, *addrs);
exit_function:
fclose(fp);
}
int re_fd = 0;
int se_fd = 0;
int trace_fd = 0;
void write_re_result(char* buf){
int unused __attribute__((unused));
if (!re_fd)
re_fd = open(redqueen_workdir.redqueen_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = write(re_fd, buf, strlen(buf));
}
void write_trace_result(redqueen_trace_t* trace_state){
//int fd;
int unused __attribute__((unused));
if (!trace_fd)
trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
redqueen_trace_write_file(trace_state, trace_fd);
//unused = write(trace_fd, buf, strlen(buf));
//close(fd);
}
void fsync_all_traces(void){
/* only sync file descriptors that have actually been opened */
if (trace_fd){
fsync(trace_fd);
}
if (se_fd){
fsync(se_fd);
}
if (re_fd){
fsync(re_fd);
}
}
void write_se_result(char* buf){
//int fd;
int unused __attribute__((unused));
if (!se_fd)
se_fd = open(redqueen_workdir.symbolic_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = write(se_fd, buf, strlen(buf));
//close(fd);
}
void delete_trace_files(void){
int unused __attribute__((unused));
if (!trace_fd)
trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = ftruncate(trace_fd, 0);
}
void delete_redqueen_files(void){
int unused __attribute__((unused));
if (!re_fd)
re_fd = open(redqueen_workdir.redqueen_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
if (!se_fd)
se_fd = open(redqueen_workdir.symbolic_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = ftruncate(re_fd, 0);
unused = ftruncate(se_fd, 0);
}
///////////////////////////////////////////////////////////////////////////////////
// Private Helper Functions Definitions
///////////////////////////////////////////////////////////////////////////////////
size_t _count_lines_in_file(FILE* fp){
size_t val = 0;
size_t count = 0;
while(1){
int scanres = fscanf(fp, "%lx", &val);
if(scanres == 0){
printf("WARNING, invalid line in address file");
assert(scanres != 0);
}
if(scanres == -1){break;}
count+=1;
}
rewind(fp);
return count;
}
void _parse_addresses_in_file(FILE* fp, size_t num_addrs, uint64_t* addrs){
for(size_t i = 0; i < num_addrs; i++){
assert(fscanf(fp, "%lx", &addrs[i]) == 1);
}
}

25
nyx/file_helper.h Normal file
View File

@ -0,0 +1,25 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include "redqueen_trace.h"
//doesn't take ownership of path, num_addrs or addrs
void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs);
//doesn't take ownership of buf
void write_re_result(char* buf);
//doesn't take ownership of buf
void write_se_result(char* buf);
//doesn't take ownership of trace_state
void write_trace_result(redqueen_trace_t* trace_state);
//doesn't take ownership of buf
void write_debug_result(char* buf);
void delete_redqueen_files(void);
void delete_trace_files(void);
void fsync_all_traces(void);
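/*
* Usage sketch (illustrative only, not part of the original sources; the
* path below is a placeholder): the address file contains one hex address
* per line, and the caller owns and must free the returned array.
*
*   size_t num_addrs;
*   uint64_t* addrs;
*   parse_address_file("/tmp/breakpoints.txt", &num_addrs, &addrs);
*   for (size_t i = 0; i < num_addrs; i++)
*       printf("0x%lx\n", addrs[i]);
*   free(addrs);
*/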

101
nyx/helpers.c Normal file
View File

@ -0,0 +1,101 @@
#include <stdio.h>
#include <stdint.h>
#include "nyx/helpers.h"
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "qemu-common.h"
#include "exec/memory.h"
#include "qemu/main-loop.h"
#include "sysemu/kvm_int.h"
#include "sysemu/kvm.h"
#include "nyx/state.h"
#include "nyx/memory_access.h"
#include "nyx/debug.h"
uint64_t get_rip(CPUState *cpu){
kvm_arch_get_registers(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
kvm_cpu_synchronize_state(cpu);
return env->eip;
}
int get_capstone_mode(int word_width_in_bits){
switch(word_width_in_bits){
case 64:
return CS_MODE_64;
case 32:
return CS_MODE_32;
default:
assert(false);
}
}
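/*
* Illustrative sketch (not from the original sources): the returned value
* feeds straight into capstone's cs_open(), e.g. when disassembling
* 64-bit guest code:
*
*   csh handle;
*   assert(cs_open(CS_ARCH_X86, get_capstone_mode(64), &handle) == CS_ERR_OK);
*   ... cs_disasm(...) ...
*   cs_close(&handle);
*/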
void fuzz_bitmap_reset(void){
if(GET_GLOBAL_STATE()->shared_bitmap_ptr){
//fprintf(stderr, "%s: %lx %lx\n", __func__, fuzz_bitmap, fuzz_bitmap_size);
memset(GET_GLOBAL_STATE()->shared_bitmap_ptr, 0x00, GET_GLOBAL_STATE()->shared_bitmap_size + GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
void fuzz_bitmap_copy_to_buffer(void* buffer){
if(GET_GLOBAL_STATE()->shared_bitmap_ptr){
memcpy(buffer, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_size + GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
void fuzz_bitmap_copy_from_buffer(void* buffer){
if(GET_GLOBAL_STATE()->shared_bitmap_ptr){
memcpy(GET_GLOBAL_STATE()->shared_bitmap_ptr, buffer, GET_GLOBAL_STATE()->shared_bitmap_size + GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}
void apply_capabilities(CPUState *cpu){
//X86CPU *cpux86 = X86_CPU(cpu);
//CPUX86State *env = &cpux86->env;
debug_fprintf(stderr, "%s: agent supports timeout detection: %d\n", __func__, GET_GLOBAL_STATE()->cap_timeout_detection);
debug_fprintf(stderr, "%s: agent supports only-reload mode: %d\n", __func__, GET_GLOBAL_STATE()->cap_only_reload_mode);
debug_fprintf(stderr, "%s: agent supports compile-time tracing: %d\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing );
if(GET_GLOBAL_STATE()->cap_compile_time_tracing){
GET_GLOBAL_STATE()->pt_trace_mode = false;
debug_fprintf(stderr, "%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
kvm_arch_get_registers_fast(cpu);
debug_printf("--------------------------\n");
debug_printf("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
debug_printf("GET_GLOBAL_STATE()->shared_bitmap_fd: %lx\n", GET_GLOBAL_STATE()->shared_bitmap_fd);
debug_printf("GET_GLOBAL_STATE()->shared_bitmap_size: %lx\n", GET_GLOBAL_STATE()->shared_bitmap_size);
debug_printf("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3);
debug_printf("--------------------------\n");
for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr+ i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
if(GET_GLOBAL_STATE()->cap_ijon_tracing){
debug_fprintf(stderr, "%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
kvm_arch_get_registers_fast(cpu);
for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_ijon_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr + i, (GET_GLOBAL_STATE()->shared_bitmap_size+i)/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_ijon_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}
}
bool folder_exits(const char* path){
struct stat sb;
return (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode));
}
bool file_exits(const char* path){
struct stat sb;
return (stat (path, &sb) == 0);
}

15
nyx/helpers.h Normal file
View File

@ -0,0 +1,15 @@
#pragma once
#include "qemu/osdep.h"
uint64_t get_rip(CPUState *cpu);
void fuzz_bitmap_reset(void);
void fuzz_bitmap_copy_to_buffer(void* buffer);
void fuzz_bitmap_copy_from_buffer(void* buffer);
int get_capstone_mode(int word_width_in_bits);
void apply_capabilities(CPUState *cpu);
bool folder_exits(const char* path);
bool file_exits(const char* path);

1299
nyx/hypercall.c Normal file

File diff suppressed because it is too large Load Diff

152
nyx/hypercall.h Normal file
View File

@ -0,0 +1,152 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#define PAYLOAD_BUFFER_SIZE 26
#define PRINTK_PAYLOAD_SIZE 4
#define KAFL_MODE_64 0
#define KAFL_MODE_32 1
#define KAFL_MODE_16 2
typedef struct{
uint64_t ip[4];
uint64_t size[4];
uint8_t enabled[4];
} kAFL_ranges;
bool check_bitmap_byte(uint32_t value);
//#define PANIC_DEBUG
/*
* Panic Notifier Payload (x86-64)
* fa cli
* 48 c7 c0 1f 00 00 00 mov rax,0x1f
* 48 c7 c3 08 00 00 00 mov rbx,0x8
* 48 c7 c1 00 00 00 00 mov rcx,0x0
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* KASAN Notifier Payload (x86-64)
* fa cli
* 48 c7 c0 1f 00 00 00 mov rax,0x1f
* 48 c7 c3 08 00 00 00 mov rbx,0x9
* 48 c7 c1 00 00 00 00 mov rcx,0x0
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
/*
* printk Notifier Payload (x86-64)
* 0f 01 c1 vmcall
* c3 retn
*/
#define PRINTK_PAYLOAD "\x0F\x01\xC1\xC3"
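/* Note (added for clarity): as the names and enable_notifies() suggest,
 * these byte strings are intended to be patched over the guest kernel's
 * panic/KASAN/printk handlers; a guest crash then raises a hypercall
 * (vmcall with rax=0x1f, rbx=8 for panic / 9 for KASAN) and halts, making
 * crashes visible to the host instead of rebooting the VM. */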
void pt_setup_program(void* ptr);
void pt_setup_snd_handler(void (*tmp)(char, void*), void* tmp_s);
void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end);
void pt_setup_enable_hypercalls(void);
void pt_disable_wrapper(CPUState *cpu);
void hypercall_submit_address(uint64_t address);
bool hypercall_check_tuple(uint64_t current_addr, uint64_t prev_addr);
//void hypercall_check_in_range(uint64_t* addr);
bool hypercall_check_transition(uint64_t value);
void hypercall_submit_transition(uint32_t value);
void hypercall_enable_filter(void);
void hypercall_disable_filter(void);
void hypercall_commit_filter(void);
bool pt_hypercalls_enabled(void);
void hypercall_unlock(void);
void hypercall_reload(void);
void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page);
void hprintf(char* msg);
void enable_notifies(void);
bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void hypercall_reset_hprintf_counter(void);
bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void pt_enable_rqo(CPUState *cpu);
void pt_disable_rqo(CPUState *cpu);
void pt_enable_rqi(CPUState *cpu);
void pt_disable_rqi(CPUState *cpu);
void pt_enable_rqi_trace(CPUState *cpu);
void pt_disable_rqi_trace(CPUState *cpu);
void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_instruction_mode);
void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval);
void pt_set_enable_patches_pending(CPUState *cpu);
void pt_set_disable_patches_pending(CPUState *cpu);
void create_fast_snapshot(CPUState *cpu, bool nested);
int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall, uint64_t arg);
void skip_init(void);
typedef struct host_config_s{
uint32_t bitmap_size;
uint32_t ijon_bitmap_size;
uint32_t payload_buffer_size;
/* more to come */
} __attribute__((packed)) host_config_t;
typedef struct agent_config_s{
uint8_t agent_timeout_detection;
uint8_t agent_tracing;
uint8_t agent_ijon_tracing;
uint8_t agent_non_reload_mode;
uint64_t trace_buffer_vaddr;
uint64_t ijon_trace_buffer_vaddr;
uint8_t dump_payloads; /* set by hypervisor */
/* more to come */
} __attribute__((packed)) agent_config_t;
typedef struct kafl_dump_file_s{
uint64_t file_name_str_ptr;
uint64_t data_ptr;
uint64_t bytes;
uint8_t append;
} __attribute__((packed)) kafl_dump_file_t;
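/*
* Illustrative note (not part of the original sources): these packed
* structs form the guest<->host wire format, so their sizes are ABI.
* A compile-time guard such as the following would catch accidental
* padding or field changes:
*
*   _Static_assert(sizeof(host_config_t)    == 12, "host_config ABI");
*   _Static_assert(sizeof(agent_config_t)   == 21, "agent_config ABI");
*   _Static_assert(sizeof(kafl_dump_file_t) == 25, "dump_file ABI");
*/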

458
nyx/interface.c Normal file
View File

@ -0,0 +1,458 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "hw/qdev-properties.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qom/object_interfaces.h"
#include "chardev/char-fe.h"
#include "sysemu/hostmem.h"
#include "sysemu/qtest.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"
#include <sys/mman.h>
#include <sys/stat.h>
#include "pt.h"
#include "nyx/hypercall.h"
#include "nyx/interface.h"
#include "nyx/debug.h"
#include "nyx/synchronization.h"
#include "nyx/snapshot/devices/state_reallocation.h"
#include "nyx/memory_access.h"
#include <sys/ioctl.h>
#include "nyx/state.h"
#include "nyx/sharedir.h"
#include "nyx/helpers.h"
#include <time.h>
#include "redqueen.h"
#define CONVERT_UINT64(x) (uint64_t)(strtoull(x, NULL, 16))
#define TYPE_KAFLMEM "kafl"
#define KAFLMEM(obj) \
OBJECT_CHECK(kafl_mem_state, (obj), TYPE_KAFLMEM)
uint32_t kafl_bitmap_size = DEFAULT_KAFL_BITMAP_SIZE;
static void pci_kafl_guest_realize(DeviceState *dev, Error **errp);
typedef struct kafl_mem_state {
DeviceState parent_obj;
Chardev *kafl_chr_drv_state;
CharBackend chr;
char* sharedir;
char* workdir;
uint32_t worker_id;
char* redqueen_workdir;
char* data_bar_fd_0;
char* data_bar_fd_1;
char* data_bar_fd_2;
char* bitmap_file;
char* filter_bitmap[4];
char* ip_filter[4][2];
uint64_t bitmap_size;
bool debug_mode; /* support for hprintf */
bool notifier;
bool dump_pt_trace;
bool redqueen;
} kafl_mem_state;
static void kafl_guest_event(void *opaque, QEMUChrEvent event){
}
static void send_char(char val, void* tmp_s){
kafl_mem_state *s = tmp_s;
assert(val == KAFL_PING);
__sync_synchronize();
qemu_chr_fe_write(&s->chr, (const uint8_t *) &val, 1);
}
static int kafl_guest_can_receive(void * opaque){
return sizeof(int64_t);
}
static kafl_mem_state* state = NULL;
static void init_send_char(kafl_mem_state* s){
state = s;
}
bool interface_send_char(char val){
if(state){
send_char(val, state);
return true;
}
return false;
}
static void kafl_guest_receive(void *opaque, const uint8_t * buf, int size){
int i;
for(i = 0; i < size; i++){
switch(buf[i]){
case KAFL_PING:
//fprintf(stderr, "Protocol - RECV: KAFL_PING\n");
synchronization_unlock();
break;
case '\n':
break;
case 'E':
exit(0);
default:
/* silently ignore unknown bytes */
break;
}
}
}
static int kafl_guest_create_memory_bar(kafl_mem_state *s, int region_num, uint64_t bar_size, const char* file, Error **errp){
void * ptr;
int fd;
struct stat st;
fd = open(file, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, bar_size) == 0);
stat(file, &st);
QEMU_PT_PRINTF(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", bar_size, st.st_size);
assert(bar_size == st.st_size);
ptr = mmap(0, bar_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED) {
error_setg_errno(errp, errno, "Failed to mmap memory");
return -1;
}
switch(region_num){
case 1: pt_setup_program((void*)ptr);
break;
case 2:
GET_GLOBAL_STATE()->shared_payload_buffer_fd = fd;
GET_GLOBAL_STATE()->shared_payload_buffer_size = bar_size;
break;
}
init_send_char(s);
return 0;
}
static void kafl_guest_setup_bitmap(kafl_mem_state *s, char* filename, uint32_t bitmap_size){
void * ptr;
int fd;
struct stat st;
fd = open(filename, O_CREAT|O_RDWR, S_IRWXU|S_IRWXG|S_IRWXO);
assert(ftruncate(fd, bitmap_size) == 0);
stat(filename, &st);
assert(bitmap_size == st.st_size);
ptr = mmap(0, bitmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
assert(ptr != MAP_FAILED);
GET_GLOBAL_STATE()->shared_bitmap_ptr = (void*)ptr;
GET_GLOBAL_STATE()->shared_bitmap_fd = fd;
GET_GLOBAL_STATE()->shared_bitmap_size = bitmap_size-DEFAULT_KAFL_IJON_BITMAP_SIZE;
GET_GLOBAL_STATE()->shared_ijon_bitmap_size = DEFAULT_KAFL_IJON_BITMAP_SIZE;
}
static bool verify_workdir_state(kafl_mem_state *s, Error **errp){
char* workdir = s->workdir;
uint32_t id = s->worker_id;
char* tmp;
if (!folder_exits(workdir)){
fprintf(stderr, "%s does not exist...\n", workdir);
return false;
}
set_workdir_path(workdir);
assert(asprintf(&tmp, "%s/dump/", workdir) != -1);
if (!folder_exits(tmp)){
mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
}
assert(asprintf(&tmp, "%s/interface_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/payload_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
else {
kafl_guest_create_memory_bar(s, 2, PAYLOAD_SIZE, tmp, errp);
}
free(tmp);
assert(asprintf(&tmp, "%s/bitmap_%d", workdir, id) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
} else {
kafl_guest_setup_bitmap(s, tmp, s->bitmap_size);
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.lock", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.addr", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache.dump", workdir) != -1);
if (!file_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
free(tmp);
assert(asprintf(&tmp, "%s/page_cache", workdir) != -1);
init_page_cache(tmp);
assert(asprintf(&tmp, "%s/redqueen_workdir_%d/", workdir, id) != -1);
if (!folder_exits(tmp)){
fprintf(stderr, "%s does not exist...\n", tmp);
free(tmp);
return false;
}
else {
setup_redqueen_workdir(tmp);
}
free(tmp);
init_redqueen_state();
if(s->dump_pt_trace){
assert(asprintf(&tmp, "%s/pt_trace_dump_%d", workdir, id) != -1);
pt_open_pt_trace_file(tmp);
free(tmp);
}
assert(asprintf(&tmp, "%s/aux_buffer_%d", workdir, id) != -1);
/*
if (file_exits(tmp)){
QEMU_PT_PRINTF(INTERFACE_PREFIX, "%s does not already exists...", tmp);
free(tmp);
return false;
}
else {
init_aux_buffer(tmp);
}
*/
init_aux_buffer(tmp);
free(tmp);
return true;
}
#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9)
static void check_range(uint8_t i){
int ret = 0;
int kvm = open("/dev/dell", O_RDWR | O_CLOEXEC);
ret = ioctl(kvm, KVM_VMX_PT_GET_ADDRN, NULL);
if(ret == -1){
QEMU_PT_PRINTF(INTERFACE_PREFIX, "ERROR: Multi range tracing is not supported! Please upgrade your kernel to 4.20-rc4!\n");
abort();
}
if(ret < (i+1)){
QEMU_PT_PRINTF(INTERFACE_PREFIX, "ERROR: CPU supports only %d IP filters!\n", ret);
abort();
}
close(kvm);
}
static bool verify_sharedir_state(kafl_mem_state *s, Error **errp){
char* sharedir = s->sharedir;
if (!folder_exits(sharedir)){
QEMU_PT_PRINTF(INTERFACE_PREFIX, "%s does not exist...", sharedir);
return false;
}
return true;
}
static void pci_kafl_guest_realize(DeviceState *dev, Error **errp){
uint64_t tmp0, tmp1;
kafl_mem_state *s = KAFLMEM(dev);
if(s->bitmap_size <= 0){
s->bitmap_size = DEFAULT_KAFL_BITMAP_SIZE;
}
assert((uint32_t)s->bitmap_size > (0x1000 + DEFAULT_KAFL_IJON_BITMAP_SIZE));
assert((((uint32_t)s->bitmap_size-DEFAULT_KAFL_IJON_BITMAP_SIZE) & (((uint32_t)s->bitmap_size-DEFAULT_KAFL_IJON_BITMAP_SIZE) - 1)) == 0 );
if(s->worker_id == 0xFFFF){
fprintf(stderr, "Invalid worker id...\n");
abort();
}
if (!s->workdir || !verify_workdir_state(s, errp)){
fprintf(stderr, "Invalid work dir...\n");
abort();
}
if (!s->sharedir || !verify_sharedir_state(s, errp)){
fprintf(stderr, "Invalid sharedir...\n");
//abort();
}
else{
sharedir_set_dir(GET_GLOBAL_STATE()->sharedir, s->sharedir);
}
if(&s->chr)
qemu_chr_fe_set_handlers(&s->chr, kafl_guest_can_receive, kafl_guest_receive, kafl_guest_event, NULL, s, NULL, true);
for(uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++){
if(s->ip_filter[i][0] && s->ip_filter[i][1]){
if(i >= 1){
check_range(i);
}
tmp0 = CONVERT_UINT64(s->ip_filter[i][0]);
tmp1 = CONVERT_UINT64(s->ip_filter[i][1]);
if (tmp0 < tmp1){
//if(s->filter_bitmap[i]){
// tmp = kafl_guest_setup_filter_bitmap(s, s->filter_bitmap[i], (uint64_t)(s->bitmap_size));
//}
pt_setup_ip_filters(i, tmp0, tmp1);
}
}
}
if(s->debug_mode){
GET_GLOBAL_STATE()->enable_hprintf = true;
}
if(s->notifier){
enable_notifies();
}
pt_setup_enable_hypercalls();
init_crash_handler();
}
static Property kafl_guest_properties[] = {
DEFINE_PROP_CHR("chardev", kafl_mem_state, chr),
DEFINE_PROP_STRING("sharedir", kafl_mem_state, sharedir),
DEFINE_PROP_STRING("workdir", kafl_mem_state, workdir),
DEFINE_PROP_UINT32("worker_id", kafl_mem_state, worker_id, 0xFFFF),
/*
* Since DEFINE_PROP_UINT64 is somehow broken (signed/unsigned madness),
* let's use DEFINE_PROP_STRING and post-process all values by strtoull...
*/
DEFINE_PROP_STRING("ip0_a", kafl_mem_state, ip_filter[0][0]),
DEFINE_PROP_STRING("ip0_b", kafl_mem_state, ip_filter[0][1]),
DEFINE_PROP_STRING("ip1_a", kafl_mem_state, ip_filter[1][0]),
DEFINE_PROP_STRING("ip1_b", kafl_mem_state, ip_filter[1][1]),
DEFINE_PROP_STRING("ip2_a", kafl_mem_state, ip_filter[2][0]),
DEFINE_PROP_STRING("ip2_b", kafl_mem_state, ip_filter[2][1]),
DEFINE_PROP_STRING("ip3_a", kafl_mem_state, ip_filter[3][0]),
DEFINE_PROP_STRING("ip3_b", kafl_mem_state, ip_filter[3][1]),
DEFINE_PROP_UINT64("bitmap_size", kafl_mem_state, bitmap_size, DEFAULT_KAFL_BITMAP_SIZE),
DEFINE_PROP_BOOL("debug_mode", kafl_mem_state, debug_mode, false),
DEFINE_PROP_BOOL("crash_notifier", kafl_mem_state, notifier, true),
DEFINE_PROP_BOOL("dump_pt_trace", kafl_mem_state, dump_pt_trace, false),
DEFINE_PROP_END_OF_LIST(),
};
static void kafl_guest_class_init(ObjectClass *klass, void *data){
DeviceClass *dc = DEVICE_CLASS(klass);
//PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
dc->realize = pci_kafl_guest_realize;
//k->class_id = PCI_CLASS_MEMORY_RAM;
dc->props = kafl_guest_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "KAFL Inter-VM shared memory";
}
static void kafl_guest_init(Object *obj){
}
static const TypeInfo kafl_guest_info = {
.name = TYPE_KAFLMEM,
.parent = TYPE_DEVICE,
.instance_size = sizeof(kafl_mem_state),
.instance_init = kafl_guest_init,
.class_init = kafl_guest_class_init,
};
static void kafl_guest_register_types(void){
type_register_static(&kafl_guest_info);
}
type_init(kafl_guest_register_types)

44
nyx/interface.h Normal file
View File

@ -0,0 +1,44 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef INTERFACE_H
#define INTERFACE_H
/* 64k bitmap + 4k ijon buffer */
#define DEFAULT_KAFL_IJON_BITMAP_SIZE 0x1000
#define DEFAULT_KAFL_BITMAP_SIZE (0x10000 + DEFAULT_KAFL_IJON_BITMAP_SIZE)
#define DEFAULT_EDGE_FILTER_SIZE 0x1000000
#define PROGRAM_SIZE (128 << 20) /* 128MB Application Data */
#define PAYLOAD_SIZE (128 << 10) /* 128KB Payload Data */
#define INFO_SIZE (128 << 10) /* 128KB Info Data */
#define HPRINTF_SIZE 0x1000 /* 4KB hprintf Data */
#define INFO_FILE "/tmp/kAFL_info.txt"
#define HPRINTF_FILE "/tmp/kAFL_printf.txt"
#define HPRINTF_LIMIT 512
#define KAFL_PING 'x'
bool interface_send_char(char val);
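/*
* Layout note (derived from kafl_guest_setup_bitmap() in interface.c,
* not part of the original sources): the shared bitmap file is a single
* mapping with the IJON buffer appended after the coverage bitmap:
*
*   [0x00000 .. 0x10000)  coverage bitmap  (shared_bitmap_size)
*   [0x10000 .. 0x11000)  IJON buffer      (shared_ijon_bitmap_size)
*/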
#endif

677
nyx/khash.h Normal file
View File

@ -0,0 +1,677 @@
/* The MIT License
Copyright (c) 2008, 2009, 2011 by Attractive Chaos <attractor@live.co.uk>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*
An example:
#include "khash.h"
KHASH_MAP_INIT_INT(32, char)
int main() {
int ret, is_missing;
khiter_t k;
khash_t(32) *h = kh_init(32);
k = kh_put(32, h, 5, &ret);
kh_value(h, k) = 10;
k = kh_get(32, h, 10);
is_missing = (k == kh_end(h));
k = kh_get(32, h, 5);
kh_del(32, h, k);
for (k = kh_begin(h); k != kh_end(h); ++k)
if (kh_exist(h, k)) kh_value(h, k) = 1;
kh_destroy(32, h);
return 0;
}
*/
/*
2013-05-02 (0.2.8):
* Use quadratic probing. When the capacity is power of 2, stepping function
i*(i+1)/2 guarantees to traverse each bucket. It is better than double
hashing on cache performance and is more robust than linear probing.
In theory, double hashing should be more robust than quadratic probing.
However, my implementation is probably not for large hash tables, because
the second hash function is closely tied to the first hash function,
which reduce the effectiveness of double hashing.
Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php
2011-12-29 (0.2.7):
* Minor code clean up; no actual effect.
2011-09-16 (0.2.6):
* The capacity is a power of 2. This seems to dramatically improve the
speed for simple keys. Thank Zilong Tan for the suggestion. Reference:
- http://code.google.com/p/ulib/
- http://nothings.org/computer/judy/
* Allow to optionally use linear probing which usually has better
performance for random input. Double hashing is still the default as it
is more robust to certain non-random input.
* Added Wang's integer hash function (not used by default). This hash
function is more robust to certain non-random input.
2011-02-14 (0.2.5):
* Allow to declare global functions.
2009-09-26 (0.2.4):
* Improve portability
2008-09-19 (0.2.3):
* Corrected the example
* Improved interfaces
2008-09-11 (0.2.2):
* Improved speed a little in kh_put()
2008-09-10 (0.2.1):
* Added kh_clear()
* Fixed a compiling error
2008-09-02 (0.2.0):
* Changed to token concatenation which increases flexibility.
2008-08-31 (0.1.2):
* Fixed a bug in kh_get(), which has not been tested previously.
2008-08-31 (0.1.1):
* Added destructor
*/
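/*
Worked example (added for clarity, not part of the upstream header):
with n_buckets a power of two, the quadratic probe i = (i + (++step)) & mask
visits every bucket exactly once before repeating. For 8 buckets,
starting at bucket 0, the probe sequence is: 0, 1, 3, 6, 2, 7, 5, 4.
*/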
#ifndef __AC_KHASH_H
#define __AC_KHASH_H
/*!
@header
Generic hash table library.
*/
#define AC_VERSION_KHASH_H "0.2.8"
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
/* compiler specific configuration */
#if UINT_MAX == 0xffffffffu
typedef unsigned int khint32_t;
#elif ULONG_MAX == 0xffffffffu
typedef unsigned long khint32_t;
#endif
#if ULONG_MAX == ULLONG_MAX
typedef unsigned long khint64_t;
#else
typedef unsigned long long khint64_t;
#endif
#ifndef kh_inline
#ifdef _MSC_VER
#define kh_inline __inline
#else
#define kh_inline inline
#endif
#endif /* kh_inline */
#ifndef klib_unused
#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3)
#define klib_unused __attribute__ ((__unused__))
#else
#define klib_unused
#endif
#endif /* klib_unused */
typedef khint64_t khint_t;
typedef khint_t khiter_t;
#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2)
#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1)
#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3)
#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1)))
#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1)))
#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1)))
#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1))
#define __ac_fw(item, fp) (fwrite(&(item), 1, sizeof(item), fp))
#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4)
#ifndef kroundup32
#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x))
#endif
#ifndef kcalloc
#define kcalloc(N,Z) calloc(N,Z)
#endif
#ifndef kmalloc
#define kmalloc(Z) malloc(Z)
#endif
#ifndef krealloc
#define krealloc(P,Z) realloc(P,Z)
#endif
#ifndef kfree
#define kfree(P) free(P)
#endif
static const double __ac_HASH_UPPER = 0.77;
#define __KHASH_TYPE(name, khkey_t, khval_t) \
typedef struct kh_##name##_s { \
khint_t n_buckets, size, n_occupied, upper_bound; \
khint32_t *flags; \
khkey_t *keys; \
khval_t *vals; \
} kh_##name##_t;
#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \
extern kh_##name##_t *kh_init_##name(void); \
extern void kh_destroy_##name(kh_##name##_t *h); \
extern void kh_clear_##name(kh_##name##_t *h); \
extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \
extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \
extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \
extern void kh_del_##name(kh_##name##_t *h, khint_t x);
#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
SCOPE kh_##name##_t *kh_init_##name(void) { \
return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \
} \
SCOPE void kh_destroy_##name(kh_##name##_t *h) \
{ \
if (h) { \
kfree((void *)h->keys); kfree(h->flags); \
kfree((void *)h->vals); \
kfree(h); \
} \
} \
SCOPE void kh_clear_##name(kh_##name##_t *h) \
{ \
if (h && h->flags) { \
memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \
h->size = h->n_occupied = 0; \
} \
} \
SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \
{ \
if (h->n_buckets) { \
khint_t k, i, last, mask, step = 0; \
mask = h->n_buckets - 1; \
k = __hash_func(key); i = k & mask; \
last = i; \
while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
i = (i + (++step)) & mask; \
if (i == last) return h->n_buckets; \
} \
return __ac_iseither(h->flags, i)? h->n_buckets : i; \
} else return 0; \
} \
SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \
{ /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \
khint32_t *new_flags = 0; \
khint_t j = 1; \
{ \
kroundup32(new_n_buckets); \
if (new_n_buckets < 4) new_n_buckets = 4; \
if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
else { /* hash table size to be changed (shrink or expand); rehash */ \
new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
if (!new_flags) return -1; \
memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
if (h->n_buckets < new_n_buckets) { /* expand */ \
khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
if (!new_keys) { kfree(new_flags); return -1; } \
h->keys = new_keys; \
if (kh_is_map) { \
khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
if (!new_vals) { kfree(new_flags); return -1; } \
h->vals = new_vals; \
} \
} /* otherwise shrink */ \
} \
} \
if (j) { /* rehashing is needed */ \
for (j = 0; j != h->n_buckets; ++j) { \
if (__ac_iseither(h->flags, j) == 0) { \
khkey_t key = h->keys[j]; \
khval_t val; \
khint_t new_mask; \
new_mask = new_n_buckets - 1; \
if (kh_is_map) val = h->vals[j]; \
__ac_set_isdel_true(h->flags, j); \
while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \
khint_t k, i, step = 0; \
k = __hash_func(key); \
i = k & new_mask; \
while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \
__ac_set_isempty_false(new_flags, i); \
if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \
{ khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \
if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \
__ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \
} else { /* write the element and jump out of the loop */ \
h->keys[i] = key; \
if (kh_is_map) h->vals[i] = val; \
break; \
} \
} \
} \
} \
if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \
h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \
if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \
} \
kfree(h->flags); /* free the working space */ \
h->flags = new_flags; \
h->n_buckets = new_n_buckets; \
h->n_occupied = h->size; \
h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \
} \
return 0; \
} \
SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \
{ \
khint_t x; \
if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \
if (h->n_buckets > (h->size<<1)) { \
if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \
*ret = -1; return h->n_buckets; \
} \
} else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \
*ret = -1; return h->n_buckets; \
} \
} /* TODO: to implement automatically shrinking; resize() already support shrinking */ \
{ \
khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \
x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \
if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \
else { \
last = i; \
while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \
if (__ac_isdel(h->flags, i)) site = i; \
i = (i + (++step)) & mask; \
if (i == last) { x = site; break; } \
} \
if (x == h->n_buckets) { \
if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \
else x = i; \
} \
} \
} \
if (__ac_isempty(h->flags, x)) { /* not present at all */ \
h->keys[x] = key; \
__ac_set_isboth_false(h->flags, x); \
++h->size; ++h->n_occupied; \
*ret = 1; \
} else if (__ac_isdel(h->flags, x)) { /* deleted */ \
h->keys[x] = key; \
__ac_set_isboth_false(h->flags, x); \
++h->size; \
*ret = 2; \
} else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \
return x; \
} \
SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \
{ \
if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \
__ac_set_isdel_true(h->flags, x); \
--h->size; \
} \
} \
SCOPE void kh_write_##name(kh_##name##_t *map, const char *path) { \
FILE *fp = fopen(path, "wb"); \
if(fp == NULL) { \
fprintf(stderr, "[%s] Could not open file %s.\n", __func__, path);\
assert(0); \
/*exit(EXIT_FAILURE);*/ \
} \
__ac_fw(map->n_buckets, fp); \
__ac_fw(map->n_occupied, fp); \
__ac_fw(map->size, fp); \
__ac_fw(map->upper_bound, fp); \
fwrite(map->flags, __ac_fsize(map->n_buckets), sizeof(khint32_t), fp);\
fwrite(map->keys, map->n_buckets, sizeof(*map->keys), fp); \
fwrite(map->vals, map->n_buckets, sizeof(*map->vals), fp); \
fclose(fp); \
} \
SCOPE kh_##name##_t *khash_load_##name(const char *path) \
{ \
kh_##name##_t *ret = calloc(1, sizeof(kh_##name##_t)); \
FILE *fp = fopen(path, "rb"); \
assert(sizeof(ret->n_buckets) == fread(&ret->n_buckets, 1, sizeof(ret->n_buckets), fp)); \
assert(sizeof(ret->n_occupied) == fread(&ret->n_occupied, 1, sizeof(ret->n_occupied), fp)); \
assert(sizeof(ret->size) == fread(&ret->size, 1, sizeof(ret->size), fp)); \
assert(sizeof(ret->upper_bound) == fread(&ret->upper_bound, 1, sizeof(ret->upper_bound), fp)); \
ret->flags = malloc(sizeof(*ret->flags) * __ac_fsize(ret->n_buckets));\
ret->keys = malloc(sizeof(khkey_t) * ret->n_buckets); \
ret->vals = malloc(sizeof(khval_t) * ret->n_buckets); \
assert(sizeof(*ret->flags) == fread(ret->flags, __ac_fsize(ret->n_buckets), sizeof(*ret->flags), fp));\
assert(ret->n_buckets * sizeof(*ret->keys) == fread(ret->keys, 1, ret->n_buckets * sizeof(*ret->keys), fp)); \
assert(ret->n_buckets * sizeof(*ret->vals) == fread(ret->vals, 1, ret->n_buckets * sizeof(*ret->vals), fp)); \
fclose(fp); \
return ret; \
}
#define KHASH_DECLARE(name, khkey_t, khval_t) \
__KHASH_TYPE(name, khkey_t, khval_t) \
__KHASH_PROTOTYPES(name, khkey_t, khval_t)
#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
__KHASH_TYPE(name, khkey_t, khval_t) \
__KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \
KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal)
/* --- BEGIN OF HASH FUNCTIONS --- */
/*! @function
@abstract Integer hash function
@param key The integer [khint32_t]
@return The hash value [khint_t]
*/
#define kh_int_hash_func(key) (khint32_t)(key)
/*! @function
@abstract Integer comparison function
*/
#define kh_int_hash_equal(a, b) ((a) == (b))
/*! @function
@abstract 64-bit integer hash function
@param key The integer [khint64_t]
@return The hash value [khint_t]
*/
#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11)
/*! @function
@abstract 64-bit integer comparison function
*/
#define kh_int64_hash_equal(a, b) ((a) == (b))
/*! @function
@abstract const char* hash function
@param s Pointer to a null terminated string
@return The hash value
*/
static kh_inline khint_t __ac_X31_hash_string(const char *s)
{
khint_t h = (khint_t)*s;
if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s;
return h;
}
/*! @function
@abstract Another interface to const char* hash function
@param key Pointer to a null terminated string [const char*]
@return The hash value [khint_t]
*/
#define kh_str_hash_func(key) __ac_X31_hash_string(key)
/*! @function
@abstract Const char* comparison function
*/
#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0)
static kh_inline khint_t __ac_Wang_hash(khint_t key)
{
key += ~(key << 15);
key ^= (key >> 10);
key += (key << 3);
key ^= (key >> 6);
key += ~(key << 11);
key ^= (key >> 16);
return key;
}
#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key)
/* --- END OF HASH FUNCTIONS --- */
/* Other convenient macros... */
/*!
@abstract Type of the hash table.
@param name Name of the hash table [symbol]
*/
#define khash_t(name) kh_##name##_t
/*! @function
@abstract Initiate a hash table.
@param name Name of the hash table [symbol]
@return Pointer to the hash table [khash_t(name)*]
*/
#define kh_init(name) kh_init_##name()
/*! @function
@abstract Destroy a hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
*/
#define kh_destroy(name, h) kh_destroy_##name(h)
/*! @function
@abstract Reset a hash table without deallocating memory.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
*/
#define kh_clear(name, h) kh_clear_##name(h)
/*! @function
@abstract Resize a hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param s New size [khint_t]
*/
#define kh_resize(name, h, s) kh_resize_##name(h, s)
/*! @function
@abstract Insert a key to the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys]
@param r Extra return code: -1 if the operation failed;
0 if the key is present in the hash table;
1 if the bucket is empty (never used); 2 if the element in
the bucket has been deleted [int*]
@return Iterator to the inserted element [khint_t]
*/
#define kh_put(name, h, k, r) kh_put_##name(h, k, r)
/*! @function
@abstract Retrieve a key from the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Key [type of keys]
@return Iterator to the found element, or kh_end(h) if the element is absent [khint_t]
*/
#define kh_get(name, h, k) kh_get_##name(h, k)
/*! @function
@abstract Remove a key from the hash table.
@param name Name of the hash table [symbol]
@param h Pointer to the hash table [khash_t(name)*]
@param k Iterator to the element to be deleted [khint_t]
*/
#define kh_del(name, h, k) kh_del_##name(h, k)
/*! @function
@abstract Write a hash map to disk.
@param h Pointer to the hash table [khash_t(name)*]
@param path Path to which to write. [const char *]
*/
#define kh_write(name, h, path) kh_write_##name(h, path)
/*! @function
@abstract Load a hash table from disk
@param name Name of the hash table [symbol]
@param path Path to file from which to load [const char *]
*/
#define kh_load(name, path) khash_load_##name(path)
/*! @function
@abstract Test whether a bucket contains data.
@param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t]
@return 1 if containing data; 0 otherwise [int]
*/
#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x)))
/*! @function
@abstract Get key given an iterator
@param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t]
@return Key [type of keys]
*/
#define kh_key(h, x) ((h)->keys[x])
/*! @function
@abstract Get value given an iterator
@param h Pointer to the hash table [khash_t(name)*]
@param x Iterator to the bucket [khint_t]
@return Value [type of values]
@discussion For hash sets, calling this results in segfault.
*/
#define kh_val(h, x) ((h)->vals[x])
/*! @function
@abstract Alias of kh_val()
*/
#define kh_value(h, x) ((h)->vals[x])
/*! @function
@abstract Get the start iterator
@param h Pointer to the hash table [khash_t(name)*]
@return The start iterator [khint_t]
*/
#define kh_begin(h) (khint_t)(0)
/*! @function
@abstract Get the end iterator
@param h Pointer to the hash table [khash_t(name)*]
@return The end iterator [khint_t]
*/
#define kh_end(h) ((h)->n_buckets)
/*! @function
@abstract Get the number of elements in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@return Number of elements in the hash table [khint_t]
*/
#define kh_size(h) ((h)->size)
/*! @function
@abstract Get the number of buckets in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@return Number of buckets in the hash table [khint_t]
*/
#define kh_n_buckets(h) ((h)->n_buckets)
/*! @function
@abstract Iterate over the entries in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@param kvar Variable to which key will be assigned
@param vvar Variable to which value will be assigned
@param code Block of code to execute
*/
#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \
for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
if (!kh_exist(h,__i)) continue; \
(kvar) = kh_key(h,__i); \
(vvar) = kh_val(h,__i); \
code; \
} }
/*! @function
@abstract Iterate over the values in the hash table
@param h Pointer to the hash table [khash_t(name)*]
@param vvar Variable to which value will be assigned
@param code Block of code to execute
*/
#define kh_foreach_value(h, vvar, code) { khint_t __i; \
for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \
if (!kh_exist(h,__i)) continue; \
(vvar) = kh_val(h,__i); \
code; \
} }
/* More convenient interfaces */
/*! @function
@abstract Instantiate a hash set containing integer keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_INT(name) \
KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal)
/*! @function
@abstract Instantiate a hash map containing integer keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT(name, khval_t) \
KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal)
/*! @function
@abstract Instantiate a hash set containing 64-bit integer keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_INT64(name) \
KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal)
/*! @function
@abstract Instantiate a hash map containing 64-bit integer keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT64(name, khval_t) \
KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal)
typedef const char *kh_cstr_t;
/*! @function
@abstract Instantiate a hash map containing const char* keys
@param name Name of the hash table [symbol]
*/
#define KHASH_SET_INIT_STR(name) \
KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal)
/*! @function
@abstract Instantiate a hash map containing const char* keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_STR(name, khval_t) \
KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal)
#endif /* __AC_KHASH_H */

457
nyx/kvm_nested.c Normal file
View File

@ -0,0 +1,457 @@
#include "nyx/kvm_nested.h"
#include "cpu.h"
#include <linux/kvm.h>
#include "nyx/debug.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "nyx/state.h"
#include "sysemu/kvm.h"
#include "pt.h"
#define PPAGE_SIZE 0x1000
#define PENTRIES 0x200
#define PLEVEL_4_SHIFT 12
#define PLEVEL_3_SHIFT 21
#define PLEVEL_2_SHIFT 30
#define PLEVEL_1_SHIFT 39
#define SIGN_EXTEND_TRESHOLD 0x100
#define SIGN_EXTEND 0xFFFF000000000000ULL
#define PAGETABLE_MASK 0xFFFFFFFFFF000ULL
#define CHECK_BIT(var,pos) !!(((var) & (1ULL<<(pos))))
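/*
* Illustrative sketch (not from the original sources): how the
* PLEVEL_*_SHIFT constants above split a canonical x86-64 virtual
* address into the four page-table indices (9 bits each, PENTRIES = 512):
*
*   uint16_t pml4 = (vaddr >> PLEVEL_1_SHIFT) & (PENTRIES - 1);
*   uint16_t pdpt = (vaddr >> PLEVEL_2_SHIFT) & (PENTRIES - 1);
*   uint16_t pd   = (vaddr >> PLEVEL_3_SHIFT) & (PENTRIES - 1);
*   uint16_t pt   = (vaddr >> PLEVEL_4_SHIFT) & (PENTRIES - 1);
*/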
struct vmcs_hdr {
uint32_t revision_id:31;
uint32_t shadow_vmcs:1;
};
struct __attribute__((__packed__)) vmcs12 {
/* According to the Intel spec, a VMCS region must start with the
* following two fields. Then follow implementation-specific data.
*/
struct vmcs_hdr hdr;
uint32_t abort;
uint32_t launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
uint32_t padding[7]; /* room for future expansion */
uint64_t io_bitmap_a;
uint64_t io_bitmap_b;
uint64_t msr_bitmap;
uint64_t vm_exit_msr_store_addr;
uint64_t vm_exit_msr_load_addr;
uint64_t vm_entry_msr_load_addr;
uint64_t tsc_offset;
uint64_t virtual_apic_page_addr;
uint64_t apic_access_addr;
uint64_t posted_intr_desc_addr;
uint64_t ept_pointer;
uint64_t eoi_exit_bitmap0;
uint64_t eoi_exit_bitmap1;
uint64_t eoi_exit_bitmap2;
uint64_t eoi_exit_bitmap3;
uint64_t xss_exit_bitmap;
uint64_t guest_physical_address;
uint64_t vmcs_link_pointer;
uint64_t guest_ia32_debugctl;
uint64_t guest_ia32_pat;
uint64_t guest_ia32_efer;
uint64_t guest_ia32_perf_global_ctrl;
uint64_t guest_pdptr0;
uint64_t guest_pdptr1;
uint64_t guest_pdptr2;
uint64_t guest_pdptr3;
uint64_t guest_bndcfgs;
uint64_t host_ia32_pat;
uint64_t host_ia32_efer;
uint64_t host_ia32_perf_global_ctrl;
uint64_t vmread_bitmap;
uint64_t vmwrite_bitmap;
uint64_t vm_function_control;
uint64_t eptp_list_address;
uint64_t pml_address;
uint64_t padding64[3]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
* unsigned long fields with no explicit size. We use uint64_t instead.
* Luckily, x86 is little-endian.
*/
uint64_t cr0_guest_host_mask;
uint64_t cr4_guest_host_mask;
uint64_t cr0_read_shadow;
uint64_t cr4_read_shadow;
uint64_t cr3_target_value0;
uint64_t cr3_target_value1;
uint64_t cr3_target_value2;
uint64_t cr3_target_value3;
uint64_t exit_qualification;
uint64_t guest_linear_address;
uint64_t guest_cr0;
uint64_t guest_cr3;
uint64_t guest_cr4;
uint64_t guest_es_base;
uint64_t guest_cs_base;
uint64_t guest_ss_base;
uint64_t guest_ds_base;
uint64_t guest_fs_base;
uint64_t guest_gs_base;
uint64_t guest_ldtr_base;
uint64_t guest_tr_base;
uint64_t guest_gdtr_base;
uint64_t guest_idtr_base;
uint64_t guest_dr7;
uint64_t guest_rsp;
uint64_t guest_rip;
uint64_t guest_rflags;
uint64_t guest_pending_dbg_exceptions;
uint64_t guest_sysenter_esp;
uint64_t guest_sysenter_eip;
uint64_t host_cr0;
uint64_t host_cr3;
uint64_t host_cr4;
uint64_t host_fs_base;
uint64_t host_gs_base;
uint64_t host_tr_base;
uint64_t host_gdtr_base;
uint64_t host_idtr_base;
uint64_t host_ia32_sysenter_esp;
uint64_t host_ia32_sysenter_eip;
uint64_t host_rsp;
uint64_t host_rip;
uint64_t paddingl[8]; /* room for future expansion */
uint32_t pin_based_vm_exec_control;
uint32_t cpu_based_vm_exec_control;
uint32_t exception_bitmap;
uint32_t page_fault_error_code_mask;
uint32_t page_fault_error_code_match;
uint32_t cr3_target_count;
uint32_t vm_exit_controls;
uint32_t vm_exit_msr_store_count;
uint32_t vm_exit_msr_load_count;
uint32_t vm_entry_controls;
uint32_t vm_entry_msr_load_count;
uint32_t vm_entry_intr_info_field;
uint32_t vm_entry_exception_error_code;
uint32_t vm_entry_instruction_len;
uint32_t tpr_threshold;
uint32_t secondary_vm_exec_control;
uint32_t vm_instruction_error;
uint32_t vm_exit_reason;
uint32_t vm_exit_intr_info;
uint32_t vm_exit_intr_error_code;
uint32_t idt_vectoring_info_field;
uint32_t idt_vectoring_error_code;
uint32_t vm_exit_instruction_len;
uint32_t vmx_instruction_info;
uint32_t guest_es_limit;
uint32_t guest_cs_limit;
uint32_t guest_ss_limit;
uint32_t guest_ds_limit;
uint32_t guest_fs_limit;
uint32_t guest_gs_limit;
uint32_t guest_ldtr_limit;
uint32_t guest_tr_limit;
uint32_t guest_gdtr_limit;
uint32_t guest_idtr_limit;
uint32_t guest_es_ar_bytes;
uint32_t guest_cs_ar_bytes;
uint32_t guest_ss_ar_bytes;
uint32_t guest_ds_ar_bytes;
uint32_t guest_fs_ar_bytes;
uint32_t guest_gs_ar_bytes;
uint32_t guest_ldtr_ar_bytes;
uint32_t guest_tr_ar_bytes;
uint32_t guest_interruptibility_info;
uint32_t guest_activity_state;
uint32_t guest_sysenter_cs;
uint32_t host_ia32_sysenter_cs;
uint32_t vmx_preemption_timer_value;
uint32_t padding32[7]; /* room for future expansion */
uint16_t virtual_processor_id;
uint16_t posted_intr_nv;
uint16_t guest_es_selector;
uint16_t guest_cs_selector;
uint16_t guest_ss_selector;
uint16_t guest_ds_selector;
uint16_t guest_fs_selector;
uint16_t guest_gs_selector;
uint16_t guest_ldtr_selector;
uint16_t guest_tr_selector;
uint16_t guest_intr_status;
uint16_t host_es_selector;
uint16_t host_cs_selector;
uint16_t host_ss_selector;
uint16_t host_ds_selector;
uint16_t host_fs_selector;
uint16_t host_gs_selector;
uint16_t host_tr_selector;
uint16_t guest_pml_index;
};
static void write_address(uint64_t address, uint64_t size, uint64_t prot){
static uint64_t next_address = PAGETABLE_MASK;
static uint64_t last_address = 0x0;
static uint64_t last_prot = 0;
if(address != next_address || prot != last_prot){
/* do not print guard pages or empty pages without any permissions */
if(last_address && (CHECK_BIT(last_prot, 1) || !CHECK_BIT(last_prot, 63))){
if(CHECK_BIT(last_prot, 1) && !CHECK_BIT(last_prot, 63)){
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c [WARNING]",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63)? 'X' : '-');
}
else{
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63)? 'X' : '-');
}
}
last_address = address;
}
next_address = address+size;
last_prot = prot;
}
void print_48_paging(uint64_t cr3){
uint64_t paging_entries_level_1[PENTRIES];
uint64_t paging_entries_level_2[PENTRIES];
uint64_t paging_entries_level_3[PENTRIES];
uint64_t paging_entries_level_4[PENTRIES];
uint64_t address_identifier_1, address_identifier_2, address_identifier_3, address_identifier_4;
uint32_t i1, i2, i3,i4;
cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_level_1, PPAGE_SIZE, false);
for(i1 = 0; i1 < PENTRIES; i1++){
if(paging_entries_level_1[i1]){
address_identifier_1 = ((uint64_t)i1) << PLEVEL_1_SHIFT;
if (i1 & SIGN_EXTEND_THRESHOLD){
address_identifier_1 |= SIGN_EXTEND;
}
if(CHECK_BIT(paging_entries_level_1[i1], 0)){ /* otherwise swapped out */
cpu_physical_memory_rw((paging_entries_level_1[i1]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_2, PPAGE_SIZE, false);
for(i2 = 0; i2 < PENTRIES; i2++){
if(paging_entries_level_2[i2]){
address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) + address_identifier_1;
if (CHECK_BIT(paging_entries_level_2[i2], 0)){ /* otherwise swapped out */
if((paging_entries_level_2[i2]&PAGETABLE_MASK) == (paging_entries_level_1[i1]&PAGETABLE_MASK)){
/* loop */
continue;
}
if (CHECK_BIT(paging_entries_level_2[i2], 7)){
/* PS bit set: this PDPTE maps a 1GB page */
write_address(address_identifier_2, 0x40000000, (uint64_t)paging_entries_level_2[i2] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
/* otherwise this PDPTE references a page directory */
cpu_physical_memory_rw((paging_entries_level_2[i2]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_3, PPAGE_SIZE, false);
for(i3 = 0; i3 < PENTRIES; i3++){
if(paging_entries_level_3[i3]){
address_identifier_3 = (((uint64_t)i3) << PLEVEL_3_SHIFT) + address_identifier_2;
if (CHECK_BIT(paging_entries_level_3[i3], 0)){ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_level_3[i3], 7)){
write_address(address_identifier_3, 0x200000, (uint64_t)paging_entries_level_3[i3] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
cpu_physical_memory_rw((paging_entries_level_3[i3]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_4, PPAGE_SIZE, false);
for(i4 = 0; i4 < PENTRIES; i4++){
if(paging_entries_level_4[i4]){
address_identifier_4 = (((uint64_t)i4) << PLEVEL_4_SHIFT) + address_identifier_3;
if (CHECK_BIT(paging_entries_level_4[i4], 0)){
write_address(address_identifier_4, 0x1000, (uint64_t)paging_entries_level_4[i4] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
}
}
}
}
}
}
}
}
}
}
}
}
}
write_address(0, 0x1000, 0);
}
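As a side note, the address_identifier_* computation above recombines the four 9-bit table indices into a canonical 48-bit virtual address. A sketch of that mapping in isolation (not part of this commit):

/* Sketch: rebuild a canonical virtual address from 4-level paging indices,
 * mirroring the shifts and sign extension used in print_48_paging(). */
static uint64_t indices_to_va(uint64_t i1, uint64_t i2, uint64_t i3, uint64_t i4){
    uint64_t va = (i1 << PLEVEL_1_SHIFT)   /* PML4 index -> bits 47..39 */
                | (i2 << PLEVEL_2_SHIFT)   /* PDPT index -> bits 38..30 */
                | (i3 << PLEVEL_3_SHIFT)   /* PD index   -> bits 29..21 */
                | (i4 << PLEVEL_4_SHIFT);  /* PT index   -> bits 20..12 */
    if (i1 & SIGN_EXTEND_THRESHOLD){       /* PML4 index >= 0x100: upper half */
        va |= SIGN_EXTEND;
    }
    return va;
}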
/*
static bool change_page_permissions(uint64_t phys_addr, CPUState *cpu){
RAMBlock *block;
//MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if(!memcmp(block->idstr, "pc.ram", 6)){
printf("FOUND AND MODIFIED! %lx\n", mprotect((void*)(((uint64_t)block->host) + phys_addr), 0x1000, PROT_NONE));
break;
}
}
return true;
}
*/
uint64_t get_nested_guest_rip(CPUState *cpu){
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
return saved_vmcs->guest_rip;
}
uint64_t get_nested_host_rip(CPUState *cpu){
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
return saved_vmcs->host_rip;
}
uint64_t get_nested_host_cr3(CPUState *cpu){
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
return saved_vmcs->host_cr3;
}
void set_nested_rip(CPUState *cpu, uint64_t rip){
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
//kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
saved_vmcs->guest_rip = rip;
//return saved_vmcs->guest_rip;
}
void kvm_nested_get_info(CPUState *cpu){
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;
kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);
struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx", saved_vmcs->host_ia32_efer);
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0);
return;
//cpu->parent_cr3 = saved_vmcs->host_cr3+0x1000;
GET_GLOBAL_STATE()->parent_cr3 = saved_vmcs->host_cr3+0x1000;
fprintf(stderr, "saved_vmcs->guest_cr3: %lx %lx %lx\n", saved_vmcs->guest_cr3, saved_vmcs->host_cr3, env->cr[3]);
pt_set_cr3(cpu, saved_vmcs->host_cr3+0x1000, false); /* USERSPACE */
//pt_set_cr3(cpu, saved_vmcs->host_cr3+0x1000, false); /* KERNELSPACE QEMU fuzzing fix...fucking kpti (https://gruss.cc/files/kaiser.pdf)!!! */
/* let's modify page permissions of our CR3 referencing PTs */
//change_page_permissions(cpu->parent_cr3, cpu);
if (!(saved_vmcs->host_cr0 & CR0_PG_MASK)) {
printf("PG disabled\n");
}
else{
if (saved_vmcs->host_cr4 & CR4_PAE_MASK) {
if (saved_vmcs->host_ia32_efer & (1 << 10)) {
if (saved_vmcs->host_cr4 & CR4_LA57_MASK) {
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_la57");
abort();
//mem_info_la57(mon, env);
} else {
QEMU_PT_PRINTF(NESTED_VM_PREFIX, " ==== L1 Page Tables ====");
print_48_paging(saved_vmcs->host_cr3);
if(saved_vmcs->ept_pointer){
QEMU_PT_PRINTF(NESTED_VM_PREFIX, " ==== L2 Page Tables ====");
print_48_paging(saved_vmcs->ept_pointer);
}
//mem_info_la48(mon, env);
}
}
else{
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_pae32");
abort();
//mem_info_pae32(mon, env);
}
}
else {
QEMU_PT_PRINTF(NESTED_VM_PREFIX, "mem_info_32");
abort();
//mem_info_32(mon, env);
}
}
}
#define AREA_DESC_LEN 256
#define MAGIC_NUMBER 0x41584548U
typedef struct {
uint32_t base;
uint32_t size;
uint32_t virtual_base;
char desc[AREA_DESC_LEN];
}area_t_export_t;
typedef struct {
uint32_t magic;
uint8_t num_mmio_areas;
uint8_t num_io_areas;
uint8_t num_alloc_areas;
uint8_t padding;
}config_t;
void print_configuration(FILE *stream, void* configuration, size_t size){
//void print_configuration(void* configuration, size_t size){
fprintf(stream, "%s: size: %lx\n", __func__, size);
assert((size-sizeof(config_t))%sizeof(area_t_export_t) == 0);
assert(((config_t*)configuration)->magic == MAGIC_NUMBER);
fprintf(stream, "%s: num_mmio_areas: %x\n", __func__, ((config_t*)configuration)->num_mmio_areas);
fprintf(stream, "%s: num_io_areas: %x\n", __func__, ((config_t*)configuration)->num_io_areas);
fprintf(stream, "%s: num_alloc_areas: %x\n", __func__, ((config_t*)configuration)->num_alloc_areas);
for(int i = 0; i < ((config_t*)configuration)->num_mmio_areas; i++){
fprintf(stream, "\t-> MMIO: 0x%x (V: 0x%x) [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].virtual_base,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].size,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].desc );
}
for(int i = ((config_t*)configuration)->num_mmio_areas; i < (((config_t*)configuration)->num_mmio_areas+((config_t*)configuration)->num_io_areas); i++){
fprintf(stream, "\t-> IO: 0x%x [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].size,
((area_t_export_t*)(configuration+sizeof(config_t)))[i].desc );
}
}
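For illustration, a sketch of how a guest-side component might assemble a blob that print_configuration() accepts (not part of this commit; the helper name and all values are made up, and <stdlib.h>/<string.h> are assumed):

/* Sketch: build a config_t header followed by one area_t_export_t entry,
 * matching the layout parsed above. Values are purely illustrative. */
static void* build_example_config(size_t *size_out){
    size_t size = sizeof(config_t) + 1*sizeof(area_t_export_t);
    uint8_t *blob = calloc(1, size);
    config_t *cfg = (config_t*)blob;
    cfg->magic = MAGIC_NUMBER;
    cfg->num_mmio_areas = 1;                     /* io/alloc counts stay 0 */
    area_t_export_t *areas = (area_t_export_t*)(blob + sizeof(config_t));
    areas[0].base = 0xfed00000;                  /* illustrative MMIO base */
    areas[0].virtual_base = 0xfed00000;
    areas[0].size = 0x1000;
    strncpy(areas[0].desc, "example MMIO region", AREA_DESC_LEN-1);
    *size_out = size;
    return blob;
}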

13
nyx/kvm_nested.h Normal file

@@ -0,0 +1,13 @@
#pragma once
#include "qemu/osdep.h"
void print_48_paging(uint64_t cr3);
void kvm_nested_get_info(CPUState *cpu);
uint64_t get_nested_guest_rip(CPUState *cpu);
uint64_t get_nested_host_rip(CPUState *cpu);
uint64_t get_nested_host_cr3(CPUState *cpu);
void set_nested_rip(CPUState *cpu, uint64_t rip);
void print_configuration(FILE *stream, void* configuration, size_t size);

1388
nyx/memory_access.c Normal file

File diff suppressed because it is too large

70
nyx/memory_access.h Normal file

@@ -0,0 +1,70 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef MEMORY_ACCESS_H
#define MEMORY_ACCESS_H
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include "qemu-common.h"
#include "sysemu/kvm_int.h"
#define MEM_SPLIT_START 0x0C0000000
#define MEM_SPLIT_END 0x100000000
/* i386 pc_piix low_mem address translation */
#define address_to_ram_offset(offset) (offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset)
#define ram_offset_to_address(offset) (offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset)
bool read_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool write_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu);
bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu);
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu);
bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3);
bool read_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3);
bool write_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3);
bool write_virtual_shadow_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3);
bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
void hexdump_virtual_memory(uint64_t address, uint32_t size, CPUState *cpu);
bool write_virtual_shadow_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu);
bool is_addr_mapped(uint64_t address, CPUState *cpu);
bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3);
int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len);
int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len);
void remove_all_breakpoints(CPUState *cpu);
uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr3);
bool dump_page_cr3_snapshot(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3);
bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3);
bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3);
void print_48_paging2(uint64_t cr3);
bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu);
#endif
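A quick sanity check of the two translation macros above (not part of this commit): guest-physical addresses at or above 4 GiB fold back into the RAM block right after the 3 GiB low-memory region, and low addresses pass through unchanged.

#include <assert.h>
static void check_mem_split(void){
    assert(address_to_ram_offset(0x100000000ULL) == 0xC0000000ULL);  /* 4 GiB -> 3 GiB */
    assert(ram_offset_to_address(0xC0000000ULL) == 0x100000000ULL);  /* and back */
    assert(address_to_ram_offset(0x1000ULL) == 0x1000ULL);           /* low mem is identity */
}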

303
nyx/mmh3.c Normal file

@@ -0,0 +1,303 @@
#include <stdio.h>
#include <stdlib.h>
#include "mmh3.h"
#define FORCE_INLINE inline __attribute__((always_inline))
FORCE_INLINE uint32_t rotl32(uint32_t x, int8_t r) {
return (x << r) | (x >> (32 - r));
}
FORCE_INLINE uint64_t rotl64(uint64_t x, int8_t r) {
return (x << r) | (x >> (64 - r));
}
#define ROTL32(x, y) rotl32(x, y)
#define ROTL64(x, y) rotl64(x, y)
#define BIG_CONSTANT(x) (x##LLU)
/**
* Block read -- endian swapping, if required, or handle aligned reads
*/
FORCE_INLINE uint32_t getblock32(const uint32_t *p, int i) {
return p[i];
}
FORCE_INLINE uint64_t getblock64(const uint64_t *p, int i) {
return p[i];
}
/**
* Force all bits of a hash block to avalanche
*/
FORCE_INLINE uint32_t fmix32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
FORCE_INLINE uint64_t fmix64(uint64_t k) {
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
void mmh3_x86_32(const void *key, int len, uint32_t seed, void *out) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len/4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
// Hashing -- body of the function
const uint32_t *blocks = (const uint32_t *) (data + 4*nblocks);
for (int i = -nblocks; i; i++) {
uint32_t k1 = getblock32(blocks, i);
k1 *= c1;
k1 = ROTL32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = ROTL32(h1, 13);
h1 = 5*h1 + 0xe6546b64;
}
const uint8_t *tail = (const uint8_t *) (data + 4*nblocks);
uint32_t k1 = 0;
switch (len & 3) {
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0];
k1 *= c1;
k1 = ROTL32(k1, 15);
k1 *= c2;
h1 ^= k1;
};
// Finalize
h1 ^= len;
h1 = fmix32(h1);
*(uint32_t *) out = h1;
}
void mmh3_x86_128(const void *key, const int len, uint32_t seed, void *out) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len/16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
const uint32_t c1 = 0x239b961b;
const uint32_t c2 = 0xab0e9789;
const uint32_t c3 = 0x38b34ae5;
const uint32_t c4 = 0xa1e38b93;
const uint32_t *blocks = (const uint32_t *)(data + 16*nblocks);
for (int i = -nblocks; i; i++) {
uint32_t k1 = getblock32(blocks, i*4 + 0);
uint32_t k2 = getblock32(blocks, i*4 + 1);
uint32_t k3 = getblock32(blocks, i*4 + 2);
uint32_t k4 = getblock32(blocks, i*4 + 3);
k1 *= c1;
k1 = ROTL32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = ROTL32(h1, 19);
h1 += h2;
h1 = 5*h1 + 0x561ccd1b;
k2 *= c2;
k2 = ROTL32(k2, 16);
k2 *= c3;
h2 ^= k2;
h2 = ROTL32(h2, 17);
h2 += h3;
h2 = 5*h2 + 0x0bcaa747;
k3 *= c3;
k3 = ROTL32(k3, 17);
k3 *= c4;
h3 ^= k3;
h3 = ROTL32(h3, 15);
h3 += h4;
h3 = 5*h3 + 0x96cd1c35;
k4 *= c4;
k4 = ROTL32(k4, 18);
k4 *= c1;
h4 ^= k4;
h4 = ROTL32(h4, 13);
h4 += h1;
h4 = 5*h4 + 0x32ac3b17;
}
// Tail
const uint8_t *tail = (const uint8_t *) (data + 16*nblocks);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15) {
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4;
k4 = ROTL32(k4, 18);
k4 *= c1;
h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[9] << 8;
case 9: k3 ^= tail[8] << 0;
k3 *= c3;
k3 = ROTL32(k3, 17);
k3 *= c4;
h3 ^= k3;
case 8: k2 ^= tail[7] << 24;
case 7: k2 ^= tail[6] << 16;
case 6: k2 ^= tail[5] << 8;
case 5: k2 ^= tail[4] << 0;
k2 *= c2;
k2 = ROTL32(k2, 16);
k2 *= c3;
h2 ^= k2;
case 4: k1 ^= tail[3] << 24;
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0] << 0;
k1 *= c1;
k1 = ROTL32(k1, 15);
k1 *= c2;
h1 ^= k1;
};
// Finalize
h1 ^= len;
h2 ^= len;
h3 ^= len;
h4 ^= len;
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
h1 = fmix32(h1);
h2 = fmix32(h2);
h3 = fmix32(h3);
h4 = fmix32(h4);
h1 += h2;
h1 += h3;
h1 += h4;
h2 += h1;
h3 += h1;
h4 += h1;
((uint32_t *) out)[0] = h1;
((uint32_t *) out)[1] = h2;
((uint32_t *) out)[2] = h3;
((uint32_t *) out)[3] = h4;
}
void mmh3_x64_128(const void *key, const int len, const uint32_t seed, void *out) {
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len/16;
uint64_t h1 = seed;
uint64_t h2 = seed;
const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
// Body
const uint64_t *blocks = (const uint64_t *) (data);
for (int i = 0; i < nblocks; i++) {
uint64_t k1 = getblock64(blocks, i*2 + 0);
uint64_t k2 = getblock64(blocks, i*2 + 1);
k1 *= c1;
k1 = ROTL64(k1, 31);
k1 *= c2;
h1 ^= k1;
h1 = ROTL64(h1, 27);
h1 += h2;
h1 = 5*h1 + 0x52dce729;
k2 *= c2;
k2 = ROTL64(k2, 33);
k2 *= c1;
h2 ^= k2;
h2 = ROTL64(h2, 31);
h2 += h1;
h2 = 5*h2 + 0x38495ab5;
}
// tail
const uint8_t *tail = (const uint8_t *) (data + 16*nblocks);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15) {
case 15: k2 ^= ((uint64_t) tail[14]) << 48;
case 14: k2 ^= ((uint64_t) tail[13]) << 40;
case 13: k2 ^= ((uint64_t) tail[12]) << 32;
case 12: k2 ^= ((uint64_t) tail[11]) << 24;
case 11: k2 ^= ((uint64_t) tail[10]) << 16;
case 10: k2 ^= ((uint64_t) tail[9]) << 8;
case 9: k2 ^= ((uint64_t) tail[8]) << 0;
k2 *= c2;
k2 = ROTL64(k2, 33);
k2 *= c1;
h2 ^= k2;
case 8: k1 ^= ((uint64_t) tail[7]) << 56;
case 7: k1 ^= ((uint64_t) tail[6]) << 48;
case 6: k1 ^= ((uint64_t) tail[5]) << 40;
case 5: k1 ^= ((uint64_t) tail[4]) << 32;
case 4: k1 ^= ((uint64_t) tail[3]) << 24;
case 3: k1 ^= ((uint64_t) tail[2]) << 16;
case 2: k1 ^= ((uint64_t) tail[1]) << 8;
case 1: k1 ^= ((uint64_t) tail[0]) << 0;
k1 *= c1;
k1 = ROTL64(k1, 31);
k1 *= c2;
h1 ^= k1;
};
// finalize
h1 ^= len;
h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix64(h1);
h2 = fmix64(h2);
h1 += h2;
h2 += h1;
((uint64_t *) out)[0] = h1;
((uint64_t *) out)[1] = h2;
}

12
nyx/mmh3.h Normal file

@@ -0,0 +1,12 @@
#ifndef _MMH3_H
#define _MMH3_H
#include <stdint.h>
typedef unsigned __int128 uint128_t;
void mmh3_x86_32(const void *key, int len, uint32_t seed, void *out);
void mmh3_x86_128(const void *key, int len, uint32_t seed, void *out);
void mmh3_x64_128(const void *key, int len, uint32_t seed, void *out);
#endif
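A minimal sketch exercising the 128-bit variant declared above (not part of this commit; the input string is arbitrary):

#include <stdio.h>
#include <string.h>
#include "mmh3.h"

int main(void){
    const char *msg = "nyx";
    uint64_t out[2];
    mmh3_x64_128(msg, (int)strlen(msg), 0 /* seed */, out);
    printf("%016lx%016lx\n", out[0], out[1]);   /* 128-bit digest as hex */
    return 0;
}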

306
nyx/nested_hypercalls.c Normal file

@@ -0,0 +1,306 @@
#include <stdio.h>
#include <stdint.h>
#include "kvm_nested.h"
#include "memory_access.h"
#include "debug.h"
#include "nested_hypercalls.h"
#include "interface.h"
#include "state.h"
#include "pt.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "qemu/main-loop.h"
#include "nyx/helpers.h"
//#define DEBUG_NESTED_HYPERCALLS
bool hypercalls_enabled = false;
bool create_snapshot = false;
uint64_t htos_cr3 = 0;
uint64_t htos_config = 0;
static bool init_state = true;
int nested_once = 0;
bool nested_setup_snapshot_once = false;
void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
/* magic */
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
uint32_t size = 0;
read_physical_memory(htos_config, (uint8_t*) &size, sizeof(uint32_t), cpu);
fprintf(stderr, "--> %x\n", size);
void* buffer = malloc(size);
read_physical_memory(htos_config+sizeof(uint32_t), buffer, size, cpu);
/*
hexdump_kafl(buffer, size);
FILE *f = fopen("/tmp/htos_configuration", "w");
fwrite(buffer, size, 1, f);
fclose(f);
*/
print_configuration(stderr, buffer, size);
FILE* f = fopen("/tmp/hypertrash_configration", "w");
print_configuration(f, buffer, size);
fclose(f);
free(buffer);
/*
hexdump_virtual_memory()
_memory(0x38d31000, 0x2000, cpu);
*/
}
#define ANSI_COLOR_YELLOW "\x1b[33m"
#define ANSI_COLOR_RESET "\x1b[0m"
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
char hprintf_buffer[0x1000];
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
read_physical_memory((uint64_t)run->hypercall.args[0], (uint8_t*)hprintf_buffer, 0x1000, cpu);
//fprintf(stderr, ANSI_COLOR_YELLOW "%s" ANSI_COLOR_RESET, hprintf_buffer);
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, 0x1000)+1);
synchronization_lock_hprintf();
//hexdump_kafl(hprintf_buffer, 0x200);
}
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
//cpu->fast_reload_snapshot = (void*)fast_reload_new();
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
kvm_arch_get_registers(cpu);
if((uint64_t)run->hypercall.args[0]){
QEMU_PT_PRINTF(CORE_PREFIX, "handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t ADDRESS:\t%lx\t CR3:\t%lx", (uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1], (uint64_t)run->hypercall.args[2]);
}
else{
abort();
}
size_t buffer_size = (size_t)((uint64_t)run->hypercall.args[0] * sizeof(uint64_t));
uint64_t* buffer = malloc(buffer_size);
memset(buffer, 0x0, buffer_size);
read_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu);
htos_cr3 = (uint64_t)run->hypercall.args[2]; /* args[2] holds the CR3 value (see printf above); args[0] is the page count */
for(uint64_t i = 0; i < (uint64_t)run->hypercall.args[0]; i++){
if(i == 0){
htos_config = buffer[i];
}
QEMU_PT_PRINTF(CORE_PREFIX, "ADDRESS: %lx", buffer[i]);
remap_payload_slot(buffer[i], i, cpu);
}
set_payload_pages(buffer, (uint32_t)run->hypercall.args[0]);
// wipe memory
memset(buffer, 0x00, buffer_size);
write_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu);
free(buffer);
}
bool acquired = false;
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
if(!hypercalls_enabled){
return;
}
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
bool state = GET_GLOBAL_STATE()->in_reload_mode;
if(!state){
GET_GLOBAL_STATE()->in_reload_mode = true;
synchronization_disable_pt(cpu);
GET_GLOBAL_STATE()->in_reload_mode = false;
}
else{
synchronization_disable_pt(cpu);
}
}
void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
hypercalls_enabled = true;
static int rcount = 0;
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
if((rcount%100) == 0){
kvm_arch_get_registers(cpu);
//printf("TRY %s %lx %lx %lx (%d)\n", __func__, get_rip(cpu), get_nested_guest_rip(cpu), get_nested_host_rip(cpu), rcount);
// sleep(rand()%4);
}
rcount++;
synchronization_disable_pt(cpu);
/*
//vm_stop(RUN_STATE_RESTORE_VM);
qemu_mutex_lock_iothread();
//load_snapshot("kafl", NULL);
//vm_start();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
*/
//kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
// printf("DONE %s\n", __func__);
/*
kvm_arch_get_registers(cpu);
fprintf(stderr, "RELOADING DUDE %d!\n", rcount);
qemu_mutex_lock_iothread();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
*/
//}
//sleep(1);
return;
//assert(false);
QEMU_PT_PRINTF_DEBUG("%s %d", __func__, init_state);
//sleep(10);
/* magic */
//X86CPU *x86_cpu = X86_CPU(cpu);
//CPUX86State *env = &x86_cpu->env;
if (init_state){
printf("INIT STATE\n");
init_state = false;
//synchronization_disable_pt(cpu);
QEMU_PT_PRINTF_DEBUG("Protocol - SEND: KAFL_PROTO_RELEASE");
} else {
//if(reload_mode || reload_mode_temp){
//}
//synchronization_disable_pt(cpu);
QEMU_PT_PRINTF_DEBUG("%s UNLOCKED", __func__);
// printf("INTEL PT is disabled!\n");
}
qemu_mutex_lock_iothread();
//fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
QEMU_PT_PRINTF_DEBUG("%s UNLOCKED 2", __func__);
//kvm_cpu_synchronize_state(cpu);
acquired = false;
}
static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t addr){
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
kvm_remove_all_breakpoints(cpu);
kvm_insert_breakpoint(cpu, addr, 1, 1);
kvm_update_guest_debug(cpu, 0);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, cr3);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3);
}
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
#ifdef DEBUG_NESTED_HYPERCALLS
printf("============> %s\n", __func__);
#endif
if (!acquired){
printf("TRY %s\n", __func__);
printf("DONE %s\n", __func__);
acquired = true;
//create_fast_snapshot(cpu, true);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP);
for(int i = 0; i < INTEL_PT_MAX_RANGES; i++){
if(GET_GLOBAL_STATE()->pt_ip_filter_configured[i]){
pt_enable_ip_filtering(cpu, i, true, false);
}
}
pt_init_decoder(cpu);
qemu_mutex_lock_iothread();
fast_reload_restore(get_fast_reload_snapshot());
qemu_mutex_unlock_iothread();
kvm_arch_get_registers(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
printf("IN FUZZING LOOP! %lx\n", env->eip);
GET_GLOBAL_STATE()->in_fuzzing_mode = true;
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
/*
if(GET_GLOBAL_STATE()->protect_payload_buffer){
for(int i = 0; i < GET_GLOBAL_STATE()->nested_payload_pages_num; i++){
remap_payload_slot_protected(GET_GLOBAL_STATE()->nested_payload_pages[i], i, cpu);
}
}
*/
}
synchronization_lock();
kvm_arch_get_registers(cpu);
uint64_t cr3 = get_nested_host_cr3(cpu) & 0xFFFFFFFFFFFFF000ULL;
//fprintf(stderr, "CR3 -> 0x%lx\n", cr3);
pt_set_cr3(cpu, cr3, false);
GET_GLOBAL_STATE()->parent_cr3 = cr3;
if(GET_GLOBAL_STATE()->dump_page){
set_page_dump_bp_nested(cpu, cr3, GET_GLOBAL_STATE()->dump_page_addr);
}
kvm_nested_get_info(cpu);
synchronization_enter_fuzzing_loop(cpu);
return;
}

9
nyx/nested_hypercalls.h Normal file

@@ -0,0 +1,9 @@
#pragma once
/* HyperTrash! */
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);

543
nyx/page_cache.c Normal file

@ -0,0 +1,543 @@
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <errno.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <sys/file.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/mman.h>
#include <assert.h>
#include "page_cache.h"
#include "debug.h"
#ifndef STANDALONE_DECODER
#include "cpu.h"
#include "memory_access.h"
#include "fast_vm_reload.h"
#include "kvm_nested.h"
#include "nyx/state.h"
#endif
#define PAGE_SIZE 0x1000UL
#define PAGE_CACHE_ADDR_LINE_SIZE sizeof(uint64_t)
#define UNMAPPED_PAGE 0xFFFFFFFFFFFFFFFFULL
#ifndef STANDALONE_DECODER
static bool reload_addresses(page_cache_t* self){
#else
bool reload_addresses(page_cache_t* self){
#endif
khiter_t k;
int ret;
uint64_t addr, offset;
uint64_t value = 0;
size_t self_offset = lseek(self->fd_address_file, 0, SEEK_END);
if(self_offset != self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE){
//fprintf(stderr, "Reloading files ...\n");
lseek(self->fd_address_file, self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE, SEEK_SET);
offset = self->num_pages;
while(read(self->fd_address_file, &value, PAGE_CACHE_ADDR_LINE_SIZE)){
addr = value & 0xFFFFFFFFFFFFF000ULL;
offset++;
/* put new addresses and offsets into the hash map */
k = kh_get(PC_CACHE, self->lookup, addr);
if(k == kh_end(self->lookup)){
if(value & 0xFFF){
fprintf(stderr, "Load page: %lx (UMAPPED)\n", addr);
//k = kh_put(PC_CACHE, self->lookup, addr, &ret);
//kh_value(self->lookup, k) = UNMAPPED_PAGE;
}
else{
//fprintf(stderr, "Load page: %lx\n", addr);
k = kh_put(PC_CACHE, self->lookup, addr, &ret);
kh_value(self->lookup, k) = (offset-1)*PAGE_SIZE;
}
/*
k = kh_put(PC_CACHE, self->lookup, addr, &ret);
kh_value(self->lookup, k) = (offset-1)*PAGE_SIZE;
*/
}
else{
fprintf(stderr, "----------> Page duplicate found ...skipping! %lx\n", addr);
/* should not be possible ... */
//abort();
}
}
//fprintf(stderr, "Old Value: %d - New Value: %ld\n", self->num_pages, (uint32_t)self_offset/PAGE_CACHE_ADDR_LINE_SIZE);
/* reload page dump file */
munmap(self->page_data, self->num_pages*PAGE_SIZE);
self->num_pages = self_offset/PAGE_CACHE_ADDR_LINE_SIZE;
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
return true;
}
return false;
}
#ifndef STANDALONE_DECODER
static bool append_page(page_cache_t* self, uint64_t page, uint64_t cr3){
bool success = true;
if(!self->num_pages){
assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
else{
munmap(self->page_data, self->num_pages*PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
//if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){
// if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){
if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
fprintf(stderr, "FAILED DUMP PROCESS of PAGE %lx\n", page);
//memset(self->page_data+(PAGE_SIZE*self->num_pages), 0xff, PAGE_SIZE);
munmap(self->page_data, (self->num_pages+1)*PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
//qemu_backtrace();
success = false;
return success;
//assert(false);
}
}
}
//}
/*
if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
fprintf(stderr, "FAILED DUMP PROCESS of PAGE %lx\n", page);
assert(false);
}
*/
/*
//fast_loadvm();
if(cr3){
dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3); //self->cpu->parent_cr3);
//assert(dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)); //self->cpu->parent_cr3);
//read_virtual_memory_cr3(page, self->page_data+(PAGE_SIZE*self->num_pages), PAGE_SIZE, self->cpu, self->cpu->parent_cr3);
}
else{
dump_page_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu);
//read_virtual_memory(page, self->page_data+(PAGE_SIZE*self->num_pages), PAGE_SIZE, self->cpu);
}
*/
fsync(self->fd_page_file);
self->num_pages++;
return success;
}
#else
bool append_page(page_cache_t* self, uint64_t page, uint8_t* ptr){
self->last_page = 0xFFFFFFFFFFFFFFFF;
self->last_addr = 0xFFFFFFFFFFFFFFFF;
page &= 0xFFFFFFFFFFFFF000ULL;
bool success = true;
if(!self->num_pages){
assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
else{
munmap(self->page_data, self->num_pages*PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages+1)*PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
memcpy(self->page_data+(PAGE_SIZE*self->num_pages), ptr, PAGE_SIZE);
fsync(self->fd_page_file);
int ret;
khiter_t k;
k = kh_put(PC_CACHE, self->lookup, page, &ret);
kh_value(self->lookup, k) = self->num_pages*PAGE_SIZE;
assert(write(self->fd_address_file, &page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
self->num_pages++;
return success;
}
#endif
static void page_cache_lock(page_cache_t* self){
#ifndef STANDALONE_DECODER
int ret = 0;
while (true){
ret = flock(self->fd_lock, LOCK_EX);
if (ret == 0){
return;
}
else if (ret == -1 && errno == EINTR){
/* try again if acquiring this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
}
else{
assert(false);
}
}
#endif
}
static void page_cache_unlock(page_cache_t* self){
#ifndef STANDALONE_DECODER
int ret = 0;
while (true){
ret = flock(self->fd_lock, LOCK_UN);
if (ret == 0){
return;
}
else if (ret == -1 && errno == EINTR){
/* try again if releasing this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
}
else{
assert(false);
}
}
#endif
}
static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){
//#define DEBUG_PAGE_CACHE_LOCK
page_cache_lock(self);
#ifdef DEBUG_PAGE_CACHE_LOCK
fprintf(stderr, "%d: LOCKING PAGE CACHE\n", getpid());
#endif
if(reload_addresses(self)){
*k = kh_get(PC_CACHE, self->lookup, page);
}
if(*k == kh_end(self->lookup)){
#ifndef STANDALONE_DECODER
int ret;
uint64_t cr3 = GET_GLOBAL_STATE()->parent_cr3; //self->cpu->parent_cr3;
if(!is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->parent_cr3) && !is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){ //self->cpu->parent_cr3)){
//fprintf(stderr, "PAGE NOT FOUND in SNAPSHOT %lx\n", page);
//assert(false);
}
*k = kh_get(PC_CACHE, self->lookup, page);
if(*k == kh_end(self->lookup) && reload_addresses(self)){
/* reload successful */
*k = kh_get(PC_CACHE, self->lookup, page);
}
else{
if(append_page(self, page, cr3)){
*k = kh_put(PC_CACHE, self->lookup, page, &ret);
assert(write(self->fd_address_file, &page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
kh_value(self->lookup, *k) = (self->num_pages-1)*PAGE_SIZE;
}
else{
fprintf(stderr, "Fail!!!!\n");
page_cache_unlock(self);
return false;
/*
uint64_t new_page = page | 0xFFF;
assert(write(self->fd_address_file, &new_page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
kh_value(self->lookup, *k) = UNMAPPED_PAGE;
fprintf(stderr, "APPEND UNMAPPED PAGE %lx!\n", page);
*/
}
*k = kh_get(PC_CACHE, self->lookup, page);
}
#else
//printf("PAGE NOT FOUND: %lx! ABORTING\n", page);
page_cache_unlock(self);
return false;
abort();
#endif
}
#ifdef DEBUG_PAGE_CACHE_LOCK
fprintf(stderr, "%d: UNLOCKING PAGE CACHE\n", getpid());
#endif
page_cache_unlock(self);
return true;
}
uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode){
page &= 0xFFFFFFFFFFFFF000ULL;
/*
if(test_mode){
*success = false;
return 0;
}
*/
//if(page == 0x7ffca45b5000)
// return UNMAPPED_PAGE;
//printf("%s %lx\n", __func__, page);
//if (page == 0x0434000)
// return 0;
if (self->last_page == page){
*success = true;
return self->last_addr;
}
//QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "page_cache_fetch %lx", page);
khiter_t k;
k = kh_get(PC_CACHE, self->lookup, page);
if(k == kh_end(self->lookup)){
if(test_mode || update_page_cache(self, page, &k) == false){
//fprintf(stderr, "%s: fail!\n", __func__);
*success = false;
//abort();
return 0;
}
}
self->last_page = page;
//fprintf(stderr, "[%d]\tkh_n_buckets: %d %d\n", getpid(), kh_n_buckets(self->lookup), k);
if(kh_value(self->lookup, k) == UNMAPPED_PAGE){
self->last_addr = UNMAPPED_PAGE;
}
else{
self->last_addr = (uint64_t)self->page_data+kh_value(self->lookup, k);
}
//fprintf(stderr, "try to unlock flock!\n");
//fprintf(stderr, "flock unlocked!\n");
*success = true;
return self->last_addr;
}
/* fix this */
uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success){
return page_cache_fetch(self, page, success, false);
}
#ifndef STANDALONE_DECODER
page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file){
#else
page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_width){
#endif
page_cache_t* self = malloc(sizeof(page_cache_t));
char* tmp1;
char* tmp2;
char* tmp3;
assert(asprintf(&tmp1, "%s.dump", cache_file) != -1);
assert(asprintf(&tmp2, "%s.addr", cache_file) != -1);
assert(asprintf(&tmp3, "%s.lock", cache_file) != -1);
self->lookup = kh_init(PC_CACHE);
self->fd_page_file = open(tmp1, O_CLOEXEC | O_RDWR, S_IRWXU);
self->fd_address_file = open(tmp2, O_CLOEXEC | O_RDWR, S_IRWXU);
#ifndef STANDALONE_DECODER
self->cpu = cpu;
self->fd_lock = open(tmp3, O_CLOEXEC);
assert(self->fd_lock > 0);
#else
if(self->fd_page_file == -1 || self->fd_address_file == -1){
printf("[ ] Page cache files not found...\n");
exit(1);
}
#endif
memset(self->disassemble_cache, 0x0, 16);
self->page_data = NULL;
self->num_pages = 0;
self->last_page = 0xFFFFFFFFFFFFFFFF;
self->last_addr = 0xFFFFFFFFFFFFFFFF;
QEMU_PT_PRINTF(PAGE_CACHE_PREFIX, "%s (%s - %s) WORD_WIDTH: %d", __func__, tmp1, tmp2, disassembler_word_width);
free(tmp3);
free(tmp2);
free(tmp1);
if (cs_open(CS_ARCH_X86, CS_MODE_16, &self->handle_16) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_32, &self->handle_32) != CS_ERR_OK)
assert(false);
if (cs_open(CS_ARCH_X86, CS_MODE_64, &self->handle_64) != CS_ERR_OK)
assert(false);
cs_option(self->handle_16, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_32, CS_OPT_DETAIL, CS_OPT_ON);
cs_option(self->handle_64, CS_OPT_DETAIL, CS_OPT_ON);
return self;
}
#ifdef STANDALONE_DECODER
void page_cache_destroy(page_cache_t* self){
munmap(self->page_data, self->num_pages * 0x1000);
kh_destroy(PC_CACHE, self->lookup);
cs_close(&self->handle_16);
cs_close(&self->handle_32);
cs_close(&self->handle_64);
free(self);
}
#endif
/*
static bool page_cache_load(uint64_t virtual_addr){
return true;
}
*/
/*
static bool page_cache_load_cr3(uint64_t virtual_addr, uint64_t cr3){
return true;
}
*/
bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn){
return true;
}
cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode){
switch(mode){
case mode_16:
return cs_malloc(self->handle_16);
case mode_32:
return cs_malloc(self->handle_32);
case mode_64:
return cs_malloc(self->handle_64);
default:
assert(false);
}
return NULL;
}
//#define EXPERIMENTAL_PAGE_FETCH
bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode){
//printf("%s %lx\n", __func__, *address);
*failed_page = 0xFFFFFFFFFFFFFFFFULL;
bool success = true;
size_t code_size = 16;
#if defined(STANDALONE_DECODER) || !defined(EXPERIMENTAL_PAGE_FETCH)
uint8_t* code = (uint8_t*)page_cache_fetch(self, *address, &success, false);
#else
uint8_t* code = (uint8_t*)page_cache_fetch(self, *address, &success, true);
#endif
uint8_t* code_ptr = 0;
//disassembler_mode_t mode = mode_16;
csh* current_handle = NULL;
switch(mode){
case mode_16:
current_handle = &self->handle_16;
break;
case mode_32:
current_handle = &self->handle_32;
break;
case mode_64:
current_handle = &self->handle_64;
break;
default:
assert(false);
}
if (code == (void*)UNMAPPED_PAGE || success == false){
*failed_page = *address;// & 0xFFFFFFFFFFFFF000ULL;
//printf("FAIL???? (0x%lx) %lx %d\n", *address, code, success);
return false;
}
if ((*address & 0xFFF) >= (0x1000-16)){
//printf("-------------> Disassemble between pages...%lx (%lx %lx %lx)\n", *address, (*address&0xFFF), (0x1000-16), 0xf-(0xfff-(*address&0xfff)));
memcpy((void*)self->disassemble_cache, (void*)((uint64_t)code+(0x1000-16)), 16);
code_ptr = self->disassemble_cache + 0xf-(0xfff-(*address&0xfff));
#if defined(STANDALONE_DECODER) || !defined(EXPERIMENTAL_PAGE_FETCH)
code = (uint8_t*)page_cache_fetch(self, *address+0x1000, &success, false);
#else
code = (uint8_t*)page_cache_fetch(self, *address+0x1000, &success, true);
#endif
/* broken AF */
if(success == true){
//printf("=> A\n");
//*failed_page = (*address+0x1000) & 0xFFFFFFFFFFFFF000ULL;
//return false;
//printf("=> %lx %lx\n", (0xfff-(*address&0xfff)), *address);
memcpy((void*)(self->disassemble_cache+16), (void*)code, 16);
//code_size = 16;
return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
}
else{
//printf("=> B\n");
code_size = (0xfff-(*address&0xfff));
//printf("%lx\n", code_size);
//abort();
//*failed_page = *address;
if(!cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn)){
*failed_page = (*address+0x1000) & 0xFFFFFFFFFFFFF000ULL;
//fprintf(stderr, "%s FAIL: %lx %lx\n", __func__, *address, *failed_page);
//if(*address != 0x555555554ffe && *address != 0x7ffff7478ffc && *address != 0x7ffff7820ff6 && *address != 0x7ffff7822ffa)
// abort();
return false;
}
return true;
//return cs_disasm_iter(self->handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
}
}
else {
//printf("=> C\n");
code_ptr = code + (*address&0xFFF);
//printf("Disassemble...(%lx %x)\n", code_ptr, *code_ptr);
return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
}
}

49
nyx/page_cache.h Normal file

@@ -0,0 +1,49 @@
#pragma once
#include <capstone/capstone.h>
#include <capstone/x86.h>
#ifndef STANDALONE_DECODER
#include "qemu/osdep.h"
#endif
#include "khash.h"
#include <libxdc.h>
KHASH_MAP_INIT_INT64(PC_CACHE, uint64_t)
typedef struct page_cache_s{
#ifndef STANDALONE_DECODER
CPUState *cpu;
#endif
khash_t(PC_CACHE) *lookup;
int fd_page_file;
int fd_address_file;
int fd_lock;
uint8_t disassemble_cache[32];
void* page_data;
uint32_t num_pages;
csh handle_16;
csh handle_32;
csh handle_64;
uint64_t last_page;
uint64_t last_addr;
} page_cache_t;
#ifndef STANDALONE_DECODER
page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file);
#else
page_cache_t* page_cache_new(const char* cache_file, uint8_t disassembler_word_width);
void page_cache_destroy(page_cache_t* self);
bool append_page(page_cache_t* self, uint64_t page, uint8_t* ptr);
#endif
uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode);
bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn);
bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode);
cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode);
uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success);
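A sketch of the standalone-decoder usage implied by the STANDALONE_DECODER variants above (not part of this commit; assumes a build with -DSTANDALONE_DECODER, libxdc/capstone headers on the include path, and existing "trace.dump"/"trace.addr" cache files — the paths, the probed address, and the word width are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include "page_cache.h"

int main(void){
    page_cache_t *pc = page_cache_new("trace", 64);  /* word width; 64 assumed here */
    bool success = false;
    uint64_t host_addr = page_cache_fetch2(pc, 0x400000, &success);
    if (success && host_addr != 0xFFFFFFFFFFFFFFFFULL){  /* UNMAPPED_PAGE sentinel */
        printf("page cached at host address 0x%lx\n", host_addr);
    }
    page_cache_destroy(pc);
    return 0;
}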

184
nyx/patcher.c Normal file

@@ -0,0 +1,184 @@
#include "patcher.h"
#include "nyx/memory_access.h"
#include "nyx/disassembler.h"
#include "debug.h"
#include "nyx/state.h"
uint8_t cmp_patch_data[] = { 0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE]=0x90 }; // CMP AL,AL; NOP, NOP ...
const uint8_t *cmp_patch = &cmp_patch_data[0];
///////////////////////////////////////////////////////////////////////////////////
// Private Helper Functions Declarations
///////////////////////////////////////////////////////////////////////////////////
//
static void _patcher_apply_patch(patcher_t *self, size_t index);
static void _patcher_restore_patch(patcher_t *self, size_t index);
static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr);
static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn id);
static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches);
static void _patcher_free_patch_infos(patcher_t *self);
static redqueen_t* _redq_ptr(patcher_t *self);
///////////////////////////////////////////////////////////////////////////////////
// Public Functions
///////////////////////////////////////////////////////////////////////////////////
patcher_t* patcher_new(CPUState *cpu){
patcher_t *res = malloc(sizeof(patcher_t));
res->cpu = cpu;
res->num_patches = 0;
res->patches = NULL;
res->is_currently_applied = false;
return res;
}
void patcher_free(patcher_t* self){
assert(!self->is_currently_applied);
_patcher_free_patch_infos(self);
free(self);
}
void patcher_apply_all(patcher_t *self){
assert(!self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
//assert(patcher_validate_patches(self));
for(size_t i=0; i < self->num_patches; i++){
_patcher_apply_patch(self, i);
}
self->is_currently_applied = true;
}
void patcher_restore_all(patcher_t *self){
assert(self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
//assert(patcher_validate_patches(self));
for(size_t i = 0; i < self->num_patches; i++){
_patcher_restore_patch(self, i);
}
self->is_currently_applied = false;
}
void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs){
_patcher_free_patch_infos(self);
_patcher_alloc_patch_infos(self, num_addrs);
uint8_t curr_instruction_code[MAX_INSTRUCTION_SIZE];
memset(&curr_instruction_code[0], 0, MAX_INSTRUCTION_SIZE);
for(size_t i=0; i < self->num_patches; i++){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
if( read_virtual_memory(addrs[i], &curr_instruction_code[0], MAX_INSTRUCTION_SIZE, self->cpu) ) {
size_t size =_patcher_disassemble_size(self, &curr_instruction_code[0], addrs[i], X86_INS_CMP);
assert(size != 0); //cs_open failed, shouldn't happen
_patcher_save_patch(self, i, &curr_instruction_code[0], size, addrs[i]);
}
}
}
static void print_hexdump(const uint8_t* addr, size_t size){
for(size_t i = 0; i < size; i++){
printf (" %02x", addr[i]);
}
printf("\n");
}
bool patcher_validate_patches(patcher_t *self){
bool was_rq = _redq_ptr(self)->hooks_applied;
if(was_rq)
redqueen_remove_hooks(_redq_ptr(self));
if(!self->patches){return true;}
for(size_t i=0; i<self->num_patches; i++){
uint8_t buf[MAX_INSTRUCTION_SIZE];
read_virtual_memory(self->patches[i].addr, &buf[0], MAX_INSTRUCTION_SIZE, self->cpu);
const uint8_t* should_value = NULL;
if(self->is_currently_applied){
should_value = cmp_patch;
} else {
should_value = &self->patches[i].orig_bytes[0];
}
QEMU_PT_PRINTF(REDQUEEN_PREFIX, "Validating, mem:");
print_hexdump(&buf[0], self->patches[i].size);
QEMU_PT_PRINTF(REDQUEEN_PREFIX, "should_be:");
print_hexdump(should_value, self->patches[i].size);
if(0 != memcmp(&buf[0], should_value, self->patches[i].size)){
QEMU_PT_PRINTF(REDQUEEN_PREFIX, "validating patches failed self->is_currently_applied = %d", self->is_currently_applied);
return false;
}
}
if(was_rq)
redqueen_insert_hooks(_redq_ptr(self));
return true;
}
///////////////////////////////////////////////////////////////////////////////////
// Private Helper Functions Definitions
///////////////////////////////////////////////////////////////////////////////////
static void _patcher_apply_patch(patcher_t *self, size_t index) {
patch_info_t *info = &self->patches[index];
write_virtual_shadow_memory_cr3(info->addr, (uint8_t*)cmp_patch, info->size, self->cpu, GET_GLOBAL_STATE()->parent_cr3); //self->cpu->parent_cr3);
}
static void _patcher_restore_patch(patcher_t *self, size_t index){
patch_info_t *info = &self->patches[index];
write_virtual_shadow_memory_cr3(info->addr, (uint8_t*)&info->orig_bytes[0], info->size, self->cpu, GET_GLOBAL_STATE()->parent_cr3); //self->cpu->parent_cr3);
}
static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr) {
assert(instruction_size >= 2);
assert(instruction_size < MAX_INSTRUCTION_SIZE);
patch_info_t *info = &self->patches[index];
memset(&info->orig_bytes[0], 0, MAX_INSTRUCTION_SIZE);
memcpy(&info->orig_bytes[0], data, instruction_size);
info->addr = addr;
info->size = instruction_size;
}
static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn type){
csh handle;
if (cs_open(CS_ARCH_X86, get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width), &handle) == CS_ERR_OK){
cs_insn *insn = cs_malloc(handle);
uint8_t* cur_offset = data;
uint64_t cs_address = addr;
uint64_t code_size = MAX_INSTRUCTION_SIZE;
cs_disasm_iter(handle, (const uint8_t **) &cur_offset, &code_size, &cs_address, insn);
size_t size = insn->size;
if(type != X86_INS_INVALID){
assert(insn->id == type);
}
cs_free(insn, 1);
cs_close(&handle);
return size;
}
return 0;
}
static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches){
assert(self->num_patches == 0);
assert(self->patches == NULL);
assert(num_patches < 10000);
self->num_patches = num_patches;
self->patches = malloc(sizeof(patch_info_t)*num_patches);
}
static void _patcher_free_patch_infos(patcher_t *self){
assert(!self->is_currently_applied);
free(self->patches);
self->patches = NULL;
self->num_patches = 0;
}
static redqueen_t* _redq_ptr(patcher_t *self){
redqueen_t* res = GET_GLOBAL_STATE()->redqueen_state; //self->cpu->redqueen_state;
return res;
}

45
nyx/patcher.h Normal file

@@ -0,0 +1,45 @@
#ifndef __GUARD_REDQUEEN_PATCHER_STRUCT__
#define __GUARD_REDQUEEN_PATCHER_STRUCT__
#include <stdint.h>
#include <stddef.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include "qemu/osdep.h"
#define MAX_INSTRUCTION_SIZE 64
//Patch used to replace cmp instructions. It encodes CMP AL, AL, a comparison which always evaluates to true. This can
//be used to remove hash checks that we suspect can later on be patched.
extern const uint8_t* cmp_patch;
typedef struct patch_info_s{
uint64_t addr;
size_t size;
uint8_t orig_bytes[MAX_INSTRUCTION_SIZE];
} patch_info_t;
typedef struct patcher_s{
CPUState *cpu;
patch_info_t *patches;
size_t num_patches;
bool is_currently_applied;
} patcher_t;
patcher_t* patcher_new(CPUState *cpu);
void patcher_free(patcher_t *self);
void patcher_apply_all(patcher_t *self);
void patcher_restore_all(patcher_t *self);
//Doesn't take ownership of addrs
void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs);
bool patcher_validate_patches(patcher_t *self);
#endif
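A sketch of the intended call sequence (not part of this commit; the address is illustrative and must point at a CMP instruction, since patcher_set_addrs() asserts the decoded instruction type, and redqueen hooks are assumed not to be applied):

/* Sketch: snapshot, apply and revert the always-true CMP AL, AL patch. */
static void example_patch_cycle(CPUState *cpu){
    uint64_t cmp_sites[] = { 0x401234 };     /* illustrative guest-virtual address */
    patcher_t *p = patcher_new(cpu);
    patcher_set_addrs(p, cmp_sites, 1);      /* reads and saves the original bytes */
    patcher_apply_all(p);                    /* writes CMP AL, AL + NOP padding */
    /* ... execute the target ... */
    patcher_restore_all(p);                  /* puts the original bytes back */
    patcher_free(p);
}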

104
nyx/printk.c Normal file

@@ -0,0 +1,104 @@
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include "qemu-common.h"
#include "nyx/memory_access.h"
#include "nyx/hypercall.h"
#include "nyx/printk.h"
enum reg_types{RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15, RIP};
uint8_t types[] = {RSI, RDX, RCX, R8, R9};
/* calling convention: RDI, RSI, RDX, RCX, R8, R9 */
/* https://www.kernel.org/doc/Documentation/printk-formats.txt :-( */
bool kafl_linux_printk(CPUState *cpu){
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
char printk_buf[0x1000];
uint8_t rsp_buf[0x1000];
uint8_t rdi_buf[0x1000];
uint8_t rsi_buf[0x1000];
uint8_t rdx_buf[0x1000];
uint8_t rcx_buf[0x1000];
uint8_t r8_buf[0x1000];
uint8_t r9_buf[0x1000];
read_virtual_memory((uint64_t)env->regs[RSP], (uint8_t*)rsp_buf, 0x1000, cpu);
read_virtual_memory((uint64_t)env->regs[RDI], (uint8_t*)rdi_buf, 0x1000, cpu);
uint8_t* buf[] = {rsi_buf, rdx_buf, rcx_buf, r8_buf, r9_buf};
for(uint16_t i = 0, type = 0; i < 0x1000 && rdi_buf[i] != '\x00'; i++){
if(i > 1 && rdi_buf[i-2] == '%' && rdi_buf[i-1] != '%'){
if(rdi_buf[i-1] == 's' || rdi_buf[i-1] == 'p' || rdi_buf[i-1] == '.'){
if(rdi_buf[i] == 'B'){
rdi_buf[i-1] = 'l';
rdi_buf[i] = 'x';
buf[type] = (uint8_t*)env->regs[types[type]];
}
else if(rdi_buf[i-1] == 'p' && rdi_buf[i] == 'V'){
rdi_buf[i-1] = 's';
rdi_buf[i] = ' ';
read_virtual_memory((uint64_t)env->regs[types[type]], (uint8_t*)buf[type], 0x1000, cpu);
uint64_t tmp = *((uint64_t*)buf[type]);
read_virtual_memory(tmp, (uint8_t*)buf[type], 0x1000, cpu);
}
else if(rdi_buf[i-1] == 'p'){
rdi_buf[i-1] = 'l';
memmove(rdi_buf+i+1, rdi_buf+i, 0x1000-i-1);
rdi_buf[i] = 'x';
buf[type] = (uint8_t*)env->regs[types[type]];
}
else {
read_virtual_memory((uint64_t)env->regs[types[type]], (uint8_t*)buf[type], 0x1000, cpu);
}
}
else{
buf[type] = (uint8_t*)env->regs[types[type]];
}
type++;
if(type > 4){
rdi_buf[i] = '\n';
rdi_buf[i+1] = '\x00';
break;
}
}
}
snprintf(printk_buf, 0x1000, (char*)rdi_buf, buf[0], buf[1], buf[2], buf[3], buf[4]);
if(printk_buf[0] == 0x1){
//printf("%s", rdi_buf+2);
//hprintf(printk_buf+2);
//printf("%s", printk_buf+2);
if(!strncmp(printk_buf+2, "---[ end Kernel panic", 21)){
return true;
}
}
else {
//printf("%s", rdi_buf);
//hprintf(printk_buf);
//printf("%s", printk_buf);
if(!strncmp(printk_buf, "---[ end Kernel panic", 21)){
return true;
}
}
return false;
}
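The format-string rewriting above is the subtle part: kernel-only specifiers are translated into ones a vanilla snprintf() understands, and turning "%p" into "%lx" needs one extra byte, hence the memmove(). A self-contained sketch of that single case (buffer and format string are made up for illustration):

#include <stdio.h>
#include <string.h>

int main(void) {
    char fmt[32] = "addr=%p\n";               /* hypothetical format */
    char *p = strstr(fmt, "%p");
    memmove(p + 3, p + 2, strlen(p + 2) + 1); /* shift the tail right, keep NUL */
    p[1] = 'l';
    p[2] = 'x';                               /* "%p" -> "%lx" */
    printf(fmt, 0xdeadbeefUL);                /* prints "addr=deadbeef" */
    return 0;
}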

28
nyx/printk.h Normal file
View File

@ -0,0 +1,28 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PRINTK_H
#define PRINTK_H
bool kafl_linux_printk(CPUState *cpu);
#endif

509
nyx/pt.c Normal file
View File

@ -0,0 +1,509 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "qemu-common.h"
#include "target/i386/cpu.h"
#include "nyx/pt.h"
#include "exec/memory.h"
#include "sysemu/kvm_int.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "nyx/hypercall.h"
#include "nyx/memory_access.h"
#include "nyx/interface.h"
#include "nyx/debug.h"
#include "nyx/redqueen.h"
#include "nyx/redqueen_patch.h"
#include "nyx/patcher.h"
#include "nyx/page_cache.h"
#include "nyx/state.h"
#include <libxdc.h>
#include "nyx/helpers.h"
#define PT_BUFFER_MMAP_ADDR 0x3ffff0000000
/*
extern uint32_t kafl_bitmap_size;
uint8_t* bitmap = NULL;
*/
uint32_t state_byte = 0;
uint32_t last = 0;
int pt_trace_dump_fd = 0;
bool should_dump_pt_trace = false;
void pt_open_pt_trace_file(char* filename){
printf("using pt trace at %s",filename);
pt_trace_dump_fd = open(filename, O_WRONLY);
should_dump_pt_trace = true;
assert(pt_trace_dump_fd >= 0);
}
void pt_truncate_pt_trace_file(void){
if(should_dump_pt_trace){
assert(lseek(pt_trace_dump_fd, 0, SEEK_SET) == 0);
assert(ftruncate(pt_trace_dump_fd, 0)==0);
}
}
void pt_sync(void){
/*
if(bitmap){
msync(bitmap, kafl_bitmap_size, MS_SYNC);
}
*/
}
static void pt_set(CPUState *cpu, run_on_cpu_data arg){
asm volatile("" ::: "memory");
}
static inline int pt_cmd_hmp_context(CPUState *cpu, uint64_t cmd){
cpu->pt_ret = -1;
if(pt_hypercalls_enabled()){
QEMU_PT_PRINTF(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing mode is enabled (-kafl)!");
}
else{
cpu->pt_cmd = cmd;
run_on_cpu(cpu, pt_set, RUN_ON_CPU_NULL);
}
return cpu->pt_ret;
}
static int pt_cmd(CPUState *cpu, uint64_t cmd, bool hmp_mode){
if (hmp_mode){
return pt_cmd_hmp_context(cpu, cmd);
}
else {
cpu->pt_cmd = cmd;
pt_pre_kvm_run(cpu);
return cpu->pt_ret;
}
}
static inline int pt_ioctl(int fd, unsigned long request, unsigned long arg){
if (!fd){
return -EINVAL;
}
return ioctl(fd, request, arg);
}
/*
void pt_setup_bitmap(void* ptr){
bitmap = (uint8_t*)ptr;
}
void pt_reset_bitmap(void){
if(bitmap){
state_byte = 0;
last = 0;
memset(bitmap, 0x00, kafl_bitmap_size);
}
}
*/
static inline uint64_t mix_bits(uint64_t v) {
v ^= (v >> 31);
v *= 0x7fb5d329728ea185;
/*
v ^= (v >> 27);
v *= 0x81dadef4bc2dd44d;
v ^= (v >> 33);
*/
return v;
}
/*
void pt_bitmap(uint64_t from, uint64_t to){
//if(to == 0x400965 || (last == 0x400965 && to == 0x40087A)){
// last = to;
// state_byte = mix_bits(state_byte)^to;
// bitmap[state_byte & (kafl_bitmap_size-1)]++;
//}
//printf("from: %lx\tto: %lx\n", from, to);
uint32_t transition_value = 0;
#ifdef SAMPLE_DECODED
sample_decoded(from,to);
#endif
if(bitmap){
transition_value = mix_bits(to)^(mix_bits(from)>>1);
//
//if ((from == 0x7ffff7884e8f && to == 0x7ffff7884eff) || (from == 0x7ffff7884f10 && to == 0x7ffff7884f12) || (from == 0x7ffff7884f14 && to == 0x7ffff7884e80)){
// return;
//}
//fprintf(stderr, "%lx %lx %x\n", from, to, check_bitmap_byte(transition_value & (kafl_bitmap_size-1)));
if (check_bitmap_byte(transition_value & (kafl_bitmap_size-1)) == 0)
bitmap[transition_value & (kafl_bitmap_size-1)]++;
}
}
*/
#ifdef DUMP_AND_DEBUG_PT
void dump_pt_trace(void* buffer, int bytes){
static FILE* f = NULL;
static int fcounter = 0;
static size_t size = 0;
char filename[256];
if(!f){
snprintf(filename, 256, "/tmp/trace_data/data_%d", fcounter);
f = fopen(filename, "wb");
}
size += fwrite(buffer, bytes, 1, f) * bytes;
if(size >= 0x80000000){ // 2GB
fclose(f);
fcounter++;
size = 0;
snprintf(filename, 256, "/tmp/trace_data/data_%d", fcounter);
f = fopen(filename, "wb");
}
}
#endif
void pt_dump(CPUState *cpu, int bytes){
if(should_dump_pt_trace){
assert(bytes == write(pt_trace_dump_fd, cpu->pt_mmap, bytes));
}
if(!(GET_GLOBAL_STATE()->redqueen_state && GET_GLOBAL_STATE()->redqueen_state->intercept_mode)){
if (GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->decoder_page_fault == false && GET_GLOBAL_STATE()->decoder && !GET_GLOBAL_STATE()->dump_page){
GET_GLOBAL_STATE()->pt_trace_size += bytes;
decoder_result_t result = libxdc_decode(GET_GLOBAL_STATE()->decoder, cpu->pt_mmap, bytes);
switch(result){
case decoder_success:
break;
case decoder_success_pt_overflow:
cpu->intel_pt_run_trashed = true;
break;
case decoder_page_fault:
fprintf(stderr, "Page not found => 0x%lx\n", libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder));
GET_GLOBAL_STATE()->decoder_page_fault = true;
GET_GLOBAL_STATE()->decoder_page_fault_addr = libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder);
break;
case decoder_unkown_packet:
fprintf(stderr, "WARNING: libxdc_decode returned decoder_error\n");
break;
case decoder_error:
fprintf(stderr, "WARNING: libxdc_decode returned decoder_error\n");
break;
}
}
}
}
int pt_enable(CPUState *cpu, bool hmp_mode){
if(!fast_reload_set_bitmap(get_fast_reload_snapshot())){
fuzz_bitmap_reset();
}
//pt_reset_bitmap();
pt_truncate_pt_trace_file();
return pt_cmd(cpu, KVM_VMX_PT_ENABLE, hmp_mode);
}
int pt_disable(CPUState *cpu, bool hmp_mode){
//printf("%s\n", __func__);
int r = pt_cmd(cpu, KVM_VMX_PT_DISABLE, hmp_mode);
return r;
}
int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode){
if (val == GET_GLOBAL_STATE()->pt_c3_filter){
return 0; // nothing changed
}
//fprintf(stderr, "=========== %s %lx ============\n", __func__, val);
int r = 0;
if (cpu->pt_enabled){
return -EINVAL;
}
GET_GLOBAL_STATE()->pt_c3_filter = val;
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_CR3, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_CR3, hmp_mode);
return r;
}
int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode){
int r = 0;
if(addrn > 3){
return -1;
}
if (cpu->pt_enabled){
return -EINVAL;
}
if(GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] > GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]){
QEMU_PT_PRINTF(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx", GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
return -EINVAL;
}
if(GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn]){
pt_disable_ip_filtering(cpu, addrn, hmp_mode);
}
QEMU_PT_PRINTF(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)", addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
if(GET_GLOBAL_STATE()->pt_ip_filter_configured[addrn] && GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] != 0 && GET_GLOBAL_STATE()->pt_ip_filter_b[addrn] != 0){
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_ADDR0+addrn, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_ADDR0+addrn, hmp_mode);
GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = true;
}
return r;
}
void pt_init_decoder(CPUState *cpu){
uint64_t filters[4][2] = {0};
/* copy the four configured IP filter ranges */
for(int i = 0; i < 4; i++){
filters[i][0] = GET_GLOBAL_STATE()->pt_ip_filter_a[i];
filters[i][1] = GET_GLOBAL_STATE()->pt_ip_filter_b[i];
}
assert(GET_GLOBAL_STATE()->decoder == NULL);
assert(GET_GLOBAL_STATE()->shared_bitmap_ptr != NULL);
assert(GET_GLOBAL_STATE()->shared_bitmap_size != 0);
GET_GLOBAL_STATE()->decoder = libxdc_init(filters, (void* (*)(void*, uint64_t, bool*))page_cache_fetch2, GET_GLOBAL_STATE()->page_cache, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_size);
libxdc_register_bb_callback(GET_GLOBAL_STATE()->decoder, (void (*)(void*, uint64_t, uint64_t))redqueen_callback, GET_GLOBAL_STATE()->redqueen_state);
}
int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode){
int r = 0;
switch(addrn){
case 0:
case 1:
case 2:
case 3:
r = pt_cmd(cpu, KVM_VMX_PT_DISABLE_ADDR0+addrn, hmp_mode);
if(GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn]){
GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = false;
}
break;
default:
r = -EINVAL;
}
return r;
}
void pt_kvm_init(CPUState *cpu){
cpu->pt_cmd = 0;
cpu->pt_enabled = false;
cpu->pt_fd = 0;
cpu->pt_decoder_state = NULL;
//cpu->redqueen_state=NULL;
//cpu->redqueen_patch_state = patcher_new(cpu);
//init_redqueen_patch_state();
cpu->reload_pending = false;
cpu->intel_pt_run_trashed = false;
}
struct vmx_pt_filter_iprs {
__u64 a;
__u64 b;
};
pthread_mutex_t pt_dump_mutex = PTHREAD_MUTEX_INITIALIZER;
void pt_pre_kvm_run(CPUState *cpu){
pthread_mutex_lock(&pt_dump_mutex);
int ret;
struct vmx_pt_filter_iprs filter_iprs;
if(GET_GLOBAL_STATE()->patches_disable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patches disable");
assert(false); /* remove this branch */
GET_GLOBAL_STATE()->patches_disable_pending = false;
}
if(GET_GLOBAL_STATE()->patches_enable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "patches enable");
assert(false); /* remove this branch */
GET_GLOBAL_STATE()->patches_enable_pending = false;
}
//if(cpu->redqueen_enable_pending){
if(GET_GLOBAL_STATE()->redqueen_enable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "rq enable");
if (GET_GLOBAL_STATE()->redqueen_state){
enable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state);
}
//cpu->redqueen_enable_pending = false;
GET_GLOBAL_STATE()->redqueen_enable_pending = false;
//qemu_cpu_kick_self();
}
//if(cpu->redqueen_disable_pending){
if(GET_GLOBAL_STATE()->redqueen_disable_pending){
//QEMU_PT_PRINTF(REDQUEEN_PREFIX, "rq disable");
if (GET_GLOBAL_STATE()->redqueen_state){
disable_rq_intercept_mode(GET_GLOBAL_STATE()->redqueen_state);
}
//cpu->redqueen_disable_pending = false;
GET_GLOBAL_STATE()->redqueen_disable_pending = false;
//qemu_cpu_kick_self();
}
if(GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force){
if (!cpu->pt_fd) {
cpu->pt_fd = kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SETUP_FD, (unsigned long)0);
assert(cpu->pt_fd != -1);
ret = ioctl(cpu->pt_fd, KVM_VMX_PT_GET_TOPA_SIZE, (unsigned long)0x0);
cpu->pt_mmap = mmap((void*)PT_BUFFER_MMAP_ADDR, ret, PROT_READ|PROT_WRITE, MAP_SHARED, cpu->pt_fd, 0);
assert(cpu->pt_mmap != MAP_FAILED);
assert(mmap(cpu->pt_mmap+ret, 0x1000, PROT_READ|PROT_WRITE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, -1, 0) == (void*)(cpu->pt_mmap+ret)); /* add an extra page to have enough space for an additional PT_TRACE_END byte */
debug_printf("\t\t============> pt_mmap:%p - %p\n", cpu->pt_mmap, cpu->pt_mmap+ret);
memset(cpu->pt_mmap+ret, 0x55, 0x1000);
}
if (cpu->pt_cmd){
switch(cpu->pt_cmd){
case KVM_VMX_PT_ENABLE:
//fprintf(stderr, "=========== KVM_VMX_PT_ENABLE ============\n");
if (cpu->pt_fd){
/* dump for the very last time before enabling VMX_PT ... just in case */
ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0);
if (!ioctl(cpu->pt_fd, cpu->pt_cmd, 0)){
cpu->pt_enabled = true;
}
}
break;
case KVM_VMX_PT_DISABLE:
//fprintf(stderr, "=========== KVM_VMX_PT_DISABLE ============\n");
if (cpu->pt_fd){
ret = ioctl(cpu->pt_fd, cpu->pt_cmd, 0);
if (ret > 0){
//QEMU_PT_PRINTF(PT_PREFIX, "KVM_VMX_PT_DISABLE %d", ret);
pt_dump(cpu, ret);
cpu->pt_enabled = false;
}
}
break;
/* ip filtering configuration */
case KVM_VMX_PT_CONFIGURE_ADDR0:
case KVM_VMX_PT_CONFIGURE_ADDR1:
case KVM_VMX_PT_CONFIGURE_ADDR2:
case KVM_VMX_PT_CONFIGURE_ADDR3:
filter_iprs.a = GET_GLOBAL_STATE()->pt_ip_filter_a[(cpu->pt_cmd)-KVM_VMX_PT_CONFIGURE_ADDR0];
filter_iprs.b = GET_GLOBAL_STATE()->pt_ip_filter_b[(cpu->pt_cmd)-KVM_VMX_PT_CONFIGURE_ADDR0];
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)&filter_iprs);
break;
case KVM_VMX_PT_ENABLE_ADDR0:
case KVM_VMX_PT_ENABLE_ADDR1:
case KVM_VMX_PT_ENABLE_ADDR2:
case KVM_VMX_PT_ENABLE_ADDR3:
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0);
break;
case KVM_VMX_PT_CONFIGURE_CR3:
//fprintf(stderr, "=========== KVM_VMX_PT_CONFIGURE_CR3 ============\n");
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, GET_GLOBAL_STATE()->pt_c3_filter);
break;
case KVM_VMX_PT_ENABLE_CR3:
//fprintf(stderr, "=========== KVM_VMX_PT_ENABLE_CR3 ============\n");
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0);
break;
default:
if (cpu->pt_fd){
ioctl(cpu->pt_fd, cpu->pt_cmd, 0);
}
break;
}
cpu->pt_cmd = 0;
cpu->pt_ret = 0;
//kvm_cpu_synchronize_state(cpu);
}
}
pthread_mutex_unlock(&pt_dump_mutex);
}
void pt_handle_overflow(CPUState *cpu){
pthread_mutex_lock(&pt_dump_mutex);
int overflow = ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0);
if (overflow > 0){
//cpu->overflow_counter++;
pt_dump(cpu, overflow);
}
/*else{
printf("CPU NOT ENABLED?!\n");
assert(false);
}
*/
pthread_mutex_unlock(&pt_dump_mutex);
}
void pt_post_kvm_run(CPUState *cpu){
if(GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force){
//printf("%s\n", __func__);
//int overflow;
//if (cpu->pt_enabled){
pt_handle_overflow(cpu);
//unlock_reload_pending(cpu);
//}
}
}
/*
void pt_sync_kvm_run_lock(void){
pthread_mutex_lock(&pt_dump_mutex);
}
void pt_sync_kvm_run_unlock(void){
pthread_mutex_unlock(&pt_dump_mutex);
}
*/
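The commented-out pt_bitmap() above documents how decoded (from, to) branch transitions used to be folded into an AFL-style coverage bitmap; libxdc performs the equivalent step internally now. A self-contained sketch of that hashing scheme (BITMAP_SIZE and the addresses are illustrative stand-ins for kafl_bitmap_size and real trace data):

#include <stdint.h>
#include <stdio.h>

#define BITMAP_SIZE 0x10000 /* stand-in for kafl_bitmap_size; power of two */

static uint64_t mix_bits(uint64_t v) {
    v ^= (v >> 31);
    v *= 0x7fb5d329728ea185ULL;
    return v;
}

int main(void) {
    static uint8_t bitmap[BITMAP_SIZE];
    uint64_t from = 0x401000, to = 0x401337;  /* example branch */
    uint32_t idx = (uint32_t)(mix_bits(to) ^ (mix_bits(from) >> 1)) & (BITMAP_SIZE - 1);
    bitmap[idx]++;
    printf("transition hashed to index 0x%x (count %d)\n", idx, bitmap[idx]);
    return 0;
}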

48
nyx/pt.h Normal file
View File

@ -0,0 +1,48 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef PT_H
#define PT_H
void pt_init_decoder(CPUState *cpu);
void pt_sync(void);
void pt_reset_bitmap(void);
void pt_setup_bitmap(void* ptr);
int pt_enable(CPUState *cpu, bool hmp_mode);
int pt_disable(CPUState *cpu, bool hmp_mode);
int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode);
int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode);
int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode);
void pt_kvm_init(CPUState *cpu);
void pt_pre_kvm_run(CPUState *cpu);
void pt_post_kvm_run(CPUState *cpu);
void pt_handle_overflow(CPUState *cpu);
void pt_dump(CPUState *cpu, int bytes);
void pt_bitmap(uint64_t from, uint64_t to);
void pt_open_pt_trace_file(char* filename);
void pt_truncate_pt_trace_file(void);
#endif

1041
nyx/redqueen.c Normal file

File diff suppressed because it is too large Load Diff

128
nyx/redqueen.h Normal file
View File

@ -0,0 +1,128 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef REDQUEEN_H
#define REDQUEEN_H
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include "redqueen_trace.h"
#include "khash.h"
#include "page_cache.h"
//#define RQ_DEBUG
#define REDQUEEN_MAX_STRCMP_LEN 64
#define REDQUEEN_TRAP_LIMIT 16
#define REG64_NUM 16
#define REG32_NUM 16
//RIP is deliberately not part of the register indices below, since they are used to access QEMU's CPU register array (see EXTRA_REG_RIP).
#define REG16_NUM 16
#define REG8L_NUM 16
#define REG8H_NUM 8
#define EXTRA_REG_RIP 16
#define EXTRA_REG_NOP 17
#define REDQUEEN_NO_INSTRUMENTATION 0
#define REDQUEEN_LIGHT_INSTRUMENTATION 1
#define REDQUEEN_SE_INSTRUMENTATION 2
#define REDQUEEN_WHITELIST_INSTRUMENTATION 3
enum reg_types{RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15};
#define CMP_BITMAP_NOP 0x0000000UL
#define CMP_BITMAP_RQ_INSTRUCTION 0x1000000UL
#define CMP_BITMAP_SE_INSTRUCTION 0x2000000UL
#define CMP_BITMAP_BLACKLISTED 0x4000000UL
#define CMP_BITMAP_TRACE_ENABLED 0x8000000UL
#define CMP_BITMAP_SHOULD_HOOK_SE (CMP_BITMAP_SE_INSTRUCTION|CMP_BITMAP_TRACE_ENABLED)
#define CMP_BITMAP_SHOULD_HOOK_RQ (CMP_BITMAP_RQ_INSTRUCTION)
KHASH_MAP_INIT_INT64(RQ, uint32_t)
typedef struct redqueen_s{
khash_t(RQ) *lookup;
bool intercept_mode;
bool trace_mode;
bool singlestep_enabled;
int hooks_applied;
CPUState *cpu;
uint64_t last_rip;
uint64_t next_rip;
uint64_t *breakpoint_whitelist;
uint64_t num_breakpoint_whitelist;
redqueen_trace_t* trace_state;
page_cache_t* page_cache;
} redqueen_t;
typedef struct redqueen_workdir_s{
char* redqueen_results;
char* symbolic_results;
char* pt_trace_results;
char* redqueen_patches;
char* breakpoint_white;
char* breakpoint_black;
char* target_code_dump;
} redqueen_workdir_t;
extern redqueen_workdir_t redqueen_workdir;
void setup_redqueen_workdir(char* workdir);
redqueen_t* new_rq_state(CPUState *cpu, page_cache_t* page_cache);
void destroy_rq_state(redqueen_t* self);
void set_rq_instruction(redqueen_t* self, uint64_t addr);
void set_rq_blacklist(redqueen_t* self, uint64_t addr);
void handle_hook(redqueen_t* self);
void handel_se_hook(redqueen_t* self);
void enable_rq_intercept_mode(redqueen_t* self);
void disable_rq_intercept_mode(redqueen_t* self);
void redqueen_register_transition(redqueen_t* self, uint64_t ip, uint64_t transition_val);
void redqueen_set_trace_mode(redqueen_t* self);
void redqueen_unset_trace_mode(redqueen_t* self);
void set_se_instruction(redqueen_t* self, uint64_t addr);
void dump_se_registers(redqueen_t* self);
void dump_se_memory_access(redqueen_t* self, cs_insn* insn);
void dump_se_return_access(redqueen_t* self, cs_insn* insn);
void dump_se_memory_access_at(redqueen_t* self, uint64_t instr_addr, uint64_t mem_addr);
void redqueen_insert_hooks(redqueen_t* self);
void redqueen_remove_hooks(redqueen_t* self);
void redqueen_callback(void* opaque, disassembler_mode_t mode, uint64_t start_addr, uint64_t end_addr);
#endif

40
nyx/redqueen_patch.c Normal file
View File

@ -0,0 +1,40 @@
#include "redqueen_patch.h"
#include "redqueen.h"
#include "patcher.h"
#include "file_helper.h"
#include "debug.h"
///////////////////////////////////////////////////////////////////////////////////
// Private Helper Functions Declarations
///////////////////////////////////////////////////////////////////////////////////
void _load_and_set_patches(patcher_t* self);
///////////////////////////////////////////////////////////////////////////////////
// Public Functions
///////////////////////////////////////////////////////////////////////////////////
void pt_enable_patches(patcher_t *self){
_load_and_set_patches(self);
patcher_apply_all(self);
}
void pt_disable_patches(patcher_t *self){
patcher_restore_all(self);
}
///////////////////////////////////////////////////////////////////////////////////
// Private Helper Functions Definitions
///////////////////////////////////////////////////////////////////////////////////
void _load_and_set_patches(patcher_t* self){
size_t num_addrs = 0;
uint64_t *addrs = NULL;
parse_address_file(redqueen_workdir.redqueen_patches, &num_addrs, &addrs);
if(num_addrs){
patcher_set_addrs(self, addrs, num_addrs);
free(addrs);
}
}

11
nyx/redqueen_patch.h Normal file
View File

@ -0,0 +1,11 @@
#ifndef __GUARD_REDQUEEN_PATCH__
#define __GUARD_REDQUEEN_PATCH__
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include "nyx/patcher.h"
void pt_enable_patches(patcher_t *self);
void pt_disable_patches(patcher_t *self);
#endif

73
nyx/redqueen_trace.c Normal file
View File

@ -0,0 +1,73 @@
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include "redqueen_trace.h"
redqueen_trace_t* redqueen_trace_new(void){
redqueen_trace_t* self = malloc(sizeof(redqueen_trace_t));
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
self->max_ordered_transitions = INIT_NUM_OF_STORED_TRANSITIONS;
self->ordered_transitions = malloc(INIT_NUM_OF_STORED_TRANSITIONS*sizeof(uint128_t));
return self;
}
void redqueen_trace_reset(redqueen_trace_t* self){
kh_destroy(RQ_TRACE, self->lookup);
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
}
void redqueen_trace_free(redqueen_trace_t* self){
kh_destroy(RQ_TRACE, self->lookup);
free(self->ordered_transitions);
free(self);
}
void redqueen_trace_register_transition(redqueen_trace_t* self, uint64_t from, uint64_t to){
khiter_t k;
int ret;
uint128_t key = (((uint128_t)from)<<64) | ((uint128_t)to);
k = kh_get(RQ_TRACE, self->lookup, key);
if(k != kh_end(self->lookup)){
kh_value(self->lookup, k) += 1;
} else{
k = kh_put(RQ_TRACE, self->lookup, key, &ret);
kh_value(self->lookup, k) = 1;
self->ordered_transitions[self->num_ordered_transitions] = key;
self->num_ordered_transitions++;
assert(self->num_ordered_transitions < self->max_ordered_transitions);
}
}
void redqueen_trace_write_file(redqueen_trace_t* self, int fd){
for(size_t i = 0; i < self->num_ordered_transitions; i++){
khiter_t k;
uint128_t key = self->ordered_transitions[i];
k = kh_get(RQ_TRACE, self->lookup, key);
assert(k != kh_end(self->lookup));
dprintf(fd, "%lx,%lx,%lx\n", (uint64_t)(key>>64), (uint64_t)key, kh_value(self->lookup, k) );
}
}
#ifdef DEBUG_MAIN
int main(int argc, char** argv){
redqueen_trace_t* rq_obj = redqueen_trace_new();
for (uint64_t j = 0; j < 0x5; j++){
redqueen_trace_register_transition(rq_obj, 0xBADF, 0xC0FFEE);
redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE);
for (uint64_t i = 0; i < 0x10000; i++){
redqueen_trace_register_transition(rq_obj, 0xBADBEEF, 0xC0FFEE);
}
redqueen_trace_write_file(rq_obj, STDOUT_FILENO);
redqueen_trace_reset(rq_obj);
}
redqueen_trace_free(rq_obj);
return 0;
}
#endif

42
nyx/redqueen_trace.h Normal file
View File

@ -0,0 +1,42 @@
#pragma once
#include "khash.h"
typedef unsigned __int128 uint128_t;
typedef uint128_t khint128_t;
#define INIT_NUM_OF_STORED_TRANSITIONS 0xfffff
/*! @function
@abstract 128-bit integer hash function
@param key The integer [khint128_t]
@return The hash value [khint_t]
*/
#define kh_int128_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11) ^ (((key>>64))>>33^((key>>64))^((key>>64))<<11)
/*! @function
@abstract 128-bit integer comparison function
*/
#define kh_int128_hash_equal(a, b) ((a) == (b))
/*! @function
@abstract Instantiate a hash map containing 128-bit integer keys
@param name Name of the hash table [symbol]
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT128(name, khval_t) \
KHASH_INIT(name, khint128_t, khval_t, 1, kh_int128_hash_func, kh_int128_hash_equal)
KHASH_MAP_INIT_INT128(RQ_TRACE, uint64_t)
#define INIT_TRACE_IP 0xFFFFFFFFFFFFFFFFULL
typedef struct redqueen_trace_s{
khash_t(RQ_TRACE) *lookup;
size_t num_ordered_transitions;
size_t max_ordered_transitions;
uint128_t* ordered_transitions;
} redqueen_trace_t;
redqueen_trace_t* redqueen_trace_new(void);
void redqueen_trace_reset(redqueen_trace_t* self);
void redqueen_trace_free(redqueen_trace_t* self);
void redqueen_trace_register_transition(redqueen_trace_t* self, uint64_t from, uint64_t to);
void redqueen_trace_write_file(redqueen_trace_t* self, int fd);
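Packing both endpoints of a transition into one 128-bit key keeps each lookup to a single kh_get(). A sketch of the packing and a close analogue of the reduction kh_int128_hash_func performs (it hashes each 64-bit half and XORs the results; the exact bit mixing above differs slightly):

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 uint128_t;

int main(void) {
    uint64_t from = 0xBADF, to = 0xC0FFEE;
    uint128_t key = (((uint128_t)from) << 64) | (uint128_t)to;
    /* hash each 64-bit half as khash's 64-bit hash would, then combine */
    uint64_t lo = (uint64_t)key, hi = (uint64_t)(key >> 64);
    uint32_t h = (uint32_t)((lo >> 33) ^ lo ^ (lo << 11))
               ^ (uint32_t)((hi >> 33) ^ hi ^ (hi << 11));
    printf("key hash for (%#lx -> %#lx): %#x\n", from, to, h);
    return 0;
}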

172
nyx/sharedir.c Normal file
View File

@ -0,0 +1,172 @@
#include "sharedir.h"
#include <assert.h>
#include <stdio.h>
#include <dirent.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
//#define SHAREDIR_DEBUG
sharedir_t* sharedir_new(void){
sharedir_t* self = malloc(sizeof(sharedir_t));
self->dir = NULL;
self->lookup = kh_init(SHAREDIR_LOOKUP);
self->last_file_f = NULL;
self->last_file_obj_ptr = NULL;
return self;
}
void sharedir_set_dir(sharedir_t* self, const char* dir){
assert(!self->dir);
assert(asprintf(&self->dir, "%s", dir) != -1);
}
static bool file_exists(const char* file){
struct stat sb;
return (stat(file, &sb) == 0);
}
static time_t get_file_mod_time(char *file){
struct stat attr;
stat(file, &attr);
return attr.st_mtime;
}
static size_t get_file_size(const char* file){
struct stat st;
stat(file, &st);
return st.st_size;
}
static char* sharedir_scan(sharedir_t* self, const char* file){
char* path = NULL;
assert(asprintf(&path, "%s/%s", self->dir, file) != -1);
char* real_path = realpath(path, NULL);
free(path);
if(real_path && !strncmp(self->dir, real_path, strlen(self->dir)) && file_exists(real_path)){
return real_path;
}
if(real_path){
free(real_path);
}
return NULL;
}
static sharedir_file_t* sharedir_get_object(sharedir_t* self, const char* file){
khiter_t k;
int ret;
sharedir_file_t* obj = NULL;
k = kh_get(SHAREDIR_LOOKUP, self->lookup, file);
if(k != kh_end(self->lookup)){
/* file already exists in our hash map */
obj = kh_value(self->lookup, k);
/* check if file still exists */
assert(file_exists(obj->path));
/* check if mod time matches */
assert(get_file_mod_time(obj->path) == obj->mod_time);
/* check if file size matches */
assert(get_file_size(obj->path) == obj->size);
return obj;
}
else{
/* not in the lookup map yet */
char* real_path = sharedir_scan(self, file);
struct stat sb;
if(real_path != NULL){
if (stat(real_path, &sb) == 0 && S_ISDIR(sb.st_mode)){
return NULL; // is dir
}
obj = malloc(sizeof(sharedir_file_t));
memset(obj, 0x0, sizeof(sharedir_file_t));
assert(asprintf(&obj->file, "%s", basename(real_path)) != -1);
obj->path = real_path;
obj->size = get_file_size(obj->path);
obj->bytes_left = (uint64_t) obj->size;
obj->mod_time = get_file_mod_time(obj->path);
/* put into hash_list */
char* new_file = NULL;
assert(asprintf(&new_file, "%s", file) != -1);
k = kh_put(SHAREDIR_LOOKUP, self->lookup, new_file, &ret);
kh_value(self->lookup, k) = obj;
return obj;
}
/* file not found */
return NULL;
}
}
static FILE* get_file_ptr(sharedir_t* self, sharedir_file_t* obj){
if(obj == self->last_file_obj_ptr && self->last_file_f){
return self->last_file_f;
}
else{
if(self->last_file_f){
fclose(self->last_file_f);
}
FILE* f = fopen(obj->path, "r");
self->last_file_f = f;
self->last_file_obj_ptr = obj;
return f;
}
}
uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page_buffer){
if(!self->dir){
fprintf(stderr, "WARNING: New file request received, but no share dir configured! [FILE: %s]\n", file);
return 0xFFFFFFFFFFFFFFFFUL;
}
FILE* f = NULL;
sharedir_file_t* obj = sharedir_get_object(self, file);
if(obj != NULL){
#ifdef SHAREDIR_DEBUG
printf("sharedir_get_object->file: %s\n", obj->file);
printf("sharedir_get_object->path: %s\n", obj->path);
printf("sharedir_get_object->size: %ld\n", obj->size);
printf("sharedir_get_object->bytes_left: %ld\n", obj->bytes_left);
#endif
if(obj->bytes_left >= 0x1000){
f = get_file_ptr(self, obj);
fseek(f, obj->size-obj->bytes_left, SEEK_SET);
assert(fread(page_buffer, 1, 0x1000, f) == 0x1000);
obj->bytes_left -= 0x1000;
return 0x1000;
}
else {
if (obj->bytes_left != 0){
f = get_file_ptr(self, obj);
fseek(f, obj->size-obj->bytes_left, SEEK_SET);
assert(fread(page_buffer, 1, obj->bytes_left, f) == obj->bytes_left);
uint64_t ret_value = obj->bytes_left;
obj->bytes_left = 0;
return ret_value;
}
else {
obj->bytes_left = (uint64_t)obj->size;
return 0;
}
}
}
else{
return 0xFFFFFFFFFFFFFFFFUL;
}
}
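sharedir_request_file() implements a simple pull protocol: full 0x1000-byte chunks, then one short tail chunk, then 0 at EOF (which also rewinds the state for the next request), with ~0 signalling an unknown or out-of-tree file. A usage sketch under those assumptions (directory and file name are placeholders):

#include "sharedir.h"
#include <stdio.h>

int main(void) {
    sharedir_t *sd = sharedir_new();
    sharedir_set_dir(sd, "/tmp/share");        /* hypothetical share dir */
    uint8_t page[0x1000];
    uint64_t got, total = 0;
    while ((got = sharedir_request_file(sd, "payload.bin", page)) != 0) {
        if (got == 0xFFFFFFFFFFFFFFFFUL) {     /* file unknown or outside dir */
            fprintf(stderr, "file not found\n");
            break;
        }
        total += got;                          /* consume 'got' bytes of 'page' */
    }
    printf("received %lu bytes\n", total);
    return 0;
}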

26
nyx/sharedir.h Normal file
View File

@ -0,0 +1,26 @@
#pragma once
#include <stdio.h>
#include "khash.h"
#include <stdint.h>
typedef struct sharedir_file_s{
char* file;
char* path;
size_t size;
uint64_t bytes_left;
time_t mod_time;
} sharedir_file_t;
KHASH_MAP_INIT_STR(SHAREDIR_LOOKUP, sharedir_file_t*)
typedef struct sharedir_s{
char* dir;
khash_t(SHAREDIR_LOOKUP) *lookup;
FILE* last_file_f;
sharedir_file_t* last_file_obj_ptr;
} sharedir_t;
sharedir_t* sharedir_new(void);
void sharedir_set_dir(sharedir_t* self, const char* dir);
uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page_buffer);

552
nyx/snapshot/block/block_cow.c Normal file
View File

@ -0,0 +1,552 @@
#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>
#include "nyx/snapshot/block/block_cow.h"
#include "sysemu/block-backend.h"
#include "nyx/state.h"
//#define COW_CACHE_DEBUG
//#define COW_CACHE_VERBOSE
#define CHUNK_SIZE 0x1000
//0x200
#define PAGE_MASK 0xFFFFFFFFFFFFF000
cow_cache_t* cow_cache_new(const char* filename){
//printf("%s: \"%s\"\n", __func__, filename);
cow_cache_t* self = malloc(sizeof(cow_cache_t));
self->lookup_primary = kh_init(COW_CACHE);
self->lookup_secondary = kh_init(COW_CACHE);
self->lookup_secondary_tmp = kh_init(COW_CACHE);
self->data_primary = mmap(NULL, COW_CACHE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
assert(self->data_primary != MAP_FAILED);
self->data_secondary = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
assert(self->data_secondary != MAP_FAILED);
self->data_secondary_tmp = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
assert(self->data_secondary_tmp != MAP_FAILED);
self->filename = strdup(basename(filename));
self->offset_primary = 0;
self->offset_secondary = 0;
self->offset_secondary_tmp = 0;
if(getenv("NYX_DISABLE_BLOCK_COW")){
fprintf(stderr, "WARNING: Nyx block COW layer disabled for %s (** write operations are not cached **)\n", filename);
self->enabled = false;
}
else{
self->enabled = true;
}
self->enabled_fuzz = false;
self->enabled_fuzz_tmp = false;
#ifdef DEBUG_COW_LAYER
self->read_calls = 0;
self->write_calls = 0;
self->read_calls_tmp = 0;
self->write_calls_tmp = 0;
#endif
return self;
}
static char* gen_file_name(cow_cache_t* self, const char* filename_prefix, const char* filename_postfix){
char* tmp1;
char* tmp2;
assert(asprintf(&tmp2, "%s", self->filename) != -1);
for(int i = 0; i < strlen(tmp2); i++){
if(tmp2[i] == '/'){
tmp2[i] = '_';
}
}
assert(asprintf(&tmp1, "%s_%s.%s", filename_prefix, tmp2, filename_postfix) != -1);
free(tmp2);
return tmp1;
}
void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool switch_mode){
assert(!self->enabled_fuzz);
//printf("%s: %s\n", __func__, self->filename);
char* tmp1;
char* tmp2;
//assert(asprintf(&tmp1, "%s_%s.khash", filename_prefix, self->filename) != -1);
//assert(asprintf(&tmp2, "%s_%s.pcow", filename_prefix, self->filename) != -1);
tmp1 = gen_file_name(self, filename_prefix, "khash");
tmp2 = gen_file_name(self, filename_prefix, "pcow");
//printf("%s\n", tmp1);
kh_destroy(COW_CACHE, self->lookup_primary);
struct stat buffer;
assert(stat (tmp2, &buffer) == 0);
if(buffer.st_size){
self->lookup_primary = kh_load(COW_CACHE, tmp1);
}
else {
self->lookup_primary = kh_init(COW_CACHE);
}
int fd = open(tmp2, O_RDONLY);
assert(fd != -1);
//printf("TRY TO MMAP : %lx\n", buffer.st_size);
if(switch_mode){
self->data_primary = mmap(0, COW_CACHE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
assert(self->data_primary != MAP_FAILED);
}
else{
void* ptr = mmap(0, COW_CACHE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
assert(ptr != MAP_FAILED);
memcpy(self->data_primary, ptr, buffer.st_size);
munmap(ptr, COW_CACHE_SIZE);
}
//printf("self->data_primary -> %p\n", self->data_primary );
close(fd);
self->offset_primary = buffer.st_size;
//fprintf(stderr, "self->offset_primary: %lx\n", self->offset_primary);
if(switch_mode){
switch_to_fuzz_mode(self);
}
free(tmp1);
free(tmp2);
//printf("DONE!\n");
}
void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix){
assert(self->enabled_fuzz);
//printf("%s: %s\n", __func__, self->filename);
char* tmp1;
char* tmp2;
//assert(asprintf(&tmp1, "%s_%s.khash", filename_prefix, self->filename) != -1);
//assert(asprintf(&tmp2, "%s_%s.pcow", filename_prefix, self->filename) != -1);
tmp1 = gen_file_name(self, filename_prefix, "khash");
tmp2 = gen_file_name(self, filename_prefix, "pcow");
//printf("%s\n", tmp1);
if(self->offset_primary){
kh_write(COW_CACHE, self->lookup_primary, tmp1);
}
else{
fclose(fopen(tmp1, "wb")); /* create an empty khash file */
}
FILE *fp = fopen(tmp2, "wb");
if(fp == NULL) {
fprintf(stderr, "[%s] Could not open file %s.\n", __func__, tmp2);
assert(false);
//exit(EXIT_FAILURE);
}
if(self->offset_primary){
fwrite(self->data_primary, CHUNK_SIZE, self->offset_primary/CHUNK_SIZE, fp);
}
//fprintf(stderr, "self->offset_primary: %lx\n", self->offset_primary);
fclose(fp);
free(tmp1);
free(tmp2);
//printf("DONE!\n");
/*
qemu_mutex_unlock_iothread();
fast_reload_t* snapshot = fast_reload_new();
fast_reload_create(snapshot);
qemu_mutex_lock_iothread();
printf("CREATED!\n");
*/
}
void cow_cache_reset(cow_cache_t* self){
if(!self->enabled_fuzz)
return;
//fprintf(stderr, "RESETING COW STUFF YO %s (%lx)\n", self->filename, self->offset_secondary);
if(self->enabled_fuzz){
#ifdef DEBUG_COW_LAYER
printf("%s: read_calls =>\t%ld\n", __func__, self->read_calls);
printf("%s: write_calls =>\t%ld\n", __func__, self->write_calls);
printf("%s: read_calls_tmp =>\t%ld\n", __func__, self->read_calls_tmp);
printf("%s: write_calls_tmp =>\t%ld\n", __func__, self->write_calls_tmp);
#endif
if(!self->enabled_fuzz_tmp){
self->offset_secondary = 0;
kh_clear(COW_CACHE, self->lookup_secondary);
#ifdef DEBUG_COW_LAYER
self->read_calls = 0;
self->write_calls = 0;
#endif
}
else {
self->offset_secondary_tmp = 0;
kh_clear(COW_CACHE, self->lookup_secondary_tmp);
#ifdef DEBUG_COW_LAYER
printf("CLEAR lookup_secondary_tmp\n");
self->read_calls_tmp = 0;
self->write_calls_tmp = 0;
#endif
}
}
}
void cow_cache_enable_tmp_mode(cow_cache_t* self){
assert(self->enabled_fuzz);
self->enabled_fuzz_tmp = true;
}
void cow_cache_disable_tmp_mode(cow_cache_t* self){
assert(self->enabled_fuzz);
assert(self->enabled_fuzz_tmp);
cow_cache_reset(self);
self->enabled_fuzz_tmp = false;
}
void cow_cache_enable(cow_cache_t* self){
cow_cache_reset(self);
self->enabled = true;
}
void cow_cache_disable(cow_cache_t* self){
cow_cache_reset(self);
self->enabled = false;
}
typedef struct BlkRwCo {
BlockBackend *blk;
int64_t offset;
QEMUIOVector *qiov;
int ret;
BdrvRequestFlags flags;
} BlkRwCo;
typedef struct BlkAioEmAIOCB {
BlockAIOCB common;
BlkRwCo rwco;
int bytes;
bool has_returned;
} BlkAioEmAIOCB;
extern void blk_aio_write_entry(void *opaque);
extern int blk_check_byte_request(BlockBackend *blk, int64_t offset, size_t size);
extern void blk_aio_complete(BlkAioEmAIOCB *acb);
/* read from primary buffer */
static inline void read_from_primary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
khiter_t k;
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if(k != kh_end(self->lookup_primary)){
#ifdef COW_CACHE_DEBUG
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary);
#endif
//iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
}
return;
}
/* try to read from secondary buffer
* read from primary buffer if the data is not available yet */
static inline void read_from_secondary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
/* read from L2 TMP buffer */
khiter_t k;
if(self->enabled_fuzz_tmp){
k = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
if(k != kh_end(self->lookup_secondary_tmp)){
#ifdef COW_CACHE_DEBUG
printf("[FTMP] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_secondary);
#endif
//iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k), CHUNK_SIZE);
return;
}
}
/* read from L2 buffer */
k = kh_get(COW_CACHE, self->lookup_secondary, offset_addr);
if(k != kh_end(self->lookup_secondary)){
#ifdef COW_CACHE_DEBUG
printf("[FUZZ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_secondary);
#endif
//iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k), CHUNK_SIZE);
return;
}
/* read from L1 buffer */
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if(k != kh_end(self->lookup_primary)){
#ifdef COW_CACHE_DEBUG
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary);
#endif
//iov_from_buf_full_register(qiov->iov, qiov->niov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
}
}
/* read data from cow cache */
static int cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags){
#ifdef DEBUG_COW_LAYER
if(self->enabled_fuzz){
if(!self->enabled_fuzz_tmp){
self->read_calls++;
}
else{
self->read_calls_tmp++;
}
}
#endif
//iov_from_buf_full_register(qiov->iov, qiov->niov, offset, NULL, bytes);
blk_co_preadv(blk, offset, bytes, qiov, flags);
if ((qiov->size%CHUNK_SIZE)){
#ifdef COW_CACHE_DEBUG
fprintf(stderr, "%s: FAILED %lx!\n", __func__, qiov->size);
#endif
return 0;
}
assert(!(qiov->size%CHUNK_SIZE));
uint64_t iov_offset = 0;
for(uint64_t offset_addr = offset; offset_addr < (offset+(qiov->size)); offset_addr+= CHUNK_SIZE){
if(self->enabled_fuzz){
read_from_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
}
else{
read_from_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
}
iov_offset+= CHUNK_SIZE;
}
return 0;
}
/* write to primary buffer */
static inline void write_to_primary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
int ret;
khiter_t k;
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if(unlikely(k == kh_end(self->lookup_primary))){
/* create page */
k = kh_put(COW_CACHE, self->lookup_primary, offset_addr, &ret);
#ifdef COW_CACHE_DEBUG
printf("ADD NEW COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary);
#endif
kh_value(self->lookup_primary, k) = self->offset_primary;
self->offset_primary += CHUNK_SIZE;
#ifdef COW_CACHE_VERBOSE
printf("COW CACHE IS 0x%lx BYTES (KB: %ld / MB: %ld / GB: %ld) IN SIZE!\n", self->offset, self->offset >> 10, self->offset >> 20, self->offset >> 30);
#endif
/* IN CASE THE BUFFER IS FULL -> ABORT! */
assert(self->offset_primary < COW_CACHE_SIZE);
}
#ifdef COW_CACHE_DEBUG
printf("LOAD COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx (%s)\n", offset_addr, iov_offset, kh_value(self->lookup_primary, k), self->filename);
#endif
/* write to cached page */
qemu_iovec_to_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
/*
if(self->offset_primary >= 0xA00000){
printf("SWITCH TO SECONDARY\n");
switch_to_fuzz_mode(self);
dump_primary_buffer(self, "/tmp/cow_dump");
}
*/
}
static inline void write_to_secondary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
int ret;
//assert((offset_addr&(CHUNK_SIZE-1)) == 0);
if(!self->enabled_fuzz_tmp){
/* L2 mode */
/* IN CASE THE BUFFER IS FULL -> ABORT! */
if(self->offset_secondary >= COW_CACHE_SECONDARY_SIZE){
GET_GLOBAL_STATE()->cow_cache_full = true;
abort();
return;
}
khiter_t k_secondary = kh_get(COW_CACHE, self->lookup_secondary, offset_addr);
if(unlikely(k_secondary == kh_end(self->lookup_secondary))){
/* if page is not cached in secondary buffer yet */
k_secondary = kh_put(COW_CACHE, self->lookup_secondary, offset_addr, &ret);
kh_value(self->lookup_secondary, k_secondary) = self->offset_secondary;
self->offset_secondary += CHUNK_SIZE;
}
//printf("WRITE -> %lx\n", kh_value(self->lookup_secondary, k_secondary));
/* write to cache */
qemu_iovec_to_buf(qiov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k_secondary), CHUNK_SIZE);
}
else{
/* L2 TMP mode */
/* IN CASE THE BUFFER IS FULL -> ABORT! */
if(self->offset_secondary_tmp >= COW_CACHE_SECONDARY_SIZE){
GET_GLOBAL_STATE()->cow_cache_full = true;
abort();
return;
}
khiter_t k_secondary_tmp = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
if(unlikely(k_secondary_tmp == kh_end(self->lookup_secondary_tmp))){
/* if page is not cached in secondary tmp buffer yet */
k_secondary_tmp = kh_put(COW_CACHE, self->lookup_secondary_tmp, offset_addr, &ret);
kh_value(self->lookup_secondary_tmp, k_secondary_tmp) = self->offset_secondary_tmp;
self->offset_secondary_tmp += CHUNK_SIZE;
}
/* write to cache */
//printf("WRITE TO L2 TMP -> %lx\n", self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k_secondary_tmp));
qemu_iovec_to_buf(qiov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k_secondary_tmp), CHUNK_SIZE);
}
}
/* write data to cow cache */
static int cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags){
//khiter_t k;
#ifdef DEBUG_COW_LAYER
if(self->enabled_fuzz){
if(!self->enabled_fuzz_tmp){
self->write_calls++;
}
else{
self->write_calls_tmp++;
}
}
#endif
if ((qiov->size%CHUNK_SIZE)){
#ifdef COW_CACHE_DEBUG
fprintf(stderr, "%s: FAILED %lx!\n", __func__, qiov->size);
#endif
return 0;
}
//printf("qiov->size: %lx %lx\n", qiov->size, CHUNK_SIZE);
if((qiov->size%CHUNK_SIZE) && GET_GLOBAL_STATE()->in_fuzzing_mode){
GET_GLOBAL_STATE()->cow_cache_full = true;
fprintf(stderr, "WARNING: %s write in %lx CHUNKSIZE\n", __func__, qiov->size);
return 0;
}
else{
assert(!(qiov->size%CHUNK_SIZE));
}
uint64_t iov_offset = 0;
for(uint64_t offset_addr = offset; offset_addr < (offset+(qiov->size)); offset_addr+= CHUNK_SIZE){
if(self->enabled_fuzz){
write_to_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
}
else{
write_to_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
}
iov_offset+= CHUNK_SIZE;
}
return 0;
}
void switch_to_fuzz_mode(cow_cache_t* self){
self->enabled_fuzz = true;
assert(!mprotect(self->data_primary, COW_CACHE_SIZE, PROT_READ));
printf("[qemu-nyx] switch to secondary CoW buffer\n");
}
void cow_cache_read_entry(void* opaque){
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
#ifdef COW_CACHE_DEBUG
printf("%s %lx %lx\n", __func__, rwco->offset, acb->bytes);
#endif
//printf("rwco->ret: %lx %lx\n", rwco->ret, acb->bytes);
rwco->ret = cow_cache_read( *((cow_cache_t**)(rwco->blk)), rwco->blk, rwco->offset, acb->bytes, rwco->qiov, rwco->flags);
//last_read = PAGE_MASK;
blk_aio_complete(acb);
}
void cow_cache_write_entry(void* opaque){
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;
#ifdef COW_CACHE_DEBUG
printf("%s\n", __func__);
#endif
rwco->ret = cow_cache_write( *((cow_cache_t**)(rwco->blk)), rwco->blk, rwco->offset, acb->bytes, rwco->qiov, rwco->flags);
blk_aio_complete(acb);
}
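The read path above consults up to three CoW layers before falling back to the block backend: the temporary incremental buffer, then the secondary fuzzing buffer, then the pre-snapshot primary buffer. A compact stand-alone sketch of that precedence, where the stub lookups stand in for the kh_get() calls in cow_cache_read():

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* stand-ins for the khash lookups in cow_cache_read() */
static const char *l2_tmp(uint64_t a) { (void)a; return NULL; }
static const char *l2(uint64_t a)     { return a == 0x1000 ? "L2 (fuzz)" : NULL; }
static const char *l1(uint64_t a)     { return a <= 0x2000 ? "L1 (pre-snapshot)" : NULL; }

static const char *resolve_chunk(uint64_t addr) {
    const char *p;
    if ((p = l2_tmp(addr))) return p;   /* incremental snapshot layer */
    if ((p = l2(addr)))     return p;   /* fuzzing layer              */
    if ((p = l1(addr)))     return p;   /* pre-snapshot layer         */
    return "backend";                   /* data from blk_co_preadv()  */
}

int main(void) {
    printf("0x1000 -> %s\n", resolve_chunk(0x1000)); /* L2 wins     */
    printf("0x2000 -> %s\n", resolve_chunk(0x2000)); /* L1 fallback */
    printf("0x3000 -> %s\n", resolve_chunk(0x3000)); /* backend     */
    return 0;
}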

72
nyx/snapshot/block/block_cow.h Normal file
View File

@ -0,0 +1,72 @@
#pragma once
#include <stdint.h>
#include <sys/types.h>
#include "nyx/khash.h"
#include "qemu/osdep.h"
#include "block/block.h"
#include "nyx/redqueen_trace.h"
//#define DEBUG_COW_LAYER
//#define COW_CACHE_SIZE 0x80000000 /* 2GB */
#define COW_CACHE_SIZE 0xC0000000 /* 3GB */
//#define COW_CACHE_SECONDARY_SIZE 0x20000000 /* 512MB */
#define COW_CACHE_SECONDARY_SIZE 0xC0000000 /* 3GB */
KHASH_MAP_INIT_INT64(COW_CACHE, uint64_t)
typedef struct cow_cache_s{
khash_t(COW_CACHE) *lookup_primary;
khash_t(COW_CACHE) *lookup_secondary;
khash_t(COW_CACHE) *lookup_secondary_tmp;
void* data_primary;
void* data_secondary;
void* data_secondary_tmp;
char* filename;
uint64_t offset_primary;
uint64_t offset_secondary;
uint64_t offset_secondary_tmp;
bool enabled;
bool enabled_fuzz;
bool enabled_fuzz_tmp;
#ifdef DEBUG_COW_LAYER
uint64_t read_calls;
uint64_t write_calls;
uint64_t read_calls_tmp;
uint64_t write_calls_tmp;
#endif
} cow_cache_t;
cow_cache_t* cow_cache_new(const char* filename);
void cow_cache_reset(cow_cache_t* self);
//int coroutine_fn cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags);
//int coroutine_fn cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags);
void switch_to_fuzz_mode(cow_cache_t* self);
void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool switch_mode);
void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix);
void cow_cache_read_entry(void* opaque);
void cow_cache_write_entry(void* opaque);
void cow_cache_enable(cow_cache_t* self);
void cow_cache_disable(cow_cache_t* self);
void cow_cache_enable_tmp_mode(cow_cache_t* self);
void cow_cache_disable_tmp_mode(cow_cache_t* self);

194
nyx/snapshot/block/nyx_block_snapshot.c Normal file
View File

@ -0,0 +1,194 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "sysemu/block-backend.h"
#include "block/qapi.h"
#include "sysemu/runstate.h"
#include "migration/vmstate.h"
#include "nyx/snapshot/block/nyx_block_snapshot.h"
#include "nyx/debug.h"
#include "nyx/state.h"
typedef struct fast_reload_cow_entry_s{
uint32_t id;
char idstr[256];
} fast_reload_cow_entry_t;
nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snapshot){
nyx_block_t* self = malloc(sizeof(nyx_block_t));
memset(self, 0, sizeof(nyx_block_t));
BlockBackend *blk;
fast_reload_cow_entry_t entry;
char* tmp1;
char* tmp2;
assert(asprintf(&tmp1, "%s/fs_cache.meta", folder) != -1);
assert(asprintf(&tmp2, "%s/fs_drv", folder) != -1);
self->cow_cache_array_size = 0;
FILE* f = fopen (tmp1, "r");
assert(f != NULL);
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if(blk && blk->cow_cache){
debug_printf("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
self->cow_cache_array_size++;
}
}
uint32_t temp_cow_cache_array_size;
assert(fread(&temp_cow_cache_array_size, sizeof(uint32_t), 1, f) == 1);
debug_printf("%d vs %x\n", temp_cow_cache_array_size, self->cow_cache_array_size);
assert(self->cow_cache_array_size == temp_cow_cache_array_size);
self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size);
uint32_t i = 0;
uint32_t id = 0;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if(blk && blk->cow_cache){
self->cow_cache_array[i++] = blk->cow_cache;
assert(fread(&entry, sizeof(fast_reload_cow_entry_t), 1, f) == 1);
assert(!strcmp(entry.idstr, blk->cow_cache->filename));
assert(entry.id == id);
}
id++;
}
fclose(f);
for(i = 0; i < self->cow_cache_array_size; i++){
read_primary_buffer(self->cow_cache_array[i], tmp2, !pre_snapshot);
}
free(tmp1);
free(tmp2);
return self;
}
nyx_block_t* nyx_block_snapshot_init(void){
nyx_block_t* self = malloc(sizeof(nyx_block_t));
memset(self, 0, sizeof(nyx_block_t));
BlockBackend *blk;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if(blk && blk->cow_cache){
debug_printf("%p %s\n", blk->cow_cache, blk->cow_cache->filename);
self->cow_cache_array_size++;
}
}
self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size);
uint32_t i = 0;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
if(blk && blk->cow_cache){
self->cow_cache_array[i++] = blk->cow_cache;
}
}
for(i = 0; i < self->cow_cache_array_size; i++){
switch_to_fuzz_mode(self->cow_cache_array[i]);
}
return self;
}
/*
static void fast_reload_serialize_cow(fast_reload_t* self, const char* folder){
fast_reload_cow_entry_t entry;
char* tmp1;
char* tmp2;
assert(asprintf(&tmp1, "%s/fs_cache.meta", folder) != -1);
assert(asprintf(&tmp2, "%s/fs_drv", folder) != -1);
FILE* f = fopen (tmp1, "w");
fwrite(&(self->cow_cache_array_size), sizeof(uint32_t), 1, f);
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
entry.id = i;
printf("%d -> %s\n", i, (const char*)self->cow_cache_array[i]->filename);
strncpy((char*)&entry.idstr, (const char*)self->cow_cache_array[i]->filename, 256);
fwrite(&entry, sizeof(fast_reload_cow_entry_t), 1, f);
dump_primary_buffer(self->cow_cache_array[i], tmp2);
}
fclose(f);
free(tmp1);
free(tmp2);
}
*/
void nyx_block_snapshot_flush(nyx_block_t* self){
GET_GLOBAL_STATE()->cow_cache_full = false;
}
void nyx_block_snapshot_switch_incremental(nyx_block_t* self){
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
cow_cache_enable_tmp_mode(self->cow_cache_array[i]);
}
nyx_block_snapshot_flush(self);
}
void nyx_block_snapshot_disable_incremental(nyx_block_t* self){
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
cow_cache_disable_tmp_mode(self->cow_cache_array[i]);
}
}
void nyx_block_snapshot_reset(nyx_block_t* self){
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
cow_cache_reset(self->cow_cache_array[i]);
}
}
void nyx_block_snapshot_serialize(nyx_block_t* self, const char* snapshot_folder){
fast_reload_cow_entry_t entry;
char* tmp1;
char* tmp2;
assert(asprintf(&tmp1, "%s/fs_cache.meta", snapshot_folder) != -1);
assert(asprintf(&tmp2, "%s/fs_drv", snapshot_folder) != -1);
FILE* f = fopen (tmp1, "w");
fwrite(&(self->cow_cache_array_size), sizeof(uint32_t), 1, f);
for(uint32_t i = 0; i < self->cow_cache_array_size; i++){
entry.id = i;
//printf("%d -> %s\n", i, (const char*)self->cow_cache_array[i]->filename);
strncpy((char*)&entry.idstr, (const char*)self->cow_cache_array[i]->filename, 255);
fwrite(&entry, sizeof(fast_reload_cow_entry_t), 1, f);
dump_primary_buffer(self->cow_cache_array[i], tmp2);
}
fclose(f);
free(tmp1);
free(tmp2);
}
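nyx_block_snapshot_serialize() writes fs_cache.meta as a uint32_t cache count followed by one fixed-size (id, idstr[256]) record per CoW cache, and the loader above asserts the same order on restore. A minimal reader for that layout (the file path is a placeholder, and the struct mirrors the one defined in this file):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef struct {
    uint32_t id;
    char idstr[256];
} fast_reload_cow_entry_t;

int main(void) {
    FILE *f = fopen("fs_cache.meta", "r");    /* hypothetical path */
    assert(f != NULL);
    uint32_t n;
    assert(fread(&n, sizeof(n), 1, f) == 1);  /* cache count */
    for (uint32_t i = 0; i < n; i++) {
        fast_reload_cow_entry_t e;
        assert(fread(&e, sizeof(e), 1, f) == 1);
        printf("%u -> %s\n", e.id, e.idstr);
    }
    fclose(f);
    return 0;
}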

21
nyx/snapshot/block/nyx_block_snapshot.h Normal file
View File

@ -0,0 +1,21 @@
#pragma once
#include <stdint.h>
#include "nyx/snapshot/block/block_cow.h"
typedef struct nyx_block_s{
cow_cache_t **cow_cache_array;
uint32_t cow_cache_array_size;
} nyx_block_t;
nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snapshot);
nyx_block_t* nyx_block_snapshot_init(void);
void nyx_block_snapshot_switch_to_incremental(nyx_block_t*);
void nyx_block_snapshot_flush(nyx_block_t* self);
void nyx_block_snapshot_switch_incremental(nyx_block_t* self);
void nyx_block_snapshot_disable_incremental(nyx_block_t* self);
void nyx_block_snapshot_reset(nyx_block_t* self);
void nyx_block_snapshot_serialize(nyx_block_t* self, const char* snapshot_folder);

470
nyx/snapshot/devices/nyx_device_state.c Normal file
View File

@ -0,0 +1,470 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "migration/register.h"
#include "migration/savevm.h"
#include "migration/qemu-file.h"
#include "migration/qjson.h"
#include "migration/global_state.h"
#include "nyx/snapshot/devices/nyx_device_state.h"
#include "nyx/debug.h"
#include "sysemu/block-backend.h"
#include "block/qapi.h"
#include "sysemu/runstate.h"
#include "migration/vmstate.h"
#include "nyx/snapshot/devices/state_reallocation.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <immintrin.h>
#include <stdint.h>
#include "sysemu/kvm_int.h"
#include "sysemu/cpus.h"
#include "sysemu/reset.h"
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#define STATE_BUFFER 0x8000000 /* up to 128MB */
extern void enable_fast_snapshot_rtc(void);
extern void enable_fast_snapshot_kvm_clock(void);
static void enable_fast_snapshot_mode(void){
enable_fast_snapshot_rtc();
enable_fast_snapshot_kvm_clock();
}
extern int kvm_nyx_put_tsc_value(CPUState *cs, uint64_t data);
static void set_tsc_value(nyx_device_state_t* self, bool tmp_snapshot){
if(self->incremental_mode){
assert(self->tsc_value_incremental);
assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value_incremental) == 0);
}
else{
assert(self->tsc_value);
assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value) == 0);
}
}
static void save_tsc_value(nyx_device_state_t* self, bool incremental_mode){
X86CPU *cpu = X86_CPU(qemu_get_cpu(0));
CPUX86State *env = &cpu->env;
if(incremental_mode){
self->tsc_value_incremental = env->tsc; // - 0x200000; /* fml */
}
else{
self->tsc_value = env->tsc;
}
}
extern int qemu_savevm_state(QEMUFile *f, Error **errp);
/* new savevm routine */
typedef struct SaveStateEntry {
QTAILQ_ENTRY(SaveStateEntry) entry;
char idstr[256];
int instance_id;
int alias_id;
int version_id;
int load_version_id;
int section_id;
int load_section_id;
SaveVMHandlers *ops;
const VMStateDescription *vmsd;
void *opaque;
void *compat;
int is_ram;
} SaveStateEntry;
typedef struct SaveState {
QTAILQ_HEAD(, SaveStateEntry) handlers;
int global_section_id;
bool skip_configuration;
uint32_t len;
const char *name;
uint32_t target_page_bits;
} SaveState;
extern SaveState savevm_state;
extern void vmstate_save(QEMUFile *f, SaveStateEntry *se, QJSON *vmdesc);
extern bool should_send_vmdesc(void);
extern bool skip_section_footers;
extern void save_section_footer(QEMUFile *f, SaveStateEntry *se);
extern void save_section_header(QEMUFile *f, SaveStateEntry *se, uint8_t section_type);
/* skip block ram */
static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
{
QJSON *vmdesc;
int vmdesc_len;
SaveStateEntry *se;
int ret;
bool in_postcopy = migration_in_postcopy();
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){
if (!se->ops ||
(in_postcopy && se->ops->save_live_complete_postcopy) ||
(in_postcopy && !iterable_only) ||
!se->ops->save_live_complete_precopy) {
continue;
}
if (se->ops && se->ops->is_active) {
if (!se->ops->is_active(se->opaque)) {
continue;
}
}
save_section_header(f, se, QEMU_VM_SECTION_END);
ret = se->ops->save_live_complete_precopy(f, se->opaque);
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
return;
}
}
}
if (iterable_only) {
return;
}
vmdesc = qjson_new();
json_prop_int(vmdesc, "page_size", TARGET_PAGE_SIZE);
json_start_array(vmdesc, "devices");
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){
if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
continue;
}
if (se->vmsd && !vmstate_save_needed(se->vmsd, se->opaque)) {
continue;
}
json_start_object(vmdesc, NULL);
json_prop_str(vmdesc, "name", se->idstr);
json_prop_int(vmdesc, "instance_id", se->instance_id);
save_section_header(f, se, QEMU_VM_SECTION_FULL);
vmstate_save(f, se, vmdesc);
save_section_footer(f, se);
json_end_object(vmdesc);
}
}
if (!in_postcopy) {
/* Postcopy stream will still be going */
qemu_put_byte(f, QEMU_VM_EOF);
}
json_end_array(vmdesc);
qjson_finish(vmdesc);
vmdesc_len = strlen(qjson_get_str(vmdesc));
if (should_send_vmdesc()) {
qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
qemu_put_be32(f, vmdesc_len);
qemu_put_buffer(f, (uint8_t *)qjson_get_str(vmdesc), vmdesc_len);
}
qjson_destroy(vmdesc);
qemu_fflush(f);
}
static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) {
SaveStateEntry *se;
int ret = 1;
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){
if (!se->ops || !se->ops->save_live_iterate) {
continue;
}
if (se->ops && se->ops->is_active) {
if (!se->ops->is_active(se->opaque)) {
continue;
}
}
/*
* In the postcopy phase, any device that doesn't know how to
* do postcopy should have saved its state in the _complete
* call that has already run; it might get confused if we call
* iterate afterwards.
*/
if (postcopy && !se->ops->save_live_complete_postcopy) {
continue;
}
if (qemu_file_rate_limit(f)) {
return 0;
}
save_section_header(f, se, QEMU_VM_SECTION_PART);
ret = se->ops->save_live_iterate(f, se->opaque);
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
}
if (ret <= 0) {
/* Do not proceed to the next vmstate before this one reported
completion of the current stage. This serializes the migration
and reduces the probability that a faster changing state is
synchronized over and over again. */
break;
}
}
}
return ret;
}
static void fast_qemu_savevm_state_setup(QEMUFile *f){
SaveStateEntry *se;
int ret;
QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
if(strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")){
if (!se->ops || !se->ops->save_setup) {
continue;
}
if (se->ops && se->ops->is_active) {
if (!se->ops->is_active(se->opaque)) {
continue;
}
}
save_section_header(f, se, QEMU_VM_SECTION_START);
ret = se->ops->save_setup(f, se->opaque);
save_section_footer(f, se);
if (ret < 0) {
qemu_file_set_error(f, ret);
break;
}
}
}
}
static int fast_qemu_savevm_state(QEMUFile *f, Error **errp) {
qemu_savevm_state_header(f);
fast_qemu_savevm_state_setup(f);
while (qemu_file_get_error(f) == 0) {
if (fast_qemu_savevm_state_iterate(f, false) > 0) {
fast_qemu_savevm_state_complete_precopy(f, false);
break;
}
}
return 0;
}
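/* For orientation: the trimmed routine above emits the regular migration
stream minus the RAM/block sections. A sketch of the resulting framing
(section-type names from migration/savevm.c):
QEMU_VM_FILE_MAGIC + version (qemu_savevm_state_header)
QEMU_VM_SECTION_START ... footer (one per handler with save_setup)
QEMU_VM_SECTION_PART ... footer (save_live_iterate rounds)
QEMU_VM_SECTION_END / QEMU_VM_SECTION_FULL (complete_precopy)
QEMU_VM_EOF [+ QEMU_VM_VMDESCRIPTION JSON if should_send_vmdesc()] */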
/* QEMUFile RAM Emulation */
static ssize_t fast_savevm_writev_buffer(void *opaque, struct iovec *iov, int iovcnt, int64_t pos){
struct fast_savevm_opaque_t* self = (struct fast_savevm_opaque_t*)opaque;
ssize_t retval = 0;
for(int i = 0; i < iovcnt; i++){
memcpy((void*)(self->buf + self->pos), iov[i].iov_base, iov[i].iov_len);
self->pos += iov[i].iov_len;
retval += iov[i].iov_len;
}
return retval;
}
static int fast_savevm_fclose_save_to_buffer(void *opaque){
struct fast_savevm_opaque_t* self = (struct fast_savevm_opaque_t*)opaque;
memcpy(self->output_buffer, self->buf, self->pos);
*self->output_buffer_size = self->pos;
//printf("DUMPED: %d\n", *self->output_buffer_size);
return 0;
}
static int fast_loadvm_fclose(void *opaque){
return 0;
}
static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size){
struct fast_savevm_opaque_t* self = (struct fast_savevm_opaque_t*)opaque;
memcpy(buf, (void*)(self->buf + pos), size);
return size;
}
static const QEMUFileOps fast_loadvm_ops = {
.get_buffer = (QEMUFileGetBufferFunc*)fast_loadvm_get_buffer,
.close = (QEMUFileCloseFunc*)fast_loadvm_fclose
};
static const QEMUFileOps fast_savevm_ops_to_buffer = {
.writev_buffer = (QEMUFileWritevBufferFunc*)fast_savevm_writev_buffer,
.close = (QEMUFileCloseFunc*)fast_savevm_fclose_save_to_buffer
};
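/* A minimal round-trip through the two ops tables above -- illustrative
only, not part of the patch; buffer sizes and the payload are arbitrary: */
static void example_qemufile_roundtrip(void)
{
    struct fast_savevm_opaque_t save_opaque = {0}, load_opaque = {0};
    uint32_t out_size = 0;
    uint8_t* scratch = malloc(0x1000);
    uint8_t* out = malloc(0x1000);
    save_opaque.buf = scratch;                   /* staging buffer    */
    save_opaque.output_buffer = out;             /* final destination */
    save_opaque.output_buffer_size = &out_size;
    QEMUFile* wf = qemu_fopen_ops(&save_opaque, &fast_savevm_ops_to_buffer);
    qemu_put_be32(wf, 0xdeadbeef);
    qemu_fclose(wf);           /* flush -> writev_buffer, close -> copy out */
    load_opaque.buf = out;
    QEMUFile* rf = qemu_fopen_ops(&load_opaque, &fast_loadvm_ops);
    assert(qemu_get_be32(rf) == 0xdeadbeef);
    qemu_fclose(rf);
    free(scratch);
    free(out);
}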
nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){
nyx_device_state_t* self = malloc(sizeof(nyx_device_state_t));
memset(self, 0, sizeof(nyx_device_state_t));
self->state_buf = malloc(STATE_BUFFER);
self->state_buf_size = 0;
char* qemu_state_file;
assert(asprintf(&qemu_state_file, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1);
struct fast_savevm_opaque_t fast_savevm_opaque;
FILE* f;
uint8_t ret = global_state_store();
assert(!ret);
/* Testing Stuff */
struct stat buffer;
assert(stat (qemu_state_file, &buffer) == 0);
debug_printf("FILE EXISTS...\n");
assert(buffer.st_size <= STATE_BUFFER);
void* state_buf2 = malloc(STATE_BUFFER);
f = fopen(qemu_state_file, "r");
assert(f != NULL);
assert(fread(state_buf2, buffer.st_size, 1, f) == 1);
fclose(f);
free(qemu_state_file);
fast_savevm_opaque.buf = state_buf2;
fast_savevm_opaque.f = NULL;//fopen("/tmp/qemu_state", "w");
fast_savevm_opaque.pos = 0;
QEMUFile* file_dump = qemu_fopen_ops(&fast_savevm_opaque, &fast_loadvm_ops);
qemu_devices_reset();
qemu_loadvm_state(file_dump);
if(!pre_snapshot){
self->qemu_state = state_reallocation_new(file_dump);
}
free(state_buf2);
if(!pre_snapshot){
enable_fast_snapshot_mode();
save_tsc_value(self, false);
}
return self;
}
nyx_device_state_t* nyx_device_state_init(void){
nyx_device_state_t* self = malloc(sizeof(nyx_device_state_t));
memset(self, 0, sizeof(nyx_device_state_t));
self->state_buf = malloc(STATE_BUFFER);
self->state_buf_size = 0;
Error *local_err = NULL;
struct fast_savevm_opaque_t fast_savevm_opaque, fast_loadvm_opaque;
//state_reallocation_t* qemu_state;
void* tmp_buf = malloc(1024*1024*16);
//memset(self->state_buf, 0, STATE_BUFFER);
fast_savevm_opaque.output_buffer = self->state_buf;
fast_savevm_opaque.output_buffer_size = &self->state_buf_size;
fast_savevm_opaque.buf = tmp_buf;//self->state_buf;
fast_savevm_opaque.f = NULL; //fopen("/tmp/delta", "w");
fast_savevm_opaque.pos = 0;
uint8_t ret = global_state_store();
assert(!ret);
QEMUFile* f = qemu_fopen_ops(&fast_savevm_opaque, &fast_savevm_ops_to_buffer);
ret = fast_qemu_savevm_state(f, &local_err);
//qemu_fflush(f);
fast_loadvm_opaque.buf = tmp_buf; //self->state_buf;
fast_loadvm_opaque.f = NULL;
fast_loadvm_opaque.pos = 0;
QEMUFile* file_dump = qemu_fopen_ops(&fast_loadvm_opaque, &fast_loadvm_ops);
//qemu_mutex_lock_iothread();
//qemu_devices_reset();
self->qemu_state = state_reallocation_new(file_dump);
//qemu_mutex_unlock_iothread();
qemu_fclose(file_dump);
//sleep(1);
qemu_fclose(f);
free(tmp_buf);
enable_fast_snapshot_mode();
save_tsc_value(self, false);
return self;
//return qemu_state;
}
void nyx_device_state_switch_incremental(nyx_device_state_t* self){
self->incremental_mode = true;
fdl_fast_create_tmp(self->qemu_state);
fdl_fast_enable_tmp(self->qemu_state);
}
void nyx_device_state_disable_incremental(nyx_device_state_t* self){
fdl_fast_disable_tmp(self->qemu_state);
self->incremental_mode = false;
}
void nyx_device_state_restore(nyx_device_state_t* self){
fdl_fast_reload(self->qemu_state);
call_fast_change_handlers();
}
void nyx_device_state_post_restore(nyx_device_state_t* self){
set_tsc_value(self, self->incremental_mode);
}
void nyx_device_state_save_tsc(nyx_device_state_t* self){
save_tsc_value(self, false);
}
void nyx_device_state_save_tsc_incremental(nyx_device_state_t* self){
save_tsc_value(self, true);
}
void nyx_device_state_serialize(nyx_device_state_t* self, const char* snapshot_folder){
char* tmp;
assert(asprintf(&tmp, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1);
FILE* f_qemu_state = fopen(tmp, "w+b");
assert(fwrite(self->state_buf, 1, self->state_buf_size, f_qemu_state) == self->state_buf_size);
fclose(f_qemu_state);
}
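/* Putting the API together -- the expected call order during fuzzing, as a
sketch (the snapshot path is a placeholder): */
static void example_device_state_lifecycle(void)
{
    nyx_device_state_t* dev = nyx_device_state_init();  /* root snapshot    */
    nyx_device_state_serialize(dev, "/tmp/snapshot");   /* placeholder path */
    nyx_device_state_switch_incremental(dev);           /* optional overlay */
    /* ... run one fuzzing execution ... */
    nyx_device_state_restore(dev);      /* replay serialized device state   */
    nyx_device_state_post_restore(dev); /* re-arm TSC saved at snapshot     */
}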

View File

@ -0,0 +1,33 @@
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "nyx/snapshot/devices/state_reallocation.h"
typedef struct nyx_device_state_s{
state_reallocation_t* qemu_state;
uint64_t tsc_value;
uint64_t tsc_value_incremental;
bool incremental_mode;
void* state_buf; /* QEMU's serialized state */
uint32_t state_buf_size;
} nyx_device_state_t;
nyx_device_state_t* nyx_device_state_init(void);
nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot);
void nyx_device_state_restore(nyx_device_state_t* self);
void nyx_device_state_post_restore(nyx_device_state_t* self);
void nyx_device_state_switch_incremental(nyx_device_state_t* self);
void nyx_device_state_disable_incremental(nyx_device_state_t* self);
void nyx_device_state_save_tsc(nyx_device_state_t* self);
void nyx_device_state_save_tsc_incremental(nyx_device_state_t* self);
void nyx_device_state_serialize(nyx_device_state_t* self, const char* snapshot_folder);

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,112 @@
/*
Copyright (C) 2017 Sergej Schumilo
This file is part of QEMU-PT (HyperTrash / kAFL).
QEMU-PT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
QEMU-PT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef STATE_REALLOCATION
#define STATE_REALLOCATION
#include "qemu/osdep.h"
#include "monitor/monitor.h"
//#include "qemu-common.h"
#include "migration/migration.h"
#include "nyx/khash.h"
#define IO_BUF_SIZE 32768
struct QEMUFile_tmp {
void *ops;
void *hooks;
void *opaque;
int64_t bytes_xfer;
int64_t xfer_limit;
int64_t pos; /* start of buffer when writing, end of buffer
when reading */
volatile int buf_index;
int buf_size; /* 0 when writing */
uint8_t buf[IO_BUF_SIZE];
};
struct fast_savevm_opaque_t{
FILE* f;
uint8_t* buf;
uint64_t pos;
void* output_buffer;
uint32_t* output_buffer_size;
};
#define REALLOC_SIZE 0x8000
#define PRE_ALLOC_BLOCK_SIZE 0x8000000 /* 128 MB */
typedef struct state_reallocation_tmp_s{
void **copy;
uint32_t fast_state_size;
bool enabled;
} state_reallocation_tmp_t;
typedef struct state_reallocation_s{
void **ptr;
void **copy;
size_t *size;
uint32_t fast_state_size;
uint32_t fast_state_pos;
void **fptr;
void **opaque;
uint32_t *version;
uint32_t fast_state_fptr_size;
uint32_t fast_state_fptr_pos;
void **get_fptr;
void **get_opaque;
size_t *get_size;
void **get_data;
//QEMUFile** file;
uint32_t fast_state_get_fptr_size;
uint32_t fast_state_get_fptr_pos;
/* prevents heap fragmentation and additional 2GB mem usage */
void* pre_alloc_block;
uint32_t pre_alloc_block_offset;
state_reallocation_tmp_t tmp_snapshot;
} state_reallocation_t;
state_reallocation_t* state_reallocation_new(QEMUFile *f);
//void fdl_enumerate_global_states(QEMUFile *f);
void fdl_fast_reload(state_reallocation_t* self);
void fdl_fast_create_tmp(state_reallocation_t* self);
void fdl_fast_enable_tmp(state_reallocation_t* self);
void fdl_fast_disable_tmp(state_reallocation_t* self);
#endif
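/* Why struct QEMUFile_tmp exists: it shadows the private QEMUFile layout
from migration/qemu-file.c so the reallocation pass can read the I/O
buffer in place. A hedged accessor sketch -- the field offsets must stay
in sync with the QEMU version in this tree: */
static inline uint8_t* qemufile_current_byte(QEMUFile* f)
{
    struct QEMUFile_tmp* tf = (struct QEMUFile_tmp*)f;
    return &tf->buf[tf->buf_index];  /* next unread byte in the I/O buffer */
}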

View File

@ -0,0 +1,59 @@
#include <assert.h>
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
VMChangeStateHandler* change_kvm_clock_handler = NULL;
VMChangeStateHandler* change_kvm_pit_handler = NULL;
VMChangeStateHandler* change_cpu_handler = NULL;
void* change_kvm_clock_opaque = NULL;
void* change_kvm_pit_opaque = NULL;
void* change_cpu_opaque = NULL;
VMChangeStateHandler* change_ide_core_handler = NULL;
uint8_t change_ide_core_opaque_num = 0;
void* change_ide_core_opaque[32] = {NULL};
void call_fast_change_handlers(void){
assert(change_kvm_clock_handler && change_kvm_pit_handler && change_cpu_handler);
change_kvm_clock_handler(change_kvm_clock_opaque, 1, RUN_STATE_RUNNING);
change_kvm_pit_handler(change_kvm_pit_opaque, 1, RUN_STATE_RUNNING);
change_cpu_handler(change_cpu_opaque, 1, RUN_STATE_RUNNING);
return;
/* dead code below: replaying the IDE core handlers is currently disabled;
check if this is actually necessary */
if(change_ide_core_handler){
for(uint8_t i = 0; i < change_ide_core_opaque_num; i++){
change_ide_core_handler(change_ide_core_opaque[i], 1, RUN_STATE_RUNNING);
}
}
}
void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id){
switch(id){
case RELOAD_HANDLER_KVM_CLOCK:
change_kvm_clock_handler = cb;
change_kvm_clock_opaque = opaque;
return;
case RELOAD_HANDLER_KVM_PIT:
change_kvm_pit_handler = cb;
change_kvm_pit_opaque = opaque;
return;
case RELOAD_HANDLER_KVM_CPU:
change_cpu_handler = cb;
change_cpu_opaque = opaque;
return;
case RELOAD_HANDLER_IDE_CORE:
change_ide_core_handler = cb;
change_ide_core_opaque[change_ide_core_opaque_num] = opaque;
change_ide_core_opaque_num++;
return;
default:
abort();
}
}
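/* How devices hook in elsewhere in this commit (my_kvmclock_cb is a
hypothetical stand-in for a device's original handler): */
static void my_kvmclock_cb(void* opaque, int running, RunState state)
{
    /* re-arm kvmclock for the resumed guest */
}
static void example_register_handler(void)
{
    add_fast_reload_change_handler(my_kvmclock_cb, NULL, RELOAD_HANDLER_KVM_CLOCK);
}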

View File

@ -0,0 +1,13 @@
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "sysemu/runstate.h"
#define RELOAD_HANDLER_KVM_CLOCK 0
#define RELOAD_HANDLER_KVM_PIT 1
#define RELOAD_HANDLER_KVM_CPU 2
#define RELOAD_HANDLER_IDE_CORE 3
void call_fast_change_handlers(void);
void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id);

31
nyx/snapshot/helper.c Normal file
View File

@ -0,0 +1,31 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/helper.h"
#include "nyx/fast_vm_reload.h"
//#define DEBUG_NYX_SNAPSHOT_HELPER
uint64_t get_ram_size(void){
RAMBlock *block;
uint64_t guest_ram_size = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
guest_ram_size += block->used_length;
#ifdef DEBUG_NYX_SNAPSHOT_HELPER
printf("Block: %s (%lx)\n", block->idstr, block->used_length);
#endif
}
#ifdef DEBUG_NYX_SNAPSHOT_HELPER
printf("%s - guest_ram_size: %lx\n", __func__, guest_ram_size);
#endif
return guest_ram_size;
}

16
nyx/snapshot/helper.h Normal file
View File

@ -0,0 +1,16 @@
#pragma once
#include <stdint.h>
/* don't! */
#define MAX_REGIONS 8
#ifndef PAGE_SIZE
#define PAGE_SIZE 0x1000
#endif
#define BITMAP_SIZE(x) (((x)/PAGE_SIZE)/8)
#define DIRTY_STACK_SIZE(x) (((x)/PAGE_SIZE)*sizeof(uint64_t))
uint64_t get_ram_size(void);
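/* Quick sanity check of the two macros, assuming a 512MB guest: one bit per
4KB page for the bitmap, one 64-bit slot per page for the stack (values
illustrative): */
static inline void example_helper_macro_sizes(void)
{
    uint64_t ram = 512ULL << 20;                     /* 0x20000000       */
    uint64_t bitmap_bytes = BITMAP_SIZE(ram);        /* 0x4000  = 16KB   */
    uint64_t stack_bytes = DIRTY_STACK_SIZE(ram);    /* 0x100000 = 1MB   */
    (void)bitmap_bytes; (void)stack_bytes;
}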

View File

@ -0,0 +1,112 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/fast_vm_reload.h"
/* init operation */
void nyx_snapshot_debug_pre_init(void){
/* TODO */
}
/* init operation */
void nyx_snapshot_debug_init(fast_reload_t* self){
/* TODO */
}
/* enable operation */
void nyx_snapshot_debug_enable(fast_reload_t* self){
/* TODO */
}
/* restore operation */
void nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){
void* current_region = NULL;
int counter = 0;
for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr;
void* snapshot_addr = current_region + addr;
uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;
/* check first if the page is dirty (this is super slow, but quite useful for debugging) */
if(memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)){
/* check if page is not on the block list */
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){
//fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);
if(verbose){
printf("%s -> (phys: 0x%lx) %p <-- %p [%d]\n", __func__, physical_addr, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled);
counter++;
}
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
}
}
}
}
if(verbose){
printf("TOTAL: %d\n", counter);
}
}
void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){
void* current_region = NULL;
for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr;
void* snapshot_addr = current_region + addr;
uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + addr;
/* check first if the page is dirty (this is super slow, but quite useful for debugging) */
if(memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)){
/* check if page is not on the block list */
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){
//fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);
if(verbose && !shadow_memory_is_root_page_tracked(shadow_memory_state, addr, i)){
printf("%s -> %p <-- %p [%d]\n", __func__, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled);
}
shadow_memory_track_dirty_root_pages(shadow_memory_state, addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}
}
}
}
}
/* set operation */
void nyx_snapshot_debug_set(fast_reload_t* self){
/* TODO */
}

View File

@ -0,0 +1,10 @@
#pragma once
#include "nyx/fast_vm_reload.h"
void nyx_snapshot_debug_pre_init(void);
void nyx_snapshot_debug_init(fast_reload_t* self);
void nyx_snapshot_debug_enable(fast_reload_t* self);
void nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose);
void nyx_snapshot_debug_set(fast_reload_t* self);
void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose);

View File

@ -0,0 +1,369 @@
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/snapshot/helper.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include <sys/ioctl.h>
#include <linux/kvm.h>
#define FAST_IN_RANGE(address, start, end) ((address) < (end) && (address) >= (start))
/* dirty ring specific defines */
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_EXIT_DIRTY_RING_FULL 31
#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
#define KVM_CAP_DIRTY_LOG_RING 192
/* global vars */
int dirty_ring_size = 0;
int dirty_ring_max_size_global = 0;
struct kvm_dirty_gfn *kvm_dirty_gfns = NULL; /* dirty ring mmap ptr */
uint32_t kvm_dirty_gfns_index = 0;
uint32_t kvm_dirty_gfns_index_mask = 0;
static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size){
struct kvm_enable_cap cap = { 0 };
cap.cap = KVM_CAP_DIRTY_LOG_RING;
cap.args[0] = ring_size;
int ret = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
if(ret != 0){
printf("[ ] KVM_ENABLE_CAP ioctl failed\n");
return ret;
}
return ring_size;
}
static int check_dirty_ring_size(int kvm_fd, int vm_fd){
int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if(ret < 0 ){
printf("[ ] KVM_CAP_DIRTY_LOG_RING failed (dirty ring not supported?)\n");
exit(1);
}
printf("[*] Max Dirty Ring Size -> %d (Entries: %d)\n", ret, ret/(int)sizeof(struct kvm_dirty_gfn));
uint64_t dirty_ring_max_size = ret; //kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
/* DIRTY RING -> 1MB in size results in 256M trackable memory */
ret = vm_enable_dirty_ring(vm_fd, dirty_ring_max_size);
if(ret < 0 ){
printf("[ ] Enabling dirty ring (size: %ld) failed\n", dirty_ring_max_size);
exit(1);
}
dirty_ring_max_size_global = dirty_ring_max_size;
return ret;
}
static void allocate_dirty_ring(int kvm_vcpu, int vm_fd){
assert(dirty_ring_size);
if (dirty_ring_size) {
kvm_dirty_gfns = mmap(NULL, dirty_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, kvm_vcpu, PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
if (kvm_dirty_gfns == MAP_FAILED) {
printf("[ ] Dirty ring mmap failed!\n");
exit(1);
}
}
printf("[*] Dirty ring mmap region located at %p\n", kvm_dirty_gfns);
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == 0);
}
/* pre_init operation */
void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd){
dirty_ring_size = check_dirty_ring_size(kvm_fd, vm_fd);
}
void nyx_dirty_ring_pre_init(int vcpu_fd, int vm_fd){
allocate_dirty_ring(vcpu_fd, vm_fd); /* the dirty ring is mmap'ed on the vCPU fd */
kvm_dirty_gfns_index = 0;
kvm_dirty_gfns_index_mask = ((dirty_ring_max_size_global/sizeof(struct kvm_dirty_gfn)) - 1);
}
static inline void dirty_ring_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, uint64_t slot, uint64_t gfn){
/* sanity check */
assert((slot&0xFFFF0000) == 0);
slot_t* kvm_region_slot = &self->kvm_region_slots[slot&0xFFFF];
if(test_and_set_bit(gfn, (void*)kvm_region_slot->bitmap) == false){
kvm_region_slot->stack[kvm_region_slot->stack_ptr] = gfn;
kvm_region_slot->stack_ptr++;
}
}
static void dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, int vm_fd){
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;
//fprintf(stderr, "self->kvm_dirty_gfns_index -> %lx\n", kvm_dirty_gfns_index);
while(true){
entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];
if((entry->flags & 0x3) == 0){
break;
}
if((entry->flags & 0x1) == 1){
dirty_ring_collect(self, shadow_memory_state, blocklist, entry->slot, entry->offset);
cleared++;
entry->flags |= 0x2; // reset dirty entry
}
else{
printf("[%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
kvm_dirty_gfns_index++;
}
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
//printf("KVM_RESET_DIRTY_RINGS -> (%d vs %d)\n", ret, cleared);
assert(ret == cleared);
}
static void dirty_ring_flush(int vm_fd){
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;
//printf("self->kvm_dirty_gfns_index -> %lx\n", self->kvm_dirty_gfns_index);
while(true){
entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];
if((entry->flags & 0x3) == 0){
break;
}
if((entry->flags & 0x1) == 1){
cleared++;
entry->flags |= 0x2; // reset dirty entry
}
else{
printf("[%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
kvm_dirty_gfns_index++;
}
int ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
//printf("KVM_RESET_DIRTY_RINGS -> (%d vs %ld)\n", ret, cleared);
assert(ret == cleared);
}
/* init operation */
nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){
nyx_dirty_ring_t* self = malloc(sizeof(nyx_dirty_ring_t));
memset(self, 0, sizeof(nyx_dirty_ring_t));
assert(kvm_state);
KVMMemoryListener *kml = kvm_get_kml(0);
KVMSlot *mem;
//printf("kml -> %p\n", kml);
//printf("MEM-SLOTS -> %d\n", kvm_get_max_memslots());
for (int i = 0; i < kvm_get_max_memslots(); i++) {
mem = &kml->slots[i];
if(mem->start_addr == 0 && mem->memory_size == 0){
break;
}
//printf("[%p] SLOT: %d - start: %lx - size: %lx - flags: %x\n", mem, mem->slot, mem->start_addr, mem->memory_size, mem->flags);
self->kvm_region_slots_num++;
}
/*
for(int i = 0; i < shadow_memory->ram_regions_num; i++){
printf("[%d] base: %lx - size: %lx\n", i, shadow_memory->ram_regions[i].base, shadow_memory->ram_regions[i].size);
}
*/
self->kvm_region_slots = malloc(sizeof(slot_t) * self->kvm_region_slots_num);
memset(self->kvm_region_slots, 0, sizeof(slot_t) * self->kvm_region_slots_num);
for (int i = 0; i < kvm_get_max_memslots(); i++) {
mem = &kml->slots[i];
if(mem->start_addr == 0 && mem->memory_size == 0){
break;
}
self->kvm_region_slots[i].enabled = (mem->flags&KVM_MEM_READONLY) == 0;
self->kvm_region_slots[i].bitmap = malloc(BITMAP_SIZE(mem->memory_size));
self->kvm_region_slots[i].stack = malloc(DIRTY_STACK_SIZE(mem->memory_size));
memset(self->kvm_region_slots[i].bitmap, 0, BITMAP_SIZE(mem->memory_size));
memset(self->kvm_region_slots[i].stack, 0, DIRTY_STACK_SIZE(mem->memory_size));
self->kvm_region_slots[i].bitmap_size = BITMAP_SIZE(mem->memory_size);
self->kvm_region_slots[i].stack_ptr = 0;
if(self->kvm_region_slots[i].enabled){
bool ram_region_found = false;
//printf("SEARCHING %lx %lx\n", mem->start_addr, mem->memory_size);
for(int j = 0; j < shadow_memory->ram_regions_num; j++){
if(FAST_IN_RANGE(mem->start_addr, shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size))){
assert(FAST_IN_RANGE((mem->start_addr+mem->memory_size-1), shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size)));
self->kvm_region_slots[i].region_id = j;
self->kvm_region_slots[i].region_offset = mem->start_addr - shadow_memory->ram_regions[j].base;
ram_region_found = true;
break;
}
}
assert(ram_region_found);
}
}
/*
for(int i = 0; i < self->kvm_region_slots_num; i++){
printf("[%d].enabled = %d\n", i, self->kvm_region_slots[i].enabled);
printf("[%d].bitmap = %p\n", i, self->kvm_region_slots[i].bitmap);
printf("[%d].stack = %p\n", i, self->kvm_region_slots[i].stack);
printf("[%d].stack_ptr = %ld\n", i, self->kvm_region_slots[i].stack_ptr);
if(self->kvm_region_slots[i].enabled){
printf("[%d].region_id = %d\n", i, self->kvm_region_slots[i].region_id);
printf("[%d].region_offset = 0x%lx\n", i, self->kvm_region_slots[i].region_offset);
}
else{
printf("[%d].region_id = -\n", i);
printf("[%d].region_offset = -\n", i);
}
}
*/
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
return self;
}
static void restore_memory(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void* host_addr = NULL;
void* snapshot_addr = NULL;
uint64_t physical_addr = 0;
uint64_t gfn = 0;
uint64_t entry_offset_addr = 0;
for(uint8_t j = 0; j < self->kvm_region_slots_num; j++){
slot_t* kvm_region_slot = &self->kvm_region_slots[j];
if(kvm_region_slot->enabled && kvm_region_slot->stack_ptr){
for(uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++){
gfn = kvm_region_slot->stack[i];
entry_offset_addr = kvm_region_slot->region_offset + (gfn<<12);
physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
continue;
}
host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr;
if(shadow_memory_state->incremental_enabled){
snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr;
}
else{
snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].snapshot_region_ptr + entry_offset_addr;
}
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
clear_bit(gfn, (void*)kvm_region_slot->bitmap);
}
kvm_region_slot->stack_ptr = 0;
}
}
}
static void save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void* host_addr = NULL;
void* incremental_addr = NULL;
uint64_t physical_addr = 0;
uint64_t gfn = 0;
uint64_t entry_offset_addr = 0;
for(uint8_t j = 0; j < self->kvm_region_slots_num; j++){
slot_t* kvm_region_slot = &self->kvm_region_slots[j];
if(kvm_region_slot->enabled && kvm_region_slot->stack_ptr){
for(uint64_t i = 0; i < kvm_region_slot->stack_ptr; i++){
gfn = kvm_region_slot->stack[i];
entry_offset_addr = kvm_region_slot->region_offset + (gfn<<12);
physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
continue;
}
host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr;
incremental_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr;
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, kvm_region_slot->region_id);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
clear_bit(gfn, (void*)kvm_region_slot->bitmap);
}
kvm_region_slot->stack_ptr = 0;
}
}
}
//entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];
void nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
/*
static int perf_counter = 0;
if((perf_counter%1000) == 0){
fprintf(stderr, "perf_counter -> %d\n", perf_counter); //, self->test_total, self->test);
}
perf_counter++;
*/
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
restore_memory(self, shadow_memory_state, blocklist);
}
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
save_root_pages(self, shadow_memory_state, blocklist);
}
/* enable operation */
/* restore operation */
void nyx_snapshot_nyx_dirty_ring_flush(void){
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
}
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
}

View File

@ -0,0 +1,43 @@
#pragma once
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
struct kvm_dirty_gfn {
uint32_t flags;
uint32_t slot;
uint64_t offset;
};
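/* flags is the KVM dirty-ring handshake word used by the flush loops in
nyx_dirty_ring.c: bit 0 is set by KVM when the gfn is published as dirty,
bit 1 is set by userspace once the entry has been harvested, and
flags == 0 marks the end of the published entries. Hedged helpers that
spell out the two operations: */
static inline bool dirty_gfn_is_dirty(const struct kvm_dirty_gfn* e)
{
    return (e->flags & 0x1) != 0;  /* published by KVM                     */
}
static inline void dirty_gfn_mark_harvested(struct kvm_dirty_gfn* e)
{
    e->flags |= 0x2;               /* reclaimed on KVM_RESET_DIRTY_RINGS   */
}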
typedef struct slot_s{
bool enabled; /* set if slot is not marked as read-only */
uint8_t region_id; /* shadow_memory region id */
uint64_t region_offset; /* shadow_memory region offset*/
void* bitmap;
uint64_t bitmap_size; // remove me later
uint64_t* stack;
uint64_t stack_ptr;
} slot_t;
typedef struct nyx_dirty_ring_s{
slot_t* kvm_region_slots;
uint8_t kvm_region_slots_num;
} nyx_dirty_ring_t;
/* must be called before KVM_SET_USER_MEMORY_REGION & KVM_CREATE_VCPU */
void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd);
/* must be called right after KVM_CREATE_VCPU */
void nyx_dirty_ring_pre_init(int vcpu_fd, int vm_fd);
nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory);
void nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_dirty_ring_flush(void);
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
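/* The two comments above imply a strict bring-up order; a sketch with
illustrative fd names (nyx_dirty_ring_pre_init mmaps its first argument,
so it must be the vCPU fd): */
static nyx_dirty_ring_t* example_dirty_ring_bringup(int kvm_fd, int vm_fd,
                                                    int vcpu_fd,
                                                    shadow_memory_t* shadow)
{
    nyx_dirty_ring_early_init(kvm_fd, vm_fd);  /* enable the ring capability  */
    /* ... KVM_CREATE_VCPU / KVM_SET_USER_MEMORY_REGION happen here ... */
    nyx_dirty_ring_pre_init(vcpu_fd, vm_fd);   /* mmap the ring + first reset */
    return nyx_dirty_ring_init(shadow);        /* map KVM slots to regions    */
}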

View File

@ -0,0 +1,345 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "nyx/memory_access.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
/* debug option for the FDL constructor */
//#define DEBUG_VMX_FDL_ALLOC
/* additional output to debug the FDL restore operation */
//#define SHOW_NUM_DIRTY_PAGES
/* option to include restore of VRAM memory */
//#define RESET_VRAM
//#define DEBUG_FDL_VRAM
nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
static bool fdl_created = false;
assert(fdl_created == false); /* not sure if we're able to create another FDL instance -> probably not */
fdl_created = true;
nyx_fdl_t* self = malloc(sizeof(nyx_fdl_t));
memset(self, 0, sizeof(nyx_fdl_t));
int ret;
CPUState* cpu = qemu_get_cpu(0);
kvm_cpu_synchronize_state(cpu);
struct fdl_conf configuration;
assert(kvm_state);
self->vmx_fdl_fd = kvm_vm_ioctl(kvm_state, KVM_VMX_FDL_SETUP_FD, (unsigned long)0);
configuration.num = 0;
//memset(&self->fdl_data2, 0, sizeof(struct fdl_data_t2));
for(uint8_t i = 0; i < shadow_memory->ram_regions_num; i++){
configuration.areas[configuration.num].base_address = shadow_memory->ram_regions[i].base; // block->mr->addr;
configuration.areas[configuration.num].size = shadow_memory->ram_regions[i].size; //MEM_SPLIT_START; //block->used_length;
configuration.num++;
}
ret = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_SET, &configuration);
assert(ret == 0);
#ifdef DEBUG_VMX_FDL_ALLOC
printf("KVM_VMX_FDL_SET: %d\n", ret);
printf("configuration.mmap_size = 0x%lx\n", configuration.mmap_size);
for(uint8_t i = 0; i < configuration.num; i++){
printf("configuration.areas[%d].mmap_bitmap_offset = 0x%lx\n", i, configuration.areas[i].mmap_bitmap_offset);
printf("configuration.areas[%d].mmap_stack_offset = 0x%lx\n", i, configuration.areas[i].mmap_stack_offset);
}
#endif
self->vmx_fdl_mmap = mmap(NULL, configuration.mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, self->vmx_fdl_fd, 0);
assert(self->vmx_fdl_mmap != MAP_FAILED);
for(uint8_t i = 0; i < configuration.num; i++){
self->entry[i].stack = self->vmx_fdl_mmap + configuration.areas[i].mmap_stack_offset;
self->entry[i].bitmap = self->vmx_fdl_mmap + configuration.areas[i].mmap_bitmap_offset;
#ifdef DEBUG_VMX_FDL_ALLOC
printf("fdl_stacks[%d] -> %p\n", i, self->entry[i].stack);
printf("fdl_bitmaps[%d] -> %p\n", i, self->entry[i].bitmap);
#endif
}
self->num = configuration.num;
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
ret = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
#ifdef DEBUG_VMX_FDL_ALLOC
printf("result: %d\n", result.num);
for(uint8_t i = 0; i < result.num; i++){
printf("result.values[%d]: %ld\n", i, result.values[i]);
}
#endif
return self;
}
/* TODO? */
static void nyx_snapshot_nyx_fdl_unset_blocklisted_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
for(uint32_t i = 0; i < blocklist->pages_num; i++){
//cpu_physical_memory_test_and_clear_dirty(base_offset+self->black_list_pages[i], TARGET_PAGE_SIZE, DIRTY_MEMORY_MIGRATION);
if(blocklist->pages[i] >= MEM_SPLIT_START){
uint64_t offset_addr = blocklist->pages[i]-MEM_SPLIT_START;
//fprintf(stderr, "%s: %lx -> %lx\n", __func__, self->black_list_pages[i], offset_addr);
//abort();
clear_bit((long)offset_addr>>12, (unsigned long *)self->entry[1].bitmap);
//clear_bit((long)offset_addr>>12, (unsigned long *)self->fdl_data2.entry[1].fdl_user_bitmap);
}
else{
uint64_t offset_addr = blocklist->pages[i];
clear_bit((long)offset_addr>>12, (unsigned long *)self->entry[0].bitmap);
//clear_bit((long)offset_addr>>12, (unsigned long *)self->fdl_data2.entry[0].fdl_user_bitmap);
}
}
}
#define MEMSET_BITMAP
#ifdef MEMSET_BITMAP
static void nyx_snapshot_nyx_fdl_restore_new(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void* current_region = NULL;
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
assert(!res);
//nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist);
for(uint8_t i = 0; i < result.num; i++){
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
#endif
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t j = 0; j < result.values[i]; j++){
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* snapshot_addr = current_region + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
#ifdef DEBUG_VERFIY_BITMAP
if(!is_black_listed_addr(self, entry_offset_addr)){
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
abort();
}
#endif
continue; // blacklisted page
}
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
}
}
#ifdef RESET_VRAM
//nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state);
#endif
}
#endif
/* restore operation */
void nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
/* not sure which one is faster -> benchmark ASAP */
#ifdef MEMSET_BITMAP
nyx_snapshot_nyx_fdl_restore_new(self, shadow_memory_state, blocklist);
#else
nyx_snapshot_nyx_fdl_restore_old(self, shadow_memory_state, blocklist);
#endif
}
/*
void nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void* current_region = NULL;
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
assert(!res);
//nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist);
for(uint8_t i = 0; i < result.num; i++){
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
#endif
if(shadow_memory_state->tmp_snapshot.enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t j = 0; j < result.values[i]; j++){
uint64_t physical_addr = self->fdl_data2.entry[i].fdl_stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* snapshot_addr = current_region + entry_offset_addr;
// optimize this
if(test_and_clear_bit((long)(entry_offset_addr>>12), (unsigned long*)self->fdl_data2.entry[i].fdl_bitmap) == 0 && snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
#ifdef DEBUG_VERFIY_BITMAP
if(!is_black_listed_addr(self, entry_offset_addr)){
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
abort();
}
#endif
printf("SKIP\n");
continue; // blacklisted page
}
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
}
}
#ifdef RESET_VRAM
//nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state);
#endif
}
*/
/*
void nyx_snapshot_nyx_fdl_restore2(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist);
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
assert(!res);
for(uint8_t i = 0; i < result.num; i++){
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
#endif
for(uint64_t j = 0; j < result.values[i]; j++){
uint64_t addr = self->fdl_data2.entry[i].fdl_stack[j];
uint64_t offset_addr = addr - self->shadow_memory_state[i].base;
if(test_and_clear_bit((long)(offset_addr>>12), (unsigned long*)self->fdl_data2.entry[i].fdl_bitmap) == 0){
#ifdef DEBUG_VERFIY_BITMAP
if(!is_black_listed_addr(self, offset_addr)){
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, offset_addr);
abort();
}
#endif
continue; // blacklisted page
}
//assert(test_and_clear_bit(offset_addr>>12, fdl_data2.entry[i].fdl_bitmap));
//fdl_data2.entry[i].fdl_bitmap[(offset_addr/0x1000)/8] = 0;
//printf("DIRTY -> 0x%lx [BITMAP: %d] [%d]\n", addr, fdl_data2.entry[i].fdl_bitmap[(offset_addr/0x1000)/8], test_bit(offset_addr>>12, fdl_data2.entry[i].fdl_bitmap));
if(shadow_memory_state->incremental_enabled){
//memcpy((void*)(fdl_data2.entry[i].host_ptr+offset_addr), (void*)(self->tmp_snapshot.shadow_memory[i]+offset_addr), TARGET_PAGE_SIZE);
memcpy((void*)(self->fdl_data2.entry[i].host_ptr+offset_addr), (void*)(self->fdl_data2.entry[i].tmp_shadow_ptr+offset_addr), TARGET_PAGE_SIZE);
}
else{
memcpy((void*)(self->fdl_data2.entry[i].host_ptr+offset_addr), (void*)(self->fdl_data2.entry[i].shadow_ptr+offset_addr), TARGET_PAGE_SIZE);
}
}
}
#ifdef RESET_VRAM
//nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state);
#endif
}
*/
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
assert(!res);
//nyx_snapshot_nyx_fdl_unset_blocklisted_pages(self, shadow_memory_state, blocklist);
for(uint8_t i = 0; i < result.num; i++){
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
#endif
for(uint64_t j = 0; j < result.values[i]; j++){
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr;
//void* snapshot_addr = shadow_memory_state->ram_regions[i].snapshot_region_ptr + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
#ifdef DEBUG_VERFIY_BITMAP
if(!is_black_listed_addr(self, entry_offset_addr)){
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
abort();
}
#endif
//printf("SKIP\n");
continue; // blacklisted page
}
//printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr);
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}
}
}

View File

@ -0,0 +1,59 @@
#pragma once
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#define STATE_BUFFER 0x8000000 /* up to 128MB */
#define USER_FDL_SLOTS 0x400000 /* fix this later */
#define KVM_VMX_FDL_SETUP_FD _IO(KVMIO, 0xe5)
#define KVM_VMX_FDL_SET _IOW(KVMIO, 0xe6, __u64)
#define KVM_VMX_FDL_FLUSH _IO(KVMIO, 0xe7)
#define KVM_VMX_FDL_GET_INDEX _IOR(KVMIO, 0xe8, __u64)
#define FAST_IN_RANGE(address, start, end) ((address) < (end) && (address) >= (start))
#define FDL_MAX_AREAS 8
struct fdl_area{
uint64_t base_address;
uint64_t size;
uint64_t mmap_bitmap_offset;
uint64_t mmap_stack_offset;
uint64_t mmap_bitmap_size;
uint64_t mmap_stack_size;
};
struct fdl_conf{
uint8_t num;
uint64_t mmap_size;
struct fdl_area areas[FDL_MAX_AREAS];
};
struct fdl_result{
uint8_t num;
uint64_t values[FDL_MAX_AREAS];
};
typedef struct nyx_fdl_s{
/* vmx_fdl file descriptor */
int vmx_fdl_fd;
/* mmap mapping of fdl data -> might be useful for destructor */
void* vmx_fdl_mmap;
struct {
uint64_t* stack;
uint8_t* bitmap;
}entry[FDL_MAX_AREAS];
uint8_t num;
}nyx_fdl_t;
nyx_fdl_t* nyx_fdl_init(shadow_memory_t* self);
void nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);

View File

@ -0,0 +1,73 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#define REALLOC_SIZE 0x8000
//#define DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST
snapshot_page_blocklist_t* snapshot_page_blocklist_init(void){
snapshot_page_blocklist_t* self = malloc(sizeof(snapshot_page_blocklist_t));
uint64_t ram_size = get_ram_size();
//printf("%s: ram_size: 0x%lx\n", __func__, ram_size);
self->phys_area_size = ram_size <= MEM_SPLIT_START ? ram_size : ram_size + (MEM_SPLIT_END-MEM_SPLIT_START);
//printf("%s: phys_area_size: 0x%lx\n", __func__, self->phys_area_size);
self->phys_bitmap = malloc(BITMAP_SIZE(self->phys_area_size));
memset(self->phys_bitmap, 0x0, BITMAP_SIZE(self->phys_area_size));
if(ram_size > MEM_SPLIT_START){
memset(self->phys_bitmap+BITMAP_SIZE(MEM_SPLIT_START), 0xff, BITMAP_SIZE((MEM_SPLIT_END-MEM_SPLIT_START)));
}
self->pages_num = 0;
self->pages_size = REALLOC_SIZE;
self->pages = malloc(sizeof(uint64_t) * REALLOC_SIZE);
return self;
}
void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr){
if(phys_addr == (uint64_t)-1){
fprintf(stderr, "ERROR %s: phys_addr=%lx\n", __func__, phys_addr);
return;
}
assert(self != NULL);
assert(phys_addr < self->phys_area_size);
if(self->pages_num >= self->pages_size){
self->pages_size += REALLOC_SIZE;
self->pages = realloc(self->pages, sizeof(uint64_t) * self->pages_size);
}
self->pages[self->pages_num] = phys_addr;
self->pages_num++;
/* check if bit is empty */
assert(test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) == 0);
/* set bit for lookup */
set_bit(phys_addr>>12, (unsigned long *)self->phys_bitmap);
#ifdef DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST
printf("%s: %lx\n", __func__, phys_addr);
#endif
}
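/* Usage sketch (0xb8000, the VGA text page, is just an illustrative
physical address): */
static void example_blocklist_usage(void)
{
    snapshot_page_blocklist_t* bl = snapshot_page_blocklist_init();
    snapshot_page_blocklist_add(bl, 0xb8000);
    assert(snapshot_page_blocklist_check_phys_addr(bl, 0xb8000) == true);
    assert(snapshot_page_blocklist_check_phys_addr(bl, 0xb9000) == false);
}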

View File

@ -0,0 +1,35 @@
#pragma once
#include <stdint.h>
#include <stdbool.h>
#include "nyx/snapshot/memory/shadow_memory.h"
typedef struct snapshot_page_blocklist_s{
/* total number of blocklisted page frames */
uint64_t pages_num;
/* lookup array */
uint64_t* pages;
/* current size of our array */
uint64_t pages_size;
/* lookup bitmap of guest's physical memory layout (PCI-area between 3GB-4GB is set by default) */
uint8_t* phys_bitmap;
/* area of guest's physical memory (including RAM + PCI-hole) */
uint64_t phys_area_size;
}snapshot_page_blocklist_t;
//snapshot_page_blocklist_t* snapshot_page_blocklist_init(shadow_memory_t* snapshot);
void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr);
/* returns true if phys_addr is on the blocklist */
static inline bool snapshot_page_blocklist_check_phys_addr(snapshot_page_blocklist_t* self, uint64_t phys_addr){
return phys_addr < self->phys_area_size && test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) != 0;
}
snapshot_page_blocklist_t* snapshot_page_blocklist_init(void);

View File

@ -0,0 +1,195 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "target/i386/cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "nyx/memory_access.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
/* debug option */
//#define DEBUG_USER_FDL
/* init operation */
nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state){
nyx_fdl_user_t* self = malloc(sizeof(nyx_fdl_user_t));
memset(self, 0, sizeof(nyx_fdl_user_t));
/* get rid of that? */
self->num = shadow_memory_state->ram_regions_num;
for(uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++){
self->entry[i].stack = malloc(DIRTY_STACK_SIZE(shadow_memory_state->ram_regions[i].size));
self->entry[i].bitmap = malloc(BITMAP_SIZE(shadow_memory_state->ram_regions[i].size));
}
//printf("%s -> %p\n", __func__, self);
return self;
}
/* enable operation */
void nyx_fdl_user_enable(nyx_fdl_user_t* self){
assert(self);
self->enabled = true;
}
static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t* self){
if(self){
for(uint8_t i = 0; i < self->num; i++){
self->entry[i].pos = 0;
}
}
}
/* reset operation */
void nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
if(self){
void* current_region = NULL;
for(uint8_t i = 0; i < self->num; i++){
#ifdef DEBUG_USER_FDL
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10);
#endif
if(shadow_memory_state->incremental_enabled){
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}
for(uint64_t j = 0; j < self->entry[i].pos; j++){
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* snapshot_addr = current_region + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
continue;
}
#ifdef DEBUG_USER_FDL
printf("%s -> %p <-- %p\n", __func__, host_addr, snapshot_addr);
#endif
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
}
}
}
nyx_snapshot_user_fdl_reset(self);
}
/* set operation (mark pf as dirty) */
void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length){
if(length < 0x1000){
length = 0x1000; /* round sub-page writes up to a full page */
}
if(self && self->enabled){
uint8_t ram_area = 0xff;
/* optimize this? */
addr = ram_offset_to_address(addr);
/* intentional fall-through: probe RAM regions from the highest index down */
switch(MAX_REGIONS-shadow_memory_state->ram_regions_num){
case 0:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[7].base, shadow_memory_state->ram_regions[7].base+(shadow_memory_state->ram_regions[7].size-1)) ? 7 : ram_area;
case 1:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[6].base, shadow_memory_state->ram_regions[6].base+(shadow_memory_state->ram_regions[6].size-1)) ? 6 : ram_area;
case 2:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[5].base, shadow_memory_state->ram_regions[5].base+(shadow_memory_state->ram_regions[5].size-1)) ? 5 : ram_area;
case 3:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[4].base, shadow_memory_state->ram_regions[4].base+(shadow_memory_state->ram_regions[4].size-1)) ? 4 : ram_area;
case 4:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[3].base, shadow_memory_state->ram_regions[3].base+(shadow_memory_state->ram_regions[3].size-1)) ? 3 : ram_area;
case 5:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[2].base, shadow_memory_state->ram_regions[2].base+(shadow_memory_state->ram_regions[2].size-1)) ? 2 : ram_area;
case 6:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[1].base, shadow_memory_state->ram_regions[1].base+(shadow_memory_state->ram_regions[1].size-1)) ? 1 : ram_area;
case 7:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[0].base, shadow_memory_state->ram_regions[0].base+(shadow_memory_state->ram_regions[0].size-1)) ? 0 : ram_area;
default:
break;
}
//ram_area = FAST_IN_RANGE(addr, fdl_data2.entry[0].base, fdl_data2.entry[0].base+(fdl_data2.entry[0].size-1)) ? 0 : ram_area;
if(ram_area == 0xff){
printf("ERROR: %s %lx [%d]\n", __func__, addr, ram_area);
abort();
}
for(uint64_t offset = 0; offset < length; offset+=0x1000){
uint64_t current_addr = (addr+offset) & 0xFFFFFFFFFFFFF000;
long pfn = (long) ((current_addr-shadow_memory_state->ram_regions[ram_area].base)>>12);
assert(self->entry[ram_area].bitmap);
/* todo -> better handling of nyx_fdl_state */
if(!test_bit(pfn, (const unsigned long*)self->entry[ram_area].bitmap)){
set_bit(pfn, (unsigned long*)self->entry[ram_area].bitmap);
self->entry[ram_area].stack[self->entry[ram_area].pos] = current_addr & 0xFFFFFFFFFFFFF000;
self->entry[ram_area].pos++;
#ifdef DEBUG_USER_FDL
printf("USER DIRTY -> 0x%lx\n", current_addr & 0xFFFFFFFFFFFFF000);
#endif
}
}
}
}
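/* Expected caller pattern (hypothetical names): after QEMU itself writes
into guest RAM, e.g. on DMA completion, the written range is reported so
the next restore copies those pages back. addr is a ram_addr_t offset;
nyx_fdl_user_set translates it via ram_offset_to_address() above. */
static void example_report_dma_write(nyx_fdl_user_t* user_fdl,
                                     shadow_memory_t* shadow,
                                     nyx_fdl_t* fdl,
                                     uint64_t addr, uint64_t len)
{
    nyx_fdl_user_set(user_fdl, shadow, fdl, addr, len);
}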
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
for(uint8_t i = 0; i < self->num; i++){
#ifdef DEBUG_USER_FDL
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10);
#endif
for(uint64_t j = 0; j < self->entry[i].pos; j++){
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr;
if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
printf("%s: 0x%lx is dirty\n", __func__, physical_addr);
continue;
}
#ifdef DEBUG_USER_FDL
printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr);
#endif
//printf("%s -> %p <-- %p\n", __func__, incremental_addr, host_addr);
clear_bit(entry_offset_addr>>12, (void*)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}
}
nyx_snapshot_user_fdl_reset(self);
}

View File

@ -0,0 +1,25 @@
#pragma once
#include <stdint.h>
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
typedef struct nyx_fdl_user_s{
struct {
uint64_t* stack;
uint8_t* bitmap;
uint64_t pos;
}entry[MAX_REGIONS];
uint8_t num;
bool enabled;
}nyx_fdl_user_t;
nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state);
void nyx_fdl_user_enable(nyx_fdl_user_t* self);
void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length);
void nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);

View File

@ -0,0 +1,414 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "nyx/debug.h"
#include "nyx/memory_access.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/helper.h"
typedef struct fast_reload_dump_head_s{
uint32_t shadow_memory_regions;
uint32_t ram_region_index; // remove
} fast_reload_dump_head_t;
typedef struct fast_reload_dump_entry_s{
uint64_t shadow_memory_offset;
char idstr[256];
} fast_reload_dump_entry_t;
static void shadow_memory_set_incremental_ptrs(shadow_memory_t* self){
for(uint8_t i = 0; i < self->ram_regions_num; i++){
self->ram_regions[i].incremental_region_ptr = self->incremental_ptr + self->ram_regions[i].offset;
}
}
static void shadow_memory_pre_alloc_incremental(shadow_memory_t* self){
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0);
shadow_memory_set_incremental_ptrs(self);
}
static void shadow_memory_init_generic(shadow_memory_t* self){
self->root_track_pages_num = 0;
self->root_track_pages_size = 32 << 10;
self->root_track_pages_stack = malloc(sizeof(uint64_t)*self->root_track_pages_size);
shadow_memory_pre_alloc_incremental(self);
self->incremental_enabled = false;
}
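/* The incremental snapshot is a MAP_PRIVATE view of the sealed root-snapshot
memfd: reads fall through to the root snapshot, writes trigger copy-on-write
into anonymous pages. A standalone sketch of the same trick (error handling
omitted): */
static void example_memfd_cow(void)
{
    int fd = memfd_create("root", MFD_CLOEXEC | MFD_ALLOW_SEALING);
    assert(!ftruncate(fd, 0x1000));
    char* root = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    char* incr = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
    root[0] = 'A';           /* lands in the shared root snapshot     */
    assert(incr[0] == 'A');  /* the private view still reads through  */
    incr[0] = 'B';           /* CoW: only the private mapping changes */
    assert(root[0] == 'A');
}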
shadow_memory_t* shadow_memory_init(void){
RAMBlock *block;
RAMBlock* block_array[10];
void* snapshot_ptr_offset_array[10];
shadow_memory_t* self = malloc(sizeof(shadow_memory_t));
memset(self, 0x0, sizeof(shadow_memory_t));
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
self->memory_size += block->used_length;
}
self->snapshot_ptr_fd = memfd_create("in_memory_root_snapshot", MFD_CLOEXEC | MFD_ALLOW_SEALING);
assert(!ftruncate(self->snapshot_ptr_fd, self->memory_size));
fcntl(self->snapshot_ptr_fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
//printf("MMAP -> 0x%lx\n", self->memory_size);
self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE , MAP_SHARED , self->snapshot_ptr_fd, 0);
madvise(self->snapshot_ptr, self->memory_size, MADV_RANDOM | MADV_MERGEABLE);
QEMU_PT_PRINTF(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx", self->snapshot_ptr, self->memory_size);
uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
QEMU_PT_PRINTF(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
//printf("%lx %lx %lx\t%s\t%p\n", block->offset, block->used_length, block->max_length, block->idstr, block->host);
block_array[i] = block;
memcpy(self->snapshot_ptr+offset, block->host, block->used_length);
snapshot_ptr_offset_array[i++] = self->snapshot_ptr+offset;
offset += block->used_length;
regions_num++;
}
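	/*
	 * Build the ram_regions[] table for all writable blocks. A large
	 * low-memory block is split at MEM_SPLIT_START into two entries -- the
	 * second one based at MEM_SPLIT_END -- which (assumption) models the PCI
	 * hole below 4 GiB separating two guest-physical ranges that are backed
	 * by the same RAM block.
	 */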
for(uint8_t i = 0; i < regions_num; i++){
block = block_array[i];
if(!block->mr->readonly){
if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
self->ram_regions_num++;
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].size = block->used_length-MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START;
//self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
else{
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
self->ram_regions_num++;
}
}
shadow_memory_init_generic(self);
return self;
}
shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){
RAMBlock *block;
RAMBlock* block_array[10];
void* snapshot_ptr_offset_array[10];
shadow_memory_t* self = malloc(sizeof(shadow_memory_t));
memset(self, 0x0, sizeof(shadow_memory_t));
/* count total memory size */
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
self->memory_size += block->used_length;
}
/* count number of ram regions */
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if(!block->mr->readonly){
if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){
self->ram_regions_num++;
}
self->ram_regions_num++;
}
}
char* path_meta;
char* path_dump;
assert(asprintf(&path_meta, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1);
assert(asprintf(&path_dump, "%s/fast_snapshot.mem_dump", snapshot_folder) != -1);
fast_reload_dump_head_t head;
FILE* file_mem_meta = fopen (path_meta, "r");
assert(file_mem_meta != NULL);
assert(fread(&head, sizeof(fast_reload_dump_head_t), 1, file_mem_meta) == 1);
fclose(file_mem_meta);
if(self->ram_regions_num != head.shadow_memory_regions){
fprintf(stderr, "Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n", self->ram_regions_num, head.shadow_memory_regions);
exit(1);
}
//printf("LOAD -> self->ram_regions_num: %d\n", self->ram_regions_num);
FILE* file_mem_dump = fopen (path_dump, "r");
assert(file_mem_dump != NULL);
fseek(file_mem_dump, 0L, SEEK_END);
uint64_t file_mem_dump_size = ftell(file_mem_dump);
	debug_fprintf(stderr, "guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n", self->memory_size, file_mem_dump_size, path_dump);
#define VGA_SIZE (16<<20)
if(self->memory_size != file_mem_dump_size){
if (file_mem_dump_size >= VGA_SIZE){
fprintf(stderr, "ERROR: guest size should be %ld MB - set it to %ld MB\n", (file_mem_dump_size-VGA_SIZE)>>20, (self->memory_size-VGA_SIZE)>>20);
exit(1);
}
else{
fprintf(stderr, "ERROR: guest size: %ld bytes\n", file_mem_dump_size);
exit(1);
}
}
	assert(self->memory_size == file_mem_dump_size);
fseek(file_mem_dump, 0L, SEEK_SET);
fclose(file_mem_dump);
self->snapshot_ptr_fd = open(path_dump, O_RDONLY);
//printf("self->snapshot_ptr_fd: %d\n", self->snapshot_ptr_fd);
self->snapshot_ptr = mmap(0, self->memory_size, PROT_READ, MAP_SHARED, self->snapshot_ptr_fd, 0);
//printf("TRY TO MMAP : %p\n", self->snapshot_ptr);
assert(self->snapshot_ptr != (void*)-1);
madvise(self->snapshot_ptr, self->memory_size, MADV_MERGEABLE);
uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
QEMU_PT_PRINTF(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
//printf("%lx %lx %lx\t%s\t%p\n", block->offset, block->used_length, block->max_length, block->idstr, block->host);
block_array[i] = block;
snapshot_ptr_offset_array[i++] = self->snapshot_ptr+offset;
offset += block->used_length;
regions_num++;
}
self->ram_regions_num = 0;
for(uint8_t i = 0; i < regions_num; i++){
block = block_array[i];
if(!block->mr->readonly){
if(self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START){
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
self->ram_regions_num++;
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].size = block->used_length-MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START;
//self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
else{
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}
self->ram_regions_num++;
}
}
/* memcpy version */
/*
for(uint8_t i = 0; i < self->ram_regions_num; i++){
void* host_addr = self->ram_regions[i].host_region_ptr + 0;
void* snapshot_addr = self->ram_regions[i].snapshot_region_ptr + 0;
memcpy(host_addr, snapshot_addr, self->ram_regions[i].size);
}
*/
/* munmap + mmap version */
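	/*
	 * Rather than copying the snapshot into guest RAM, each region's host
	 * mapping is replaced in place: mmap() with MAP_FIXED | MAP_PRIVATE
	 * installs a CoW view of the snapshot file at the old address, turning
	 * the restore into a page-table operation instead of a full memcpy.
	 */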
for(uint8_t i = 0; i < self->ram_regions_num; i++){
void* host_addr = self->ram_regions[i].host_region_ptr + 0;
		assert(munmap(host_addr, self->ram_regions[i].size) == 0);
assert(mmap(host_addr, self->ram_regions[i].size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_FIXED, self->snapshot_ptr_fd, self->ram_regions[i].offset) != MAP_FAILED);
}
shadow_memory_init_generic(self);
return self;
}
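/*
 * Pages dirtied through the MAP_PRIVATE incremental view accumulate as
 * private anonymous memory; dropping and re-creating the mapping every
 * RESTORE_RATE restores hands that memory back to the kernel.
 */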
void shadow_memory_prepare_incremental(shadow_memory_t* self){
static int count = 0;
if(count >= RESTORE_RATE){
count = 0;
munmap(self->incremental_ptr, self->memory_size);
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0);
shadow_memory_set_incremental_ptrs(self);
}
count++;
}
void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental){
self->incremental_enabled = incremental;
}
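/*
 * Each stack entry packs a page-aligned address into the upper bits and the
 * ram_regions[] slot index into the low 12 bits (page alignment leaves those
 * bits free, and there are at most 10 regions).
 */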
void shadow_memory_restore_memory(shadow_memory_t* self){
rcu_read_lock();
uint8_t slot = 0;
uint64_t addr = 0;
for(uint64_t i = 0; i < self->root_track_pages_num; i++){
addr = self->root_track_pages_stack[i] & 0xFFFFFFFFFFFFF000;
slot = self->root_track_pages_stack[i] & 0xFFF;
memcpy(self->ram_regions[slot].host_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE);
memcpy(self->ram_regions[slot].incremental_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE);
}
self->root_track_pages_num = 0;
rcu_read_unlock();
}
/* only used in debug mode -> no need to be fast */
bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot){
uint64_t value = (address & 0xFFFFFFFFFFFFF000) | slot;
for(uint64_t i = 0; i < self->root_track_pages_num; i++){
if(self->root_track_pages_stack[i] == value){
return true;
}
}
return false;
}
void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder){
char* tmp1;
char* tmp2;
assert(asprintf(&tmp1, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1);
assert(asprintf(&tmp2, "%s/fast_snapshot.mem_dump", snapshot_folder) != -1);
FILE* file_mem_meta = fopen(tmp1, "w+b");
FILE* file_mem_data = fopen(tmp2, "w+b");
//} FILE* file_ptr_meta, FILE* file_ptr_data){
//assert(self);
//assert(file_ptr_meta);
//assert(file_ptr_data);
/*
debug_printf("black_list_pages_num: %lx\n", self->black_list_pages_num);
debug_printf("black_list_pages_size: %lx\n", self->black_list_pages_size);
debug_printf("black_list_pages ...\n");
for (uint64_t i = 0; i < self->black_list_pages_num; i++ ){
debug_printf("self->black_list_pages[%ld] = %lx\n", i, self->black_list_pages[i]);
}
*/
//printf("shadow_memory_regions: %d\n", self->ram_regions_num);
//debug_printf("ram_region_index: %d\n", self->ram_region_index);
/*
for (uint32_t i = 0; i < self->ram_regions_num; i++){
printf("self->shadow_memory[%d] = %lx %s\n", i, self->ram_regions[i].base, self->ram_regions[i].idstr);
}
printf("ram_size: %lx\n", self->memory_size);
*/
fast_reload_dump_head_t head;
fast_reload_dump_entry_t entry;
head.shadow_memory_regions = self->ram_regions_num;
	head.ram_region_index = 0; /* kept for legacy reasons */
fwrite(&head, sizeof(fast_reload_dump_head_t), 1, file_mem_meta);
for (uint64_t i = 0; i < self->ram_regions_num; i++){
memset(&entry, 0x0, sizeof(fast_reload_dump_entry_t));
entry.shadow_memory_offset = (uint64_t)self->ram_regions[i].offset;
strncpy((char*)&entry.idstr, (const char*)self->ram_regions[i].idstr, 255);
fwrite(&entry, sizeof(fast_reload_dump_entry_t), 1, file_mem_meta);
}
fwrite(self->snapshot_ptr, self->memory_size, 1, file_mem_data);
fclose(file_mem_meta);
fclose(file_mem_data);
}
bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size){
assert(size == 0x1000 && (address & 0xFFFULL) == 0); /* remove this limitation later */
if(address < self->memory_size){
for(uint8_t i = 0; i < self->ram_regions_num; i++){
if(address >= self->ram_regions[i].base && address < (self->ram_regions[i].base + self->ram_regions[i].size)){
void* snapshot_ptr = self->ram_regions[i].snapshot_region_ptr + (address-self->ram_regions[i].base);
memcpy(ptr, snapshot_ptr, size);
return true;
}
}
}
return false;
}
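Taken together, the functions above form the root-snapshot lifecycle. A minimal sketch of one restore cycle; the serialization path and the dirty-page reporting shown here are illustrative, not prescribed by this file:
static void example_snapshot_cycle(void){
	shadow_memory_t* shadow = shadow_memory_init();   /* or shadow_memory_init_from_snapshot(folder, false) */
	shadow_memory_switch_snapshot(shadow, true);      /* execute on the incremental CoW view */
	/* ... guest runs; each newly dirtied root page is reported via
	 *     shadow_memory_track_dirty_root_pages(shadow, addr, slot); ... */
	shadow_memory_restore_memory(shadow);             /* roll all tracked pages back */
	shadow_memory_prepare_incremental(shadow);        /* periodically re-mmap the CoW view */
	shadow_memory_serialize(shadow, "/tmp/snapshot"); /* illustrative path */
}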

View File

@ -0,0 +1,92 @@
#pragma once
#include <stdint.h>
#include "nyx/snapshot/devices/state_reallocation.h"
/* munmap & mmap incremental snapshot area after RESTORE_RATE restores to avoid high memory pressure */
#define RESTORE_RATE 2000
typedef struct ram_region_s{
/* simple numeric identifier
 * (can be the same for multiple regions if the memory is
 * actually split across different bases in guest memory
 * but backed by the same mapping)
 */
uint8_t ram_region;
/* base in the guest's physical address space */
uint64_t base;
/* size of this region */
uint64_t size;
/* offset of this region within the snapshot mmap (unrelated to the guest-physical layout) */
uint64_t offset;
/* pointer to the actual mmap region used by KVM */
void* host_region_ptr;
/* pointer to the snapshot mmap + offset */
void* snapshot_region_ptr;
/* pointer to the incremental CoW mmap + offset */
void* incremental_region_ptr;
char* idstr;
} ram_region_t;
typedef struct shadow_memory_s{
/* snapshot memory backup */
void* snapshot_ptr;
/* snapshot memory backup memfd */
int snapshot_ptr_fd;
/* incremental memory backup */
void* incremental_ptr;
//fast_reload_tmp_snapshot_t tmp_snapshot;
/* total memory size */
uint64_t memory_size;
/* guest RAM regions (fixed maximum of 10) */
ram_region_t ram_regions[10];
uint8_t ram_regions_num;
/* additional dirty stack to restore root snapshot */
uint64_t root_track_pages_num;
uint64_t root_track_pages_size;
uint64_t* root_track_pages_stack;
bool incremental_enabled;
}shadow_memory_t;
shadow_memory_t* shadow_memory_init(void);
shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot);
void shadow_memory_prepare_incremental(shadow_memory_t* self);
void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental);
void shadow_memory_restore_memory(shadow_memory_t* self);
//void shadow_memory_prepare_incremental_snapshot(shadow_memory_t* self);
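/*
 * Hot path: called for every newly dirtied root page. The stack grows
 * geometrically (x4 via <<= 2) to keep reallocations rare; the encoding
 * matches the decoding in shadow_memory_restore_memory().
 */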
static inline void shadow_memory_track_dirty_root_pages(shadow_memory_t* self, uint64_t address, uint8_t slot){
if(unlikely(self->root_track_pages_num >= self->root_track_pages_size)){
self->root_track_pages_size <<= 2;
self->root_track_pages_stack = realloc(self->root_track_pages_stack, self->root_track_pages_size*sizeof(uint64_t));
}
self->root_track_pages_stack[self->root_track_pages_num] = (address & 0xFFFFFFFFFFFFF000) | slot;
self->root_track_pages_num++;
}
bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot);
void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder);
bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size);

View File

Some files were not shown because too many files have changed in this diff.