auto-apply clang-format
- including vl.c & kvm-all.c

parent 976d8e8329
commit 8a88edc2a1
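Below, a minimal sketch of the clang-format toggle mechanism this commit relies on (illustrative only — example.c is not part of this commit, and the exact style rules are assumed to come from the repository's .clang-format configuration):

    /* example.c — illustrative sketch, not from this repository.
     * clang-format leaves code between "// clang-format off" and
     * "// clang-format on" byte-identical and normalizes the rest. */
    #include <stdio.h>

    // clang-format off
    int   untouched(void){return 1;}   /* kept exactly as written */
    // clang-format on

    /* anything outside an "off" region is normalized on the next run: */
    int reformatted(void)
    {
        return 2;
    }

    int main(void)
    {
        printf("%d\n", untouched() + reformatted());
        return 0;
    }

In the hunks below the pattern appears inverted: formatting is disabled file-wide and re-enabled only between "// clang-format on" and "// clang-format off" inside the #ifdef QEMU_NYX regions, so upstream QEMU code keeps its historical formatting while the Nyx additions are auto-formatted.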
kvm-all.c

@@ -48,15 +48,15 @@

#ifdef QEMU_NYX
// clang-format on
#include "nyx/pt.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/synchronization.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include "nyx/interface.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/pt.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
// clang-format off
#endif

@@ -387,11 +387,13 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)

#ifdef QEMU_NYX
// clang-format on
int kvm_get_vm_fd(KVMState *s){
int kvm_get_vm_fd(KVMState *s)
{
    return s->vmfd;
}

KVMMemoryListener* kvm_get_kml(int as_id){
KVMMemoryListener *kvm_get_kml(int as_id)
{
    return kvm_state->as[as_id].ml;
}
// clang-format off

@@ -1929,19 +1931,23 @@ static int kvm_init(MachineState *ms)
    }
#ifdef QEMU_NYX
// clang-format on
    if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 && ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1) {
    if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 &&
        ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1)
    {
        /* fallback -> use vanilla KVM module instead (no Intel-PT tracing or nested hypercalls at this point) */
        fprintf(stderr, "[QEMU-Nyx] Could not access KVM-PT kernel module!\n[QEMU-Nyx] Trying vanilla KVM...\n");
        fprintf(stderr, "[QEMU-Nyx] Could not access KVM-PT kernel "
                        "module!\n[QEMU-Nyx] Trying vanilla KVM...\n");
        if (s->fd == -1) {
            fprintf(stderr, "[QEMU-Nyx] Error: NYX fallback failed: Could not access vanilla KVM module!\n");
            fprintf(stderr, "[QEMU-Nyx] Error: NYX fallback failed: Could not "
                            "access vanilla KVM module!\n");
            ret = -errno;
            goto err;
        }

        int ret_val = ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
        if (ret_val == -1 || ret_val == 0) {
            fprintf(stderr, "[QEMU-Nyx] Error: NYX requires support for KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
            fprintf(stderr, "[QEMU-Nyx] Error: NYX requires support for "
                            "KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
            ret = -errno;
            goto err;
        }

@@ -1949,7 +1955,9 @@ static int kvm_init(MachineState *ms)
        /* check for vmware_backdoor support */
        int fd = open("/sys/module/kvm/parameters/enable_vmware_backdoor", O_RDONLY);
        if (fd == -1) {
            fprintf(stderr, "ERROR: /sys/module/kvm/parameters/enable_vmware_backdoor file not found...\n");
            fprintf(stderr,
                    "ERROR: /sys/module/kvm/parameters/enable_vmware_backdoor file "
                    "not found...\n");
            ret = -errno;
            goto err;
        }

@@ -1959,26 +1967,30 @@ static int kvm_init(MachineState *ms)
        close(fd);

        if (vmware_backdoor_option == 'N') {
            fprintf(stderr, "\n[QEMU-Nyx] ERROR: vmware backdoor is not enabled...\n");
            fprintf(stderr,
                    "\n[QEMU-Nyx] ERROR: vmware backdoor is not enabled...\n");
            fprintf(stderr, "\n\tRun the following commands to fix the issue:\n");
            fprintf(stderr, "\t-----------------------------------------\n");
            fprintf(stderr, "\tsudo modprobe -r kvm-intel\n");
            fprintf(stderr, "\tsudo modprobe -r kvm\n");
            fprintf(stderr, "\tsudo modprobe kvm enable_vmware_backdoor=y\n");
            fprintf(stderr, "\tsudo modprobe kvm-intel\n");
            fprintf(stderr, "\tcat /sys/module/kvm/parameters/enable_vmware_backdoor\n");
            fprintf(stderr,
                    "\tcat /sys/module/kvm/parameters/enable_vmware_backdoor\n");
            fprintf(stderr, "\t-----------------------------------------\n\n");
            ret = -errno;
            goto err;
        }

        fprintf(stderr, "[QEMU-Nyx] NYX runs in fallback mode (no Intel-PT tracing or nested hypercall support)!\n");
        fprintf(stderr, "[QEMU-Nyx] NYX runs in fallback mode (no Intel-PT tracing "
                        "or nested hypercall support)!\n");
        s->nyx_no_pt_mode = true;
        GET_GLOBAL_STATE()->nyx_fdl = false;
        GET_GLOBAL_STATE()->pt_trace_mode = false; // Intel PT is not available in this mode
        fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DIRTY_RING);
    }
    else{
        GET_GLOBAL_STATE()->pt_trace_mode =
            false; // Intel PT is not available in this mode
        fast_reload_set_mode(get_fast_reload_snapshot(),
                             RELOAD_MEMORY_MODE_DIRTY_RING);
    } else {
        s->nyx_no_pt_mode = false;
        GET_GLOBAL_STATE()->nyx_fdl = true;
        fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_FDL);

@@ -2053,10 +2065,13 @@ static int kvm_init(MachineState *ms)
// clang-format on
    if (s->nyx_no_pt_mode) {
        if (getenv("NYX_DISABLE_DIRTY_RING")) {
            fprintf(stderr, "WARNING: Nyx has disabled KVM's dirty-ring (required to enable full VGA support during pre-snapshot creation procedure)\n");
            fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_DEBUG_QUIET); /* required to create snapshot */
        }
        else{
            fprintf(stderr,
                    "WARNING: Nyx has disabled KVM's dirty-ring (required to enable "
                    "full VGA support during pre-snapshot creation procedure)\n");
            fast_reload_set_mode(
                get_fast_reload_snapshot(),
                RELOAD_MEMORY_MODE_DEBUG_QUIET); /* required to create snapshot */
        } else {
            nyx_dirty_ring_early_init(s->fd, s->vmfd);
        }
    }

@@ -2108,8 +2123,7 @@ static int kvm_init(MachineState *ms)
// clang-format on
    if (s->nyx_no_pt_mode) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
    }
    else{
    } else {
        ret = 0;
    }
// clang-format off

@@ -2405,7 +2419,8 @@ static void kvm_eat_signals(CPUState *cpu)

#ifdef QEMU_NYX
// clang-format on
static int handle_vmware_hypercall(struct kvm_run *run, CPUState *cpu){
static int handle_vmware_hypercall(struct kvm_run *run, CPUState *cpu)
{
    kvm_arch_get_registers_fast(cpu);

    X86CPU *x86_cpu = X86_CPU(cpu);

@@ -2530,16 +2545,17 @@ int kvm_cpu_exec(CPUState *cpu)
                    synchronization_payload_buffer_write_detected();
                    ret = 0;
                    break;
                }
                else{
                    fprintf(stderr, "ERROR: invalid write to input buffer detected before harness was ready (write protection is enabled)!\n");
                } else {
                    fprintf(
                        stderr,
                        "ERROR: invalid write to input buffer detected before "
                        "harness was ready (write protection is enabled)!\n");
                    exit(1);
                }
            }
        }

        fprintf(stderr, "QEMU-PT: error: kvm run failed %s\n",
                strerror(-run_ret));
        fprintf(stderr, "QEMU-PT: error: kvm run failed %s\n", strerror(-run_ret));
        qemu_backtrace();
// clang-format off
#endif

@@ -2563,7 +2579,9 @@ int kvm_cpu_exec(CPUState *cpu)

#ifdef QEMU_NYX
// clang-format on
        if(run->io.port == 0x5658 && run->io.size == 4 && *((uint32_t*)((uint8_t *)run + run->io.data_offset)) == 0x8080801f) {
        if (run->io.port == 0x5658 && run->io.size == 4 &&
            *((uint32_t *)((uint8_t *)run + run->io.data_offset)) == 0x8080801f)
        {
            assert(kvm_state->nyx_no_pt_mode);
            ret = handle_vmware_hypercall(run, cpu);
            break;

@@ -2603,12 +2621,15 @@ int kvm_cpu_exec(CPUState *cpu)
#define CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC // consider triple-fault etc as crash?
#ifndef CONFIG_KVM_EXIT_SHUTODWN_IS_PANIC
                /* Fuzzing is enabled at this point -> don't exit */
                fprintf(stderr, "Got KVM_EXIT_SHUTDOWN while in fuzzing mode => reload\n",);
                handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
                fprintf(stderr,
                        "Got KVM_EXIT_SHUTDOWN while in fuzzing mode => reload\n", );
                handle_hypercall_kafl_release(run, cpu,
                                              (uint64_t)run->hypercall.args[0]);
                ret = 0;
#else
                nyx_debug("Got KVM_EXIT_SHUTDOWN while in fuzzing mode => panic\n");
                handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
                handle_hypercall_kafl_panic(run, cpu,
                                            (uint64_t)run->hypercall.args[0]);
                ret = 0;
#endif
            } else {

@@ -2643,15 +2664,16 @@ int kvm_cpu_exec(CPUState *cpu)
            break;

        case KVM_EXIT_KAFL_ACQUIRE ...(KVM_EXIT_KAFL_ACQUIRE + 100):
            ret = handle_kafl_hypercall(run, cpu, (uint64_t)run->exit_reason, (uint64_t)run->hypercall.args[0]);
            ret = handle_kafl_hypercall(run, cpu, (uint64_t)run->exit_reason,
                                        (uint64_t)run->hypercall.args[0]);
            break;

        case KVM_EXIT_DEBUG:
            kvm_arch_get_registers(cpu);
            if(!handle_hypercall_kafl_hook(run, cpu, (uint64_t)run->hypercall.args[0])){
            if (!handle_hypercall_kafl_hook(run, cpu, (uint64_t)run->hypercall.args[0]))
            {
                ret = kvm_arch_handle_exit(cpu, run);
            }
            else {
            } else {
                ret = 0;
            }
            break;

@@ -2666,13 +2688,15 @@ int kvm_cpu_exec(CPUState *cpu)
            ret = EXCP_INTERRUPT;
#else
// clang-format on
            fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_SHUTDOWN)!\n");
            fprintf(
                stderr,
                "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_SHUTDOWN)!\n");
            if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
                /* Fuzzing is enabled at this point -> don't exit */
                handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
                handle_hypercall_kafl_release(run, cpu,
                                              (uint64_t)run->hypercall.args[0]);
                ret = 0;
            }
            else{
            } else {
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
            }

@@ -2685,13 +2709,14 @@ int kvm_cpu_exec(CPUState *cpu)
            ret = EXCP_INTERRUPT;
#else
// clang-format on
            fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_RESET)!\n");
            fprintf(stderr,
                    "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_RESET)!\n");
            if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
                /* Fuzzing is enabled at this point -> don't exit */
                handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
                handle_hypercall_kafl_release(run, cpu,
                                              (uint64_t)run->hypercall.args[0]);
                ret = 0;
            }
            else{
            } else {
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
            }

@@ -2707,13 +2732,14 @@ int kvm_cpu_exec(CPUState *cpu)
            ret = 0;
#else
// clang-format on
            fprintf(stderr, "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_CRASH)!\n");
            fprintf(stderr,
                    "ATTEMPT TO SHUTDOWN MACHINE (KVM_SYSTEM_EVENT_CRASH)!\n");
            if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
                /* Fuzzing is enabled at this point -> don't exit */
                handle_hypercall_kafl_release(run, cpu, (uint64_t)run->hypercall.args[0]);
                handle_hypercall_kafl_release(run, cpu,
                                              (uint64_t)run->hypercall.args[0]);
                ret = 0;
            }
            else{
            } else {
                kvm_cpu_synchronize_state(cpu);
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));

@@ -2743,7 +2769,8 @@ int kvm_cpu_exec(CPUState *cpu)
            nyx_debug("kvm_arch_handle_exit(%d) => panic\n", run->exit_reason);
            ret = kvm_arch_handle_exit(cpu, run);
            if (ret != 0)
                handle_hypercall_kafl_panic(run, cpu, (uint64_t)run->hypercall.args[0]);
                handle_hypercall_kafl_panic(run, cpu,
                                            (uint64_t)run->hypercall.args[0]);
#endif
// clang-format off
#endif

@@ -2753,21 +2780,25 @@ int kvm_cpu_exec(CPUState *cpu)

#ifdef QEMU_NYX
// clang-format on
    if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->cow_cache_full){
    if (GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->cow_cache_full)
    {
        synchronization_cow_full_detected();
        GET_GLOBAL_STATE()->cow_cache_full = false;
        ret = 0;
    }
    else{
    } else {
        if (GET_GLOBAL_STATE()->in_fuzzing_mode && cpu->halted) {
            fprintf(stderr, "%s: Attempt to halt CPU -> FUCK OFF!\n", __func__);
            cpu->halted = 0;
            GET_GLOBAL_STATE()->shutdown_requested = true;
        }

        if(GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->shutdown_requested){
        if (GET_GLOBAL_STATE()->in_fuzzing_mode &&
            GET_GLOBAL_STATE()->shutdown_requested)
        {
            /* Fuzzing is enabled at this point -> don't exit */
            fprintf(stderr, "shutdown_requested -> calling handle_hypercall_kafl_release\n");
            fprintf(
                stderr,
                "shutdown_requested -> calling handle_hypercall_kafl_release\n");

            // synchronization_lock_shutdown_detected();
            synchronization_lock_crash_found();
nyx/auxiliary_buffer.c

@@ -19,14 +19,14 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.

*/

#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include "qemu/osdep.h"
#include "nyx/auxiliary_buffer.h"
#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include "nyx/trace_dump.h"
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* experimental feature (currently broken)
 * enabled via trace mode

@@ -43,41 +43,44 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define VOLATILE_READ_16(dst, src) dst = *((volatile uint16_t *)(&src))
#define VOLATILE_READ_8(dst, src) dst = *((volatile uint8_t *)(&src))

static void volatile_memset(void* dst, uint8_t ch, size_t count){
static void volatile_memset(void *dst, uint8_t ch, size_t count)
{
    for (size_t i = 0; i < count; i++) {
        VOLATILE_WRITE_8(((uint8_t *)dst)[i], ch);
    }
}

static void volatile_memcpy(void* dst, void* src, size_t size){
static void volatile_memcpy(void *dst, void *src, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        VOLATILE_WRITE_8(((uint8_t *)dst)[i], ((uint8_t *)src)[i]);
    }
}

void init_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer){
void init_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer)
{
    nyx_trace();
    volatile_memset((void *)auxilary_buffer, 0, sizeof(auxilary_buffer_t));

    VOLATILE_WRITE_16(auxilary_buffer->header.version, QEMU_PT_VERSION);

    uint16_t hash = (sizeof(auxilary_buffer_header_t) +
                     sizeof(auxilary_buffer_cap_t) +
                     sizeof(auxilary_buffer_config_t) +
                     sizeof(auxilary_buffer_result_t) +
                     sizeof(auxilary_buffer_misc_t)) % 0xFFFF;
    uint16_t hash =
        (sizeof(auxilary_buffer_header_t) + sizeof(auxilary_buffer_cap_t) +
         sizeof(auxilary_buffer_config_t) + sizeof(auxilary_buffer_result_t) +
         sizeof(auxilary_buffer_misc_t)) %
        0xFFFF;

    VOLATILE_WRITE_16(auxilary_buffer->header.hash, hash);

    VOLATILE_WRITE_64(auxilary_buffer->header.magic, AUX_MAGIC);
}

void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config){
void check_auxiliary_config_buffer(auxilary_buffer_t *auxilary_buffer,
                                   auxilary_buffer_config_t *shadow_config)
{
    uint8_t changed = 0;
    VOLATILE_READ_8(changed, auxilary_buffer->configuration.changed);
    if (changed) {

        uint8_t aux_byte;

        VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.redqueen_mode);

@@ -86,22 +89,25 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
            if (aux_byte != shadow_config->redqueen_mode) {
                GET_GLOBAL_STATE()->in_redqueen_reload_mode = true;
                GET_GLOBAL_STATE()->redqueen_enable_pending = true;
                GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_LIGHT_INSTRUMENTATION;
                GET_GLOBAL_STATE()->redqueen_instrumentation_mode =
                    REDQUEEN_LIGHT_INSTRUMENTATION;
            }
        }
        else{
        } else {
            /* disable redqueen mode */
            if (aux_byte != shadow_config->redqueen_mode) {
                GET_GLOBAL_STATE()->in_redqueen_reload_mode = false;
                GET_GLOBAL_STATE()->redqueen_disable_pending = true;
                GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION;
                GET_GLOBAL_STATE()->redqueen_instrumentation_mode =
                    REDQUEEN_NO_INSTRUMENTATION;
            }
        }

        VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.trace_mode);
        if (aux_byte) {
            /* enable trace mode */
            if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){
            if (aux_byte != shadow_config->trace_mode &&
                GET_GLOBAL_STATE()->redqueen_state)
            {
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
                GET_GLOBAL_STATE()->pt_trace_mode_force = true;
#endif

@@ -109,10 +115,11 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
                redqueen_set_trace_mode();
                pt_trace_dump_enable(true);
            }
        }
        else {
        } else {
            /* disable trace mode */
            if(aux_byte != shadow_config->trace_mode && GET_GLOBAL_STATE()->redqueen_state){
            if (aux_byte != shadow_config->trace_mode &&
                GET_GLOBAL_STATE()->redqueen_state)
            {
#ifdef SUPPORT_COMPILE_TIME_REDQUEEN
                GET_GLOBAL_STATE()->pt_trace_mode_force = false;
#endif

@@ -138,7 +145,8 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
        GET_GLOBAL_STATE()->in_reload_mode = aux_byte;

        /* modify protect_payload_buffer */
        VOLATILE_READ_8(aux_byte, auxilary_buffer->configuration.protect_payload_buffer);
        VOLATILE_READ_8(aux_byte,
                        auxilary_buffer->configuration.protect_payload_buffer);
        if (GET_GLOBAL_STATE()->protect_payload_buffer == 0 && aux_byte == 1) {
            GET_GLOBAL_STATE()->protect_payload_buffer = aux_byte;
        }

@@ -149,46 +157,62 @@ void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_
        VOLATILE_WRITE_8(auxilary_buffer->configuration.discard_tmp_snapshot, 0);

        /* copy to shodow */
        VOLATILE_READ_8(shadow_config->timeout_sec, auxilary_buffer->configuration.timeout_sec);
        VOLATILE_READ_32(shadow_config->timeout_usec, auxilary_buffer->configuration.timeout_usec);
        VOLATILE_READ_8(shadow_config->timeout_sec,
                        auxilary_buffer->configuration.timeout_sec);
        VOLATILE_READ_32(shadow_config->timeout_usec,
                         auxilary_buffer->configuration.timeout_usec);

        // if(shadow_config->timeout_sec || shadow_config->timeout_usec){
        /* apply only non-zero values */
        update_itimer(&(GET_GLOBAL_STATE()->timeout_detector), shadow_config->timeout_sec, shadow_config->timeout_usec);
        update_itimer(&(GET_GLOBAL_STATE()->timeout_detector),
                      shadow_config->timeout_sec, shadow_config->timeout_usec);
        //}

        VOLATILE_READ_8(shadow_config->redqueen_mode, auxilary_buffer->configuration.redqueen_mode);
        VOLATILE_READ_8(shadow_config->trace_mode, auxilary_buffer->configuration.trace_mode);
        VOLATILE_READ_8(shadow_config->reload_mode, auxilary_buffer->configuration.reload_mode);
        VOLATILE_READ_8(shadow_config->redqueen_mode,
                        auxilary_buffer->configuration.redqueen_mode);
        VOLATILE_READ_8(shadow_config->trace_mode,
                        auxilary_buffer->configuration.trace_mode);
        VOLATILE_READ_8(shadow_config->reload_mode,
                        auxilary_buffer->configuration.reload_mode);

        VOLATILE_READ_8(shadow_config->verbose_level, auxilary_buffer->configuration.verbose_level);
        VOLATILE_READ_8(shadow_config->verbose_level,
                        auxilary_buffer->configuration.verbose_level);

        /* reset the 'changed' byte */
        VOLATILE_WRITE_8(auxilary_buffer->configuration.changed, 0);
    }
}

void set_crash_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
void set_crash_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
}

void set_asan_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
void set_asan_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_sanitizer);
}

void set_timeout_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_timeout);
}

void set_reload_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
void set_reload_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.reloaded, 1);
}

void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer){
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.pt_overflow, 1);
}

void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint32_t sec, uint32_t usec, uint32_t num_dirty_pages){
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                           uint32_t sec,
                                           uint32_t usec,
                                           uint32_t num_dirty_pages)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_done, 1);

    VOLATILE_WRITE_32(auxilary_buffer->result.runtime_sec, sec);

@@ -197,44 +221,61 @@ void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, u
}


void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
void set_hprintf_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
                                  char *msg,
                                  uint32_t len)
{
    VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
    volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t)MIN(len, MISC_SIZE-2));
    volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
                    (size_t)MIN(len, MISC_SIZE - 2));
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_hprintf);
}

void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
                                       char *msg,
                                       uint32_t len)
{
    VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
    volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
    volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
                    (size_t)MIN(len, MISC_SIZE - 2));
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_crash);
}

void set_abort_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
                                       char *msg,
                                       uint32_t len)
{
    VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
    volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
    volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
                    (size_t)MIN(len, MISC_SIZE - 2));
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_aborted);
}

void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state){
void set_state_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                       uint8_t state)
{
    if (auxilary_buffer) {
        VOLATILE_WRITE_8(auxilary_buffer->result.state, state);
    }
    else{
    } else {
        nyx_error("WARNING: auxilary_buffer pointer is zero\n");
    }
}

void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr){
void set_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                      uint64_t page_addr)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 1);
    VOLATILE_WRITE_64(auxilary_buffer->result.page_addr, page_addr);
}

void reset_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer){
void reset_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.page_not_found, 0);
}

void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success){
void set_success_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                         uint8_t success)
{
    // TODO refactor to let caller directly set the result codes
    if (success == 2) {
        VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_starved);

@@ -243,34 +284,43 @@ void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uin
    }
}

void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len){
void set_payload_buffer_write_reason_auxiliary_buffer(
    auxilary_buffer_t *auxilary_buffer, char *msg, uint32_t len)
{
    VOLATILE_WRITE_16(auxilary_buffer->misc.len, MIN(len, MISC_SIZE - 2));
    volatile_memcpy((void*)&auxilary_buffer->misc.data, (void*)msg, (size_t) MIN(len, MISC_SIZE-2));
    volatile_memcpy((void *)&auxilary_buffer->misc.data, (void *)msg,
                    (size_t)MIN(len, MISC_SIZE - 2));
    VOLATILE_WRITE_8(auxilary_buffer->result.exec_result_code, rc_input_buffer_write);
}


void set_tmp_snapshot_created(auxilary_buffer_t* auxilary_buffer, uint8_t value){
void set_tmp_snapshot_created(auxilary_buffer_t *auxilary_buffer, uint8_t value)
{
    VOLATILE_WRITE_8(auxilary_buffer->result.tmp_snapshot_created, value);
}

void set_cap_agent_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){
void set_cap_agent_trace_bitmap(auxilary_buffer_t *auxilary_buffer, bool value)
{
    VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_trace_bitmap, value);
}

void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t* auxilary_buffer, bool value){
void set_cap_agent_ijon_trace_bitmap(auxilary_buffer_t *auxilary_buffer, bool value)
{
    VOLATILE_WRITE_8(auxilary_buffer->capabilites.agent_ijon_trace_bitmap, value);
}

void set_result_dirty_pages(auxilary_buffer_t* auxilary_buffer, uint32_t value){
void set_result_dirty_pages(auxilary_buffer_t *auxilary_buffer, uint32_t value)
{
    VOLATILE_WRITE_32(auxilary_buffer->result.dirty_pages, value);
}

void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value){
void set_result_pt_trace_size(auxilary_buffer_t *auxilary_buffer, uint32_t value)
{
    VOLATILE_WRITE_32(auxilary_buffer->result.pt_trace_size, value);
}

void set_result_bb_coverage(auxilary_buffer_t* auxilary_buffer, uint32_t value){
void set_result_bb_coverage(auxilary_buffer_t *auxilary_buffer, uint32_t value)
{
    if (value != auxilary_buffer->result.bb_coverage) {
        VOLATILE_WRITE_32(auxilary_buffer->result.bb_coverage, value);
    }
nyx/auxiliary_buffer.h

@@ -21,14 +21,15 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.

#pragma once

#include <stdint.h>
#include <stdbool.h>
#include <stdint.h>

#define AUX_BUFFER_SIZE 4096

#define AUX_MAGIC 0x54502d554d4551

#define QEMU_PT_VERSION 3 /* let's start at 1 for the initial version using the aux buffer */
#define QEMU_PT_VERSION \
    3 /* let's start at 1 for the initial version using the aux buffer */

#define HEADER_SIZE 128
#define CAP_SIZE 256

@@ -58,12 +59,15 @@ typedef struct auxilary_buffer_header_s{

typedef struct auxilary_buffer_cap_s {
    uint8_t redqueen;
    uint8_t agent_timeout_detection; /* agent implements its own timeout detection; host timeout detection is still in used, but treshold is increased by x2; */
    uint8_t agent_timeout_detection; /* agent implements its own timeout detection;
        host timeout detection is still in used, but treshold is increased by x2; */
    uint8_t agent_trace_bitmap; /* agent implements its own tracing mechanism; PT tracing is disabled */
    uint8_t agent_ijon_trace_bitmap; /* agent uses the ijon shm buffer */

    uint32_t agent_input_buffer_size; /* agent requests a custom input buffer size (if the size is 0, the minimum buffer size is used) */
    uint32_t agent_coverage_bitmap_size; /* agent requests a custom coverage bitmap size (if the size is 0, the minimum buffer size is used) */
    uint32_t agent_input_buffer_size; /* agent requests a custom input buffer size (if
                                         the size is 0, the minimum buffer size is used) */
    uint32_t agent_coverage_bitmap_size; /* agent requests a custom coverage bitmap
                                            size (if the size is 0, the minimum buffer size is used) */
    /* more to come */
} __attribute__((packed)) auxilary_buffer_cap_t;

@@ -149,22 +153,35 @@ typedef struct auxilary_buffer_s{
} __attribute__((packed)) auxilary_buffer_t;

void init_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer);
void check_auxiliary_config_buffer(auxilary_buffer_t* auxilary_buffer, auxilary_buffer_config_t* shadow_config);
void check_auxiliary_config_buffer(auxilary_buffer_t *auxilary_buffer,
                                   auxilary_buffer_config_t *shadow_config);

void set_crash_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_asan_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_timeout_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_reload_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_pt_overflow_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint32_t sec, uint32_t usec, uint32_t num_dirty_pages);
void set_state_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t state);
void set_hprintf_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_exec_done_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                           uint32_t sec,
                                           uint32_t usec,
                                           uint32_t num_dirty_pages);
void set_state_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                       uint8_t state);
void set_hprintf_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
                                  char *msg,
                                  uint32_t len);

void set_page_not_found_result_buffer(auxilary_buffer_t* auxilary_buffer, uint64_t page_addr);
void set_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                      uint64_t page_addr);
void reset_page_not_found_result_buffer(auxilary_buffer_t *auxilary_buffer);
void set_success_auxiliary_result_buffer(auxilary_buffer_t* auxilary_buffer, uint8_t success);
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_success_auxiliary_result_buffer(auxilary_buffer_t *auxilary_buffer,
                                         uint8_t success);
void set_crash_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
                                       char *msg,
                                       uint32_t len);
void set_abort_reason_auxiliary_buffer(auxilary_buffer_t *auxilary_buffer,
                                       char *msg,
                                       uint32_t len);

void set_tmp_snapshot_created(auxilary_buffer_t *auxilary_buffer, uint8_t value);

@@ -177,4 +194,5 @@ void set_result_pt_trace_size(auxilary_buffer_t* auxilary_buffer, uint32_t value

void set_result_bb_coverage(auxilary_buffer_t *auxilary_buffer, uint32_t value);

void set_payload_buffer_write_reason_auxiliary_buffer(auxilary_buffer_t* auxilary_buffer, char* msg, uint32_t len);
void set_payload_buffer_write_reason_auxiliary_buffer(
    auxilary_buffer_t *auxilary_buffer, char *msg, uint32_t len);
nyx/debug.c (27 changed lines)

@@ -1,8 +1,8 @@

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <execinfo.h>

#include "qemu/osdep.h"
#include "nyx/debug.h"

@@ -11,7 +11,8 @@
#ifdef ENABLE_BACKTRACES
#define BT_BUF_SIZE 100

void qemu_backtrace(void){
void qemu_backtrace(void)
{
    void *buffer[BT_BUF_SIZE];
    int nptrs = 0;
    int j;

@@ -33,8 +34,10 @@ void qemu_backtrace(void){
    free(strings);
}

static void sigsegfault_handler(int signo, siginfo_t *info, void *extra) {
    fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo);
static void sigsegfault_handler(int signo, siginfo_t *info, void *extra)
{
    fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(),
            signo);
    qemu_backtrace();
    fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
    while (1) {

@@ -42,8 +45,10 @@ static void sigsegfault_handler(int signo, siginfo_t *info, void *extra) {
    }
}

static void sigabrt_handler(int signo, siginfo_t *info, void *extra) {
    fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(), signo);
static void sigabrt_handler(int signo, siginfo_t *info, void *extra)
{
    fprintf(stderr, "[qemu-nyx] crash detected (pid: %d / signal: %d)\n", getpid(),
            signo);
    qemu_backtrace();
    fprintf(stderr, "WAITING FOR GDB ATTACH (PID: %d...\n", getpid());
    while (1) {

@@ -51,12 +56,14 @@ static void sigabrt_handler(int signo, siginfo_t *info, void *extra) {
    }
}

static void sigint_handler(int signo, siginfo_t *info, void *extra) {
static void sigint_handler(int signo, siginfo_t *info, void *extra)
{
    fprintf(stderr, "[qemu-nyx] bye! (pid: %d / signal: %d)\n", getpid(), signo);
    exit(0);
}

void init_crash_handler(void){
void init_crash_handler(void)
{
    struct sigaction action;
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigsegfault_handler;

@@ -67,7 +74,6 @@ void init_crash_handler(void){
    }


    action.sa_sigaction = sigabrt_handler;

    if (sigaction(SIGABRT, &action, NULL) == -1) {

@@ -85,7 +91,8 @@ void init_crash_handler(void){
    }
}

void hexdump_kafl(const void* data, size_t size) {
void hexdump_kafl(const void *data, size_t size)
{
    char ascii[17];
    size_t i, j;
    ascii[16] = '\0';
nyx/debug.h (13 changed lines)

@@ -5,9 +5,9 @@
#include <stdlib.h>
#include <unistd.h>

#include "qemu-common.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu-common.h"

#define ENABLE_BACKTRACES

@@ -27,9 +27,12 @@
 * qemu_log() is the standard logging enabled with -D
 * qemu_log_mask() is activated with additional -t nyx option
 */
//#define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX format, ##__VA_ARGS__)
#define nyx_debug_p(PREFIX, format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX PREFIX format, ##__VA_ARGS__)
// #define nyx_debug(format, ...) qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX
// "(%s#:%d)\t"format, __BASE_FILE__, __LINE__, ##__VA_ARGS__)
#define nyx_debug(format, ...) \
    qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX format, ##__VA_ARGS__)
#define nyx_debug_p(PREFIX, format, ...) \
    qemu_log_mask(LOG_NYX, NYX_LOG_PREFIX PREFIX format, ##__VA_ARGS__)
#else
#define nyx_debug(...)
#define nyx_debug_p(...)
nyx/fast_reload.c

@@ -56,26 +56,29 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include "nyx/helpers.h"
#include "nyx/memory_access.h"

#include"nyx/snapshot/helper.h"
#include"nyx/snapshot/memory/block_list.h"
#include"nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/block/nyx_block_snapshot.h"
#include "nyx/snapshot/devices/nyx_device_state.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"

FastReloadMemoryMode mode = RELOAD_MEMORY_MODE_DEBUG;

/* basic operations */

static void fast_snapshot_init_operation(fast_reload_t* self, const char* snapshot_folder, bool pre_snapshot){
static void fast_snapshot_init_operation(fast_reload_t *self,
                                         const char *snapshot_folder,
                                         bool pre_snapshot)
{
    assert((snapshot_folder == NULL && pre_snapshot == false) || snapshot_folder);

    if (snapshot_folder) {
        self->device_state = nyx_device_state_init_from_snapshot(snapshot_folder, pre_snapshot);
        self->shadow_memory_state = shadow_memory_init_from_snapshot(snapshot_folder, pre_snapshot);
    }
    else{
        self->device_state =
            nyx_device_state_init_from_snapshot(snapshot_folder, pre_snapshot);
        self->shadow_memory_state =
            shadow_memory_init_from_snapshot(snapshot_folder, pre_snapshot);
    } else {
        self->device_state = nyx_device_state_init();
        self->shadow_memory_state = shadow_memory_init();
    }

@@ -105,9 +108,9 @@ static void fast_snapshot_init_operation(fast_reload_t* self, const char* snapsh
    }

    if (snapshot_folder) {
        self->block_state = nyx_block_snapshot_init_from_file(snapshot_folder, pre_snapshot);
    }
    else{
        self->block_state =
            nyx_block_snapshot_init_from_file(snapshot_folder, pre_snapshot);
    } else {
        self->block_state = nyx_block_snapshot_init();
    }

@@ -117,38 +120,55 @@ static void fast_snapshot_init_operation(fast_reload_t* self, const char* snapsh
    }
}

static void fast_snapshot_restore_operation(fast_reload_t* self){
static void fast_snapshot_restore_operation(fast_reload_t *self)
{
    uint32_t num_dirty_pages = 0;

    switch (mode) {
    case RELOAD_MEMORY_MODE_DEBUG:
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true);
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
                                                      self->blocklist, true);
        break;
    case RELOAD_MEMORY_MODE_DEBUG_QUIET:
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, false);
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
                                                      self->blocklist, false);
        break;
    case RELOAD_MEMORY_MODE_FDL:
        num_dirty_pages += nyx_snapshot_nyx_fdl_restore(self->fdl_state, self->shadow_memory_state, self->blocklist);
        num_dirty_pages += nyx_snapshot_nyx_fdl_restore(self->fdl_state,
                                                        self->shadow_memory_state,
                                                        self->blocklist);
        break;
    case RELOAD_MEMORY_MODE_FDL_DEBUG:
        num_dirty_pages += nyx_snapshot_nyx_fdl_restore(self->fdl_state, self->shadow_memory_state, self->blocklist);
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true);
        num_dirty_pages += nyx_snapshot_nyx_fdl_restore(self->fdl_state,
                                                        self->shadow_memory_state,
                                                        self->blocklist);
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
                                                      self->blocklist, true);
        break;
    case RELOAD_MEMORY_MODE_DIRTY_RING:
        num_dirty_pages += nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
        num_dirty_pages +=
            nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state,
                                                self->shadow_memory_state,
                                                self->blocklist);
        break;
    case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
        num_dirty_pages += nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state, self->blocklist, true);
        num_dirty_pages +=
            nyx_snapshot_nyx_dirty_ring_restore(self->dirty_ring_state,
                                                self->shadow_memory_state,
                                                self->blocklist);
        num_dirty_pages += nyx_snapshot_debug_restore(self->shadow_memory_state,
                                                      self->blocklist, true);
        break;
    }

    num_dirty_pages += nyx_snapshot_user_fdl_restore(self->fdl_user_state, self->shadow_memory_state, self->blocklist);
    num_dirty_pages += nyx_snapshot_user_fdl_restore(self->fdl_user_state,
                                                     self->shadow_memory_state,
                                                     self->blocklist);
    GET_GLOBAL_STATE()->num_dirty_pages = num_dirty_pages;
}

static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t* self){
static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t *self)
{
    /* flush all pending block writes */
    bdrv_drain_all();
    memory_global_dirty_log_sync();

@@ -157,34 +177,49 @@ static inline void fast_snapshot_pre_create_incremental_operation(fast_reload_t*
    nyx_block_snapshot_switch_incremental(self->block_state);
}

static inline void fast_snapshot_create_incremental_operation(fast_reload_t* self){
static inline void fast_snapshot_create_incremental_operation(fast_reload_t *self)
{
    shadow_memory_prepare_incremental(self->shadow_memory_state);
    nyx_device_state_save_tsc_incremental(self->device_state);

    switch (mode) {
    case RELOAD_MEMORY_MODE_DEBUG:
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true);
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
                                           self->blocklist, true);
        break;
    case RELOAD_MEMORY_MODE_DEBUG_QUIET:
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, false);
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
                                           self->blocklist, false);
        break;
    case RELOAD_MEMORY_MODE_FDL:
        nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state, self->shadow_memory_state, self->blocklist);
        nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state,
                                             self->shadow_memory_state,
                                             self->blocklist);
        break;
    case RELOAD_MEMORY_MODE_FDL_DEBUG:
        nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state, self->shadow_memory_state, self->blocklist);
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true);
        nyx_snapshot_nyx_fdl_save_root_pages(self->fdl_state,
                                             self->shadow_memory_state,
                                             self->blocklist);
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
                                           self->blocklist, true);
        break;
    case RELOAD_MEMORY_MODE_DIRTY_RING:
        nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
        nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state,
                                                    self->shadow_memory_state,
                                                    self->blocklist);
        break;
    case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
        nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state, self->blocklist, true);
        nyx_snapshot_nyx_dirty_ring_save_root_pages(self->dirty_ring_state,
                                                    self->shadow_memory_state,
                                                    self->blocklist);
        nyx_snapshot_debug_save_root_pages(self->shadow_memory_state,
                                           self->blocklist, true);
        break;
    }

    nyx_snapshot_nyx_fdl_user_save_root_pages(self->fdl_user_state, self->shadow_memory_state, self->blocklist);
    nyx_snapshot_nyx_fdl_user_save_root_pages(self->fdl_user_state,
                                              self->shadow_memory_state,
                                              self->blocklist);
    shadow_memory_switch_snapshot(self->shadow_memory_state, true);

    kvm_arch_put_registers(qemu_get_cpu(0), KVM_PUT_FULL_STATE_FAST);

@@ -192,7 +227,8 @@ static inline void fast_snapshot_create_incremental_operation(fast_reload_t* sel
}


fast_reload_t* fast_reload_new(void){
fast_reload_t *fast_reload_new(void)
{
    fast_reload_t *self = malloc(sizeof(fast_reload_t));
    memset(self, 0x0, sizeof(fast_reload_t));

@@ -204,22 +240,25 @@ fast_reload_t* fast_reload_new(void){
    return self;
}

void fast_reload_set_mode(fast_reload_t* self, FastReloadMemoryMode m){
void fast_reload_set_mode(fast_reload_t *self, FastReloadMemoryMode m)
{
    assert(!self->root_snapshot_created);
    mode = m;
}

FastReloadMemoryMode fast_reload_get_mode(fast_reload_t* self){
FastReloadMemoryMode fast_reload_get_mode(fast_reload_t *self)
{
    return mode;
}

void fast_reload_init(fast_reload_t* self){
void fast_reload_init(fast_reload_t *self)
{
    self->blocklist = snapshot_page_blocklist_init();
}

/* fix this */
void fast_reload_destroy(fast_reload_t* self){
void fast_reload_destroy(fast_reload_t *self)
{
    /* TODO: complete me */

    // close(self->vmx_fdl_fd);

@@ -234,7 +273,8 @@ void fast_reload_destroy(fast_reload_t* self){
    */
}

inline static void unlock_snapshot(const char* folder){
inline static void unlock_snapshot(const char *folder)
{
    char *info_file;
    char *lock_file;

@@ -244,8 +284,7 @@ inline static void unlock_snapshot(const char* folder){
    if (GET_GLOBAL_STATE()->fast_reload_pre_image) {
        const char *msg = "THIS IS A NYX PRE IMAGE SNAPSHOT FOLDER!\n";
        fwrite(msg, strlen(msg), 1, f_info);
    }
    else{
    } else {
        const char *msg = "THIS IS A NYX SNAPSHOT FOLDER!\n";
        fwrite(msg, strlen(msg), 1, f_info);
    }

@@ -258,18 +297,21 @@ inline static void unlock_snapshot(const char* folder){
    free(lock_file);
}

inline static void wait_for_snapshot(const char* folder){
inline static void wait_for_snapshot(const char *folder)
{
    char *lock_file;

    assert(asprintf(&lock_file, "%s/ready.lock", folder) != -1);
    while (access(lock_file, F_OK) == -1) {
        sleep(1);
    }
    free(lock_file);
}

void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder, bool is_pre_snapshot){
void fast_reload_serialize_to_file(fast_reload_t *self,
                                   const char *folder,
                                   bool is_pre_snapshot)
{
    nyx_trace();

    /* sanity check */

@@ -295,14 +337,18 @@ void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder, bool
}


static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* folder, bool lock_iothread, bool pre_snapshot){
static void fast_reload_create_from_snapshot(fast_reload_t *self,
                                             const char *folder,
                                             bool lock_iothread,
                                             bool pre_snapshot)
{
    nyx_trace();

    assert(self != NULL);
    wait_for_snapshot(folder);

    nyx_debug_p(RELOAD_PREFIX,"=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);
    nyx_debug_p(RELOAD_PREFIX,
                "=> CREATING FAST RELOAD SNAPSHOT FROM DUMP (located in: %s)", folder);

    rcu_read_lock();

@@ -332,18 +378,24 @@ static void fast_reload_create_from_snapshot(fast_reload_t* self, const char* fo
    }
}

void fast_reload_create_from_file(fast_reload_t* self, const char* folder, bool lock_iothread){
void fast_reload_create_from_file(fast_reload_t *self,
                                  const char *folder,
                                  bool lock_iothread)
{
    nyx_trace();
    fast_reload_create_from_snapshot(self, folder, lock_iothread, false);
}

void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* folder, bool lock_iothread){
void fast_reload_create_from_file_pre_image(fast_reload_t *self,
                                            const char *folder,
                                            bool lock_iothread)
{
    nyx_trace();
    fast_reload_create_from_snapshot(self, folder, lock_iothread, true);
}

void fast_reload_create_in_memory(fast_reload_t* self){
void fast_reload_create_in_memory(fast_reload_t *self)
{
    nyx_trace();
    assert(self != NULL);

@@ -363,7 +415,8 @@ void fast_reload_create_in_memory(fast_reload_t* self){
    cpu_synchronize_all_post_init();
}

void fast_reload_restore(fast_reload_t* self){
void fast_reload_restore(fast_reload_t *self)
{
    assert(self != NULL);
    self->dirty_pages = 0;

@@ -387,13 +440,15 @@ void fast_reload_restore(fast_reload_t* self){
}


bool read_snapshot_memory(fast_reload_t* self, uint64_t address, void* ptr, size_t size){
    return shadow_memory_read_physical_memory(self->shadow_memory_state, address, ptr, size);
bool read_snapshot_memory(fast_reload_t *self, uint64_t address, void *ptr, size_t size)
{
    return shadow_memory_read_physical_memory(self->shadow_memory_state, address,
                                              ptr, size);
}

/* fix this */
void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr){
void *fast_reload_get_physmem_shadow_ptr(fast_reload_t *self, uint64_t physaddr)
{
    abort(); /* TODO: fix this function first -> pc_piix memory split issue */

    /*

@@ -401,8 +456,9 @@ void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr)
    assert(!(physaddr&0xFFF)); // physaddr must be 4kb align !
    if (self->shadow_memory_regions){
        for(uint64_t j = 0; j < self->shadow_memory_regions; j++){
            if(physaddr >= self->ram_block_array[j]->offset && physaddr < (self->ram_block_array[j]->offset+self->ram_block_array[j]->used_length)){
                return self->shadow_memory[j]+(physaddr-self->ram_block_array[j]->offset);
            if(physaddr >= self->ram_block_array[j]->offset && physaddr <
    (self->ram_block_array[j]->offset+self->ram_block_array[j]->used_length)){ return
    self->shadow_memory[j]+(physaddr-self->ram_block_array[j]->offset);
            }
        }
    }

@@ -410,20 +466,22 @@ void* fast_reload_get_physmem_shadow_ptr(fast_reload_t* self, uint64_t physaddr)
    return NULL; // not found ... sorry :(
}

void fast_reload_blacklist_page(fast_reload_t* self, uint64_t physaddr){
void fast_reload_blacklist_page(fast_reload_t *self, uint64_t physaddr)
{
    assert(self->blocklist);
    snapshot_page_blocklist_add(self->blocklist, physaddr);
}

bool fast_reload_snapshot_exists(fast_reload_t* self){
bool fast_reload_snapshot_exists(fast_reload_t *self)
{
    if (!self) {
        return false;
    }
    return true;
}

void fast_reload_create_tmp_snapshot(fast_reload_t* self){
void fast_reload_create_tmp_snapshot(fast_reload_t *self)
{
    assert(self);

    self->dirty_pages = 0;

@@ -438,7 +496,8 @@ void fast_reload_create_tmp_snapshot(fast_reload_t* self){
    self->incremental_snapshot_enabled = true;
}

void fast_reload_discard_tmp_snapshot(fast_reload_t* self){
void fast_reload_discard_tmp_snapshot(fast_reload_t *self)
{
    assert(self && self->incremental_snapshot_enabled);

    self->dirty_pages = 0;

@@ -459,25 +518,27 @@ void fast_reload_discard_tmp_snapshot(fast_reload_t* self){
    self->incremental_snapshot_enabled = false;
}

bool fast_reload_root_created(fast_reload_t* self){
bool fast_reload_root_created(fast_reload_t *self)
{
    return self->root_snapshot_created;
}

bool fast_reload_tmp_created(fast_reload_t* self){
bool fast_reload_tmp_created(fast_reload_t *self)
{
    return self->incremental_snapshot_enabled;
}

uint32_t get_dirty_page_num(fast_reload_t* self){
uint32_t get_dirty_page_num(fast_reload_t *self)
{
    if (self) {
        return self->dirty_pages;
    }
    else{
    } else {
        return 0;
    }
}

bool fast_reload_set_bitmap(fast_reload_t* self){
bool fast_reload_set_bitmap(fast_reload_t *self)
{
    if (self->incremental_snapshot_enabled) {
        coverage_bitmap_copy_from_buffer(self->bitmap_copy);
        return true;

@@ -485,19 +546,25 @@ bool fast_reload_set_bitmap(fast_reload_t* self){
    return false;
}

void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t* self, MemoryRegion *mr, uint64_t addr, uint64_t length){
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t *self,
                                         MemoryRegion *mr,
                                         uint64_t addr,
                                         uint64_t length)
{
    /* works only with PC.RAM's memory region */
    assert(mr->alias_offset == 0);

    nyx_fdl_user_set(self->fdl_user_state, self->shadow_memory_state, self->fdl_state, addr, length);
    nyx_fdl_user_set(self->fdl_user_state, self->shadow_memory_state,
                     self->fdl_state, addr, length);
}

void fast_reload_handle_dirty_ring_full(fast_reload_t* self){
void fast_reload_handle_dirty_ring_full(fast_reload_t *self)
{
    if (self->dirty_ring_state) {
        nyx_snapshot_nyx_dirty_ring_flush_and_collect(self->dirty_ring_state, self->shadow_memory_state, self->blocklist);
    }
    else{
        nyx_snapshot_nyx_dirty_ring_flush_and_collect(self->dirty_ring_state,
                                                      self->shadow_memory_state,
                                                      self->blocklist);
    } else {
        nyx_snapshot_nyx_dirty_ring_flush();
    }
}
|
@ -46,7 +46,6 @@ typedef enum FastReloadMemoryMode {
|
||||
} FastReloadMemoryMode;
|
||||
|
||||
|
||||
|
||||
typedef struct fast_reload_dump_head_s {
|
||||
uint32_t shadow_memory_regions;
|
||||
uint32_t ram_region_index;
|
||||
@ -54,7 +53,6 @@ typedef struct fast_reload_dump_head_s{
|
||||
|
||||
|
||||
typedef struct fast_reload_s {
|
||||
|
||||
FastReloadMemoryMode mode;
|
||||
|
||||
/* memory snapshot */
|
||||
@ -84,7 +82,6 @@ typedef struct fast_reload_s{
|
||||
nyx_coverage_bitmap_copy_t *bitmap_copy;
|
||||
|
||||
|
||||
|
||||
uint32_t dirty_pages;
|
||||
|
||||
} fast_reload_t;
|
||||
@ -94,16 +91,24 @@ fast_reload_t* fast_reload_new(void);
|
||||
|
||||
|
||||
/* TODO: get rid of this */
|
||||
void fast_reload_create_to_file(fast_reload_t* self, const char* folder, bool lock_iothread);
|
||||
void fast_reload_create_from_file(fast_reload_t* self, const char* folder, bool lock_iothread);
|
||||
void fast_reload_create_from_file_pre_image(fast_reload_t* self, const char* folder, bool lock_iothread);
|
||||
void fast_reload_create_to_file(fast_reload_t *self,
|
||||
const char *folder,
|
||||
bool lock_iothread);
|
||||
void fast_reload_create_from_file(fast_reload_t *self,
|
||||
const char *folder,
|
||||
bool lock_iothread);
|
||||
void fast_reload_create_from_file_pre_image(fast_reload_t *self,
|
||||
const char *folder,
|
||||
bool lock_iothread);
|
||||
|
||||
|
||||
/* keep this */
|
||||
void fast_reload_create_in_memory(fast_reload_t *self);
|
||||
|
||||
|
||||
void fast_reload_serialize_to_file(fast_reload_t* self, const char* folder, bool is_pre_snapshot);
|
||||
void fast_reload_serialize_to_file(fast_reload_t *self,
|
||||
const char *folder,
|
||||
bool is_pre_snapshot);
|
||||
|
||||
|
||||
void fast_reload_restore(fast_reload_t *self);
|
||||
@ -115,7 +120,10 @@ bool read_snapshot_memory(fast_reload_t* self, uint64_t address, void* ptr, size
|
||||
|
||||
void fast_reload_destroy(fast_reload_t *self);
|
||||
|
||||
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t* self, MemoryRegion *mr, uint64_t addr, uint64_t length);
|
||||
void fast_reload_qemu_user_fdl_set_dirty(fast_reload_t *self,
|
||||
MemoryRegion *mr,
|
||||
uint64_t addr,
|
||||
uint64_t length);
|
||||
|
||||
void fast_reload_create_tmp_snapshot(fast_reload_t *self);
|
||||
void fast_reload_discard_tmp_snapshot(fast_reload_t *self);
|
||||
|
@ -23,7 +23,8 @@
extern int save_snapshot(const char *name, Error **errp);
extern int load_snapshot(const char *name, Error **errp);

static void adjust_rip(CPUX86State *env, fast_reload_t* snapshot){
static void adjust_rip(CPUX86State *env, fast_reload_t *snapshot)
{
switch (fast_reload_get_mode(snapshot)) {
case RELOAD_MEMORY_MODE_DEBUG:
case RELOAD_MEMORY_MODE_DEBUG_QUIET:
@ -40,7 +41,8 @@ static void adjust_rip(CPUX86State *env, fast_reload_t* snapshot){
}
}

fast_vm_reload_sync_t* init_fast_vm_reload_sync(void){
fast_vm_reload_sync_t *init_fast_vm_reload_sync(void)
{
fast_vm_reload_sync_t *self = malloc(sizeof(fast_vm_reload_sync_t));
memset(self, 0, sizeof(fast_vm_reload_sync_t));

@ -55,8 +57,8 @@ fast_vm_reload_sync_t* init_fast_vm_reload_sync(void){
return self;
}

bool fast_snapshot_exists(fast_vm_reload_sync_t* self, FastReloadRequest type){

bool fast_snapshot_exists(fast_vm_reload_sync_t *self, FastReloadRequest type)
{
assert(self->mode != RELOAD_MODE_DEBUG);

switch (type) {
@ -72,8 +74,9 @@ bool fast_snapshot_exists(fast_vm_reload_sync_t* self, FastReloadRequest type){
}



static inline void perform_task_debug_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){
static inline void perform_task_debug_mode(fast_vm_reload_sync_t *self,
FastReloadRequest request)
{
struct Error *errp = NULL;

switch (request) {
@ -120,28 +123,30 @@ static inline void perform_task_debug_mode(fast_vm_reload_sync_t* self, FastRelo
vm_start();
}

static inline void create_root_snapshot(void){
static inline void create_root_snapshot(void)
{
if (GET_GLOBAL_STATE()->fast_reload_enabled) {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_enabled: TRUE\n");
if (GET_GLOBAL_STATE()->fast_reload_mode) {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_mode: TRUE\n");
/* we've loaded an external snapshot folder - so do nothing and don't create any new snapshot files */
}
else{
} else {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_mode: FALSE\n");
/* store the current state as a snapshot folder */
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_path, false);
fast_reload_serialize_to_file(get_fast_reload_snapshot(),
GET_GLOBAL_STATE()->fast_reload_path, false);
}
}
else{
} else {
nyx_debug("===> GET_GLOBAL_STATE()->fast_reload_enabled: FALSE\n");
/* so we haven't set a path for our snapshot files - just store everything in memory */
fast_reload_create_in_memory(get_fast_reload_snapshot());
}
}

static inline void perform_task_no_block_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){
static inline void perform_task_no_block_mode(fast_vm_reload_sync_t *self,
FastReloadRequest request)
{
CPUState *cpu = qemu_get_cpu(0);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
@ -152,7 +157,8 @@ static inline void perform_task_no_block_mode(fast_vm_reload_sync_t* self, FastR
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path, true);
fast_reload_serialize_to_file(get_fast_reload_snapshot(),
GET_GLOBAL_STATE()->fast_reload_pre_path, true);

qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
qemu_mutex_unlock_iothread();
@ -203,13 +209,16 @@ static inline void perform_task_no_block_mode(fast_vm_reload_sync_t* self, FastR
qemu_mutex_unlock_iothread();
}

static inline void perform_task_block_mode(fast_vm_reload_sync_t* self, FastReloadRequest request){
static inline void perform_task_block_mode(fast_vm_reload_sync_t *self,
FastReloadRequest request)
{
switch (request) {
case REQUEST_SAVE_SNAPSHOT_PRE_FIX_RIP:
case REQUEST_SAVE_SNAPSHOT_PRE:
vm_stop(RUN_STATE_SAVE_VM);
fast_reload_create_in_memory(get_fast_reload_snapshot());
fast_reload_serialize_to_file(get_fast_reload_snapshot(), GET_GLOBAL_STATE()->fast_reload_pre_path, true);
fast_reload_serialize_to_file(get_fast_reload_snapshot(),
GET_GLOBAL_STATE()->fast_reload_pre_path, true);
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
return; /* return here to skip the vm_start call */
case REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP:
@ -237,7 +246,8 @@ static inline void perform_task_block_mode(fast_vm_reload_sync_t* self, FastRelo
vm_start();
}

static inline void perform_task(fast_vm_reload_sync_t* self, FastReloadRequest request){
static inline void perform_task(fast_vm_reload_sync_t *self, FastReloadRequest request)
{
switch (self->mode) {
case RELOAD_MODE_DEBUG:
abort();
@ -252,7 +262,8 @@ static inline void perform_task(fast_vm_reload_sync_t* self, FastReloadRequest r
}
}

void request_fast_vm_reload(fast_vm_reload_sync_t* self, FastReloadRequest request){
void request_fast_vm_reload(fast_vm_reload_sync_t *self, FastReloadRequest request)
{
assert(!self->request_exists);
assert(self->current_request == REQUEST_VOID);

@ -261,24 +272,25 @@ void request_fast_vm_reload(fast_vm_reload_sync_t* self, FastReloadRequest reque
kvm_arch_get_registers(cpu);
// perform_task(self, request);
perform_task_no_block_mode(self, request);
}
else{
} else {
self->current_request = request;
self->request_exists = true;
self->request_exists_pre = true;
}
}

bool reload_request_exists(fast_vm_reload_sync_t* self){
bool reload_request_exists(fast_vm_reload_sync_t *self)
{
return self->request_exists_pre;
}

void reload_request_discard_tmp(fast_vm_reload_sync_t* self){
void reload_request_discard_tmp(fast_vm_reload_sync_t *self)
{
fast_reload_discard_tmp_snapshot(get_fast_reload_snapshot());
}

bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self){

bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t *self)
{
/* TODO: always returns false or abort() ? */
if (self->request_exists_pre) {
self->request_exists_pre = false;
@ -323,7 +335,8 @@ bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self){
break;

default:
fprintf(stderr, "%s: Unkown request: %d\n", __func__, self->current_request);
fprintf(stderr, "%s: Unkown request: %d\n", __func__,
self->current_request);
abort();
}
return true;
@ -331,7 +344,8 @@ bool check_if_relood_request_exists_pre(fast_vm_reload_sync_t* self){
return false;
}

bool check_if_relood_request_exists_post(fast_vm_reload_sync_t* self){
bool check_if_relood_request_exists_post(fast_vm_reload_sync_t *self)
{
if (self->request_exists) {
FastReloadRequest request = self->current_request;
self->request_exists = false;
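The request flags touched in these hunks form a small handshake: request_fast_vm_reload() queues exactly one request and raises both request_exists and request_exists_pre, and the check_if_relood_request_exists_pre()/..._post() pair consumes them in two phases. A standalone sketch of that lifecycle; the struct and function names below are illustrative stand-ins, not the Nyx implementation:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct {
        bool request_exists;
        bool request_exists_pre;
        int  current_request;
    } sync_sketch_t;

    static void queue_request(sync_sketch_t *s, int req)
    {
        assert(!s->request_exists); /* only one outstanding request at a time */
        s->current_request    = req;
        s->request_exists     = true;
        s->request_exists_pre = true;
    }

    static bool consume_pre(sync_sketch_t *s)
    {
        if (!s->request_exists_pre) {
            return false;
        }
        s->request_exists_pre = false; /* the pre phase fires exactly once */
        return true;
    }

    int main(void)
    {
        sync_sketch_t s = { false, false, 0 };
        queue_request(&s, 1);
        assert(consume_pre(&s));  /* first poll observes the request */
        assert(!consume_pre(&s)); /* later polls do not */
        return 0;
    }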
@ -39,9 +39,7 @@ typedef enum FastReloadMode {
} FastReloadMode;



typedef struct fast_vm_reload_sync_s {

bool request_exists;
bool request_exists_pre;
bool debug_mode;
@ -1,10 +1,10 @@
#include <assert.h>
#include <string.h>

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

#include "nyx/redqueen.h"
// #include "debug.h"
@ -23,7 +23,8 @@ void _parse_addresses_in_file(FILE* fp, size_t num_addrs, uint64_t* addrs);
* Public Functions
*/

void write_debug_result(char* buf){
void write_debug_result(char *buf)
{
int unused __attribute__((unused));
int fd = open("/tmp/qemu_debug.txt", O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
assert(fd > 0);
@ -31,7 +32,8 @@ void write_debug_result(char* buf){
close(fd);
}

void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs){
void parse_address_file(char *path, size_t *num_addrs, uint64_t **addrs)
{
FILE *fp = fopen(path, "r");
if (!fp) {
*num_addrs = 0;
@ -57,14 +59,17 @@ void parse_address_file(char* path, size_t* num_addrs, uint64_t** addrs){
int re_fd = 0;
int se_fd = 0;

void write_re_result(char* buf){
void write_re_result(char *buf)
{
int unused __attribute__((unused));
if (!re_fd)
re_fd = open(redqueen_workdir.redqueen_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
re_fd = open(redqueen_workdir.redqueen_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = write(re_fd, buf, strlen(buf));
}

void fsync_redqueen_files(void){
void fsync_redqueen_files(void)
{
if (!se_fd) {
fsync(se_fd);
}
@ -73,21 +78,26 @@ void fsync_redqueen_files(void){
}
}

void write_se_result(char* buf){
void write_se_result(char *buf)
{
// int fd;
int unused __attribute__((unused));
if (!se_fd)
se_fd = open(redqueen_workdir.symbolic_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
se_fd = open(redqueen_workdir.symbolic_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = write(se_fd, buf, strlen(buf));
// close(fd);
}

void delete_redqueen_files(void){
void delete_redqueen_files(void)
{
int unused __attribute__((unused));
if (!re_fd)
re_fd = open(redqueen_workdir.redqueen_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
re_fd = open(redqueen_workdir.redqueen_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
if (!se_fd)
se_fd = open(redqueen_workdir.symbolic_results, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
se_fd = open(redqueen_workdir.symbolic_results,
O_WRONLY | O_CREAT | O_APPEND, S_IRWXU);
unused = ftruncate(re_fd, 0);
unused = ftruncate(se_fd, 0);
}
@ -96,7 +106,8 @@ void delete_redqueen_files(void){
* Private Helper Functions Definitions
*/

size_t _count_lines_in_file(FILE* fp){
size_t _count_lines_in_file(FILE *fp)
{
size_t val = 0;
size_t count = 0;
while (1) {
@ -105,16 +116,18 @@ size_t _count_lines_in_file(FILE* fp){
printf("WARNING, invalid line in address file");
assert(scanres != 0);
}
if(scanres == -1){break;}
if (scanres == -1) {
break;
}
count += 1;
}
rewind(fp);
return count;
}

void _parse_addresses_in_file(FILE* fp, size_t num_addrs, uint64_t* addrs){
void _parse_addresses_in_file(FILE *fp, size_t num_addrs, uint64_t *addrs)
{
for (size_t i = 0; i < num_addrs; i++) {
assert(fscanf(fp, "%lx", &addrs[i]) == 1);
}
}

@ -1,8 +1,9 @@
#include <stdio.h>
#pragma once

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#pragma once

// doesn't take ownership of path, num_addrs or addrs
void parse_address_file(char *path, size_t *num_addrs, uint64_t **addrs);
nyx/helpers.c
@ -12,22 +12,25 @@
#include "sysemu/kvm_int.h"
#include "qemu-common.h"

#include "nyx/helpers.h"
#include "nyx/debug.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"

void nyx_abort(char* msg){
set_abort_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, msg, strlen(msg));
void nyx_abort(char *msg)
{
set_abort_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, msg,
strlen(msg));
synchronization_lock();
exit(1);
}

bool is_called_in_fuzzing_mode(const char* hypercall){
bool is_called_in_fuzzing_mode(const char *hypercall)
{
if (GET_GLOBAL_STATE()->in_fuzzing_mode) {
char *tmp = NULL;
assert(asprintf(&tmp, "Hypercall <%s> called during fuzzing...", hypercall) != -1);
assert(asprintf(&tmp, "Hypercall <%s> called during fuzzing...", hypercall) !=
-1);
nyx_abort((char *)tmp);
free(tmp);
return true;
@ -35,7 +38,8 @@ bool is_called_in_fuzzing_mode(const char* hypercall){
return false;
}

uint64_t get_rip(CPUState *cpu){
uint64_t get_rip(CPUState *cpu)
{
kvm_arch_get_registers(cpu);
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
@ -43,7 +47,8 @@ uint64_t get_rip(CPUState *cpu){
return env->eip;
}

int get_capstone_mode(int word_width_in_bits){
int get_capstone_mode(int word_width_in_bits)
{
switch (word_width_in_bits) {
case 64:
return CS_MODE_64;
@ -54,7 +59,8 @@ int get_capstone_mode(int word_width_in_bits){
}
}

nyx_coverage_bitmap_copy_t* new_coverage_bitmaps(void){
nyx_coverage_bitmap_copy_t *new_coverage_bitmaps(void)
{
nyx_coverage_bitmap_copy_t *bitmaps = malloc(sizeof(nyx_coverage_bitmap_copy_t));
memset(bitmaps, 0, sizeof(nyx_coverage_bitmap_copy_t));

@ -67,36 +73,46 @@ nyx_coverage_bitmap_copy_t* new_coverage_bitmaps(void){
return bitmaps;
}

void coverage_bitmap_reset(void){
void coverage_bitmap_reset(void)
{
if (GET_GLOBAL_STATE()->shared_bitmap_ptr) {
memset(GET_GLOBAL_STATE()->shared_bitmap_ptr, 0x00, GET_GLOBAL_STATE()->shared_bitmap_real_size);
memset(GET_GLOBAL_STATE()->shared_bitmap_ptr, 0x00,
GET_GLOBAL_STATE()->shared_bitmap_real_size);
}
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr && GET_GLOBAL_STATE()->shared_ijon_bitmap_size){
memset(GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, 0x00, GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr &&
GET_GLOBAL_STATE()->shared_ijon_bitmap_size)
{
memset(GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, 0x00,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}

void coverage_bitmap_copy_to_buffer(nyx_coverage_bitmap_copy_t* buffer){

void coverage_bitmap_copy_to_buffer(nyx_coverage_bitmap_copy_t *buffer)
{
if (GET_GLOBAL_STATE()->shared_bitmap_ptr) {
memcpy(buffer->coverage_bitmap, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_real_size);
memcpy(buffer->coverage_bitmap, GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_real_size);
}
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr) {
memcpy(buffer->ijon_bitmap_buffer, GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
memcpy(buffer->ijon_bitmap_buffer, GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}

void coverage_bitmap_copy_from_buffer(nyx_coverage_bitmap_copy_t* buffer){

void coverage_bitmap_copy_from_buffer(nyx_coverage_bitmap_copy_t *buffer)
{
if (GET_GLOBAL_STATE()->shared_bitmap_ptr) {
memcpy(GET_GLOBAL_STATE()->shared_bitmap_ptr, buffer->coverage_bitmap, GET_GLOBAL_STATE()->shared_bitmap_real_size);
memcpy(GET_GLOBAL_STATE()->shared_bitmap_ptr, buffer->coverage_bitmap,
GET_GLOBAL_STATE()->shared_bitmap_real_size);
}
if (GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr) {
memcpy(GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, buffer->ijon_bitmap_buffer, GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
memcpy(GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr, buffer->ijon_bitmap_buffer,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size);
}
}

static void resize_coverage_bitmap(uint32_t new_bitmap_size){
static void resize_coverage_bitmap(uint32_t new_bitmap_size)
{
uint32_t new_bitmap_shm_size = new_bitmap_size;

if (new_bitmap_shm_size % 64 > 0) {
@ -104,42 +120,59 @@ static void resize_coverage_bitmap(uint32_t new_bitmap_size){
}

GET_GLOBAL_STATE()->shared_bitmap_real_size = new_bitmap_shm_size;
resize_shared_memory(new_bitmap_shm_size, &GET_GLOBAL_STATE()->shared_bitmap_size, &GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_fd);
resize_shared_memory(new_bitmap_shm_size, &GET_GLOBAL_STATE()->shared_bitmap_size,
&GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_fd);

/* pass the actual bitmap buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_coverage_bitmap_size = new_bitmap_size;
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_coverage_bitmap_size =
new_bitmap_size;

if (new_bitmap_size & (PAGE_SIZE - 1)) {
GET_GLOBAL_STATE()->shared_bitmap_size = (new_bitmap_size & ~(PAGE_SIZE-1)) + PAGE_SIZE;
GET_GLOBAL_STATE()->shared_bitmap_size =
(new_bitmap_size & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
}
}

bool apply_capabilities(CPUState *cpu){

nyx_debug("%s: agent supports timeout detection: %d\n", __func__, GET_GLOBAL_STATE()->cap_timeout_detection);
nyx_debug("%s: agent supports only-reload mode: %d\n", __func__, GET_GLOBAL_STATE()->cap_only_reload_mode);
nyx_debug("%s: agent supports compile-time tracing: %d\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing );
bool apply_capabilities(CPUState *cpu)
{
nyx_debug("%s: agent supports timeout detection: %d\n", __func__,
GET_GLOBAL_STATE()->cap_timeout_detection);
nyx_debug("%s: agent supports only-reload mode: %d\n", __func__,
GET_GLOBAL_STATE()->cap_only_reload_mode);
nyx_debug("%s: agent supports compile-time tracing: %d\n", __func__,
GET_GLOBAL_STATE()->cap_compile_time_tracing);

if (GET_GLOBAL_STATE()->cap_compile_time_tracing) {
GET_GLOBAL_STATE()->pt_trace_mode = false;

nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__,
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
kvm_arch_get_registers_fast(cpu);

nyx_debug("--------------------------\n");
nyx_debug("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_fd: %d\n", GET_GLOBAL_STATE()->shared_bitmap_fd);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_size: %x\n", GET_GLOBAL_STATE()->shared_bitmap_size);
nyx_debug("GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr: %lx\n",
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_fd: %d\n",
GET_GLOBAL_STATE()->shared_bitmap_fd);
nyx_debug("GET_GLOBAL_STATE()->shared_bitmap_size: %x\n",
GET_GLOBAL_STATE()->shared_bitmap_size);
nyx_debug("GET_GLOBAL_STATE()->cap_cr3: %lx\n", GET_GLOBAL_STATE()->cap_cr3);
nyx_debug("--------------------------\n");

if (GET_GLOBAL_STATE()->input_buffer_size != GET_GLOBAL_STATE()->shared_payload_buffer_size){
resize_shared_memory(GET_GLOBAL_STATE()->input_buffer_size, &GET_GLOBAL_STATE()->shared_payload_buffer_size, NULL, GET_GLOBAL_STATE()->shared_payload_buffer_fd);
GET_GLOBAL_STATE()->shared_payload_buffer_size = GET_GLOBAL_STATE()->input_buffer_size;
if (GET_GLOBAL_STATE()->input_buffer_size !=
GET_GLOBAL_STATE()->shared_payload_buffer_size)
{
resize_shared_memory(GET_GLOBAL_STATE()->input_buffer_size,
&GET_GLOBAL_STATE()->shared_payload_buffer_size,
NULL, GET_GLOBAL_STATE()->shared_payload_buffer_fd);
GET_GLOBAL_STATE()->shared_payload_buffer_size =
GET_GLOBAL_STATE()->input_buffer_size;
}

if (GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr & 0xfff) {
fprintf(stderr, "[QEMU-Nyx] Error: guest's trace bitmap v_addr (0x%lx) is not page aligned!\n", GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
fprintf(stderr, "[QEMU-Nyx] Error: guest's trace bitmap v_addr (0x%lx) is not page aligned!\n",
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr);
return false;
}

@ -147,40 +180,58 @@ bool apply_capabilities(CPUState *cpu){
resize_coverage_bitmap(GET_GLOBAL_STATE()->cap_coverage_bitmap_size);
}

for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr+ i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd, GET_GLOBAL_STATE()->shared_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
for (uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_bitmap_size; i += 0x1000)
{
assert(remap_slot(GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr +
i,
i / 0x1000, cpu, GET_GLOBAL_STATE()->shared_bitmap_fd,
GET_GLOBAL_STATE()->shared_bitmap_size, true,
GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}

if (GET_GLOBAL_STATE()->cap_ijon_tracing) {
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__, GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
nyx_debug("%s: agent trace buffer at vaddr: %lx\n", __func__,
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);

if (GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr & 0xfff) {
error_printf("[QEMU-Nyx] Error: guest's ijon buffer v_addr (0x%lx) is not page aligned!\n", GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
error_printf("[QEMU-Nyx] Error: guest's ijon buffer v_addr (0x%lx) is "
"not page aligned!\n",
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr);
return false;
}

kvm_arch_get_registers_fast(cpu);
for(uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_ijon_bitmap_size; i += 0x1000){
assert(remap_slot(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr + i, i/0x1000, cpu, GET_GLOBAL_STATE()->shared_ijon_bitmap_fd, GET_GLOBAL_STATE()->shared_ijon_bitmap_size+GET_GLOBAL_STATE()->shared_ijon_bitmap_size, true, GET_GLOBAL_STATE()->cap_cr3));
for (uint64_t i = 0; i < GET_GLOBAL_STATE()->shared_ijon_bitmap_size;
i += 0x1000)
{
assert(remap_slot(GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr + i,
i / 0x1000, cpu,
GET_GLOBAL_STATE()->shared_ijon_bitmap_fd,
GET_GLOBAL_STATE()->shared_ijon_bitmap_size +
GET_GLOBAL_STATE()->shared_ijon_bitmap_size,
true, GET_GLOBAL_STATE()->cap_cr3));
}
set_cap_agent_ijon_trace_bitmap(GET_GLOBAL_STATE()->auxilary_buffer, true);
}


/* pass the actual input buffer size to the front-end */
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_input_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
GET_GLOBAL_STATE()->auxilary_buffer->capabilites.agent_input_buffer_size =
GET_GLOBAL_STATE()->shared_payload_buffer_size;

return true;
}

bool folder_exits(const char* path){
bool folder_exits(const char *path)
{
struct stat sb;
return (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode));
}

bool file_exits(const char* path){
bool file_exits(const char *path)
{
struct stat sb;
return (stat(path, &sb) == 0);
}
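The rounding expression reflowed in resize_coverage_bitmap() is easy to misread after the wrap, so here is the same arithmetic in isolation: masking with ~(PAGE_SIZE - 1) truncates down to a page boundary, and adding PAGE_SIZE afterwards turns that into a round-up for any size that was not already aligned. A self-contained check, assuming the usual 4 KiB page:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 0x1000

    /* Mirrors the reflowed expression above: only unaligned sizes are bumped. */
    static uint32_t round_up_to_page(uint32_t size)
    {
        if (size & (PAGE_SIZE - 1)) {
            return (size & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
        }
        return size;
    }

    int main(void)
    {
        assert(round_up_to_page(0x1000) == 0x1000); /* already page-aligned */
        assert(round_up_to_page(0x1001) == 0x2000); /* bumped to the next page */
        assert(round_up_to_page(0x0001) == 0x1000);
        return 0;
    }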
@ -1,11 +1,14 @@
#include "qemu/osdep.h"

#include "nyx/state/state.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/configuration.h"
#include "nyx/memory_access.h"
#include "nyx/helpers.h"
#include "nyx/state/state.h"

void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_get_host_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
uint64_t vaddr = hypercall_arg;
host_config_t config;

@ -31,7 +34,10 @@ void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, u
GET_GLOBAL_STATE()->get_host_config_done = true;
}

void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
uint64_t vaddr = hypercall_arg;
agent_config_t config;

@ -48,9 +54,10 @@ void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu,
CPUX86State *env = &cpux86->env;

if (read_virtual_memory(vaddr, (uint8_t *)&config, sizeof(agent_config_t), cpu)) {

if (config.agent_magic != NYX_AGENT_MAGIC) {
fprintf(stderr, "[QEMU-Nyx] Error: NYX_AGENT_MAGIC not found in agent configuration - You are probably using an outdated agent...\n");
fprintf(stderr,
"[QEMU-Nyx] Error: NYX_AGENT_MAGIC not found in agent "
"configuration - You are probably using an outdated agent...\n");
exit(1);
}

@ -64,27 +71,36 @@ void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu,
}

GET_GLOBAL_STATE()->cap_timeout_detection = config.agent_timeout_detection;
GET_GLOBAL_STATE()->cap_only_reload_mode = !!!config.agent_non_reload_mode; /* fix this */
GET_GLOBAL_STATE()->cap_only_reload_mode =
!!!config.agent_non_reload_mode; /* fix this */
GET_GLOBAL_STATE()->cap_compile_time_tracing = config.agent_tracing;

if(!GET_GLOBAL_STATE()->cap_compile_time_tracing && !GET_GLOBAL_STATE()->nyx_fdl){
fprintf(stderr, "[QEMU-Nyx] Error: Attempt to fuzz target without compile-time instrumentation - Intel PT is not supported on this KVM build!\n");
if (!GET_GLOBAL_STATE()->cap_compile_time_tracing &&
!GET_GLOBAL_STATE()->nyx_fdl)
{
fprintf(
stderr,
"[QEMU-Nyx] Error: Attempt to fuzz target without compile-time "
"instrumentation - Intel PT is not supported on this KVM build!\n");
exit(1);
}

GET_GLOBAL_STATE()->cap_ijon_tracing = config.agent_ijon_tracing;

if (config.agent_tracing) {
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr = config.trace_buffer_vaddr;
GET_GLOBAL_STATE()->cap_compile_time_tracing_buffer_vaddr =
config.trace_buffer_vaddr;
GET_GLOBAL_STATE()->pt_trace_mode = false;
}
if (config.agent_ijon_tracing) {
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr = config.ijon_trace_buffer_vaddr;
GET_GLOBAL_STATE()->cap_ijon_tracing_buffer_vaddr =
config.ijon_trace_buffer_vaddr;
}

GET_GLOBAL_STATE()->cap_cr3 = env->cr[3];
GET_GLOBAL_STATE()->cap_coverage_bitmap_size = config.coverage_bitmap_size;
GET_GLOBAL_STATE()->input_buffer_size = GET_GLOBAL_STATE()->shared_payload_buffer_size;
GET_GLOBAL_STATE()->input_buffer_size =
GET_GLOBAL_STATE()->shared_payload_buffer_size;

if (config.input_buffer_size) {
abort();
@ -96,12 +112,13 @@ void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu,

if (getenv("DUMP_PAYLOAD_MODE")) {
config.dump_payloads = 1;
write_virtual_memory(vaddr, (uint8_t*)&config, sizeof(agent_config_t), cpu);
write_virtual_memory(vaddr, (uint8_t *)&config, sizeof(agent_config_t),
cpu);
}

}
else{
fprintf(stderr, "[QEMU-Nyx] Error: %s - failed (vaddr: 0x%lx)!\n", __func__, vaddr);
} else {
fprintf(stderr, "[QEMU-Nyx] Error: %s - failed (vaddr: 0x%lx)!\n", __func__,
vaddr);
exit(1);
}
GET_GLOBAL_STATE()->set_agent_config_done = true;
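handle_hypercall_kafl_set_agent_config() reads an agent_config_t out of guest memory and mirrors it into the global state. Only the field names are visible in this diff; the sketch below restates them for orientation, and the types, order, and packing are assumptions, the authoritative definition lives in the Nyx interface headers:

    /* Hypothetical reconstruction; see the Nyx interface headers for the real one. */
    typedef struct agent_config_sketch_s {
        uint32_t agent_magic;             /* checked against NYX_AGENT_MAGIC */
        uint8_t  agent_timeout_detection;
        uint8_t  agent_non_reload_mode;   /* inverted into cap_only_reload_mode */
        uint8_t  agent_tracing;
        uint8_t  agent_ijon_tracing;
        uint64_t trace_buffer_vaddr;
        uint64_t ijon_trace_buffer_vaddr;
        uint32_t coverage_bitmap_size;
        uint32_t input_buffer_size;       /* must be 0 here, or QEMU aborts */
        uint8_t  dump_payloads;           /* written back when DUMP_PAYLOAD_MODE is set */
    } agent_config_sketch_t;

As for the triple negation marked /* fix this */: !!x collapses any non-zero value to 1, so !!!x is logically identical to a single !x, which is presumably why the comment asks for a cleanup.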
@ -5,9 +5,13 @@

#include "sysemu/kvm.h"

void handle_hypercall_kafl_get_host_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_get_host_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);

void handle_hypercall_kafl_set_agent_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_set_agent_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);


#define NYX_HOST_MAGIC 0x4878794e
@ -1,25 +1,26 @@
#include "qemu/osdep.h"

#include <sys/time.h>
#include "sysemu/kvm.h"
#include <sys/time.h>

#include "nyx/synchronization.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/state/state.h"
#include "nyx/hypercall/debug.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"

// #define NYX_ENABLE_DEBUG_HYPERCALLS
#ifdef NYX_ENABLE_DEBUG_HYPERCALLS

static double get_time(void){
static double get_time(void)
{
struct timeval t;
struct timezone tzp;
gettimeofday(&t, &tzp);
return t.tv_sec + t.tv_usec * 1e-6;
}

static void print_time_diff(int iterations){

static void print_time_diff(int iterations)
{
static bool init = true;
static double start_time = 0.0;
static double end_time = 0.0;
@ -28,8 +29,7 @@ static void print_time_diff(int iterations){
init = false;
printf("start time is zero!\n");
start_time = get_time();
}
else{
} else {
end_time = get_time();
double elapsed_time = end_time - start_time;
printf("Done in %f seconds\n", elapsed_time);
@ -38,7 +38,8 @@ static void print_time_diff(int iterations){
}
}

static void meassure_performance(void){
static void meassure_performance(void)
{
static int perf_counter = 0;
if ((perf_counter % 1000) == 0) {
print_time_diff(1000);
@ -46,43 +47,56 @@ static void meassure_performance(void){
perf_counter++;
}

void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
static bool first = true;

switch (hypercall_arg & 0xFFF) {
case 0: /* create root snapshot */
if(!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_ROOT_EXISTS)){
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT);
if (!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state,
REQUEST_ROOT_EXISTS))
{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT);
}
break;
case 1: /* create tmp snapshot */
if(!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP);
if (!fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS))
{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_TMP);
}
break;
case 2: /* load root snapshot (+ discard tmp snapshot) */
if(fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){
if (fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS))
{
reload_request_discard_tmp(GET_GLOBAL_STATE()->reload_state);
}
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_ROOT);
meassure_performance();
break;
case 3: /* load tmp snapshot */
if(fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS)){
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_TMP);
if (fast_snapshot_exists(GET_GLOBAL_STATE()->reload_state, REQUEST_TMP_EXISTS))
{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_TMP);
meassure_performance();
}
break;
case 5: // firefox debug hypercall
if (first) {
first = false;
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT);
// request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP);

break;
}
else{
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT);
} else {
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_ROOT);
break;
}
default:
@ -91,8 +105,10 @@ void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu
}
#else /* NYX_ENABLE_DEBUG_HYPERCALLS */

void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){

void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
fprintf(stderr, "[QEMU-Nyx] Error: HYPERCALL_KAFL_DEBUG_TMP not enabled!\n");
set_abort_reason_auxiliary_buffer(
GET_GLOBAL_STATE()->auxilary_buffer,
@ -1,6 +1,8 @@
#pragma once

#include <stdint.h>
#include "sysemu/kvm.h"
#include <stdint.h>

void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_debug_tmp_snapshot(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
@ -21,38 +21,36 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.

#include "qemu/osdep.h"

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "qemu-common.h"
#include "exec/memory.h"
#include "qemu/main-loop.h"
#include "qemu-common.h"
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/hw_accel.h"


#include "sysemu/runstate.h"


#include "nyx/pt.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/memory_access.h"
#include "nyx/interface.h"
#include "nyx/debug.h"
#include "nyx/synchronization.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/kvm_nested.h"
#include "nyx/state/state.h"
#include "sysemu/runstate.h"
#include "nyx/helpers.h"
#include "nyx/nested_hypercalls.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/redqueen.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/configuration.h"
#include "nyx/hypercall/debug.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/kvm_nested.h"
#include "nyx/memory_access.h"
#include "nyx/nested_hypercalls.h"
#include "nyx/pt.h"
#include "nyx/redqueen.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"

// #define DEBUG_HPRINTF
#define HPRINTF_SIZE 0x1000 /* FIXME: take from nyx.h */
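The include churn above is clang-format's include sorting: each contiguous block of #include lines is sorted lexicographically, and since '"' orders before '<', quoted headers land ahead of angle-bracket headers inside the same block, while blocks separated by blank lines are sorted independently. The relevant options, with values assumed rather than taken from the repo's config:

    SortIncludes: true
    IncludeBlocks: Preserve

IncludeBlocks: Preserve is what keeps the qemu/, sysemu/, and nyx/ groups as separate sorted blocks instead of merging them into one list; it also removes duplicate entries within a block, which is why the repeated "sysemu/runstate.h" and "nyx/helpers.h" lines collapse here.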
@ -62,36 +60,42 @@ char hprintf_buffer[HPRINTF_SIZE];

static bool init_state = true;

void skip_init(void){
void skip_init(void)
{
init_state = false;
}

bool pt_hypercalls_enabled(void){
bool pt_hypercalls_enabled(void)
{
return hypercall_enabled;
}

void pt_setup_enable_hypercalls(void){
void pt_setup_enable_hypercalls(void)
{
hypercall_enabled = true;
}

void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end){
void pt_setup_ip_filters(uint8_t filter_id, uint64_t start, uint64_t end)
{
nyx_trace();
if (filter_id < INTEL_PT_MAX_RANGES) {

GET_GLOBAL_STATE()->pt_ip_filter_configured[filter_id] = true;
GET_GLOBAL_STATE()->pt_ip_filter_a[filter_id] = start;
GET_GLOBAL_STATE()->pt_ip_filter_b[filter_id] = end;

}
}

void hypercall_commit_filter(void){
void hypercall_commit_filter(void)
{
}

bool setup_snapshot_once = false;


bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
bool handle_hypercall_kafl_next_payload(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();

if (hypercall_enabled) {
@ -100,15 +104,16 @@ bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint
synchronization_lock();

} else {

if (GET_GLOBAL_STATE()->set_agent_config_done == false) {
nyx_abort((char*)"KVM_EXIT_KAFL_SET_AGENT_CONFIG was not called...");
nyx_abort(
(char *)"KVM_EXIT_KAFL_SET_AGENT_CONFIG was not called...");
return false;
}

if (!setup_snapshot_once) {
coverage_bitmap_reset();
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT_FIX_RIP);
setup_snapshot_once = true;

for (int i = 0; i < INTEL_PT_MAX_RANGES; i++) {
@ -117,12 +122,13 @@ bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint
}
}
pt_init_decoder(cpu);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_LOAD_SNAPSHOT_ROOT);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_LOAD_SNAPSHOT_ROOT);

GET_GLOBAL_STATE()->in_fuzzing_mode = true;
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
}
else{
set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
3);
} else {
synchronization_lock();
reset_timeout_detector(&GET_GLOBAL_STATE()->timeout_detector);
GET_GLOBAL_STATE()->in_fuzzing_mode = true;
@ -137,7 +143,8 @@ bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint
bool acquire_print_once_bool = true;
bool release_print_once_bool = true;

static void acquire_print_once(CPUState *cpu){
static void acquire_print_once(CPUState *cpu)
{
if (acquire_print_once_bool) {
acquire_print_once_bool = false;
kvm_arch_get_registers(cpu);
@ -145,7 +152,10 @@ static void acquire_print_once(CPUState *cpu){
}
}

void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
if (hypercall_enabled) {
if (!init_state) {
acquire_print_once(cpu);
@ -154,7 +164,10 @@ void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t
}
}

static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static void handle_hypercall_get_payload(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();

if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_GET_PAYLOAD")) {
@ -171,11 +184,13 @@ static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uin
kvm_arch_get_registers(cpu);
CPUX86State *env = &(X86_CPU(cpu))->env;
GET_GLOBAL_STATE()->parent_cr3 = env->cr[3] & 0xFFFFFFFFFFFFF000ULL;
nyx_debug_p(CORE_PREFIX, "Payload CR3:\t%lx", (uint64_t)GET_GLOBAL_STATE()->parent_cr3 );
nyx_debug_p(CORE_PREFIX, "Payload CR3:\t%lx",
(uint64_t)GET_GLOBAL_STATE()->parent_cr3);
// print_48_pagetables(GET_GLOBAL_STATE()->parent_cr3);

if (hypercall_arg & 0xFFF) {
fprintf(stderr, "[QEMU-Nyx] Error: Payload buffer is not page-aligned! (0x%lx)\n", hypercall_arg);
fprintf(stderr, "[QEMU-Nyx] Error: Payload buffer is not page-aligned! (0x%lx)\n",
hypercall_arg);
abort();
}

@ -184,14 +199,18 @@ static void handle_hypercall_get_payload(struct kvm_run *run, CPUState *cpu, uin
}
}

static void set_return_value(CPUState *cpu, uint64_t return_value){
static void set_return_value(CPUState *cpu, uint64_t return_value)
{
kvm_arch_get_registers(cpu);
CPUX86State *env = &(X86_CPU(cpu))->env;
env->regs[R_EAX] = return_value;
kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
}

static void handle_hypercall_kafl_req_stream_data(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static void handle_hypercall_kafl_req_stream_data(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
static uint8_t req_stream_buffer[0x1000];
if (is_called_in_fuzzing_mode("HYPERCALL_KAFL_REQ_STREAM_DATA")) {
return;
@ -202,12 +221,14 @@ static void handle_hypercall_kafl_req_stream_data(struct kvm_run *run, CPUState
if ((hypercall_arg & 0xFFF) != 0) {
nyx_debug("%s: ERROR -> address is not page aligned!\n", __func__);
set_return_value(cpu, 0xFFFFFFFFFFFFFFFFULL);
}
else{
} else {
read_virtual_memory(hypercall_arg, (uint8_t *)req_stream_buffer, 0x100, cpu);
uint64_t bytes = sharedir_request_file(GET_GLOBAL_STATE()->sharedir, (const char *)req_stream_buffer, req_stream_buffer);
uint64_t bytes = sharedir_request_file(GET_GLOBAL_STATE()->sharedir,
(const char *)req_stream_buffer,
req_stream_buffer);
if (bytes != 0xFFFFFFFFFFFFFFFFULL) {
write_virtual_memory(hypercall_arg, (uint8_t*)req_stream_buffer, bytes, cpu);
write_virtual_memory(hypercall_arg, (uint8_t *)req_stream_buffer, bytes,
cpu);
}
set_return_value(cpu, bytes);
}
@ -219,7 +240,10 @@ typedef struct req_data_bulk_s{
uint64_t addresses[479];
} req_data_bulk_t;

static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
static uint8_t req_stream_buffer[0x1000];
// static uint64_t addresses[512];
req_data_bulk_t req_data_bulk_data;
@ -233,26 +257,27 @@ static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUS
if ((hypercall_arg & 0xFFF) != 0) {
nyx_debug("%s: ERROR -> address is not page aligned!\n", __func__);
set_return_value(cpu, 0xFFFFFFFFFFFFFFFFUL);
}
else{
} else {
uint64_t bytes = 0;
read_virtual_memory(hypercall_arg, (uint8_t*)&req_data_bulk_data, 0x1000, cpu);
read_virtual_memory(hypercall_arg, (uint8_t *)&req_data_bulk_data, 0x1000,
cpu);

assert(req_data_bulk_data.num_addresses <= 479);
for (int i = 0; i < req_data_bulk_data.num_addresses; i++) {
uint64_t ret_val = sharedir_request_file(GET_GLOBAL_STATE()->sharedir, (const char *)req_data_bulk_data.file_name, req_stream_buffer);
uint64_t ret_val =
sharedir_request_file(GET_GLOBAL_STATE()->sharedir,
(const char *)req_data_bulk_data.file_name,
req_stream_buffer);
if (ret_val != 0xFFFFFFFFFFFFFFFFUL) {
bytes += ret_val;
write_virtual_memory((uint64_t)req_data_bulk_data.addresses[i], (uint8_t*)req_stream_buffer, ret_val, cpu);
}
else if(ret_val == 0){
write_virtual_memory((uint64_t)req_data_bulk_data.addresses[i],
(uint8_t *)req_stream_buffer, ret_val, cpu);
} else if (ret_val == 0) {
break;
}
else{
} else {
bytes = 0xFFFFFFFFFFFFFFFFUL;
break;
}

}

set_return_value(cpu, bytes);
@ -260,7 +285,10 @@ static void handle_hypercall_kafl_req_stream_data_bulk(struct kvm_run *run, CPUS
}


static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static void handle_hypercall_kafl_range_submit(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
uint64_t buffer[3];

if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_RANGE_SUBMIT")) {
@ -275,7 +303,9 @@ static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cp
}

if (GET_GLOBAL_STATE()->pt_ip_filter_configured[buffer[2]]) {
nyx_debug_p(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 1) - %ld", buffer[2]);
nyx_debug_p(CORE_PREFIX,
"Ignoring agent-provided address ranges (abort reason: 1) - %ld",
buffer[2]);
return;
}

@ -284,15 +314,17 @@ static void handle_hypercall_kafl_range_submit(struct kvm_run *run, CPUState *cp
GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]] = buffer[1];
GET_GLOBAL_STATE()->pt_ip_filter_configured[buffer[2]] = true;
nyx_debug_p(CORE_PREFIX, "Configuring agent-provided address ranges:");
nyx_debug_p(CORE_PREFIX, "\tIP%ld: %lx-%lx [ENABLED]", buffer[2], GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]], GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]]);
nyx_debug_p(CORE_PREFIX, "\tIP%ld: %lx-%lx [ENABLED]", buffer[2],
GET_GLOBAL_STATE()->pt_ip_filter_a[buffer[2]],
GET_GLOBAL_STATE()->pt_ip_filter_b[buffer[2]]);
} else {
nyx_debug_p(CORE_PREFIX,
"Ignoring agent-provided address ranges (abort reason: 2)");
}
else{
nyx_debug_p(CORE_PREFIX, "Ignoring agent-provided address ranges (abort reason: 2)");
}

}

static void release_print_once(CPUState *cpu){
static void release_print_once(CPUState *cpu)
{
if (release_print_once_bool) {
release_print_once_bool = false;
kvm_arch_get_registers(cpu);
@ -300,7 +332,10 @@ static void release_print_once(CPUState *cpu){
}
}

void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
if (hypercall_enabled) {
if (init_state) {
init_state = false;
@ -322,7 +357,8 @@ struct kvm_set_guest_debug_data {
int err;
};

void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg)
{
// assert(false);
kvm_arch_get_registers_fast(cpu);

@ -334,38 +370,41 @@ void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hype
kvm_insert_breakpoint(cpu, GET_GLOBAL_STATE()->dump_page_addr, 1, 1);
kvm_update_guest_debug(cpu, 0);

kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3, GET_GLOBAL_STATE()->pt_c3_filter);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SET_PAGE_DUMP_CR3,
GET_GLOBAL_STATE()->pt_c3_filter);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3);
}

void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page){
void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg,
uint64_t page)
{
// nyx_trace();
kvm_arch_get_registers_fast(cpu);
nyx_debug("%s --> %lx\n", __func__, get_rip(cpu));
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_MTF);

bool success = false;
//fprintf(stderr, "page_cache_fetch = %lx\n", page_cache_fetch(GET_GLOBAL_STATE()->page_cache, page, &success, false));
// fprintf(stderr, "page_cache_fetch = %lx\n",
// page_cache_fetch(GET_GLOBAL_STATE()->page_cache, page, &success, false));
page_cache_fetch(GET_GLOBAL_STATE()->page_cache, page, &success, false);
if (success) {

nyx_debug("%s: SUCCESS: %d\n", __func__, success);
kvm_remove_all_breakpoints(cpu);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);

}
else{
} else {
nyx_debug("%s: FAIL: %d\n", __func__, success);

kvm_remove_all_breakpoints(cpu);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_DISABLE_PAGE_DUMP_CR3);
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_MTF);
}

}

static inline void set_page_dump_bp(CPUState *cpu, uint64_t cr3, uint64_t addr){

static inline void set_page_dump_bp(CPUState *cpu, uint64_t cr3, uint64_t addr)
{
nyx_debug("%s --> %lx %lx\n", __func__, cr3, addr);
kvm_remove_all_breakpoints(cpu);
kvm_insert_breakpoint(cpu, addr, 1, 1);
@ -375,18 +414,24 @@ static inline void set_page_dump_bp(CPUState *cpu, uint64_t cr3, uint64_t addr){
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3);
}

static void handle_hypercall_kafl_cr3(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static void handle_hypercall_kafl_cr3(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
if (hypercall_enabled) {
// nyx_debug_p(CORE_PREFIX, "CR3 address:\t\t%lx", hypercall_arg);
pt_set_cr3(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL, false);
if (GET_GLOBAL_STATE()->dump_page) {
set_page_dump_bp(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL, GET_GLOBAL_STATE()->dump_page_addr);
set_page_dump_bp(cpu, hypercall_arg & 0xFFFFFFFFFFFFF000ULL,
GET_GLOBAL_STATE()->dump_page_addr);
}
}
}

static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){

static void handle_hypercall_kafl_submit_panic(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_SUBMIT_PANIC")) {
return;
}
@ -398,11 +443,13 @@ static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cp
case mm_32_protected:
case mm_32_paging:
case mm_32_pae:
write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD_32, PAYLOAD_BUFFER_SIZE_32, cpu);
write_virtual_memory(hypercall_arg, (uint8_t *)PANIC_PAYLOAD_32,
PAYLOAD_BUFFER_SIZE_32, cpu);
break;
case mm_64_l4_paging:
case mm_64_l5_paging:
write_virtual_memory(hypercall_arg, (uint8_t*)PANIC_PAYLOAD_64, PAYLOAD_BUFFER_SIZE_64, cpu);
write_virtual_memory(hypercall_arg, (uint8_t *)PANIC_PAYLOAD_64,
PAYLOAD_BUFFER_SIZE_64, cpu);
break;
default:
abort();
@ -411,7 +458,10 @@ static void handle_hypercall_kafl_submit_panic(struct kvm_run *run, CPUState *cp
}
}

static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
if (hypercall_enabled) {
nyx_debug_p(CORE_PREFIX, "kASAN address:\t%lx", hypercall_arg);

@ -419,11 +469,13 @@ static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run, CPUState *cp
case mm_32_protected:
case mm_32_paging:
case mm_32_pae:
write_virtual_memory(hypercall_arg, (uint8_t*)KASAN_PAYLOAD_32, PAYLOAD_BUFFER_SIZE_32, cpu);
write_virtual_memory(hypercall_arg, (uint8_t *)KASAN_PAYLOAD_32,
PAYLOAD_BUFFER_SIZE_32, cpu);
break;
case mm_64_l4_paging:
case mm_64_l5_paging:
write_virtual_memory(hypercall_arg, (uint8_t*)KASAN_PAYLOAD_64, PAYLOAD_BUFFER_SIZE_64, cpu);
write_virtual_memory(hypercall_arg, (uint8_t *)KASAN_PAYLOAD_64,
PAYLOAD_BUFFER_SIZE_64, cpu);
break;
default:
abort();
@ -432,107 +484,149 @@ static void handle_hypercall_kafl_submit_kasan(struct kvm_run *run, CPUState *cp
}
}

void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_panic(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
static char reason[1024];
if (hypercall_enabled) {
if(fast_reload_snapshot_exists(get_fast_reload_snapshot()) && GET_GLOBAL_STATE()->in_fuzzing_mode){
if (fast_reload_snapshot_exists(get_fast_reload_snapshot()) &&
GET_GLOBAL_STATE()->in_fuzzing_mode)
{
// TODO: either remove or document + and apply for kasan/timeout as well
if (hypercall_arg & 0x8000000000000000ULL) {

reason[0] = '\x00';

uint64_t address = hypercall_arg & 0x7FFFFFFFFFFFULL;
uint64_t signal = (hypercall_arg & 0x7800000000000ULL) >> 47;

snprintf(reason, 1024, "PANIC IN USER MODE (SIG: %d\tat 0x%lx)\n", (uint8_t)signal, address);
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, reason, strlen(reason));
}
else{
snprintf(reason, 1024, "PANIC IN USER MODE (SIG: %d\tat 0x%lx)\n",
(uint8_t)signal, address);
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
reason, strlen(reason));
} else {
switch (hypercall_arg) {
case 0:
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, (char*)"PANIC IN KERNEL MODE!\n", strlen("PANIC IN KERNEL MODE!\n"));
set_crash_reason_auxiliary_buffer(
GET_GLOBAL_STATE()->auxilary_buffer,
(char *)"PANIC IN KERNEL MODE!\n",
strlen("PANIC IN KERNEL MODE!\n"));
break;
case 1:
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, (char*)"PANIC IN USER MODE!\n", strlen("PANIC IN USER MODE!\n"));
set_crash_reason_auxiliary_buffer(
GET_GLOBAL_STATE()->auxilary_buffer,
(char *)"PANIC IN USER MODE!\n",
strlen("PANIC IN USER MODE!\n"));
break;
default:
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, (char*)"???\n", strlen("???\n"));
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
(char *)"???\n",
strlen("???\n"));
break;
}

}
synchronization_lock_crash_found();
} else {
nyx_abort((char*)"Agent has crashed before initializing the fuzzing loop...");
nyx_abort(
(char *)"Agent has crashed before initializing the fuzzing loop...");
}
}
}

static void handle_hypercall_kafl_create_tmp_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
static void handle_hypercall_kafl_create_tmp_snapshot(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
if (!fast_reload_tmp_created(get_fast_reload_snapshot())) {
|
||||
/* decode PT data */
|
||||
pt_disable(qemu_get_cpu(0), false);
|
||||
|
||||
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_TMP);
|
||||
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
|
||||
REQUEST_SAVE_SNAPSHOT_TMP);
|
||||
set_tmp_snapshot_created(GET_GLOBAL_STATE()->auxilary_buffer, 1);
|
||||
handle_hypercall_kafl_release(run, cpu, hypercall_arg);
|
||||
}
|
||||
else{
|
||||
} else {
|
||||
// TODO: raise an error?
|
||||
}
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_panic_extended(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
if(fast_reload_snapshot_exists(get_fast_reload_snapshot()) && GET_GLOBAL_STATE()->in_fuzzing_mode){
|
||||
read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu);
|
||||
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strlen(hprintf_buffer));
|
||||
static void handle_hypercall_kafl_panic_extended(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
if (fast_reload_snapshot_exists(get_fast_reload_snapshot()) &&
|
||||
GET_GLOBAL_STATE()->in_fuzzing_mode)
|
||||
{
|
||||
read_virtual_memory(hypercall_arg, (uint8_t *)hprintf_buffer, HPRINTF_SIZE,
|
||||
cpu);
|
||||
set_crash_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
|
||||
hprintf_buffer, strlen(hprintf_buffer));
|
||||
synchronization_lock_crash_found();
|
||||
} else {
|
||||
read_virtual_memory(hypercall_arg, (uint8_t*)hprintf_buffer, HPRINTF_SIZE, cpu);
|
||||
read_virtual_memory(hypercall_arg, (uint8_t *)hprintf_buffer, HPRINTF_SIZE,
|
||||
cpu);
|
||||
char *report = NULL;
|
||||
assert(asprintf(&report, "Agent has crashed before initializing the fuzzing loop: %s", hprintf_buffer) != -1);
|
||||
assert(asprintf(&report,
|
||||
"Agent has crashed before initializing the fuzzing loop: %s",
|
||||
hprintf_buffer) != -1);
|
||||
nyx_abort(report);
|
||||
}
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_kasan(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
static void handle_hypercall_kafl_kasan(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
if (hypercall_enabled) {
|
||||
if (fast_reload_snapshot_exists(get_fast_reload_snapshot())) {
|
||||
synchronization_lock_asan_found();
|
||||
} else {
|
||||
nyx_debug_p(CORE_PREFIX, "KASAN detected during initialization of stage 1 or stage 2 loader");
|
||||
nyx_debug_p(
|
||||
CORE_PREFIX,
|
||||
"KASAN detected during initialization of stage 1 or stage 2 loader");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_lock(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
|
||||
static void handle_hypercall_kafl_lock(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_LOCK")) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!GET_GLOBAL_STATE()->fast_reload_pre_image) {
|
||||
nyx_debug_p(CORE_PREFIX, "Skipping pre image creation (hint: set pre=on) ...");
|
||||
nyx_debug_p(CORE_PREFIX,
|
||||
"Skipping pre image creation (hint: set pre=on) ...");
|
||||
return;
|
||||
}
|
||||
|
||||
nyx_debug_p(CORE_PREFIX, "Creating pre image snapshot <%s> ...", GET_GLOBAL_STATE()->fast_reload_pre_path);
|
||||
nyx_debug_p(CORE_PREFIX, "Creating pre image snapshot <%s> ...",
|
||||
GET_GLOBAL_STATE()->fast_reload_pre_path);
|
||||
|
||||
nyx_debug("Creating pre image snapshot");
|
||||
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_PRE);
|
||||
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
|
||||
REQUEST_SAVE_SNAPSHOT_PRE);
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_printf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
static void handle_hypercall_kafl_printf(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
read_virtual_memory(hypercall_arg, (uint8_t *)hprintf_buffer, HPRINTF_SIZE, cpu);
|
||||
#ifdef DEBUG_HPRINTF
|
||||
fprintf(stderr, "%s %s\n", __func__, hprintf_buffer);
|
||||
#endif
|
||||
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, HPRINTF_SIZE));
|
||||
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer,
|
||||
strnlen(hprintf_buffer, HPRINTF_SIZE));
|
||||
synchronization_lock();
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
|
||||
static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_USER_RANGE_ADVISE")) {
|
||||
return;
|
||||
}
|
||||
@ -541,7 +635,8 @@ static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run, CPUStat
|
||||
|
||||
for (int i = 0; i < INTEL_PT_MAX_RANGES; i++) {
|
||||
buf->ip[i] = GET_GLOBAL_STATE()->pt_ip_filter_a[i];
|
||||
buf->size[i] = (GET_GLOBAL_STATE()->pt_ip_filter_b[i]-GET_GLOBAL_STATE()->pt_ip_filter_a[i]);
|
||||
buf->size[i] = (GET_GLOBAL_STATE()->pt_ip_filter_b[i] -
|
||||
GET_GLOBAL_STATE()->pt_ip_filter_a[i]);
|
||||
buf->enabled[i] = (uint8_t)GET_GLOBAL_STATE()->pt_ip_filter_configured[i];
|
||||
}
|
||||
|
||||
@ -549,7 +644,10 @@ static void handle_hypercall_kafl_user_range_advise(struct kvm_run *run, CPUStat
|
||||
free(buf);
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_user_submit_mode(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
static void handle_hypercall_kafl_user_submit_mode(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
nyx_trace();
|
||||
|
||||
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_USER_SUBMIT_MODE")) {
|
||||
@ -578,15 +676,23 @@ static void handle_hypercall_kafl_user_submit_mode(struct kvm_run *run, CPUState
|
||||
}
|
||||
}
|
||||
|
||||
bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
bool handle_hypercall_kafl_hook(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
X86CPU *cpux86 = X86_CPU(cpu);
|
||||
CPUX86State *env = &cpux86->env;
|
||||
|
||||
for (uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++) {
|
||||
if (GET_GLOBAL_STATE()->redqueen_state && (env->eip >= GET_GLOBAL_STATE()->pt_ip_filter_a[i]) && (env->eip <= GET_GLOBAL_STATE()->pt_ip_filter_b[i])){
|
||||
if (GET_GLOBAL_STATE()->redqueen_state &&
|
||||
(env->eip >= GET_GLOBAL_STATE()->pt_ip_filter_a[i]) &&
|
||||
(env->eip <= GET_GLOBAL_STATE()->pt_ip_filter_b[i]))
|
||||
{
|
||||
handle_hook(GET_GLOBAL_STATE()->redqueen_state);
|
||||
return true;
|
||||
}else if (cpu->singlestep_enabled && (GET_GLOBAL_STATE()->redqueen_state)->singlestep_enabled){
|
||||
} else if (cpu->singlestep_enabled &&
|
||||
(GET_GLOBAL_STATE()->redqueen_state)->singlestep_enabled)
|
||||
{
|
||||
handle_hook(GET_GLOBAL_STATE()->redqueen_state);
|
||||
return true;
|
||||
}
|
||||
@ -594,39 +700,51 @@ bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hyp
|
||||
return false;
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_user_abort(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
static void handle_hypercall_kafl_user_abort(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
read_virtual_memory(hypercall_arg, (uint8_t *)hprintf_buffer, HPRINTF_SIZE, cpu);
|
||||
set_abort_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strlen(hprintf_buffer));
|
||||
set_abort_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
|
||||
hprintf_buffer, strlen(hprintf_buffer));
|
||||
synchronization_lock();
|
||||
}
|
||||
|
||||
void pt_enable_rqi(CPUState *cpu){
|
||||
void pt_enable_rqi(CPUState *cpu)
|
||||
{
|
||||
GET_GLOBAL_STATE()->redqueen_enable_pending = true;
|
||||
}
|
||||
|
||||
void pt_disable_rqi(CPUState *cpu){
|
||||
void pt_disable_rqi(CPUState *cpu)
|
||||
{
|
||||
GET_GLOBAL_STATE()->redqueen_disable_pending = true;
|
||||
GET_GLOBAL_STATE()->redqueen_instrumentation_mode = REDQUEEN_NO_INSTRUMENTATION;
|
||||
}
|
||||
|
||||
void pt_set_enable_patches_pending(CPUState *cpu){
|
||||
void pt_set_enable_patches_pending(CPUState *cpu)
|
||||
{
|
||||
GET_GLOBAL_STATE()->patches_enable_pending = true;
|
||||
}
|
||||
|
||||
void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_mode){
|
||||
void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_mode)
|
||||
{
|
||||
GET_GLOBAL_STATE()->redqueen_instrumentation_mode = redqueen_mode;
|
||||
}
|
||||
|
||||
void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval){
|
||||
void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval)
|
||||
{
|
||||
assert(!newval || !GET_GLOBAL_STATE()->redqueen_update_blacklist);
|
||||
GET_GLOBAL_STATE()->redqueen_update_blacklist = newval;
|
||||
}
|
||||
|
||||
void pt_set_disable_patches_pending(CPUState *cpu){
|
||||
void pt_set_disable_patches_pending(CPUState *cpu)
|
||||
{
|
||||
GET_GLOBAL_STATE()->patches_disable_pending = true;
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg)
|
||||
static void handle_hypercall_kafl_dump_file(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
kafl_dump_file_t file_obj;
|
||||
char filename[256] = { 0 };
|
||||
@ -636,14 +754,19 @@ static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu,
|
||||
uint64_t vaddr = hypercall_arg;
|
||||
memset((void *)&file_obj, 0, sizeof(kafl_dump_file_t));
|
||||
|
||||
if (!read_virtual_memory(vaddr, (uint8_t*)&file_obj, sizeof(kafl_dump_file_t), cpu)){
|
||||
if (!read_virtual_memory(vaddr, (uint8_t *)&file_obj, sizeof(kafl_dump_file_t),
|
||||
cpu))
|
||||
{
|
||||
fprintf(stderr, "Failed to read file_obj in %s. Skipping..\n", __func__);
|
||||
goto err_out1;
|
||||
}
|
||||
|
||||
if (file_obj.file_name_str_ptr != 0) {
|
||||
if (!read_virtual_memory(file_obj.file_name_str_ptr, (uint8_t*)filename, sizeof(filename)-1, cpu)) {
|
||||
fprintf(stderr, "Failed to read file_name_str_ptr in %s. Skipping..\n", __func__);
|
||||
if (!read_virtual_memory(file_obj.file_name_str_ptr, (uint8_t *)filename,
|
||||
sizeof(filename) - 1, cpu))
|
||||
{
|
||||
fprintf(stderr, "Failed to read file_name_str_ptr in %s. Skipping..\n",
|
||||
__func__);
|
||||
goto err_out1;
|
||||
}
|
||||
filename[sizeof(filename) - 1] = 0;
|
||||
@ -658,7 +781,8 @@ static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu,
|
||||
}
|
||||
|
||||
char *base_name = basename(filename); // clobbers the filename buffer!
|
||||
assert(asprintf(&host_path, "%s/dump/%s", GET_GLOBAL_STATE()->workdir_path , base_name) != -1);
|
||||
assert(asprintf(&host_path, "%s/dump/%s", GET_GLOBAL_STATE()->workdir_path,
|
||||
base_name) != -1);
|
||||
|
||||
// check if base_name is mkstemp() pattern, otherwise write/append to exact name
|
||||
char *pattern = strstr(base_name, "XXXXXX");
|
||||
@ -666,7 +790,9 @@ static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu,
|
||||
unsigned suffix = strlen(pattern) - strlen("XXXXXX");
|
||||
f = fdopen(mkstemps(host_path, suffix), "w+");
|
||||
if (file_obj.append) {
|
||||
fprintf(stderr, "Warning in %s: Writing unique generated file in append mode?\n", __func__);
|
||||
fprintf(stderr,
|
||||
"Warning in %s: Writing unique generated file in append mode?\n",
|
||||
__func__);
|
||||
}
|
||||
} else {
|
||||
if (file_obj.append) {
|
||||
@ -686,29 +812,28 @@ static void handle_hypercall_kafl_dump_file(struct kvm_run *run, CPUState *cpu,
|
||||
void *page = malloc(PAGE_SIZE);
|
||||
uint32_t written = 0;
|
||||
|
||||
nyx_debug_p(CORE_PREFIX, "%s: dump %d bytes to %s (append=%u)",
|
||||
__func__, bytes, host_path, file_obj.append);
|
||||
nyx_debug_p(CORE_PREFIX, "%s: dump %d bytes to %s (append=%u)", __func__, bytes,
|
||||
host_path, file_obj.append);
|
||||
|
||||
while (bytes > 0) {
|
||||
|
||||
if (bytes >= PAGE_SIZE) {
|
||||
read_virtual_memory(file_obj.data_ptr+pos, (uint8_t*)page, PAGE_SIZE, cpu);
|
||||
read_virtual_memory(file_obj.data_ptr + pos, (uint8_t *)page, PAGE_SIZE,
|
||||
cpu);
|
||||
written = fwrite(page, 1, PAGE_SIZE, f);
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
read_virtual_memory(file_obj.data_ptr + pos, (uint8_t *)page, bytes, cpu);
|
||||
written = fwrite(page, 1, bytes, f);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!written) {
|
||||
fprintf(stderr, "Error in %s(%s): %s\n", host_path, __func__, strerror(errno));
|
||||
fprintf(stderr, "Error in %s(%s): %s\n", host_path, __func__,
|
||||
strerror(errno));
|
||||
goto err_out2;
|
||||
}
|
||||
|
||||
bytes -= written;
|
||||
pos += written;
|
||||
|
||||
}
|
||||
|
||||
|
||||
@ -719,20 +844,27 @@ err_out1:
|
||||
free(host_path);
|
||||
}
|
||||
|
||||
static void handle_hypercall_kafl_persist_page_past_snapshot(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
|
||||
|
||||
static void handle_hypercall_kafl_persist_page_past_snapshot(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall_arg)
|
||||
{
|
||||
if (is_called_in_fuzzing_mode("KVM_EXIT_KAFL_PERSIST_PAGE_PAST_SNAPSHOT")) {
|
||||
return;
|
||||
}
|
||||
|
||||
CPUX86State *env = &(X86_CPU(cpu))->env;
|
||||
kvm_arch_get_registers_fast(cpu);
|
||||
hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cpu, env->cr[3], hypercall_arg&(~0xFFF));
|
||||
hwaddr phys_addr =
|
||||
(hwaddr)get_paging_phys_addr(cpu, env->cr[3], hypercall_arg & (~0xFFF));
|
||||
assert(phys_addr != 0xffffffffffffffffULL);
|
||||
fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr);
|
||||
}
|
||||
|
||||
int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall, uint64_t arg){
|
||||
int handle_kafl_hypercall(struct kvm_run *run,
|
||||
CPUState *cpu,
|
||||
uint64_t hypercall,
|
||||
uint64_t arg)
|
||||
{
|
||||
int ret = -1;
|
||||
// fprintf(stderr, "%s -> %ld\n", __func__, hypercall);
|
||||
|
||||
@ -747,11 +879,13 @@ int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall
|
||||
ret = 0;
|
||||
break;
|
||||
case KVM_EXIT_KAFL_GET_PROGRAM:
|
||||
nyx_abort((char*)"Deprecated hypercall called (HYPERCALL_KAFL_GET_PROGRAM)...");
|
||||
nyx_abort(
|
||||
(char *)"Deprecated hypercall called (HYPERCALL_KAFL_GET_PROGRAM)...");
|
||||
ret = 0;
|
||||
break;
|
||||
case KVM_EXIT_KAFL_GET_ARGV:
|
||||
nyx_abort((char*)"Deprecated hypercall called (HYPERCALL_KAFL_GET_ARGV)...");
|
||||
nyx_abort(
|
||||
(char *)"Deprecated hypercall called (HYPERCALL_KAFL_GET_ARGV)...");
|
||||
ret = 0;
|
||||
break;
|
||||
case KVM_EXIT_KAFL_RELEASE:
|
||||
@ -795,7 +929,8 @@ int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall
|
||||
ret = 0;
|
||||
break;
|
||||
case KVM_EXIT_KAFL_PRINTK_ADDR:
|
||||
nyx_abort((char*)"Deprecated hypercall called (KVM_EXIT_KAFL_PRINTK_ADDR)...");
|
||||
nyx_abort(
|
||||
(char *)"Deprecated hypercall called (KVM_EXIT_KAFL_PRINTK_ADDR)...");
|
||||
ret = 0;
|
||||
break;
|
||||
case KVM_EXIT_KAFL_PRINTK:
|
||||
@ -901,4 +1036,3 @@ int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
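
For reference, the user-mode panic encoding that handle_hypercall_kafl_panic() unpacks above can be reproduced from the guest side. A minimal sketch, assuming the agent packs the argument itself; the helper name is illustrative and the vmcall wrapper is not shown, as neither appears in this commit:

/* Sketch: build the 64-bit argument the handler above decodes.
 * Bit 63 flags a user-mode crash, bits 50:47 carry the signal number
 * (the handler prints it as a uint8_t), bits 46:0 carry the address. */
#include <stdint.h>

static inline uint64_t encode_user_panic(uint8_t signal, uint64_t addr)
{
    return (1ULL << 63) |                       /* user-mode flag   */
           (((uint64_t)(signal & 0xF)) << 47) | /* signal, 4 bits   */
           (addr & 0x7FFFFFFFFFFFULL);          /* faulting address */
}
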
@ -50,7 +50,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD_64 "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define PANIC_PAYLOAD_64 \
"\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x08\x00\x00\x00\x48\xC7\xC1\x00" \
"\x00\x00\x00\x0F\x01\xC1\xF4"

/*
* Panic Notifier Payload (x86-32)
@ -61,7 +63,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define PANIC_PAYLOAD_32 "\xFA\xB8\x1F\x00\x00\x00\xBB\x08\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define PANIC_PAYLOAD_32 \
"\xFA\xB8\x1F\x00\x00\x00\xBB\x08\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1" \
"\xF4"

/*
* KASAN Notifier Payload (x86-64)
@ -72,7 +76,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD_64 "\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define KASAN_PAYLOAD_64 \
"\xFA\x48\xC7\xC0\x1F\x00\x00\x00\x48\xC7\xC3\x09\x00\x00\x00\x48\xC7\xC1\x00" \
"\x00\x00\x00\x0F\x01\xC1\xF4"

/*
* KASAN Notifier Payload (x86-32)
@ -83,7 +89,9 @@ bool check_bitmap_byte(uint32_t value);
* 0f 01 c1 vmcall
* f4 hlt
*/
#define KASAN_PAYLOAD_32 "\xFA\xB8\x1F\x00\x00\x00\xBB\x09\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1\xF4"
#define KASAN_PAYLOAD_32 \
"\xFA\xB8\x1F\x00\x00\x00\xBB\x09\x00\x00\x00\xB9\x00\x00\x00\x00\x0F\x01\xC1" \
"\xF4"
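
The reflowed payload literals above must stay byte-identical to the original one-line strings. A compile-time sketch of that invariant; the byte counts (26 for the x86-64 stubs, 20 for the x86-32 ones) are derived from the byte listings in the comments, while PAYLOAD_BUFFER_SIZE_32/64 themselves are defined elsewhere in this header:

/* Sketch: catch a payload literal silently changing length.
 * sizeof(a string literal) includes the trailing NUL, hence the -1. */
_Static_assert(sizeof(PANIC_PAYLOAD_64) - 1 == 26, "x86-64 panic stub resized");
_Static_assert(sizeof(KASAN_PAYLOAD_64) - 1 == 26, "x86-64 kasan stub resized");
_Static_assert(sizeof(PANIC_PAYLOAD_32) - 1 == 20, "x86-32 panic stub resized");
_Static_assert(sizeof(KASAN_PAYLOAD_32) - 1 == 20, "x86-32 kasan stub resized");
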
void pt_setup_program(void *ptr);
void pt_setup_snd_handler(void (*tmp)(char, void *), void *tmp_s);
@ -107,31 +115,50 @@ bool pt_hypercalls_enabled(void);
void hypercall_unlock(void);
void hypercall_reload(void);

void handle_hypercall_kafl_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_panic(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_panic(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);

void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg, uint64_t page);
void handle_hypercall_kafl_page_dump_bp(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg,
uint64_t page);


void hprintf(char *msg);

bool handle_hypercall_kafl_next_payload(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
bool handle_hypercall_kafl_next_payload(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void hypercall_reset_hprintf_counter(void);

bool handle_hypercall_kafl_hook(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_mtf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
bool handle_hypercall_kafl_hook(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_mtf(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void pt_enable_rqo(CPUState *cpu);
void pt_disable_rqo(CPUState *cpu);
void pt_enable_rqi(CPUState *cpu);
void pt_disable_rqi(CPUState *cpu);
void pt_set_redqueen_instrumentation_mode(CPUState *cpu, int redqueen_instruction_mode);
void pt_set_redqueen_instrumentation_mode(CPUState *cpu,
int redqueen_instruction_mode);
void pt_set_redqueen_update_blacklist(CPUState *cpu, bool newval);
void pt_set_enable_patches_pending(CPUState *cpu);
void pt_set_disable_patches_pending(CPUState *cpu);

void create_fast_snapshot(CPUState *cpu, bool nested);
int handle_kafl_hypercall(struct kvm_run *run, CPUState *cpu, uint64_t hypercall, uint64_t arg);
int handle_kafl_hypercall(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall,
uint64_t arg);

void skip_init(void);

@ -141,4 +168,3 @@ typedef struct kafl_dump_file_s{
uint64_t bytes;
uint8_t append;
} __attribute__((packed)) kafl_dump_file_t;
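
The packed kafl_dump_file_t descriptor above is filled in by the guest agent and read back by handle_hypercall_kafl_dump_file() in hypercall.c. A guest-side sketch of that handshake; kAFL_hypercall and HYPERCALL_KAFL_DUMP_FILE are illustrative names, not definitions from this diff:

/* Sketch (guest side): dump a buffer into <workdir>/dump/<name> on the host.
 * A name containing "XXXXXX" is treated as a mkstemps() pattern by the host. */
#include <stdint.h>

extern uintptr_t kAFL_hypercall(uintptr_t cmd, uintptr_t arg); /* hypothetical */
#define HYPERCALL_KAFL_DUMP_FILE 0x12 /* illustrative number only */

static void dump_to_host(const char *name, const void *data, uint64_t len)
{
    kafl_dump_file_t obj = {
        .file_name_str_ptr = (uintptr_t)name,
        .data_ptr          = (uintptr_t)data,
        .bytes             = len,
        .append            = 0,
    };
    kAFL_hypercall(HYPERCALL_KAFL_DUMP_FILE, (uintptr_t)&obj);
}
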
123 nyx/interface.c
@ -26,43 +26,42 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include <sys/stat.h>
#include <time.h>

#include "qapi/error.h"
#include "qemu/cutils.h"
#include "hw/qdev-properties.h"
#include "chardev/char-fe.h"
#include "exec/ram_addr.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/migration.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qom/object_interfaces.h"
#include "chardev/char-fe.h"
#include "sysemu/hostmem.h"
#include "sysemu/kvm.h"
#include "sysemu/qtest.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"
#include "pt.h"
#include "nyx/debug.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/debug.h"
#include "nyx/synchronization.h"
#include "nyx/snapshot/devices/state_reallocation.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"
#include "nyx/sharedir.h"
#include "nyx/helpers.h"
#include "nyx/snapshot/devices/state_reallocation.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
#include "nyx/trace_dump.h"
#include "pt.h"

#include "redqueen.h"

#define CONVERT_UINT64(x) (uint64_t)(strtoull(x, NULL, 16))

#define TYPE_NYX_MEM "nyx"
#define NYX_MEM(obj) \
OBJECT_CHECK(nyx_interface_state, (obj), TYPE_NYX_MEM)
#define NYX_MEM(obj) OBJECT_CHECK(nyx_interface_state, (obj), TYPE_NYX_MEM)

static void nyx_realize(DeviceState *dev, Error **errp);

@ -97,10 +96,12 @@ typedef struct nyx_interface_state {

} nyx_interface_state;

static void nyx_interface_event(void *opaque, int event){
static void nyx_interface_event(void *opaque, int event)
{
}

static void send_char(char val, void* tmp_s){
static void send_char(char val, void *tmp_s)
{
nyx_interface_state *s = tmp_s;

assert(val == NYX_INTERFACE_PING);
@ -109,18 +110,20 @@ static void send_char(char val, void* tmp_s){
qemu_chr_fe_write(&s->chr, (const uint8_t *)&val, 1);
}

static int nyx_interface_can_receive(void * opaque){
static int nyx_interface_can_receive(void *opaque)
{
return sizeof(int64_t);
}

static nyx_interface_state *state = NULL;

static void init_send_char(nyx_interface_state* s){
static void init_send_char(nyx_interface_state *s)
{
state = s;
}

bool interface_send_char(char val){

bool interface_send_char(char val)
{
if (state) {
send_char(val, state);
return true;
@ -128,7 +131,8 @@ bool interface_send_char(char val){
return false;
}

static void nyx_interface_receive(void *opaque, const uint8_t * buf, int size){
static void nyx_interface_receive(void *opaque, const uint8_t *buf, int size)
{
int i;
for (i = 0; i < size; i++) {
switch (buf[i]) {
@ -146,7 +150,11 @@ static void nyx_interface_receive(void *opaque, const uint8_t * buf, int size){
}
}

static int nyx_create_payload_buffer(nyx_interface_state *s, uint64_t buffer_size, const char* file, Error **errp){
static int nyx_create_payload_buffer(nyx_interface_state *s,
uint64_t buffer_size,
const char *file,
Error **errp)
{
void *ptr;
int fd;
struct stat st;
@ -154,7 +162,8 @@ static int nyx_create_payload_buffer(nyx_interface_state *s, uint64_t buffer_siz
fd = open(file, O_CREAT | O_RDWR, S_IRWXU | S_IRWXG | S_IRWXO);
assert(ftruncate(fd, buffer_size) == 0);
stat(file, &st);
nyx_debug_p(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", buffer_size, st.st_size);
nyx_debug_p(INTERFACE_PREFIX, "new shm file: (max size: %lx) %lx", buffer_size,
st.st_size);

assert(buffer_size == st.st_size);
ptr = mmap(0, buffer_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
@ -172,7 +181,10 @@ static int nyx_create_payload_buffer(nyx_interface_state *s, uint64_t buffer_siz
return 0;
}

static void nyx_guest_setup_bitmap(nyx_interface_state *s, char* filename, uint32_t bitmap_size){
static void nyx_guest_setup_bitmap(nyx_interface_state *s,
char *filename,
uint32_t bitmap_size)
{
void *ptr;
int fd;
struct stat st;
@ -189,7 +201,8 @@ static void nyx_guest_setup_bitmap(nyx_interface_state *s, char* filename, uint3
}


static void nyx_guest_setup_ijon_buffer(nyx_interface_state *s, char* filename){
static void nyx_guest_setup_ijon_buffer(nyx_interface_state *s, char *filename)
{
void *ptr;
int fd;
struct stat st;
@ -198,15 +211,16 @@ static void nyx_guest_setup_ijon_buffer(nyx_interface_state *s, char* filename){
assert(ftruncate(fd, DEFAULT_NYX_IJON_BITMAP_SIZE) == 0);
stat(filename, &st);
assert(DEFAULT_NYX_IJON_BITMAP_SIZE == st.st_size);
ptr = mmap(0, DEFAULT_NYX_IJON_BITMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
ptr = mmap(0, DEFAULT_NYX_IJON_BITMAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, 0);

GET_GLOBAL_STATE()->shared_ijon_bitmap_ptr = (void *)ptr;
GET_GLOBAL_STATE()->shared_ijon_bitmap_fd = fd;
GET_GLOBAL_STATE()->shared_ijon_bitmap_size = DEFAULT_NYX_IJON_BITMAP_SIZE;
}

static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

static bool verify_workdir_state(nyx_interface_state *s, Error **errp)
{
char *workdir = s->workdir;
uint32_t id = s->worker_id;
char *tmp;
@ -237,8 +251,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
nyx_error("Error: %s does not exist...\n", tmp);
free(tmp);
return false;
}
else {
} else {
nyx_create_payload_buffer(s, s->input_buffer_size, tmp, errp);
}
free(tmp);
@ -295,8 +308,7 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){
nyx_error("%s does not exist...\n", tmp);
free(tmp);
return false;
}
else {
} else {
setup_redqueen_workdir(tmp);
}
free(tmp);
@ -324,7 +336,8 @@ static bool verify_workdir_state(nyx_interface_state *s, Error **errp){

#define KVM_VMX_PT_GET_ADDRN _IO(KVMIO, 0xe9)

static void check_ipt_range(uint8_t i){
static void check_ipt_range(uint8_t i)
{
int ret = 0;
int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
ret = ioctl(kvm, KVM_VMX_PT_GET_ADDRN, NULL);
@ -341,7 +354,8 @@ static void check_ipt_range(uint8_t i){
close(kvm);
}

static void check_available_ipt_ranges(nyx_interface_state* s){
static void check_available_ipt_ranges(nyx_interface_state *s)
{
uint64_t addr_a, addr_b;

int kvm_fd = qemu_open("/dev/kvm", O_RDWR);
@ -350,7 +364,9 @@ static void check_available_ipt_ranges(nyx_interface_state* s){
exit(1);
}

if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) == 1 && ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) == 1) {
if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) == 1 &&
ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) == 1)
{
for (uint8_t i = 0; i < INTEL_PT_MAX_RANGES; i++) {
if (s->ip_filter[i][0] && s->ip_filter[i][1]) {
if (i >= 1) {
@ -367,8 +383,8 @@ static void check_available_ipt_ranges(nyx_interface_state* s){
close(kvm_fd);
}

static bool verify_sharedir_state(nyx_interface_state *s, Error **errp){

static bool verify_sharedir_state(nyx_interface_state *s, Error **errp)
{
char *sharedir = s->sharedir;

if (!folder_exits(sharedir)) {
@ -378,7 +394,8 @@ static bool verify_sharedir_state(nyx_interface_state *s, Error **errp){
}


static void nyx_realize(DeviceState *dev, Error **errp){
static void nyx_realize(DeviceState *dev, Error **errp)
{
nyx_interface_state *s = NYX_MEM(dev);

if (s->bitmap_size <= 0) {
@ -403,13 +420,14 @@ static void nyx_realize(DeviceState *dev, Error **errp){

if (!s->sharedir || !verify_sharedir_state(s, errp)) {
nyx_error("Warning: Invalid sharedir...\n");
}
else{
} else {
sharedir_set_dir(GET_GLOBAL_STATE()->sharedir, s->sharedir);
}

if (&s->chr) {
qemu_chr_fe_set_handlers(&s->chr, nyx_interface_can_receive, nyx_interface_receive, nyx_interface_event, NULL, s, NULL, true);
qemu_chr_fe_set_handlers(&s->chr, nyx_interface_can_receive,
nyx_interface_receive, nyx_interface_event, NULL, s,
NULL, true);
}

check_available_ipt_ranges(s);
@ -439,8 +457,14 @@ static Property nyx_interface_properties[] = {
DEFINE_PROP_STRING("ip3_b", nyx_interface_state, ip_filter[3][1]),


DEFINE_PROP_UINT32("bitmap_size", nyx_interface_state, bitmap_size, DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_UINT32("input_buffer_size", nyx_interface_state, input_buffer_size, DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_UINT32("bitmap_size",
nyx_interface_state,
bitmap_size,
DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_UINT32("input_buffer_size",
nyx_interface_state,
input_buffer_size,
DEFAULT_NYX_BITMAP_SIZE),
DEFINE_PROP_BOOL("dump_pt_trace", nyx_interface_state, dump_pt_trace, false),
DEFINE_PROP_BOOL("edge_cb_trace", nyx_interface_state, edge_cb_trace, false),

@ -448,7 +472,8 @@ static Property nyx_interface_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};

static void nyx_interface_class_init(ObjectClass *klass, void *data){
static void nyx_interface_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = nyx_realize;
dc->props = nyx_interface_properties;
@ -456,7 +481,8 @@ static void nyx_interface_class_init(ObjectClass *klass, void *data){
dc->desc = "Nyx Interface";
}

static void nyx_interface_init(Object *obj){
static void nyx_interface_init(Object *obj)
{
}

static const TypeInfo nyx_interface_info = {
@ -467,7 +493,8 @@ static const TypeInfo nyx_interface_info = {
.class_init = nyx_interface_class_init,
};

static void nyx_interface_register_types(void){
static void nyx_interface_register_types(void)
{
type_register_static(&nyx_interface_info);
}
157 nyx/kvm_nested.c
@ -1,13 +1,13 @@
#include "qemu/osdep.h"
#include <linux/kvm.h>
#include "sysemu/kvm.h"
#include <linux/kvm.h>

#include "qemu-common.h"
#include "nyx/kvm_nested.h"
#include "cpu.h"
#include "nyx/debug.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "qemu-common.h"
#include "cpu.h"
#include "nyx/debug.h"
#include "nyx/kvm_nested.h"
#include "nyx/state/state.h"
#include "pt.h"

@ -193,7 +193,8 @@ struct __attribute__((__packed__)) vmcs12 {
};


static void write_address(uint64_t address, uint64_t size, uint64_t prot){
static void write_address(uint64_t address, uint64_t size, uint64_t prot)
{
static uint64_t next_address = PAGETABLE_MASK;
static uint64_t last_address = 0x0;
static uint64_t last_prot = 0;
@ -206,11 +207,9 @@ static void write_address(uint64_t address, uint64_t size, uint64_t prot){
CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63) ? 'X' : '-');
}
else{
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c",
last_address, next_address,
CHECK_BIT(last_prot, 1) ? 'W' : '-',
} else {
nyx_debug_p(NESTED_VM_PREFIX, "%016lx - %016lx %c%c%c", last_address,
next_address, CHECK_BIT(last_prot, 1) ? 'W' : '-',
CHECK_BIT(last_prot, 2) ? 'U' : 'K',
!CHECK_BIT(last_prot, 63) ? 'X' : '-');
}
@ -219,56 +218,95 @@ static void write_address(uint64_t address, uint64_t size, uint64_t prot){
}
next_address = address + size;
last_prot = prot;

}

void print_48_paging(uint64_t cr3){
void print_48_paging(uint64_t cr3)
{
uint64_t paging_entries_level_1[PENTRIES];
uint64_t paging_entries_level_2[PENTRIES];
uint64_t paging_entries_level_3[PENTRIES];
uint64_t paging_entries_level_4[PENTRIES];

uint64_t address_identifier_1, address_identifier_2, address_identifier_3, address_identifier_4;
uint64_t address_identifier_1, address_identifier_2, address_identifier_3,
address_identifier_4;
uint32_t i1, i2, i3, i4;

cpu_physical_memory_rw((cr3&PAGETABLE_MASK), (uint8_t *) paging_entries_level_1, PPAGE_SIZE, false);
cpu_physical_memory_rw((cr3 & PAGETABLE_MASK), (uint8_t *)paging_entries_level_1,
PPAGE_SIZE, false);
for (i1 = 0; i1 < 512; i1++) {
if (paging_entries_level_1[i1]) {
address_identifier_1 = ((uint64_t)i1) << PLEVEL_1_SHIFT;
if (i1 & SIGN_EXTEND_TRESHOLD) {
address_identifier_1 |= SIGN_EXTEND;
}
if(CHECK_BIT(paging_entries_level_1[i1], 0)){ /* otherwise swapped out */
cpu_physical_memory_rw((paging_entries_level_1[i1]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_2, PPAGE_SIZE, false);
if (CHECK_BIT(paging_entries_level_1[i1], 0))
{ /* otherwise swapped out */
cpu_physical_memory_rw((paging_entries_level_1[i1] & PAGETABLE_MASK),
(uint8_t *)paging_entries_level_2, PPAGE_SIZE,
false);
for (i2 = 0; i2 < PENTRIES; i2++) {
if (paging_entries_level_2[i2]) {
address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) + address_identifier_1;
if (CHECK_BIT(paging_entries_level_2[i2], 0)){ /* otherwise swapped out */
if((paging_entries_level_2[i2]&PAGETABLE_MASK) == (paging_entries_level_1[i1]&PAGETABLE_MASK)){
address_identifier_2 = (((uint64_t)i2) << PLEVEL_2_SHIFT) +
address_identifier_1;
if (CHECK_BIT(paging_entries_level_2[i2], 0))
{ /* otherwise swapped out */
if ((paging_entries_level_2[i2] & PAGETABLE_MASK) ==
(paging_entries_level_1[i1] & PAGETABLE_MASK))
{
/* loop */
continue;
}

if (CHECK_BIT(paging_entries_level_2[i2], 7)) {
write_address(address_identifier_2, 0x40000000, (uint64_t)paging_entries_level_2[i2] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
write_address(address_identifier_2, 0x40000000,
(uint64_t)paging_entries_level_2[i2] &
((1ULL << 63) | (1ULL << 2) |
(1ULL << 1)));
} else {
/* otherwise this PDPE references a 1GB page */
cpu_physical_memory_rw((paging_entries_level_2[i2]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_3, PPAGE_SIZE, false);
cpu_physical_memory_rw((paging_entries_level_2[i2] &
PAGETABLE_MASK),
(uint8_t *)paging_entries_level_3,
PPAGE_SIZE, false);
for (i3 = 0; i3 < PENTRIES; i3++) {
if (paging_entries_level_3[i3]) {
address_identifier_3 = (((uint64_t)i3) << PLEVEL_3_SHIFT) + address_identifier_2;
if (CHECK_BIT(paging_entries_level_3[i3], 0)){ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_level_3[i3], 7)){
write_address(address_identifier_3, 0x200000, (uint64_t)paging_entries_level_3[i3] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
}
else{
cpu_physical_memory_rw((paging_entries_level_3[i3]&PAGETABLE_MASK), (uint8_t *) paging_entries_level_4, PPAGE_SIZE, false);
address_identifier_3 =
(((uint64_t)i3) << PLEVEL_3_SHIFT) +
address_identifier_2;
if (CHECK_BIT(paging_entries_level_3[i3], 0))
{ /* otherwise swapped out */
if (CHECK_BIT(paging_entries_level_3[i3],
7))
{
write_address(
address_identifier_3, 0x200000,
(uint64_t)paging_entries_level_3[i3] &
((1ULL << 63) | (1ULL << 2) |
(1ULL << 1)));
} else {
cpu_physical_memory_rw(
(paging_entries_level_3[i3] &
PAGETABLE_MASK),
(uint8_t *)paging_entries_level_4,
PPAGE_SIZE, false);
for (i4 = 0; i4 < PENTRIES; i4++) {
if (paging_entries_level_4[i4]) {
address_identifier_4 = (((uint64_t)i4) << PLEVEL_4_SHIFT) + address_identifier_3;
if (CHECK_BIT(paging_entries_level_4[i4], 0)){
write_address(address_identifier_4, 0x1000, (uint64_t)paging_entries_level_4[i4] & ((1ULL<<63) | (1ULL<<2) | (1ULL<<1)));
address_identifier_4 =
(((uint64_t)i4)
<< PLEVEL_4_SHIFT) +
address_identifier_3;
if (CHECK_BIT(
paging_entries_level_4[i4],
0))
{
write_address(
address_identifier_4,
0x1000,
(uint64_t)paging_entries_level_4
[i4] &
((1ULL << 63) |
(1ULL << 2) |
(1ULL << 1)));
}
}
}
@ -276,7 +314,6 @@ void print_48_paging(uint64_t cr3){
}
}
}

}
}
}
@ -287,8 +324,8 @@ void print_48_paging(uint64_t cr3){
write_address(0, 0x1000, 0);
}
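
The nested loops above rebuild canonical virtual addresses from the four page-table indices. A condensed sketch of that arithmetic, assuming the usual 4-level shifts (PLEVEL_1_SHIFT=39, PLEVEL_2_SHIFT=30, PLEVEL_3_SHIFT=21, PLEVEL_4_SHIFT=12) and the standard canonical sign extension; the actual constants are defined elsewhere in this file:

/* Sketch: recombine PML4/PDPT/PD/PT indices into the virtual address,
 * sign-extending the upper half exactly as the walk above does. */
#include <stdint.h>

static inline uint64_t rebuild_vaddr(uint32_t i1, uint32_t i2, uint32_t i3, uint32_t i4)
{
    uint64_t va = ((uint64_t)i1 << 39) | ((uint64_t)i2 << 30) |
                  ((uint64_t)i3 << 21) | ((uint64_t)i4 << 12);
    if (i1 & 0x100) {                /* cf. SIGN_EXTEND_TRESHOLD */
        va |= 0xFFFF000000000000ULL; /* cf. SIGN_EXTEND          */
    }
    return va;
}
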
uint64_t get_nested_guest_rip(CPUState *cpu){

uint64_t get_nested_guest_rip(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;

@ -298,8 +335,8 @@ uint64_t get_nested_guest_rip(CPUState *cpu){
return saved_vmcs->guest_rip;
}

uint64_t get_nested_host_rip(CPUState *cpu){

uint64_t get_nested_host_rip(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;

@ -309,8 +346,8 @@ uint64_t get_nested_host_rip(CPUState *cpu){
return saved_vmcs->host_rip;
}

uint64_t get_nested_host_cr3(CPUState *cpu){

uint64_t get_nested_host_cr3(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;

@ -320,8 +357,8 @@ uint64_t get_nested_host_cr3(CPUState *cpu){
return saved_vmcs->host_cr3;
}

void set_nested_rip(CPUState *cpu, uint64_t rip){

void set_nested_rip(CPUState *cpu, uint64_t rip)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;

@ -330,17 +367,19 @@ void set_nested_rip(CPUState *cpu, uint64_t rip){
saved_vmcs->guest_rip = rip;
}

void kvm_nested_get_info(CPUState *cpu){

void kvm_nested_get_info(CPUState *cpu)
{
X86CPU *cpux86 = X86_CPU(cpu);
CPUX86State *env = &cpux86->env;

kvm_vcpu_ioctl(cpu, KVM_GET_NESTED_STATE, env->nested_state);

__attribute__((unused)) struct vmcs12* saved_vmcs = (struct vmcs12*)&(env->nested_state->data);
__attribute__((unused)) struct vmcs12 *saved_vmcs =
(struct vmcs12 *)&(env->nested_state->data);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr3:\t%lx", saved_vmcs->host_cr3);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr4:\t%lx", saved_vmcs->host_cr4);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx", saved_vmcs->host_ia32_efer);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_ia32_efer:\t%lx",
saved_vmcs->host_ia32_efer);
nyx_debug_p(NESTED_VM_PREFIX, "VMCS host_cr0:\t%lx", saved_vmcs->host_cr0);

return;
@ -364,26 +403,36 @@ typedef struct {
uint8_t padding;
} config_t;

void print_configuration(FILE *stream, void* configuration, size_t size){
void print_configuration(FILE *stream, void *configuration, size_t size)
{
fprintf(stream, "%s: size: %lx\n", __func__, size);
assert((size - sizeof(config_t)) % sizeof(area_t_export_t) == 0);

assert(((config_t *)configuration)->magic == MAGIC_NUMBER);

fprintf(stream, "%s: num_mmio_areas: %x\n", __func__, ((config_t*)configuration)->num_mmio_areas);
fprintf(stream, "%s: num_io_areas: %x\n", __func__, ((config_t*)configuration)->num_io_areas);
fprintf(stream, "%s: num_alloc_areas: %x\n", __func__, ((config_t*)configuration)->num_alloc_areas);
fprintf(stream, "%s: num_mmio_areas: %x\n", __func__,
((config_t *)configuration)->num_mmio_areas);
fprintf(stream, "%s: num_io_areas: %x\n", __func__,
((config_t *)configuration)->num_io_areas);
fprintf(stream, "%s: num_alloc_areas: %x\n", __func__,
((config_t *)configuration)->num_alloc_areas);


for (int i = 0; i < ((config_t *)configuration)->num_mmio_areas; i++) {
fprintf(stream, "\t-> MMIO: 0x%x (V: 0x%x) [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base,
fprintf(stream, "\t-> MMIO: 0x%x (V: 0x%x) [0x%x]\t%s\n",
((area_t_export_t *)(configuration + sizeof(config_t)))[i].base,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].virtual_base,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].size,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].desc);
}

for(int i = ((config_t*)configuration)->num_mmio_areas; i < (((config_t*)configuration)->num_mmio_areas+((config_t*)configuration)->num_io_areas); i++){
fprintf(stream, "\t-> IO: 0x%x [0x%x]\t%s\n", ((area_t_export_t*)(configuration+sizeof(config_t)))[i].base,
for (int i = ((config_t *)configuration)->num_mmio_areas;
i < (((config_t *)configuration)->num_mmio_areas +
((config_t *)configuration)->num_io_areas);
i++)
{
fprintf(stream, "\t-> IO: 0x%x [0x%x]\t%s\n",
((area_t_export_t *)(configuration + sizeof(config_t)))[i].base,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].size,
((area_t_export_t *)(configuration + sizeof(config_t)))[i].desc);
}
@ -20,29 +20,32 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"

#include <errno.h>
#include "exec/gdbstub.h"
#include <errno.h>

#include "sysemu/sysemu.h"
#include "cpu.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "sysemu/sysemu.h"
#include "cpu.h"

#include "memory_access.h"
#include "nyx/hypercall/hypercall.h"
#include "debug.h"
#include "memory_access.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/state/state.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/state/state.h"

#define INVALID_ADDRESS 0xFFFFFFFFFFFFFFFFULL

static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr, bool read_from_snapshot);
static uint64_t get_48_paging_phys_addr(uint64_t cr3,
uint64_t addr,
bool read_from_snapshot);

#define x86_64_PAGE_SIZE 0x1000
#define x86_64_PAGE_MASK ~(x86_64_PAGE_SIZE - 1)

mem_mode_t get_current_mem_mode(CPUState *cpu){
mem_mode_t get_current_mem_mode(CPUState *cpu)
{
kvm_arch_get_registers(cpu);

X86CPU *cpux86 = X86_CPU(cpu);
@ -50,8 +53,7 @@ mem_mode_t get_current_mem_mode(CPUState *cpu){

if (!(env->cr[0] & CR0_PG_MASK)) {
return mm_32_protected;
}
else{
} else {
if (env->cr[4] & CR4_PAE_MASK) {
if (env->hflags & HF_LMA_MASK) {
if (env->cr[4] & CR4_LA57_MASK) {
@ -59,12 +61,10 @@ mem_mode_t get_current_mem_mode(CPUState *cpu){
} else {
return mm_64_l4_paging;
}
}
else{
} else {
return mm_32_pae;
}
}
else {
} else {
return mm_32_paging;
}
}
@ -72,12 +72,14 @@ mem_mode_t get_current_mem_mode(CPUState *cpu){
return mm_unkown;
}

static void set_mem_mode(CPUState *cpu){
static void set_mem_mode(CPUState *cpu)
{
GET_GLOBAL_STATE()->mem_mode = get_current_mem_mode(cpu);
}

/* Warning: This might break memory handling for hypervisor fuzzing => FIXME LATER */
uint64_t get_paging_phys_addr(CPUState *cpu, uint64_t cr3, uint64_t addr){
uint64_t get_paging_phys_addr(CPUState *cpu, uint64_t cr3, uint64_t addr)
{
if (GET_GLOBAL_STATE()->mem_mode == mm_unkown) {
set_mem_mode(cpu);
}
@ -103,7 +105,8 @@ uint64_t get_paging_phys_addr(CPUState *cpu, uint64_t cr3, uint64_t addr){
return 0;
}

static uint64_t get_paging_phys_addr_snapshot(CPUState *cpu, uint64_t cr3, uint64_t addr){
static uint64_t get_paging_phys_addr_snapshot(CPUState *cpu, uint64_t cr3, uint64_t addr)
{
if (GET_GLOBAL_STATE()->mem_mode == mm_unkown) {
set_mem_mode(cpu);
}
@ -129,32 +132,38 @@ static uint64_t get_paging_phys_addr_snapshot(CPUState *cpu, uint64_t cr3, uint6
return 0;
}

bool read_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){
bool read_physical_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu)
{
kvm_arch_get_registers(cpu);
cpu_physical_memory_read(address, data, size);
return true;
}

bool write_physical_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){
bool write_physical_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu)
{
kvm_arch_get_registers(cpu);
cpu_physical_memory_write(address, data, size);
return true;
}

static void refresh_kvm(CPUState *cpu){
static void refresh_kvm(CPUState *cpu)
{
if (!cpu->vcpu_dirty) {
kvm_arch_get_registers(cpu);
}
}

static void refresh_kvm_non_dirty(CPUState *cpu){
static void refresh_kvm_non_dirty(CPUState *cpu)
{
if (!cpu->vcpu_dirty) {
kvm_arch_get_registers_fast(cpu);
}
}

bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu){
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size);
bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu)
{
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd &&
GET_GLOBAL_STATE()->shared_payload_buffer_size);
RAMBlock *block;
refresh_kvm_non_dirty(cpu);

@ -164,8 +173,11 @@ bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu){
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
if (!memcmp(block->idstr, "pc.ram", 6)) {
/* TODO: put assert calls here */
munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE);
mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE));
munmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset),
x86_64_PAGE_SIZE);
mmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i * x86_64_PAGE_SIZE));

fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr);
break;
@ -175,7 +187,14 @@ bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu){
return true;
}

bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3){
bool remap_slot(uint64_t addr,
uint32_t slot,
CPUState *cpu,
int fd,
uint64_t shm_size,
bool virtual,
uint64_t cr3)
{
assert(fd && shm_size);
assert((slot * x86_64_PAGE_SIZE) < shm_size);

@ -189,8 +208,10 @@ bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t sh
phys_addr = get_paging_phys_addr(cpu, cr3, (addr & x86_64_PAGE_MASK));

if (phys_addr == INVALID_ADDRESS) {
fprintf(stderr, "[QEMU-Nyx] Error: failed to translate v_addr (0x%lx) to p_addr!\n", addr);
fprintf(stderr, "[QEMU-Nyx] Check if the buffer is present in the guest's memory...\n");
fprintf(stderr, "[QEMU-Nyx] Error: failed to translate v_addr (0x%lx) to p_addr!\n",
addr);
fprintf(stderr, "[QEMU-Nyx] Check if the buffer is present in the "
"guest's memory...\n");
exit(1);
}
}
@ -201,11 +222,16 @@ bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t sh
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
if (!memcmp(block->idstr, "pc.ram", 6)) {
/* TODO: put assert calls here */
if (munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE) == -1) {
if (munmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset),
x86_64_PAGE_SIZE) == -1)
{
nyx_error("%s: munmap failed!\n", __func__);
assert(false);
}
if (mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, (i*x86_64_PAGE_SIZE)) == MAP_FAILED) {
if (mmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset),
0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd,
(i * x86_64_PAGE_SIZE)) == MAP_FAILED)
{
nyx_error("%s: mmap failed!\n", __func__);
assert(false);
}
@ -218,8 +244,10 @@ bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t sh
return true;
}

bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu){
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size);
bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu)
{
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd &&
GET_GLOBAL_STATE()->shared_payload_buffer_size);
RAMBlock *block;
refresh_kvm_non_dirty(cpu);

@ -229,10 +257,12 @@ bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *c

QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
if (!memcmp(block->idstr, "pc.ram", 6)) {

/* TODO: put assert calls here */
munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE);
mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ , MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE));
munmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset),
x86_64_PAGE_SIZE);
mmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000,
PROT_READ, MAP_SHARED | MAP_FIXED,
GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i * x86_64_PAGE_SIZE));

fast_reload_blacklist_page(get_fast_reload_snapshot(), phys_addr);
break;
@ -242,7 +272,8 @@ bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *c
return true;
}
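
Both remap_payload_slot() variants above rely on one primitive: unmap the host page backing a guest-physical page and mmap() a page of a shared buffer over it with MAP_FIXED, so fuzzer and guest then share that memory directly. A condensed sketch of that step, with the error handling reduced to asserts:

/* Sketch: alias page `slot` of a shared buffer fd over one page of the
 * guest's RAM backing (ram_host_base + offset). */
#include <stdint.h>
#include <assert.h>
#include <sys/mman.h>

#define GUEST_PAGE_SIZE 0x1000ULL

static void remap_one_page(void *ram_host_base, uint64_t offset, int shm_fd, uint32_t slot)
{
    void *target = (void *)((uint64_t)ram_host_base + offset);
    assert(munmap(target, GUEST_PAGE_SIZE) == 0);
    assert(mmap(target, GUEST_PAGE_SIZE, PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, shm_fd,
                (off_t)(slot * GUEST_PAGE_SIZE)) != MAP_FAILED);
}
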
void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr, int fd){
|
||||
void resize_shared_memory(uint32_t new_size, uint32_t *shm_size, void **shm_ptr, int fd)
|
||||
{
|
||||
assert(fd && *shm_size);
|
||||
|
||||
/* check if the new_size is a multiple of PAGE_SIZE */
|
||||
@ -260,21 +291,29 @@ void resize_shared_memory(uint32_t new_size, uint32_t* shm_size, void** shm_ptr,
|
||||
|
||||
if (shm_ptr) {
|
||||
munmap(*shm_ptr, *shm_size);
|
||||
*shm_ptr = (void*)mmap(0, new_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
|
||||
*shm_ptr =
|
||||
(void *)mmap(0, new_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
|
||||
assert(*shm_ptr != MAP_FAILED);
|
||||
}
|
||||
|
||||
*shm_size = new_size;
|
||||
}
|
||||
|
||||
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu){
|
||||
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd && GET_GLOBAL_STATE()->shared_payload_buffer_size);
|
||||
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu)
|
||||
{
|
||||
assert(GET_GLOBAL_STATE()->shared_payload_buffer_fd &&
|
||||
GET_GLOBAL_STATE()->shared_payload_buffer_size);
|
||||
assert(GET_GLOBAL_STATE()->shared_payload_buffer_size % x86_64_PAGE_SIZE == 0);
|
||||
RAMBlock *block;
|
||||
refresh_kvm_non_dirty(cpu);
|
||||
|
||||
for(uint32_t i = 0; i < (GET_GLOBAL_STATE()->shared_payload_buffer_size/x86_64_PAGE_SIZE); i++){
|
||||
uint64_t phys_addr = get_paging_phys_addr(cpu, GET_GLOBAL_STATE()->parent_cr3, ((virt_guest_addr+(i*x86_64_PAGE_SIZE)) & x86_64_PAGE_MASK));
|
||||
for (uint32_t i = 0;
|
||||
i < (GET_GLOBAL_STATE()->shared_payload_buffer_size / x86_64_PAGE_SIZE); i++)
|
||||
{
|
||||
uint64_t phys_addr =
|
||||
get_paging_phys_addr(cpu, GET_GLOBAL_STATE()->parent_cr3,
|
||||
((virt_guest_addr + (i * x86_64_PAGE_SIZE)) &
|
||||
x86_64_PAGE_MASK));
|
||||
|
||||
assert(phys_addr != INVALID_ADDRESS);
|
||||
|
||||
@ -282,11 +321,17 @@ bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu){
|
||||
|
||||
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
|
||||
if (!memcmp(block->idstr, "pc.ram", 6)) {
|
||||
if(munmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), x86_64_PAGE_SIZE) == -1){
|
||||
if (munmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset),
|
||||
x86_64_PAGE_SIZE) == -1)
|
||||
{
|
||||
nyx_error("munmap failed!\n");
|
||||
assert(false);
|
||||
}
|
||||
if(mmap((void*)(((uint64_t)block->host) + phys_addr_ram_offset), 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, GET_GLOBAL_STATE()->shared_payload_buffer_fd, (i*x86_64_PAGE_SIZE)) == MAP_FAILED){
|
||||
if (mmap((void *)(((uint64_t)block->host) + phys_addr_ram_offset),
|
||||
0x1000, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
|
||||
GET_GLOBAL_STATE()->shared_payload_buffer_fd,
|
||||
(i * x86_64_PAGE_SIZE)) == MAP_FAILED)
|
||||
{
|
||||
nyx_error("mmap failed!\n");
|
||||
assert(false);
|
||||
}
|
||||
@ -324,7 +369,8 @@ bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUSta
|
||||
refresh_kvm(cpu);
|
||||
asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
|
||||
attrs = MEMTXATTRS_UNSPECIFIED;
|
||||
phys_addr = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs);
|
||||
phys_addr =
|
||||
cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs);
|
||||
|
||||
if (phys_addr == INVALID_ADDRESS) {
|
||||
nyx_debug_p(MEM_PREFIX, "phys_addr == -1:\t%lx", address);
|
||||
@ -332,7 +378,8 @@ bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUSta
|
||||
}
|
||||
|
||||
phys_addr += (address & ~x86_64_PAGE_MASK);
|
||||
res = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, l, true);
|
||||
res = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr,
|
||||
MEMTXATTRS_UNSPECIFIED, data, l, true);
|
||||
if (res != MEMTX_OK) {
|
||||
nyx_debug_p(MEM_PREFIX, "!MEMTX_OK:\t%lx", address);
|
||||
return false;
|
||||
@ -348,7 +395,8 @@ bool write_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUSta
|
||||
}
|
||||
|
||||
|
||||
void hexdump_virtual_memory(uint64_t address, uint32_t size, CPUState *cpu){
void hexdump_virtual_memory(uint64_t address, uint32_t size, CPUState *cpu)
{
assert(size < 0x100000); // 1MB max
uint64_t i = 0;
uint8_t tmp[17];
@ -387,11 +435,15 @@ static int redqueen_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint
{
static const uint8_t int3 = 0xcc;

hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cs, GET_GLOBAL_STATE()->parent_cr3, bp->pc);
hwaddr phys_addr =
(hwaddr)get_paging_phys_addr(cs, GET_GLOBAL_STATE()->parent_cr3, bp->pc);
int asidx = cpu_asidx_from_attrs(cs, MEMTXATTRS_UNSPECIFIED);

if (address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&bp->saved_insn, 1, 0) ||
address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&int3, 1, 1)) {
if (address_space_rw(cpu_get_address_space(cs, asidx), phys_addr,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)&bp->saved_insn, 1, 0) ||
address_space_rw(cpu_get_address_space(cs, asidx), phys_addr,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)&int3, 1, 1))
{
// fprintf(stderr, "%s WRITE AT %lx %lx failed!\n", __func__, bp->pc, phys_addr);
return -EINVAL;
}
@ -403,11 +455,16 @@ static int redqueen_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint
{
uint8_t int3;

hwaddr phys_addr = (hwaddr) get_paging_phys_addr(cs, GET_GLOBAL_STATE()->parent_cr3, bp->pc);
hwaddr phys_addr =
(hwaddr)get_paging_phys_addr(cs, GET_GLOBAL_STATE()->parent_cr3, bp->pc);
int asidx = cpu_asidx_from_attrs(cs, MEMTXATTRS_UNSPECIFIED);

if (address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&int3, 1, 0) || int3 != 0xcc ||
address_space_rw(cpu_get_address_space(cs, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, (uint8_t *)&bp->saved_insn, 1, 1)) {
if (address_space_rw(cpu_get_address_space(cs, asidx), phys_addr,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)&int3, 1, 0) ||
int3 != 0xcc ||
address_space_rw(cpu_get_address_space(cs, asidx), phys_addr,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)&bp->saved_insn, 1, 1))
{
// fprintf(stderr, "%s failed\n", __func__);
return -EINVAL;
}
@ -415,7 +472,9 @@ static int redqueen_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint
return 0;
}

static struct kvm_sw_breakpoint *redqueen_find_breakpoint(CPUState *cpu, target_ulong pc){
static struct kvm_sw_breakpoint *redqueen_find_breakpoint(CPUState *cpu,
target_ulong pc)
{
struct kvm_sw_breakpoint *bp;

QTAILQ_FOREACH (bp, &GET_GLOBAL_STATE()->redqueen_breakpoints, entry) {
@ -426,7 +485,8 @@ static struct kvm_sw_breakpoint *redqueen_find_breakpoint(CPUState *cpu, target_
return NULL;
}

static int redqueen_breakpoints_active(CPUState *cpu){
static int redqueen_breakpoints_active(CPUState *cpu)
{
return !QTAILQ_EMPTY(&GET_GLOBAL_STATE()->redqueen_breakpoints);
}

@ -435,7 +495,8 @@ struct kvm_set_guest_debug_data {
int err;
};

static int redqueen_update_guest_debug(CPUState *cpu) {
static int redqueen_update_guest_debug(CPUState *cpu)
{
struct kvm_set_guest_debug_data data;

data.dbg.control = 0;
@ -447,7 +508,8 @@ static int redqueen_update_guest_debug(CPUState *cpu) {
return kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, &data.dbg);
}

static void redqueen_remove_all_breakpoints(CPUState *cpu) {
static void redqueen_remove_all_breakpoints(CPUState *cpu)
{
struct kvm_sw_breakpoint *bp, *next;

QTAILQ_FOREACH_SAFE (bp, &GET_GLOBAL_STATE()->redqueen_breakpoints, entry, next) {
@ -459,7 +521,8 @@ static void redqueen_remove_all_breakpoints(CPUState *cpu) {
redqueen_update_guest_debug(cpu);
}

static int redqueen_insert_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len){
static int redqueen_insert_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len)
{
struct kvm_sw_breakpoint *bp;
int err;

@ -489,7 +552,8 @@ static int redqueen_insert_breakpoint(CPUState *cpu, target_ulong addr, target_u
return 0;
}

static int redqueen_remove_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len){
static int redqueen_remove_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len)
{
struct kvm_sw_breakpoint *bp;
int err;

@ -519,20 +583,23 @@ static int redqueen_remove_breakpoint(CPUState *cpu, target_ulong addr, target_u
return 0;
}

int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len){
int insert_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len)
{
redqueen_insert_breakpoint(cpu, addr, len);
redqueen_update_guest_debug(cpu);
return 0;
}

int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len){
int remove_breakpoint(CPUState *cpu, uint64_t addr, uint64_t len)
{
redqueen_remove_breakpoint(cpu, addr, len);
redqueen_update_guest_debug(cpu);
return 0;
}

void remove_all_breakpoints(CPUState *cpu){
void remove_all_breakpoints(CPUState *cpu)
{
redqueen_remove_all_breakpoints(cpu);
}

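The redqueen breakpoint helpers above implement the classic software-breakpoint pattern: save the original byte at the target address, overwrite it with the x86 int3 opcode (0xcc), and restore the saved byte on removal. A minimal standalone sketch of that pattern follows; guest_read_byte()/guest_write_byte() are hypothetical stand-ins for the address_space_rw() calls used above, not functions from this codebase.

    #include <stdint.h>
    #include <stdbool.h>

    #define INT3_OPCODE 0xcc

    /* Hypothetical guest-memory accessors (assumed to be provided elsewhere). */
    bool guest_read_byte(uint64_t phys_addr, uint8_t *out);
    bool guest_write_byte(uint64_t phys_addr, uint8_t value);

    typedef struct {
        uint64_t pc;         /* guest address of the breakpoint   */
        uint8_t  saved_insn; /* original byte, restored on remove */
    } sw_breakpoint_t;

    /* Arm: remember the original byte, then plant 0xcc. */
    static bool sw_breakpoint_insert(sw_breakpoint_t *bp, uint64_t phys_addr)
    {
        if (!guest_read_byte(phys_addr, &bp->saved_insn))
            return false;
        return guest_write_byte(phys_addr, INT3_OPCODE);
    }

    /* Disarm: verify 0xcc is still in place, then restore the byte. */
    static bool sw_breakpoint_remove(sw_breakpoint_t *bp, uint64_t phys_addr)
    {
        uint8_t cur;
        if (!guest_read_byte(phys_addr, &cur) || cur != INT3_OPCODE)
            return false;
        return guest_write_byte(phys_addr, bp->saved_insn);
    }
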
@ -540,15 +607,18 @@ void remove_all_breakpoints(CPUState *cpu){
#define PENTRIES 0x200
#define PPAGE_SIZE 0x1000

static bool read_memory(uint64_t address, uint64_t* buffer, size_t size, bool read_from_snapshot) {
static bool read_memory(uint64_t address,
uint64_t *buffer,
size_t size,
bool read_from_snapshot)
{
if (unlikely(address == INVALID_ADDRESS)) {
return false;
}

if (unlikely(read_from_snapshot)) {
return read_snapshot_memory(
get_fast_reload_snapshot(),
address, (uint8_t *)buffer, size);
return read_snapshot_memory(get_fast_reload_snapshot(), address,
(uint8_t *)buffer, size);
}

// NB: This API exposed by exec.h doesn't signal failure, although it can
@ -560,20 +630,25 @@ static bool read_memory(uint64_t address, uint64_t* buffer, size_t size, bool re
return true;
}

__attribute__((always_inline)) inline
static bool bit(uint64_t value, uint8_t lsb) {
__attribute__((always_inline)) inline static bool bit(uint64_t value, uint8_t lsb)
{
return (value >> lsb) & 1;
}

__attribute__((always_inline)) inline
static uint64_t bits(uint64_t value, uint8_t lsb, uint8_t msb) {
__attribute__((always_inline)) inline static uint64_t bits(uint64_t value,
uint8_t lsb,
uint8_t msb)
{
return (value & ((0xffffffffffffffffull >> (64 - (msb - lsb + 1))) << lsb)) >> lsb;
}

// Helper function to load an entire pagetable table. These are PENTRIES
// 64-bit entries, so entries must point to a sufficiently large buffer.
static bool load_table(uint64_t address, uint64_t* entries, bool read_from_snapshot) {
if (unlikely(!read_memory(address, entries, 512 * sizeof(*entries), read_from_snapshot))) {
static bool load_table(uint64_t address, uint64_t *entries, bool read_from_snapshot)
{
if (unlikely(!read_memory(address, entries, 512 * sizeof(*entries),
read_from_snapshot)))
{
return false;
}

@ -584,11 +659,12 @@ static bool load_table(uint64_t address, uint64_t* entries, bool read_from_snaps
// returning the same invalid value (0) for both non-present entries and
// any other error conditions, since we don't need to handle these cases
// differently.
static uint64_t load_entry(uint64_t address, uint64_t index,
bool read_from_snapshot) {
static uint64_t load_entry(uint64_t address, uint64_t index, bool read_from_snapshot)
{
uint64_t entry = 0;
if (unlikely(!read_memory(address + (index * sizeof(entry)), &entry, sizeof(entry),
read_from_snapshot))) {
if (unlikely(!read_memory(address + (index * sizeof(entry)), &entry,
sizeof(entry), read_from_snapshot)))
{
return 0;
}

@ -600,14 +676,20 @@ static uint64_t load_entry(uint64_t address, uint64_t index,
return entry;
}

static void print_page(uint64_t address, uint64_t entry, size_t size, bool s, bool w, bool x) {
fprintf(stderr, " %c%c%c %016lx %zx",
s ? 's' : 'u', w ? 'w' : 'r', x ? 'x' : '-',
(bits(entry, 12, 51) << 12) & ~(size - 1), size);
static void print_page(
uint64_t address, uint64_t entry, size_t size, bool s, bool w, bool x)
{
fprintf(stderr, " %c%c%c %016lx %zx", s ? 's' : 'u', w ? 'w' : 'r',
x ? 'x' : '-', (bits(entry, 12, 51) << 12) & ~(size - 1), size);
}

static void print_48_pte(uint64_t address, uint64_t pde_entry, bool read_from_snapshot,
bool s, bool w, bool x) {
static void print_48_pte(uint64_t address,
uint64_t pde_entry,
bool read_from_snapshot,
bool s,
bool w,
bool x)
{
uint64_t pte_address = bits(pde_entry, 12, 51) << 12;
uint64_t pte_table[PENTRIES];

@ -625,14 +707,19 @@ static void print_48_pte(uint64_t address, uint64_t pde_entry, bool read_from_sn
if (!bit(entry, 0)) {
// Not present.
} else {
print_page(address | i << 12, entry, 0x1000,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
print_page(address | i << 12, entry, 0x1000, s & !bit(entry, 2),
w & bit(entry, 1), x & !bit(entry, 63));
}
}
}

static void print_48_pde(uint64_t address, uint64_t pdpte_entry, bool read_from_snapshot,
bool s, bool w, bool x) {
static void print_48_pde(uint64_t address,
uint64_t pdpte_entry,
bool read_from_snapshot,
bool s,
bool w,
bool x)
{
uint64_t pde_address = bits(pdpte_entry, 12, 51) << 12;
uint64_t pde_table[PENTRIES];

@ -650,8 +737,8 @@ static void print_48_pde(uint64_t address, uint64_t pdpte_entry, bool read_from_
if (!bit(entry, 0)) {
// Not present.
} else if (bit(entry, 7)) {
print_page(address | i << 21, entry, 0x200000,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
print_page(address | i << 21, entry, 0x200000, s & !bit(entry, 2),
w & bit(entry, 1), x & !bit(entry, 63));
} else {
print_48_pte(address | i << 21, entry, read_from_snapshot,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
@ -659,8 +746,13 @@ static void print_48_pde(uint64_t address, uint64_t pdpte_entry, bool read_from_
}
}

static void print_48_pdpte(uint64_t address, uint64_t pml4_entry, bool read_from_snapshot,
bool s, bool w, bool x) {
static void print_48_pdpte(uint64_t address,
uint64_t pml4_entry,
bool read_from_snapshot,
bool s,
bool w,
bool x)
{
uint64_t pdpte_address = bits(pml4_entry, 12, 51) << 12;
uint64_t pdpte_table[PENTRIES];

@ -678,8 +770,8 @@ static void print_48_pdpte(uint64_t address, uint64_t pml4_entry, bool read_from
if (!bit(entry, 0)) {
// Not present.
} else if (bit(entry, 7)) {
print_page(address | i << 30, entry, 0x40000000,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
print_page(address | i << 30, entry, 0x40000000, s & !bit(entry, 2),
w & bit(entry, 1), x & !bit(entry, 63));
} else {
print_48_pde(address | i << 30, entry, read_from_snapshot,
s & !bit(entry, 2), w & bit(entry, 1), x & !bit(entry, 63));
@ -687,7 +779,8 @@ static void print_48_pdpte(uint64_t address, uint64_t pml4_entry, bool read_from
}
}

static void print_48_pagetables_(uint64_t cr3, bool read_from_snapshot) {
static void print_48_pagetables_(uint64_t cr3, bool read_from_snapshot)
{
uint64_t pml4_address = bits(cr3, 12, 51) << 12;
uint64_t pml4_table[PENTRIES];

@ -708,13 +801,14 @@ static void print_48_pagetables_(uint64_t cr3, bool read_from_snapshot) {
}

if (bit(entry, 0)) {
print_48_pdpte(address, entry, read_from_snapshot,
!bit(entry, 2), bit(entry, 1), !bit(entry, 63));
print_48_pdpte(address, entry, read_from_snapshot, !bit(entry, 2),
bit(entry, 1), !bit(entry, 63));
}
}
}

void print_48_pagetables(uint64_t cr3) {
void print_48_pagetables(uint64_t cr3)
{
static bool printed = false;
if (!printed) {
fprintf(stderr, "pagetables for cr3 %lx", cr3);
@ -724,7 +818,10 @@ void print_48_pagetables(uint64_t cr3) {
}
}

static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr, bool read_from_snapshot) {
static uint64_t get_48_paging_phys_addr(uint64_t cr3,
uint64_t addr,
bool read_from_snapshot)
{
uint64_t pml4_address = bits(cr3, 12, 51) << 12;
uint64_t pml4_offset = bits(addr, 39, 47);
uint64_t pml4_entry = load_entry(pml4_address, pml4_offset, read_from_snapshot);
@ -775,7 +872,8 @@ static uint64_t get_48_paging_phys_addr(uint64_t cr3, uint64_t addr, bool read_f

// #define DEBUG_48BIT_WALK

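get_48_paging_phys_addr() above performs the standard 4-level x86-64 table walk: the 48-bit virtual address splits into four 9-bit indices (PML4, PDPT, PD, PT) plus a 12-bit page offset, with bit 7 of a PDPTE/PDE marking a 1 GiB or 2 MiB large page. A condensed sketch of the index arithmetic, reusing the bits() and load_entry() helpers defined above (large pages and error checks deliberately elided, so this is illustrative rather than equivalent):

    static uint64_t walk_48_sketch(uint64_t cr3, uint64_t vaddr, bool from_snapshot)
    {
        uint64_t pml4e = load_entry(bits(cr3,   12, 51) << 12, bits(vaddr, 39, 47), from_snapshot);
        uint64_t pdpte = load_entry(bits(pml4e, 12, 51) << 12, bits(vaddr, 30, 38), from_snapshot);
        uint64_t pde   = load_entry(bits(pdpte, 12, 51) << 12, bits(vaddr, 21, 29), from_snapshot);
        uint64_t pte   = load_entry(bits(pde,   12, 51) << 12, bits(vaddr, 12, 20), from_snapshot);
        /* 4 KiB leaf: physical frame number plus the low 12 offset bits */
        return (bits(pte, 12, 51) << 12) | bits(vaddr, 0, 11);
    }
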
bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu){
bool read_virtual_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu)
{
uint8_t tmp_buf[x86_64_PAGE_SIZE];
hwaddr phys_addr;
int asidx;
@ -793,9 +891,11 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat

asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
#ifdef DEBUG_48BIT_WALK
phys_addr_2 = cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs);
phys_addr_2 =
cpu_get_phys_page_attrs_debug(cpu, (address & x86_64_PAGE_MASK), &attrs);
#endif
phys_addr = (hwaddr)get_paging_phys_addr(cpu, env->cr[3], address) & 0xFFFFFFFFFFFFF000ULL;// != 0xFFFFFFFFFFFFFFFFULL)
phys_addr = (hwaddr)get_paging_phys_addr(cpu, env->cr[3], address) &
0xFFFFFFFFFFFFF000ULL; // != 0xFFFFFFFFFFFFFFFFULL)

#ifdef DEBUG_48BIT_WALK
assert(phys_addr == phys_addr_2);
@ -808,8 +908,11 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat
len_skipped = size - amount_copied;
}

nyx_error("Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page);
nyx_debug_p(MEM_PREFIX, "Warning, read from unmapped memory:\t%lx, skipping to %lx", address, next_page);
nyx_error("Warning, read from unmapped memory:\t%lx, skipping to %lx",
address, next_page);
nyx_debug_p(MEM_PREFIX,
"Warning, read from unmapped memory:\t%lx, skipping to %lx",
address, next_page);
memset(data + amount_copied, ' ', len_skipped);
address += len_skipped;
amount_copied += len_skipped;
@ -822,9 +925,12 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat
len_to_copy = remaining_on_page;
}

MemTxResult txt = address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, tmp_buf, len_to_copy, 0);
MemTxResult txt = address_space_rw(cpu_get_address_space(cpu, asidx),
phys_addr, MEMTXATTRS_UNSPECIFIED,
tmp_buf, len_to_copy, 0);
if (txt) {
nyx_debug_p(MEM_PREFIX, "Warning, read failed:\t%lx (%lx)", address, phys_addr);
nyx_debug_p(MEM_PREFIX, "Warning, read failed:\t%lx (%lx)", address,
phys_addr);
}

memcpy(data + amount_copied, tmp_buf, len_to_copy);
@ -836,59 +942,72 @@ bool read_virtual_memory(uint64_t address, uint8_t* data, uint32_t size, CPUStat
return true;
}

bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3){
bool is_addr_mapped_cr3(uint64_t address, CPUState *cpu, uint64_t cr3)
{
return (get_paging_phys_addr(cpu, cr3, address) != INVALID_ADDRESS);
}

bool is_addr_mapped(uint64_t address, CPUState *cpu){
bool is_addr_mapped(uint64_t address, CPUState *cpu)
{
CPUX86State *env = &(X86_CPU(cpu))->env;
kvm_arch_get_registers_fast(cpu);
return (get_paging_phys_addr(cpu, env->cr[3], address) != INVALID_ADDRESS);
}

bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3){
bool is_addr_mapped_cr3_snapshot(uint64_t address, CPUState *cpu, uint64_t cr3)
{
return (get_paging_phys_addr_snapshot(cpu, cr3, address) != INVALID_ADDRESS);
}

bool dump_page_cr3_snapshot(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3){
bool dump_page_cr3_snapshot(uint64_t address, uint8_t *data, CPUState *cpu, uint64_t cr3)
{
fast_reload_t *snapshot = get_fast_reload_snapshot();
uint64_t phys_addr = get_paging_phys_addr_snapshot(cpu, cr3, address);
if (phys_addr == INVALID_ADDRESS) {
return false;
}
else{
} else {
return read_snapshot_memory(snapshot, phys_addr, data, PPAGE_SIZE);
}
}

bool dump_page_cr3_ht(uint64_t address, uint8_t* data, CPUState *cpu, uint64_t cr3){
bool dump_page_cr3_ht(uint64_t address, uint8_t *data, CPUState *cpu, uint64_t cr3)
{
hwaddr phys_addr = (hwaddr)get_paging_phys_addr(cpu, cr3, address);
int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
if(phys_addr == INVALID_ADDRESS || address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0)){
if (phys_addr == INVALID_ADDRESS ||
address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr,
MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0))
{
if (phys_addr != INVALID_ADDRESS) {
nyx_error("%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
nyx_error("%s: Warning, read failed:\t%lx (%lx)\n", __func__, address,
phys_addr);
}
return false;
}
return true;
}

bool dump_page_ht(uint64_t address, uint8_t* data, CPUState *cpu){
bool dump_page_ht(uint64_t address, uint8_t *data, CPUState *cpu)
{
CPUX86State *env = &(X86_CPU(cpu))->env;
kvm_arch_get_registers_fast(cpu);
hwaddr phys_addr = (hwaddr)get_paging_phys_addr(cpu, env->cr[3], address);
int asidx = cpu_asidx_from_attrs(cpu, MEMTXATTRS_UNSPECIFIED);
if(phys_addr == 0xffffffffffffffffULL || address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr, MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0)){
if (phys_addr == 0xffffffffffffffffULL ||
address_space_rw(cpu_get_address_space(cpu, asidx), phys_addr,
MEMTXATTRS_UNSPECIFIED, data, 0x1000, 0))
{
if (phys_addr != 0xffffffffffffffffULL) {
nyx_error("%s: Warning, read failed:\t%lx (%lx)\n", __func__, address, phys_addr);
nyx_error("%s: Warning, read failed:\t%lx (%lx)\n", __func__, address,
phys_addr);
}
}
return true;
}

uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr3){

uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr3)
{
csh handle;

size_t code_size = 256;
@ -896,7 +1015,9 @@ uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr

/* don't => GET_GLOBAL_STATE()->disassembler_word_width */
if (cs_open(CS_ARCH_X86, get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width), &handle) != CS_ERR_OK)
if (cs_open(CS_ARCH_X86,
get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width),
&handle) != CS_ERR_OK)
assert(false);

cs_option(handle, CS_OPT_DETAIL, CS_OPT_ON);
@ -908,10 +1029,10 @@ uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr
int count = cs_disasm(handle, code_ptr, code_size, address, 5, &insn);
if (count > 0) {
for (int i = 0; i < count; i++) {
nyx_error("=> 0x%"PRIx64":\t%s\t\t%s\n", insn[i].address, insn[i].mnemonic, insn[i].op_str);
nyx_error("=> 0x%" PRIx64 ":\t%s\t\t%s\n", insn[i].address,
insn[i].mnemonic, insn[i].op_str);
}
}
else{
} else {
nyx_error("ERROR in %s at %lx (cr3: %lx)\n", __func__, address, cr3);
}

@ -920,6 +1041,3 @@ uint64_t disassemble_at_rip(int fd, uint64_t address, CPUState *cpu, uint64_t cr
cs_close(&handle);
return 0;
}

@ -22,17 +22,19 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#ifndef MEMORY_ACCESS_H
#define MEMORY_ACCESS_H

#include <linux/kvm.h>
#include "qemu-common.h"
#include "sysemu/kvm_int.h"
#include "qemu-common.h"
#include "nyx/types.h"
#include <linux/kvm.h>

#define MEM_SPLIT_START 0x0C0000000
#define MEM_SPLIT_END 0x100000000

/* i386 pc_piix low_mem address translation */
#define address_to_ram_offset(offset) (offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset)
#define ram_offset_to_address(offset) (offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset)
#define address_to_ram_offset(offset) \
(offset >= MEM_SPLIT_END ? (offset - MEM_SPLIT_END) + MEM_SPLIT_START : offset)
#define ram_offset_to_address(offset) \
(offset >= MEM_SPLIT_START ? (offset - MEM_SPLIT_START) + MEM_SPLIT_END : offset)

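These macros undo the PC low-memory split: on the pc_piix machine, guest-physical addresses at or above 4 GiB (MEM_SPLIT_END) are backed by the part of the pc.ram block that starts at the 3 GiB mark (MEM_SPLIT_START), while addresses below 3 GiB map through unchanged. A worked example of the round trip (illustrative values, not code from this commit):

    /* guest-physical 4 GiB + 2 MiB lives at RAM offset 3 GiB + 2 MiB */
    uint64_t gpa  = 0x100200000ULL;
    uint64_t off  = address_to_ram_offset(gpa);  /* == 0x0C0200000 */
    uint64_t back = ram_offset_to_address(off);  /* == 0x100200000 */
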
mem_mode_t get_current_mem_mode(CPUState *cpu);

@ -45,10 +47,23 @@ bool remap_payload_slot(uint64_t phys_addr, uint32_t slot, CPUState *cpu);
bool remap_payload_slot_protected(uint64_t phys_addr, uint32_t slot, CPUState *cpu);
bool remap_payload_buffer(uint64_t virt_guest_addr, CPUState *cpu);

bool remap_slots(uint64_t addr, uint32_t slots, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3);
bool remap_slot(uint64_t addr, uint32_t slot, CPUState *cpu, int fd, uint64_t shm_size, bool virtual, uint64_t cr3);
bool remap_slots(uint64_t addr,
uint32_t slots,
CPUState *cpu,
int fd,
uint64_t shm_size,
bool virtual,
uint64_t cr3);
bool remap_slot(uint64_t addr,
uint32_t slot,
CPUState *cpu,
int fd,
uint64_t shm_size,
bool virtual,
uint64_t cr3);

bool read_virtual_memory_cr3(uint64_t address, uint8_t* data, uint32_t size, CPUState *cpu, uint64_t cr3);
bool read_virtual_memory_cr3(
uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu, uint64_t cr3);

bool read_virtual_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu);
bool write_virtual_memory(uint64_t address, uint8_t *data, uint32_t size, CPUState *cpu);

@ -1,17 +1,17 @@
#include "qemu/osdep.h"
#include <stdio.h>
#include <stdint.h>
#include "qemu/main-loop.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "nested_hypercalls.h"
#include "debug.h"
#include "interface.h"
#include "kvm_nested.h"
#include "memory_access.h"
#include "debug.h"
#include "nested_hypercalls.h"
#include "interface.h"
#include "state/state.h"
#include "pt.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "qemu/main-loop.h"
#include "nyx/helpers.h"
#include "pt.h"
#include "state/state.h"
#include <stdint.h>
#include <stdio.h>

// #define DEBUG_NESTED_HYPERCALLS

@ -27,8 +27,10 @@ int nested_once = 0;
bool nested_setup_snapshot_once = false;

void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_nested_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
/* magic */
nyx_trace();
uint32_t size = 0;
@ -46,23 +48,34 @@ void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uin
free(buffer);
}

void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
char hprintf_buffer[0x1000];
read_physical_memory((uint64_t)run->hypercall.args[0], (uint8_t*)hprintf_buffer, 0x1000, cpu);
read_physical_memory((uint64_t)run->hypercall.args[0], (uint8_t *)hprintf_buffer,
0x1000, cpu);

set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer, strnlen(hprintf_buffer, 0x1000)+1);
set_hprintf_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, hprintf_buffer,
strnlen(hprintf_buffer, 0x1000) + 1);
synchronization_lock_hprintf();
}

void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
kvm_arch_get_registers(cpu);

if ((uint64_t)run->hypercall.args[0]) {
nyx_debug_p(CORE_PREFIX, "handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t ADDRESS:\t%lx\t CR3:\t%lx", (uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1], (uint64_t)run->hypercall.args[2]);
}
else{
nyx_debug_p(CORE_PREFIX,
"handle_hypercall_kafl_nested_prepare:\t NUM:\t%lx\t "
"ADDRESS:\t%lx\t CR3:\t%lx",
(uint64_t)run->hypercall.args[0], (uint64_t)run->hypercall.args[1],
(uint64_t)run->hypercall.args[2]);
} else {
abort();
}

@ -70,7 +83,8 @@ void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, ui
uint64_t *buffer = malloc(buffer_size);
memset(buffer, 0x0, buffer_size);

read_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu);
read_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t *)buffer,
buffer_size, cpu);
htos_cr3 = (uint64_t)run->hypercall.args[0];

for (uint64_t i = 0; i < (uint64_t)run->hypercall.args[0]; i++) {
@ -85,14 +99,18 @@ void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, ui

// wipe memory
memset(buffer, 0x00, buffer_size);
write_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t*)buffer, buffer_size, cpu);
write_physical_memory((uint64_t)run->hypercall.args[1], (uint8_t *)buffer,
buffer_size, cpu);

free(buffer);
}

bool acquired = false;

void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();

if (!hypercalls_enabled) {
@ -104,20 +122,23 @@ void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *c
GET_GLOBAL_STATE()->in_reload_mode = true;
synchronization_disable_pt(cpu);
GET_GLOBAL_STATE()->in_reload_mode = false;
}
else{
} else {
synchronization_disable_pt(cpu);
}
}

void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_nested_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();
// TODO not implemented - see git history for scraps
nyx_error("Not implemented.\n");
abort();
}

static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t addr){
static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t addr)
{
nyx_trace();

kvm_remove_all_breakpoints(cpu);
@ -128,14 +149,18 @@ static inline void set_page_dump_bp_nested(CPUState *cpu, uint64_t cr3, uint64_t
kvm_vcpu_ioctl(cpu, KVM_VMX_PT_ENABLE_PAGE_DUMP_CR3);
}

void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg){
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg)
{
nyx_trace();

if (!acquired) {
acquired = true;

// create_fast_snapshot(cpu, true);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state, REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP);
request_fast_vm_reload(GET_GLOBAL_STATE()->reload_state,
REQUEST_SAVE_SNAPSHOT_ROOT_NESTED_FIX_RIP);

for (int i = 0; i < INTEL_PT_MAX_RANGES; i++) {
if (GET_GLOBAL_STATE()->pt_ip_filter_configured[i]) {

@ -3,9 +3,21 @@
#include <stdint.h>

/* HyperTrash! */
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_config(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run, CPUState *cpu, uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_hprintf(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_prepare(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_config(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_acquire(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
void handle_hypercall_kafl_nested_early_release(struct kvm_run *run,
CPUState *cpu,
uint64_t hypercall_arg);
nyx/page_cache.c
@ -1,26 +1,27 @@
#include "qemu/osdep.h"
#include <errno.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <sys/file.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/mman.h>
#include <assert.h>
#include "nyx/page_cache.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/memory_access.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"
#include <assert.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <errno.h>
#include <stdio.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#define PAGE_CACHE_ADDR_LINE_SIZE sizeof(uint64_t)

#define UNMAPPED_PAGE 0xFFFFFFFFFFFFFFFFULL

static bool reload_addresses(page_cache_t* self){
static bool reload_addresses(page_cache_t *self)
{
khiter_t k;
int ret;
uint64_t addr, offset;
@ -30,7 +31,8 @@ static bool reload_addresses(page_cache_t* self){

if (self_offset != self->num_pages * PAGE_CACHE_ADDR_LINE_SIZE) {
/* reload page cache from disk */
lseek(self->fd_address_file, self->num_pages*PAGE_CACHE_ADDR_LINE_SIZE, SEEK_SET);
lseek(self->fd_address_file, self->num_pages * PAGE_CACHE_ADDR_LINE_SIZE,
SEEK_SET);
offset = self->num_pages;
while (read(self->fd_address_file, &value, PAGE_CACHE_ADDR_LINE_SIZE)) {
addr = value & 0xFFFFFFFFFFFFF000ULL;
@ -39,18 +41,16 @@ static bool reload_addresses(page_cache_t* self){
/* put new addresses and offsets into the hash map */
k = kh_get(PC_CACHE, self->lookup, addr);
if (k == kh_end(self->lookup)) {

if (value & 0xFFF) {
fprintf(stderr, "Load page: %lx (UNMAPPED)\n", addr);
}
else{
} else {
k = kh_put(PC_CACHE, self->lookup, addr, &ret);
kh_value(self->lookup, k) = (offset - 1) * PAGE_SIZE;
}
}
else{
} else {
/* likely a bug / race condition in page_cache itself! */
fprintf(stderr, "----------> Page duplicate found ...skipping! %lx\n", addr);
fprintf(stderr,
"----------> Page duplicate found ...skipping! %lx\n", addr);
// abort();
}
}
@ -58,7 +58,9 @@ static bool reload_addresses(page_cache_t* self){
/* reload page dump file */
munmap(self->page_data, self->num_pages * PAGE_SIZE);
self->num_pages = self_offset / PAGE_CACHE_ADDR_LINE_SIZE;
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
self->page_data = mmap(NULL, (self->num_pages) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);

return true;
}
@ -66,25 +68,37 @@ static bool reload_addresses(page_cache_t* self){
return false;
}

static bool append_page(page_cache_t* self, uint64_t page, uint64_t cr3){
static bool append_page(page_cache_t *self, uint64_t page, uint64_t cr3)
{
bool success = true;
if (!self->num_pages) {
assert(!ftruncate(self->fd_page_file, (self->num_pages + 1) * PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
}
else{
self->page_data = mmap(NULL, (self->num_pages + 1) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);
} else {
munmap(self->page_data, self->num_pages * PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages + 1) * PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages+1)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
self->page_data = mmap(NULL, (self->num_pages + 1) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);
}

if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){
if(!dump_page_cr3_ht(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){
if(!dump_page_cr3_snapshot(page, self->page_data+(PAGE_SIZE*self->num_pages), self->cpu, GET_GLOBAL_STATE()->parent_cr3)){

if (!dump_page_cr3_ht(page, self->page_data + (PAGE_SIZE * self->num_pages),
self->cpu, GET_GLOBAL_STATE()->pt_c3_filter))
{
if (!dump_page_cr3_ht(page, self->page_data + (PAGE_SIZE * self->num_pages),
self->cpu, GET_GLOBAL_STATE()->parent_cr3))
{
if (!dump_page_cr3_snapshot(page,
self->page_data + (PAGE_SIZE * self->num_pages),
self->cpu, GET_GLOBAL_STATE()->parent_cr3))
{
munmap(self->page_data, (self->num_pages + 1) * PAGE_SIZE);
assert(!ftruncate(self->fd_page_file, (self->num_pages) * PAGE_SIZE));
self->page_data = mmap(NULL, (self->num_pages)*PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, self->fd_page_file, 0);
self->page_data = mmap(NULL, (self->num_pages) * PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_SHARED,
self->fd_page_file, 0);

success = false;
return success;
@ -96,41 +110,40 @@ static bool append_page(page_cache_t* self, uint64_t page, uint64_t cr3){
return success;
}

static void page_cache_lock(page_cache_t* self){
static void page_cache_lock(page_cache_t *self)
{
int ret = 0;
while (true) {
ret = flock(self->fd_lock, LOCK_EX);
if (ret == 0) {
return;
}
else if (ret == EINTR){
} else if (ret == EINTR) {
/* try again if acquiring this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
}
else{
} else {
assert(false);
}
}
}

static void page_cache_unlock(page_cache_t* self){
static void page_cache_unlock(page_cache_t *self)
{
int ret = 0;
while (true) {
ret = flock(self->fd_lock, LOCK_UN);
if (ret == 0) {
return;
}
else if (ret == EINTR){
} else if (ret == EINTR) {
/* try again if releasing this lock has failed */
fprintf(stderr, "%s: interrupted by signal...\n", __func__);
}
else{
} else {
assert(false);
}
}
}

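One note on the retry loops above: they compare flock()'s return value against EINTR directly, while flock(2) conventionally returns -1 and reports the interruption through errno. A sketch of the usual EINTR-safe idiom, for comparison (standalone, not part of this commit):

    #include <sys/file.h>
    #include <errno.h>
    #include <assert.h>

    static void lock_fd_exclusive(int fd)
    {
        /* flock() signals failure via -1 + errno, so test errno on retry */
        while (flock(fd, LOCK_EX) == -1) {
            assert(errno == EINTR); /* any other failure is unexpected */
            /* interrupted by a signal: retry */
        }
    }
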
static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){
static bool update_page_cache(page_cache_t *self, uint64_t page, khiter_t *k)
{
page_cache_lock(self);

if (reload_addresses(self)) {
@ -141,7 +154,11 @@ static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){
int ret;

uint64_t cr3 = GET_GLOBAL_STATE()->parent_cr3;
if(!is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->parent_cr3) && !is_addr_mapped_cr3_snapshot(page, self->cpu, GET_GLOBAL_STATE()->pt_c3_filter)){
if (!is_addr_mapped_cr3_snapshot(page, self->cpu,
GET_GLOBAL_STATE()->parent_cr3) &&
!is_addr_mapped_cr3_snapshot(page, self->cpu,
GET_GLOBAL_STATE()->pt_c3_filter))
{
/* TODO! */
}

@ -149,16 +166,13 @@ static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){
if (*k == kh_end(self->lookup) && reload_addresses(self)) {
/* reload successful */
*k = kh_get(PC_CACHE, self->lookup, page);
}
else{

} else {
if (append_page(self, page, cr3)) {
*k = kh_put(PC_CACHE, self->lookup, page, &ret);
assert(write(self->fd_address_file, &page, PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
assert(write(self->fd_address_file, &page,
PAGE_CACHE_ADDR_LINE_SIZE) == PAGE_CACHE_ADDR_LINE_SIZE);
kh_value(self->lookup, *k) = (self->num_pages - 1) * PAGE_SIZE;
}
else{
} else {
page_cache_unlock(self);
return false;
}
@ -170,7 +184,8 @@ static bool update_page_cache(page_cache_t* self, uint64_t page, khiter_t* k){
return true;
}

uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode){
uint64_t page_cache_fetch(page_cache_t *self, uint64_t page, bool *success, bool test_mode)
{
page &= 0xFFFFFFFFFFFFF000ULL;

if (self->last_page == page) {
@ -191,8 +206,7 @@ uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool

if (kh_value(self->lookup, k) == UNMAPPED_PAGE) {
self->last_addr = UNMAPPED_PAGE;
}
else{
} else {
self->last_addr = (uint64_t)self->page_data + kh_value(self->lookup, k);
}

@ -201,11 +215,13 @@ uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool
}

/* FIXME */
uint64_t page_cache_fetch2(page_cache_t* self, uint64_t page, bool* success){
uint64_t page_cache_fetch2(page_cache_t *self, uint64_t page, bool *success)
{
return page_cache_fetch(self, page, success, false);
}

page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file){
page_cache_t *page_cache_new(CPUState *cpu, const char *cache_file)
{
page_cache_t *self = malloc(sizeof(page_cache_t));

char *tmp1;
@ -253,11 +269,13 @@ page_cache_t* page_cache_new(CPUState *cpu, const char* cache_file){
return self;
}

bool page_cache_disassemble(page_cache_t* self, uint64_t address, cs_insn **insn){
bool page_cache_disassemble(page_cache_t *self, uint64_t address, cs_insn **insn)
{
return true;
}

cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode){
cs_insn *page_cache_cs_malloc(page_cache_t *self, disassembler_mode_t mode)
{
switch (mode) {
case mode_16:
return cs_malloc(self->handle_16);
@ -271,7 +289,12 @@ cs_insn* page_cache_cs_malloc(page_cache_t* self, disassembler_mode_t mode){
return NULL;
}

bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode){
bool page_cache_disassemble_iter(page_cache_t *self,
uint64_t *address,
cs_insn *insn,
uint64_t *failed_page,
disassembler_mode_t mode)
{
*failed_page = 0xFFFFFFFFFFFFFFFFULL;

bool success = true;
@ -303,27 +326,28 @@ bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn
}

if ((*address & 0xFFF) >= (0x1000 - 16)) {
memcpy((void*)self->disassemble_cache, (void*)((uint64_t)code+(0x1000-16)), 16);
memcpy((void *)self->disassemble_cache,
(void *)((uint64_t)code + (0x1000 - 16)), 16);
code_ptr = self->disassemble_cache + 0xf - (0xfff - (*address & 0xfff));
code = (uint8_t *)page_cache_fetch(self, *address + 0x1000, &success, false);

if (success == true) {
memcpy((void *)(self->disassemble_cache + 16), (void *)code, 16);
return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
}
else{
return cs_disasm_iter(*current_handle, (const uint8_t **)&code_ptr,
&code_size, address, insn);
} else {
code_size = (0xfff - (*address & 0xfff));
if(!cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn)){
if (!cs_disasm_iter(*current_handle, (const uint8_t **)&code_ptr,
&code_size, address, insn))
{
*failed_page = (*address + 0x1000) & 0xFFFFFFFFFFFFF000ULL;
return false;
}
return true;
}
}
else {
} else {
code_ptr = code + (*address & 0xFFF);
return cs_disasm_iter(*current_handle, (const uint8_t**) &code_ptr, &code_size, address, insn);
return cs_disasm_iter(*current_handle, (const uint8_t **)&code_ptr,
&code_size, address, insn);
}
}

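The branch above exists because an x86 instruction can be up to 15 bytes long and may straddle a page boundary: when fewer than 16 bytes remain on the current page, the code stitches a 32-byte window out of the tail of this page and the head of the next, and points Capstone into the middle of it. A standalone sketch of that stitching, with fetch_page() as a hypothetical stand-in for page_cache_fetch():

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical page lookup; stands in for page_cache_fetch(). */
    const uint8_t *fetch_page(uint64_t page_addr);

    /* Build a contiguous 32-byte window around addr and return a pointer
     * positioned exactly at addr's byte within the stitched buffer. */
    static const uint8_t *stitch_window(uint8_t cache[32], uint64_t addr)
    {
        const uint8_t *this_page = fetch_page(addr & ~0xfffULL);
        const uint8_t *next_page = fetch_page((addr & ~0xfffULL) + 0x1000);

        memcpy(cache,      this_page + (0x1000 - 16), 16); /* tail of page N   */
        memcpy(cache + 16, next_page,                 16); /* head of page N+1 */

        /* addr lies in the last 16 bytes of page N; this mirrors the
         * "+ 0xf - (0xfff - (addr & 0xfff))" offset computed above */
        return cache + 16 - (0x1000 - (addr & 0xfff));
    }
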
@ -1,8 +1,8 @@
#pragma once

#include "khash.h"
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include "khash.h"
#include <libxdc.h>

#include "qemu-common.h"
@ -29,10 +29,17 @@ typedef struct page_cache_s{
} page_cache_t;

page_cache_t *page_cache_new(CPUState *cpu, const char *cache_file);
uint64_t page_cache_fetch(page_cache_t* self, uint64_t page, bool* success, bool test_mode);
uint64_t page_cache_fetch(page_cache_t *self,
uint64_t page,
bool *success,
bool test_mode);

bool page_cache_disassemble(page_cache_t *self, uint64_t address, cs_insn **insn);
bool page_cache_disassemble_iter(page_cache_t* self, uint64_t* address, cs_insn *insn, uint64_t* failed_page, disassembler_mode_t mode);
bool page_cache_disassemble_iter(page_cache_t *self,
uint64_t *address,
cs_insn *insn,
uint64_t *failed_page,
disassembler_mode_t mode);

cs_insn *page_cache_cs_malloc(page_cache_t *self, disassembler_mode_t mode);

@ -1,10 +1,12 @@
#include "patcher.h"
#include "nyx/memory_access.h"
#include "nyx/disassembler.h"
#include "debug.h"
#include "nyx/disassembler.h"
#include "nyx/memory_access.h"
#include "nyx/state/state.h"

uint8_t cmp_patch_data[] = { 0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE]=0x90 }; // CMP AL,AL; NOP, NOP ...
uint8_t cmp_patch_data[] = {
0x38, 0xC0, [2 ... MAX_INSTRUCTION_SIZE] = 0x90
}; // CMP AL,AL; NOP, NOP ...
const uint8_t *cmp_patch = &cmp_patch_data[0];

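The [2 ... MAX_INSTRUCTION_SIZE] = 0x90 syntax is the GCC/Clang range extension for designated initializers: elements 0 and 1 hold the cmp al, al encoding (0x38 0xC0) and every remaining slot is filled with a nop (0x90), so the patch can overwrite an instruction of any supported length. A small self-contained illustration of the same extension:

    #include <stdio.h>

    int main(void)
    {
        /* GNU C range initializer: indices 2..7 all get 0x90 */
        unsigned char patch[8] = { 0x38, 0xC0, [2 ... 7] = 0x90 };
        for (int i = 0; i < 8; i++)
            printf("%02x ", patch[i]); /* prints: 38 c0 90 90 90 90 90 90 */
        printf("\n");
        return 0;
    }
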
/*
@ -14,9 +16,16 @@ static void _patcher_apply_patch(patcher_t *self, size_t index);

static void _patcher_restore_patch(patcher_t *self, size_t index);

static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr);
static void _patcher_save_patch(patcher_t *self,
size_t index,
uint8_t *data,
size_t instruction_size,
uint64_t addr);

static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn id);
static size_t _patcher_disassemble_size(patcher_t *self,
uint8_t *data,
uint64_t addr,
x86_insn id);

static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches);

@ -29,7 +38,8 @@ static redqueen_t* _redq_ptr(patcher_t *self);
* Public Functions
*/

patcher_t* patcher_new(CPUState *cpu){
patcher_t *patcher_new(CPUState *cpu)
{
patcher_t *res = malloc(sizeof(patcher_t));
res->cpu = cpu;
res->num_patches = 0;
@ -38,13 +48,15 @@ patcher_t* patcher_new(CPUState *cpu){
return res;
}

void patcher_free(patcher_t* self){
void patcher_free(patcher_t *self)
{
assert(!self->is_currently_applied);
_patcher_free_patch_infos(self);
free(self);
}

void patcher_apply_all(patcher_t *self){
void patcher_apply_all(patcher_t *self)
{
assert(!self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
// assert(patcher_validate_patches(self));
@ -54,7 +66,8 @@ void patcher_apply_all(patcher_t *self){
self->is_currently_applied = true;
}

void patcher_restore_all(patcher_t *self){
void patcher_restore_all(patcher_t *self)
{
assert(self->is_currently_applied);
assert(!_redq_ptr(self)->hooks_applied);
// assert(patcher_validate_patches(self));
@ -64,7 +77,8 @@ void patcher_restore_all(patcher_t *self){
self->is_currently_applied = false;
}

void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs){
void patcher_set_addrs(patcher_t *self, uint64_t *addrs, size_t num_addrs)
{
_patcher_free_patch_infos(self);
_patcher_alloc_patch_infos(self, num_addrs);
uint8_t curr_instruction_code[MAX_INSTRUCTION_SIZE];
@ -72,29 +86,37 @@ void patcher_set_addrs(patcher_t *self, uint64_t* addrs, size_t num_addrs){

for (size_t i = 0; i < self->num_patches; i++) {
// nyx_debug_p(REDQUEEN_PREFIX, "patching %lx", addrs[i]);
if( read_virtual_memory(addrs[i], &curr_instruction_code[0], MAX_INSTRUCTION_SIZE, self->cpu) ) {
size_t size =_patcher_disassemble_size(self, &curr_instruction_code[0], addrs[i], X86_INS_CMP);
if (read_virtual_memory(addrs[i], &curr_instruction_code[0],
MAX_INSTRUCTION_SIZE, self->cpu))
{
size_t size = _patcher_disassemble_size(self, &curr_instruction_code[0],
addrs[i], X86_INS_CMP);
assert(size != 0); // cs_open failed, shouldn't happen
_patcher_save_patch(self, i, &curr_instruction_code[0], size, addrs[i]);
}
}
}

static void print_hexdump(const uint8_t* addr, size_t size){
static void print_hexdump(const uint8_t *addr, size_t size)
{
for (size_t i = 0; i < size; i++) {
printf(" %02x", addr[i]);
}
printf("\n");
}

bool patcher_validate_patches(patcher_t *self){
bool patcher_validate_patches(patcher_t *self)
{
bool was_rq = _redq_ptr(self)->hooks_applied;
if (was_rq)
redqueen_remove_hooks(_redq_ptr(self));
if(!self->patches){return true;}
if (!self->patches) {
return true;
}
for (size_t i = 0; i < self->num_patches; i++) {
uint8_t buf[MAX_INSTRUCTION_SIZE];
read_virtual_memory(self->patches[i].addr, &buf[0], MAX_INSTRUCTION_SIZE, self->cpu);
read_virtual_memory(self->patches[i].addr, &buf[0], MAX_INSTRUCTION_SIZE,
self->cpu);
const uint8_t *should_value = NULL;
if (self->is_currently_applied) {
should_value = cmp_patch;
@ -107,7 +129,9 @@ bool patcher_validate_patches(patcher_t *self){
nyx_debug_p(REDQUEEN_PREFIX, "should_be:");
print_hexdump(should_value, self->patches[i].size);
if (0 != memcmp(&buf[0], should_value, self->patches[i].size)) {
nyx_debug_p(REDQUEEN_PREFIX, "validating patches failed self->is_currently_applied = %d", self->is_currently_applied);
nyx_debug_p(REDQUEEN_PREFIX,
"validating patches failed self->is_currently_applied = %d",
self->is_currently_applied);
return false;
}
}
@ -121,15 +145,22 @@ bool patcher_validate_patches(patcher_t *self){
* Private Helper Functions Definitions
*/

static void _patcher_apply_patch(patcher_t *self, size_t index) {
static void _patcher_apply_patch(patcher_t *self, size_t index)
{
abort(); // deprecated function -> remove this code later
}

static void _patcher_restore_patch(patcher_t *self, size_t index){
static void _patcher_restore_patch(patcher_t *self, size_t index)
{
abort(); // deprecated function -> remove this code later
}

static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, size_t instruction_size, uint64_t addr) {
static void _patcher_save_patch(patcher_t *self,
size_t index,
uint8_t *data,
size_t instruction_size,
uint64_t addr)
{
assert(instruction_size >= 2);
assert(instruction_size < MAX_INSTRUCTION_SIZE);
patch_info_t *info = &self->patches[index];
@ -139,15 +170,22 @@ static void _patcher_save_patch(patcher_t *self, size_t index, uint8_t* data, si
info->size = instruction_size;
}

static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t addr, x86_insn type){
static size_t _patcher_disassemble_size(patcher_t *self,
uint8_t *data,
uint64_t addr,
x86_insn type)
{
csh handle;
if (cs_open(CS_ARCH_X86, get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width), &handle) == CS_ERR_OK){
if (cs_open(CS_ARCH_X86,
get_capstone_mode(GET_GLOBAL_STATE()->disassembler_word_width),
&handle) == CS_ERR_OK)
{
cs_insn *insn = cs_malloc(handle);
uint8_t *cur_offset = data;
uint64_t cs_address = addr;
uint64_t code_size = MAX_INSTRUCTION_SIZE;
cs_disasm_iter(handle, (const uint8_t **) &cur_offset, &code_size, &cs_address, insn);
cs_disasm_iter(handle, (const uint8_t **)&cur_offset, &code_size,
&cs_address, insn);
size_t size = insn->size;
if (type != X86_INS_INVALID) {
assert(insn->id == type);
@ -159,7 +197,8 @@ static size_t _patcher_disassemble_size(patcher_t *self, uint8_t* data, uint64_t
return 0;
}

static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches){
static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches)
{
assert(self->num_patches == 0);
assert(self->patches == NULL);
assert(num_patches < 10000);
@ -167,14 +206,16 @@ static void _patcher_alloc_patch_infos(patcher_t *self, size_t num_patches){
self->patches = malloc(sizeof(patch_info_t) * num_patches);
}

static void _patcher_free_patch_infos(patcher_t *self){
static void _patcher_free_patch_infos(patcher_t *self)
{
assert(!self->is_currently_applied);
free(self->patches);
self->patches = NULL;
self->num_patches = 0;
}

static redqueen_t* _redq_ptr(patcher_t *self){
static redqueen_t *_redq_ptr(patcher_t *self)
{
redqueen_t *res = GET_GLOBAL_STATE()->redqueen_state; // self->cpu->redqueen_state;
return res;
}

@ -1,8 +1,8 @@
|
||||
#ifndef __GUARD_REDQUEEN_PATCHER_STRUCT__
|
||||
#define __GUARD_REDQUEEN_PATCHER_STRUCT__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include <capstone/capstone.h>
|
||||
#include <capstone/x86.h>
|
||||
@ -24,7 +24,6 @@ typedef struct patch_info_s{
|
||||
} patch_info_t;
|
||||
|
||||
typedef struct patcher_s {
|
||||
|
||||
CPUState *cpu;
|
||||
|
||||
patch_info_t *patches;
|
||||
|
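Aside: _patcher_disassemble_size above relies on capstone's iterative decoder. A minimal sketch of that API outside the Nyx state plumbing (the fixed 64-bit mode is an assumption; the real code picks the mode via get_capstone_mode):

#include <capstone/capstone.h>
#include <stdint.h>

/* Decode one instruction and return its length, 0 on failure.
 * Mirrors the cs_open/cs_malloc/cs_disasm_iter pattern used above. */
static size_t first_insn_size(const uint8_t *code, size_t len, uint64_t addr)
{
    csh handle;
    if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK) /* assumed 64-bit mode */
        return 0;

    cs_insn *insn = cs_malloc(handle);
    size_t size = 0;
    /* cs_disasm_iter advances code/len/addr past the decoded instruction */
    if (cs_disasm_iter(handle, &code, &len, &addr, insn))
        size = insn->size;

    cs_free(insn, 1);
    cs_close(&handle);
    return size;
}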
125 nyx/pt.c
@ -53,46 +53,56 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.

#define PT_BUFFER_MMAP_ADDR 0x3ffff0000000

static void pt_set(CPUState *cpu, run_on_cpu_data arg){
static void pt_set(CPUState *cpu, run_on_cpu_data arg)
{
asm volatile("" ::: "memory");
}

static inline int pt_cmd_hmp_context(CPUState *cpu, uint64_t cmd){
static inline int pt_cmd_hmp_context(CPUState *cpu, uint64_t cmd)
{
cpu->pt_ret = -1;
if (pt_hypercalls_enabled()) {
nyx_debug_p(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing mode is enabled (-kafl)!");
}
else{
nyx_debug_p(PT_PREFIX, "Error: HMP commands are ignored if kafl tracing "
"mode is enabled (-kafl)!");
} else {
cpu->pt_cmd = cmd;
run_on_cpu(cpu, pt_set, RUN_ON_CPU_NULL);
}
return cpu->pt_ret;
}

static int pt_cmd(CPUState *cpu, uint64_t cmd, bool hmp_mode){
static int pt_cmd(CPUState *cpu, uint64_t cmd, bool hmp_mode)
{
if (hmp_mode) {
return pt_cmd_hmp_context(cpu, cmd);
}
else {
} else {
cpu->pt_cmd = cmd;
pt_pre_kvm_run(cpu);
return cpu->pt_ret;
}
}

static inline int pt_ioctl(int fd, unsigned long request, unsigned long arg){
static inline int pt_ioctl(int fd, unsigned long request, unsigned long arg)
{
if (!fd) {
return -EINVAL;
}
return ioctl(fd, request, arg);
}

void pt_dump(CPUState *cpu, int bytes){
if(!(GET_GLOBAL_STATE()->redqueen_state && GET_GLOBAL_STATE()->redqueen_state->intercept_mode)){
if (GET_GLOBAL_STATE()->in_fuzzing_mode && GET_GLOBAL_STATE()->decoder_page_fault == false && GET_GLOBAL_STATE()->decoder && !GET_GLOBAL_STATE()->dump_page){
void pt_dump(CPUState *cpu, int bytes)
{
if (!(GET_GLOBAL_STATE()->redqueen_state &&
GET_GLOBAL_STATE()->redqueen_state->intercept_mode))
{
if (GET_GLOBAL_STATE()->in_fuzzing_mode &&
GET_GLOBAL_STATE()->decoder_page_fault == false &&
GET_GLOBAL_STATE()->decoder && !GET_GLOBAL_STATE()->dump_page)
{
GET_GLOBAL_STATE()->pt_trace_size += bytes;
pt_write_pt_dump_file(cpu->pt_mmap, bytes);
decoder_result_t result = libxdc_decode(GET_GLOBAL_STATE()->decoder, cpu->pt_mmap, bytes);
decoder_result_t result =
libxdc_decode(GET_GLOBAL_STATE()->decoder, cpu->pt_mmap, bytes);
switch (result) {
case decoder_success:
break;
@ -102,7 +112,8 @@ void pt_dump(CPUState *cpu, int bytes){
case decoder_page_fault:
// fprintf(stderr, "Page not found => 0x%lx\n", libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder));
GET_GLOBAL_STATE()->decoder_page_fault = true;
GET_GLOBAL_STATE()->decoder_page_fault_addr = libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder);
GET_GLOBAL_STATE()->decoder_page_fault_addr =
libxdc_get_page_fault_addr(GET_GLOBAL_STATE()->decoder);
break;
case decoder_unkown_packet:
fprintf(stderr, "WARNING: libxdc_decode returned unknown_packet\n");
@ -116,7 +127,8 @@ void pt_dump(CPUState *cpu, int bytes){
}


int pt_enable(CPUState *cpu, bool hmp_mode){
int pt_enable(CPUState *cpu, bool hmp_mode)
{
if (!fast_reload_set_bitmap(get_fast_reload_snapshot())) {
coverage_bitmap_reset();
}
@ -128,12 +140,14 @@ int pt_enable(CPUState *cpu, bool hmp_mode){
return pt_cmd(cpu, KVM_VMX_PT_ENABLE, hmp_mode);
}

int pt_disable(CPUState *cpu, bool hmp_mode){
int pt_disable(CPUState *cpu, bool hmp_mode)
{
int r = pt_cmd(cpu, KVM_VMX_PT_DISABLE, hmp_mode);
return r;
}

int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode){
int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode)
{
int r = 0;

if (val == GET_GLOBAL_STATE()->pt_c3_filter) {
@ -156,7 +170,8 @@ int pt_set_cr3(CPUState *cpu, uint64_t val, bool hmp_mode){
return r;
}

int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode){
int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp_mode)
{
int r = 0;

if (addrn > 3) {
@ -167,8 +182,12 @@ int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp
return -EINVAL;
}

if(GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] > GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]){
nyx_debug_p(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx", GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
if (GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] >
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn])
{
nyx_debug_p(PT_PREFIX, "Error (ip_a > ip_b) 0x%lx-0x%lx",
GET_GLOBAL_STATE()->pt_ip_filter_a[addrn],
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
return -EINVAL;
}

@ -176,9 +195,14 @@ int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp
pt_disable_ip_filtering(cpu, addrn, hmp_mode);
}

nyx_debug_p(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)", addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] , GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);
nyx_debug_p(PT_PREFIX, "Configuring new trace region (addr%d, 0x%lx-0x%lx)",
addrn, GET_GLOBAL_STATE()->pt_ip_filter_a[addrn],
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn]);

if(GET_GLOBAL_STATE()->pt_ip_filter_configured[addrn] && GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] != 0 && GET_GLOBAL_STATE()->pt_ip_filter_b[addrn] != 0){
if (GET_GLOBAL_STATE()->pt_ip_filter_configured[addrn] &&
GET_GLOBAL_STATE()->pt_ip_filter_a[addrn] != 0 &&
GET_GLOBAL_STATE()->pt_ip_filter_b[addrn] != 0)
{
r += pt_cmd(cpu, KVM_VMX_PT_CONFIGURE_ADDR0 + addrn, hmp_mode);
r += pt_cmd(cpu, KVM_VMX_PT_ENABLE_ADDR0 + addrn, hmp_mode);
GET_GLOBAL_STATE()->pt_ip_filter_enabled[addrn] = true;
@ -186,7 +210,8 @@ int pt_enable_ip_filtering(CPUState *cpu, uint8_t addrn, bool redqueen, bool hmp
return r;
}

void pt_init_decoder(CPUState *cpu){
void pt_init_decoder(CPUState *cpu)
{
uint64_t filters[4][2] = { 0 };

/* TODO time to clean up this code -.- */
@ -202,16 +227,23 @@ void pt_init_decoder(CPUState *cpu){
assert(GET_GLOBAL_STATE()->decoder == NULL);
assert(GET_GLOBAL_STATE()->shared_bitmap_ptr != NULL);
assert(GET_GLOBAL_STATE()->shared_bitmap_size != 0);
GET_GLOBAL_STATE()->decoder = libxdc_init(filters, (void* (*)(void*, uint64_t, bool*))page_cache_fetch2, GET_GLOBAL_STATE()->page_cache, GET_GLOBAL_STATE()->shared_bitmap_ptr, GET_GLOBAL_STATE()->shared_bitmap_size);

libxdc_register_bb_callback(GET_GLOBAL_STATE()->decoder, (void (*)(void*, disassembler_mode_t, uint64_t, uint64_t))redqueen_callback, GET_GLOBAL_STATE()->redqueen_state);

alt_bitmap_init(
GET_GLOBAL_STATE()->decoder =
libxdc_init(filters, (void *(*)(void *, uint64_t, bool *))page_cache_fetch2,
GET_GLOBAL_STATE()->page_cache,
GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_size);

libxdc_register_bb_callback(GET_GLOBAL_STATE()->decoder,
(void (*)(void *, disassembler_mode_t, uint64_t,
uint64_t))redqueen_callback,
GET_GLOBAL_STATE()->redqueen_state);

alt_bitmap_init(GET_GLOBAL_STATE()->shared_bitmap_ptr,
GET_GLOBAL_STATE()->shared_bitmap_size);
}

int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode){
int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode)
{
int r = 0;
switch (addrn) {
case 0:
@ -229,7 +261,8 @@ int pt_disable_ip_filtering(CPUState *cpu, uint8_t addrn, bool hmp_mode){
return r;
}

void pt_kvm_init(CPUState *cpu){
void pt_kvm_init(CPUState *cpu)
{
cpu->pt_cmd = 0;
cpu->pt_enabled = false;
cpu->pt_fd = 0;
@ -247,7 +280,8 @@ struct vmx_pt_filter_iprs {

pthread_mutex_t pt_dump_mutex = PTHREAD_MUTEX_INITIALIZER;

void pt_pre_kvm_run(CPUState *cpu){
void pt_pre_kvm_run(CPUState *cpu)
{
pthread_mutex_lock(&pt_dump_mutex);
int ret;
struct vmx_pt_filter_iprs filter_iprs;
@ -281,13 +315,15 @@ void pt_pre_kvm_run(CPUState *cpu){
GET_GLOBAL_STATE()->redqueen_disable_pending = false;
}

if(GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force){
if (GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force)
{
if (!cpu->pt_fd) {
cpu->pt_fd = kvm_vcpu_ioctl(cpu, KVM_VMX_PT_SETUP_FD, (unsigned long)0);
assert(cpu->pt_fd != -1);
ret = ioctl(cpu->pt_fd, KVM_VMX_PT_GET_TOPA_SIZE, (unsigned long)0x0);

cpu->pt_mmap = mmap((void*)PT_BUFFER_MMAP_ADDR, ret, PROT_READ|PROT_WRITE, MAP_SHARED, cpu->pt_fd, 0);
cpu->pt_mmap = mmap((void *)PT_BUFFER_MMAP_ADDR, ret,
PROT_READ | PROT_WRITE, MAP_SHARED, cpu->pt_fd, 0);
assert(cpu->pt_mmap != (void *)0xFFFFFFFFFFFFFFFF);
// add an extra page to have enough space for an additional PT_TRACE_END byte
assert(mmap(cpu->pt_mmap + ret, 0x1000, PROT_READ | PROT_WRITE,
@ -304,7 +340,8 @@ void pt_pre_kvm_run(CPUState *cpu){
case KVM_VMX_PT_ENABLE:
if (cpu->pt_fd) {
/* dump for the very last time before enabling VMX_PT ... just in case */
ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0);
ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW,
(unsigned long)0);

if (!ioctl(cpu->pt_fd, cpu->pt_cmd, 0)) {
cpu->pt_enabled = true;
@ -327,8 +364,12 @@ void pt_pre_kvm_run(CPUState *cpu){
case KVM_VMX_PT_CONFIGURE_ADDR1:
case KVM_VMX_PT_CONFIGURE_ADDR2:
case KVM_VMX_PT_CONFIGURE_ADDR3:
filter_iprs.a = GET_GLOBAL_STATE()->pt_ip_filter_a[(cpu->pt_cmd)-KVM_VMX_PT_CONFIGURE_ADDR0];
filter_iprs.b = GET_GLOBAL_STATE()->pt_ip_filter_b[(cpu->pt_cmd)-KVM_VMX_PT_CONFIGURE_ADDR0];
filter_iprs.a =
GET_GLOBAL_STATE()
->pt_ip_filter_a[(cpu->pt_cmd) - KVM_VMX_PT_CONFIGURE_ADDR0];
filter_iprs.b =
GET_GLOBAL_STATE()
->pt_ip_filter_b[(cpu->pt_cmd) - KVM_VMX_PT_CONFIGURE_ADDR0];
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)&filter_iprs);
break;
case KVM_VMX_PT_ENABLE_ADDR0:
@ -338,7 +379,8 @@ void pt_pre_kvm_run(CPUState *cpu){
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0);
break;
case KVM_VMX_PT_CONFIGURE_CR3:
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, GET_GLOBAL_STATE()->pt_c3_filter);
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd,
GET_GLOBAL_STATE()->pt_c3_filter);
break;
case KVM_VMX_PT_ENABLE_CR3:
ret = pt_ioctl(cpu->pt_fd, cpu->pt_cmd, (unsigned long)0);
@ -356,7 +398,8 @@ void pt_pre_kvm_run(CPUState *cpu){
pthread_mutex_unlock(&pt_dump_mutex);
}

void pt_handle_overflow(CPUState *cpu){
void pt_handle_overflow(CPUState *cpu)
{
pthread_mutex_lock(&pt_dump_mutex);
int overflow = ioctl(cpu->pt_fd, KVM_VMX_PT_CHECK_TOPA_OVERFLOW, (unsigned long)0);
if (overflow > 0) {
@ -365,8 +408,10 @@ void pt_handle_overflow(CPUState *cpu){
pthread_mutex_unlock(&pt_dump_mutex);
}

void pt_post_kvm_run(CPUState *cpu){
if(GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force){
void pt_post_kvm_run(CPUState *cpu)
{
if (GET_GLOBAL_STATE()->pt_trace_mode || GET_GLOBAL_STATE()->pt_trace_mode_force)
{
pt_handle_overflow(cpu);
}
}
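Aside: the assert after the pt_mmap call above compares against the literal (void *)0xFFFFFFFFFFFFFFFF, which is MAP_FAILED ((void *)-1) spelled out. A minimal sketch of the same fixed-hint mapping, with fd and size standing in for the values the real code obtains via the KVM_VMX_PT_* ioctls:

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

#define PT_BUFFER_MMAP_ADDR 0x3ffff0000000 /* same hint address as above */

/* Map a ToPA-sized trace buffer at a fixed hint address.
 * fd/size are placeholders for KVM_VMX_PT_SETUP_FD / KVM_VMX_PT_GET_TOPA_SIZE results. */
static void *map_pt_buffer(int fd, size_t size)
{
    void *buf = mmap((void *)PT_BUFFER_MMAP_ADDR, size,
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    assert(buf != MAP_FAILED); /* MAP_FAILED == (void *)-1 */
    return buf;
}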
1 nyx/pt.h
@ -38,4 +38,3 @@ void pt_handle_overflow(CPUState *cpu);
void pt_dump(CPUState *cpu, int bytes);

#endif

699 nyx/redqueen.c
File diff suppressed because it is too large
@ -22,17 +22,17 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#pragma once
#include "qemu/osdep.h"

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/kvm.h>
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include "nyx/redqueen_trace.h"
#include "nyx/khash.h"
#include "nyx/page_cache.h"
#include "nyx/redqueen_trace.h"
#include <capstone/capstone.h>
#include <capstone/x86.h>
#include <linux/kvm.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

// #define RQ_DEBUG

@ -54,14 +54,32 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define REDQUEEN_SE_INSTRUMENTATION 2
#define REDQUEEN_WHITELIST_INSTRUMENTATION 3

enum reg_types{RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI, R8, R9, R10, R11, R12, R13, R14, R15};
enum reg_types {
RAX,
RCX,
RDX,
RBX,
RSP,
RBP,
RSI,
RDI,
R8,
R9,
R10,
R11,
R12,
R13,
R14,
R15
};

#define CMP_BITMAP_NOP 0x0000000UL
#define CMP_BITMAP_RQ_INSTRUCTION 0x1000000UL
#define CMP_BITMAP_SE_INSTRUCTION 0x2000000UL
#define CMP_BITMAP_BLACKLISTED 0x4000000UL
#define CMP_BITMAP_TRACE_ENABLED 0x8000000UL
#define CMP_BITMAP_SHOULD_HOOK_SE (CMP_BITMAP_SE_INSTRUCTION|CMP_BITMAP_TRACE_ENABLED)
#define CMP_BITMAP_SHOULD_HOOK_SE \
(CMP_BITMAP_SE_INSTRUCTION | CMP_BITMAP_TRACE_ENABLED)
#define CMP_BITMAP_SHOULD_HOOK_RQ (CMP_BITMAP_RQ_INSTRUCTION)

KHASH_MAP_INIT_INT64(RQ, uint32_t)
@ -117,5 +135,7 @@ void dump_se_memory_access_at(redqueen_t* self, uint64_t instr_addr, uint64_t me
void redqueen_insert_hooks(redqueen_t *self);
void redqueen_remove_hooks(redqueen_t *self);

void redqueen_callback(void* opaque, disassembler_mode_t mode, uint64_t start_addr, uint64_t end_addr);

void redqueen_callback(void *opaque,
disassembler_mode_t mode,
uint64_t start_addr,
uint64_t end_addr);
@ -1,9 +1,9 @@
#include "qemu/osdep.h"
#include "redqueen_patch.h"
#include "redqueen.h"
#include "patcher.h"
#include "file_helper.h"
#include "debug.h"
#include "file_helper.h"
#include "patcher.h"
#include "redqueen.h"

/*
* Private Helper Functions Declarations
@ -15,12 +15,14 @@ void _load_and_set_patches(patcher_t* self);
* Public Functions
*/

void pt_enable_patches(patcher_t *self){
void pt_enable_patches(patcher_t *self)
{
_load_and_set_patches(self);
patcher_apply_all(self);
}

void pt_disable_patches(patcher_t *self){
void pt_disable_patches(patcher_t *self)
{
patcher_restore_all(self);
}

@ -30,7 +32,8 @@ void pt_disable_patches(patcher_t *self){
*/


void _load_and_set_patches(patcher_t* self){
void _load_and_set_patches(patcher_t *self)
{
size_t num_addrs = 0;
uint64_t *addrs = NULL;
parse_address_file(redqueen_workdir.redqueen_patches, &num_addrs, &addrs);

@ -1,11 +1,11 @@
#include "qemu/osdep.h"
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include "redqueen_trace.h"
#include "redqueen.h"
#include "redqueen_trace.h"
#include "state/state.h"


@ -33,7 +33,8 @@ void alt_bitmap_reset(void)
}
}

static inline uint64_t mix_bits(uint64_t v) {
static inline uint64_t mix_bits(uint64_t v)
{
v ^= (v >> 31);
v *= 0x7fb5d329728ea185;
return v;
@ -56,10 +57,12 @@ static void alt_bitmap_add(uint64_t from, uint64_t to)
}


static int reset_trace_fd(void) {
static int reset_trace_fd(void)
{
if (trace_fd)
close(trace_fd);
trace_fd = open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_TRUNC, 0644);
trace_fd =
open(redqueen_workdir.pt_trace_results, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (trace_fd < 0) {
fprintf(stderr, "Failed to initiate trace output: %s\n", strerror(errno));
assert(0);
@ -67,26 +70,34 @@ static int reset_trace_fd(void) {
return trace_fd;
}

void redqueen_trace_init(void) {
void redqueen_trace_init(void)
{
redqueen_trace_enabled = true;
}

redqueen_trace_t* redqueen_trace_new(void){
redqueen_trace_t *redqueen_trace_new(void)
{
redqueen_trace_t *self = malloc(sizeof(redqueen_trace_t));
self->lookup = kh_init(RQ_TRACE);
self->num_ordered_transitions = 0;
self->max_ordered_transitions = INIT_NUM_OF_STORED_TRANSITIONS;
self->ordered_transitions = malloc(INIT_NUM_OF_STORED_TRANSITIONS*sizeof(uint128_t));
self->ordered_transitions =
malloc(INIT_NUM_OF_STORED_TRANSITIONS * sizeof(uint128_t));
return self;
}

void redqueen_trace_free(redqueen_trace_t* self){
void redqueen_trace_free(redqueen_trace_t *self)
{
kh_destroy(RQ_TRACE, self->lookup);
free(self->ordered_transitions);
free(self);
}

void redqueen_trace_register_transition(redqueen_trace_t* self, disassembler_mode_t mode, uint64_t from, uint64_t to){
void redqueen_trace_register_transition(redqueen_trace_t *self,
disassembler_mode_t mode,
uint64_t from,
uint64_t to)
{
khiter_t k;
int ret;
uint64_t exit_ip = 0xffffffffffffffff;
@ -111,7 +122,8 @@ void redqueen_trace_register_transition(redqueen_trace_t* self, disassembler_mod
}
}

static void redqueen_trace_write(void){
static void redqueen_trace_write(void)
{
#ifdef KAFL_FULL_TRACES
return;
#endif
@ -122,11 +134,13 @@ static void redqueen_trace_write(void){
uint128_t key = self->ordered_transitions[i];
k = kh_get(RQ_TRACE, self->lookup, key);
assert(k != kh_end(self->lookup));
dprintf(trace_fd, "%lx,%lx,%lx\n", (uint64_t)(key>>64), (uint64_t)key, kh_value(self->lookup, k) );
dprintf(trace_fd, "%lx,%lx,%lx\n", (uint64_t)(key >> 64), (uint64_t)key,
kh_value(self->lookup, k));
}
}

static void redqueen_state_reset(void){
static void redqueen_state_reset(void)
{
redqueen_trace_t *self = GET_GLOBAL_STATE()->redqueen_state->trace_state;
kh_destroy(RQ_TRACE, self->lookup);
self->lookup = kh_init(RQ_TRACE);
@ -134,14 +148,16 @@ static void redqueen_state_reset(void){
}


void redqueen_trace_reset(void){
void redqueen_trace_reset(void)
{
if (redqueen_trace_enabled) {
redqueen_state_reset();
reset_trace_fd();
}
}

void redqueen_trace_flush(void){
void redqueen_trace_flush(void)
{
if (redqueen_trace_enabled) {
redqueen_trace_write();
if (trace_fd)
@ -149,24 +165,28 @@ void redqueen_trace_flush(void){
}
}

void redqueen_set_trace_mode(void){
void redqueen_set_trace_mode(void)
{
if (redqueen_trace_enabled) {
libxdc_enable_tracing(GET_GLOBAL_STATE()->decoder);
libxdc_register_edge_callback(GET_GLOBAL_STATE()->decoder,
(void (*)(void*, disassembler_mode_t, uint64_t, uint64_t))&redqueen_trace_register_transition,
(void (*)(void *, disassembler_mode_t,
uint64_t, uint64_t)) &
redqueen_trace_register_transition,
GET_GLOBAL_STATE()->redqueen_state->trace_state);
}
}

void redqueen_unset_trace_mode(void){
void redqueen_unset_trace_mode(void)
{
if (redqueen_trace_enabled) {
libxdc_disable_tracing(GET_GLOBAL_STATE()->decoder);
}
}

#ifdef DEBUG_MAIN
int main(int argc, char** argv){

int main(int argc, char **argv)
{
redqueen_trace_t *rq_obj = redqueen_trace_new();

reset_trace_fd();
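Aside: the trace code above keeps a per-edge hit counter in a khash map (kh_get to look up, kh_put to insert, kh_value to update). A minimal self-contained sketch of that pattern, using a plain 64-bit key map instead of the 128-bit RQ_TRACE map; the include path and demo map name are assumptions:

#include <stdint.h>
#include "nyx/khash.h" /* assumed include path, as in redqueen.h above */

KHASH_MAP_INIT_INT64(EDGE_DEMO, uint64_t) /* hypothetical demo map */

/* Count how often a transition key is seen: insert on first hit,
 * then bump the stored counter. */
static void count_edge(khash_t(EDGE_DEMO) *map, uint64_t key)
{
    int ret;
    khiter_t k = kh_get(EDGE_DEMO, map, key);
    if (k == kh_end(map)) {
        k = kh_put(EDGE_DEMO, map, key, &ret);
        kh_value(map, k) = 0;
    }
    kh_value(map, k)++;
}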
@ -1,9 +1,9 @@
#pragma once

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#include "qemu/osdep.h"

@ -21,7 +21,8 @@ typedef uint128_t khint128_t;
@return The hash value [khint_t]
*/
#define kh_int128_hash_func(key) \
(khint32_t)((key) >> 33 ^ (key) ^ (key) << 11) ^ (((key >> 64)) >> 33 ^ ((key >> 64)) ^ ((key >> 64)) << 11)
(khint32_t)((key) >> 33 ^ (key) ^ (key) << 11) ^ \
(((key >> 64)) >> 33 ^ ((key >> 64)) ^ ((key >> 64)) << 11)
/*! @function
@abstract 64-bit integer comparison function
*/
@ -33,7 +34,8 @@ typedef uint128_t khint128_t;
@param khval_t Type of values [type]
*/
#define KHASH_MAP_INIT_INT128(name, khval_t) \
KHASH_INIT(name, khint128_t, khval_t, 1, kh_int128_hash_func, kh_int128_hash_equal)
KHASH_INIT(name, khint128_t, khval_t, 1, kh_int128_hash_func, \
kh_int128_hash_equal)

KHASH_MAP_INIT_INT128(RQ_TRACE, uint64_t)

@ -52,7 +54,10 @@ void alt_bitmap_init(void* ptr, uint32_t size);

redqueen_trace_t *redqueen_trace_new(void);
void redqueen_trace_free(redqueen_trace_t *self);
void redqueen_trace_register_transition(redqueen_trace_t* self, disassembler_mode_t mode, uint64_t from, uint64_t to);
void redqueen_trace_register_transition(redqueen_trace_t *self,
disassembler_mode_t mode,
uint64_t from,
uint64_t to);

void redqueen_trace_init(void);
void redqueen_set_trace_mode(void);
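Aside: the "%lx,%lx,%lx" dump in redqueen_trace_write recovers the (from, to) edge from the high and low halves of a single 128-bit khash key. A minimal sketch of that packing; the uint128_t typedef is assumed to be GCC's unsigned __int128, matching what the kh_int128_hash_func macro above operates on:

#include <stdint.h>

typedef unsigned __int128 uint128_t; /* assumed to match the typedef used above */

/* Pack a (from, to) edge into one 128-bit khash key and unpack it again. */
static inline uint128_t edge_key(uint64_t from, uint64_t to)
{
    return ((uint128_t)from << 64) | to;
}

static inline uint64_t edge_from(uint128_t key) { return (uint64_t)(key >> 64); }
static inline uint64_t edge_to(uint128_t key)   { return (uint64_t)key; }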
@ -13,7 +13,8 @@

// #define SHAREDIR_DEBUG

sharedir_t* sharedir_new(void){
sharedir_t *sharedir_new(void)
{
sharedir_t *self = malloc(sizeof(sharedir_t));
self->dir = NULL;
self->lookup = kh_init(SHAREDIR_LOOKUP);
@ -22,30 +23,34 @@ sharedir_t* sharedir_new(void){
return self;
}

void sharedir_set_dir(sharedir_t* self, const char* dir){
void sharedir_set_dir(sharedir_t *self, const char *dir)
{
assert(!self->dir);
assert(asprintf(&self->dir, "%s", dir) != -1);
}

static bool file_exits(const char* file){
static bool file_exits(const char *file)
{
struct stat sb;
return (stat(file, &sb) == 0);
}

static time_t get_file_mod_time(char *file){
static time_t get_file_mod_time(char *file)
{
struct stat attr;
stat(file, &attr);
return attr.st_mtime;
}

static size_t get_file_size(const char* file){
static size_t get_file_size(const char *file)
{
struct stat st;
stat(file, &st);
return st.st_size;
}

static char* sharedir_scan(sharedir_t* self, const char* file){

static char *sharedir_scan(sharedir_t *self, const char *file)
{
/*
* Agent is not under our control, but lets roughly constrain
* it to anything stored in or linked from sharedir
@ -53,8 +58,8 @@ static char* sharedir_scan(sharedir_t* self, const char* file){
chdir(self->dir);
char *real_path = realpath(file, NULL);

if (file[0] != '/' && !strstr(file, "/../") &&
real_path && file_exits(real_path)) {
if (file[0] != '/' && !strstr(file, "/../") && real_path && file_exits(real_path))
{
return real_path;
}

@ -62,7 +67,8 @@ static char* sharedir_scan(sharedir_t* self, const char* file){
return NULL;
}

static sharedir_file_t* sharedir_get_object(sharedir_t* self, const char* file){
static sharedir_file_t *sharedir_get_object(sharedir_t *self, const char *file)
{
khiter_t k;
int ret;
sharedir_file_t *obj = NULL;
@ -83,8 +89,7 @@ static sharedir_file_t* sharedir_get_object(sharedir_t* self, const char* file){
assert(get_file_size(obj->path) == obj->size);

return obj;
}
else{
} else {
/* nope ! */
char *realpath = sharedir_scan(self, file);
struct stat sb;
@ -114,7 +119,8 @@ static sharedir_file_t* sharedir_get_object(sharedir_t* self, const char* file){
}
}

static FILE* get_file_ptr(sharedir_t* self, sharedir_file_t* obj){
static FILE *get_file_ptr(sharedir_t *self, sharedir_file_t *obj)
{
if (obj == self->last_file_obj_ptr && self->last_file_f) {
return self->last_file_f;
}
@ -129,9 +135,11 @@ static FILE* get_file_ptr(sharedir_t* self, sharedir_file_t* obj){
return f;
}

uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page_buffer){
uint64_t sharedir_request_file(sharedir_t *self, const char *file, uint8_t *page_buffer)
{
if (!self->dir) {
fprintf(stderr, "WARNING: New file request received, but no share dir configured! [FILE: %s]\n", file);
fprintf(stderr, "WARNING: New file request received, but no share dir configured! [FILE: %s]\n",
file);
return 0xFFFFFFFFFFFFFFFFUL;
}

@ -151,8 +159,7 @@ uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page
assert(fread(page_buffer, 1, 0x1000, f) == 0x1000);
obj->bytes_left -= 0x1000;
return 0x1000;
}
else {
} else {
if (obj->bytes_left != 0) {
f = get_file_ptr(self, obj);
fseek(f, obj->size - obj->bytes_left, SEEK_SET);
@ -162,14 +169,12 @@ uint64_t sharedir_request_file(sharedir_t* self, const char* file, uint8_t* page
obj->bytes_left = 0;

return ret_value;
}
else {
} else {
obj->bytes_left = (uint_fast64_t)obj->size;
return 0;
}
}
}
else{
} else {
nyx_error("Warning: No such file in sharedir: %s\n", file);
return 0xFFFFFFFFFFFFFFFFUL;
}
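Aside: sharedir_scan above constrains guest-supplied file names by rejecting absolute paths and parent traversal before resolving them relative to the share dir. A minimal sketch of that containment check under the same assumptions (symlinks inside the sharedir remain allowed, as the original comment notes; error handling trimmed, caller frees the result):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Resolve 'file' relative to 'dir', rejecting obvious escapes.
 * Returns a malloc'd absolute path or NULL; caller frees. */
static char *scan_sharedir(const char *dir, const char *file)
{
    if (file[0] == '/' || strstr(file, "/../"))
        return NULL;             /* absolute paths and parent traversal refused */
    if (chdir(dir) != 0)
        return NULL;
    return realpath(file, NULL); /* NULL if the file does not exist */
}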
@ -1,8 +1,8 @@
#pragma once

#include <stdio.h>
#include <stdint.h>
#include "khash.h"
#include <stdint.h>
#include <stdio.h>

typedef struct sharedir_file_s {
char *file;

@ -1,13 +1,13 @@
#include "qemu/osdep.h"

#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "nyx/snapshot/block/block_cow.h"
#include "sysemu/block-backend.h"
#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/snapshot/block/block_cow.h"
#include "nyx/state/state.h"


// #define COW_CACHE_DEBUG

@ -20,32 +20,40 @@
uint64_t global_cow_primary_size = COW_CACHE_PRIMARY_MINIMUM_SIZE;
bool global_cow_primary_size_adjustable = true;

void set_global_cow_cache_primary_size(uint64_t new_size){
if (global_cow_primary_size_adjustable && new_size > COW_CACHE_PRIMARY_MINIMUM_SIZE && (new_size & 0xFFF) == 0){
void set_global_cow_cache_primary_size(uint64_t new_size)
{
if (global_cow_primary_size_adjustable &&
new_size > COW_CACHE_PRIMARY_MINIMUM_SIZE && (new_size & 0xFFF) == 0)
{
global_cow_primary_size = new_size;
global_cow_primary_size_adjustable = false;
}
}

static inline uint64_t get_global_cow_cache_primary_size(void){
static inline uint64_t get_global_cow_cache_primary_size(void)
{
return global_cow_primary_size;
}

cow_cache_t* cow_cache_new(const char* filename){

cow_cache_t *cow_cache_new(const char *filename)
{
cow_cache_t *self = malloc(sizeof(cow_cache_t));
self->lookup_primary = kh_init(COW_CACHE);
self->lookup_secondary = kh_init(COW_CACHE);
self->lookup_secondary_tmp = kh_init(COW_CACHE);

self->cow_primary_size = COW_CACHE_PRIMARY_MINIMUM_SIZE;
self->data_primary = mmap(NULL, self->cow_primary_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
self->data_primary = mmap(NULL, self->cow_primary_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_primary != MAP_FAILED);

self->data_secondary = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
self->data_secondary = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_secondary != MAP_FAILED);

self->data_secondary_tmp = mmap(NULL, COW_CACHE_SECONDARY_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
self->data_secondary_tmp = mmap(NULL, COW_CACHE_SECONDARY_SIZE,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_secondary_tmp != MAP_FAILED);

self->filename = strdup(basename(filename));
@ -54,10 +62,12 @@ cow_cache_t* cow_cache_new(const char* filename){
self->offset_secondary_tmp = 0;

if (getenv("NYX_DISABLE_BLOCK_COW")) {
fprintf(stderr, "WARNING: Nyx block COW layer disabled for %s (** write operations are not cached **)\n", filename);
fprintf(stderr,
"WARNING: Nyx block COW layer disabled for %s (** write operations "
"are not cached **)\n",
filename);
self->enabled = false;
}
else{
} else {
self->enabled = true;
}
self->enabled_fuzz = false;
@ -74,7 +84,10 @@ cow_cache_t* cow_cache_new(const char* filename){
return self;
}

static char* gen_file_name(cow_cache_t* self, const char* filename_prefix, const char* filename_postfix){
static char *gen_file_name(cow_cache_t *self,
const char *filename_prefix,
const char *filename_postfix)
{
char *tmp1;
char *tmp2;

@ -92,7 +105,8 @@ static char* gen_file_name(cow_cache_t* self, const char* filename_prefix, const
return tmp1;
}

void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool switch_mode){
void read_primary_buffer(cow_cache_t *self, const char *filename_prefix, bool switch_mode)
{
assert(!self->enabled_fuzz);
global_cow_primary_size_adjustable = false;

@ -108,14 +122,16 @@ void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool sw
assert(stat(tmp2, &buffer) == 0);

if (buffer.st_size > get_global_cow_cache_primary_size()) {
fprintf(stderr, "ERROR: in-memory CoW buffer is too small compared to snapshot file (buffer: 0x%lx / file: 0x%lx)\n", get_global_cow_cache_primary_size(), buffer.st_size);
fprintf(stderr,
"ERROR: in-memory CoW buffer is too small compared to snapshot file "
"(buffer: 0x%lx / file: 0x%lx)\n",
get_global_cow_cache_primary_size(), buffer.st_size);
exit(1);
}

if (buffer.st_size) {
self->lookup_primary = kh_load(COW_CACHE, tmp1);
}
else {
} else {
self->lookup_primary = kh_init(COW_CACHE);
}

@ -124,19 +140,21 @@ void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool sw
if (switch_mode) {
munmap(self->data_primary, self->cow_primary_size);
self->cow_primary_size = get_global_cow_cache_primary_size();
self->data_primary = mmap(0, self->cow_primary_size, PROT_READ, MAP_SHARED, fd, 0);
self->data_primary =
mmap(0, self->cow_primary_size, PROT_READ, MAP_SHARED, fd, 0);
assert(self->data_primary);
}
else{

} else {
if (get_global_cow_cache_primary_size() != self->cow_primary_size) {
munmap(self->data_primary, self->cow_primary_size);
self->cow_primary_size = get_global_cow_cache_primary_size();
self->data_primary = mmap(NULL, self->cow_primary_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
self->data_primary = mmap(NULL, self->cow_primary_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
assert(self->data_primary != MAP_FAILED);
}

void* ptr = mmap(0, COW_CACHE_PRIMARY_MINIMUM_SIZE, PROT_READ , MAP_SHARED, fd, 0);
void *ptr =
mmap(0, COW_CACHE_PRIMARY_MINIMUM_SIZE, PROT_READ, MAP_SHARED, fd, 0);
assert(ptr);
memcpy(self->data_primary, ptr, buffer.st_size);
munmap(ptr, COW_CACHE_PRIMARY_MINIMUM_SIZE);
@ -153,7 +171,8 @@ void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool sw
free(tmp2);
}

void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix){
void dump_primary_buffer(cow_cache_t *self, const char *filename_prefix)
{
assert(self->enabled_fuzz);

char *tmp1;
@ -164,8 +183,7 @@ void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix){

if (self->offset_primary) {
kh_write(COW_CACHE, self->lookup_primary, tmp1);
}
else{
} else {
fclose(fopen(tmp1, "wb"));
}

@ -185,14 +203,14 @@ void dump_primary_buffer(cow_cache_t* self, const char* filename_prefix){
free(tmp2);
}

void cow_cache_reset(cow_cache_t* self){
void cow_cache_reset(cow_cache_t *self)
{
if (!self->enabled_fuzz)
return;
/* TODO */
assert(self->enabled_fuzz);

if (self->enabled_fuzz) {

#ifdef DEBUG_COW_LAYER
printf("%s: read_calls =>\t%ld\n", __func__, self->read_calls);
printf("%s: write_calls =>\t%ld\n", __func__, self->write_calls);
@ -208,8 +226,7 @@ void cow_cache_reset(cow_cache_t* self){
self->read_calls = 0;
self->write_calls = 0;
#endif
}
else {
} else {
self->offset_secondary_tmp = 0;
kh_clear(COW_CACHE, self->lookup_secondary_tmp);

@ -223,25 +240,29 @@ void cow_cache_reset(cow_cache_t* self){
}


void cow_cache_enable_tmp_mode(cow_cache_t* self){
void cow_cache_enable_tmp_mode(cow_cache_t *self)
{
assert(self->enabled_fuzz);
self->enabled_fuzz_tmp = true;
}

void cow_cache_disable_tmp_mode(cow_cache_t* self){
void cow_cache_disable_tmp_mode(cow_cache_t *self)
{
assert(self->enabled_fuzz);
assert(self->enabled_fuzz_tmp);
cow_cache_reset(self);
self->enabled_fuzz_tmp = false;
}

void cow_cache_enable(cow_cache_t* self){
void cow_cache_enable(cow_cache_t *self)
{
cow_cache_reset(self);
self->enabled = true;
}


void cow_cache_disable(cow_cache_t* self){
void cow_cache_disable(cow_cache_t *self)
{
cow_cache_reset(self);
self->enabled = false;
}
@ -266,31 +287,56 @@ extern int blk_check_byte_request(BlockBackend *blk, int64_t offset, size_t size
extern void blk_aio_complete(BlkAioEmAIOCB *acb);

/* read from primary buffer */
static inline void read_from_primary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
static inline void read_from_primary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
khiter_t k;

k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if (k != kh_end(self->lookup_primary)) {
#ifdef COW_CACHE_DEBUG
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary);
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_primary);
#endif
qemu_iovec_from_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset,
self->data_primary + kh_value(self->lookup_primary, k),
CHUNK_SIZE);
}
return;
}

/* try to read from secondary buffer
* read from primary buffer if the data is not available yet */
static inline void read_from_secondary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
static inline void read_from_secondary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
/* read from L2 TMP buffer */
khiter_t k;
if (self->enabled_fuzz_tmp) {
k = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
if (k != kh_end(self->lookup_secondary_tmp)) {
#ifdef COW_CACHE_DEBUG
printf("[FTMP] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_secondary);
printf("[FTMP] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_secondary);
#endif
qemu_iovec_from_buf(qiov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset,
self->data_secondary_tmp +
kh_value(self->lookup_secondary_tmp, k),
CHUNK_SIZE);
return;
}
}
@ -299,9 +345,13 @@ static inline void read_from_secondary_buffer(cow_cache_t* self, BlockBackend *b
k = kh_get(COW_CACHE, self->lookup_secondary, offset_addr);
if (k != kh_end(self->lookup_secondary)) {
#ifdef COW_CACHE_DEBUG
printf("[FUZZ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_secondary);
printf("[FUZZ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_secondary);
#endif
qemu_iovec_from_buf(qiov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset,
self->data_secondary + kh_value(self->lookup_secondary, k),
CHUNK_SIZE);
return;
}

@ -309,21 +359,29 @@ static inline void read_from_secondary_buffer(cow_cache_t* self, BlockBackend *b
k = kh_get(COW_CACHE, self->lookup_primary, offset_addr);
if (k != kh_end(self->lookup_primary)) {
#ifdef COW_CACHE_DEBUG
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary);
printf("[PRE ] READ DIRTY COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA "
"OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_primary);
#endif
qemu_iovec_from_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
qemu_iovec_from_buf(qiov, iov_offset,
self->data_primary + kh_value(self->lookup_primary, k),
CHUNK_SIZE);
}
}

/* read data from cow cache */
static int cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags){

static int cow_cache_read(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
#ifdef DEBUG_COW_LAYER
if (self->enabled_fuzz) {
if (!self->enabled_fuzz_tmp) {
self->read_calls++;
}
else{
} else {
self->read_calls_tmp++;
}
}
@ -339,13 +397,15 @@ static int cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset,
assert(!(qiov->size % CHUNK_SIZE));

uint64_t iov_offset = 0;
for(uint64_t offset_addr = offset; offset_addr < (offset+(qiov->size)); offset_addr+= CHUNK_SIZE){

for (uint64_t offset_addr = offset; offset_addr < (offset + (qiov->size));
offset_addr += CHUNK_SIZE)
{
if (self->enabled_fuzz) {
read_from_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
}
else{
read_from_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
read_from_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
} else {
read_from_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
}

iov_offset += CHUNK_SIZE;
@ -356,7 +416,15 @@ static int cow_cache_read(cow_cache_t* self, BlockBackend *blk, int64_t offset,


/* write to primary buffer */
static inline void write_to_primary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
static inline void write_to_primary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
int ret;
khiter_t k;

@ -365,7 +433,8 @@ static inline void write_to_primary_buffer(cow_cache_t* self, BlockBackend *blk,
/* create page */
k = kh_put(COW_CACHE, self->lookup_primary, offset_addr, &ret);
#ifdef COW_CACHE_DEBUG
printf("ADD NEW COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n", offset_addr, iov_offset, self->offset_primary);
printf("ADD NEW COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx\n",
offset_addr, iov_offset, self->offset_primary);
#endif


@ -374,7 +443,9 @@ static inline void write_to_primary_buffer(cow_cache_t* self, BlockBackend *blk,
self->offset_primary += CHUNK_SIZE;

#ifdef COW_CACHE_VERBOSE
printf("COW CACHE IS 0x%lx BYTES (KB: %ld / MB: %ld / GB: %ld) IN SIZE!\n", self->offset, self->offset >> 10, self->offset >> 20, self->offset >> 30);
printf("COW CACHE IS 0x%lx BYTES (KB: %ld / MB: %ld / GB: %ld) IN SIZE!\n",
self->offset, self->offset >> 10, self->offset >> 20,
self->offset >> 30);
#endif

/* IN CASE THE BUFFER IS FULL -> ABORT! */
@ -382,14 +453,25 @@ static inline void write_to_primary_buffer(cow_cache_t* self, BlockBackend *blk,
}

#ifdef COW_CACHE_DEBUG
printf("LOAD COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx (%s)\n", offset_addr, iov_offset, kh_value(self->lookup_primary, k), self->filename);
printf("LOAD COW PAGE: ADDR: %lx IOVEC OFFSET: %lx DATA OFFSET: %lx (%s)\n",
offset_addr, iov_offset, kh_value(self->lookup_primary, k), self->filename);
#endif

/* write to cached page */
qemu_iovec_to_buf(qiov, iov_offset, self->data_primary + kh_value(self->lookup_primary, k), CHUNK_SIZE);
qemu_iovec_to_buf(qiov, iov_offset,
self->data_primary + kh_value(self->lookup_primary, k),
CHUNK_SIZE);
}

static inline void write_to_secondary_buffer(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags, uint64_t offset_addr, uint64_t iov_offset){
static inline void write_to_secondary_buffer(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags,
uint64_t offset_addr,
uint64_t iov_offset)
{
int ret;

if (!self->enabled_fuzz_tmp) {
@ -408,12 +490,13 @@ static inline void write_to_secondary_buffer(cow_cache_t* self, BlockBackend *bl
k_secondary = kh_put(COW_CACHE, self->lookup_secondary, offset_addr, &ret);
kh_value(self->lookup_secondary, k_secondary) = self->offset_secondary;
self->offset_secondary += CHUNK_SIZE;

}
/* write to cache */
qemu_iovec_to_buf(qiov, iov_offset, self->data_secondary + kh_value(self->lookup_secondary, k_secondary), CHUNK_SIZE);
}
else{
qemu_iovec_to_buf(qiov, iov_offset,
self->data_secondary +
kh_value(self->lookup_secondary, k_secondary),
CHUNK_SIZE);
} else {
/* L2 TMP mode */

/* IN CASE THE BUFFER IS FULL -> ABORT! */
@ -423,28 +506,38 @@ static inline void write_to_secondary_buffer(cow_cache_t* self, BlockBackend *bl
return;
}

khiter_t k_secondary_tmp = kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
khiter_t k_secondary_tmp =
kh_get(COW_CACHE, self->lookup_secondary_tmp, offset_addr);
if (unlikely(k_secondary_tmp == kh_end(self->lookup_secondary_tmp))) {
/* if page is not cached in secondary tmp buffer yet */
k_secondary_tmp = kh_put(COW_CACHE, self->lookup_secondary_tmp, offset_addr, &ret);
kh_value(self->lookup_secondary_tmp, k_secondary_tmp) = self->offset_secondary_tmp;
k_secondary_tmp =
kh_put(COW_CACHE, self->lookup_secondary_tmp, offset_addr, &ret);
kh_value(self->lookup_secondary_tmp, k_secondary_tmp) =
self->offset_secondary_tmp;
self->offset_secondary_tmp += CHUNK_SIZE;
}

/* write to cache */
qemu_iovec_to_buf(qiov, iov_offset, self->data_secondary_tmp + kh_value(self->lookup_secondary_tmp, k_secondary_tmp), CHUNK_SIZE);
qemu_iovec_to_buf(qiov, iov_offset,
self->data_secondary_tmp +
kh_value(self->lookup_secondary_tmp, k_secondary_tmp),
CHUNK_SIZE);
}
}

/* write data to cow cache */
static int cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags){

static int cow_cache_write(cow_cache_t *self,
BlockBackend *blk,
int64_t offset,
unsigned int bytes,
QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
#ifdef DEBUG_COW_LAYER
if (self->enabled_fuzz) {
if (!self->enabled_fuzz_tmp) {
self->write_calls++;
}
else{
} else {
self->write_calls_tmp++;
}
}
@ -460,18 +553,20 @@ static int cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset,
GET_GLOBAL_STATE()->cow_cache_full = true;
fprintf(stderr, "WARNING: %s write in %lx CHUNKSIZE\n", __func__, qiov->size);
return 0;
}
else{
} else {
assert(!(qiov->size % CHUNK_SIZE));
}

uint64_t iov_offset = 0;
for(uint64_t offset_addr = offset; offset_addr < (offset+(qiov->size)); offset_addr+= CHUNK_SIZE){
for (uint64_t offset_addr = offset; offset_addr < (offset + (qiov->size));
offset_addr += CHUNK_SIZE)
{
if (self->enabled_fuzz) {
write_to_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
}
else{
write_to_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags, offset_addr, iov_offset);
write_to_secondary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
} else {
write_to_primary_buffer(self, blk, offset, CHUNK_SIZE, qiov, flags,
offset_addr, iov_offset);
}

iov_offset += CHUNK_SIZE;
@ -480,14 +575,15 @@ static int cow_cache_write(cow_cache_t* self, BlockBackend *blk, int64_t offset,
return 0;
}

void switch_to_fuzz_mode(cow_cache_t* self){
void switch_to_fuzz_mode(cow_cache_t *self)
{
self->enabled_fuzz = true;
assert(!mprotect(self->data_primary, self->cow_primary_size, PROT_READ));
nyx_debug("switching to secondary CoW buffer\n");
}

void cow_cache_read_entry(void* opaque){

void cow_cache_read_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;

@ -495,13 +591,15 @@ void cow_cache_read_entry(void* opaque){
printf("%s %lx %lx\n", __func__, rwco->offset, acb->bytes);
#endif

rwco->ret = cow_cache_read( *((cow_cache_t**)(rwco->blk)), rwco->blk, rwco->offset, acb->bytes, rwco->qiov, rwco->flags);
rwco->ret = cow_cache_read(*((cow_cache_t **)(rwco->blk)), rwco->blk,
rwco->offset, acb->bytes, rwco->qiov, rwco->flags);

blk_aio_complete(acb);
}


void cow_cache_write_entry(void* opaque){
void cow_cache_write_entry(void *opaque)
{
BlkAioEmAIOCB *acb = opaque;
BlkRwCo *rwco = &acb->rwco;

@ -509,7 +607,8 @@ void cow_cache_write_entry(void* opaque){
printf("%s\n", __func__);
#endif

rwco->ret = cow_cache_write( *((cow_cache_t**)(rwco->blk)), rwco->blk, rwco->offset, acb->bytes, rwco->qiov, rwco->flags);
rwco->ret = cow_cache_write(*((cow_cache_t **)(rwco->blk)), rwco->blk,
rwco->offset, acb->bytes, rwco->qiov, rwco->flags);

blk_aio_complete(acb);
}
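Aside: the CoW read/write paths above walk each block request in CHUNK_SIZE steps and redirect every chunk through a khash offset-to-buffer-slot map, allocating a fresh slot on the first write to a chunk. A minimal standalone sketch of that indexing scheme; the demo map name and the 4 KiB chunk size are assumptions, and the QEMU iovec plumbing is replaced by a plain memcpy:

#include <stdint.h>
#include <string.h>
#include "nyx/khash.h" /* assumed include path */

#define DEMO_CHUNK_SIZE 0x1000 /* assumed; the real CHUNK_SIZE lives in block_cow.c */

KHASH_MAP_INIT_INT64(COW_DEMO, uint64_t) /* hypothetical demo map */

/* Copy one chunk into the CoW buffer, reserving a new slot on first write. */
static void cow_write_chunk(khash_t(COW_DEMO) *lookup, uint8_t *data,
                            uint64_t *next_slot, uint64_t offset_addr,
                            const uint8_t *src)
{
    int ret;
    khiter_t k = kh_get(COW_DEMO, lookup, offset_addr);
    if (k == kh_end(lookup)) {
        k = kh_put(COW_DEMO, lookup, offset_addr, &ret);
        kh_value(lookup, k) = *next_slot; /* first write: reserve a fresh slot */
        *next_slot += DEMO_CHUNK_SIZE;
    }
    memcpy(data + kh_value(lookup, k), src, DEMO_CHUNK_SIZE);
}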
@ -60,7 +60,9 @@ void cow_cache_reset(cow_cache_t* self);
|
||||
|
||||
void switch_to_fuzz_mode(cow_cache_t *self);
|
||||
|
||||
void read_primary_buffer(cow_cache_t* self, const char* filename_prefix, bool switch_mode);
|
||||
void read_primary_buffer(cow_cache_t *self,
|
||||
const char *filename_prefix,
|
||||
bool switch_mode);
|
||||
void dump_primary_buffer(cow_cache_t *self, const char *filename_prefix);
|
||||
|
||||
void cow_cache_read_entry(void *opaque);
|
||||
|
@ -17,8 +17,8 @@ typedef struct fast_reload_cow_entry_s{
|
||||
} fast_reload_cow_entry_t;
|
||||
|
||||
|
||||
nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snapshot){
|
||||
|
||||
nyx_block_t *nyx_block_snapshot_init_from_file(const char *folder, bool pre_snapshot)
|
||||
{
|
||||
nyx_block_t *self = malloc(sizeof(nyx_block_t));
|
||||
memset(self, 0, sizeof(nyx_block_t));

@ -51,7 +51,8 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap
nyx_debug("%d vs %x\n", temp_cow_cache_array_size, self->cow_cache_array_size);
assert(self->cow_cache_array_size == temp_cow_cache_array_size);

self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size);
self->cow_cache_array =
    (cow_cache_t **)malloc(sizeof(cow_cache_t *) * self->cow_cache_array_size);

uint32_t i = 0;
uint32_t id = 0;
@ -78,8 +79,8 @@ nyx_block_t* nyx_block_snapshot_init_from_file(const char* folder, bool pre_snap
return self;
}

nyx_block_t* nyx_block_snapshot_init(void){

nyx_block_t *nyx_block_snapshot_init(void)
{
nyx_block_t *self = malloc(sizeof(nyx_block_t));
memset(self, 0, sizeof(nyx_block_t));

@ -91,7 +92,8 @@ nyx_block_t* nyx_block_snapshot_init(void){
}
}

self->cow_cache_array = (cow_cache_t**)malloc(sizeof(cow_cache_t*)*self->cow_cache_array_size);
self->cow_cache_array =
    (cow_cache_t **)malloc(sizeof(cow_cache_t *) * self->cow_cache_array_size);

uint32_t i = 0;
for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
@ -107,30 +109,35 @@ nyx_block_t* nyx_block_snapshot_init(void){
return self;
}

void nyx_block_snapshot_flush(nyx_block_t* self){
void nyx_block_snapshot_flush(nyx_block_t *self)
{
GET_GLOBAL_STATE()->cow_cache_full = false;
}

void nyx_block_snapshot_switch_incremental(nyx_block_t* self){
void nyx_block_snapshot_switch_incremental(nyx_block_t *self)
{
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
    cow_cache_enable_tmp_mode(self->cow_cache_array[i]);
}
nyx_block_snapshot_flush(self);
}

void nyx_block_snapshot_disable_incremental(nyx_block_t* self){
void nyx_block_snapshot_disable_incremental(nyx_block_t *self)
{
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
    cow_cache_disable_tmp_mode(self->cow_cache_array[i]);
}
}

void nyx_block_snapshot_reset(nyx_block_t* self){
void nyx_block_snapshot_reset(nyx_block_t *self)
{
for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
    cow_cache_reset(self->cow_cache_array[i]);
}
}

void nyx_block_snapshot_serialize(nyx_block_t* self, const char* snapshot_folder){
void nyx_block_snapshot_serialize(nyx_block_t *self, const char *snapshot_folder)
{
fast_reload_cow_entry_t entry;

char *tmp1;
@ -146,7 +153,8 @@ void nyx_block_snapshot_serialize(nyx_block_t* self, const char* snapshot_folder

for (uint32_t i = 0; i < self->cow_cache_array_size; i++) {
    entry.id = i;
    strncpy((char*)&entry.idstr, (const char*)self->cow_cache_array[i]->filename, 255);
    strncpy((char *)&entry.idstr,
            (const char *)self->cow_cache_array[i]->filename, 255);
    fwrite(&entry, sizeof(fast_reload_cow_entry_t), 1, f);

    dump_primary_buffer(self->cow_cache_array[i], tmp2);

@ -1,7 +1,7 @@
#pragma once

#include <stdint.h>
#include "nyx/snapshot/block/block_cow.h"
#include <stdint.h>

typedef struct nyx_block_s {
    cow_cache_t **cow_cache_array;

@ -34,32 +34,34 @@
extern void enable_fast_snapshot_rtc(void);
extern void enable_fast_snapshot_kvm_clock(void);

static void enable_fast_snapshot_mode(void){
static void enable_fast_snapshot_mode(void)
{
    enable_fast_snapshot_rtc();
    enable_fast_snapshot_kvm_clock();
}

extern int kvm_nyx_put_tsc_value(CPUState *cs, uint64_t data);

static void set_tsc_value(nyx_device_state_t* self, bool tmp_snapshot){
static void set_tsc_value(nyx_device_state_t *self, bool tmp_snapshot)
{
    if (self->incremental_mode) {
        assert(self->tsc_value_incremental);
        assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value_incremental) == 0);
    }
    else{
        assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value_incremental) ==
               0);
    } else {
        assert(self->tsc_value);
        assert(kvm_nyx_put_tsc_value(qemu_get_cpu(0), self->tsc_value) == 0);
    }
}

static void save_tsc_value(nyx_device_state_t* self, bool incremental_mode){
static void save_tsc_value(nyx_device_state_t *self, bool incremental_mode)
{
    X86CPU *cpu = X86_CPU(qemu_get_cpu(0));
    CPUX86State *env = &cpu->env;

    if (incremental_mode) {
        self->tsc_value_incremental = env->tsc;
    }
    else{
    } else {
        self->tsc_value = env->tsc;
    }
}
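[Editor's note] The two functions above implement the guest TSC bookkeeping: save_tsc_value() captures env->tsc into one of two slots (full vs. incremental snapshot), and set_tsc_value() pushes the matching slot back through kvm_nyx_put_tsc_value() after a reload so the guest clock does not jump. A minimal standalone sketch of that two-slot selection logic, with simplified types (this is not QEMU code):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint64_t tsc_value;             /* slot for the full snapshot */
        uint64_t tsc_value_incremental; /* slot for the incremental snapshot */
        bool     incremental_mode;
    } tsc_state_t;

    static void save_tsc(tsc_state_t *s, uint64_t guest_tsc, bool incremental)
    {
        if (incremental)
            s->tsc_value_incremental = guest_tsc;
        else
            s->tsc_value = guest_tsc;
    }

    /* on restore, the slot is picked by the current snapshot mode */
    static uint64_t restore_tsc(const tsc_state_t *s)
    {
        return s->incremental_mode ? s->tsc_value_incremental : s->tsc_value;
    }

    int main(void)
    {
        tsc_state_t s = { 0 };
        save_tsc(&s, 1000, false);
        save_tsc(&s, 2500, true);
        s.incremental_mode = true;
        assert(restore_tsc(&s) == 2500);
        s.incremental_mode = false;
        assert(restore_tsc(&s) == 1000);
        return 0;
    }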
@ -117,10 +119,10 @@ static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_o

QTAILQ_FOREACH (se, &savevm_state.handlers, entry) {
    if (strcmp(se->idstr, "ram") && strcmp(se->idstr, "block")) {
        if (!se->ops ||
            (in_postcopy && se->ops->save_live_complete_postcopy) ||
        if (!se->ops || (in_postcopy && se->ops->save_live_complete_postcopy) ||
            (in_postcopy && !iterable_only) ||
            !se->ops->save_live_complete_precopy) {
            !se->ops->save_live_complete_precopy)
        {
            continue;
        }

@ -189,7 +191,8 @@ static void fast_qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_o
}


static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) {
static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
{
    SaveStateEntry *se;
    int ret = 1;

@ -236,7 +239,8 @@ static int fast_qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) {
    return ret;
}

static void fast_qemu_savevm_state_setup(QEMUFile *f){
static void fast_qemu_savevm_state_setup(QEMUFile *f)
{
    SaveStateEntry *se;
    int ret;

@ -263,7 +267,8 @@ static void fast_qemu_savevm_state_setup(QEMUFile *f){
}


static int fast_qemu_savevm_state(QEMUFile *f, Error **errp) {
static int fast_qemu_savevm_state(QEMUFile *f, Error **errp)
{
    qemu_savevm_state_header(f);
    fast_qemu_savevm_state_setup(f);

@ -278,10 +283,16 @@ static int fast_qemu_savevm_state(QEMUFile *f, Error **errp) {
}

/* QEMUFile RAM Emulation */
static ssize_t fast_savevm_writev_buffer(void *opaque, struct iovec *iov, int iovcnt, int64_t pos){
static ssize_t fast_savevm_writev_buffer(void *opaque,
                                         struct iovec *iov,
                                         int iovcnt,
                                         int64_t pos)
{
    ssize_t retval = 0;
    for (uint32_t i = 0; i < iovcnt; i++) {
        memcpy((void*)(((struct fast_savevm_opaque_t*)(opaque))->buf + ((struct fast_savevm_opaque_t*)(opaque))->pos), iov[i].iov_base, iov[i].iov_len);
        memcpy((void *)(((struct fast_savevm_opaque_t *)(opaque))->buf +
                        ((struct fast_savevm_opaque_t *)(opaque))->pos),
               iov[i].iov_base, iov[i].iov_len);
        ((struct fast_savevm_opaque_t *)(opaque))->pos += iov[i].iov_len;
        retval += iov[i].iov_len;
    }
@ -289,18 +300,24 @@ static ssize_t fast_savevm_writev_buffer(void *opaque, struct iovec *iov, int io
}


static int fast_savevm_fclose_save_to_buffer(void *opaque){
    memcpy(((struct fast_savevm_opaque_t*)(opaque))->output_buffer, ((struct fast_savevm_opaque_t*)(opaque))->buf, ((struct fast_savevm_opaque_t*)(opaque))->pos);
    *((struct fast_savevm_opaque_t*)(opaque))->output_buffer_size = ((struct fast_savevm_opaque_t*)(opaque))->pos;
static int fast_savevm_fclose_save_to_buffer(void *opaque)
{
    memcpy(((struct fast_savevm_opaque_t *)(opaque))->output_buffer,
           ((struct fast_savevm_opaque_t *)(opaque))->buf,
           ((struct fast_savevm_opaque_t *)(opaque))->pos);
    *((struct fast_savevm_opaque_t *)(opaque))->output_buffer_size =
        ((struct fast_savevm_opaque_t *)(opaque))->pos;
    // printf("DUMPED: %d\n", *((struct fast_savevm_opaque_t*)(opaque))->output_buffer_size);
    return 0;
}

static int fast_loadvm_fclose(void *opaque){
static int fast_loadvm_fclose(void *opaque)
{
    return 0;
}

static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size){
static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size)
{
    memcpy(buf, (void *)(((struct fast_savevm_opaque_t *)(opaque))->buf + pos), size);
    return size;
}
@ -316,7 +333,9 @@ static const QEMUFileOps fast_savevm_ops_to_buffer = {
};
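[Editor's note] The block above emulates a QEMUFile on top of plain RAM: the writev hook gathers an iovec array into a flat buffer at a tracked offset, and the close hook copies the accumulated bytes out. A self-contained sketch of that gather-into-buffer idea, with a simplified stand-in for fast_savevm_opaque_t (this is not the QEMU API):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/uio.h>

    struct mem_file {
        uint8_t *buf; /* RAM backing instead of a real file */
        size_t   pos; /* current write offset */
    };

    /* gather all iovecs into the flat buffer, like fast_savevm_writev_buffer */
    static ssize_t mem_writev(struct mem_file *f, const struct iovec *iov, int iovcnt)
    {
        ssize_t written = 0;
        for (int i = 0; i < iovcnt; i++) {
            memcpy(f->buf + f->pos, iov[i].iov_base, iov[i].iov_len);
            f->pos += iov[i].iov_len;
            written += iov[i].iov_len;
        }
        return written;
    }

    int main(void)
    {
        uint8_t backing[64];
        struct mem_file f = { backing, 0 };
        struct iovec iov[2] = { { "abc", 3 }, { "defg", 4 } };
        assert(mem_writev(&f, iov, 2) == 7);
        assert(memcmp(backing, "abcdefg", 7) == 0);
        return 0;
    }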

nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){
nyx_device_state_t *nyx_device_state_init_from_snapshot(const char *snapshot_folder,
                                                        bool pre_snapshot)
{
    nyx_device_state_t *self = malloc(sizeof(nyx_device_state_t));
    memset(self, 0, sizeof(nyx_device_state_t));

@ -324,7 +343,8 @@ nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_fol
    self->state_buf_size = 0;

    char *qemu_state_file;
    assert(asprintf(&qemu_state_file, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1);
    assert(asprintf(&qemu_state_file, "%s/fast_snapshot.qemu_state",
                    snapshot_folder) != -1);

    struct fast_savevm_opaque_t fast_savevm_opaque;
    FILE *f;
@ -369,8 +389,8 @@ nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_fol
 * backed by RAM. state_reallocation_new() then uses this file to build an
 * optimized sequence of snapshot restore operations.
 */
nyx_device_state_t* nyx_device_state_init(void){

nyx_device_state_t *nyx_device_state_init(void)
{
    nyx_device_state_t *self = malloc(sizeof(nyx_device_state_t));
    memset(self, 0, sizeof(nyx_device_state_t));

@ -411,41 +431,49 @@ nyx_device_state_t* nyx_device_state_init(void){
    return self;
}

void nyx_device_state_switch_incremental(nyx_device_state_t* self){
void nyx_device_state_switch_incremental(nyx_device_state_t *self)
{
    self->incremental_mode = true;
    fdl_fast_create_tmp(self->qemu_state);
    fdl_fast_enable_tmp(self->qemu_state);
}

void nyx_device_state_disable_incremental(nyx_device_state_t* self){
void nyx_device_state_disable_incremental(nyx_device_state_t *self)
{
    fdl_fast_disable_tmp(self->qemu_state);
    self->incremental_mode = false;
}

void nyx_device_state_restore(nyx_device_state_t* self){
void nyx_device_state_restore(nyx_device_state_t *self)
{
    fdl_fast_reload(self->qemu_state);
    call_fast_change_handlers();
}

void nyx_device_state_post_restore(nyx_device_state_t* self){
void nyx_device_state_post_restore(nyx_device_state_t *self)
{
    set_tsc_value(self, self->incremental_mode);
}


void nyx_device_state_save_tsc(nyx_device_state_t* self){
void nyx_device_state_save_tsc(nyx_device_state_t *self)
{
    save_tsc_value(self, false);
}


void nyx_device_state_save_tsc_incremental(nyx_device_state_t* self){
void nyx_device_state_save_tsc_incremental(nyx_device_state_t *self)
{
    save_tsc_value(self, true);
}

void nyx_device_state_serialize(nyx_device_state_t* self, const char* snapshot_folder){
void nyx_device_state_serialize(nyx_device_state_t *self, const char *snapshot_folder)
{
    char *tmp;
    assert(asprintf(&tmp, "%s/fast_snapshot.qemu_state", snapshot_folder) != -1);

    FILE *f_qemu_state = fopen(tmp, "w+b");
    assert(fwrite(self->state_buf, 1, self->state_buf_size, f_qemu_state) == self->state_buf_size);
    assert(fwrite(self->state_buf, 1, self->state_buf_size, f_qemu_state) ==
           self->state_buf_size);
    fclose(f_qemu_state);
}
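[Editor's note] nyx_device_state_serialize() is a plain path-build-and-dump: asprintf() forms <folder>/fast_snapshot.qemu_state and a single fwrite() must cover the whole state buffer. A standalone sketch of the same pattern; the /tmp folder and the payload bytes are illustrative only:

    #define _GNU_SOURCE /* asprintf on glibc */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void serialize_state(const uint8_t *buf, size_t len, const char *folder)
    {
        char *path;
        assert(asprintf(&path, "%s/fast_snapshot.qemu_state", folder) != -1);

        FILE *f = fopen(path, "w+b");
        assert(f != NULL);
        /* one fwrite, asserted to be complete, as in the code above */
        assert(fwrite(buf, 1, len, f) == len);
        fclose(f);
        free(path);
    }

    int main(void)
    {
        const uint8_t state[] = { 0xde, 0xad, 0xbe, 0xef };
        serialize_state(state, sizeof(state), "/tmp");
        return 0;
    }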

@ -1,8 +1,8 @@
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include "nyx/snapshot/devices/state_reallocation.h"
#include <stdbool.h>
#include <stdint.h>

typedef struct nyx_device_state_s {
    state_reallocation_t *qemu_state;
@ -19,7 +19,8 @@ typedef struct nyx_device_state_s{


nyx_device_state_t *nyx_device_state_init(void);
nyx_device_state_t* nyx_device_state_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot);
nyx_device_state_t *nyx_device_state_init_from_snapshot(const char *snapshot_folder,
                                                        bool pre_snapshot);

void nyx_device_state_restore(nyx_device_state_t *self);
void nyx_device_state_post_restore(nyx_device_state_t *self);

@ -99,15 +99,15 @@ static SaveStateEntry *fdl_find_se(const char *idstr, int instance_id)

QTAILQ_FOREACH (se, &savevm_state.handlers, entry) {
    if (!strcmp(se->idstr, idstr) &&
        (instance_id == se->instance_id ||
         instance_id == se->alias_id)){
        (instance_id == se->instance_id || instance_id == se->alias_id))
    {
        return se;
    }
    /* Migrating from an older version? */
    if (strstr(se->idstr, idstr) && se->compat) {
        if (!strcmp(se->compat->idstr, idstr) &&
            (instance_id == se->compat->instance_id ||
             instance_id == se->alias_id)){
            (instance_id == se->compat->instance_id || instance_id == se->alias_id))
        {
            return se;
        }
    }
@ -115,10 +115,16 @@ static SaveStateEntry *fdl_find_se(const char *idstr, int instance_id)
    return NULL;
}

static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const VMStateDescription *vmsd, void *opaque, int version_id, uintptr_t* opaque_ptr);
static int fdl_vmstate_load_state(state_reallocation_t *self,
                                  QEMUFile *f,
                                  const VMStateDescription *vmsd,
                                  void *opaque,
                                  int version_id,
                                  uintptr_t *opaque_ptr);


static inline VMStateDescription* fdl_vmstate_get_subsection(VMStateDescription **sub, char *idstr)
static inline VMStateDescription *fdl_vmstate_get_subsection(VMStateDescription **sub,
                                                             char *idstr)
{
    while (sub && *sub && (*sub)->needed) {
        if (strcmp(idstr, (*sub)->name) == 0) {
@ -129,7 +135,10 @@ static inline VMStateDescription* fdl_vmstate_get_subsection(VMStateDescription
    return NULL;
}

static int fdl_vmstate_subsection_load(state_reallocation_t* self, QEMUFile *f, const VMStateDescription *vmsd, void *opaque)
static int fdl_vmstate_subsection_load(state_reallocation_t *self,
                                       QEMUFile *f,
                                       const VMStateDescription *vmsd,
                                       void *opaque)
{
    while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) {
        char idstr[256], *idstr_ret;
@ -153,7 +162,8 @@ static int fdl_vmstate_subsection_load(state_reallocation_t* self, QEMUFile *f,
            /* it doesn't have a valid subsection name */
            return 0;
        }
        sub_vmsd = fdl_vmstate_get_subsection((VMStateDescription **)vmsd->subsections, idstr);
        sub_vmsd = fdl_vmstate_get_subsection((VMStateDescription **)vmsd->subsections,
                                              idstr);
        if (sub_vmsd == NULL) {
            return -ENOENT;
        }
@ -177,11 +187,12 @@ uint32_t post_version_id_array[256];
void *post_opaque_array[256];


static void add_post_fptr(state_reallocation_t* self, void* fptr, uint32_t version_id, void* opaque, const char* name){

static void add_post_fptr(state_reallocation_t *self,
                          void *fptr,
                          uint32_t version_id,
                          void *opaque,
                          const char *name)
{
    if (!self) {
        return;
    }
@ -191,7 +202,6 @@ static void add_post_fptr(state_reallocation_t* self, void* fptr, uint32_t versi
    }


    if (1) {
        /*
        if( !strcmp("cpu_common", name) ||
@ -257,9 +267,12 @@ static void add_post_fptr(state_reallocation_t* self, void* fptr, uint32_t versi
        if (self->fast_state_fptr_pos >= self->fast_state_fptr_size) {
            nyx_debug("RESIZE %s\n", __func__);
            self->fast_state_fptr_size += REALLOC_SIZE;
            self->fptr = realloc(self->fptr, self->fast_state_fptr_size * sizeof(void*));
            self->opaque = realloc(self->opaque, self->fast_state_fptr_size * sizeof(void*));
            self->version = realloc(self->version, self->fast_state_fptr_size * sizeof(uint32_t));
            self->fptr =
                realloc(self->fptr, self->fast_state_fptr_size * sizeof(void *));
            self->opaque =
                realloc(self->opaque, self->fast_state_fptr_size * sizeof(void *));
            self->version =
                realloc(self->version, self->fast_state_fptr_size * sizeof(uint32_t));
        }
    }
}
@ -270,11 +283,13 @@ void fast_get_pci_irq_state(void* data, size_t size, void* opaque);
// void fast_virtio_device_get(void* data, size_t size, void* opaque);
int virtio_device_get(QEMUFile *f, void *opaque, size_t size, const VMStateField *field);

static int fast_loadvm_fclose(void *opaque){
static int fast_loadvm_fclose(void *opaque)
{
    return 0;
}

static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size){
static ssize_t fast_loadvm_get_buffer(void *opaque, uint8_t *buf, int64_t pos, size_t size)
{
    assert(pos < ((struct fast_savevm_opaque_t *)(opaque))->buflen);
    memcpy(buf, (void *)(((struct fast_savevm_opaque_t *)(opaque))->buf + pos), size);
    return size;
@ -299,7 +314,14 @@ static void fast_virtio_device_get(void* data, size_t size, void* opaque)
    virtio_device_get(f, opaque, size, NULL);
}

static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t size, void* field, QEMUFile* f, const char* name){
static void add_get(state_reallocation_t *self,
                    void *fptr,
                    void *opaque,
                    size_t size,
                    void *field,
                    QEMUFile *f,
                    const char *name)
{
    if (!self) {
        return;
    }
@ -315,7 +337,8 @@ static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t
        *((uint64_t *)data) = qemu_get_be64(f);
    }

    else if(!strcmp(name, "pci irq state")){
    else if (!strcmp(name, "pci irq state"))
    {
        qemu_file_skip(f, size * -1);
        handler = fast_get_pci_irq_state;
        data = malloc(sizeof(uint8_t) * size);
@ -324,20 +347,17 @@ static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t
        ((uint32_t *)data)[1] = qemu_get_be32(f);
        ((uint32_t *)data)[2] = qemu_get_be32(f);
        ((uint32_t *)data)[3] = qemu_get_be32(f);
    }
    else if(!strcmp(name, "pci config")){
    } else if (!strcmp(name, "pci config")) {
        qemu_file_skip(f, size * -1);
        handler = fast_get_pci_config_device;
        data = malloc(sizeof(uint8_t) * size);
        qemu_get_buffer(f, (uint8_t *)data, size);
    }
    else if(!strcmp(name, "virtio")){
    } else if (!strcmp(name, "virtio")) {
        qemu_file_skip(f, size * -1);
        handler = fast_virtio_device_get;
        data = malloc(sizeof(uint8_t) * size);
        qemu_get_buffer(f, (uint8_t *)data, size);
    }
    else{
    } else {
        fprintf(stderr, "WARNING: NOT IMPLEMENTED FAST GET ROUTINE for %s\n", name);
        abort();
        return;
@ -355,30 +375,42 @@ static void add_get(state_reallocation_t* self, void* fptr, void* opaque, size_t
    if (self->fast_state_get_fptr_pos >= self->fast_state_get_fptr_size) {
        nyx_debug("RESIZE %s\n", __func__);
        self->fast_state_get_fptr_size += REALLOC_SIZE;
        self->get_fptr = realloc(self->get_fptr, self->fast_state_get_fptr_size * sizeof(void*));
        self->get_opaque = realloc(self->get_opaque, self->fast_state_get_fptr_size * sizeof(void*));
        self->get_size = realloc(self->get_size, self->fast_state_get_fptr_size * sizeof(size_t));
        self->get_data = realloc(self->get_data, self->fast_state_get_fptr_size * sizeof(void*));
        self->get_fptr =
            realloc(self->get_fptr, self->fast_state_get_fptr_size * sizeof(void *));
        self->get_opaque = realloc(self->get_opaque,
                                   self->fast_state_get_fptr_size * sizeof(void *));
        self->get_size =
            realloc(self->get_size, self->fast_state_get_fptr_size * sizeof(size_t));
        self->get_data =
            realloc(self->get_data, self->fast_state_get_fptr_size * sizeof(void *));
    }
}

}

static void add_mblock(state_reallocation_t* self, char* foo, const char* bar, size_t offset, uint64_t start, uint64_t size){

static void add_mblock(state_reallocation_t *self,
                       char *foo,
                       const char *bar,
                       size_t offset,
                       uint64_t start,
                       uint64_t size)
{
    if (!self) {
        return;
    }

    if(self->fast_state_pos && (uint64_t)(self->ptr[self->fast_state_pos-1]+self->size[self->fast_state_pos-1]) == start){
    if (self->fast_state_pos &&
        (uint64_t)(self->ptr[self->fast_state_pos - 1] +
                   self->size[self->fast_state_pos - 1]) == start)
    {
        void *new = (void *)(self->pre_alloc_block + self->pre_alloc_block_offset);
        self->pre_alloc_block_offset += size;
        memcpy(new, (void *)start, size);

        self->size[self->fast_state_pos-1] = size + self->size[self->fast_state_pos-1];
    }
    else{
        self->size[self->fast_state_pos - 1] =
            size + self->size[self->fast_state_pos - 1];
    } else {
        self->ptr[self->fast_state_pos] = (void *)start;
        self->copy[self->fast_state_pos] = (void*)(self->pre_alloc_block+self->pre_alloc_block_offset);
        self->copy[self->fast_state_pos] =
            (void *)(self->pre_alloc_block + self->pre_alloc_block_offset);
        self->pre_alloc_block_offset += size;

        memcpy(self->copy[self->fast_state_pos], (void *)start, size);
@ -393,8 +425,13 @@ static void add_mblock(state_reallocation_t* self, char* foo, const char* bar, s
    }
}
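[Editor's note] add_mblock() is the core optimization of the restore list: every saved field becomes a (ptr, copy, size) triple, and a field that starts exactly where the previous one ended is merged into the previous entry, so fdl_fast_reload() later issues one memcpy per contiguous run instead of one per field. A standalone model of that coalescing rule (fixed-size arrays, no QEMU types):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_BLOCKS 16

    typedef struct {
        uintptr_t start[MAX_BLOCKS];
        size_t    size[MAX_BLOCKS];
        size_t    count;
    } block_list_t;

    static void add_block(block_list_t *l, uintptr_t start, size_t size)
    {
        if (l->count && l->start[l->count - 1] + l->size[l->count - 1] == start) {
            /* adjacent to the previous block: extend it instead of appending */
            l->size[l->count - 1] += size;
        } else {
            l->start[l->count] = start;
            l->size[l->count]  = size;
            l->count++;
        }
    }

    int main(void)
    {
        block_list_t l = { .count = 0 };
        add_block(&l, 0x1000, 0x10);
        add_block(&l, 0x1010, 0x10); /* adjacent -> merged */
        add_block(&l, 0x2000, 0x08); /* gap -> new entry */
        assert(l.count == 2 && l.size[0] == 0x20);
        return 0;
    }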

static inline int get_handler(state_reallocation_t* self, QEMUFile* f, void* curr_elem, size_t size, VMStateField *field, char* vmsd_name){

static inline int get_handler(state_reallocation_t *self,
                              QEMUFile *f,
                              void *curr_elem,
                              size_t size,
                              VMStateField *field,
                              char *vmsd_name)
{
    int ret;
    // printf("%s\n", vmsd_name);

@ -403,107 +440,105 @@ static inline int get_handler(state_reallocation_t* self, QEMUFile* f, void* cur

    if (!strcmp(field->info->name, "bool")) {
        assert(size == 1);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
    }
    else if(!strcmp(field->info->name, "int8")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   1);
    } else if (!strcmp(field->info->name, "int8")) {
        assert(size == 1);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
    }
    else if(!strcmp(field->info->name, "int16")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   1);
    } else if (!strcmp(field->info->name, "int16")) {
        assert(size == 2);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 2);
    }
    else if(!strcmp(field->info->name, "int32")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   2);
    } else if (!strcmp(field->info->name, "int32")) {
        assert(size == 4);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
    }
    else if(!strcmp(field->info->name, "int32 equal")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   4);
    } else if (!strcmp(field->info->name, "int32 equal")) {
        assert(size == 4);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
    }
    else if(!strcmp(field->info->name, "int32 le")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   4);
    } else if (!strcmp(field->info->name, "int32 le")) {
        assert(size == 4);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
    }
    else if(!strcmp(field->info->name, "int64")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   4);
    } else if (!strcmp(field->info->name, "int64")) {
        assert(size == 8);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
    }
    else if(!strcmp(field->info->name, "uint8")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   8);
    } else if (!strcmp(field->info->name, "uint8")) {
        assert(size == 1);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
    }
    else if(!strcmp(field->info->name, "uint16")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   1);
    } else if (!strcmp(field->info->name, "uint16")) {
        assert(size == 2);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 2);
    }
    else if(!strcmp(field->info->name, "uint32")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   2);
    } else if (!strcmp(field->info->name, "uint32")) {
        assert(size == 4);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
    }
    else if(!strcmp(field->info->name, "uint32 equal")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   4);
    } else if (!strcmp(field->info->name, "uint32 equal")) {
        assert(size == 4);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 4);
    }
    else if(!strcmp(field->info->name, "uint64")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   4);
    } else if (!strcmp(field->info->name, "uint64")) {
        assert(size == 8);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
    }
    else if(!strcmp(field->info->name, "int64 equal")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   8);
    } else if (!strcmp(field->info->name, "int64 equal")) {
        assert(size == 8);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
    }
    else if(!strcmp(field->info->name, "uint8 equal")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   8);
    } else if (!strcmp(field->info->name, "uint8 equal")) {
        assert(size == 1);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 1);
    }
    else if(!strcmp(field->info->name, "uint16 equal")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   1);
    } else if (!strcmp(field->info->name, "uint16 equal")) {
        assert(size == 16);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 2);
    }
    else if(!strcmp(field->info->name, "float64")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   2);
    } else if (!strcmp(field->info->name, "float64")) {
        assert(size == 64);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
    }
    else if(!strcmp(field->info->name, "CPU_Double_U")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   8);
    } else if (!strcmp(field->info->name, "CPU_Double_U")) {
        assert(0);
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, 8);
    }
    else if(!strcmp(field->info->name, "buffer")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, size);
    }
    else if(!strcmp(field->info->name, "unused_buffer")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   8);
    } else if (!strcmp(field->info->name, "buffer")) {
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   size);
    } else if (!strcmp(field->info->name, "unused_buffer")) {
        /* save nothing */
    }
    else if(!strcmp(field->info->name, "tmp")){
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem, size);
    } else if (!strcmp(field->info->name, "tmp")) {
        add_mblock(self, vmsd_name, field->name, field->offset, (uint64_t)curr_elem,
                   size);
        /* save nothing */
    }
    else if(!strcmp(field->info->name, "bitmap")){
    } else if (!strcmp(field->info->name, "bitmap")) {
        assert(0);
    }
    else if(!strcmp(field->info->name, "qtailq")){
    } else if (!strcmp(field->info->name, "qtailq")) {
        assert(0);
    }
    else if(!strcmp(field->info->name, "timer")){
        add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
    }
    else if(!strcmp(field->info->name, "fpreg")){
    } else if (!strcmp(field->info->name, "timer")) {
        add_get(self, (void *)field->info->get, curr_elem, size, (void *)field, f,
                field->info->name);
    } else if (!strcmp(field->info->name, "fpreg")) {
        nyx_debug("type: %s (size: %lx)\n", field->info->name, size);
        assert(0);
        add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
    }
    else if(!strcmp(field->info->name, "pci config")){
        add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
    }
    else if(!strcmp(field->info->name, "pci irq state")){
        add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
    }
    else if(!strcmp(field->info->name, "virtio")){
        add_get(self, (void*) field->info->get, curr_elem, size, (void*) field, f, field->info->name);
    }
    else{
        fprintf(stderr, "[QEMU-PT] %s: WARNING no handler for %s, type %s, size %lx!\n",
        add_get(self, (void *)field->info->get, curr_elem, size, (void *)field, f,
                field->info->name);
    } else if (!strcmp(field->info->name, "pci config")) {
        add_get(self, (void *)field->info->get, curr_elem, size, (void *)field, f,
                field->info->name);
    } else if (!strcmp(field->info->name, "pci irq state")) {
        add_get(self, (void *)field->info->get, curr_elem, size, (void *)field, f,
                field->info->name);
    } else if (!strcmp(field->info->name, "virtio")) {
        add_get(self, (void *)field->info->get, curr_elem, size, (void *)field, f,
                field->info->name);
    } else {
        fprintf(stderr,
                "[QEMU-PT] %s: WARNING no handler for %s, type %s, size %lx!\n",
                __func__, vmsd_name, field->info->name, size);
        assert(0);
    }
@ -511,7 +546,12 @@ static inline int get_handler(state_reallocation_t* self, QEMUFile* f, void* cur
}

/* TODO: modify opaque_ptr */
static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const VMStateDescription *vmsd, void *opaque, int version_id, uintptr_t* opaque_ptr)
static int fdl_vmstate_load_state(state_reallocation_t *self,
                                  QEMUFile *f,
                                  const VMStateDescription *vmsd,
                                  void *opaque,
                                  int version_id,
                                  uintptr_t *opaque_ptr)
{
#ifdef VERBOSE_DEBUG
    printf("---------------------------------\nVMSD: %p\t%s\n", opaque, vmsd->name);
@ -530,8 +570,7 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
    printf("OLD LOAD\n");
#endif

    if (vmsd->load_state_old &&
        version_id >= vmsd->minimum_version_id_old) {
    if (vmsd->load_state_old && version_id >= vmsd->minimum_version_id_old) {
        fprintf(stderr, "OLDSTATE\n");
        assert(0);
        ret = vmsd->load_state_old(f, opaque, version_id);
@ -552,10 +591,9 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
#ifdef VERBOSE_DEBUG
        printf("Field: %s %s %s\n", __func__, vmsd->name, field->name);
#endif
        if ((field->field_exists &&
             field->field_exists(opaque, version_id)) ||
            (!field->field_exists &&
             field->version_id <= version_id)) {
        if ((field->field_exists && field->field_exists(opaque, version_id)) ||
            (!field->field_exists && field->version_id <= version_id))
        {
            void *first_elem = opaque + field->offset;
            int i, n_elems = vmstate_n_elems(opaque, field);
            int size = vmstate_size(opaque, field);
@ -582,14 +620,18 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const

            if (field->flags & VMS_ARRAY_OF_POINTER) {
#ifdef VERBOSE_DEBUG
                printf("Field-Offset 1 0x%lx-0x%lx\n", (uint64_t)(field->offset + (opaque)), (uint64_t)(field->offset+(size*n_elems) + (opaque)));
                printf("=VMS_ARRAY_OF_POINTER 1= %lx %x\n", *((uint64_t*)curr_elem), size);
                printf("Field-Offset 1 0x%lx-0x%lx\n",
                       (uint64_t)(field->offset + (opaque)),
                       (uint64_t)(field->offset + (size * n_elems) + (opaque)));
                printf("=VMS_ARRAY_OF_POINTER 1= %lx %x\n",
                       *((uint64_t *)curr_elem), size);
                // hexDump((void*)field->name, curr_elem, size);
#endif

                tmp_opaque_ptr = curr_elem;
                curr_elem = *(void **)curr_elem;
                add_mblock(self, (char*)vmsd->name, (const char*)field->name, field->offset, (uint64_t)(curr_elem), (uint64_t)(size));
                add_mblock(self, (char *)vmsd->name, (const char *)field->name,
                           field->offset, (uint64_t)(curr_elem), (uint64_t)(size));
#ifdef VERBOSE_DEBUG
                // hexDump((void*)field->name, curr_elem, size);
#endif
@ -599,34 +641,40 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
                // if null pointer check placeholder and do not follow
                assert(field->flags & VMS_ARRAY_OF_POINTER);
#ifdef VERBOSE_DEBUG
                printf("Field-Offset 2 0x%lx-0x%lx\n", (uint64_t)(field->offset + (opaque)), (uint64_t)(field->offset+(size*n_elems) + (opaque)));
                printf("=VMS_ARRAY_OF_POINTER 2= %lx %x\n", *((uint64_t*)curr_elem), size);
                printf("Field-Offset 2 0x%lx-0x%lx\n",
                       (uint64_t)(field->offset + (opaque)),
                       (uint64_t)(field->offset + (size * n_elems) + (opaque)));
                printf("=VMS_ARRAY_OF_POINTER 2= %lx %x\n",
                       *((uint64_t *)curr_elem), size);
                // hexDump((void*)field->name, curr_elem, size);
#endif

                nyx_debug("*** vmstate_info_nullptr.get ***\n");
                ret = vmstate_info_nullptr.get(f, curr_elem, size, NULL);
                add_mblock(self, (char*)vmsd->name, (const char*)field->name, field->offset, (uint64_t)(curr_elem), (uint64_t)(size));
                add_mblock(self, (char *)vmsd->name, (const char *)field->name,
                           field->offset, (uint64_t)(curr_elem), (uint64_t)(size));
#ifdef VERBOSE_DEBUG
                // hexDump((void*)field->name, curr_elem, size);
#endif

            } else if (field->flags & VMS_STRUCT) {
                //printf("Field-Offset 0x%lx-0x%lx\n", field->offset + (opaque-base_opaque), field->offset+(size*n_elems) + (opaque-base_opaque));
                // printf("Field-Offset 0x%lx-0x%lx\n", field->offset + (opaque-base_opaque),
                // field->offset+(size*n_elems) + (opaque-base_opaque));
#ifdef VERBOSE_DEBUG
                printf("=VMS_STRUCT= %lx %x\n", *((uint64_t *)curr_elem), size);
                // hexDump((void*)field->name, curr_elem, size);
#endif
                /* FIXME */
                ret = fdl_vmstate_load_state(self, f, field->vmsd, curr_elem, field->vmsd->version_id, tmp_opaque_ptr);
                ret = fdl_vmstate_load_state(self, f, field->vmsd, curr_elem,
                                             field->vmsd->version_id,
                                             tmp_opaque_ptr);
#ifdef VERBOSE_DEBUG
                // hexDump((void*)field->name, curr_elem, size);
#endif

            } else {

                ret = get_handler(self, f, curr_elem, size, field, (char*)vmsd->name);
                ret = get_handler(self, f, curr_elem, size, field,
                                  (char *)vmsd->name);
            }
            if (ret >= 0) {
                ret = qemu_file_get_error(f);
@ -639,8 +687,7 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
        } else if (field->flags & VMS_MUST_EXIST) {
            nyx_debug("Input validation failed: %s/%s", vmsd->name, field->name);
            return -1;
        }
        else {
        } else {
            // printf("Field does not exist...\n");
        }
        field++;
@ -667,17 +714,24 @@ static int fdl_vmstate_load_state(state_reallocation_t* self, QEMUFile *f, const
}


static int fdl_vmstate_load(state_reallocation_t* self, QEMUFile *f, SaveStateEntry *se, int version_id)
static int fdl_vmstate_load(state_reallocation_t *self,
                            QEMUFile *f,
                            SaveStateEntry *se,
                            int version_id)
{
    if (!se->vmsd) { /* Old style */
        return se->ops->load_state(f, se->opaque, version_id);
    }

    uintptr_t *t = (uintptr_t *)&(se->opaque);
    return fdl_vmstate_load_state(self, f, se->vmsd, se->opaque, version_id, (uintptr_t *)t);
    return fdl_vmstate_load_state(self, f, se->vmsd, se->opaque, version_id,
                                  (uintptr_t *)t);
}

static int fdl_enumerate_section(state_reallocation_t* self, QEMUFile *f, MigrationIncomingState *mis){
static int fdl_enumerate_section(state_reallocation_t *self,
                                 QEMUFile *f,
                                 MigrationIncomingState *mis)
{
    uint32_t instance_id, version_id, section_id;
    SaveStateEntry *se;

@ -702,14 +756,16 @@ static int fdl_enumerate_section(state_reallocation_t* self, QEMUFile *f, Migrat

    /* Validate version */
    if (version_id > se->version_id) {
        printf("savevm: unsupported version %d for '%s' v%d", version_id, idstr, se->version_id);
        printf("savevm: unsupported version %d for '%s' v%d", version_id, idstr,
               se->version_id);
        return -EINVAL;
    }

    se->load_version_id = version_id;
    se->load_section_id = section_id;

    if(se->vmsd && ((strcmp("tiMer", (const char*)(VMStateDescription *)(se->vmsd)->name))
    if (se->vmsd &&
        ((strcmp("tiMer", (const char *)(VMStateDescription *)(se->vmsd)->name))
    /*
    && (strcmp("cpu_common", (VMStateDescription *)(se->vmsd)->name))
    && (strcmp("cpu", (VMStateDescription *)(se->vmsd)->name))
@ -751,16 +807,17 @@ static int fdl_enumerate_section(state_reallocation_t* self, QEMUFile *f, Migrat
    */

    )){
    ))
    {
        ret = fdl_vmstate_load(self, f, se, version_id);
    }
    else{
    } else {
        nyx_debug("---------------------------------\nVMSD2: %p\n", (void *)se->vmsd);
        ret = vmstate_load(f, se);
    }

    if (ret < 0) {
        printf("error while loading state for instance 0x%x of device '%s'", instance_id, idstr);
        printf("error while loading state for instance 0x%x of device '%s'",
               instance_id, idstr);
        return ret;
    }

@ -770,7 +827,8 @@ static int fdl_enumerate_section(state_reallocation_t* self, QEMUFile *f, Migrat
    return 0;
}

static void fdl_enumerate_global_states(state_reallocation_t* self, QEMUFile *f){
static void fdl_enumerate_global_states(state_reallocation_t *self, QEMUFile *f)
{
    ((struct QEMUFile_tmp *)f)->pos = 0;
    ((struct QEMUFile_tmp *)f)->buf_index = 0;
    ((struct QEMUFile_tmp *)f)->buf_size = 0;
@ -784,7 +842,8 @@ static void fdl_enumerate_global_states(state_reallocation_t* self, QEMUFile *f)
    qemu_get_byte(f);

    /* migration state */
    vmstate_load_state(f, (VMStateDescription*) &vmstate_configuration, (void*)&savevm_state, 0);
    vmstate_load_state(f, (VMStateDescription *)&vmstate_configuration,
                       (void *)&savevm_state, 0);

    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
        switch (section_type) {
@ -801,7 +860,8 @@ static void fdl_enumerate_global_states(state_reallocation_t* self, QEMUFile *f)
        }
    }

state_reallocation_t* state_reallocation_new(QEMUFile *f){
state_reallocation_t *state_reallocation_new(QEMUFile *f)
{
    state_reallocation_t *self = malloc(sizeof(state_reallocation_t));
    self->fast_state_pos = 0;
    self->fast_state_size = REALLOC_SIZE;
@ -824,7 +884,9 @@ state_reallocation_t* state_reallocation_new(QEMUFile *f){
    self->get_size = malloc(sizeof(size_t) * REALLOC_SIZE);
    self->get_data = malloc(sizeof(void *) * REALLOC_SIZE);

    self->pre_alloc_block = (uint32_t*)mmap(NULL, PRE_ALLOC_BLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    self->pre_alloc_block = (uint32_t *)mmap(NULL, PRE_ALLOC_BLOCK_SIZE,
                                             PROT_READ | PROT_WRITE,
                                             MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
    assert(self->pre_alloc_block != (void *)-1);
    self->pre_alloc_block_offset = 0;

@ -843,8 +905,8 @@ state_reallocation_t* state_reallocation_new(QEMUFile *f){
    return self;
}

void fdl_fast_reload(state_reallocation_t* self){

void fdl_fast_reload(state_reallocation_t *self)
{
    for (uint32_t i = 0; i < self->fast_state_fptr_pos; i++) {
        if ((self->version[i]) == 1337) {
            ((int (*)(void *opaque))self->fptr[i])(self->opaque[i]);
@ -855,8 +917,7 @@ void fdl_fast_reload(state_reallocation_t* self){
        for (uint32_t i = 0; i < self->fast_state_pos; i++) {
            memcpy(self->ptr[i], self->copy[i], self->size[i]);
        }
    }
    else{
    } else {
        for (uint32_t i = 0; i < self->fast_state_pos; i++) {
            memcpy(self->ptr[i], self->tmp_snapshot.copy[i], self->size[i]);
        }
@ -864,17 +925,18 @@ void fdl_fast_reload(state_reallocation_t* self){

    for (uint32_t i = 0; i < self->fast_state_fptr_pos; i++) {
        if ((self->version[i]) != 1337) {
            ((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i], self->version[i]);
            ((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i],
                                                                   self->version[i]);
        }
    }
}
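[Editor's note] fdl_fast_reload() restores in three phases: handlers registered with the magic version marker 1337 run first, then the recorded memory blocks are memcpy'd back (from either the primary or the tmp-snapshot copies), and finally the versioned post-load handlers run. A standalone sketch of that marker-based dispatch; the marker value and the void*-handler-table-plus-cast style mirror the code above, everything else is simplified:

    #include <assert.h>
    #include <stdint.h>

    #define PRE_MARKER 1337u

    static int order[3];
    static int order_pos;

    static void pre_handler(void *opaque)               { (void)opaque; order[order_pos++] = 1; }
    static void post_handler(void *opaque, int version) { (void)opaque; (void)version; order[order_pos++] = 3; }

    int main(void)
    {
        /* handler table stores untyped pointers; the marker selects the cast */
        void    *fptr[2]    = { (void *)pre_handler, (void *)post_handler };
        uint32_t version[2] = { PRE_MARKER, 2 };

        for (int i = 0; i < 2; i++)                 /* phase 1: pre handlers */
            if (version[i] == PRE_MARKER)
                ((void (*)(void *))fptr[i])(0);

        order[order_pos++] = 2;                     /* phase 2: memcpy restore would run here */

        for (int i = 0; i < 2; i++)                 /* phase 3: versioned post handlers */
            if (version[i] != PRE_MARKER)
                ((void (*)(void *, int))fptr[i])(0, (int)version[i]);

        assert(order[0] == 1 && order[1] == 2 && order[2] == 3);
        return 0;
    }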

void fdl_fast_create_tmp(state_reallocation_t* self){
void fdl_fast_create_tmp(state_reallocation_t *self)
{
    for (uint32_t i = 0; i < self->fast_state_fptr_pos; i++) {
        if ((self->version[i]) == 1337) {
            ((int (*)(void *opaque))self->fptr[i])(self->opaque[i]);
        }
        else{
        } else {
            //((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i], self->version[i]);
        }
    }
@ -887,17 +949,19 @@ void fdl_fast_create_tmp(state_reallocation_t* self){
    for (uint32_t i = 0; i < self->fast_state_fptr_pos; i++) {
        if ((self->version[i]) == 1337) {
            //((int (*)(void *opaque))self->fptr[i])(self->opaque[i]);
        }
        else{
            ((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i], self->version[i]);
        } else {
            ((int (*)(void *opaque, int version_id))self->fptr[i])(self->opaque[i],
                                                                   self->version[i]);
        }
    }
}

void fdl_fast_enable_tmp(state_reallocation_t* self){
void fdl_fast_enable_tmp(state_reallocation_t *self)
{
    self->tmp_snapshot.enabled = true;
}

void fdl_fast_disable_tmp(state_reallocation_t* self){
void fdl_fast_disable_tmp(state_reallocation_t *self)
{
    self->tmp_snapshot.enabled = false;
}

@ -21,8 +21,8 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.

#pragma once
#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "monitor/monitor.h"
#include "nyx/khash.h"

#define IO_BUF_SIZE 32768

@ -1,9 +1,9 @@

#include <assert.h>
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "nyx/snapshot/devices/vm_change_state_handlers.h"
#include <assert.h>


VMChangeStateHandler *change_kvm_clock_handler = NULL;
@ -17,7 +17,8 @@ VMChangeStateHandler* change_ide_core_handler = NULL;
uint8_t change_ide_core_opaque_num = 0;
void *change_ide_core_opaque[32] = { NULL };

void call_fast_change_handlers(void){
void call_fast_change_handlers(void)
{
    assert(change_kvm_clock_handler && change_kvm_pit_handler && change_cpu_handler);

    change_kvm_clock_handler(change_kvm_clock_opaque, 1, RUN_STATE_RUNNING);
@ -34,7 +35,8 @@ void call_fast_change_handlers(void){
    }
}

void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id){
void add_fast_reload_change_handler(VMChangeStateHandler *cb, void *opaque, int id)
{
    switch (id) {
    case RELOAD_HANDLER_KVM_CLOCK:
        change_kvm_clock_handler = cb;

@ -1,8 +1,8 @@
#pragma once

#include <stdlib.h>
#include <stdint.h>
#include "sysemu/runstate.h"
#include <stdint.h>
#include <stdlib.h>

#define RELOAD_HANDLER_KVM_CLOCK 0
#define RELOAD_HANDLER_KVM_PIT 1

@ -1,21 +1,22 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"

#include "qemu/bitmap.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/bitmap.h"
#include "qemu/rcu_queue.h"

#include "nyx/memory_access.h"

#include "nyx/snapshot/helper.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/snapshot/helper.h"

// #define DEBUG_NYX_SNAPSHOT_HELPER

uint64_t get_ram_size(void){
uint64_t get_ram_size(void)
{
    RAMBlock *block;
    uint64_t guest_ram_size = 0;
    QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {

@ -1,61 +1,72 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"

#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"

#include "nyx/memory_access.h"

#include "nyx/snapshot/memory/backend/nyx_debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/snapshot/memory/backend/nyx_debug.h"


/* init operation */
void nyx_snapshot_debug_pre_init(void){
void nyx_snapshot_debug_pre_init(void)
{
    /* TODO */
}

/* init operation */
void nyx_snapshot_debug_init(fast_reload_t* self){
void nyx_snapshot_debug_init(fast_reload_t *self)
{
    /* TODO */
}

/* enable operation */
void nyx_snapshot_debug_enable(fast_reload_t* self){
void nyx_snapshot_debug_enable(fast_reload_t *self)
{
    /* TODO */
}

/* restore operation */
uint32_t nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){
uint32_t nyx_snapshot_debug_restore(shadow_memory_t *shadow_memory_state,
                                    snapshot_page_blocklist_t *blocklist,
                                    bool verbose)
{
    uint32_t num_dirty_pages = 0;

    void *current_region = NULL;
    int counter = 0;
    for (uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++) {

        if (shadow_memory_state->incremental_enabled) {
            current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
        }
        else{
            current_region =
                shadow_memory_state->ram_regions[i].incremental_region_ptr;
        } else {
            current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
        }

        for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){

            void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr;
        for (uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size;
             addr += 0x1000)
        {
            void *host_addr =
                shadow_memory_state->ram_regions[i].host_region_ptr + addr;
            void *snapshot_addr = current_region + addr;
            uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;

            /* check first if the page is dirty (this is super slow, but quite useful for debugging) */
            if (memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)) {
                /* check if page is not on the block list */
                if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){
                if (snapshot_page_blocklist_check_phys_addr(blocklist,
                                                            physical_addr) == false)
                {
                    // fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);

                    if (verbose) {
                        printf("%s -> (phys: 0x%lx) %p <-- %p [%d]\n", __func__, physical_addr, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled);
                        printf("%s -> (phys: 0x%lx) %p <-- %p [%d]\n", __func__,
                               physical_addr, host_addr, snapshot_addr,
                               shadow_memory_state->incremental_enabled);
                        counter++;
                    }

@ -72,33 +83,45 @@ uint32_t nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapsh
    return num_dirty_pages;
}
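[Editor's note] The debug restore path above finds dirty pages the slow way: it memcmp()s every 4 KiB page against the snapshot copy and rewrites only the pages that differ (unless they are blocklisted). A self-contained model of that page-granular compare-and-copy loop, with the blocklist check omitted:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 0x1000

    static uint32_t restore_dirty_pages(uint8_t *live, const uint8_t *snapshot, size_t size)
    {
        uint32_t dirty = 0;
        for (size_t off = 0; off < size; off += PAGE_SIZE) {
            /* slow path: full page compare, copy back only on mismatch */
            if (memcmp(live + off, snapshot + off, PAGE_SIZE)) {
                memcpy(live + off, snapshot + off, PAGE_SIZE);
                dirty++;
            }
        }
        return dirty;
    }

    int main(void)
    {
        static uint8_t live[4 * PAGE_SIZE], snap[4 * PAGE_SIZE];
        live[PAGE_SIZE + 7] = 0xff; /* dirty exactly one page */
        assert(restore_dirty_pages(live, snap, sizeof(live)) == 1);
        assert(restore_dirty_pages(live, snap, sizeof(live)) == 0);
        return 0;
    }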

void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose){
void nyx_snapshot_debug_save_root_pages(shadow_memory_t *shadow_memory_state,
                                        snapshot_page_blocklist_t *blocklist,
                                        bool verbose)
{
    void *current_region = NULL;

    for (uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++) {

        if (shadow_memory_state->incremental_enabled) {
            current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
        }
        else{
            current_region =
                shadow_memory_state->ram_regions[i].incremental_region_ptr;
        } else {
            current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
        }

        for(uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size; addr+=0x1000){

            void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + addr;
        for (uint64_t addr = 0; addr < shadow_memory_state->ram_regions[i].size;
             addr += 0x1000)
        {
            void *host_addr =
                shadow_memory_state->ram_regions[i].host_region_ptr + addr;
            void *snapshot_addr = current_region + addr;
            uint64_t physical_addr = addr + shadow_memory_state->ram_regions[i].base;
            void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + addr;
            void *incremental_addr =
                shadow_memory_state->ram_regions[i].incremental_region_ptr + addr;

            /* check first if the page is dirty (this is super slow, but quite useful for debugging) */
            if (memcmp(host_addr, snapshot_addr, TARGET_PAGE_SIZE)) {
                /* check if page is not on the block list */
                if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == false){
                if (snapshot_page_blocklist_check_phys_addr(blocklist,
                                                            physical_addr) == false)
                {
                    // fprintf(stderr, "(2) DIRTY: 0x%lx (NUM: %d - OFFSET: 0x%lx)\n", physical_addr, i, addr);

                    if(verbose && !shadow_memory_is_root_page_tracked(shadow_memory_state, addr, i)){
                        printf("%s -> %p <-- %p [%d]\n", __func__, host_addr, snapshot_addr, shadow_memory_state->incremental_enabled);
                    if (verbose &&
                        !shadow_memory_is_root_page_tracked(shadow_memory_state,
                                                            addr, i))
                    {
                        printf("%s -> %p <-- %p [%d]\n", __func__, host_addr,
                               snapshot_addr,
                               shadow_memory_state->incremental_enabled);
                    }

                    shadow_memory_track_dirty_root_pages(shadow_memory_state, addr, i);
@ -110,6 +133,7 @@ void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, sn
}

/* set operation */
void nyx_snapshot_debug_set(fast_reload_t* self){
void nyx_snapshot_debug_set(fast_reload_t *self)
{
    /* TODO */
}

@ -1,11 +1,15 @@
#pragma once

#include <stdint.h>
#include "nyx/fast_vm_reload.h"
#include <stdint.h>

void nyx_snapshot_debug_pre_init(void);
void nyx_snapshot_debug_init(fast_reload_t *self);
void nyx_snapshot_debug_enable(fast_reload_t *self);
uint32_t nyx_snapshot_debug_restore(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose);
uint32_t nyx_snapshot_debug_restore(shadow_memory_t *shadow_memory_state,
                                    snapshot_page_blocklist_t *blocklist,
                                    bool verbose);
void nyx_snapshot_debug_set(fast_reload_t *self);
void nyx_snapshot_debug_save_root_pages(shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, bool verbose);
void nyx_snapshot_debug_save_root_pages(shadow_memory_t *shadow_memory_state,
                                        snapshot_page_blocklist_t *blocklist,
                                        bool verbose);

@ -2,8 +2,8 @@
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_dirty_ring.h"

#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
@ -26,7 +26,8 @@ uint32_t kvm_dirty_gfns_index = 0;
uint32_t kvm_dirty_gfns_index_mask = 0;


static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size){
static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size)
{
    struct kvm_enable_cap cap = { 0 };

    cap.cap = KVM_CAP_DIRTY_LOG_RING;
@ -40,22 +41,27 @@ static int vm_enable_dirty_ring(int vm_fd, uint32_t ring_size){
    return ring_size;
}

static int check_dirty_ring_size(int kvm_fd, int vm_fd){
static int check_dirty_ring_size(int kvm_fd, int vm_fd)
{
    int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
    if (ret < 0) {
        printf("[QEMU-Nyx] Error: KVM_CAP_DIRTY_LOG_RING failed (dirty ring not supported?)\n");
        printf("[QEMU-Nyx] Error: KVM_CAP_DIRTY_LOG_RING failed (dirty ring not "
               "supported?)\n");
        exit(1);
    }

    printf("[QEMU-Nyx] Max Dirty Ring Size -> %d (Entries: %d)\n", ret, ret/(int)sizeof(struct kvm_dirty_gfn));
    printf("[QEMU-Nyx] Max Dirty Ring Size -> %d (Entries: %d)\n", ret,
           ret / (int)sizeof(struct kvm_dirty_gfn));

    uint64_t dirty_ring_max_size = ret; //kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
    uint64_t dirty_ring_max_size =
        ret; // kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);

    /* DIRTY RING -> 1MB in size results in 256M trackable memory */
    ret = vm_enable_dirty_ring(vm_fd, dirty_ring_max_size);

    if (ret < 0) {
        printf("[QEMU-Nyx] Error: Enabling dirty ring (size: %ld) failed\n", dirty_ring_max_size);
        printf("[QEMU-Nyx] Error: Enabling dirty ring (size: %ld) failed\n",
               dirty_ring_max_size);
        exit(1);
    }

@ -63,11 +69,14 @@ static int check_dirty_ring_size(int kvm_fd, int vm_fd){
    return ret;
}

static void allocate_dirty_ring(int kvm_vcpu, int vm_fd){
static void allocate_dirty_ring(int kvm_vcpu, int vm_fd)
{
    assert(dirty_ring_size);

    if (dirty_ring_size) {
        kvm_dirty_gfns = mmap(NULL, dirty_ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, kvm_vcpu, PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        kvm_dirty_gfns = mmap(NULL, dirty_ring_size, PROT_READ | PROT_WRITE,
                              MAP_SHARED, kvm_vcpu,
                              PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
        if (kvm_dirty_gfns == MAP_FAILED) {
            printf("[QEMU-Nyx] Error: Dirty ring mmap failed!\n");
            exit(1);
@ -80,38 +89,46 @@ static void allocate_dirty_ring(int kvm_vcpu, int vm_fd){
}

/* pre_init operation */
void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd){
void nyx_dirty_ring_early_init(int kvm_fd, int vm_fd)
{
    dirty_ring_size = check_dirty_ring_size(kvm_fd, vm_fd);
}

void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd){
void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd)
{
    allocate_dirty_ring(kvm_fd, vm_fd);

    kvm_dirty_gfns_index = 0;
    kvm_dirty_gfns_index_mask = ((dirty_ring_max_size_global/sizeof(struct kvm_dirty_gfn)) - 1);

    kvm_dirty_gfns_index_mask =
        ((dirty_ring_max_size_global / sizeof(struct kvm_dirty_gfn)) - 1);
}
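[Editor's note] The index mask computed above assumes the ring holds a power-of-two number of kvm_dirty_gfn entries, so "index & mask" wraps around without a modulo. A tiny standalone check of that invariant (the entry count is illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t entries = 1024;             /* ring size / entry size */
        uint64_t mask = entries - 1;         /* valid only for powers of two */
        assert((entries & mask) == 0);       /* power-of-two check */
        assert(((entries + 3) & mask) == 3); /* index wraps to slot 3 */
        return 0;
    }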

static inline void dirty_ring_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, uint64_t slot, uint64_t gfn){

static inline void dirty_ring_collect(nyx_dirty_ring_t *self,
                                      shadow_memory_t *shadow_memory_state,
                                      snapshot_page_blocklist_t *blocklist,
                                      uint64_t slot,
                                      uint64_t gfn)
{
    /* sanity check */
    assert((slot & 0xFFFF0000) == 0);

    slot_t *kvm_region_slot = &self->kvm_region_slots[slot & 0xFFFF];

    if (test_and_set_bit(gfn, (void *)kvm_region_slot->bitmap) == false) {

        kvm_region_slot->stack[kvm_region_slot->stack_ptr] = gfn;
        kvm_region_slot->stack_ptr++;
    }
}
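[Editor's note] dirty_ring_collect() deduplicates dirty guest frame numbers with a bitmap-plus-stack pair: test_and_set_bit() records whether a gfn was already seen, and only first sightings are pushed on the per-slot stack, so later passes iterate just the unique dirty pages. A standalone model of that scheme (fixed sizes, no KVM types):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_GFNS 1024

    typedef struct {
        uint64_t bitmap[MAX_GFNS / 64]; /* "already seen" set */
        uint64_t stack[MAX_GFNS];       /* unique gfns, in arrival order */
        size_t   stack_ptr;
    } slot_t;

    static bool test_and_set(uint64_t *bitmap, uint64_t gfn)
    {
        uint64_t mask = 1ull << (gfn % 64);
        bool was_set = bitmap[gfn / 64] & mask;
        bitmap[gfn / 64] |= mask;
        return was_set;
    }

    static void collect(slot_t *slot, uint64_t gfn)
    {
        if (!test_and_set(slot->bitmap, gfn))
            slot->stack[slot->stack_ptr++] = gfn; /* first sighting only */
    }

    int main(void)
    {
        static slot_t slot;
        collect(&slot, 42);
        collect(&slot, 42); /* duplicate is ignored */
        collect(&slot, 7);
        assert(slot.stack_ptr == 2);
        return 0;
    }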

static void dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist, int vm_fd){
static void dirty_ring_flush_and_collect(nyx_dirty_ring_t *self,
                                         shadow_memory_t *shadow_memory_state,
                                         snapshot_page_blocklist_t *blocklist,
                                         int vm_fd)
{
    struct kvm_dirty_gfn *entry = NULL;
    int cleared = 0;

    while (true) {

        entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];

        if ((entry->flags & 0x3) == 0) {
@ -119,12 +136,14 @@ static void dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t
        }

        if ((entry->flags & 0x1) == 1) {
            dirty_ring_collect(self, shadow_memory_state, blocklist, entry->slot, entry->offset);
            dirty_ring_collect(self, shadow_memory_state, blocklist, entry->slot,
                               entry->offset);
            cleared++;
            entry->flags |= 0x2; // reset dirty entry
        }
        else{
            printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset);
        } else {
            printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx "
                   "{ERROR}\n",
                   entry, entry->flags, entry->slot, entry->offset);
            fflush(stdout);
            exit(1);
        }
@ -136,12 +155,12 @@ static void dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t
    assert(ret == cleared);
}
|
||||
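The flag tests in dirty_ring_flush_and_collect() follow the KVM dirty-ring ABI: bit 0 (KVM_DIRTY_GFN_F_DIRTY in <linux/kvm.h>) marks an entry KVM has published, and userspace acknowledges it by setting bit 1 (KVM_DIRTY_GFN_F_RESET) — the `entry->flags |= 0x2` above. The KVM_RESET_DIRTY_RINGS ioctl on the VM fd then reclaims all acknowledged entries and returns how many it reset, which is what the closing assert compares against `cleared`. A condensed sketch of the protocol (it reuses the file's globals; consume() is a hypothetical callback, so this is a fragment, not a drop-in implementation):

for (;;) {
    struct kvm_dirty_gfn *entry =
        &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];

    if ((entry->flags & 0x3) == 0) {
        break; /* neither dirty nor reset -> caught up with KVM */
    }
    if (entry->flags & 0x1) {
        consume(entry->slot, entry->offset); /* hypothetical callback */
        entry->flags |= 0x2;                 /* hand entry back to KVM */
        cleared++;
    }
    kvm_dirty_gfns_index++;
}
/* ask KVM to reclaim every entry marked with the reset bit */
ret = ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
assert(ret == cleared); /* KVM reports how many entries it reset */
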

static void dirty_ring_flush(int vm_fd){
static void dirty_ring_flush(int vm_fd)
{
struct kvm_dirty_gfn *entry = NULL;
int cleared = 0;

while (true) {

entry = &kvm_dirty_gfns[kvm_dirty_gfns_index & kvm_dirty_gfns_index_mask];

if ((entry->flags & 0x3) == 0) {
@ -151,9 +170,10 @@ static void dirty_ring_flush(int vm_fd){
if ((entry->flags & 0x1) == 1) {
cleared++;
entry->flags |= 0x2; // reset dirty entry
}
else{
printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx {ERROR}\n", entry, entry->flags, entry->slot, entry->offset);
} else {
printf("[QEMU-Nyx] [%p] kvm_dirty_gfn -> flags: %d slot: %d offset: %lx "
"{ERROR}\n",
entry, entry->flags, entry->slot, entry->offset);
fflush(stdout);
exit(1);
}
@ -166,7 +186,8 @@ static void dirty_ring_flush(int vm_fd){
}

/* init operation */
nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){
nyx_dirty_ring_t *nyx_dirty_ring_init(shadow_memory_t *shadow_memory)
{
nyx_dirty_ring_t *self = malloc(sizeof(nyx_dirty_ring_t));
memset(self, 0, sizeof(nyx_dirty_ring_t));

@ -210,12 +231,18 @@ nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){
if (self->kvm_region_slots[i].enabled) {
bool ram_region_found = false;
for (int j = 0; j < shadow_memory->ram_regions_num; j++) {

if(FAST_IN_RANGE(mem->start_addr, shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size))){
assert(FAST_IN_RANGE((mem->start_addr+mem->memory_size-1), shadow_memory->ram_regions[j].base, (shadow_memory->ram_regions[j].base+shadow_memory->ram_regions[j].size)));
if (FAST_IN_RANGE(mem->start_addr, shadow_memory->ram_regions[j].base,
(shadow_memory->ram_regions[j].base +
shadow_memory->ram_regions[j].size)))
{
assert(FAST_IN_RANGE((mem->start_addr + mem->memory_size - 1),
shadow_memory->ram_regions[j].base,
(shadow_memory->ram_regions[j].base +
shadow_memory->ram_regions[j].size)));

self->kvm_region_slots[i].region_id = j;
self->kvm_region_slots[i].region_offset = mem->start_addr - shadow_memory->ram_regions[j].base;
self->kvm_region_slots[i].region_offset =
mem->start_addr - shadow_memory->ram_regions[j].base;
ram_region_found = true;
break;
}
@ -231,10 +258,11 @@ nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){
printf("[%d].stack = %p\n", i, self->kvm_region_slots[i].stack);
printf("[%d].stack_ptr = %ld\n", i, self->kvm_region_slots[i].stack_ptr);
if (self->kvm_region_slots[i].enabled) {
printf("[%d].region_id = %d\n", i, self->kvm_region_slots[i].region_id);
printf("[%d].region_offset = 0x%lx\n", i, self->kvm_region_slots[i].region_offset);
}
else{
printf("[%d].region_id = %d\n", i,
self->kvm_region_slots[i].region_id);
printf("[%d].region_offset = 0x%lx\n", i,
self->kvm_region_slots[i].region_offset);
} else {
printf("[%d].region_id = -\n", i);
printf("[%d].region_offset = -\n", i);
}
@ -245,7 +273,10 @@ nyx_dirty_ring_t* nyx_dirty_ring_init(shadow_memory_t* shadow_memory){
return self;
}

static uint32_t restore_memory(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
static uint32_t restore_memory(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
uint32_t num_dirty_pages = 0;
void *host_addr = NULL;
void *snapshot_addr = NULL;
@ -261,19 +292,31 @@ static uint32_t restore_memory(nyx_dirty_ring_t* self, shadow_m

entry_offset_addr = kvm_region_slot->region_offset + (gfn << 12);

physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr;
physical_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id].base +
entry_offset_addr;

if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == true)
{
continue;
}

host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr;
host_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.host_region_ptr +
entry_offset_addr;

if (shadow_memory_state->incremental_enabled) {
snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr;
}
else{
snapshot_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].snapshot_region_ptr + entry_offset_addr;
snapshot_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.incremental_region_ptr +
entry_offset_addr;
} else {
snapshot_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.snapshot_region_ptr +
entry_offset_addr;
}

memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
@ -287,7 +330,10 @@ static uint32_t restore_memory(nyx_dirty_ring_t* self, shadow_m
return num_dirty_pages;
}

static void save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
static void save_root_pages(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
void *host_addr = NULL;
void *incremental_addr = NULL;
uint64_t physical_addr = 0;
@ -302,16 +348,28 @@ static void save_root_pages(nyx_dirty_ring_t* self, shadow_memo

entry_offset_addr = kvm_region_slot->region_offset + (gfn << 12);

physical_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].base + entry_offset_addr;
physical_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id].base +
entry_offset_addr;

if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == true)
{
continue;
}

host_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].host_region_ptr + entry_offset_addr;
incremental_addr = shadow_memory_state->ram_regions[kvm_region_slot->region_id].incremental_region_ptr + entry_offset_addr;
host_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.host_region_ptr +
entry_offset_addr;
incremental_addr =
shadow_memory_state->ram_regions[kvm_region_slot->region_id]
.incremental_region_ptr +
entry_offset_addr;

shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, kvm_region_slot->region_id);
shadow_memory_track_dirty_root_pages(shadow_memory_state,
entry_offset_addr,
kvm_region_slot->region_id);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);

clear_bit(gfn, (void *)kvm_region_slot->bitmap);
@ -321,21 +379,33 @@ static void save_root_pages(nyx_dirty_ring_t* self, shadow_memo
}
}

uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist,
kvm_get_vm_fd(kvm_state));
return restore_memory(self, shadow_memory_state, blocklist);
}

void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){

dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist,
kvm_get_vm_fd(kvm_state));
save_root_pages(self, shadow_memory_state, blocklist);
}

void nyx_snapshot_nyx_dirty_ring_flush(void){
void nyx_snapshot_nyx_dirty_ring_flush(void)
{
dirty_ring_flush(kvm_get_vm_fd(kvm_state));
}

void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist, kvm_get_vm_fd(kvm_state));
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
dirty_ring_flush_and_collect(self, shadow_memory_state, blocklist,
kvm_get_vm_fd(kvm_state));
}

@ -1,8 +1,8 @@
#pragma once

#include <stdint.h>
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include <stdint.h>

struct kvm_dirty_gfn {
uint32_t flags;
@ -37,10 +37,16 @@ void nyx_dirty_ring_pre_init(int kvm_fd, int vm_fd);

nyx_dirty_ring_t *nyx_dirty_ring_init(shadow_memory_t *shadow_memory);

uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
uint32_t nyx_snapshot_nyx_dirty_ring_restore(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);

void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_dirty_ring_save_root_pages(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);

void nyx_snapshot_nyx_dirty_ring_flush(void);

void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_dirty_ring_flush_and_collect(nyx_dirty_ring_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
@ -1,10 +1,10 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"

#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"

#include "nyx/memory_access.h"

@ -25,8 +25,8 @@
// #define RESET_VRAM
// #define DEBUG_FDL_VRAM

nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){

nyx_fdl_t *nyx_fdl_init(shadow_memory_t *shadow_memory)
{
static bool fdl_created = false;
/* not sure if we're able to create another FDL instance -> probably not */
assert(fdl_created == false);
@ -47,8 +47,10 @@ nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
configuration.num = 0;

for (uint8_t i = 0; i < shadow_memory->ram_regions_num; i++) {
configuration.areas[configuration.num].base_address = shadow_memory->ram_regions[i].base;
configuration.areas[configuration.num].size = shadow_memory->ram_regions[i].size;
configuration.areas[configuration.num].base_address =
shadow_memory->ram_regions[i].base;
configuration.areas[configuration.num].size =
shadow_memory->ram_regions[i].size;
configuration.num++;
}

@ -59,18 +61,23 @@ nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
printf("KVM_VMX_FDL_SET: %d\n", ret);
printf("configuration.mmap_size = 0x%lx\n", configuration.mmap_size);
for (uint8_t i = 0; i < configuration.num; i++) {
printf("configuration.areas[%d].mmap_bitmap_offset = 0x%lx\n", i, configuration.areas[i].mmap_bitmap_offset);
printf("configuration.areas[%d].mmap_stack_offset = 0x%lx\n", i, configuration.areas[i].mmap_stack_offset);
printf("configuration.areas[%d].mmap_bitmap_offset = 0x%lx\n", i,
configuration.areas[i].mmap_bitmap_offset);
printf("configuration.areas[%d].mmap_stack_offset = 0x%lx\n", i,
configuration.areas[i].mmap_stack_offset);
}
#endif

self->vmx_fdl_mmap = mmap(NULL, configuration.mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, self->vmx_fdl_fd, 0);
self->vmx_fdl_mmap = mmap(NULL, configuration.mmap_size, PROT_READ | PROT_WRITE,
MAP_SHARED, self->vmx_fdl_fd, 0);

assert(self->vmx_fdl_mmap != (void *)0xFFFFFFFFFFFFFFFF);

for (uint8_t i = 0; i < configuration.num; i++) {
self->entry[i].stack = self->vmx_fdl_mmap + configuration.areas[i].mmap_stack_offset;
self->entry[i].bitmap = self->vmx_fdl_mmap + configuration.areas[i].mmap_bitmap_offset;
self->entry[i].stack =
self->vmx_fdl_mmap + configuration.areas[i].mmap_stack_offset;
self->entry[i].bitmap =
self->vmx_fdl_mmap + configuration.areas[i].mmap_bitmap_offset;

#ifdef DEBUG_VMX_FDL_ALLOC
printf("fdl_stacks[%d] -> %p\n", i, self->entry[i].stack);
@ -95,8 +102,10 @@ nyx_fdl_t* nyx_fdl_init(shadow_memory_t* shadow_memory){
}

/* restore operation */
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){

uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
uint32_t num_dirty_pages = 0;
void *current_region = NULL;

@ -109,29 +118,34 @@ uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_m

for (uint8_t i = 0; i < result.num; i++) {
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i],
(0x1000 * result.values[i]) >> 0x10);
#endif

if (shadow_memory_state->incremental_enabled) {
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region =
shadow_memory_state->ram_regions[i].incremental_region_ptr;
} else {
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}

for (uint64_t j = 0; j < result.values[i]; j++) {

uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;

void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *snapshot_addr = current_region + entry_offset_addr;


if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) ==
true)
{
#ifdef DEBUG_VERFIY_BITMAP
if (!is_black_listed_addr(self, entry_offset_addr)) {
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__,
entry_offset_addr);
abort();
}
#endif
@ -142,7 +156,6 @@ uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_m
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
num_dirty_pages++;
}

}
#ifdef RESET_VRAM
// nyx_snapshot_nyx_fdl_restore_vram(self, shadow_memory_state);
@ -150,7 +163,10 @@ uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_m
return num_dirty_pages;
}

void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
struct fdl_result result;
memset(&result, 0, sizeof(struct fdl_result));
int res = ioctl(self->vmx_fdl_fd, KVM_VMX_FDL_GET_INDEX, &result);
@ -158,21 +174,28 @@ void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shad

for (uint8_t i = 0; i < result.num; i++) {
#ifdef SHOW_NUM_DIRTY_PAGES
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i], (0x1000*result.values[i])>>0x10);
printf("Kernel -> [%d] %ld \t%ldKB\n", i, result.values[i],
(0x1000 * result.values[i]) >> 0x10);
#endif

for (uint64_t j = 0; j < result.values[i]; j++) {

uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;

void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr;
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *incremental_addr =
shadow_memory_state->ram_regions[i].incremental_region_ptr +
entry_offset_addr;

if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) ==
true)
{
#ifdef DEBUG_VERFIY_BITMAP
if (!is_black_listed_addr(self, entry_offset_addr)) {
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__, entry_offset_addr);
printf("WARNING: %s: -> %lx is not blacklisted\n", __func__,
entry_offset_addr);
abort();
}
#endif
@ -180,14 +203,9 @@ void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shad
}

clear_bit(entry_offset_addr >> 12, (void *)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i);
shadow_memory_track_dirty_root_pages(shadow_memory_state,
entry_offset_addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);
}

}
}

@ -56,6 +56,10 @@ typedef struct nyx_fdl_s{
} nyx_fdl_t;

nyx_fdl_t *nyx_fdl_init(shadow_memory_t *self);
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
uint32_t nyx_snapshot_nyx_fdl_restore(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);

void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_fdl_save_root_pages(nyx_fdl_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
@ -1,15 +1,15 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"

#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"

#include "nyx/memory_access.h"

#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"

#define REALLOC_SIZE 0x8000
@ -17,18 +17,21 @@
// #define DEBUG_NYX_SNAPSHOT_PAGE_BLOCKLIST


snapshot_page_blocklist_t* snapshot_page_blocklist_init(void){

snapshot_page_blocklist_t *snapshot_page_blocklist_init(void)
{
snapshot_page_blocklist_t *self = malloc(sizeof(snapshot_page_blocklist_t));

uint64_t ram_size = get_ram_size();
self->phys_area_size = ram_size <= MEM_SPLIT_START ? ram_size : ram_size + (MEM_SPLIT_END-MEM_SPLIT_START);
self->phys_area_size = ram_size <= MEM_SPLIT_START ?
ram_size :
ram_size + (MEM_SPLIT_END - MEM_SPLIT_START);

self->phys_bitmap = malloc(BITMAP_SIZE(self->phys_area_size));
memset(self->phys_bitmap, 0x0, BITMAP_SIZE(self->phys_area_size));

if (ram_size > MEM_SPLIT_START) {
memset(self->phys_bitmap+BITMAP_SIZE(MEM_SPLIT_START), 0xff, BITMAP_SIZE((MEM_SPLIT_END-MEM_SPLIT_START)));
memset(self->phys_bitmap + BITMAP_SIZE(MEM_SPLIT_START), 0xff,
BITMAP_SIZE((MEM_SPLIT_END - MEM_SPLIT_START)));
}
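A worked example of the phys_area_size computation above, under the assumption that MEM_SPLIT_START and MEM_SPLIT_END (defined in nyx/snapshot/helper.h) model a PCI hole at 3 GiB–4 GiB — the concrete values below are illustrative assumptions, not taken from this commit. RAM above the hole is remapped past MEM_SPLIT_END, so the guest-physical bitmap must cover ram_size plus the hole, and the hole itself is permanently marked blocklisted (the 0xff memset above):

/* Illustrative stand-alone check of the sizing arithmetic. */
#include <assert.h>
#include <stdint.h>

#define MEM_SPLIT_START (3ULL << 30) /* assumed: 3 GiB */
#define MEM_SPLIT_END   (4ULL << 30) /* assumed: 4 GiB */

int main(void)
{
    uint64_t ram_size = 4ULL << 30; /* a 4 GiB guest */
    uint64_t phys_area_size =
        ram_size <= MEM_SPLIT_START ?
            ram_size :
            ram_size + (MEM_SPLIT_END - MEM_SPLIT_START);

    /* a 4 GiB guest spans 5 GiB of guest-physical address space */
    assert(phys_area_size == 5ULL << 30);
    return 0;
}
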

self->pages_num = 0;
@ -38,7 +41,8 @@ snapshot_page_blocklist_t* snapshot_page_blocklist_init(void){
return self;
}

void snapshot_page_blocklist_add(snapshot_page_blocklist_t* self, uint64_t phys_addr){
void snapshot_page_blocklist_add(snapshot_page_blocklist_t *self, uint64_t phys_addr)
{
if (phys_addr == -1) {
fprintf(stderr, "ERROR %s: phys_addr=%lx\n", __func__, phys_addr);
return;
@ -1,11 +1,10 @@
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include "nyx/snapshot/memory/shadow_memory.h"
#include <stdbool.h>
#include <stdint.h>

typedef struct snapshot_page_blocklist_s {

/* total number of blocklisted page frames */
uint64_t pages_num;

@ -25,8 +24,11 @@ typedef struct snapshot_page_blocklist_s{
void snapshot_page_blocklist_add(snapshot_page_blocklist_t *self, uint64_t phys_addr);

/* returns true if phys_addr is on the blocklist */
static inline bool snapshot_page_blocklist_check_phys_addr(snapshot_page_blocklist_t* self, uint64_t phys_addr){
return phys_addr < self->phys_area_size && test_bit(phys_addr>>12, (const unsigned long *)self->phys_bitmap) != 0;
static inline bool snapshot_page_blocklist_check_phys_addr(
snapshot_page_blocklist_t *self, uint64_t phys_addr)
{
return phys_addr < self->phys_area_size &&
test_bit(phys_addr >> 12, (const unsigned long *)self->phys_bitmap) != 0;
}

snapshot_page_blocklist_t *snapshot_page_blocklist_init(void);
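Hypothetical usage of the inline check above, assuming snapshot_page_blocklist_add() sets the bit for the page containing the given address (its body is only partially shown in this diff): the bitmap is indexed by page frame number (phys_addr >> 12), so any address inside a blocklisted 4 KiB page tests positive, while addresses in neighboring pages do not.

snapshot_page_blocklist_t *blocklist = snapshot_page_blocklist_init();
snapshot_page_blocklist_add(blocklist, 0x7f000); /* blocklist one page frame */
assert(snapshot_page_blocklist_check_phys_addr(blocklist, 0x7f123));  /* same page */
assert(!snapshot_page_blocklist_check_phys_addr(blocklist, 0x80000)); /* next page */
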

@ -1,6 +1,6 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"

#include "exec/ram_addr.h"
#include "migration/migration.h"
@ -11,15 +11,15 @@
#include <sys/ioctl.h>

#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/nyx_fdl_user.h"
#include "nyx/snapshot/memory/shadow_memory.h"

/* debug option */
// #define DEBUG_USER_FDL

/* init operation */
nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memory_state){

nyx_fdl_user_t *nyx_fdl_user_init(shadow_memory_t *shadow_memory_state)
{
nyx_fdl_user_t *self = malloc(sizeof(nyx_fdl_user_t));
memset(self, 0, sizeof(nyx_fdl_user_t));

@ -27,19 +27,23 @@ nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memor
self->num = shadow_memory_state->ram_regions_num;

for (uint8_t i = 0; i < shadow_memory_state->ram_regions_num; i++) {
self->entry[i].stack = malloc(DIRTY_STACK_SIZE(shadow_memory_state->ram_regions[i].size));
self->entry[i].bitmap = malloc(BITMAP_SIZE(shadow_memory_state->ram_regions[i].size));
self->entry[i].stack =
malloc(DIRTY_STACK_SIZE(shadow_memory_state->ram_regions[i].size));
self->entry[i].bitmap =
malloc(BITMAP_SIZE(shadow_memory_state->ram_regions[i].size));
}
return self;
}

/* enable operation */
void nyx_fdl_user_enable(nyx_fdl_user_t* self){
void nyx_fdl_user_enable(nyx_fdl_user_t *self)
{
assert(self);
self->enabled = true;
}

static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t* self){
static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t *self)
{
if (self) {
for (uint8_t i = 0; i < self->num; i++) {
self->entry[i].pos = 0;
@ -48,33 +52,41 @@ static void nyx_snapshot_user_fdl_reset(nyx_fdl_user_t* self){
}

/* reset operation */
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
uint32_t num_dirty_pages = 0;
if (self) {

void *current_region = NULL;


for (uint8_t i = 0; i < self->num; i++) {
#ifdef DEBUG_USER_FDL
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10);
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos,
(0x1000 * self->entry[i].pos) >> 0x10);
#endif

if (shadow_memory_state->incremental_enabled) {
current_region = shadow_memory_state->ram_regions[i].incremental_region_ptr;
}
else{
current_region = shadow_memory_state->ram_regions[i].snapshot_region_ptr;
current_region =
shadow_memory_state->ram_regions[i].incremental_region_ptr;
} else {
current_region =
shadow_memory_state->ram_regions[i].snapshot_region_ptr;
}

for (uint64_t j = 0; j < self->entry[i].pos; j++) {
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;

void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *snapshot_addr = current_region + entry_offset_addr;

if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist,
physical_addr) == true)
{
continue;
}

@ -85,9 +97,7 @@ uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, sh
memcpy(host_addr, snapshot_addr, TARGET_PAGE_SIZE);
num_dirty_pages++;
}

}

}

nyx_snapshot_user_fdl_reset(self);
@ -95,13 +105,17 @@ uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, sh
}

/* set operation (mark pf as dirty) */
void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length){
void nyx_fdl_user_set(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
nyx_fdl_t *nyx_fdl_state,
uint64_t addr,
uint64_t length)
{
if (length < 0x1000) {
length = 0x1000;
}

if (self && self->enabled && length >= 0x1000) {

uint8_t ram_area = 0xff;

/* optimize this? */
@ -110,21 +124,61 @@ void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* sh

switch (MAX_REGIONS - shadow_memory_state->ram_regions_num) {
case 0:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[7].base, shadow_memory_state->ram_regions[7].base+(shadow_memory_state->ram_regions[7].size-1)) ? 7 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[7].base,
shadow_memory_state->ram_regions[7].base +
(shadow_memory_state->ram_regions[7].size - 1)) ?
7 :
ram_area;
case 1:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[6].base, shadow_memory_state->ram_regions[6].base+(shadow_memory_state->ram_regions[6].size-1)) ? 6 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[6].base,
shadow_memory_state->ram_regions[6].base +
(shadow_memory_state->ram_regions[6].size - 1)) ?
6 :
ram_area;
case 2:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[5].base, shadow_memory_state->ram_regions[5].base+(shadow_memory_state->ram_regions[5].size-1)) ? 5 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[5].base,
shadow_memory_state->ram_regions[5].base +
(shadow_memory_state->ram_regions[5].size - 1)) ?
5 :
ram_area;
case 3:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[4].base, shadow_memory_state->ram_regions[4].base+(shadow_memory_state->ram_regions[4].size-1)) ? 4 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[4].base,
shadow_memory_state->ram_regions[4].base +
(shadow_memory_state->ram_regions[4].size - 1)) ?
4 :
ram_area;
case 4:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[3].base, shadow_memory_state->ram_regions[3].base+(shadow_memory_state->ram_regions[3].size-1)) ? 3 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[3].base,
shadow_memory_state->ram_regions[3].base +
(shadow_memory_state->ram_regions[3].size - 1)) ?
3 :
ram_area;
case 5:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[2].base, shadow_memory_state->ram_regions[2].base+(shadow_memory_state->ram_regions[2].size-1)) ? 2 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[2].base,
shadow_memory_state->ram_regions[2].base +
(shadow_memory_state->ram_regions[2].size - 1)) ?
2 :
ram_area;
case 6:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[1].base, shadow_memory_state->ram_regions[1].base+(shadow_memory_state->ram_regions[1].size-1)) ? 1 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[1].base,
shadow_memory_state->ram_regions[1].base +
(shadow_memory_state->ram_regions[1].size - 1)) ?
1 :
ram_area;
case 7:
ram_area = FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[0].base, shadow_memory_state->ram_regions[0].base+(shadow_memory_state->ram_regions[0].size-1)) ? 0 : ram_area;
ram_area =
FAST_IN_RANGE(addr, shadow_memory_state->ram_regions[0].base,
shadow_memory_state->ram_regions[0].base +
(shadow_memory_state->ram_regions[0].size - 1)) ?
0 :
ram_area;
default:
break;
}
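The switch above relies on deliberate fall-through: entering at case `MAX_REGIONS - ram_regions_num` executes exactly one FAST_IN_RANGE test per existing region, from the highest index down, with no loop-bound check on the hot path. Assuming the regions do not overlap (so at most one test can match), it is equivalent to this plain loop — a readability sketch only, not a proposed change:

uint8_t ram_area = 0xff;
for (int k = shadow_memory_state->ram_regions_num - 1; k >= 0; k--) {
    uint64_t base = shadow_memory_state->ram_regions[k].base;
    uint64_t end  = base + (shadow_memory_state->ram_regions[k].size - 1);
    if (FAST_IN_RANGE(addr, base, end)) {
        ram_area = k; /* address belongs to region k */
        break;
    }
}
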
@ -137,10 +191,11 @@ void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* sh


for (uint64_t offset = 0; offset < length; offset += 0x1000) {

uint64_t current_addr = (addr + offset) & 0xFFFFFFFFFFFFF000;

long pfn = (long) ((current_addr-shadow_memory_state->ram_regions[ram_area].base)>>12);
long pfn = (long)((current_addr -
shadow_memory_state->ram_regions[ram_area].base) >>
12);

assert(self->entry[ram_area].bitmap);

@ -148,7 +203,8 @@ void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* sh
if (!test_bit(pfn, (const unsigned long *)self->entry[ram_area].bitmap)) {
set_bit(pfn, (unsigned long *)self->entry[ram_area].bitmap);

self->entry[ram_area].stack[self->entry[ram_area].pos] = current_addr & 0xFFFFFFFFFFFFF000;
self->entry[ram_area].stack[self->entry[ram_area].pos] =
current_addr & 0xFFFFFFFFFFFFF000;
self->entry[ram_area].pos++;

#ifdef DEBUG_USER_FDL
@ -159,21 +215,30 @@ void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* sh
}
}

void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist){

void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist)
{
for (uint8_t i = 0; i < self->num; i++) {
#ifdef DEBUG_USER_FDL
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos, (0x1000*self->entry[i].pos)>>0x10);
printf("User -> [%d] %ld \t%ldKB\n", i, self->entry[i].pos,
(0x1000 * self->entry[i].pos) >> 0x10);
#endif

for (uint64_t j = 0; j < self->entry[i].pos; j++) {
uint64_t physical_addr = self->entry[i].stack[j];
uint64_t entry_offset_addr = physical_addr - shadow_memory_state->ram_regions[i].base;
uint64_t entry_offset_addr =
physical_addr - shadow_memory_state->ram_regions[i].base;

void* host_addr = shadow_memory_state->ram_regions[i].host_region_ptr + entry_offset_addr;
void* incremental_addr = shadow_memory_state->ram_regions[i].incremental_region_ptr + entry_offset_addr;
void *host_addr = shadow_memory_state->ram_regions[i].host_region_ptr +
entry_offset_addr;
void *incremental_addr =
shadow_memory_state->ram_regions[i].incremental_region_ptr +
entry_offset_addr;

if(snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) == true){
if (snapshot_page_blocklist_check_phys_addr(blocklist, physical_addr) ==
true)
{
printf("%s: 0x%lx is dirty\n", __func__, physical_addr);
continue;
}
@ -182,9 +247,9 @@ void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t*
#endif

clear_bit(entry_offset_addr >> 12, (void *)self->entry[i].bitmap);
shadow_memory_track_dirty_root_pages(shadow_memory_state, entry_offset_addr, i);
shadow_memory_track_dirty_root_pages(shadow_memory_state,
entry_offset_addr, i);
memcpy(incremental_addr, host_addr, TARGET_PAGE_SIZE);

}
}

@ -1,10 +1,10 @@
#pragma once

#include <stdint.h>
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include "nyx/snapshot/memory/block_list.h"
#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/memory/backend/nyx_fdl.h"
#include <stdint.h>

#define MAX_REGIONS 8 /* don't */

@ -23,8 +23,16 @@ nyx_fdl_user_t* nyx_fdl_user_init(shadow_memory_t* shadow_memor

void nyx_fdl_user_enable(nyx_fdl_user_t *self);

void nyx_fdl_user_set(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, nyx_fdl_t* nyx_fdl_state, uint64_t addr, uint64_t length);
void nyx_fdl_user_set(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
nyx_fdl_t *nyx_fdl_state,
uint64_t addr,
uint64_t length);

uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
uint32_t nyx_snapshot_user_fdl_restore(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);

void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t* self, shadow_memory_t* shadow_memory_state, snapshot_page_blocklist_t* blocklist);
void nyx_snapshot_nyx_fdl_user_save_root_pages(nyx_fdl_user_t *self,
shadow_memory_t *shadow_memory_state,
snapshot_page_blocklist_t *blocklist);
@ -1,17 +1,17 @@

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"

#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/migration.h"
#include "qemu/rcu_queue.h"

#include "nyx/debug.h"
#include "nyx/memory_access.h"

#include "nyx/snapshot/memory/shadow_memory.h"
#include "nyx/snapshot/helper.h"
#include "nyx/snapshot/memory/shadow_memory.h"

typedef struct fast_reload_dump_head_s {
uint32_t shadow_memory_regions;
@ -24,28 +24,34 @@ typedef struct fast_reload_dump_entry_s{
} fast_reload_dump_entry_t;


static void shadow_memory_set_incremental_ptrs(shadow_memory_t* self){
static void shadow_memory_set_incremental_ptrs(shadow_memory_t *self)
{
for (uint8_t i = 0; i < self->ram_regions_num; i++) {
self->ram_regions[i].incremental_region_ptr = self->incremental_ptr + self->ram_regions[i].offset;
self->ram_regions[i].incremental_region_ptr =
self->incremental_ptr + self->ram_regions[i].offset;
}
}

static void shadow_memory_pre_alloc_incremental(shadow_memory_t* self){
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0);
static void shadow_memory_pre_alloc_incremental(shadow_memory_t *self)
{
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE, self->snapshot_ptr_fd, 0);
shadow_memory_set_incremental_ptrs(self);
}

static void shadow_memory_init_generic(shadow_memory_t* self){
static void shadow_memory_init_generic(shadow_memory_t *self)
{
self->root_track_pages_num = 0;
self->root_track_pages_size = 32 << 10;
self->root_track_pages_stack = malloc(sizeof(uint64_t)*self->root_track_pages_size);
self->root_track_pages_stack =
malloc(sizeof(uint64_t) * self->root_track_pages_size);
shadow_memory_pre_alloc_incremental(self);

self->incremental_enabled = false;
}

shadow_memory_t* shadow_memory_init(void){

shadow_memory_t *shadow_memory_init(void)
{
RAMBlock *block;
RAMBlock *block_array[10];
void *snapshot_ptr_offset_array[10];
@ -57,22 +63,26 @@ shadow_memory_t* shadow_memory_init(void){
self->memory_size += block->used_length;
}

self->snapshot_ptr_fd = memfd_create("in_memory_root_snapshot", MFD_CLOEXEC | MFD_ALLOW_SEALING);
self->snapshot_ptr_fd =
memfd_create("in_memory_root_snapshot", MFD_CLOEXEC | MFD_ALLOW_SEALING);
assert(!ftruncate(self->snapshot_ptr_fd, self->memory_size));
fcntl(self->snapshot_ptr_fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
fcntl(self->snapshot_ptr_fd, F_ADD_SEALS,
F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);

self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE , MAP_SHARED , self->snapshot_ptr_fd, 0);
self->snapshot_ptr = mmap(NULL, self->memory_size, PROT_READ | PROT_WRITE,
MAP_SHARED, self->snapshot_ptr_fd, 0);
madvise(self->snapshot_ptr, self->memory_size, MADV_RANDOM | MADV_MERGEABLE);

nyx_debug_p(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx", self->snapshot_ptr, self->memory_size);

nyx_debug_p(RELOAD_PREFIX, "Allocating Memory (%p) Size: %lx",
self->snapshot_ptr, self->memory_size);


uint64_t offset = 0;
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset,
block->used_length, block->max_length, block->idstr, block->host);
block_array[i] = block;

memcpy(self->snapshot_ptr + offset, block->host, block->used_length);
@ -84,26 +94,34 @@ shadow_memory_t* shadow_memory_init(void){
for (uint8_t i = 0; i < regions_num; i++) {
block = block_array[i];
if (!block->mr->readonly) {

if (self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START) {

self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr +
self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr =
malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
self->ram_regions_num++;

self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].base =
MEM_SPLIT_END
;
self->ram_regions[self->ram_regions_num].size = block->used_length - MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset =
(snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr =
block->host + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
snapshot_ptr_offset_array[i] + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
@ -112,11 +130,14 @@ shadow_memory_t* shadow_memory_init(void){
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr + self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}

@ -125,13 +146,13 @@ shadow_memory_t* shadow_memory_init(void){
}


shadow_memory_init_generic(self);
return self;
}

shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot){

shadow_memory_t *shadow_memory_init_from_snapshot(const char *snapshot_folder,
bool pre_snapshot)
{
RAMBlock *block;
RAMBlock *block_array[10];
void *snapshot_ptr_offset_array[10];
@ -167,7 +188,9 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
fclose(file_mem_meta);

if (self->ram_regions_num != head.shadow_memory_regions) {
nyx_error("Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n", self->ram_regions_num, head.shadow_memory_regions);
nyx_error(
"Error: self->ram_regions_num (%d) != head.shadow_memory_regions (%d)\n",
self->ram_regions_num, head.shadow_memory_regions);
exit(1);
}

@ -178,16 +201,18 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
fseek(file_mem_dump, 0L, SEEK_END);
uint64_t file_mem_dump_size = ftell(file_mem_dump);

nyx_debug("guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n", self->memory_size, file_mem_dump_size, path_dump);
nyx_debug("guest_ram_size == ftell(f) => 0x%lx vs 0x%lx (%s)\n",
self->memory_size, file_mem_dump_size, path_dump);

#define VGA_SIZE (16 << 20)

if (self->memory_size != file_mem_dump_size) {
if (file_mem_dump_size >= VGA_SIZE) {
nyx_error("ERROR: guest size should be %ld MB - set it to %ld MB\n", (file_mem_dump_size-VGA_SIZE)>>20, (self->memory_size-VGA_SIZE)>>20);
nyx_error("ERROR: guest size should be %ld MB - set it to %ld MB\n",
(file_mem_dump_size - VGA_SIZE) >> 20,
(self->memory_size - VGA_SIZE) >> 20);
exit(1);
}
else{
} else {
nyx_error("ERROR: guest size: %ld bytes\n", file_mem_dump_size);
exit(1);
}
@ -197,7 +222,8 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
fclose(file_mem_dump);

self->snapshot_ptr_fd = open(path_dump, O_RDONLY);
self->snapshot_ptr = mmap(0, self->memory_size, PROT_READ, MAP_SHARED, self->snapshot_ptr_fd, 0);
self->snapshot_ptr =
mmap(0, self->memory_size, PROT_READ, MAP_SHARED, self->snapshot_ptr_fd, 0);

assert(self->snapshot_ptr != (void *)-1);
madvise(self->snapshot_ptr, self->memory_size, MADV_MERGEABLE);
@ -207,7 +233,8 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
uint8_t i = 0;
uint8_t regions_num = 0;
QLIST_FOREACH_RCU (block, &ram_list.blocks, next) {
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset, block->used_length, block->max_length, block->idstr, block->host);
nyx_debug_p(RELOAD_PREFIX, "%lx %lx %lx\t%s\t%p", block->offset,
block->used_length, block->max_length, block->idstr, block->host);

block_array[i] = block;
snapshot_ptr_offset_array[i++] = self->snapshot_ptr + offset;
@ -219,26 +246,34 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
for (uint8_t i = 0; i < regions_num; i++) {
block = block_array[i];
if (!block->mr->readonly) {

if (self->ram_regions_num == 0 && block->used_length >= MEM_SPLIT_START) {

self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr +
self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr =
malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
self->ram_regions_num++;

self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = MEM_SPLIT_END;
self->ram_regions[self->ram_regions_num].base =
MEM_SPLIT_END
;
self->ram_regions[self->ram_regions_num].size = block->used_length - MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset = (snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = snapshot_ptr_offset_array[i]+MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].offset =
(snapshot_ptr_offset_array[i] + MEM_SPLIT_START) - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr =
block->host + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
snapshot_ptr_offset_array[i] + MEM_SPLIT_START;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
@ -247,11 +282,14 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
self->ram_regions[self->ram_regions_num].ram_region = i;
self->ram_regions[self->ram_regions_num].base = block->mr->addr;
self->ram_regions[self->ram_regions_num].size = block->used_length;
self->ram_regions[self->ram_regions_num].offset = snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].offset =
snapshot_ptr_offset_array[i] - snapshot_ptr_offset_array[0];
self->ram_regions[self->ram_regions_num].host_region_ptr = block->host;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr = self->snapshot_ptr+self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].snapshot_region_ptr =
self->snapshot_ptr + self->ram_regions[self->ram_regions_num].offset;
self->ram_regions[self->ram_regions_num].idstr = malloc(strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0, strlen(block->idstr) + 1);
memset(self->ram_regions[self->ram_regions_num].idstr, 0,
strlen(block->idstr) + 1);
strcpy(self->ram_regions[self->ram_regions_num].idstr, block->idstr);
}

@ -271,7 +309,9 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
for (uint8_t i = 0; i < self->ram_regions_num; i++) {
void *host_addr = self->ram_regions[i].host_region_ptr + 0;
assert(munmap(host_addr, self->ram_regions[i].size) != EINVAL);
assert(mmap(host_addr, self->ram_regions[i].size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_FIXED, self->snapshot_ptr_fd, self->ram_regions[i].offset) != MAP_FAILED);
assert(mmap(host_addr, self->ram_regions[i].size,
PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_FIXED,
self->snapshot_ptr_fd, self->ram_regions[i].offset) != MAP_FAILED);
}
#endif

@ -280,23 +320,27 @@ shadow_memory_t* shadow_memory_init_from_snapshot(const char* s
}
|
||||
|
||||
void shadow_memory_prepare_incremental(shadow_memory_t* self){
|
||||
void shadow_memory_prepare_incremental(shadow_memory_t *self)
|
||||
{
|
||||
static int count = 0;
|
||||
|
||||
if (count >= RESTORE_RATE) {
|
||||
count = 0;
|
||||
munmap(self->incremental_ptr, self->memory_size);
|
||||
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, self->snapshot_ptr_fd, 0);
|
||||
self->incremental_ptr = mmap(0, self->memory_size, PROT_READ | PROT_WRITE,
|
||||
MAP_PRIVATE, self->snapshot_ptr_fd, 0);
|
||||
shadow_memory_set_incremental_ptrs(self);
|
||||
}
|
||||
count++;
|
||||
}
|
||||
|
||||
void shadow_memory_switch_snapshot(shadow_memory_t* self, bool incremental){
|
||||
void shadow_memory_switch_snapshot(shadow_memory_t *self, bool incremental)
|
||||
{
|
||||
self->incremental_enabled = incremental;
|
||||
}
|
||||
|
||||
void shadow_memory_restore_memory(shadow_memory_t* self){
|
||||
void shadow_memory_restore_memory(shadow_memory_t *self)
|
||||
{
|
||||
rcu_read_lock();
|
||||
|
||||
uint8_t slot = 0;
|
||||
@ -305,8 +349,10 @@ void shadow_memory_restore_memory(shadow_memory_t* self){
|
||||
addr = self->root_track_pages_stack[i] & 0xFFFFFFFFFFFFF000;
|
||||
slot = self->root_track_pages_stack[i] & 0xFFF;
|
||||
|
||||
memcpy(self->ram_regions[slot].host_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE);
|
||||
memcpy(self->ram_regions[slot].incremental_region_ptr+addr, self->ram_regions[slot].snapshot_region_ptr+addr, TARGET_PAGE_SIZE);
|
||||
memcpy(self->ram_regions[slot].host_region_ptr + addr,
|
||||
self->ram_regions[slot].snapshot_region_ptr + addr, TARGET_PAGE_SIZE);
|
||||
memcpy(self->ram_regions[slot].incremental_region_ptr + addr,
|
||||
self->ram_regions[slot].snapshot_region_ptr + addr, TARGET_PAGE_SIZE);
|
||||
}
|
||||
|
||||
self->root_track_pages_num = 0;
|
||||
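The restore loop reformatted above replays every page recorded on the root-track stack: each 64-bit entry packs a page-aligned offset in its upper bits and a RAM-region slot in its low 12 bits, and both the live RAM and the incremental copy are overwritten from the pristine snapshot. A minimal standalone sketch of that decode-and-copy step (demo_region_t, the function name, and the fixed 0x1000 page size are illustrative, not the Nyx types):

    #include <stdint.h>
    #include <string.h>

    #define DEMO_PAGE_SIZE 0x1000ULL

    /* illustrative stand-in for the Nyx ram_region bookkeeping */
    typedef struct {
        uint8_t *host;     /* live guest RAM of this region */
        uint8_t *snapshot; /* pristine snapshot copy */
    } demo_region_t;

    /* decode each packed entry and copy the page back from the snapshot */
    static void demo_restore_pages(demo_region_t *regions,
                                   const uint64_t *stack, uint64_t num)
    {
        for (uint64_t i = 0; i < num; i++) {
            uint64_t addr = stack[i] & 0xFFFFFFFFFFFFF000ULL; /* page offset */
            uint8_t  slot = stack[i] & 0xFFF;                 /* region index */

            memcpy(regions[slot].host + addr,
                   regions[slot].snapshot + addr, DEMO_PAGE_SIZE);
        }
    }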
@ -315,7 +361,10 @@ void shadow_memory_restore_memory(shadow_memory_t* self){


/* only used in debug mode -> no need to be fast */
bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot){
bool shadow_memory_is_root_page_tracked(shadow_memory_t *self,
                                        uint64_t address,
                                        uint8_t slot)
{
    uint64_t value = (address & 0xFFFFFFFFFFFFF000) | slot;

    for (uint64_t i = 0; i < self->root_track_pages_num; i++) {
@ -326,7 +375,8 @@ bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address,
    return false;
}

void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder){
void shadow_memory_serialize(shadow_memory_t *self, const char *snapshot_folder)
{
    char *tmp1;
    char *tmp2;
    assert(asprintf(&tmp1, "%s/fast_snapshot.mem_meta", snapshot_folder) != -1);
@ -356,12 +406,20 @@ void shadow_memory_serialize(shadow_memory_t* self, const char* snapshot_folder)
    fclose(file_mem_data);
}

static bool shadow_memory_read_page_frame(shadow_memory_t* self, uint64_t address, void* ptr, uint16_t offset, uint16_t size){
static bool shadow_memory_read_page_frame(shadow_memory_t *self,
                                          uint64_t address,
                                          void *ptr,
                                          uint16_t offset,
                                          uint16_t size)
{
    assert((offset + size) <= 0x1000);

    for (uint8_t i = 0; i < self->ram_regions_num; i++) {
        if(address >= self->ram_regions[i].base && address < (self->ram_regions[i].base + self->ram_regions[i].size)){
            void* snapshot_ptr = self->ram_regions[i].snapshot_region_ptr + (address-self->ram_regions[i].base);
        if (address >= self->ram_regions[i].base &&
            address < (self->ram_regions[i].base + self->ram_regions[i].size))
        {
            void *snapshot_ptr = self->ram_regions[i].snapshot_region_ptr +
                                 (address - self->ram_regions[i].base);
            memcpy(ptr, snapshot_ptr + offset, size);
            return true;
        }
@ -369,21 +427,24 @@ static bool shadow_memory_read_page_frame(shadow_memory_t* self, uint64_t addres
    return false;
}

bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size){

bool shadow_memory_read_physical_memory(shadow_memory_t *self,
                                        uint64_t address,
                                        void *ptr,
                                        size_t size)
{
    size_t bytes_left = size;
    size_t copy_bytes = 0;
    uint64_t current_address = address;
    uint64_t offset = 0;

    while (bytes_left != 0) {

        /* full page */
        if ((current_address & 0xFFF) == 0) {
            copy_bytes = 0x1000;
        }
        /* partial page (starting at an offset) */
        else {
        else
        {
            copy_bytes = 0x1000 - (current_address & 0xFFF);
        }

@ -392,7 +453,10 @@ bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address,
            copy_bytes = bytes_left;
        }

        if (shadow_memory_read_page_frame(self, current_address & ~0xFFFULL, ptr + offset, current_address & 0xFFFULL, copy_bytes) == false){
        if (shadow_memory_read_page_frame(self, current_address & ~0xFFFULL,
                                          ptr + offset, current_address & 0xFFFULL,
                                          copy_bytes) == false)
        {
            return false;
        }
        current_address += copy_bytes;

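The while loop above reads an arbitrary guest-physical range by splitting it into page-bounded chunks and delegating each chunk to the per-page reader. The same arithmetic in self-contained form (the page_reader_t callback is a hypothetical stand-in for shadow_memory_read_page_frame()):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 0x1000ULL

    /* hypothetical per-page reader: copies `size` bytes starting at
     * `offset` within the page frame at `page_addr` into `dst` */
    typedef bool (*page_reader_t)(uint64_t page_addr, void *dst,
                                  uint16_t offset, uint16_t size);

    /* split [address, address+size) into page-bounded chunks, mirroring
     * the loop in the hunk above */
    static bool demo_read_physical(page_reader_t read_page, uint64_t address,
                                   void *ptr, size_t size)
    {
        size_t   bytes_left      = size;
        uint64_t current_address = address;
        uint64_t offset          = 0;

        while (bytes_left != 0) {
            /* bytes until the end of the current page */
            size_t copy_bytes =
                DEMO_PAGE_SIZE - (current_address & (DEMO_PAGE_SIZE - 1));
            if (copy_bytes > bytes_left)
                copy_bytes = bytes_left;

            if (!read_page(current_address & ~(DEMO_PAGE_SIZE - 1),
                           (uint8_t *)ptr + offset,
                           current_address & (DEMO_PAGE_SIZE - 1), copy_bytes))
                return false;

            current_address += copy_bytes;
            offset          += copy_bytes;
            bytes_left      -= copy_bytes;
        }
        return true;
    }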
@ -1,13 +1,12 @@
#pragma once

#include <stdint.h>
#include "nyx/snapshot/devices/state_reallocation.h"
#include <stdint.h>

/* munmap & mmap incremental snapshot area after RESTORE_RATE restores to avoid high memory pressure */
#define RESTORE_RATE 2000

typedef struct ram_region_s {

    /* simple numeric identifier
     * (can be the same for multiple regions if the memory is
     * actually splitted across different bases in the guest's memory
@ -66,25 +65,37 @@ typedef struct shadow_memory_s{
} shadow_memory_t;

shadow_memory_t *shadow_memory_init(void);
shadow_memory_t* shadow_memory_init_from_snapshot(const char* snapshot_folder, bool pre_snapshot);
shadow_memory_t *shadow_memory_init_from_snapshot(const char *snapshot_folder,
                                                  bool pre_snapshot);

void shadow_memory_prepare_incremental(shadow_memory_t *self);
void shadow_memory_switch_snapshot(shadow_memory_t *self, bool incremental);

void shadow_memory_restore_memory(shadow_memory_t *self);

static inline void shadow_memory_track_dirty_root_pages(shadow_memory_t* self, uint64_t address, uint8_t slot){
static inline void shadow_memory_track_dirty_root_pages(shadow_memory_t *self,
                                                        uint64_t address,
                                                        uint8_t slot)
{
    if (unlikely(self->root_track_pages_num >= self->root_track_pages_size)) {
        self->root_track_pages_size <<= 2;
        self->root_track_pages_stack = realloc(self->root_track_pages_stack, self->root_track_pages_size*sizeof(uint64_t));
        self->root_track_pages_stack =
            realloc(self->root_track_pages_stack,
                    self->root_track_pages_size * sizeof(uint64_t));
    }

    self->root_track_pages_stack[self->root_track_pages_num] = (address & 0xFFFFFFFFFFFFF000) | slot;
    self->root_track_pages_stack[self->root_track_pages_num] =
        (address & 0xFFFFFFFFFFFFF000) | slot;
    self->root_track_pages_num++;
}

bool shadow_memory_is_root_page_tracked(shadow_memory_t* self, uint64_t address, uint8_t slot);
bool shadow_memory_is_root_page_tracked(shadow_memory_t *self,
                                        uint64_t address,
                                        uint8_t slot);

void shadow_memory_serialize(shadow_memory_t *self, const char *snapshot_folder);

bool shadow_memory_read_physical_memory(shadow_memory_t* self, uint64_t address, void* ptr, size_t size);
bool shadow_memory_read_physical_memory(shadow_memory_t *self,
                                        uint64_t address,
                                        void *ptr,
                                        size_t size);

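RESTORE_RATE bounds how long the MAP_PRIVATE incremental mapping may accumulate copy-on-write pages before it is thrown away and re-established from the snapshot file; without the periodic remap, every page touched by a restore would keep a private copy resident. A sketch of that recycle pattern under plain POSIX mmap semantics (DEMO_REMAP_RATE and the function name are illustrative, not the Nyx API):

    #include <assert.h>
    #include <stddef.h>
    #include <sys/mman.h>

    #define DEMO_REMAP_RATE 2000 /* illustrative, mirrors RESTORE_RATE */

    /* drop the CoW pages of a MAP_PRIVATE view every DEMO_REMAP_RATE calls
     * by re-establishing the mapping from the backing snapshot file */
    static void *demo_refresh_view(void *view, size_t size, int fd)
    {
        static int count = 0;

        if (count >= DEMO_REMAP_RATE) {
            count = 0;
            assert(munmap(view, size) == 0);
            view = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
            assert(view != MAP_FAILED);
        }
        count++;
        return view;
    }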
@ -3,12 +3,13 @@
#include "sysemu/cpus.h"
#include "nyx/state/snapshot_state.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include "nyx/memory_access.h"
#include <stdio.h>
#include "nyx/state/state.h"
#include <stdint.h>
#include <stdio.h>

void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
void serialize_state(const char *filename_prefix, bool is_pre_snapshot)
{
    nyx_trace();

    char *tmp;
@ -29,8 +30,7 @@ void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
    if (is_pre_snapshot) {
        header.type = NYX_SERIALIZED_TYPE_PRE_SNAPSHOT;
        fwrite(&header, sizeof(serialized_state_header_t), 1, fp);
    }
    else{
    } else {
        header.type = NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT;
        fwrite(&header, sizeof(serialized_state_header_t), 1, fp);

@ -38,12 +38,14 @@ void serialize_state(const char* filename_prefix, bool is_pre_snapshot){
        serialized_state_root_snapshot_t root_snapshot = { 0 };

        for (uint8_t i = 0; i < 4; i++) {
            root_snapshot.pt_ip_filter_configured[i] = nyx_global_state->pt_ip_filter_configured[i];
            root_snapshot.pt_ip_filter_configured[i] =
                nyx_global_state->pt_ip_filter_configured[i];
            root_snapshot.pt_ip_filter_a[i] = nyx_global_state->pt_ip_filter_a[i];
            root_snapshot.pt_ip_filter_b[i] = nyx_global_state->pt_ip_filter_b[i];
        }
        root_snapshot.parent_cr3 = nyx_global_state->parent_cr3;
        root_snapshot.disassembler_word_width = nyx_global_state->disassembler_word_width;
        root_snapshot.disassembler_word_width =
            nyx_global_state->disassembler_word_width;
        root_snapshot.fast_reload_pre_image = nyx_global_state->fast_reload_pre_image;
        root_snapshot.mem_mode = nyx_global_state->mem_mode;
        root_snapshot.pt_trace_mode = nyx_global_state->pt_trace_mode;
@ -55,22 +57,26 @@ void serialize_state(const char* filename_prefix, bool is_pre_snapshot){

        root_snapshot.cap_timeout_detection = nyx_global_state->cap_timeout_detection;
        root_snapshot.cap_only_reload_mode = nyx_global_state->cap_only_reload_mode;
        root_snapshot.cap_compile_time_tracing = nyx_global_state->cap_compile_time_tracing;
        root_snapshot.cap_compile_time_tracing =
            nyx_global_state->cap_compile_time_tracing;
        root_snapshot.cap_ijon_tracing = nyx_global_state->cap_ijon_tracing;
        root_snapshot.cap_cr3 = nyx_global_state->cap_cr3;
        root_snapshot.cap_compile_time_tracing_buffer_vaddr = nyx_global_state->cap_compile_time_tracing_buffer_vaddr;
        root_snapshot.cap_ijon_tracing_buffer_vaddr = nyx_global_state->cap_ijon_tracing_buffer_vaddr;
        root_snapshot.cap_coverage_bitmap_size = nyx_global_state->cap_coverage_bitmap_size;
        root_snapshot.cap_compile_time_tracing_buffer_vaddr =
            nyx_global_state->cap_compile_time_tracing_buffer_vaddr;
        root_snapshot.cap_ijon_tracing_buffer_vaddr =
            nyx_global_state->cap_ijon_tracing_buffer_vaddr;
        root_snapshot.cap_coverage_bitmap_size =
            nyx_global_state->cap_coverage_bitmap_size;

        fwrite(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1, fp);

    }

    fclose(fp);
    free(tmp);
}

void deserialize_state(const char* filename_prefix){
void deserialize_state(const char *filename_prefix)
{
    nyx_trace();

    char *tmp;
@ -92,20 +98,22 @@ void deserialize_state(const char* filename_prefix){

    if (header.type == NYX_SERIALIZED_TYPE_PRE_SNAPSHOT) {
        /* we're done here */
    }
    else if (header.type == NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT){
    } else if (header.type == NYX_SERIALIZED_TYPE_ROOT_SNAPSHOT) {
        qemu_nyx_state_t *nyx_global_state = GET_GLOBAL_STATE();
        serialized_state_root_snapshot_t root_snapshot = { 0 };
        assert(fread(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1, fp) == 1);
        assert(fread(&root_snapshot, sizeof(serialized_state_root_snapshot_t), 1,
                     fp) == 1);

        for (uint8_t i = 0; i < 4; i++) {
            nyx_global_state->pt_ip_filter_configured[i] = root_snapshot.pt_ip_filter_configured[i];
            nyx_global_state->pt_ip_filter_configured[i] =
                root_snapshot.pt_ip_filter_configured[i];
            nyx_global_state->pt_ip_filter_a[i] = root_snapshot.pt_ip_filter_a[i];
            nyx_global_state->pt_ip_filter_b[i] = root_snapshot.pt_ip_filter_b[i];
        }

        nyx_global_state->parent_cr3 = root_snapshot.parent_cr3;
        nyx_global_state->disassembler_word_width = root_snapshot.disassembler_word_width;
        nyx_global_state->disassembler_word_width =
            root_snapshot.disassembler_word_width;
        nyx_global_state->fast_reload_pre_image = root_snapshot.fast_reload_pre_image;
        nyx_global_state->mem_mode = root_snapshot.mem_mode;
        nyx_global_state->pt_trace_mode = root_snapshot.pt_trace_mode;
@ -117,21 +125,25 @@ void deserialize_state(const char* filename_prefix){

        nyx_global_state->cap_timeout_detection = root_snapshot.cap_timeout_detection;
        nyx_global_state->cap_only_reload_mode = root_snapshot.cap_only_reload_mode;
        nyx_global_state->cap_compile_time_tracing = root_snapshot.cap_compile_time_tracing;
        nyx_global_state->cap_compile_time_tracing =
            root_snapshot.cap_compile_time_tracing;
        nyx_global_state->cap_ijon_tracing = root_snapshot.cap_ijon_tracing;
        nyx_global_state->cap_cr3 = root_snapshot.cap_cr3;
        nyx_global_state->cap_compile_time_tracing_buffer_vaddr = root_snapshot.cap_compile_time_tracing_buffer_vaddr;
        nyx_global_state->cap_ijon_tracing_buffer_vaddr = root_snapshot.cap_ijon_tracing_buffer_vaddr;
        nyx_global_state->cap_coverage_bitmap_size = root_snapshot.cap_coverage_bitmap_size;
        nyx_global_state->cap_compile_time_tracing_buffer_vaddr =
            root_snapshot.cap_compile_time_tracing_buffer_vaddr;
        nyx_global_state->cap_ijon_tracing_buffer_vaddr =
            root_snapshot.cap_ijon_tracing_buffer_vaddr;
        nyx_global_state->cap_coverage_bitmap_size =
            root_snapshot.cap_coverage_bitmap_size;

        assert(apply_capabilities(qemu_get_cpu(0)));
        remap_payload_buffer(nyx_global_state->payload_buffer, ((CPUState *)qemu_get_cpu(0)) );
        remap_payload_buffer(nyx_global_state->payload_buffer,
                             ((CPUState *)qemu_get_cpu(0)));

        /* makes sure that we are allowed to enter the fuzzing loop */
        nyx_global_state->get_host_config_done = true;
        nyx_global_state->set_agent_config_done = true;
    }
    else{
    } else {
        fprintf(stderr, "[QEMU-Nyx]: this feature is currently missing\n");
        abort();
    }

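serialize_state() and deserialize_state() follow a simple tagged-container scheme: a fixed header is written first, and its type field decides whether a root-snapshot payload follows. A toy version of the same pattern (the struct layouts and magic value here are invented for illustration; the real definitions live in nyx/state/snapshot_state.h):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t magic;
        uint32_t type; /* 0 = pre-snapshot, 1 = root snapshot */
    } demo_header_t;

    typedef struct {
        uint64_t parent_cr3;
        uint8_t  disassembler_word_width;
    } demo_root_snapshot_t;

    static void demo_serialize(FILE *fp, uint32_t type,
                               const demo_root_snapshot_t *root)
    {
        demo_header_t header = { .magic = 0x4E595853, .type = type };

        fwrite(&header, sizeof(header), 1, fp);
        if (type != 0) { /* root snapshot: append the payload */
            fwrite(root, sizeof(*root), 1, fp);
        }
    }

    static void demo_deserialize(FILE *fp, demo_root_snapshot_t *root)
    {
        demo_header_t header;

        assert(fread(&header, sizeof(header), 1, fp) == 1);
        if (header.type != 0) { /* dispatch on the tag, as above */
            assert(fread(root, sizeof(*root), 1, fp) == 1);
        }
    }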
@ -42,6 +42,5 @@ typedef struct serialized_state_root_snapshot_s {
} serialized_state_root_snapshot_t;




void serialize_state(const char *filename_prefix, bool is_pre_snapshot);
void deserialize_state(const char *filename_prefix);

@ -24,14 +24,14 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#include <stdint.h>
#include <stdio.h>

#include "nyx/state/state.h"
#include "nyx/debug.h"
#include "nyx/memory_access.h"
#include "sysemu/kvm.h"
#include "nyx/auxiliary_buffer.h"
#include "nyx/sharedir.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/helpers.h"
#include "nyx/memory_access.h"
#include "nyx/sharedir.h"
#include "nyx/state/state.h"

// #define STATE_VERBOSE

@ -40,7 +40,8 @@ qemu_nyx_state_t global_state;

#define LIBXDC_RELEASE_VERSION_REQUIRED 2

void state_init_global(void){
void state_init_global(void)
{
#ifdef STATE_VERBOSE
    fprintf(stderr, "--> %s <--\n", __func__);
#endif
@ -149,57 +150,69 @@ void state_init_global(void){
}


fast_reload_t* get_fast_reload_snapshot(void){
fast_reload_t *get_fast_reload_snapshot(void)
{
    return global_state.fast_reload_snapshot;
}

void set_fast_reload_mode(bool mode){
void set_fast_reload_mode(bool mode)
{
    global_state.fast_reload_mode = mode;
}

void set_fast_reload_path(const char* path){
void set_fast_reload_path(const char *path)
{
    assert(global_state.fast_reload_path == NULL);
    global_state.fast_reload_path = malloc(strlen(path) + 1);
    strcpy(global_state.fast_reload_path, path);
}

void set_fast_reload_pre_path(const char* path){
void set_fast_reload_pre_path(const char *path)
{
    assert(global_state.fast_reload_pre_path == NULL);
    global_state.fast_reload_pre_path = malloc(strlen(path) + 1);
    strcpy(global_state.fast_reload_pre_path, path);
}

void set_fast_reload_pre_image(void){
void set_fast_reload_pre_image(void)
{
    assert(global_state.fast_reload_pre_path != NULL);
    global_state.fast_reload_pre_image = true;
}

void enable_fast_reloads(void){
void enable_fast_reloads(void)
{
    assert(global_state.fast_reload_path != NULL);
    global_state.fast_reload_enabled = true;
}

void init_page_cache(char* path){
void init_page_cache(char *path)
{
    assert(global_state.page_cache == NULL);
    global_state.page_cache = page_cache_new((CPUState *)qemu_get_cpu(0), path);
}

page_cache_t* get_page_cache(void){
page_cache_t *get_page_cache(void)
{
    assert(global_state.page_cache);
    return global_state.page_cache;
}

void init_redqueen_state(void){
    global_state.redqueen_state = new_rq_state((CPUState *)qemu_get_cpu(0), get_page_cache());
void init_redqueen_state(void)
{
    global_state.redqueen_state =
        new_rq_state((CPUState *)qemu_get_cpu(0), get_page_cache());
}


redqueen_t* get_redqueen_state(void){
redqueen_t *get_redqueen_state(void)
{
    assert(global_state.redqueen_state != NULL);
    return global_state.redqueen_state;
}

static void* alloc_auxiliary_buffer(const char* file){
static void *alloc_auxiliary_buffer(const char *file)
{
    void *ptr;
    struct stat st;
    int fd = open(file, O_CREAT | O_RDWR, S_IRWXU | S_IRWXG | S_IRWXO);
@ -207,7 +220,8 @@ static void* alloc_auxiliary_buffer(const char* file){
    assert(ftruncate(fd, AUX_BUFFER_SIZE) == 0);
    stat(file, &st);

    nyx_debug_p(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx", AUX_BUFFER_SIZE, st.st_size);
    nyx_debug_p(INTERFACE_PREFIX, "new aux buffer file: (max size: %x) %lx",
                AUX_BUFFER_SIZE, st.st_size);

    assert(AUX_BUFFER_SIZE == st.st_size);
    ptr = mmap(0, AUX_BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
@ -218,26 +232,32 @@ static void* alloc_auxiliary_buffer(const char* file){
    return ptr;
}

void init_aux_buffer(const char* filename){
    global_state.auxilary_buffer = (auxilary_buffer_t*)alloc_auxiliary_buffer(filename);
void init_aux_buffer(const char *filename)
{
    global_state.auxilary_buffer =
        (auxilary_buffer_t *)alloc_auxiliary_buffer(filename);
    init_auxiliary_buffer(global_state.auxilary_buffer);
}

void set_payload_buffer(uint64_t payload_buffer){
void set_payload_buffer(uint64_t payload_buffer)
{
    assert(global_state.payload_buffer == 0 && global_state.nested == false);
    global_state.payload_buffer = payload_buffer;
    global_state.nested = false;
}

void set_payload_pages(uint64_t* payload_pages, uint32_t pages){
    assert(global_state.nested_payload_pages == NULL && global_state.nested_payload_pages_num == 0);
void set_payload_pages(uint64_t *payload_pages, uint32_t pages)
{
    assert(global_state.nested_payload_pages == NULL &&
           global_state.nested_payload_pages_num == 0);
    global_state.nested_payload_pages = (uint64_t *)malloc(sizeof(uint64_t) * pages);
    global_state.nested_payload_pages_num = pages;
    memcpy(global_state.nested_payload_pages, payload_pages, sizeof(uint64_t) * pages);
    global_state.nested = true;
}

void set_workdir_path(char* workdir){
void set_workdir_path(char *workdir)
{
    assert(workdir && !global_state.workdir_path);
    assert(asprintf(&global_state.workdir_path, "%s", workdir) != -1);
}

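alloc_auxiliary_buffer() sizes a file with ftruncate() and maps it MAP_SHARED, so the fuzzer frontend can map the same file and observe writes immediately. The essentials of that pattern, reduced to a standalone sketch (BUF_SIZE and the function name are illustrative):

    #include <assert.h>
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define BUF_SIZE 4096 /* illustrative; Nyx uses AUX_BUFFER_SIZE */

    /* back a shared buffer with a file so another process can mmap the
     * same bytes */
    static void *demo_alloc_shared_buffer(const char *file)
    {
        int fd = open(file, O_CREAT | O_RDWR, 0777);
        assert(fd >= 0);
        assert(ftruncate(fd, BUF_SIZE) == 0);

        void *ptr = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        assert(ptr != MAP_FAILED);
        close(fd); /* the mapping keeps the file contents reachable */
        return ptr;
    }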
@ -21,14 +21,14 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.

#pragma once

#include "nyx/auxiliary_buffer.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/page_cache.h"
#include "nyx/redqueen.h"
#include "nyx/redqueen_patch.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/page_cache.h"
#include "nyx/synchronization.h"
#include "nyx/auxiliary_buffer.h"
#include "nyx/sharedir.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/synchronization.h"
#include "nyx/types.h"

#include <libxdc.h>
@ -36,7 +36,6 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define INTEL_PT_MAX_RANGES 4

typedef struct qemu_nyx_state_s {

    /* set if FDL backend is used (required to perform some additional runtime tests) */
    bool nyx_fdl;

@ -73,8 +72,9 @@ typedef struct qemu_nyx_state_s{
    bool pt_trace_mode; // enabled by default; disabled if compile-time tracing is implemented by agent

    /* disabled by default; enable to force usage of PT tracing
     * (useful for targets that use compile-time tracing and redqueen at the same time (which obviously relies on PT traces))
     * This mode is usually enabled by the fuzzing logic by enabling trace mode.
     * (useful for targets that use compile-time tracing and redqueen at the same
     * time (which obviously relies on PT traces)) This mode is usually enabled by
     * the fuzzing logic by enabling trace mode.
     * *** THIS FEATURES IS STILL EXPERIMENTAL ***
     * */
    bool pt_trace_mode_force;

@ -1,18 +1,18 @@
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu-common.h"
#include "nyx/synchronization.h"
#include "nyx/debug.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/file_helper.h"
#include "nyx/helpers.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/interface.h"
#include "nyx/fast_vm_reload.h"
#include "qemu-common.h"
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "nyx/debug.h"
#include "nyx/state/state.h"
#include <sys/syscall.h>
#include <linux/kvm.h>
#include "qemu/main-loop.h"
#include "nyx/helpers.h"
#include "nyx/file_helper.h"
#include <sys/syscall.h>


#include "pt.h"
@ -28,7 +28,8 @@ volatile bool synchronization_kvm_loop_waiting = false;
/* SIGALRM based timeout detection */
// #define DEBUG_TIMEOUT_DETECTOR

void init_timeout_detector(timeout_detector_t* timer){
void init_timeout_detector(timeout_detector_t *timer)
{
    timer->kvm_tid = 0;
    timer->detection_enabled = false;

@ -39,10 +40,10 @@ void init_timeout_detector(timeout_detector_t* timer){
    timer->alarm.it_interval.tv_usec = 0;
    timer->alarm.it_value.tv_sec = 0;
    timer->alarm.it_value.tv_usec = 0;

}

static void sigalarm_handler(int signum) {
static void sigalarm_handler(int signum)
{
    /* ensure that SIGALARM is ALWAYS handled by kvm thread */
    assert(GET_GLOBAL_STATE()->timeout_detector.kvm_tid == syscall(SYS_gettid));
#ifdef DEBUG_TIMEOUT_DETECTOR
@ -50,7 +51,8 @@ static void sigalarm_handler(int signum) {
#endif
}

void install_timeout_detector(timeout_detector_t* timer){
void install_timeout_detector(timeout_detector_t *timer)
{
    timer->kvm_tid = syscall(SYS_gettid);
    if (signal(SIGALRM, sigalarm_handler) == SIG_ERR) {
        fprintf(stderr, "%s failed!\n", __func__);
@ -61,7 +63,8 @@ void install_timeout_detector(timeout_detector_t* timer){
#endif
}

void reset_timeout_detector(timeout_detector_t* timer){
void reset_timeout_detector(timeout_detector_t *timer)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
    fprintf(stderr, "%s!\n", __func__);
#endif
@ -90,14 +93,17 @@ void update_itimer(timeout_detector_t* timer, uint8_t sec, uint32_t usec)
    }
}

void arm_sigprof_timer(timeout_detector_t* timer){
void arm_sigprof_timer(timeout_detector_t *timer)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
    fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec, timer->alarm.it_value.tv_usec);
    fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec,
            timer->alarm.it_value.tv_usec);
#endif

    if (timer->detection_enabled) {
        if (timer->alarm.it_value.tv_usec == 0 && timer->alarm.it_value.tv_sec == 0) {
            fprintf(stderr, "Attempting to re-arm an expired timer! => reset(%ld.%ld)\n",
            fprintf(stderr,
                    "Attempting to re-arm an expired timer! => reset(%ld.%ld)\n",
                    timer->config.tv_sec, timer->config.tv_usec);
            reset_timeout_detector(timer);
        }
@ -105,10 +111,11 @@ void arm_sigprof_timer(timeout_detector_t* timer){
    }
}

bool disarm_sigprof_timer(timeout_detector_t* timer){

bool disarm_sigprof_timer(timeout_detector_t *timer)
{
#ifdef DEBUG_TIMEOUT_DETECTOR
    fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec, timer->alarm.it_value.tv_usec);
    fprintf(stderr, "%s (%ld %ld)\n", __func__, timer->alarm.it_value.tv_sec,
            timer->alarm.it_value.tv_usec);
#endif

    if (timer->detection_enabled) {
@ -124,7 +131,8 @@ bool disarm_sigprof_timer(timeout_detector_t* timer){
    return false;
}

void block_signals(void){
void block_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
@ -133,10 +141,10 @@ void block_signals(void){
    sigaddset(&set, SIGSEGV);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
    // fprintf(stderr, "%s!\n", __func__);

}

void unblock_signals(void){
void unblock_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
@ -148,7 +156,8 @@ void unblock_signals(void){

/* -------------------- */

static inline void handle_tmp_snapshot_state(void){
static inline void handle_tmp_snapshot_state(void)
{
    if (GET_GLOBAL_STATE()->discard_tmp_snapshot) {
        if (fast_reload_tmp_created(get_fast_reload_snapshot())) {
            qemu_mutex_lock_iothread();
@ -161,12 +170,14 @@ static inline void handle_tmp_snapshot_state(void){
    }
}

static inline bool synchronization_check_page_not_found(void){
static inline bool synchronization_check_page_not_found(void)
{
    bool failure = false;

    /* a page is missing in the current execution */
    if (GET_GLOBAL_STATE()->decoder_page_fault) {
        set_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->decoder_page_fault_addr);
        set_page_not_found_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
                                         GET_GLOBAL_STATE()->decoder_page_fault_addr);
        GET_GLOBAL_STATE()->decoder_page_fault = false;
        GET_GLOBAL_STATE()->decoder_page_fault_addr = 0;
        failure = true;
@ -184,7 +195,8 @@ static inline bool synchronization_check_page_not_found(void){
    return failure;
}

void synchronization_unlock(void){
void synchronization_unlock(void)
{
    // fprintf(stderr, "%s\n", __func__);

    pthread_mutex_lock(&synchronization_lock_mutex);
@ -196,15 +208,16 @@ void synchronization_unlock(void){
uint64_t run_counter = 0;
bool in_fuzzing_loop = false;

void synchronization_lock_hprintf(void){
void synchronization_lock_hprintf(void)
{
    pthread_mutex_lock(&synchronization_lock_mutex);
    interface_send_char(NYX_INTERFACE_PING);

    pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex);
    pthread_mutex_unlock(&synchronization_lock_mutex);
}
void synchronization_lock(void){

void synchronization_lock(void)
{
    timeout_detector_t timer = GET_GLOBAL_STATE()->timeout_detector;
    pthread_mutex_lock(&synchronization_lock_mutex);
    run_counter++;
@ -251,7 +264,8 @@ void synchronization_lock(void){
    pthread_cond_wait(&synchronization_lock_condition, &synchronization_lock_mutex);
    pthread_mutex_unlock(&synchronization_lock_mutex);

    check_auxiliary_config_buffer(GET_GLOBAL_STATE()->auxilary_buffer, &GET_GLOBAL_STATE()->shadow_config);
    check_auxiliary_config_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
                                  &GET_GLOBAL_STATE()->shadow_config);

    if (GET_GLOBAL_STATE()->starved == true)
        set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 2);
@ -261,22 +275,25 @@ void synchronization_lock(void){
    GET_GLOBAL_STATE()->pt_trace_size = 0;
}

static void perform_reload(void){
static void perform_reload(void)
{
    if (fast_reload_root_created(get_fast_reload_snapshot())) {
        qemu_mutex_lock_iothread();
        fast_reload_restore(get_fast_reload_snapshot());
        qemu_mutex_unlock_iothread();
        set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);
        set_result_dirty_pages(GET_GLOBAL_STATE()->auxilary_buffer, get_dirty_page_num(get_fast_reload_snapshot()));
    }
    else{
        set_result_dirty_pages(GET_GLOBAL_STATE()->auxilary_buffer,
                               get_dirty_page_num(get_fast_reload_snapshot()));
    } else {
        fprintf(stderr, "WARNING: Root snapshot is not available yet!\n");
    }
}

void synchronization_lock_crash_found(void){
void synchronization_lock_crash_found(void)
{
    if (!in_fuzzing_loop && GET_GLOBAL_STATE()->in_fuzzing_mode) {
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP] at %lx\n", getpid(), run_counter, __func__, get_rip(qemu_get_cpu(0)));
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP] at %lx\n", getpid(),
                run_counter, __func__, get_rip(qemu_get_cpu(0)));
        // abort();
    }

@ -291,9 +308,11 @@ void synchronization_lock_crash_found(void){
    in_fuzzing_loop = false;
}

void synchronization_lock_asan_found(void){
void synchronization_lock_asan_found(void)
{
    if (!in_fuzzing_loop) {
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
                run_counter, __func__);
        set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
    }

@ -308,8 +327,8 @@ void synchronization_lock_asan_found(void){
    in_fuzzing_loop = false;
}

void synchronization_lock_timeout_found(void){

void synchronization_lock_timeout_found(void)
{
    // fprintf(stderr, "<%d>\t%s\n", getpid(), __func__);

    if (!in_fuzzing_loop) {
@ -328,9 +347,11 @@ void synchronization_lock_timeout_found(void){
    in_fuzzing_loop = false;
}

void synchronization_lock_shutdown_detected(void){
void synchronization_lock_shutdown_detected(void)
{
    if (!in_fuzzing_loop) {
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
                run_counter, __func__);
        set_success_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 0);
    }

@ -343,19 +364,23 @@ void synchronization_lock_shutdown_detected(void){
    in_fuzzing_loop = false;
}

void synchronization_payload_buffer_write_detected(void){
void synchronization_payload_buffer_write_detected(void)
{
    static char reason[1024];

    if (!in_fuzzing_loop) {
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
                run_counter, __func__);
    }

    pt_disable(qemu_get_cpu(0), false);

    handle_tmp_snapshot_state();

    int bytes = snprintf(reason, 1024, "Payload buffer write attempt at RIP: %lx\n", get_rip(qemu_get_cpu(0)));
    set_payload_buffer_write_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer, reason, bytes);
    int bytes = snprintf(reason, 1024, "Payload buffer write attempt at RIP: %lx\n",
                         get_rip(qemu_get_cpu(0)));
    set_payload_buffer_write_reason_auxiliary_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
                                                     reason, bytes);
    set_reload_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer);

    perform_reload();
@ -363,9 +388,11 @@ void synchronization_payload_buffer_write_detected(void){
    in_fuzzing_loop = false;
}

void synchronization_cow_full_detected(void){
void synchronization_cow_full_detected(void)
{
    if (!in_fuzzing_loop) {
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
        fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(),
                run_counter, __func__);
    }

    pt_disable(qemu_get_cpu(0), false);
@ -377,7 +404,8 @@ void synchronization_cow_full_detected(void){
    in_fuzzing_loop = false;
}

void synchronization_disable_pt(CPUState *cpu){
void synchronization_disable_pt(CPUState *cpu)
{
    // nyx_trace();
    if (!in_fuzzing_loop) {
        // fprintf(stderr, "<%d-%ld>\t%s [NOT IN FUZZING LOOP]\n", getpid(), run_counter, __func__);
@ -388,17 +416,23 @@ void synchronization_disable_pt(CPUState *cpu){

    handle_tmp_snapshot_state();

    if(GET_GLOBAL_STATE()->in_reload_mode || GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->dump_page || fast_reload_tmp_created(get_fast_reload_snapshot())){
    if (GET_GLOBAL_STATE()->in_reload_mode ||
        GET_GLOBAL_STATE()->in_redqueen_reload_mode || GET_GLOBAL_STATE()->dump_page ||
        fast_reload_tmp_created(get_fast_reload_snapshot()))
    {
        perform_reload();
    }

    set_result_pt_trace_size(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->pt_trace_size);
    set_result_bb_coverage(GET_GLOBAL_STATE()->auxilary_buffer, GET_GLOBAL_STATE()->bb_coverage);
    set_result_pt_trace_size(GET_GLOBAL_STATE()->auxilary_buffer,
                             GET_GLOBAL_STATE()->pt_trace_size);
    set_result_bb_coverage(GET_GLOBAL_STATE()->auxilary_buffer,
                           GET_GLOBAL_STATE()->bb_coverage);

    in_fuzzing_loop = false;
}

void synchronization_enter_fuzzing_loop(CPUState *cpu){
void synchronization_enter_fuzzing_loop(CPUState *cpu)
{
    if (pt_enable(cpu, false) == 0) {
        cpu->pt_enabled = true;
    }
@ -406,4 +440,3 @@ void synchronization_enter_fuzzing_loop(CPUState *cpu){

    reset_timeout_detector(&(GET_GLOBAL_STATE()->timeout_detector));
}

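The timeout detector reformatted above relies on a one-shot ITIMER_REAL: a SIGALRM delivered to the KVM thread interrupts the blocking KVM_RUN ioctl, which is what actually bounds a fuzzing execution. A free-standing sketch of arming and disarming such a timer (the 50ms budget and pause() stand-in are arbitrary examples, not the Nyx flow):

    #include <assert.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <unistd.h>

    static void alarm_handler(int signum)
    {
        (void)signum; /* delivery alone is enough to interrupt a syscall */
    }

    int main(void)
    {
        struct itimerval itv = { 0 };

        assert(signal(SIGALRM, alarm_handler) != SIG_ERR);

        /* arm: one-shot timer with a 50ms budget */
        itv.it_value.tv_sec  = 0;
        itv.it_value.tv_usec = 50 * 1000;
        assert(setitimer(ITIMER_REAL, &itv, NULL) == 0);

        pause(); /* stand-in for the guest execution window */
        fprintf(stderr, "timeout fired\n");

        /* disarm: an it_value of zero stops the timer */
        itv.it_value.tv_usec = 0;
        assert(setitimer(ITIMER_REAL, &itv, NULL) == 0);
        return 0;
    }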
@ -1,9 +1,9 @@
#include "qemu/osdep.h"

#include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include "nyx/debug.h"
#include "nyx/state/state.h"
@ -15,7 +15,8 @@ char *pt_trace_dump_filename;
bool pt_dump_initialized = false;
bool pt_dump_enabled = false;

void pt_trace_dump_enable(bool enable){
void pt_trace_dump_enable(bool enable)
{
    if (pt_dump_initialized)
        pt_dump_enabled = enable;
}
@ -29,14 +30,16 @@ void pt_trace_dump_init(char* filename)

    test_fd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, 0644);
    if (test_fd < 0)
        fprintf(stderr, "Error accessing pt_dump output path %s: %s", pt_trace_dump_filename, strerror(errno));
        fprintf(stderr, "Error accessing pt_dump output path %s: %s",
                pt_trace_dump_filename, strerror(errno));
    assert(test_fd >= 0);

    pt_trace_dump_filename = strdup(filename);
    assert(pt_trace_dump_filename);
}

void pt_truncate_pt_dump_file(void) {
void pt_truncate_pt_dump_file(void)
{
    int fd;

    if (!pt_dump_enabled)
@ -44,7 +47,8 @@ void pt_truncate_pt_dump_file(void) {

    fd = open(pt_trace_dump_filename, O_CREAT | O_TRUNC | O_WRONLY, 0644);
    if (fd < 0) {
        fprintf(stderr, "Error truncating %s: %s\n", pt_trace_dump_filename, strerror(errno));
        fprintf(stderr, "Error truncating %s: %s\n", pt_trace_dump_filename,
                strerror(errno));
        assert(0);
    }
    close(fd);
@ -59,10 +63,10 @@ void pt_write_pt_dump_file(uint8_t *data, size_t bytes)

    fd = open(pt_trace_dump_filename, O_APPEND | O_WRONLY, 0644);
    if (fd < 0) {
        fprintf(stderr, "Error writing pt_trace_dump to %s: %s\n", pt_trace_dump_filename, strerror(errno));
        fprintf(stderr, "Error writing pt_trace_dump to %s: %s\n",
                pt_trace_dump_filename, strerror(errno));
        assert(0);
    }
    assert(bytes == write(fd, data, bytes));
    close(fd);
}

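pt_write_pt_dump_file() reopens the dump with O_APPEND for every chunk instead of keeping a descriptor around, trading a little open() overhead for not having to manage the fd across snapshot restores. The core of that append path as a sketch (function name is illustrative):

    #include <assert.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* append one chunk per call, reopening the file each time as the
     * code above does */
    static void demo_append_chunk(const char *path, const uint8_t *data,
                                  size_t bytes)
    {
        int fd = open(path, O_CREAT | O_APPEND | O_WRONLY, 0644);
        assert(fd >= 0);
        assert(write(fd, data, bytes) == (ssize_t)bytes);
        close(fd);
    }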
71
vl.c
@ -137,12 +137,12 @@ int main(int argc, char **argv)
#ifdef QEMU_NYX
// clang-format on
#include "nyx/debug.h"
#include "nyx/pt.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/synchronization.h"
#include "nyx/fast_vm_reload.h"
#include "nyx/state/state.h"
#include "nyx/fast_vm_reload_sync.h"
#include "nyx/hypercall/hypercall.h"
#include "nyx/pt.h"
#include "nyx/state/state.h"
#include "nyx/synchronization.h"
// clang-format off
#endif

@ -261,8 +261,7 @@ static QemuOptsList qemu_fast_vm_reloads_opts = {
    .implied_opt_name = "order",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_fast_vm_reloads_opts.head),
    .merge_lists = true,
    .desc = {
        {
    .desc = {{
        .name = "path",
        .type = QEMU_OPT_STRING,
    }, {
@ -274,9 +273,7 @@ static QemuOptsList qemu_fast_vm_reloads_opts = {
    }, {
        .name = "skip_serialization",
        .type = QEMU_OPT_BOOL,
    },
    { }
    },
    }, {}},
};
// clang-format off
#endif
@ -1903,8 +1900,8 @@ static void version(void)
{
#ifdef QEMU_NYX
// clang-format on
    printf("QEMU-PT emulator version " QEMU_VERSION QEMU_PKGVERSION " (kAFL)\n"
           QEMU_COPYRIGHT "\n");
    printf("QEMU-PT emulator version " QEMU_VERSION QEMU_PKGVERSION
           " (kAFL)\n" QEMU_COPYRIGHT "\n");
// clang-format off
#else
    printf("QEMU emulator version " QEMU_FULL_VERSION "\n"
@ -2809,7 +2806,8 @@ static bool object_create_delayed(const char *type, QemuOpts *opts)

#ifdef QEMU_NYX
// clang-format on
static bool verifiy_snapshot_folder(const char* folder){
static bool verifiy_snapshot_folder(const char *folder)
{
    struct stat s;

    if (!folder) {
@ -2818,8 +2816,7 @@ static bool verifiy_snapshot_folder(const char* folder){
    if (-1 != stat(folder, &s)) {
        if (S_ISDIR(s.st_mode)) {
            return true;
        }
        else{
        } else {
            error_report("fast_vm_reload: path is not a folder");
            exit(1);
        }
@ -4594,13 +4591,15 @@ int main(int argc, char **argv, char **envp)
    fast_reload_init(GET_GLOBAL_STATE()->fast_reload_snapshot);

    if (fast_vm_reload) {

        if (getenv("NYX_DISABLE_BLOCK_COW")) {
            nyx_error("Nyx block COW cache layer cannot be disabled while using fast snapshots\n");
            nyx_error("Nyx block COW cache layer cannot be disabled while using "
                      "fast snapshots\n");
            exit(1);
        }

        QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"), fast_vm_reload_opt_arg, true);
        QemuOpts *opts =
            qemu_opts_parse_noisily(qemu_find_opts("fast_vm_reload-opts"),
                                    fast_vm_reload_opt_arg, true);
        const char *snapshot_path = qemu_opt_get(opts, "path");
        const char *pre_snapshot_path = qemu_opt_get(opts, "pre_path");

@ -4630,13 +4629,19 @@ int main(int argc, char **argv, char **envp)
        bool load_mode = qemu_opt_get_bool(opts, "load", false);
        bool skip_serialization = qemu_opt_get_bool(opts, "skip_serialization", false);

        if((snapshot_used || load_mode || skip_serialization) && getenv("NYX_DISABLE_DIRTY_RING")){
            error_report("NYX_DISABLE_DIRTY_RING is only allowed during pre-snapshot creation\n");
        if ((snapshot_used || load_mode || skip_serialization) &&
            getenv("NYX_DISABLE_DIRTY_RING"))
        {
            error_report("NYX_DISABLE_DIRTY_RING is only allowed during "
                         "pre-snapshot creation\n");
            exit(1);
        }

        if((pre_snapshot_used && !snapshot_used && !load_mode) && !getenv("NYX_DISABLE_DIRTY_RING")){
            error_report("NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n");
        if ((pre_snapshot_used && !snapshot_used && !load_mode) &&
            !getenv("NYX_DISABLE_DIRTY_RING"))
        {
            error_report(
                "NYX_DISABLE_DIRTY_RING is required during pre-snapshot creation\n");
            exit(1);
        }

@ -4646,7 +4651,8 @@ int main(int argc, char **argv, char **envp)
        }

        if ((!snapshot_used && !pre_snapshot_used) && load_mode) {
            error_report("invalid argument ((!pre_snapshot_used && !pre_snapshot_used) && load_mode)!\n");
            error_report("invalid argument ((!pre_snapshot_used && "
                         "!pre_snapshot_used) && load_mode)!\n");
            exit(1);
        }

@ -4657,32 +4663,33 @@ int main(int argc, char **argv, char **envp)
        if (!skip_serialization) {
            enable_fast_reloads();
        }
        fast_reload_create_from_file_pre_image(get_fast_reload_snapshot(), pre_snapshot_path, false);
        fast_reload_create_from_file_pre_image(get_fast_reload_snapshot(),
                                               pre_snapshot_path, false);
        fast_reload_destroy(get_fast_reload_snapshot());
        GET_GLOBAL_STATE()->fast_reload_snapshot = fast_reload_new();
        fast_reload_init(GET_GLOBAL_STATE()->fast_reload_snapshot);
    }
    else{
    } else {
        if (pre_snapshot_used) {
            nyx_printf("[Qemu-Nyx]: preparing to create pre image...\n");
            set_fast_reload_pre_path(pre_snapshot_path);
            set_fast_reload_pre_image();
        }
        else if(snapshot_used){
        } else if (snapshot_used) {
            set_fast_reload_path(snapshot_path);
            if (!skip_serialization) {
                enable_fast_reloads();
            }
            if (load_mode) {
                set_fast_reload_mode(true);
                nyx_printf("[Qemu-Nyx]: waiting for snapshot to start fuzzing...\n");
                fast_reload_create_from_file(get_fast_reload_snapshot(), snapshot_path, false);
                nyx_printf(
                    "[Qemu-Nyx]: waiting for snapshot to start fuzzing...\n");
                fast_reload_create_from_file(get_fast_reload_snapshot(),
                                             snapshot_path, false);
                // cpu_synchronize_all_post_reset();
                set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer, 3);
                set_state_auxiliary_result_buffer(GET_GLOBAL_STATE()->auxilary_buffer,
                                                  3);
                skip_init();
                // GET_GLOBAL_STATE()->pt_trace_mode = false;
            }
            else{
            } else {
                nyx_printf("[Qemu-Nyx]: Booting VM to start fuzzing...\n");
                set_fast_reload_mode(false);
            }

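The two NYX_DISABLE_DIRTY_RING checks above encode one rule from opposite directions: the variable must be set while creating a pre-snapshot and must not be set in any other mode. The same rule as a small predicate (function and parameter names are illustrative, not part of the commit):

    #include <stdbool.h>
    #include <stdlib.h>

    /* true iff the environment is consistent with the parsed options */
    static bool dirty_ring_args_valid(bool snapshot_used, bool pre_snapshot_used,
                                      bool load_mode, bool skip_serialization)
    {
        bool disable_ring = getenv("NYX_DISABLE_DIRTY_RING") != NULL;

        /* only allowed during pre-snapshot creation */
        if ((snapshot_used || load_mode || skip_serialization) && disable_ring)
            return false;

        /* and required in exactly that case */
        if ((pre_snapshot_used && !snapshot_used && !load_mode) && !disable_ring)
            return false;

        return true;
    }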