split FDL and PT mode

- PT mode now works with both the FDL and dirty-ring backends
as in-kernel dirty page trackers.
This commit is contained in:
Sergej Schumilo 2023-02-10 20:10:45 +01:00
parent ede09f502b
commit 5dae6ab764
6 changed files with 41 additions and 37 deletions

View File

@ -95,6 +95,7 @@ struct KVMState
#ifdef QEMU_NYX #ifdef QEMU_NYX
// clang-format on // clang-format on
bool nyx_no_pt_mode; bool nyx_no_pt_mode;
bool nyx_dirty_ring;
// clang-format off // clang-format off
#endif #endif
@ -419,7 +420,7 @@ int kvm_init_vcpu(CPUState *cpu)
#ifdef QEMU_NYX #ifdef QEMU_NYX
// clang-format on // clang-format on
if (s->nyx_no_pt_mode) { if (s->nyx_dirty_ring) {
if (!getenv("NYX_DISABLE_DIRTY_RING")) { if (!getenv("NYX_DISABLE_DIRTY_RING")) {
nyx_dirty_ring_pre_init(cpu->kvm_fd, s->vmfd); nyx_dirty_ring_pre_init(cpu->kvm_fd, s->vmfd);
} }
@ -1931,8 +1932,7 @@ static int kvm_init(MachineState *ms)
} }
#ifdef QEMU_NYX #ifdef QEMU_NYX
// clang-format on // clang-format on
if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1 && if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_PT) != 1)
ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) != 1)
{ {
/* fallback -> use vanilla KVM module instead (no Intel-PT tracing or nested hypercalls at this point) */ /* fallback -> use vanilla KVM module instead (no Intel-PT tracing or nested hypercalls at this point) */
fprintf(stderr, "[QEMU-Nyx] Could not access KVM-PT kernel " fprintf(stderr, "[QEMU-Nyx] Could not access KVM-PT kernel "
@ -1944,14 +1944,6 @@ static int kvm_init(MachineState *ms)
goto err; goto err;
} }
int ret_val = ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if (ret_val == -1 || ret_val == 0) {
fprintf(stderr, "[QEMU-Nyx] Error: NYX requires support for "
"KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
ret = -errno;
goto err;
}
/* check for vmware_backdoor support */ /* check for vmware_backdoor support */
int fd = open("/sys/module/kvm/parameters/enable_vmware_backdoor", O_RDONLY); int fd = open("/sys/module/kvm/parameters/enable_vmware_backdoor", O_RDONLY);
if (fd == -1) { if (fd == -1) {
@ -1985,15 +1977,32 @@ static int kvm_init(MachineState *ms)
fprintf(stderr, "[QEMU-Nyx] NYX runs in fallback mode (no Intel-PT tracing " fprintf(stderr, "[QEMU-Nyx] NYX runs in fallback mode (no Intel-PT tracing "
"or nested hypercall support)!\n"); "or nested hypercall support)!\n");
s->nyx_no_pt_mode = true; s->nyx_no_pt_mode = true;
GET_GLOBAL_STATE()->nyx_fdl = false; GET_GLOBAL_STATE()->nyx_pt = false;
GET_GLOBAL_STATE()->pt_trace_mode = GET_GLOBAL_STATE()->pt_trace_mode = false; // Intel PT is not available in this mode
false; // Intel PT is not available in this mode }
else {
s->nyx_no_pt_mode = false;
GET_GLOBAL_STATE()->nyx_pt = true;
GET_GLOBAL_STATE()->pt_trace_mode = true;
}
if (ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_NYX_FDL) == 1){
s->nyx_dirty_ring = false;
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_FDL);
}
else {
int ret_val = ioctl(s->fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
if (ret_val == -1 || ret_val == 0) {
fprintf(stderr, "[QEMU-Nyx] Error: NYX requires support for "
"KVM_CAP_DIRTY_LOG_RING in fallback mode!\n");
ret = -errno;
goto err;
}
s->nyx_dirty_ring = true;
fast_reload_set_mode(get_fast_reload_snapshot(), fast_reload_set_mode(get_fast_reload_snapshot(),
RELOAD_MEMORY_MODE_DIRTY_RING); RELOAD_MEMORY_MODE_DIRTY_RING);
} else {
s->nyx_no_pt_mode = false;
GET_GLOBAL_STATE()->nyx_fdl = true;
fast_reload_set_mode(get_fast_reload_snapshot(), RELOAD_MEMORY_MODE_FDL);
} }
// clang-format off // clang-format off
#endif #endif
@ -2063,7 +2072,7 @@ static int kvm_init(MachineState *ms)
#ifdef QEMU_NYX #ifdef QEMU_NYX
// clang-format on // clang-format on
if (s->nyx_no_pt_mode) { if (s->nyx_dirty_ring) {
if (getenv("NYX_DISABLE_DIRTY_RING")) { if (getenv("NYX_DISABLE_DIRTY_RING")) {
fprintf(stderr, fprintf(stderr,
"WARNING: Nyx has disabled KVM's dirty-ring (required to enable " "WARNING: Nyx has disabled KVM's dirty-ring (required to enable "
@ -2121,7 +2130,7 @@ static int kvm_init(MachineState *ms)
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1); ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
#else #else
// clang-format on // clang-format on
if (s->nyx_no_pt_mode) { if (s->nyx_dirty_ring) {
ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1); ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
} else { } else {
ret = 0; ret = 0;

View File

@ -25,19 +25,14 @@ extern int load_snapshot(const char *name, Error **errp);
static void adjust_rip(CPUX86State *env, fast_reload_t *snapshot) static void adjust_rip(CPUX86State *env, fast_reload_t *snapshot)
{ {
switch (fast_reload_get_mode(snapshot)) { /* PT mode relies on a custom kernel which uses 'vmcall' hypercalls instead of
case RELOAD_MEMORY_MODE_DEBUG: * vmware-backdoor based hypercalls (via 'out' instructions).
case RELOAD_MEMORY_MODE_DEBUG_QUIET: */
env->eip -= 1; /* out */ if (GET_GLOBAL_STATE()->nyx_pt == true){
break;
case RELOAD_MEMORY_MODE_FDL:
case RELOAD_MEMORY_MODE_FDL_DEBUG:
env->eip -= 3; /* vmcall */ env->eip -= 3; /* vmcall */
break; }
case RELOAD_MEMORY_MODE_DIRTY_RING: else{
case RELOAD_MEMORY_MODE_DIRTY_RING_DEBUG:
env->eip -= 1; /* out */ env->eip -= 1; /* out */
break;
} }
} }

View File

@ -70,7 +70,7 @@ void handle_hypercall_kafl_set_agent_config(struct kvm_run *run,
GET_GLOBAL_STATE()->cap_compile_time_tracing = config.agent_tracing; GET_GLOBAL_STATE()->cap_compile_time_tracing = config.agent_tracing;
if (!GET_GLOBAL_STATE()->cap_compile_time_tracing && if (!GET_GLOBAL_STATE()->cap_compile_time_tracing &&
!GET_GLOBAL_STATE()->nyx_fdl) !GET_GLOBAL_STATE()->nyx_pt)
{ {
nyx_abort("No Intel PT support on this KVM build and no " nyx_abort("No Intel PT support on this KVM build and no "
"compile-time instrumentation enabled in the target\n"); "compile-time instrumentation enabled in the target\n");

View File

@ -45,7 +45,7 @@ void state_init_global(void)
/* safety first */ /* safety first */
assert(libxdc_get_release_version() == LIBXDC_RELEASE_VERSION_REQUIRED); assert(libxdc_get_release_version() == LIBXDC_RELEASE_VERSION_REQUIRED);
global_state.nyx_fdl = false; global_state.nyx_pt = false;
global_state.workdir_path = NULL; global_state.workdir_path = NULL;
global_state.worker_id = 0xffff; global_state.worker_id = 0xffff;

View File

@ -37,8 +37,8 @@ along with QEMU-PT. If not, see <http://www.gnu.org/licenses/>.
#define INTEL_PT_MAX_RANGES 4 #define INTEL_PT_MAX_RANGES 4
typedef struct qemu_nyx_state_s { typedef struct qemu_nyx_state_s {
/* set if FDL backend is used (required to perform some additional runtime tests) */ /* set if PT mode is supported */
bool nyx_fdl; bool nyx_pt;
char *workdir_path; char *workdir_path;
uint32_t worker_id; uint32_t worker_id;

View File

@ -4484,11 +4484,11 @@ static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
model_id = ""; model_id = "";
} }
#ifdef QEMU_NYX #ifdef QEMU_NYX
if(strncmp(model_id, NYX_PT_CPU_MODEL, strlen(NYX_PT_CPU_MODEL)) == 0 && GET_GLOBAL_STATE()->nyx_fdl == false){ if(strncmp(model_id, NYX_PT_CPU_MODEL, strlen(NYX_PT_CPU_MODEL)) == 0 && GET_GLOBAL_STATE()->nyx_pt == false){
fprintf(stderr, "[QEMU-Nyx] Warning: Attempt to use unsupported CPU model (PT) without KVM-PT (Hint: use '-cpu kAFL64-Hypervisor-v2' instead)\n"); fprintf(stderr, "[QEMU-Nyx] Warning: Attempt to use unsupported CPU model (PT) without KVM-PT (Hint: use '-cpu kAFL64-Hypervisor-v2' instead)\n");
model_id = NYX_NO_PT_CPU_MODEL; model_id = NYX_NO_PT_CPU_MODEL;
} }
if(strncmp(model_id, NYX_NO_PT_CPU_MODEL, strlen(NYX_NO_PT_CPU_MODEL)) == 0 && GET_GLOBAL_STATE()->nyx_fdl == true){ if(strncmp(model_id, NYX_NO_PT_CPU_MODEL, strlen(NYX_NO_PT_CPU_MODEL)) == 0 && GET_GLOBAL_STATE()->nyx_pt == true){
fprintf(stderr, "[QEMU-Nyx] Error: Attempt to use unsupported CPU model (NO-PT) with KVM-PT (Hint: use '-cpu kAFL64-Hypervisor-v1' instead)\n"); fprintf(stderr, "[QEMU-Nyx] Error: Attempt to use unsupported CPU model (NO-PT) with KVM-PT (Hint: use '-cpu kAFL64-Hypervisor-v1' instead)\n");
exit(1); exit(1);
} }