diff --git a/Cargo.toml b/Cargo.toml
index c755aa1a19..3cb927aa17 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,6 +10,7 @@ members = [
     "libafl_derive",
     "libafl_cc",
     "libafl_targets",
+    "libafl_frida",
 ]
 exclude = [
     "fuzzers/libfuzzer_libpng",
diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml
index ae06dd2ea4..c59b89c88c 100644
--- a/fuzzers/frida_libpng/Cargo.toml
+++ b/fuzzers/frida_libpng/Cargo.toml
@@ -22,9 +22,14 @@ num_cpus = "1.0"
 which = "4.1"
 
 [target.'cfg(unix)'.dependencies]
-libafl = { path = "../../libafl/" }
-frida-gum = { version = "0.3.2", optional = true, features = ["auto-download", "event-sink", "invocation-listener"] }
-frida-gum-sys = { version = "0.2.2", optional = true, features = ["auto-download", "event-sink", "invocation-listener"] }
+libafl = { path = "../../libafl/", features = [ "std" ] } #, "llmp_small_maps", "llmp_debug"]}
+capstone = "0.8.0"
+frida-gum = { version = "0.4", optional = true, features = [ "auto-download", "event-sink", "invocation-listener"] }
+frida-gum-sys = { version = "0.2.4", optional = true, features = [ "auto-download", "event-sink", "invocation-listener"] }
+libafl_frida = { path = "../../libafl_frida", version = "0.1.0" }
 lazy_static = "1.4.0"
 libc = "0.2"
 libloading = "0.7.0"
+num-traits = "0.2.14"
+rangemap = "0.1.10"
+seahash = "4.1.0"
diff --git a/fuzzers/frida_libpng/build.rs b/fuzzers/frida_libpng/build.rs
index d6d7e094bf..0fa8a7e5b3 100644
--- a/fuzzers/frida_libpng/build.rs
+++ b/fuzzers/frida_libpng/build.rs
@@ -119,6 +119,8 @@ fn main() {
         //.arg("HAS_DUMMY_CRASH=1")
         .arg("-fPIC")
         .arg("-shared")
+        .arg("-O3")
+        //.arg("-fomit-frame-pointer")
         .arg(if env::var("CARGO_CFG_TARGET_OS").unwrap() == "android" {
             "-static-libstdc++"
         } else {
diff --git a/fuzzers/frida_libpng/harness.cc b/fuzzers/frida_libpng/harness.cc
index eaf9aa27ba..20a2070e16 100644
--- a/fuzzers/frida_libpng/harness.cc
+++ b/fuzzers/frida_libpng/harness.cc
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -83,6 +84,25 @@ extern "C" int afl_libfuzzer_init() {
   return 0;
 }
 
+static char * allocation = NULL;
+__attribute__((noinline))
+void func3( char * alloc) {
+  printf("func3\n");
+  if (random() % 5 == 0) {
+    alloc[0xff] = 0xde;
+  }
+}
+__attribute__((noinline))
+void func2() {
+  allocation = (char*)malloc(0xff);
+  printf("func2\n");
+  func3(allocation);
+}
+__attribute__((noinline))
+void func1() {
+  printf("func1\n");
+  func2();
+}
 // Entry point for LibFuzzer.
 // Roughly follows the libpng book example:
 // http://www.libpng.org/pub/png/book/chapter13.html
@@ -91,6 +111,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
     return 0;
   }
+
+  func1();
+
   std::vector v(data, data + size);
   if (png_sig_cmp(v.data(), 0, kPngHeaderSize)) {
     // not a PNG.
diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs
index 1c816c16e1..9728cba289 100644
--- a/fuzzers/frida_libpng/src/fuzzer.rs
+++ b/fuzzers/frida_libpng/src/fuzzer.rs
@@ -4,12 +4,14 @@ use libafl::{
     bolts::tuples::{tuple_list, Named},
     corpus::{
-        Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus,
-        QueueCorpusScheduler,
+        ondisk::OnDiskMetadataFormat, Corpus, InMemoryCorpus,
+        IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler,
     },
     events::{setup_restarting_mgr_std, EventManager},
-    executors::{inprocess::InProcessExecutor, Executor, ExitKind, HasObservers},
-    feedbacks::{CrashFeedback, MaxMapFeedback},
+    executors::{
+        inprocess::InProcessExecutor, timeout::TimeoutExecutor, Executor, ExitKind, HasObservers,
+    },
+    feedbacks::{CrashFeedback, MaxMapFeedback, TimeoutFeedback},
     fuzzer::{Fuzzer, StdFuzzer},
     inputs::{HasTargetBytes, Input},
     mutators::scheduled::{havoc_mutations, StdScheduledMutator},
@@ -22,229 +24,38 @@ use libafl::{
     Error,
 };
 
-use core::cell::RefCell;
-#[cfg(target_arch = "x86_64")]
-use frida_gum::instruction_writer::X86Register;
-#[cfg(target_arch = "aarch64")]
-use frida_gum::instruction_writer::{Aarch64Register, IndexMode};
 use frida_gum::{
-    instruction_writer::InstructionWriter,
-    stalker::{NoneEventSink, Stalker, Transformer},
+    stalker::{NoneEventSink, Stalker},
+    Gum, NativePointer,
 };
-use frida_gum::{Gum, MemoryRange, Module, NativePointer, PageProtection};
-use std::{env, ffi::c_void, path::PathBuf};
 
-/// An helper that feeds FridaInProcessExecutor with user-supplied instrumentation
-pub trait FridaHelper<'a> {
-    fn transformer(&self) -> &Transformer<'a>;
-}
+use std::{env, ffi::c_void, marker::PhantomData, path::PathBuf, time::Duration};
 
-const MAP_SIZE: usize = 64 * 1024;
+use libafl_frida::{
+    asan_rt::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS},
+    helper::{FridaHelper, FridaInstrumentationHelper, MAP_SIZE},
+    FridaOptions,
+};
 
-/// An helper that feeds FridaInProcessExecutor with edge-coverage instrumentation
-struct FridaEdgeCoverageHelper<'a> {
-    map: [u8; MAP_SIZE],
-    previous_pc: RefCell<u64>,
-    base_address: u64,
-    size: usize,
-    current_log_impl: u64,
-    /// Transformer that has to be passed to FridaInProcessExecutor
-    transformer: Option<Transformer<'a>>,
-}
-
-impl<'a> FridaHelper<'a> for FridaEdgeCoverageHelper<'a> {
-    fn transformer(&self) -> &Transformer<'a> {
-        self.transformer.as_ref().unwrap()
-    }
-}
-
-/// Helper function to get the size of a module's CODE section from frida
-pub fn get_module_size(module_name: &str) -> usize {
-    let mut code_size = 0;
-    let code_size_ref = &mut code_size;
-    Module::enumerate_ranges(module_name, PageProtection::ReadExecute, move |details| {
-        *code_size_ref = details.memory_range().size() as usize;
-        true
-    });
-
-    code_size
-}
-
-/// A minimal maybe_log implementation. We insert this into the transformed instruction stream
-/// every time we need a copy that is within a direct branch of the start of the transformed basic
-/// block.
-#[cfg(target_arch = "x86_64")]
-const MAYBE_LOG_CODE: [u8; 47] = [
-    0x9c, /* pushfq */
-    0x50, /* push rax */
-    0x51, /* push rcx */
-    0x52, /* push rdx */
-    0x48, 0x8d, 0x05, 0x24, 0x00, 0x00, 0x00, /* lea rax, sym._afl_area_ptr_ptr */
-    0x48, 0x8b, 0x00, /* mov rax, qword [rax] */
-    0x48, 0x8d, 0x0d, 0x22, 0x00, 0x00, 0x00, /* lea rcx, sym.previous_pc */
-    0x48, 0x8b, 0x11, /* mov rdx, qword [rcx] */
-    0x48, 0x8b, 0x12, /* mov rdx, qword [rdx] */
-    0x48, 0x31, 0xfa, /* xor rdx, rdi */
-    0xfe, 0x04, 0x10, /* inc byte [rax + rdx] */
-    0x48, 0xd1, 0xef, /* shr rdi, 1 */
-    0x48, 0x8b, 0x01, /* mov rax, qword [rcx] */
-    0x48, 0x89, 0x38, /* mov qword [rax], rdi */
-    0x5a, /* pop rdx */
-    0x59, /* pop rcx */
-    0x58, /* pop rax */
-    0x9d, /* popfq */
-    0xc3, /* ret */
-
-    /* Read-only data goes here: */
-    /* uint8_t* afl_area_ptr */
-    /* uint64_t* afl_prev_loc_ptr */
-];
-
-#[cfg(target_arch = "aarch64")]
-const MAYBE_LOG_CODE: [u8; 56] = [
-    // __afl_area_ptr[current_pc ^ previous_pc]++;
-    // previous_pc = current_pc >> 1;
-    0xE1, 0x0B, 0xBF, 0xA9, // stp x1, x2, [sp, -0x10]!
-    0xE3, 0x13, 0xBF, 0xA9, // stp x3, x4, [sp, -0x10]!
-    // x0 = current_pc
-    0x81, 0x01, 0x00, 0x58, // ldr x1, #0x30, =__afl_area_ptr
-    0xa2, 0x01, 0x00, 0x58, // ldr x2, #0x38, =&previous_pc
-    0x44, 0x00, 0x40, 0xf9, // ldr x4, [x2] (=previous_pc)
-    // __afl_area_ptr[current_pc ^ previous_pc]++;
-    0x84, 0x00, 0x00, 0xca, // eor x4, x4, x0
-    0x23, 0x68, 0x64, 0xf8, // ldr x3, [x1, x4]
-    0x63, 0x04, 0x00, 0x91, // add x3, x3, #1
-    0x23, 0x68, 0x24, 0xf8, // str x3, [x1, x2]
-    // previous_pc = current_pc >> 1;
-    0xe0, 0x07, 0x40, 0x8b, // add x0, xzr, x0, LSR #1
-    0x40, 0x00, 0x00, 0xf9, // str x0, [x2]
-    0xE3, 0x13, 0xc1, 0xA8, // ldp x3, x4, [sp], #0x10
-    0xE1, 0x0B, 0xc1, 0xA8, // ldp x1, x2, [sp], #0x10
-    0xC0, 0x03, 0x5F, 0xD6, // ret
-
-    // &afl_area_ptr
-    // &afl_prev_loc_ptr
-];
-
-/// The implementation of the FridaEdgeCoverageHelper
-impl<'a> FridaEdgeCoverageHelper<'a> {
-    /// Constructor function to create a new FridaEdgeCoverageHelper, given a module_name.
-    pub fn new(gum: &'a Gum, module_name: &str) -> Self {
-        let mut helper = Self {
-            map: [0u8; MAP_SIZE],
-            previous_pc: RefCell::new(0x0),
-            base_address: Module::find_base_address(module_name).0 as u64,
-            size: get_module_size(module_name),
-            current_log_impl: 0,
-            transformer: None,
-        };
-
-        let transformer = Transformer::from_callback(gum, |basic_block, _output| {
-            let mut first = true;
-            for instruction in basic_block {
-                if first {
-                    first = false;
-                    let address = unsafe { (*instruction.instr()).address };
-                    if address >= helper.base_address
-                        && address <= helper.base_address + helper.size as u64
-                    {
-                        let writer = _output.writer();
-                        if helper.current_log_impl == 0
-                            || !writer.can_branch_directly_to(helper.current_log_impl)
-                            || !writer.can_branch_directly_between(
-                                writer.pc() + 128,
-                                helper.current_log_impl,
-                            )
-                        {
-                            let after_log_impl = writer.code_offset() + 1;
-
-                            #[cfg(target_arch = "x86_64")]
-                            writer.put_jmp_near_label(after_log_impl);
-                            #[cfg(target_arch = "aarch64")]
-                            writer.put_b_label(after_log_impl);
-
-                            helper.current_log_impl = writer.pc();
-                            writer.put_bytes(&MAYBE_LOG_CODE);
-                            let prev_loc_pointer = helper.previous_pc.as_ptr() as *mut _ as usize;
-                            let map_pointer = helper.map.as_ptr() as usize;
-
-                            writer.put_bytes(&prev_loc_pointer.to_ne_bytes());
-                            writer.put_bytes(&map_pointer.to_ne_bytes());
-
-                            writer.put_label(after_log_impl);
-                        }
-                        #[cfg(target_arch = "x86_64")]
-                        {
-                            println!("here");
-                            writer.put_lea_reg_reg_offset(
-                                X86Register::Rsp,
-                                X86Register::Rsp,
-                                -(frida_gum_sys::GUM_RED_ZONE_SIZE as i32),
-                            );
-                            writer.put_push_reg(X86Register::Rdi);
-                            writer.put_mov_reg_address(
-                                X86Register::Rdi,
-                                ((address >> 4) ^ (address << 8)) & (MAP_SIZE - 1) as u64,
-                            );
-                            writer.put_call_address(helper.current_log_impl);
-                            writer.put_pop_reg(X86Register::Rdi);
-                            writer.put_lea_reg_reg_offset(
-                                X86Register::Rsp,
-                                X86Register::Rsp,
-                                frida_gum_sys::GUM_RED_ZONE_SIZE as i32,
-                            );
-                        }
-                        #[cfg(target_arch = "aarch64")]
-                        {
-                            writer.put_stp_reg_reg_reg_offset(
-                                Aarch64Register::Lr,
-                                Aarch64Register::X0,
-                                Aarch64Register::Sp,
-                                -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
-                                IndexMode::PreAdjust,
-                            );
-                            writer.put_ldr_reg_u64(
-                                Aarch64Register::X0,
-                                ((address >> 4) ^ (address << 8)) & (MAP_SIZE - 1) as u64,
-                            );
-                            writer.put_bl_imm(helper.current_log_impl);
-                            writer.put_ldp_reg_reg_reg_offset(
-                                Aarch64Register::Lr,
-                                Aarch64Register::X0,
-                                Aarch64Register::Sp,
-                                16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64,
-                                IndexMode::PostAdjust,
-                            );
-                        }
-                    }
-                }
-                instruction.keep()
-            }
-        });
-
-        helper.transformer = Some(transformer);
-        helper
-    }
-}
-
-struct FridaInProcessExecutor<'a, FH, H, I, OT>
+struct FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT>
 where
-    FH: FridaHelper<'a>,
+    FH: FridaHelper<'b>,
     H: FnMut(&[u8]) -> ExitKind,
     I: Input + HasTargetBytes,
     OT: ObserversTuple,
 {
-    base: InProcessExecutor<'a, H, I, OT>,
+    base: TimeoutExecutor<InProcessExecutor<'a, H, I, OT>, I, OT>,
     /// Frida's dynamic rewriting engine
     stalker: Stalker<'a>,
     /// User provided callback for instrumentation
-    helper: &'a FH,
+    helper: &'c mut FH,
     followed: bool,
+    _phantom: PhantomData<&'b u8>,
 }
 
-impl<'a, FH, H, I, OT> Executor<I> for FridaInProcessExecutor<'a, FH, H, I, OT>
+impl<'a, 'b, 'c, FH, H, I, OT> Executor<I> for FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT>
 where
-    FH: FridaHelper<'a>,
+    FH: FridaHelper<'b>,
     H: FnMut(&[u8]) -> ExitKind,
     I: Input + HasTargetBytes,
     OT: ObserversTuple,
@@ -255,22 +66,34 @@ where
         EM: EventManager<I, S>,
     {
-        if !self.followed {
-            self.followed = true;
-            self.stalker
-                .follow_me::<NoneEventSink>(self.helper.transformer(), None);
-        } else {
-            self.stalker.activate(NativePointer(
-                self.base.harness_mut() as *mut _ as *mut c_void
-            ))
+        if self.helper.stalker_enabled() {
+            if !self.followed {
+                self.followed = true;
+                self.stalker
+                    .follow_me::<NoneEventSink>(self.helper.transformer(), None);
+            } else {
+                self.stalker.activate(NativePointer(
+                    self.base.inner().harness_mut() as *mut _ as *mut c_void
+                ))
+            }
         }
+
+        self.helper.pre_exec(input);
+
         self.base.pre_exec(state, event_mgr, input)
     }
 
     /// Instruct the target about the input and run
     #[inline]
     fn run_target(&mut self, input: &I) -> Result<ExitKind, Error> {
-        self.base.run_target(input)
+        let res = self.base.run_target(input);
+        if unsafe { ASAN_ERRORS.is_some() && !ASAN_ERRORS.as_ref().unwrap().is_empty() } {
+            println!("Crashing target as it had ASAN errors");
+            unsafe {
+                libc::raise(libc::SIGABRT);
+            }
+        }
+        res
     }
 
     /// Called right after execution finished.
@@ -284,14 +107,17 @@ where
         EM: EventManager<I, S>,
     {
-        self.stalker.deactivate();
+        if self.helper.stalker_enabled() {
+            self.stalker.deactivate();
+        }
+        self.helper.post_exec(input);
         self.base.post_exec(state, event_mgr, input)
     }
 }
 
-impl<'a, FH, H, I, OT> HasObservers<OT> for FridaInProcessExecutor<'a, FH, H, I, OT>
+impl<'a, 'b, 'c, FH, H, I, OT> HasObservers<OT> for FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT>
 where
-    FH: FridaHelper<'a>,
+    FH: FridaHelper<'b>,
     H: FnMut(&[u8]) -> ExitKind,
     I: Input + HasTargetBytes,
     OT: ObserversTuple,
@@ -307,9 +133,9 @@ where
     }
 }
 
-impl<'a, FH, H, I, OT> Named for FridaInProcessExecutor<'a, FH, H, I, OT>
+impl<'a, 'b, 'c, FH, H, I, OT> Named for FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT>
 where
-    FH: FridaHelper<'a>,
+    FH: FridaHelper<'b>,
     H: FnMut(&[u8]) -> ExitKind,
     I: Input + HasTargetBytes,
     OT: ObserversTuple,
@@ -319,31 +145,37 @@ where
     }
 }
 
-impl<'a, FH, H, I, OT> FridaInProcessExecutor<'a, FH, H, I, OT>
+impl<'a, 'b, 'c, FH, H, I, OT> FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT>
 where
-    FH: FridaHelper<'a>,
+    FH: FridaHelper<'b>,
     H: FnMut(&[u8]) -> ExitKind,
     I: Input + HasTargetBytes,
     OT: ObserversTuple,
 {
-    pub fn new(gum: &'a Gum, base: InProcessExecutor<'a, H, I, OT>, helper: &'a FH) -> Self {
-        let mut stalker = Stalker::new(gum);
+    pub fn new(
+        gum: &'a Gum,
+        base: InProcessExecutor<'a, H, I, OT>,
+        helper: &'c mut FH,
+        timeout: Duration,
+    ) -> Self {
+        let stalker = Stalker::new(gum);
 
         // Let's exclude the main module and libc.so at least:
-        stalker.exclude(&MemoryRange::new(
-            Module::find_base_address(&env::args().next().unwrap()),
-            get_module_size(&env::args().next().unwrap()),
-        ));
-        stalker.exclude(&MemoryRange::new(
-            Module::find_base_address("libc.so"),
-            get_module_size("libc.so"),
-        ));
+        //stalker.exclude(&MemoryRange::new(
+        //Module::find_base_address(&env::args().next().unwrap()),
+        //get_module_size(&env::args().next().unwrap()),
+        //));
+        //stalker.exclude(&MemoryRange::new(
+        //Module::find_base_address("libc.so"),
+        //get_module_size("libc.so"),
+        //));
 
         Self {
-            base,
+            base: TimeoutExecutor::new(base, timeout),
             stalker,
             helper,
             followed: false,
+            _phantom: PhantomData,
         }
     }
 }
@@ -362,6 +194,11 @@ pub fn main() {
     fuzz(
         &env::args().nth(1).expect("no module specified"),
         &env::args().nth(2).expect("no symbol specified"),
+        env::args()
+            .nth(3)
+            .expect("no modules to instrument specified")
+            .split(":")
+            .collect(),
         vec![PathBuf::from("./corpus")],
         PathBuf::from("./crashes"),
         1337,
@@ -387,6 +224,7 @@ fn fuzz(
 unsafe fn fuzz(
     module_name: &str,
     symbol_name: &str,
+    modules_to_instrument: Vec<&str>,
     corpus_dirs: Vec<PathBuf>,
     objective_dir: PathBuf,
     broker_port: u16,
@@ -395,37 +233,43 @@ unsafe fn fuzz(
     let stats = SimpleStats::new(|s| println!("{}", s));
 
     // The restarting state will spawn the same process again as child, then restarted it each time it crashes.
-    let (state, mut restarting_mgr) =
-        match setup_restarting_mgr_std(stats, broker_port) {
-            Ok(res) => res,
-            Err(err) => match err {
-                Error::ShuttingDown => {
-                    return Ok(());
-                }
-                _ => {
-                    panic!("Failed to setup the restarter: {}", err);
-                }
-            },
-        };
+    let (state, mut restarting_mgr) = match setup_restarting_mgr_std(stats, broker_port) {
+        Ok(res) => res,
+        Err(err) => match err {
+            Error::ShuttingDown => {
+                return Ok(());
+            }
+            _ => {
+                panic!("Failed to setup the restarter: {}", err);
+            }
+        },
+    };
 
     let gum = Gum::obtain();
+
     let lib = libloading::Library::new(module_name).unwrap();
     let target_func: libloading::Symbol<unsafe extern fn(data: *const u8, size: usize) -> i32> =
         lib.get(symbol_name.as_bytes()).unwrap();
 
-    let mut frida_helper = FridaEdgeCoverageHelper::new(&gum, module_name);
-
-    // Create an observation channel using the coverage map
-    let edges_observer = HitcountsMapObserver::new(StdMapObserver::new_from_ptr(
-        "edges",
-        frida_helper.map.as_mut_ptr(),
-        MAP_SIZE,
-    ));
 
     let mut frida_harness = move |buf: &[u8]| {
         (target_func)(buf.as_ptr(), buf.len());
         ExitKind::Ok
     };
 
+    let mut frida_helper = FridaInstrumentationHelper::new(
+        &gum,
+        FridaOptions::parse_env_options(),
+        module_name,
+        &modules_to_instrument,
+    );
+
+    // Create an observation channel using the coverage map
+    let edges_observer = HitcountsMapObserver::new(StdMapObserver::new_from_ptr(
+        "edges",
+        frida_helper.map_ptr(),
+        MAP_SIZE,
+    ));
+
     // If not restarting, create a State from scratch
     let mut state = state.unwrap_or_else(|| {
         State::new(
@@ -441,9 +285,14 @@ unsafe fn fuzz(
             )),
             // Corpus in which we store solutions (crashes in this example),
            // on disk so the user can get them after stopping the fuzzer
-            OnDiskCorpus::new(objective_dir).unwrap(),
+            OnDiskCorpus::new_save_meta(objective_dir, Some(OnDiskMetadataFormat::JsonPretty))
+                .unwrap(),
             // Feedbacks to recognize an input as solution
-            tuple_list!(CrashFeedback::new()),
+            tuple_list!(
+                CrashFeedback::new(),
+                TimeoutFeedback::new(),
+                AsanErrorsFeedback::new()
+            ),
         )
     });
 
@@ -474,21 +323,22 @@ unsafe fn fuzz(
         InProcessExecutor::new(
             "in-process(edges)",
             &mut frida_harness,
-            tuple_list!(edges_observer),
+            tuple_list!(edges_observer, AsanErrorsObserver::new(&ASAN_ERRORS)),
            &mut state,
             &mut restarting_mgr,
         )?,
-        &frida_helper,
+        &mut frida_helper,
+        Duration::new(10, 0),
     );
 
     // Let's exclude the main module and libc.so at least:
-    executor.stalker.exclude(&MemoryRange::new(
-        Module::find_base_address(&env::args().next().unwrap()),
-        get_module_size(&env::args().next().unwrap()),
-    ));
-    executor.stalker.exclude(&MemoryRange::new(
-        Module::find_base_address("libc.so"),
-        get_module_size("libc.so"),
-    ));
+    //executor.stalker.exclude(&MemoryRange::new(
+    //Module::find_base_address(&env::args().next().unwrap()),
+    //get_module_size(&env::args().next().unwrap()),
+    //));
+    //executor.stalker.exclude(&MemoryRange::new(
+    //Module::find_base_address("libc.so"),
+    //get_module_size("libc.so"),
+    //));
 
     // In case the corpus is empty (on first run), reset
     if state.corpus().count() < 1 {
@@ -501,6 +351,7 @@ unsafe fn fuzz(
         println!("We imported {} inputs from disk.", state.corpus().count());
     }
 
+    //executor.helper.register_thread();
     fuzzer.fuzz_loop(&mut state, &mut executor, &mut restarting_mgr, &scheduler)?;
 
     // Never reached
diff --git a/fuzzers/libfuzzer_stb_image/src/main.rs b/fuzzers/libfuzzer_stb_image/src/main.rs
index abc8492c83..1f018de01f 100644
--- a/fuzzers/libfuzzer_stb_image/src/main.rs
+++ b/fuzzers/libfuzzer_stb_image/src/main.rs
@@ -4,12 +4,12 @@ use std::{env, path::PathBuf};
 
 use libafl::{
-    bolts::{shmem::StdShMem, tuples::tuple_list},
+    bolts::tuples::tuple_list,
     corpus::{
         Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus,
         QueueCorpusScheduler,
     },
-    events::setup_restarting_mgr,
+    events::setup_restarting_mgr_std,
     executors::{inprocess::InProcessExecutor, ExitKind},
     feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback},
     fuzzer::{Fuzzer, StdFuzzer},
@@ -25,7 +25,6 @@ use libafl::{
 
 use libafl_targets::{libfuzzer_initialize, libfuzzer_test_one_input, EDGES_MAP, MAX_EDGES_NUM};
 
-/// The main fn, no_mangle as it is a C main
 pub fn main() {
     // Registry the metadata types used in this fuzzer
     // Needed only on no_std
@@ -49,18 +48,17 @@ fn fuzz(corpus_dirs: Vec<PathBuf>, objective_dir: PathBuf, broker_port: u16) ->
     let stats = SimpleStats::new(|s| println!("{}", s));
 
     // The restarting state will spawn the same process again as child, then restarted it each time it crashes.
-    let (state, mut restarting_mgr) =
-        match setup_restarting_mgr::<_, _, StdShMem, _>(stats, broker_port) {
-            Ok(res) => res,
-            Err(err) => match err {
-                Error::ShuttingDown => {
-                    return Ok(());
-                }
-                _ => {
-                    panic!("Failed to setup the restarter: {}", err);
-                }
-            },
-        };
+    let (state, mut restarting_mgr) = match setup_restarting_mgr_std(stats, broker_port) {
+        Ok(res) => res,
+        Err(err) => match err {
+            Error::ShuttingDown => {
+                return Ok(());
+            }
+            _ => {
+                panic!("Failed to setup the restarter: {}", err);
+            }
+        },
+    };
 
     // Create an observation channel using the coverage map
     // We don't use the hitcounts (see the Cargo.toml, we use pcguard_edges)
diff --git a/libafl/Cargo.toml b/libafl/Cargo.toml
index 603db866d1..b6aeebda97 100644
--- a/libafl/Cargo.toml
+++ b/libafl/Cargo.toml
@@ -39,7 +39,7 @@ std = [] # print, sharedmap, ... support
 anymap_debug = ["serde_json"] # uses serde_json to Debug the anymap trait. Disable for smaller footprint.
 derive = ["libafl_derive"] # provide derive(SerdeAny) macro.
 llmp_small_maps = [] # reduces initial map size for llmp
-llmp_debug = [] # Enables debug output for LLMP
+llmp_debug = ["backtrace"] # Enables debug output for LLMP
 
 [[example]]
 name = "llmp_test"
@@ -59,13 +59,20 @@ ctor = "*"
 libafl_derive = { version = "*", optional = true, path = "../libafl_derive" }
 serde_json = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } # an easy way to debug print SerdeAnyMap
 num_enum = "0.5.1"
+spin = "0.9.0"
 
-backtrace = "0.3" # for llmp_debug
+[target.'cfg(target_os = "android")'.dependencies]
+backtrace = { version = "0.3", optional = true, default-features = false, features = ["std", "libbacktrace"] } # for llmp_debug
+
+[target.'cfg(not(target_os = "android"))'.dependencies]
+backtrace = { version = "0.3", optional = true } # for llmp_debug
 
 [target.'cfg(unix)'.dependencies]
 libc = "0.2" # For (*nix) libc
 nix = "0.20.0"
 uds = "0.2.3"
+lock_api = "0.4.3"
+regex = "1.4.5"
 
 [target.'cfg(windows)'.dependencies]
 windows = "0.4.0"
diff --git a/libafl/examples/llmp_test/main.rs b/libafl/examples/llmp_test/main.rs
index eda837b958..ac02feff2d 100644
--- a/libafl/examples/llmp_test/main.rs
+++ b/libafl/examples/llmp_test/main.rs
@@ -25,8 +25,8 @@ const _TAG_1MEG_V1: Tag = 0xB1111161;
 
 #[cfg(all(unix, feature = "std"))]
 fn adder_loop(port: u16) -> ! {
-    let shmem_provider = Rc::new(RefCell::new(StdShMemProvider::new()));
-    let mut client = llmp::LlmpClient::create_attach_to_tcp(&shmem_provider, port).unwrap();
+    let shmem_provider = StdShMemProvider::new().unwrap();
+    let mut client = llmp::LlmpClient::create_attach_to_tcp(shmem_provider, port).unwrap();
     let mut last_result: u32 = 0;
     let mut current_result: u32 = 0;
     loop {
@@ -68,11 +68,8 @@ fn adder_loop(port: u16) -> ! {
 
 #[cfg(all(unix, feature = "std"))]
 fn large_msg_loop(port: u16) -> ! {
-    let mut client = llmp::LlmpClient::create_attach_to_tcp(
-        &Rc::new(RefCell::new(StdShMemProvider::new())),
-        port,
-    )
-    .unwrap();
+    let mut client =
+        llmp::LlmpClient::create_attach_to_tcp(StdShMemProvider::new().unwrap(), port).unwrap();
 
     let meg_buf = [1u8; 1 << 20];
 
@@ -133,8 +130,7 @@ fn main() {
 
     match mode.as_str() {
         "broker" => {
-            let mut broker =
-                llmp::LlmpBroker::new(&Rc::new(RefCell::new(StdShMemProvider::new()))).unwrap();
+            let mut broker = llmp::LlmpBroker::new(StdShMemProvider::new().unwrap()).unwrap();
             broker
                 .launch_listener(llmp::Listener::Tcp(
                     std::net::TcpListener::bind(format!("127.0.0.1:{}", port)).unwrap(),
@@ -143,11 +139,9 @@ fn main() {
             broker.loop_forever(&mut broker_message_hook, Some(Duration::from_millis(5)))
         }
         "ctr" => {
-            let mut client = llmp::LlmpClient::create_attach_to_tcp(
-                &Rc::new(RefCell::new(StdShMemProvider::new())),
-                port,
-            )
-            .unwrap();
+            let mut client =
+                llmp::LlmpClient::create_attach_to_tcp(StdShMemProvider::new().unwrap(), port)
+                    .unwrap();
             let mut counter: u32 = 0;
             loop {
                 counter = counter.wrapping_add(1);
diff --git a/libafl/src/bolts/llmp.rs b/libafl/src/bolts/llmp.rs
index 2cbadc4655..11888c19d3 100644
--- a/libafl/src/bolts/llmp.rs
+++ b/libafl/src/bolts/llmp.rs
@@ -52,9 +52,8 @@ Then register some clientloops using llmp_broker_register_threaded_clientloop
 
 */
 
-use alloc::{rc::Rc, string::String, vec::Vec};
+use alloc::{string::String, vec::Vec};
 use core::{
-    cell::RefCell,
     cmp::max,
     fmt::Debug,
     mem::size_of,
@@ -75,11 +74,13 @@ use std::{
 
 use backtrace::Backtrace;
 
 #[cfg(unix)]
-use crate::bolts::os::unix_signals::{c_void, setup_signal_handler, siginfo_t, Handler, Signal};
+use crate::bolts::os::unix_signals::{setup_signal_handler, siginfo_t, Handler, Signal};
 use crate::{
     bolts::shmem::{ShMem, ShMemDescription, ShMemId, ShMemProvider},
     Error,
 };
+#[cfg(unix)]
+use libc::ucontext_t;
 
 /// We'll start off with 256 megabyte maps per fuzzer client
 #[cfg(not(feature = "llmp_small_maps"))]
@@ -215,8 +216,12 @@ fn new_map_size(max_alloc: usize) -> usize {
 /// Initialize a new llmp_page. size should be relative to
 /// llmp_page->messages
 unsafe fn _llmp_page_init<SHM: ShMem>(shmem: &mut SHM, sender: u32, allow_reinit: bool) {
-    let map_size = shmem.map().len();
+    #[cfg(all(feature = "llmp_debug", feature = "std"))]
+    dbg!("_llmp_page_init: shmem {}", &shmem);
+    let map_size = shmem.len();
     let page = shmem2page_mut(shmem);
+    #[cfg(all(feature = "llmp_debug", feature = "std"))]
+    dbg!("_llmp_page_init: page {}", *page);
     if (*page).magic == PAGE_INITIALIZED_MAGIC && !allow_reinit {
         panic!(
             "Tried to initialize page {:?} twice (for shmem {:?})",
@@ -234,6 +239,7 @@ unsafe fn _llmp_page_init<SHM: ShMem>(shmem: &mut SHM, sender: u32, allow_reini
     (*(*page).messages.as_mut_ptr()).tag = LLMP_TAG_UNSET;
     ptr::write_volatile(&mut (*page).save_to_unmap, 0);
     ptr::write_volatile(&mut (*page).sender_dead, 0);
+    assert!((*page).size_total != 0);
 }
 
 /// Get the next pointer and make sure it's in the current page, and has enough space.
@@ -365,7 +371,7 @@ where
 {
     #[cfg(feature = "std")]
     /// Creates either a broker, if the tcp port is not bound, or a client, connected to this port.
-    pub fn on_port(shmem_provider: &Rc<RefCell<SP>>, port: u16) -> Result<Self, Error> {
+    pub fn on_port(shmem_provider: SP, port: u16) -> Result<Self, Error> {
         match TcpListener::bind(format!("127.0.0.1:{}", port)) {
             Ok(listener) => {
                 // We got the port. We are the broker! :)
@@ -390,12 +396,6 @@ where
         }
     }
 
-    pub fn shmem_provider(&mut self) -> &Rc<RefCell<SP>> {
-        match self {
-            LlmpConnection::IsBroker { broker } => &broker.shmem_provider,
-            LlmpConnection::IsClient { client } => &client.shmem_provider,
-        }
-    }
 
     /// Describe this in a reproducable fashion, if it's a client
     pub fn describe(&self) -> Result<LlmpClientDescription, Error> {
         Ok(match self {
@@ -406,7 +406,7 @@ where
 
     /// Recreate an existing client from the stored description
     pub fn existing_client_from_description(
-        shmem_provider: &Rc<RefCell<SP>>,
+        shmem_provider: SP,
         description: &LlmpClientDescription,
     ) -> Result<LlmpConnection<SP>, Error> {
         Ok(LlmpConnection::IsClient {
@@ -480,7 +480,7 @@ where
     /// By keeping the message history around,
     /// new clients may join at any time in the future.
     pub keep_pages_forever: bool,
-    shmem_provider: Rc<RefCell<SP>>,
+    shmem_provider: SP,
 }
 
 /// An actor on the sending part of the shared map
@@ -488,23 +488,17 @@ impl<SP> LlmpSender<SP>
 where
     SP: ShMemProvider,
 {
-    pub fn new(
-        shmem_provider: &Rc<RefCell<SP>>,
-        id: u32,
-        keep_pages_forever: bool,
-    ) -> Result<Self, Error> {
+    pub fn new(mut shmem_provider: SP, id: u32, keep_pages_forever: bool) -> Result<Self, Error> {
         Ok(Self {
             id,
             last_msg_sent: ptr::null_mut(),
             out_maps: vec![LlmpSharedMap::new(
                 0,
-                shmem_provider
-                    .borrow_mut()
-                    .new_map(LLMP_CFG_INITIAL_MAP_SIZE)?,
+                shmem_provider.new_map(LLMP_CFG_INITIAL_MAP_SIZE)?,
             )],
             // drop pages to the broker if it already read them
             keep_pages_forever,
-            shmem_provider: shmem_provider.clone(),
+            shmem_provider,
         })
     }
 
@@ -520,14 +514,11 @@ where
 
     /// Reattach to a vacant out_map, to with a previous sender stored the information in an env before.
     #[cfg(feature = "std")]
-    pub fn on_existing_from_env(
-        shmem_provider: &Rc<RefCell<SP>>,
-        env_name: &str,
-    ) -> Result<Self, Error> {
+    pub fn on_existing_from_env(mut shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
         let msg_sent_offset = msg_offset_from_env(env_name)?;
         Self::on_existing_map(
             shmem_provider.clone(),
-            shmem_provider.borrow_mut().existing_from_env(env_name)?,
+            shmem_provider.existing_from_env(env_name)?,
             msg_sent_offset,
         )
     }
@@ -565,7 +556,7 @@ where
     /// It is essential, that the receiver (or someone else) keeps a pointer to this map
     /// else reattach will get a new, empty page, from the OS, or fail.
     pub fn on_existing_map(
-        shmem_provider: Rc<RefCell<SP>>,
+        shmem_provider: SP,
         current_out_map: SP::Mem,
         last_msg_sent_offset: Option<u64>,
     ) -> Result<Self, Error> {
@@ -649,7 +640,7 @@ where
         #[cfg(all(feature = "llmp_debug", feature = "std"))]
         println!(
             "Allocating {} (>={}) bytes on page {:?} / map {:?} (last msg: {:?})",
-            complete_msg_size, buf_len, page, map, last_msg
+            complete_msg_size, buf_len, page, &map, last_msg
         );
         /* DBG("XXX complete_msg_size %lu (h: %lu)\n", complete_msg_size, sizeof(llmp_message)); */
         /* In case we don't have enough space, make sure the next page will be large
@@ -795,7 +786,6 @@ where
             let mut new_map_shmem = LlmpSharedMap::new(
                 (*old_map).sender,
                 self.shmem_provider
-                    .borrow_mut()
                     .new_map(new_map_size((*old_map).max_alloc_size))?,
             );
             let mut new_map = new_map_shmem.page_mut();
@@ -909,14 +899,12 @@ where
     // Create this client on an existing map from the given description. acquired with `self.describe`
     pub fn on_existing_from_description(
-        shmem_provider: &Rc<RefCell<SP>>,
+        mut shmem_provider: SP,
         description: &LlmpDescription,
     ) -> Result<Self, Error> {
         Self::on_existing_map(
             shmem_provider.clone(),
-            shmem_provider
-                .borrow_mut()
-                .from_description(description.shmem)?,
+            shmem_provider.from_description(description.shmem)?,
             description.last_message_offset,
         )
     }
@@ -932,7 +920,7 @@ where
     /// Pointer to the last meg this received
     pub last_msg_recvd: *const LlmpMsg,
     /// The shmem provider
-    pub shmem_provider: Rc<RefCell<SP>>,
+    pub shmem_provider: SP,
     /// current page. After EOP, this gets replaced with the new one
     pub current_recv_map: LlmpSharedMap<SP::Mem>,
 }
@@ -944,13 +932,10 @@ where
 {
     /// Reattach to a vacant recv_map, to with a previous sender stored the information in an env before.
     #[cfg(feature = "std")]
-    pub fn on_existing_from_env(
-        shmem_provider: &Rc<RefCell<SP>>,
-        env_name: &str,
-    ) -> Result<Self, Error> {
+    pub fn on_existing_from_env(mut shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
         Self::on_existing_map(
             shmem_provider.clone(),
-            shmem_provider.borrow_mut().existing_from_env(env_name)?,
+            shmem_provider.existing_from_env(env_name)?,
             msg_offset_from_env(env_name)?,
         )
     }
@@ -968,7 +953,7 @@ where
     /// It is essential, that the sender (or someone else) keeps a pointer to the sender_map
     /// else reattach will get a new, empty page, from the OS, or fail.
     pub fn on_existing_map(
-        shmem_provider: Rc<RefCell<SP>>,
+        shmem_provider: SP,
         current_sender_map: SP::Mem,
         last_msg_recvd_offset: Option<u64>,
     ) -> Result<Self, Error> {
@@ -1053,12 +1038,11 @@ where
                     ptr::write_volatile(&mut (*page).save_to_unmap, 1);
 
                     // Map the new page. The old one should be unmapped by Drop
-                    self.current_recv_map = LlmpSharedMap::existing(
-                        self.shmem_provider.borrow_mut().from_id_and_size(
+                    self.current_recv_map =
+                        LlmpSharedMap::existing(self.shmem_provider.from_id_and_size(
                             ShMemId::from_slice(&pageinfo_cpy.shm_str),
                             pageinfo_cpy.map_size,
-                        )?,
-                    );
+                        )?);
                     page = self.current_recv_map.page_mut();
                     // Mark the new page save to unmap also (it's mapped by us, the broker now)
                     ptr::write_volatile(&mut (*page).save_to_unmap, 1);
@@ -1151,14 +1135,12 @@ where
     // Create this client on an existing map from the given description. acquired with `self.describe`
     pub fn on_existing_from_description(
-        shmem_provider: &Rc<RefCell<SP>>,
+        mut shmem_provider: SP,
         description: &LlmpDescription,
     ) -> Result<Self, Error> {
         Self::on_existing_map(
             shmem_provider.clone(),
-            shmem_provider
-                .borrow_mut()
-                .from_description(description.shmem)?,
+            shmem_provider.from_description(description.shmem)?,
             description.last_message_offset,
         )
     }
@@ -1220,6 +1202,8 @@ where
             if (*ret.page()).magic != PAGE_INITIALIZED_MAGIC {
                 panic!("Map was not priviously initialized at {:?}", &ret.shmem);
             }
+            #[cfg(all(feature = "llmp_debug", feature = "std"))]
+            dbg!("PAGE: {}", *ret.page());
         }
         ret
     }
@@ -1328,7 +1312,7 @@ where
     /// handlers
     shutting_down: bool,
     /// The ShMemProvider to use
-    shmem_provider: Rc<RefCell<SP>>,
+    shmem_provider: SP,
 }
 
 #[cfg(unix)]
@@ -1336,9 +1320,9 @@ pub struct LlmpBrokerSignalHandler {
     shutting_down: bool,
 }
 
-#[cfg(all(unix))]
+#[cfg(unix)]
 impl Handler for LlmpBrokerSignalHandler {
-    fn handle(&mut self, _signal: Signal, _info: siginfo_t, _void: *const c_void) {
+    fn handle(&mut self, _signal: Signal, _info: siginfo_t, _context: &mut ucontext_t) {
         unsafe { ptr::write_volatile(&mut self.shutting_down, true) };
     }
 
@@ -1354,14 +1338,14 @@ where
     SP: ShMemProvider,
 {
     /// Create and initialize a new llmp_broker
-    pub fn new(shmem_provider: &Rc<RefCell<SP>>) -> Result<Self, Error> {
+    pub fn new(mut shmem_provider: SP) -> Result<Self, Error> {
         Ok(LlmpBroker {
             llmp_out: LlmpSender {
                 id: 0,
                 last_msg_sent: ptr::null_mut(),
                 out_maps: vec![LlmpSharedMap::new(
                     0,
-                    shmem_provider.borrow_mut().new_map(new_map_size(0))?,
+                    shmem_provider.new_map(new_map_size(0))?,
                 )],
                 // Broker never cleans up the pages so that new
                 // clients may join at any time
@@ -1371,7 +1355,7 @@ where
             llmp_clients: vec![],
             socket_name: None,
             shutting_down: false,
-            shmem_provider: shmem_provider.clone(),
+            shmem_provider,
         })
     }
 
@@ -1515,33 +1499,30 @@ where
         // Tcp out map sends messages from background thread tcp server to foreground client
         let tcp_out_map = LlmpSharedMap::new(
             llmp_tcp_id,
-            self.shmem_provider
-                .borrow_mut()
-                .new_map(LLMP_CFG_INITIAL_MAP_SIZE)?,
+            self.shmem_provider.new_map(LLMP_CFG_INITIAL_MAP_SIZE)?,
         );
         let shmem_id = tcp_out_map.shmem.id();
         let tcp_out_map_str = *shmem_id.as_slice();
         let tcp_out_map_size = tcp_out_map.shmem.len();
         self.register_client(tcp_out_map);
 
-        let shmem_provider_clone = self.shmem_provider.borrow_mut().clone();
+        let mut shmem_provider_clone = self.shmem_provider.clone();
 
         Ok(thread::spawn(move || {
-            let shmem_provider = Rc::new(RefCell::new(shmem_provider_clone));
+            shmem_provider_clone.post_fork();
+
             // Clone so we get a new connection to the AshmemServer if we are using
             // ServedShMemProvider
             let mut new_client_sender = LlmpSender {
                 id: 0,
                 last_msg_sent: ptr::null_mut(),
                 out_maps: vec![LlmpSharedMap::existing(
-                    shmem_provider
-                        .borrow_mut()
+                    shmem_provider_clone
                         .from_id_and_size(ShMemId::from_slice(&tcp_out_map_str), tcp_out_map_size)
                         .unwrap(),
                 )],
                 // drop pages to the broker if it already read them
                 keep_pages_forever: false,
-                shmem_provider: shmem_provider.clone(),
+                shmem_provider: shmem_provider_clone.clone(),
             };
 
             loop {
@@ -1627,7 +1608,7 @@ where
                 } else {
                     let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo;
 
-                    match self.shmem_provider.borrow_mut().from_id_and_size(
+                    match self.shmem_provider.from_id_and_size(
                         ShMemId::from_slice(&(*pageinfo).shm_str),
                         (*pageinfo).map_size,
                     ) {
@@ -1686,7 +1667,7 @@ pub struct LlmpClient<SP>
 where
     SP: ShMemProvider,
 {
-    shmem_provider: Rc<RefCell<SP>>,
+    shmem_provider: SP,
     /// Outgoing channel to the broker
     pub sender: LlmpSender<SP>,
     /// Incoming (broker) broadcast map
@@ -1703,7 +1684,7 @@ where
     /// It is essential, that the broker (or someone else) kept a pointer to the out_map
     /// else reattach will get a new, empty page, from the OS, or fail
     pub fn on_existing_map(
-        shmem_provider: Rc<RefCell<SP>>,
+        shmem_provider: SP,
         _current_out_map: SP::Mem,
         _last_msg_sent_offset: Option<u64>,
         current_broker_map: SP::Mem,
@@ -1726,20 +1707,17 @@ where
 
     /// Recreate this client from a previous client.to_env
     #[cfg(feature = "std")]
-    pub fn on_existing_from_env(
-        shmem_provider: &Rc<RefCell<SP>>,
-        env_name: &str,
-    ) -> Result<Self, Error> {
+    pub fn on_existing_from_env(shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
         Ok(Self {
             sender: LlmpSender::on_existing_from_env(
-                shmem_provider,
+                shmem_provider.clone(),
                 &format!("{}_SENDER", env_name),
             )?,
             receiver: LlmpReceiver::on_existing_from_env(
-                shmem_provider,
+                shmem_provider.clone(),
                 &format!("{}_RECEIVER", env_name),
             )?,
-            shmem_provider: shmem_provider.clone(),
+            shmem_provider,
         })
     }
 
@@ -1761,16 +1739,19 @@ where
 
     /// Create an existing client from description
     fn existing_client_from_description(
-        shmem_provider: &Rc<RefCell<SP>>,
+        shmem_provider: SP,
         description: &LlmpClientDescription,
     ) -> Result<Self, Error> {
         Ok(Self {
-            sender: LlmpSender::on_existing_from_description(shmem_provider, &description.sender)?,
+            sender: LlmpSender::on_existing_from_description(
+                shmem_provider.clone(),
+                &description.sender,
+            )?,
             receiver: LlmpReceiver::on_existing_from_description(
-                shmem_provider,
+                shmem_provider.clone(),
                 &description.receiver,
             )?,
-            shmem_provider: shmem_provider.clone(),
+            shmem_provider,
         })
     }
 
@@ -1787,7 +1768,7 @@ where
 
     /// Creates a new LlmpClient
     pub fn new(
-        shmem_provider: &Rc<RefCell<SP>>,
+        mut shmem_provider: SP,
         initial_broker_map: LlmpSharedMap<SP::Mem>,
     ) -> Result<Self, Error> {
         Ok(Self {
             sender: LlmpSender {
                 id: 0,
                 last_msg_sent: ptr::null_mut(),
                 out_maps: vec![LlmpSharedMap::new(0, {
-                    shmem_provider
-                        .borrow_mut()
-                        .new_map(LLMP_CFG_INITIAL_MAP_SIZE)?
+                    shmem_provider.new_map(LLMP_CFG_INITIAL_MAP_SIZE)?
                 })],
                 // drop pages to the broker if it already read them
                 keep_pages_forever: false,
@@ -1810,7 +1789,7 @@ where
                 last_msg_recvd: ptr::null_mut(),
                 shmem_provider: shmem_provider.clone(),
             },
-            shmem_provider: shmem_provider.clone(),
+            shmem_provider,
         })
     }
 
@@ -1887,20 +1866,14 @@ where
 
     #[cfg(feature = "std")]
     /// Creates a new LlmpClient, reading the map id and len from env
-    pub fn create_using_env(
-        shmem_provider: &Rc<RefCell<SP>>,
-        env_var: &str,
-    ) -> Result<Self, Error> {
-        let map = LlmpSharedMap::existing(shmem_provider.borrow_mut().existing_from_env(env_var)?);
+    pub fn create_using_env(mut shmem_provider: SP, env_var: &str) -> Result<Self, Error> {
+        let map = LlmpSharedMap::existing(shmem_provider.existing_from_env(env_var)?);
         Self::new(shmem_provider, map)
     }
 
     #[cfg(feature = "std")]
     /// Create a LlmpClient, getting the ID from a given port
-    pub fn create_attach_to_tcp(
-        shmem_provider: &Rc<RefCell<SP>>,
-        port: u16,
-    ) -> Result<Self, Error> {
+    pub fn create_attach_to_tcp(mut shmem_provider: SP, port: u16) -> Result<Self, Error> {
         let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port))?;
         println!("Connected to port {}", port);
 
@@ -1915,11 +1888,7 @@ where
         let broker_map_description: ShMemDescription = postcard::from_bytes(&new_broker_map_str)?;
 
-        let map = LlmpSharedMap::existing(
-            shmem_provider
-                .borrow_mut()
-                .from_description(broker_map_description)?,
-        );
+        let map = LlmpSharedMap::existing(shmem_provider.from_description(broker_map_description)?);
         let ret = Self::new(shmem_provider, map)?;
 
         let own_map_description_bytes =
@@ -1933,7 +1902,6 @@ where
 
 #[cfg(all(unix, feature = "std"))]
 mod tests {
-    use alloc::rc::Rc;
 
     use std::{thread::sleep, time::Duration};
 
     use super::{
@@ -1945,18 +1913,16 @@ mod tests {
 
     use crate::bolts::shmem::{ShMemProvider, StdShMemProvider};
 
-    use core::cell::RefCell;
-
     #[test]
     pub fn llmp_connection() {
-        let shmem_provider = Rc::new(RefCell::new(StdShMemProvider::new()));
-        let mut broker = match LlmpConnection::on_port(&shmem_provider, 1337).unwrap() {
+        let shmem_provider = StdShMemProvider::new().unwrap();
+        let mut broker = match LlmpConnection::on_port(shmem_provider.clone(), 1337).unwrap() {
            IsClient { client: _ } => panic!("Could not bind to port as broker"),
             IsBroker { broker } => broker,
         };
 
         // Add the first client (2nd, actually, because of the tcp listener client)
-        let mut client = match LlmpConnection::on_port(&shmem_provider, 1337).unwrap() {
+        let mut client = match LlmpConnection::on_port(shmem_provider.clone(), 1337).unwrap() {
             IsBroker { broker: _ } => panic!("Second connect should be a client!"),
             IsClient { client } => client,
         };
@@ -1973,6 +1939,7 @@ mod tests {
         client.send_buf(tag, &arr).unwrap();
 
         client.to_env("_ENV_TEST").unwrap();
+        #[cfg(all(feature = "llmp_debug", feature = "std"))]
         dbg!(std::env::vars());
 
         for (key, value) in std::env::vars_os() {
@@ -1980,7 +1947,7 @@ mod tests {
         }
 
         /* recreate the client from env, check if it still works */
-        client = LlmpClient::on_existing_from_env(&shmem_provider, "_ENV_TEST").unwrap();
+        client = LlmpClient::on_existing_from_env(shmem_provider, "_ENV_TEST").unwrap();
 
         client.send_buf(tag, &arr).unwrap();
 
diff --git a/libafl/src/bolts/os/ashmem_server.rs b/libafl/src/bolts/os/ashmem_server.rs
index b5aee980e6..2beb506efb 100644
--- a/libafl/src/bolts/os/ashmem_server.rs
+++ b/libafl/src/bolts/os/ashmem_server.rs
@@ -11,10 +11,13 @@ use crate::{
     },
     Error,
 };
+use core::mem::ManuallyDrop;
 use hashbrown::HashMap;
 use serde::{Deserialize, Serialize};
 use std::{
+    cell::RefCell,
     io::{Read, Write},
+    rc::Rc,
     sync::{Arc, Condvar, Mutex},
 };
 
@@ -39,11 +42,12 @@ const ASHMEM_SERVER_NAME: &str = "@ashmem_server";
 pub struct ServedShMemProvider {
     stream: UnixStream,
     inner: AshmemShMemProvider,
+    id: i32,
 }
 
 #[derive(Clone, Debug)]
 pub struct ServedShMem {
-    inner: AshmemShMem,
+    inner: ManuallyDrop<AshmemShMem>,
     server_fd: i32,
 }
 
@@ -95,13 +99,13 @@ impl ServedShMemProvider {
 
 impl Default for ServedShMemProvider {
     fn default() -> Self {
-        Self::new()
+        Self::new().unwrap()
     }
 }
 
 impl Clone for ServedShMemProvider {
     fn clone(&self) -> Self {
-        Self::new()
+        Self::new().unwrap()
     }
 }
 
@@ -109,22 +113,26 @@ impl ShMemProvider for ServedShMemProvider {
     type Mem = ServedShMem;
 
     /// Connect to the server and return a new ServedShMemProvider
-    fn new() -> Self {
-        Self {
+    fn new() -> Result<Self, Error> {
+        let mut res = Self {
             stream: UnixStream::connect_to_unix_addr(
                 &UnixSocketAddr::new(ASHMEM_SERVER_NAME).unwrap(),
-            )
-            .expect("Unable to open connection to ashmem service"),
-            inner: AshmemShMemProvider::new(),
-        }
+            )?,
+            inner: AshmemShMemProvider::new()?,
+            id: -1,
+        };
+        let (id, _) = res.send_receive(AshmemRequest::Hello(None));
+        res.id = id;
+        Ok(res)
     }
 
     fn new_map(&mut self, map_size: usize) -> Result<Self::Mem, Error> {
         let (server_fd, client_fd) = self.send_receive(AshmemRequest::NewMap(map_size));
 
         Ok(ServedShMem {
-            inner: self
-                .inner
-                .from_id_and_size(ShMemId::from_string(&format!("{}", client_fd)), map_size)?,
+            inner: ManuallyDrop::new(
+                self.inner
+                    .from_id_and_size(ShMemId::from_string(&format!("{}", client_fd)), map_size)?,
+            ),
             server_fd,
         })
     }
@@ -136,12 +144,30 @@ impl ShMemProvider for ServedShMemProvider {
             ShMemDescription::from_string_and_size(server_id_str, size),
         ));
         Ok(ServedShMem {
-            inner: self
-                .inner
-                .from_id_and_size(ShMemId::from_string(&format!("{}", client_fd)), size)?,
+            inner: ManuallyDrop::new(
+                self.inner
+                    .from_id_and_size(ShMemId::from_string(&format!("{}", client_fd)), size)?,
+            ),
             server_fd,
         })
     }
+
+    fn post_fork(&mut self) {
+        self.stream =
+            UnixStream::connect_to_unix_addr(&UnixSocketAddr::new(ASHMEM_SERVER_NAME).unwrap())
+                .expect("Unable to reconnect to the ashmem service");
+        let (id, _) = self.send_receive(AshmemRequest::Hello(Some(self.id)));
+        self.id = id;
+    }
+
+    fn release_map(&mut self, map: &mut Self::Mem) {
+        let (refcount, _) = self.send_receive(AshmemRequest::Deregister(map.server_fd));
+        if refcount == 0 {
+            unsafe {
+                ManuallyDrop::drop(&mut map.inner);
+            }
+        }
+    }
 }
 
 /// A request sent to the ShMem server to receive a fd to a shared map
@@ -152,38 +178,119 @@ pub enum AshmemRequest {
     /// Another client already has a map with this description mapped.
     ExistingMap(ShMemDescription),
     /// A client tells us it unregisters the previously allocated map
-    Deregister(u32),
+    Deregister(i32),
+    /// A message that tells us hello, and optionally which other client we were created from; we
+    /// return a client id.
+    Hello(Option<i32>),
 }
 
 #[derive(Debug)]
 struct AshmemClient {
     stream: UnixStream,
+    maps: HashMap<i32, Vec<Rc<RefCell<AshmemShMem>>>>,
 }
 
 impl AshmemClient {
     fn new(stream: UnixStream) -> Self {
-        Self { stream }
+        Self {
+            stream,
+            maps: HashMap::new(),
+        }
     }
 }
 
 #[derive(Debug)]
 pub struct AshmemService {
     provider: AshmemShMemProvider,
-    maps: Vec<AshmemShMem>,
+    clients: HashMap<RawFd, AshmemClient>,
+    all_maps: HashMap<i32, Rc<RefCell<AshmemShMem>>>,
+}
+
+#[derive(Debug)]
+enum AshmemResponse {
+    Mapping(Rc<RefCell<AshmemShMem>>),
+    Id(i32),
+    RefCount(u32),
 }
 
 impl AshmemService {
     /// Create a new AshMem service
     #[must_use]
-    fn new() -> Self {
-        AshmemService {
-            provider: AshmemShMemProvider::new(),
-            maps: Vec::new(),
-        }
+    fn new() -> Result<Self, Error> {
+        Ok(AshmemService {
+            provider: AshmemShMemProvider::new()?,
+            clients: HashMap::new(),
+            all_maps: HashMap::new(),
+        })
     }
 
     /// Read and handle the client request, send the answer over unix fd.
-    fn handle_client(&mut self, client: &mut AshmemClient) -> Result<(), Error> {
+    fn handle_request(&mut self, client_id: RawFd) -> Result<AshmemResponse, Error> {
+        let request = self.read_request(client_id)?;
+
+        //println!("got ashmem client: {}, request:{:?}", client_id, request);
+        // Handle the client request
+        let response = match request {
+            AshmemRequest::Hello(other_id) => {
+                if let Some(other_id) = other_id {
+                    if other_id != client_id {
+                        // remove temporarily
+                        let other_client = self.clients.remove(&other_id);
+                        let client = self.clients.get_mut(&client_id).unwrap();
+                        for (id, map) in other_client.as_ref().unwrap().maps.iter() {
+                            client.maps.insert(*id, map.clone());
+                        }
+                        self.clients.insert(other_id, other_client.unwrap());
+                    }
+                };
+                Ok(AshmemResponse::Id(client_id))
+            }
+            AshmemRequest::NewMap(map_size) => Ok(AshmemResponse::Mapping(Rc::new(RefCell::new(
+                self.provider.new_map(map_size)?,
+            )))),
+            AshmemRequest::ExistingMap(description) => {
+                let client = self.clients.get_mut(&client_id).unwrap();
+                if client.maps.contains_key(&description.id.to_int()) {
+                    Ok(AshmemResponse::Mapping(
+                        client
+                            .maps
+                            .get_mut(&description.id.to_int())
+                            .as_mut()
+                            .unwrap()
+                            .first()
+                            .as_mut()
+                            .unwrap()
+                            .clone(),
+                    ))
+                } else if self.all_maps.contains_key(&description.id.to_int()) {
+                    Ok(AshmemResponse::Mapping(
+                        self.all_maps
+                            .get_mut(&description.id.to_int())
+                            .unwrap()
+                            .clone(),
+                    ))
+                } else {
+                    let new_rc =
+                        Rc::new(RefCell::new(self.provider.from_description(description)?));
+                    self.all_maps
+                        .insert(description.id.to_int(), new_rc.clone());
+                    Ok(AshmemResponse::Mapping(new_rc))
+                }
+            }
+            AshmemRequest::Deregister(map_id) => {
+                let client = self.clients.get_mut(&client_id).unwrap();
+                let map = client.maps.entry(map_id).or_default().pop().unwrap();
+                Ok(AshmemResponse::RefCount(Rc::strong_count(&map) as u32))
+            }
+        };
+        //println!("send ashmem client: {}, response: {:?}", client_id, &response);
+
+        response
+    }
+
+    fn read_request(&mut self, client_id: RawFd) -> Result<AshmemRequest, Error> {
+        let client = self.clients.get_mut(&client_id).unwrap();
+
         // Always receive one be u32 of size, then the command.
         let mut size_bytes = [0u8; 4];
         client.stream.read_exact(&mut size_bytes)?;
@@ -196,23 +303,36 @@ impl AshmemService {
             .expect("Failed to read message body");
         let request: AshmemRequest = postcard::from_bytes(&bytes)?;
 
-        // Handle the client request
-        let mapping = match request {
-            AshmemRequest::NewMap(map_size) => self.provider.new_map(map_size)?,
-            AshmemRequest::ExistingMap(description) => {
-                self.provider.from_description(description)?
-            }
-            AshmemRequest::Deregister(_) => {
-                return Ok(());
-            }
-        };
+        Ok(request)
+    }
 
+    fn handle_client(&mut self, client_id: RawFd) -> Result<(), Error> {
+        let response = self.handle_request(client_id)?;
 
-        let id = mapping.id();
-        let server_fd: i32 = id.to_string().parse().unwrap();
-        client
-            .stream
-            .send_fds(&id.to_string().as_bytes(), &[server_fd])?;
-        self.maps.push(mapping);
+        match response {
+            AshmemResponse::Mapping(mapping) => {
+                let id = mapping.borrow().id();
+                let server_fd: i32 = id.to_string().parse().unwrap();
+                let client = self.clients.get_mut(&client_id).unwrap();
+                client
+                    .stream
+                    .send_fds(&id.to_string().as_bytes(), &[server_fd])?;
+                client
+                    .maps
+                    .entry(server_fd)
+                    .or_default()
+                    .push(mapping.clone());
+            }
+            AshmemResponse::Id(id) => {
+                let client = self.clients.get_mut(&client_id).unwrap();
+                client.stream.send_fds(&id.to_string().as_bytes(), &[])?;
+            }
+            AshmemResponse::RefCount(refcount) => {
+                let client = self.clients.get_mut(&client_id).unwrap();
+                client
+                    .stream
+                    .send_fds(&refcount.to_string().as_bytes(), &[])?;
+            }
+        }
         Ok(())
     }
 
@@ -222,7 +342,7 @@ impl AshmemService {
         let syncpair = Arc::new((Mutex::new(false), Condvar::new()));
         let childsyncpair = Arc::clone(&syncpair);
         let join_handle =
-            thread::spawn(move || Self::new().listen(ASHMEM_SERVER_NAME, childsyncpair));
+            thread::spawn(move || Self::new()?.listen(ASHMEM_SERVER_NAME, childsyncpair));
 
         let (lock, cvar) = &*syncpair;
         let mut started = lock.lock().unwrap();
@@ -252,7 +372,6 @@ impl AshmemService {
                 "The server appears to already be running. We are probably a client".to_string(),
             ));
         };
-        let mut clients: HashMap<RawFd, AshmemClient> = HashMap::new();
         let mut poll_fds: Vec<PollFd> = vec![PollFd::new(
             listener.as_raw_fd(),
             PollFlags::POLLIN | PollFlags::POLLRDNORM | PollFlags::POLLRDBAND,
@@ -278,11 +397,10 @@ impl AshmemService {
                     unsafe { *((&poll_fd as *const PollFd) as *const libc::pollfd) }.fd;
                 if revents.contains(PollFlags::POLLHUP) {
                     poll_fds.remove(poll_fds.iter().position(|item| *item == poll_fd).unwrap());
-                    clients.remove(&raw_polled_fd);
+                    self.clients.remove(&raw_polled_fd);
                 } else if revents.contains(PollFlags::POLLIN) {
-                    if clients.contains_key(&raw_polled_fd) {
-                        let mut client = clients.get_mut(&raw_polled_fd).unwrap();
-                        match self.handle_client(&mut client) {
+                    if self.clients.contains_key(&raw_polled_fd) {
+                        match self.handle_client(raw_polled_fd) {
                             Ok(()) => (),
                             Err(e) => {
                                 dbg!("Ignoring failed read from client", e, poll_fd);
@@ -304,14 +422,15 @@ impl AshmemService {
                         PollFlags::POLLIN | PollFlags::POLLRDNORM | PollFlags::POLLRDBAND,
                     );
                     poll_fds.push(pollfd);
-                    let mut client = AshmemClient::new(stream);
-                    match self.handle_client(&mut client) {
+                    let client = AshmemClient::new(stream);
+                    let client_id = client.stream.as_raw_fd();
+                    self.clients.insert(client_id, client);
+                    match self.handle_client(client_id) {
                         Ok(()) => (),
                         Err(e) => {
                             dbg!("Ignoring failed read from client", e);
                        }
                     };
-                    clients.insert(client.stream.as_raw_fd(), client);
                 }
             } else {
                 //println!("Unknown revents flags: {:?}", revents);
diff --git a/libafl/src/bolts/os/unix_signals.rs b/libafl/src/bolts/os/unix_signals.rs
index 1182798be9..0254b32657 100644
--- a/libafl/src/bolts/os/unix_signals.rs
+++ b/libafl/src/bolts/os/unix_signals.rs
@@ -13,9 +13,9 @@ use core::{
 
 use std::ffi::CString;
 
 use libc::{
-    c_int, malloc, sigaction, sigaltstack, sigemptyset, stack_t, SA_NODEFER, SA_ONSTACK,
-    SA_SIGINFO, SIGABRT, SIGALRM, SIGBUS, SIGFPE, SIGHUP, SIGILL, SIGINT, SIGKILL, SIGPIPE,
-    SIGQUIT, SIGSEGV, SIGTERM, SIGUSR2,
+    c_int, malloc, sigaction, sigaltstack, sigemptyset, stack_t, ucontext_t, SA_NODEFER,
+    SA_ONSTACK, SA_SIGINFO, SIGABRT, SIGALRM, SIGBUS, SIGFPE, SIGHUP, SIGILL, SIGINT, SIGKILL,
+    SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM, SIGTRAP, SIGUSR2,
 };
 
 use num_enum::{IntoPrimitive, TryFromPrimitive};
 
@@ -40,6 +40,7 @@ pub enum Signal {
     SigQuit = SIGQUIT,
     SigTerm = SIGTERM,
     SigInterrupt = SIGINT,
+    SigTrap = SIGTRAP,
 }
 
 pub static CRASH_SIGNALS: &[Signal] = &[
@@ -77,6 +78,7 @@ impl Display for Signal {
             Signal::SigQuit => write!(f, "SIGQUIT")?,
             Signal::SigTerm => write!(f, "SIGTERM")?,
             Signal::SigInterrupt => write!(f, "SIGINT")?,
+            Signal::SigTrap => write!(f, "SIGTRAP")?,
         };
 
         Ok(())
@@ -85,7 +87,7 @@ impl Display for Signal {
 
 pub trait Handler {
     /// Handle a signal
-    fn handle(&mut self, signal: Signal, info: siginfo_t, _void: *const c_void);
+    fn handle(&mut self, signal: Signal, info: siginfo_t, _context: &mut ucontext_t);
     /// Return a list of signals to handle
     fn signals(&self) -> Vec<Signal>;
 }
@@ -113,7 +115,7 @@ static mut SIGNAL_HANDLERS: [Option<HandlerHolder>; 32] = [
 /// # Safety
 /// This should be somewhat safe to call for signals previously registered,
 /// unless the signal handlers registered using [setup_signal_handler] are broken.
-unsafe fn handle_signal(sig: c_int, info: siginfo_t, void: *const c_void) {
+unsafe fn handle_signal(sig: c_int, info: siginfo_t, void: *mut c_void) {
     let signal = &Signal::try_from(sig).unwrap();
     let handler = {
         match &SIGNAL_HANDLERS[*signal as usize] {
@@ -121,7 +123,7 @@ unsafe fn handle_signal(sig: c_int, info: siginfo_t, void: *const c_void) {
             None => return,
         }
     };
-    handler.handle(*signal, info, void);
+    handler.handle(*signal, info, &mut *(void as *mut ucontext_t));
 }
 
 /// Setup signal handlers in a somewhat rusty way.
diff --git a/libafl/src/bolts/shmem.rs b/libafl/src/bolts/shmem.rs
index 930dc38c24..9fe1cbf076 100644
--- a/libafl/src/bolts/shmem.rs
+++ b/libafl/src/bolts/shmem.rs
@@ -16,11 +16,11 @@ pub type OsShMemProvider = Win32ShMemProvider;
 pub type OsShMem = Win32ShMem;
 
 #[cfg(target_os = "android")]
-use crate::bolts::os::ashmem_server::{ServedShMem, ServedShMemProvider};
+use crate::bolts::os::ashmem_server::ServedShMemProvider;
 #[cfg(target_os = "android")]
-pub type StdShMemProvider = ServedShMemProvider;
+pub type StdShMemProvider = RcShMemProvider<ServedShMemProvider>;
 #[cfg(target_os = "android")]
-pub type StdShMem = ServedShMem;
+pub type StdShMem = RcShMem<ServedShMemProvider>;
 
 #[cfg(all(feature = "std", not(target_os = "android")))]
 pub type StdShMemProvider = OsShMemProvider;
@@ -32,7 +32,9 @@ use serde::{Deserialize, Serialize};
 #[cfg(feature = "std")]
 use std::env;
 
-use alloc::string::ToString;
+use alloc::{rc::Rc, string::ToString};
+use core::cell::RefCell;
+use core::mem::ManuallyDrop;
 
 use crate::Error;
 
@@ -137,11 +139,11 @@ pub trait ShMem: Sized + Debug + Clone {
     }
 }
 
-pub trait ShMemProvider: Send + Clone + Default {
+pub trait ShMemProvider: Send + Clone + Default + Debug {
     type Mem: ShMem;
 
     /// Create a new instance of the provider
-    fn new() -> Self;
+    fn new() -> Result<Self, Error>;
 
     /// Create a new shared memory mapping
     fn new_map(&mut self, map_size: usize) -> Result<Self::Mem, Error>;
@@ -168,6 +170,108 @@ pub trait ShMemProvider: Send + Clone + Default {
             map_size,
         ))
     }
+
+    /// This method should be called after a fork or thread creation event, allowing the ShMem to
+    /// reset thread-specific info.
+    fn post_fork(&mut self) {
+        // do nothing
+    }
+
+    /// Release the resources associated with the given ShMem
+    fn release_map(&mut self, _map: &mut Self::Mem) {
+        // do nothing
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct RcShMem<T: ShMemProvider> {
+    internal: ManuallyDrop<T::Mem>,
+    provider: Rc<RefCell<T>>,
+}
+
+impl<T> ShMem for RcShMem<T>
+where
+    T: ShMemProvider + alloc::fmt::Debug,
+{
+    fn id(&self) -> ShMemId {
+        self.internal.id()
+    }
+
+    fn len(&self) -> usize {
+        self.internal.len()
+    }
+
+    fn map(&self) -> &[u8] {
+        self.internal.map()
+    }
+
+    fn map_mut(&mut self) -> &mut [u8] {
+        self.internal.map_mut()
+    }
+}
+
+impl<T: ShMemProvider> Drop for RcShMem<T> {
+    fn drop(&mut self) {
+        self.provider.borrow_mut().release_map(&mut self.internal)
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct RcShMemProvider<T: ShMemProvider> {
+    internal: Rc<RefCell<T>>,
+}
+
+unsafe impl<T: ShMemProvider> Send for RcShMemProvider<T> {}
+
+impl<T> ShMemProvider for RcShMemProvider<T>
+where
+    T: ShMemProvider + alloc::fmt::Debug,
+{
+    type Mem = RcShMem<T>;
+
+    fn new() -> Result<Self, Error> {
+        return Ok(Self {
+            internal: Rc::new(RefCell::new(T::new()?)),
+        });
+    }
+
+    fn new_map(&mut self, map_size: usize) -> Result<Self::Mem, Error> {
+        Ok(Self::Mem {
+            internal: ManuallyDrop::new(self.internal.borrow_mut().new_map(map_size)?),
+            provider: self.internal.clone(),
+        })
+    }
+
+    fn from_id_and_size(&mut self, id: ShMemId, size: usize) -> Result<Self::Mem, Error> {
+        Ok(Self::Mem {
+            internal: ManuallyDrop::new(self.internal.borrow_mut().from_id_and_size(id, size)?),
+            provider: self.internal.clone(),
+        })
+    }
+
+    fn release_map(&mut self, map: &mut Self::Mem) {
+        self.internal.borrow_mut().release_map(&mut map.internal)
+    }
+
+    fn clone_ref(&mut self, mapping: &Self::Mem) -> Result<Self::Mem, Error> {
+        Ok(Self::Mem {
+            internal: ManuallyDrop::new(self.internal.borrow_mut().clone_ref(&mapping.internal)?),
+            provider: self.internal.clone(),
+        })
+    }
+
+    fn post_fork(&mut self) {
+        self.internal.borrow_mut().post_fork()
+    }
+}
+
+impl<T> Default for RcShMemProvider<T>
+where
+    T: ShMemProvider + alloc::fmt::Debug,
+{
+    fn default() -> Self {
+        Self::new().unwrap()
+    }
 }
 
 #[cfg(all(unix, feature = "std"))]
@@ -320,7 +424,7 @@ pub mod unix_shmem {
     #[cfg(unix)]
     impl Default for CommonUnixShMemProvider {
         fn default() -> Self {
-            Self::new()
+            Self::new().unwrap()
         }
     }
 
@@ -329,8 +433,8 @@ pub mod unix_shmem {
     impl ShMemProvider for CommonUnixShMemProvider {
         type Mem = CommonUnixShMem;
 
-        fn new() -> Self {
-            Self {}
+        fn new() -> Result<Self, Error> {
+            Ok(Self {})
         }
         fn new_map(&mut self, map_size: usize) -> Result<Self::Mem, Error> {
             CommonUnixShMem::new(map_size)
@@ -507,7 +611,6 @@ pub mod unix_shmem {
     impl Drop for AshmemShMem {
         fn drop(&mut self) {
            unsafe {
-                //let fd = Self::fd_from_id(self.id).unwrap();
                 let fd: i32 = self.id.to_string().parse().unwrap();
 
                 let length = ioctl(fd, ASHMEM_GET_SIZE);
@@ -533,7 +636,7 @@ pub mod unix_shmem {
     #[cfg(unix)]
     impl Default for AshmemShMemProvider {
         fn default() -> Self {
-            Self::new()
+            Self::new().unwrap()
         }
     }
 
@@ -542,8 +645,8 @@ pub mod unix_shmem {
     impl ShMemProvider for AshmemShMemProvider {
         type Mem = AshmemShMem;
 
-        fn new() -> Self {
-            Self {}
+        fn new() -> Result<Self, Error> {
+            Ok(Self {})
        }
 
         fn new_map(&mut self, map_size: usize) -> Result<Self::Mem, Error> {
@@ -693,7 +796,7 @@ pub mod win32_shmem {
 
     impl Default for Win32ShMemProvider {
         fn default() -> Self {
-            Self::new()
+            Self::new().unwrap()
         }
     }
 
@@ -701,8 +804,8 @@ pub mod win32_shmem {
     impl ShMemProvider for Win32ShMemProvider {
         type Mem = Win32ShMem;
 
-        fn new() -> Self {
-            Self {}
+        fn new() -> Result<Self, Error> {
+            Ok(Self {})
         }
         fn new_map(&mut self, map_size: usize) -> Result<Self::Mem, Error> {
             Win32ShMem::new_map(map_size)
diff --git a/libafl/src/events/llmp.rs b/libafl/src/events/llmp.rs
b/libafl/src/events/llmp.rs index d8a0da2d3e..934001f0aa 100644 --- a/libafl/src/events/llmp.rs +++ b/libafl/src/events/llmp.rs @@ -1,7 +1,7 @@ //! LLMP-backed event manager for scalable multi-processed fuzzing -use alloc::{rc::Rc, string::ToString, vec::Vec}; -use core::{cell::RefCell, marker::PhantomData, time::Duration}; +use alloc::{string::ToString, vec::Vec}; +use core::{marker::PhantomData, time::Duration}; use serde::{de::DeserializeOwned, Serialize}; #[cfg(feature = "std")] @@ -12,6 +12,7 @@ use crate::bolts::{ llmp::{LlmpClient, LlmpReceiver}, shmem::StdShMemProvider, }; + use crate::{ bolts::{ llmp::{self, LlmpClientDescription, LlmpSender, Tag}, @@ -32,10 +33,7 @@ use crate::{ use crate::utils::startable_self; #[cfg(all(feature = "std", unix))] -use crate::{ - bolts::shmem::UnixShMemProvider, - utils::{fork, ForkResult}, -}; +use crate::utils::{fork, ForkResult}; #[cfg(all(feature = "std", target_os = "android"))] use crate::bolts::os::ashmem_server::AshmemService; @@ -65,41 +63,6 @@ where phantom: PhantomData<(I, S)>, } -#[cfg(feature = "std")] -#[cfg(unix)] -impl LlmpEventManager -where - I: Input, - S: IfInteresting, - ST: Stats, -{ - /// Create llmp on a port - /// If the port is not yet bound, it will act as broker - /// Else, it will act as client. - #[cfg(feature = "std")] - pub fn new_on_port_std( - shmem_provider: &Rc>, - stats: ST, - port: u16, - ) -> Result { - Ok(Self { - stats: Some(stats), - llmp: llmp::LlmpConnection::on_port(shmem_provider, port)?, - phantom: PhantomData, - }) - } - - /// If a client respawns, it may reuse the existing connection, previously stored by LlmpClient::to_env - /// Std uses UnixShMem. - #[cfg(feature = "std")] - pub fn existing_client_from_env_std( - shmem_provider: &Rc>, - env_name: &str, - ) -> Result { - Self::existing_client_from_env(shmem_provider, env_name) - } -} - impl Drop for LlmpEventManager where I: Input, @@ -124,11 +87,7 @@ where /// If the port is not yet bound, it will act as broker /// Else, it will act as client. #[cfg(feature = "std")] - pub fn new_on_port( - shmem_provider: &Rc>, - stats: ST, - port: u16, - ) -> Result { + pub fn new_on_port(shmem_provider: SP, stats: ST, port: u16) -> Result { Ok(Self { stats: Some(stats), llmp: llmp::LlmpConnection::on_port(shmem_provider, port)?, @@ -138,10 +97,7 @@ where /// If a client respawns, it may reuse the existing connection, previously stored by LlmpClient::to_env #[cfg(feature = "std")] - pub fn existing_client_from_env( - shmem_provider: &Rc>, - env_name: &str, - ) -> Result { + pub fn existing_client_from_env(shmem_provider: SP, env_name: &str) -> Result { Ok(Self { stats: None, llmp: llmp::LlmpConnection::IsClient { @@ -160,7 +116,7 @@ where /// Create an existing client from description pub fn existing_client_from_description( - shmem_provider: &Rc>, + shmem_provider: SP, description: &LlmpClientDescription, ) -> Result { Ok(Self { @@ -304,7 +260,7 @@ where let observers: OT = postcard::from_bytes(&observers_buf)?; // TODO include ExitKind in NewTestcase - let fitness = state.is_interesting(&input, &observers, ExitKind::Ok)?; + let fitness = state.is_interesting(&input, &observers, &ExitKind::Ok)?; if fitness > 0 && state .add_if_interesting(&input, fitness, scheduler)? 
@@ -400,7 +356,7 @@ where /// Deserialize the state and corpus tuple, previously serialized with `serialize_state_corpus(...)` #[allow(clippy::type_complexity)] pub fn deserialize_state_mgr( - shmem_provider: &Rc>, + shmem_provider: SP, state_corpus_serialized: &[u8], ) -> Result<(S, LlmpEventManager), Error> where @@ -525,7 +481,7 @@ where #[cfg(target_os = "android")] AshmemService::start().expect("Error starting Ashmem Service"); - setup_restarting_mgr(StdShMemProvider::new(), stats, broker_port) + setup_restarting_mgr(StdShMemProvider::new()?, stats, broker_port) } /// A restarting state is a combination of restarter and runner, that can be used on systems without `fork`. @@ -537,7 +493,7 @@ where clippy::similar_names )] // for { mgr = LlmpEventManager... } pub fn setup_restarting_mgr( - shmem_provider: SP, + mut shmem_provider: SP, //mgr: &mut LlmpEventManager, stats: ST, broker_port: u16, @@ -548,13 +504,13 @@ where SP: ShMemProvider, ST: Stats, { - let shmem_provider = Rc::new(RefCell::new(shmem_provider)); - let mut mgr = - LlmpEventManager::::new_on_port(&shmem_provider, stats, broker_port)?; + LlmpEventManager::::new_on_port(shmem_provider.clone(), stats, broker_port)?; // We start ourself as child process to actually fuzz - let (sender, mut receiver, shmem_provider) = if std::env::var(_ENV_FUZZER_SENDER).is_err() { + let (sender, mut receiver, mut new_shmem_provider) = if std::env::var(_ENV_FUZZER_SENDER) + .is_err() + { if mgr.is_broker() { // Yep, broker. Just loop here. println!("Doing broker things. Run this tool again to start fuzzing in a client."); @@ -566,13 +522,9 @@ where mgr.to_env(_ENV_FUZZER_BROKER_CLIENT_INITIAL); // First, create a channel from the fuzzer (sender) to us (receiver) to report its state for restarts. - let sender = { LlmpSender::new(&shmem_provider, 0, false)? }; + let sender = { LlmpSender::new(shmem_provider.clone(), 0, false)? }; - let map = { - shmem_provider - .borrow_mut() - .clone_ref(&sender.out_maps.last().unwrap().shmem)? - }; + let map = { shmem_provider.clone_ref(&sender.out_maps.last().unwrap().shmem)? }; let receiver = LlmpReceiver::on_existing_map(shmem_provider.clone(), map, None)?; // Store the information to a map. sender.to_env(_ENV_FUZZER_SENDER)?; @@ -613,14 +565,16 @@ where // A sender and a receiver for single communication // Clone so we get a new connection to the AshmemServer if we are using // ServedShMemProvider - let shmem_provider = Rc::new(RefCell::new(shmem_provider.borrow_mut().clone())); + shmem_provider.post_fork(); ( - LlmpSender::on_existing_from_env(&shmem_provider, _ENV_FUZZER_SENDER)?, - LlmpReceiver::on_existing_from_env(&shmem_provider, _ENV_FUZZER_RECEIVER)?, + LlmpSender::on_existing_from_env(shmem_provider.clone(), _ENV_FUZZER_SENDER)?, + LlmpReceiver::on_existing_from_env(shmem_provider.clone(), _ENV_FUZZER_RECEIVER)?, shmem_provider, ) }; + new_shmem_provider.post_fork(); + println!("We're a client, let's fuzz :)"); for (var, val) in std::env::vars() { @@ -633,7 +587,7 @@ where println!("First run. Let's set it all up"); // Mgr to send and receive msgs from/to all other fuzzer instances let client_mgr = LlmpEventManager::::existing_client_from_env( - &shmem_provider, + new_shmem_provider, _ENV_FUZZER_BROKER_CLIENT_INITIAL, )?; @@ -643,7 +597,7 @@ where Some((_sender, _tag, msg)) => { println!("Subsequent run. 
Let's load all data from shmem (received {} bytes from previous instance)", msg.len()); let (state, mgr): (S, LlmpEventManager) = - deserialize_state_mgr(&shmem_provider, &msg)?; + deserialize_state_mgr(new_shmem_provider, &msg)?; (Some(state), LlmpRestartingEventManager::new(mgr, sender)) } diff --git a/libafl/src/executors/inprocess.rs b/libafl/src/executors/inprocess.rs index b5230b9265..0b21ca6745 100644 --- a/libafl/src/executors/inprocess.rs +++ b/libafl/src/executors/inprocess.rs @@ -235,7 +235,7 @@ where mod unix_signal_handler { use alloc::vec::Vec; use core::ptr; - use libc::{c_void, siginfo_t}; + use libc::{c_void, siginfo_t, ucontext_t}; #[cfg(feature = "std")] use std::io::{stdout, Write}; @@ -273,8 +273,8 @@ mod unix_signal_handler { pub event_mgr_ptr: *mut c_void, pub observers_ptr: *const c_void, pub current_input_ptr: *const c_void, - pub crash_handler: unsafe fn(Signal, siginfo_t, *const c_void, data: &mut Self), - pub timeout_handler: unsafe fn(Signal, siginfo_t, *const c_void, data: &mut Self), + pub crash_handler: unsafe fn(Signal, siginfo_t, &mut ucontext_t, data: &mut Self), + pub timeout_handler: unsafe fn(Signal, siginfo_t, &mut ucontext_t, data: &mut Self), } unsafe impl Send for InProcessExecutorHandlerData {} @@ -283,21 +283,21 @@ mod unix_signal_handler { unsafe fn nop_handler( _signal: Signal, _info: siginfo_t, - _void: *const c_void, + _context: &mut ucontext_t, _data: &mut InProcessExecutorHandlerData, ) { } #[cfg(unix)] impl Handler for InProcessExecutorHandlerData { - fn handle(&mut self, signal: Signal, info: siginfo_t, void: *const c_void) { + fn handle(&mut self, signal: Signal, info: siginfo_t, context: &mut ucontext_t) { unsafe { let data = &mut GLOBAL_STATE; match signal { Signal::SigUser2 | Signal::SigAlarm => { - (data.timeout_handler)(signal, info, void, data) + (data.timeout_handler)(signal, info, context, data) } - _ => (data.crash_handler)(signal, info, void, data), + _ => (data.crash_handler)(signal, info, context, data), } } } @@ -312,6 +312,7 @@ mod unix_signal_handler { Signal::SigFloatingPointException, Signal::SigIllegalInstruction, Signal::SigSegmentationFault, + Signal::SigTrap, ] } } @@ -320,7 +321,7 @@ mod unix_signal_handler { pub unsafe fn inproc_timeout_handler( _signal: Signal, _info: siginfo_t, - _void: *const c_void, + _context: &mut ucontext_t, data: &mut InProcessExecutorHandlerData, ) where EM: EventManager, @@ -348,7 +349,7 @@ mod unix_signal_handler { let obj_fitness = state .objectives_mut() - .is_interesting_all(&input, observers, ExitKind::Timeout) + .is_interesting_all(&input, observers, &ExitKind::Timeout) .expect("In timeout handler objectives failure."); if obj_fitness > 0 { state @@ -382,7 +383,7 @@ mod unix_signal_handler { pub unsafe fn inproc_crash_handler( _signal: Signal, _info: siginfo_t, - _void: *const c_void, + _context: &mut ucontext_t, data: &mut InProcessExecutorHandlerData, ) where EM: EventManager, @@ -392,6 +393,10 @@ mod unix_signal_handler { S: HasObjectives + HasSolutions, I: Input + HasTargetBytes, { + #[cfg(all(target_os = "android", target_arch = "aarch64"))] + let _context = *(((_context as *mut _ as *mut c_void as usize) + 128) as *mut c_void + as *mut ucontext_t); + #[cfg(feature = "std")] println!("Crashed with {}", _signal); if !data.current_input_ptr.is_null() { @@ -401,6 +406,45 @@ mod unix_signal_handler { #[cfg(feature = "std")] println!("Child crashed!"); + + #[cfg(all( + feature = "std", + any(target_os = "linux", target_os = "android"), + target_arch = "aarch64" + ))] + { + use 
crate::utils::find_mapping_for_address; + println!("{:━^100}", " CRASH "); + println!( + "Received signal {} at 0x{:016x}, fault address: 0x{:016x}", + _signal, _context.uc_mcontext.pc, _context.uc_mcontext.fault_address + ); + if let Ok((start, _, _, path)) = + find_mapping_for_address(_context.uc_mcontext.pc as usize) + { + println!( + "pc is at offset 0x{:08x} in {}", + _context.uc_mcontext.pc as usize - start, + path + ); + } + + println!("{:━^100}", " REGISTERS "); + for reg in 0..31 { + print!( + "x{:02}: 0x{:016x} ", + reg, _context.uc_mcontext.regs[reg as usize] + ); + if reg % 4 == 3 { + println!(); + } + } + println!("pc : 0x{:016x} ", _context.uc_mcontext.pc); + + //println!("{:━^100}", " BACKTRACE "); + //println!("{:?}", backtrace::Backtrace::new()) + } + #[cfg(feature = "std")] let _ = stdout().flush(); @@ -410,7 +454,7 @@ mod unix_signal_handler { let obj_fitness = state .objectives_mut() - .is_interesting_all(&input, observers, ExitKind::Crash) + .is_interesting_all(&input, observers, &ExitKind::Crash) .expect("In crash handler objectives failure."); if obj_fitness > 0 { let new_input = input.clone(); @@ -574,7 +618,7 @@ mod windows_exception_handler { let obj_fitness = state .objectives_mut() - .is_interesting_all(&input, observers, ExitKind::Crash) + .is_interesting_all(&input, observers, &ExitKind::Crash) .expect("In crash handler objectives failure."); if obj_fitness > 0 { let new_input = input.clone(); diff --git a/libafl/src/executors/mod.rs b/libafl/src/executors/mod.rs index 1ccccbf437..08b26ed602 100644 --- a/libafl/src/executors/mod.rs +++ b/libafl/src/executors/mod.rs @@ -5,24 +5,28 @@ pub use inprocess::InProcessExecutor; pub mod timeout; pub use timeout::TimeoutExecutor; -use core::cmp::PartialEq; use core::marker::PhantomData; use crate::{ - bolts::tuples::Named, + bolts::{serdeany::SerdeAny, tuples::Named}, events::EventManager, inputs::{HasTargetBytes, Input}, observers::ObserversTuple, Error, }; +use alloc::boxed::Box; + +pub trait CustomExitKind: core::fmt::Debug + SerdeAny + 'static {} + /// How an execution finished. 
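+/// The `Custom` variant added below carries a user-defined exit kind boxed
+/// behind the `CustomExitKind` trait; for instance, libafl_frida's
+/// `AsanErrors` implement it (see `asan_rt.rs` later in this diff).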
-#[derive(Debug, Clone, PartialEq)] +#[derive(Debug)] pub enum ExitKind { Ok, Crash, Oom, Timeout, + Custom(Box), } pub trait HasObservers diff --git a/libafl/src/executors/timeout.rs b/libafl/src/executors/timeout.rs index 1fbb4b84e9..0b428aa855 100644 --- a/libafl/src/executors/timeout.rs +++ b/libafl/src/executors/timeout.rs @@ -91,6 +91,10 @@ where phantom: PhantomData, } } + + pub fn inner(&mut self) -> &mut E { + &mut self.executor + } } impl Executor for TimeoutExecutor @@ -126,6 +130,11 @@ where null_mut(), ); } + #[cfg(windows)] + { + // TODO + let _ = self.exec_tmout.as_millis(); + } self.executor.pre_exec(_state, _event_mgr, _input) } @@ -155,6 +164,10 @@ where null_mut(), ); } + #[cfg(windows)] + { + // TODO + } self.executor.post_exec(_state, _event_mgr, _input) } diff --git a/libafl/src/feedbacks/map.rs b/libafl/src/feedbacks/map.rs index 014550d0da..89374e154d 100644 --- a/libafl/src/feedbacks/map.rs +++ b/libafl/src/feedbacks/map.rs @@ -154,7 +154,7 @@ where &mut self, _input: &I, observers: &OT, - _exit_kind: ExitKind, + _exit_kind: &ExitKind, ) -> Result { let mut interesting = 0; // TODO optimize diff --git a/libafl/src/feedbacks/mod.rs b/libafl/src/feedbacks/mod.rs index be782e594a..6cb8261383 100644 --- a/libafl/src/feedbacks/mod.rs +++ b/libafl/src/feedbacks/mod.rs @@ -29,7 +29,7 @@ where &mut self, input: &I, observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result; /// Append to the testcase the generated metadata in case of a new corpus item @@ -54,7 +54,7 @@ where &mut self, input: &I, observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result; /// Write metadata for this testcase @@ -73,7 +73,7 @@ where &mut self, _: &I, _: &OT, - _: ExitKind, + _: &ExitKind, ) -> Result { Ok(0) } @@ -99,9 +99,9 @@ where &mut self, input: &I, observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result { - Ok(self.0.is_interesting(input, observers, exit_kind.clone())? + Ok(self.0.is_interesting(input, observers, exit_kind)? + self.1.is_interesting_all(input, observers, exit_kind)?) 
} @@ -128,9 +128,9 @@ where &mut self, _input: &I, _observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result { - if exit_kind == ExitKind::Crash { + if let ExitKind::Crash = exit_kind { Ok(1) } else { Ok(0) @@ -168,9 +168,9 @@ where &mut self, _input: &I, _observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result { - if exit_kind == ExitKind::Timeout { + if let ExitKind::Timeout = exit_kind { Ok(1) } else { Ok(0) @@ -211,7 +211,7 @@ where &mut self, _input: &I, observers: &OT, - _exit_kind: ExitKind, + _exit_kind: &ExitKind, ) -> Result { let observer = observers.match_first_type::().unwrap(); self.exec_time = *observer.last_runtime(); diff --git a/libafl/src/state/mod.rs b/libafl/src/state/mod.rs index 2d0d960ef2..cd295ff9ea 100644 --- a/libafl/src/state/mod.rs +++ b/libafl/src/state/mod.rs @@ -169,7 +169,7 @@ where &mut self, input: &I, observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result where OT: ObserversTuple; @@ -448,7 +448,7 @@ where &mut self, input: &I, observers: &OT, - exit_kind: ExitKind, + exit_kind: &ExitKind, ) -> Result where OT: ObserversTuple, @@ -654,13 +654,13 @@ where executor.post_exec_observers()?; let observers = executor.observers(); - let fitness = - self.feedbacks_mut() - .is_interesting_all(&input, observers, exit_kind.clone())?; + let fitness = self + .feedbacks_mut() + .is_interesting_all(&input, observers, &exit_kind)?; let is_solution = self .objectives_mut() - .is_interesting_all(&input, observers, exit_kind)? + .is_interesting_all(&input, observers, &exit_kind)? > 0; Ok((fitness, is_solution)) } diff --git a/libafl/src/utils.rs b/libafl/src/utils.rs index edc3a580af..3a24a26cb8 100644 --- a/libafl/src/utils.rs +++ b/libafl/src/utils.rs @@ -438,6 +438,77 @@ pub fn startable_self() -> Result { Ok(startable) } +/// Allows one to walk the mappings in /proc/self/maps, caling a callback function for each +/// mapping. +/// If the callback returns true, we stop the walk. 
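+///
+/// A minimal usage sketch (assuming a Linux/Android target with the `std`
+/// feature enabled):
+///
+/// ```no_run
+/// use libafl::utils::walk_self_maps;
+///
+/// walk_self_maps(&mut |start, end, permissions, path| {
+///     println!("{:016x}-{:016x} {} {}", start, end, permissions, path);
+///     false // return true instead to stop the walk early
+/// });
+/// ```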
+#[cfg(all(feature = "std", any(target_os = "linux", target_os = "android")))]
+pub fn walk_self_maps(visitor: &mut dyn FnMut(usize, usize, String, String) -> bool) {
+    use regex::Regex;
+    use std::{
+        fs::File,
+        io::{BufRead, BufReader},
+    };
+    let re = Regex::new(r"^(?P<start>[0-9a-f]{8,16})-(?P<end>[0-9a-f]{8,16}) (?P<perm>[-rwxp]{4}) (?P<offset>[0-9a-f]{8}) [0-9a-f]+:[0-9a-f]+ [0-9]+\s+(?P<path>.*)$")
+        .unwrap();
+
+    let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
+
+    for line in BufReader::new(mapsfile).lines() {
+        let line = line.unwrap();
+        if let Some(caps) = re.captures(&line) {
+            if visitor(
+                usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
+                usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
+                caps.name("perm").unwrap().as_str().to_string(),
+                caps.name("path").unwrap().as_str().to_string(),
+            ) {
+                break;
+            };
+        }
+    }
+}
+
+/// Get the start and end address, permissions and path of the mapping containing a particular address
+#[cfg(all(feature = "std", any(target_os = "linux", target_os = "android")))]
+pub fn find_mapping_for_address(address: usize) -> Result<(usize, usize, String, String), Error> {
+    let mut result = (0, 0, "".to_string(), "".to_string());
+    walk_self_maps(&mut |start, end, permissions, path| {
+        if start <= address && address < end {
+            result = (start, end, permissions, path);
+            true
+        } else {
+            false
+        }
+    });
+
+    if result.0 != 0 {
+        Ok(result)
+    } else {
+        Err(Error::Unknown(
+            "Couldn't find a mapping for this address".to_string(),
+        ))
+    }
+}
+
+/// Get the start and end address of the mapping with a particular path
+#[cfg(all(feature = "std", any(target_os = "linux", target_os = "android")))]
+pub fn find_mapping_for_path(libpath: &str) -> (usize, usize) {
+    let mut libstart = 0;
+    let mut libend = 0;
+    walk_self_maps(&mut |start, end, _permissions, path| {
+        if libpath == path {
+            if libstart == 0 {
+                libstart = start;
+            }
+
+            libend = end;
+        }
+        false
+    });
+
+    (libstart, libend)
+}
+
 #[cfg(test)]
 mod tests {
     //use xxhash_rust::xxh3::xxh3_64_with_seed;
diff --git a/libafl_frida/Cargo.toml b/libafl_frida/Cargo.toml
new file mode 100644
index 0000000000..92a082e53a
--- /dev/null
+++ b/libafl_frida/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "libafl_frida"
+version = "0.1.0"
+authors = ["s1341 "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[build-dependencies]
+cc = { version = "1.0", features = ["parallel"] }
+
+[dependencies]
+libafl = { path = "../libafl", version = "0.1.0", features = ["std", "libafl_derive"] }
+libafl_targets = { path = "../libafl_targets", version = "0.1.0" }
+nix = "0.20.0"
+libc = "0.2.92"
+hashbrown = "0.11"
+libloading = "0.7.0"
+rangemap = "0.1.10"
+frida-gum = { version = "0.4.0", features = [ "auto-download", "backtrace", "event-sink", "invocation-listener"] }
+frida-gum-sys = { version = "0.2.4", features = [ "auto-download", "event-sink", "invocation-listener"] }
+regex = "1.4"
+dynasmrt = "1.0.1"
+capstone = "0.8.0"
+color-backtrace = { version = "0.5", features = [ "resolve-modules" ] }
+termcolor = "1.1.2"
+serde = "1.0"
+backtrace = { version = "0.3.58", default-features = false, features = ["std", "serde"] }
+num-traits = "0.2.14"
+seahash = "4.1.0"
+
+[target.'cfg(unix)'.dependencies]
+gothook = { version = "0.1" }
diff --git a/libafl_frida/build.rs b/libafl_frida/build.rs
new file mode 100644
index 0000000000..ebd77b48cc
--- /dev/null
+++ b/libafl_frida/build.rs
@@ -0,0 +1,5 @@
+// build.rs
+
+fn main() {
+    cc::Build::new().file("src/gettls.c").compile("libgettls.a");
+}
diff --git a/libafl_frida/src/asan_rt.rs b/libafl_frida/src/asan_rt.rs
new file mode 100644
index 0000000000..942ae71d2b
--- /dev/null
+++ b/libafl_frida/src/asan_rt.rs
@@ -0,0 +1,1880 @@
+use hashbrown::HashMap;
+use libafl::{
+    bolts::{ownedref::OwnedPtr, tuples::Named},
+    corpus::Testcase,
+    executors::{CustomExitKind, ExitKind},
+    feedbacks::Feedback,
+    inputs::{HasTargetBytes, Input},
+    observers::{Observer, ObserversTuple},
+    state::HasMetadata,
+    utils::{find_mapping_for_address, walk_self_maps},
+    Error, SerdeAny,
+};
+use nix::{
+    libc::{memmove, memset},
+    sys::mman::{mmap, MapFlags, ProtFlags},
+};
+
+use backtrace::Backtrace;
+use capstone::{
+    arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone},
+    Capstone, Insn,
+};
+use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity};
+use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};
+#[cfg(unix)]
+use gothook::GotHookLibrary;
+use libc::{sysconf, _SC_PAGESIZE};
+use rangemap::RangeSet;
+use serde::{Deserialize, Serialize};
+use std::{
+    cell::{RefCell, RefMut},
+    ffi::c_void,
+    io::Write,
+    rc::Rc,
+};
+use termcolor::{Color, ColorSpec, WriteColor};
+
+use crate::FridaOptions;
+
+extern "C" {
+    fn __register_frame(begin: *mut c_void);
+}
+
+static mut ALLOCATOR_SINGLETON: Option<RefCell<Allocator>> = None;
+
+struct Allocator {
+    runtime: Rc<RefCell<AsanRuntime>>,
+    page_size: usize,
+    shadow_offset: usize,
+    shadow_bit: usize,
+    pre_allocated_shadow: bool,
+    allocations: HashMap<usize, AllocationMetadata>,
+    shadow_pages: RangeSet<usize>,
+    allocation_queue: HashMap<usize, Vec<AllocationMetadata>>,
+}
+
+macro_rules! map_to_shadow {
+    ($self:expr, $address:expr) => {
+        (($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1)
+    };
+}
+
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+struct AllocationMetadata {
+    address: usize,
+    size: usize,
+    actual_size: usize,
+    allocation_site_backtrace: Option<Backtrace>,
+    release_site_backtrace: Option<Backtrace>,
+    freed: bool,
+    is_malloc_zero: bool,
+}
+
+impl Allocator {
+    fn new(runtime: Rc<RefCell<AsanRuntime>>) {
+        let page_size = unsafe { sysconf(_SC_PAGESIZE) as usize };
+        // probe to find a usable shadow bit:
+        let mut shadow_bit: usize = 0;
+        for try_shadow_bit in &[46usize, 36usize] {
+            let addr: usize = 1 << try_shadow_bit;
+            if unsafe {
+                mmap(
+                    addr as *mut c_void,
+                    page_size,
+                    ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+                    MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED,
+                    -1,
+                    0,
+                )
+            }
+            .is_ok()
+            {
+                shadow_bit = *try_shadow_bit;
+                break;
+            }
+        }
+        assert!(shadow_bit != 0);
+
+        // attempt to pre-map the entire shadow-memory space
+        let addr: usize = 1 << shadow_bit;
+        let pre_allocated_shadow = unsafe {
+            mmap(
+                addr as *mut c_void,
+                addr + addr,
+                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+                MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED,
+                -1,
+                0,
+            )
+        }
+        .is_ok();
+
+        let res = Self {
+            runtime,
+            page_size,
+            pre_allocated_shadow,
+            shadow_offset: 1 << shadow_bit,
+            shadow_bit,
+            allocations: HashMap::new(),
+            shadow_pages: RangeSet::new(),
+            allocation_queue: HashMap::new(),
+        };
+        unsafe {
+            ALLOCATOR_SINGLETON = Some(RefCell::new(res));
+        }
+    }
+
+    pub fn get() -> RefMut<'static, Allocator> {
+        unsafe {
+            ALLOCATOR_SINGLETON
+                .as_mut()
+                .unwrap()
+                .try_borrow_mut()
+                .unwrap()
+        }
+    }
+
+    pub fn init(runtime: Rc<RefCell<AsanRuntime>>) {
+        Self::new(runtime);
+    }
+
+    #[inline]
+    fn round_up_to_page(&self, size: usize) -> usize {
+        ((size + self.page_size) / self.page_size) *
self.page_size + } + + #[inline] + fn round_down_to_page(&self, value: usize) -> usize { + (value / self.page_size) * self.page_size + } + + pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void { + let mut is_malloc_zero = false; + let size = if size == 0 { + println!("zero-sized allocation!"); + is_malloc_zero = true; + 16 + } else { + size + }; + if size > (1 << 30) { + panic!("Allocation is too large: 0x{:x}", size); + } + let rounded_up_size = self.round_up_to_page(size); + + let metadata = if let Some(mut metadata) = self + .allocation_queue + .entry(rounded_up_size) + .or_default() + .pop() + { + //println!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size); + metadata.is_malloc_zero = is_malloc_zero; + metadata.size = size; + if self + .runtime + .borrow() + .options + .enable_asan_allocation_backtraces + { + metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); + } + metadata + } else { + let mapping = match mmap( + std::ptr::null_mut(), + rounded_up_size + 2 * self.page_size, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE, + -1, + 0, + ) { + Ok(mapping) => mapping as usize, + Err(err) => { + println!("An error occurred while mapping memory: {:?}", err); + return std::ptr::null_mut(); + } + }; + + self.map_shadow_for_region( + mapping, + mapping + rounded_up_size + 2 * self.page_size, + false, + ); + + let mut metadata = AllocationMetadata { + address: mapping + self.page_size, + size, + actual_size: rounded_up_size, + ..Default::default() + }; + + if self + .runtime + .borrow() + .options + .enable_asan_allocation_backtraces + { + metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); + } + + metadata + }; + + // unpoison the shadow memory for the allocation itself + Self::unpoison(map_to_shadow!(self, metadata.address), size); + let address = metadata.address as *mut c_void; + + self.allocations.insert(metadata.address, metadata); + //println!("serving address: {:?}, size: {:x}", address, size); + address + } + + pub unsafe fn release(&mut self, ptr: *mut c_void) { + let mut metadata = match self.allocations.get_mut(&(ptr as usize)) { + Some(metadata) => metadata, + None => { + if !ptr.is_null() { + // TODO: report this as an observer + self.runtime + .borrow_mut() + .report_error(AsanError::UnallocatedFree((ptr as usize, Backtrace::new()))); + } + return; + } + }; + + if metadata.freed { + self.runtime + .borrow_mut() + .report_error(AsanError::DoubleFree(( + ptr as usize, + metadata.clone(), + Backtrace::new(), + ))); + } + let shadow_mapping_start = map_to_shadow!(self, ptr as usize); + + metadata.freed = true; + if self + .runtime + .borrow() + .options + .enable_asan_allocation_backtraces + { + metadata.release_site_backtrace = Some(Backtrace::new_unresolved()); + } + + // poison the shadow memory for the allocation + Self::poison(shadow_mapping_start, metadata.size); + } + + pub fn find_metadata( + &mut self, + ptr: usize, + hint_base: usize, + ) -> Option<&mut AllocationMetadata> { + let mut metadatas: Vec<&mut AllocationMetadata> = self.allocations.values_mut().collect(); + metadatas.sort_by(|a, b| a.address.cmp(&b.address)); + let mut offset_to_closest = i64::max_value(); + let mut closest = None; + for metadata in metadatas { + println!("{:#x}", metadata.address); + let new_offset = if hint_base == metadata.address { + (ptr as i64 - metadata.address as i64).abs() + } else { + 
std::cmp::min(
+                    offset_to_closest,
+                    (ptr as i64 - metadata.address as i64).abs(),
+                )
+            };
+            if new_offset < offset_to_closest {
+                offset_to_closest = new_offset;
+                closest = Some(metadata);
+            }
+        }
+        closest
+    }
+
+    pub fn reset(&mut self) {
+        for (address, mut allocation) in self.allocations.drain() {
+            // First poison the memory.
+            Self::poison(map_to_shadow!(self, address), allocation.size);
+
+            // Reset the allocation metadata object
+            allocation.size = 0;
+            allocation.freed = false;
+            allocation.allocation_site_backtrace = None;
+            allocation.release_site_backtrace = None;
+
+            // Move the allocation from the allocations to the to-be-allocated queues
+            self.allocation_queue
+                .entry(allocation.actual_size)
+                .or_default()
+                .push(allocation);
+        }
+    }
+
+    pub fn get_usable_size(&self, ptr: *mut c_void) -> usize {
+        match self.allocations.get(&(ptr as usize)) {
+            Some(metadata) => metadata.size,
+            None => {
+                panic!(
+                    "Attempted to get_usable_size on a pointer ({:?}) which was not allocated!",
+                    ptr
+                );
+            }
+        }
+    }
+
+    fn unpoison(start: usize, size: usize) {
+        //println!("unpoisoning {:x} for {:x}", start, size / 8 + 1);
+        unsafe {
+            //println!("memset: {:?}", start as *mut c_void);
+            memset(start as *mut c_void, 0xff, size / 8);
+
+            let remainder = size % 8;
+            if remainder > 0 {
+                //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
+                memset(
+                    (start + size / 8) as *mut c_void,
+                    (0xff << (8 - remainder)) & 0xff,
+                    1,
+                );
+            }
+        }
+    }
+
+    fn poison(start: usize, size: usize) {
+        //println!("poisoning {:x} for {:x}", start, size / 8 + 1);
+        unsafe {
+            //println!("memset: {:?}", start as *mut c_void);
+            memset(start as *mut c_void, 0x00, size / 8);
+
+            let remainder = size % 8;
+            if remainder > 0 {
+                //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
+                memset((start + size / 8) as *mut c_void, 0x00, 1);
+            }
+        }
+    }
+
+    /// Map shadow memory for a region, and optionally unpoison it
+    pub fn map_shadow_for_region(
+        &mut self,
+        start: usize,
+        end: usize,
+        unpoison: bool,
+    ) -> (usize, usize) {
+        //println!("start: {:x}, end {:x}, size {:x}", start, end, end - start);
+
+        let shadow_mapping_start = map_to_shadow!(self, start);
+
+        if !self.pre_allocated_shadow {
+            let shadow_start = self.round_down_to_page(shadow_mapping_start);
+            let shadow_end =
+                self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
+            for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
+                //println!("range: {:x}-{:x}, pagesize: {}", range.start, range.end, self.page_size);
+                unsafe {
+                    mmap(
+                        range.start as *mut c_void,
+                        range.end - range.start,
+                        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+                        MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
+                        -1,
+                        0,
+                    )
+                    .expect("An error occurred while mapping shadow memory");
+                }
+            }
+
+            self.shadow_pages.insert(shadow_start..shadow_end);
+        }
+
+        //println!("shadow_mapping_start: {:x}, shadow_size: {:x}", shadow_mapping_start, (end - start) / 8);
+        if unpoison {
+            Self::unpoison(shadow_mapping_start, end - start);
+        }
+
+        (shadow_mapping_start, (end - start) / 8)
+    }
+}
+
+/// Hook for malloc.
+pub extern "C" fn asan_malloc(size: usize) -> *mut c_void {
+    unsafe { Allocator::get().alloc(size, 0x8) }
+}
+
+/// Hook for new.
+pub extern "C" fn asan_new(size: usize) -> *mut c_void {
+    unsafe { Allocator::get().alloc(size, 0x8) }
+}
+
+/// Hook for new.
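+/// This variant covers the non-throwing overload, reached via mangled symbols
+/// such as `_ZnamRKSt9nothrow_t` / `_ZnwmRKSt9nothrow_t` (see `hook_library`
+/// below); the `std::nothrow_t` argument is ignored.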
+pub extern "C" fn asan_new_nothrow(size: usize, _nothrow: *const c_void) -> *mut c_void { + unsafe { Allocator::get().alloc(size, 0x8) } +} + +/// Hook for new with alignment. +pub extern "C" fn asan_new_aligned(size: usize, alignment: usize) -> *mut c_void { + unsafe { Allocator::get().alloc(size, alignment) } +} + +/// Hook for new with alignment. +pub extern "C" fn asan_new_aligned_nothrow( + size: usize, + alignment: usize, + _nothrow: *const c_void, +) -> *mut c_void { + unsafe { Allocator::get().alloc(size, alignment) } +} + +/// Hook for pvalloc +pub extern "C" fn asan_pvalloc(size: usize) -> *mut c_void { + unsafe { Allocator::get().alloc(size, 0x8) } +} + +/// Hook for valloc +pub extern "C" fn asan_valloc(size: usize) -> *mut c_void { + unsafe { Allocator::get().alloc(size, 0x8) } +} + +/// Hook for calloc +pub extern "C" fn asan_calloc(nmemb: usize, size: usize) -> *mut c_void { + unsafe { Allocator::get().alloc(size * nmemb, 0x8) } +} + +/// Hook for realloc +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_realloc(ptr: *mut c_void, size: usize) -> *mut c_void { + let mut allocator = Allocator::get(); + let ret = allocator.alloc(size, 0x8); + if ptr != std::ptr::null_mut() { + memmove(ret, ptr, allocator.get_usable_size(ptr)); + } + allocator.release(ptr); + ret +} + +/// Hook for free +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_free(ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for delete +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_delete(ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for delete +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_delete_ulong(ptr: *mut c_void, _ulong: u64) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for delete +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_delete_ulong_aligned( + ptr: *mut c_void, + _ulong: u64, + _nothrow: *const c_void, +) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for delete +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_delete_aligned(ptr: *mut c_void, _alignment: usize) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for delete +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_delete_nothrow(ptr: *mut c_void, _nothrow: *const c_void) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for delete +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_delete_aligned_nothrow( + ptr: *mut c_void, + _alignment: usize, + _nothrow: *const c_void, +) { + if ptr != std::ptr::null_mut() { + Allocator::get().release(ptr); + } +} + +/// Hook for malloc_usable_size +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_malloc_usable_size(ptr: *mut c_void) -> usize { + Allocator::get().get_usable_size(ptr) +} + +/// Hook for memalign +pub extern "C" fn 
asan_memalign(size: usize, alignment: usize) -> *mut c_void { + unsafe { Allocator::get().alloc(size, alignment) } +} + +/// Hook for posix_memalign +/// +/// # Safety +/// This function is inherently unsafe, as it takes a raw pointer +pub unsafe extern "C" fn asan_posix_memalign( + pptr: *mut *mut c_void, + size: usize, + alignment: usize, +) -> i32 { + *pptr = Allocator::get().alloc(size, alignment); + 0 +} + +/// Hook for mallinfo +pub extern "C" fn asan_mallinfo() -> *mut c_void { + std::ptr::null_mut() +} + +/// Get the current thread's TLS address +extern "C" { + fn get_tls_ptr() -> *const c_void; +} + +pub struct AsanRuntime { + regs: [usize; 32], + blob_check_mem_byte: Option>, + blob_check_mem_halfword: Option>, + blob_check_mem_dword: Option>, + blob_check_mem_qword: Option>, + blob_check_mem_16bytes: Option>, + blob_check_mem_3bytes: Option>, + blob_check_mem_6bytes: Option>, + blob_check_mem_12bytes: Option>, + blob_check_mem_24bytes: Option>, + blob_check_mem_32bytes: Option>, + blob_check_mem_48bytes: Option>, + blob_check_mem_64bytes: Option>, + stalked_addresses: HashMap, + options: FridaOptions, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct AsanReadWriteError { + registers: [usize; 32], + pc: usize, + fault: (u16, u16, usize, usize), + metadata: AllocationMetadata, + backtrace: Backtrace, +} + +#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] +enum AsanError { + OobRead(AsanReadWriteError), + OobWrite(AsanReadWriteError), + ReadAfterFree(AsanReadWriteError), + WriteAfterFree(AsanReadWriteError), + DoubleFree((usize, AllocationMetadata, Backtrace)), + UnallocatedFree((usize, Backtrace)), + Unknown(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + Leak((usize, AllocationMetadata)), + StackOobRead(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + StackOobWrite(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), +} + +impl AsanError { + fn description(&self) -> &str { + match self { + AsanError::OobRead(_) => "heap out-of-bounds read", + AsanError::OobWrite(_) => "heap out-of-bounds write", + AsanError::DoubleFree(_) => "double-free", + AsanError::UnallocatedFree(_) => "unallocated-free", + AsanError::WriteAfterFree(_) => "heap use-after-free write", + AsanError::ReadAfterFree(_) => "heap use-after-free read", + AsanError::Unknown(_) => "heap unknown", + AsanError::Leak(_) => "memory-leak", + AsanError::StackOobRead(_) => "stack out-of-bounds read", + AsanError::StackOobWrite(_) => "stack out-of-bounds write", + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] +pub struct AsanErrors { + errors: Vec, +} + +impl AsanErrors { + fn new() -> Self { + Self { errors: Vec::new() } + } + + pub fn clear(&mut self) { + self.errors.clear() + } + + pub fn len(&self) -> usize { + self.errors.len() + } + + pub fn is_empty(&self) -> bool { + self.errors.is_empty() + } +} +impl CustomExitKind for AsanErrors {} + +impl AsanRuntime { + pub fn new(options: FridaOptions) -> Rc> { + let res = Rc::new(RefCell::new(Self { + regs: [0; 32], + blob_check_mem_byte: None, + blob_check_mem_halfword: None, + blob_check_mem_dword: None, + blob_check_mem_qword: None, + blob_check_mem_16bytes: None, + blob_check_mem_3bytes: None, + blob_check_mem_6bytes: None, + blob_check_mem_12bytes: None, + blob_check_mem_24bytes: None, + blob_check_mem_32bytes: None, + blob_check_mem_48bytes: None, + blob_check_mem_64bytes: None, + stalked_addresses: HashMap::new(), + options, + })); + Allocator::init(res.clone()); + res + } + /// Initialize the 
runtime so that it is ready for action. Take care not to move the runtime
+    /// instance after this function has been called, as the generated blobs would become
+    /// invalid!
+    pub fn init(&mut self, modules_to_instrument: &[&str]) {
+        // workaround frida's frida-gum-allocate-near bug:
+        unsafe {
+            for _ in 0..512 {
+                mmap(
+                    std::ptr::null_mut(),
+                    128 * 1024,
+                    ProtFlags::PROT_NONE,
+                    MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE,
+                    -1,
+                    0,
+                )
+                .expect("Failed to map dummy regions for frida workaround");
+                mmap(
+                    std::ptr::null_mut(),
+                    4 * 1024 * 1024,
+                    ProtFlags::PROT_NONE,
+                    MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE,
+                    -1,
+                    0,
+                )
+                .expect("Failed to map dummy regions for frida workaround");
+            }
+        }
+
+        unsafe {
+            ASAN_ERRORS = Some(AsanErrors::new());
+        }
+
+        self.generate_instrumentation_blobs();
+        self.unpoison_all_existing_memory();
+        for module_name in modules_to_instrument {
+            #[cfg(unix)]
+            self.hook_library(module_name);
+        }
+    }
+
+    /// Reset all allocations so that they can be reused for new allocation requests.
+    pub fn reset_allocations(&self) {
+        Allocator::get().reset();
+    }
+
+    /// Check if the test leaked any memory and report it if so.
+    pub fn check_for_leaks(&mut self) {
+        for metadata in Allocator::get().allocations.values_mut() {
+            if !metadata.freed {
+                self.report_error(AsanError::Leak((metadata.address, metadata.clone())));
+            }
+        }
+    }
+
+    pub fn errors(&mut self) -> &Option<AsanErrors> {
+        unsafe { &ASAN_ERRORS }
+    }
+
+    /// Make sure the specified memory is unpoisoned
+    pub fn unpoison(&self, address: usize, size: usize) {
+        Allocator::get().map_shadow_for_region(address, address + size, true);
+    }
+
+    /// Add a stalked address to real address mapping.
+    //#[inline]
+    pub fn add_stalked_address(&mut self, stalked: usize, real: usize) {
+        self.stalked_addresses.insert(stalked, real);
+    }
+
+    pub fn real_address_for_stalked(&self, stalked: usize) -> Option<&usize> {
+        self.stalked_addresses.get(&stalked)
+    }
+
+    /// Unpoison all the memory that is currently mapped with read/write permissions.
+    fn unpoison_all_existing_memory(&self) {
+        let mut allocator = Allocator::get();
+        walk_self_maps(&mut |start, end, _permissions, _path| {
+            //if permissions.as_bytes()[0] == b'r' || permissions.as_bytes()[1] == b'w' {
+            if allocator.pre_allocated_shadow && start == 1 << allocator.shadow_bit {
+                return false;
+            }
+            allocator.map_shadow_for_region(start, end, true);
+            //}
+            false
+        });
+    }
+
+    /// Register the current thread with the runtime, implementing shadow memory for its stack
+    /// and TLS mappings.
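+    ///
+    /// A hedged usage sketch (the `runtime` handle is hypothetical): any thread
+    /// that may execute instrumented code should call
+    /// `runtime.borrow().register_thread()` once, before its first run, so that
+    /// its stack and TLS accesses have shadow memory backing them.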
+    pub fn register_thread(&self) {
+        let mut allocator = Allocator::get();
+        let (stack_start, stack_end) = Self::current_stack();
+        allocator.map_shadow_for_region(stack_start, stack_end, true);
+
+        let (tls_start, tls_end) = Self::current_tls();
+        allocator.map_shadow_for_region(tls_start, tls_end, true);
+        println!(
+            "registering thread with stack {:x}:{:x} and tls {:x}:{:x}",
+            stack_start as usize, stack_end as usize, tls_start as usize, tls_end as usize
+        );
+    }
+
+    /// Determine the stack start and end for the currently running thread
+    pub fn current_stack() -> (usize, usize) {
+        let stack_var = 0xeadbeef;
+        let stack_address = &stack_var as *const _ as *const c_void as usize;
+
+        let (start, end, _, _) = find_mapping_for_address(stack_address).unwrap();
+        (start, end)
+    }
+
+    /// Determine the TLS start and end for the currently running thread
+    fn current_tls() -> (usize, usize) {
+        let tls_address = unsafe { get_tls_ptr() } as usize;
+
+        let (start, end, _, _) = find_mapping_for_address(tls_address).unwrap();
+        (start, end)
+    }
+
+    /// Locate the target library and hook its memory allocation functions
+    #[cfg(unix)]
+    fn hook_library(&mut self, path: &str) {
+        let target_lib = GotHookLibrary::new(path, false);
+
+        // shadow the library itself, allowing all accesses
+        Allocator::get().map_shadow_for_region(target_lib.start(), target_lib.end(), true);
+
+        unsafe {
+            // Hook all the memory allocator functions
+            target_lib.hook_function("malloc", asan_malloc as *const c_void);
+            target_lib.hook_function("_Znam", asan_new as *const c_void);
+            target_lib.hook_function("_ZnamRKSt9nothrow_t", asan_new_nothrow as *const c_void);
+            target_lib.hook_function("_ZnamSt11align_val_t", asan_new_aligned as *const c_void);
+            target_lib.hook_function(
+                "_ZnamSt11align_val_tRKSt9nothrow_t",
+                asan_new_aligned_nothrow as *const c_void,
+            );
+            target_lib.hook_function("_Znwm", asan_new as *const c_void);
+            target_lib.hook_function("_ZnwmRKSt9nothrow_t", asan_new_nothrow as *const c_void);
+            target_lib.hook_function("_ZnwmSt11align_val_t", asan_new_aligned as *const c_void);
+            target_lib.hook_function(
+                "_ZnwmSt11align_val_tRKSt9nothrow_t",
+                asan_new_aligned_nothrow as *const c_void,
+            );
+
+            target_lib.hook_function("_ZdaPv", asan_delete as *const c_void);
+            target_lib.hook_function("_ZdaPvm", asan_delete_ulong as *const c_void);
+            target_lib.hook_function(
+                "_ZdaPvmSt11align_val_t",
+                asan_delete_ulong_aligned as *const c_void,
+            );
+            target_lib.hook_function("_ZdaPvRKSt9nothrow_t", asan_delete_nothrow as *const c_void);
+            target_lib.hook_function(
+                "_ZdaPvSt11align_val_t",
+                asan_delete_aligned as *const c_void,
+            );
+            target_lib.hook_function(
+                "_ZdaPvSt11align_val_tRKSt9nothrow_t",
+                asan_delete_aligned_nothrow as *const c_void,
+            );
+
+            target_lib.hook_function("_ZdlPv", asan_delete as *const c_void);
+            target_lib.hook_function("_ZdlPvm", asan_delete_ulong as *const c_void);
+            target_lib.hook_function(
+                "_ZdlPvmSt11align_val_t",
+                asan_delete_ulong_aligned as *const c_void,
+            );
+            target_lib.hook_function("_ZdlPvRKSt9nothrow_t", asan_delete_nothrow as *const c_void);
+            target_lib.hook_function(
+                "_ZdlPvSt11align_val_t",
+                asan_delete_aligned as *const c_void,
+            );
+            target_lib.hook_function(
+                "_ZdlPvSt11align_val_tRKSt9nothrow_t",
+                asan_delete_aligned_nothrow as *const c_void,
+            );
+
+            target_lib.hook_function("calloc", asan_calloc as *const c_void);
+            target_lib.hook_function("pvalloc", asan_pvalloc as *const c_void);
+            target_lib.hook_function("valloc", asan_valloc as *const c_void);
+
target_lib.hook_function("realloc", asan_realloc as *const c_void); + target_lib.hook_function("free", asan_free as *const c_void); + target_lib.hook_function("memalign", asan_memalign as *const c_void); + target_lib.hook_function("posix_memalign", asan_posix_memalign as *const c_void); + target_lib.hook_function( + "malloc_usable_size", + asan_malloc_usable_size as *const c_void, + ); + } + } + + extern "C" fn handle_trap(&mut self) { + let mut actual_pc = self.regs[31]; + actual_pc = match self.stalked_addresses.get(&actual_pc) { + Some(addr) => *addr, + _ => actual_pc, + }; + + let cs = Capstone::new() + .arm64() + .mode(capstone::arch::arm64::ArchMode::Arm) + .detail(true) + .build() + .unwrap(); + + let instructions = cs + .disasm_count( + unsafe { std::slice::from_raw_parts(actual_pc as *mut u8, 24) }, + actual_pc as u64, + 3, + ) + .unwrap(); + let instructions = instructions.iter().collect::>(); + let mut insn = instructions.first().unwrap(); + if insn.mnemonic().unwrap() == "msr" && insn.op_str().unwrap() == "nzcv, x0" { + insn = instructions.get(2).unwrap(); + actual_pc = insn.address() as usize; + } + + let detail = cs.insn_detail(&insn).unwrap(); + let arch_detail = detail.arch_detail(); + let (mut base_reg, mut index_reg, displacement) = + if let Arm64Operand(arm64operand) = arch_detail.operands().last().unwrap() { + if let Arm64OperandType::Mem(opmem) = arm64operand.op_type { + (opmem.base().0, opmem.index().0, opmem.disp()) + } else { + (0, 0, 0) + } + } else { + (0, 0, 0) + }; + + if capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16 <= base_reg + && base_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_X28 as u16 + { + base_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16; + } else if base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X29 as u16 { + base_reg = 29u16; + } else if base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X30 as u16 { + base_reg = 30u16; + } else if base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_SP as u16 + || base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WSP as u16 + || base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_XZR as u16 + || base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WZR as u16 + { + base_reg = 31u16; + } else if capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16 <= base_reg + && base_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_W30 as u16 + { + base_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16; + } else if capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16 <= base_reg + && base_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_S31 as u16 + { + base_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16; + } + + let mut fault_address = + (self.regs[base_reg as usize] as isize + displacement as isize) as usize; + + if index_reg != 0 { + if capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16 <= index_reg + && index_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_X28 as u16 + { + index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16; + } else if index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X29 as u16 { + index_reg = 29u16; + } else if index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X30 as u16 { + index_reg = 30u16; + } else if index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_SP as u16 + || index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WSP as u16 + || index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_XZR as u16 + || index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WZR as u16 + { + index_reg = 
31u16; + } else if capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16 <= index_reg + && index_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_W30 as u16 + { + index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16; + } else if capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16 <= index_reg + && index_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_S31 as u16 + { + index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16; + } + fault_address += self.regs[index_reg as usize] as usize; + } else { + index_reg = 0xffff + } + + let backtrace = Backtrace::new(); + + let (stack_start, stack_end) = Self::current_stack(); + let error = if fault_address >= stack_start && fault_address < stack_end { + if insn.mnemonic().unwrap().starts_with('l') { + AsanError::StackOobRead(( + self.regs, + actual_pc, + (base_reg, index_reg, displacement as usize, fault_address), + backtrace, + )) + } else { + AsanError::StackOobWrite(( + self.regs, + actual_pc, + (base_reg, index_reg, displacement as usize, fault_address), + backtrace, + )) + } + } else { + let mut allocator = Allocator::get(); + if let Some(metadata) = + allocator.find_metadata(fault_address, self.regs[base_reg as usize]) + { + let asan_readwrite_error = AsanReadWriteError { + registers: self.regs, + pc: actual_pc, + fault: (base_reg, index_reg, displacement as usize, fault_address), + metadata: metadata.clone(), + backtrace, + }; + if insn.mnemonic().unwrap().starts_with('l') { + if metadata.freed { + AsanError::ReadAfterFree(asan_readwrite_error) + } else { + AsanError::OobRead(asan_readwrite_error) + } + } else { + if metadata.freed { + AsanError::WriteAfterFree(asan_readwrite_error) + } else { + AsanError::OobWrite(asan_readwrite_error) + } + } + } else { + AsanError::Unknown(( + self.regs, + actual_pc, + (base_reg, index_reg, displacement as usize, fault_address), + backtrace, + )) + } + }; + self.report_error(error); + } + + fn report_error(&mut self, error: AsanError) { + unsafe { + ASAN_ERRORS.as_mut().unwrap().errors.push(error.clone()); + } + + let mut out_stream = default_output_stream(); + let output = out_stream.as_mut(); + + let backtrace_printer = BacktracePrinter::new() + .clear_frame_filters() + .print_addresses(true) + .verbosity(Verbosity::Full) + .add_frame_filter(Box::new(|frames| { + frames.retain( + |x| matches!(&x.name, Some(n) if !n.starts_with("libafl_frida::asan_rt::")), + ) + })); + + writeln!(output, "{:━^100}", " Memory error detected! 
").unwrap(); + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + write!(output, "{}", error.description()).unwrap(); + match error { + AsanError::OobRead(mut error) + | AsanError::OobWrite(mut error) + | AsanError::ReadAfterFree(mut error) + | AsanError::WriteAfterFree(mut error) => { + let (basereg, indexreg, _displacement, fault_address) = error.fault; + + if let Ok((start, _, _, path)) = find_mapping_for_address(error.pc) { + writeln!( + output, + " at 0x{:x} ({}:0x{:04x}), faulting address 0x{:x}", + error.pc, + path, + error.pc - start, + fault_address + ) + .unwrap(); + } else { + writeln!( + output, + " at 0x{:x}, faulting address 0x{:x}", + error.pc, fault_address + ) + .unwrap(); + } + output.reset().unwrap(); + + writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + for reg in 0..=30 { + if reg == basereg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if reg == indexreg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!( + output, + "x{:02}: 0x{:016x} ", + reg, error.registers[reg as usize] + ) + .unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + write!(output, "\n").unwrap(); + } + } + writeln!(output, "pc : 0x{:016x} ", error.pc).unwrap(); + + writeln!(output, "{:━^100}", " CODE ").unwrap(); + let mut cs = Capstone::new() + .arm64() + .mode(capstone::arch::arm64::ArchMode::Arm) + .build() + .unwrap(); + cs.set_skipdata(true).expect("failed to set skipdata"); + + let start_pc = error.pc - 4 * 5; + for insn in cs + .disasm_count( + unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, + start_pc as u64, + 11, + ) + .expect("failed to disassemble instructions") + .iter() + { + if insn.address() as usize == error.pc { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + writeln!(output, "\t => {}", insn).unwrap(); + output.reset().unwrap(); + } else { + writeln!(output, "\t {}", insn).unwrap(); + } + } + backtrace_printer + .print_trace(&error.backtrace, output) + .unwrap(); + + writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); + let offset: i64 = fault_address as i64 - error.metadata.address as i64; + let direction = if offset > 0 { "right" } else { "left" }; + writeln!( + output, + "access is {} to the {} of the 0x{:x} byte allocation at 0x{:x}", + offset, direction, error.metadata.size, error.metadata.address + ) + .unwrap(); + + if error.metadata.is_malloc_zero { + writeln!(output, "allocation was zero-sized").unwrap(); + } + + if let Some(backtrace) = error.metadata.allocation_site_backtrace.as_mut() { + writeln!(output, "allocation site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + + if error.metadata.freed { + writeln!(output, "{:━^100}", " FREE INFO ").unwrap(); + if let Some(backtrace) = error.metadata.release_site_backtrace.as_mut() { + writeln!(output, "free site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + } + } + AsanError::Unknown((registers, pc, fault, backtrace)) => { + let (basereg, indexreg, _displacement, fault_address) = fault; + + if let Ok((start, _, _, path)) = find_mapping_for_address(pc) { + writeln!( + output, + " at 0x{:x} ({}:0x{:04x}), faulting address 0x{:x}", + pc, + path, + pc - start, + fault_address + ) + .unwrap(); + } else { + writeln!( + output, + " at 0x{:x}, faulting address 0x{:x}", + pc, fault_address + ) + .unwrap(); + } + 
output.reset().unwrap(); + + writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + for reg in 0..=30 { + if reg == basereg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if reg == indexreg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!(output, "x{:02}: 0x{:016x} ", reg, registers[reg as usize]).unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + writeln!(output, "pc : 0x{:016x} ", pc).unwrap(); + + writeln!(output, "{:━^100}", " CODE ").unwrap(); + let mut cs = Capstone::new() + .arm64() + .mode(capstone::arch::arm64::ArchMode::Arm) + .build() + .unwrap(); + cs.set_skipdata(true).expect("failed to set skipdata"); + + let start_pc = pc - 4 * 5; + for insn in cs + .disasm_count( + unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, + start_pc as u64, + 11, + ) + .expect("failed to disassemble instructions") + .iter() + { + if insn.address() as usize == pc { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + writeln!(output, "\t => {}", insn).unwrap(); + output.reset().unwrap(); + } else { + writeln!(output, "\t {}", insn).unwrap(); + } + } + backtrace_printer.print_trace(&backtrace, output).unwrap(); + } + AsanError::DoubleFree((ptr, mut metadata, backtrace)) => { + writeln!(output, " of {:?}", ptr).unwrap(); + output.reset().unwrap(); + backtrace_printer.print_trace(&backtrace, output).unwrap(); + + writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); + writeln!( + output, + "allocation at 0x{:x}, with size 0x{:x}", + metadata.address, metadata.size + ) + .unwrap(); + if metadata.is_malloc_zero { + writeln!(output, "allocation was zero-sized").unwrap(); + } + + if let Some(backtrace) = metadata.allocation_site_backtrace.as_mut() { + writeln!(output, "allocation site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + writeln!(output, "{:━^100}", " FREE INFO ").unwrap(); + if let Some(backtrace) = metadata.release_site_backtrace.as_mut() { + writeln!(output, "previous free site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + } + AsanError::UnallocatedFree((ptr, backtrace)) => { + writeln!(output, " of {:?}", ptr).unwrap(); + output.reset().unwrap(); + backtrace_printer.print_trace(&backtrace, output).unwrap(); + } + AsanError::Leak((ptr, mut metadata)) => { + writeln!(output, " of {:?}", ptr).unwrap(); + output.reset().unwrap(); + + writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); + writeln!( + output, + "allocation at 0x{:x}, with size 0x{:x}", + metadata.address, metadata.size + ) + .unwrap(); + if metadata.is_malloc_zero { + writeln!(output, "allocation was zero-sized").unwrap(); + } + + if let Some(backtrace) = metadata.allocation_site_backtrace.as_mut() { + writeln!(output, "allocation site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + } + AsanError::StackOobRead((registers, pc, fault, backtrace)) + | AsanError::StackOobWrite((registers, pc, fault, backtrace)) => { + let (basereg, indexreg, _displacement, fault_address) = fault; + + if let Ok((start, _, _, path)) = find_mapping_for_address(pc) { + writeln!( + output, + " at 0x{:x} ({}:0x{:04x}), faulting address 0x{:x}", + pc, + path, + pc - start, + fault_address + ) + .unwrap(); + } else { + writeln!( + output, + " at 0x{:x}, faulting address 
0x{:x}", + pc, fault_address + ) + .unwrap(); + } + output.reset().unwrap(); + + writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + for reg in 0..=30 { + if reg == basereg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if reg == indexreg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!(output, "x{:02}: 0x{:016x} ", reg, registers[reg as usize]).unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + writeln!(output, "pc : 0x{:016x} ", pc).unwrap(); + + writeln!(output, "{:━^100}", " CODE ").unwrap(); + let mut cs = Capstone::new() + .arm64() + .mode(capstone::arch::arm64::ArchMode::Arm) + .build() + .unwrap(); + cs.set_skipdata(true).expect("failed to set skipdata"); + + let start_pc = pc - 4 * 5; + for insn in cs + .disasm_count( + unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, + start_pc as u64, + 11, + ) + .expect("failed to disassemble instructions") + .iter() + { + if insn.address() as usize == pc { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + writeln!(output, "\t => {}", insn).unwrap(); + output.reset().unwrap(); + } else { + writeln!(output, "\t {}", insn).unwrap(); + } + } + backtrace_printer.print_trace(&backtrace, output).unwrap(); + } + }; + + if !self.options.asan_continue_after_error() { + panic!("Crashing target!"); + } + } + + /// Generate the instrumentation blobs for the current arch. + fn generate_instrumentation_blobs(&mut self) { + let shadow_bit = Allocator::get().shadow_bit as u32; + macro_rules! shadow_check { + ($ops:ident, $bit:expr) => {dynasm!($ops + ; .arch aarch64 + //; brk #5 + ; b >skip_report + + ; report: + ; stp x29, x30, [sp, #-0x10]! 
+                ; mov x29, sp
+
+                ; ldr x0, >self_regs_addr
+                ; stp x2, x3, [x0, #0x10]
+                ; stp x4, x5, [x0, #0x20]
+                ; stp x6, x7, [x0, #0x30]
+                ; stp x8, x9, [x0, #0x40]
+                ; stp x10, x11, [x0, #0x50]
+                ; stp x12, x13, [x0, #0x60]
+                ; stp x14, x15, [x0, #0x70]
+                ; stp x16, x17, [x0, #0x80]
+                ; stp x18, x19, [x0, #0x90]
+                ; stp x20, x21, [x0, #0xa0]
+                ; stp x22, x23, [x0, #0xb0]
+                ; stp x24, x25, [x0, #0xc0]
+                ; stp x26, x27, [x0, #0xd0]
+                ; stp x28, x29, [x0, #0xe0]
+                ; stp x30, xzr, [x0, #0xf0]
+                ; mov x28, x0
+                ; .dword (0xd53b4218u32 as i32) // mrs x24, nzcv
+                //; ldp x0, x1, [sp], #144
+                ; ldp x0, x1, [sp, 0x10]
+                ; stp x0, x1, [x28]
+
+                ; adr x25, >done
+                ; str x25, [x28, 0xf8]
+
+                ; adr x25, >eh_frame_fde
+                ; adr x27, >fde_address
+                ; ldr w26, [x27]
+                ; cmp w26, #0x0
+                ; b.ne >skip_register
+                ; sub x25, x25, x27
+                ; str w25, [x27]
+                ; ldr x1, >register_frame_func
+                //; brk #11
+                ; blr x1
+                ; skip_register:
+                ; ldr x0, >self_addr
+                ; ldr x1, >trap_func
+                ; blr x1
+
+                ; .dword (0xd51b4218u32 as i32) // msr nzcv, x24
+                ; ldr x0, >self_regs_addr
+                ; ldp x2, x3, [x0, #0x10]
+                ; ldp x4, x5, [x0, #0x20]
+                ; ldp x6, x7, [x0, #0x30]
+                ; ldp x8, x9, [x0, #0x40]
+                ; ldp x10, x11, [x0, #0x50]
+                ; ldp x12, x13, [x0, #0x60]
+                ; ldp x14, x15, [x0, #0x70]
+                ; ldp x16, x17, [x0, #0x80]
+                ; ldp x18, x19, [x0, #0x90]
+                ; ldp x20, x21, [x0, #0xa0]
+                ; ldp x22, x23, [x0, #0xb0]
+                ; ldp x24, x25, [x0, #0xc0]
+                ; ldp x26, x27, [x0, #0xd0]
+                ; ldp x28, x29, [x0, #0xe0]
+                ; ldp x30, xzr, [x0, #0xf0]
+
+                ; ldp x29, x30, [sp], #0x10
+                ; b >done
+                ; self_addr:
+                ; .qword self as *mut _ as *mut c_void as i64
+                ; self_regs_addr:
+                ; .qword &mut self.regs as *mut _ as *mut c_void as i64
+                ; trap_func:
+                ; .qword AsanRuntime::handle_trap as *mut c_void as i64
+                ; register_frame_func:
+                ; .qword __register_frame as *mut c_void as i64
+                ; eh_frame_cie:
+                ; .dword 0x14
+                ; .dword 0x00
+                ; .dword 0x00527a01
+                ; .dword 0x011e7c01
+                ; .dword 0x001f0c1b
+                ; eh_frame_fde:
+                ; .dword 0x14
+                ; .dword 0x18
+                ; fde_address:
+                ; .dword 0x0 // <-- address offset goes here
+                ; .dword 0x104
+                //advance_loc 12
+                //def_cfa r29 (x29) at offset 16
+                //offset r30 (x30) at cfa-8
+                //offset r29 (x29) at cfa-16
+                ; .dword 0x1d0c4c00
+                ; .dword (0x9d029e10 as u32 as i32)
+                ; .dword 0x04
+                // empty next FDE:
+                ; .dword 0x0
+                ; .dword 0x0
+
+                ; skip_report:
+                ; mov x1, #1
+                ; add x1, xzr, x1, lsl #shadow_bit
+                ; add x1, x1, x0, lsr #3
+                ; ubfx x1, x1, #0, #(shadow_bit + 1)
+                ; ldrh w1, [x1, #0]
+                ; and x0, x0, #7
+                ; rev16 w1, w1
+                ; rbit w1, w1
+                ; lsr x1, x1, #16
+                ; lsr x1, x1, x0
+                ; tbnz x1, #$bit, >done
+                ; b <report
+
+                ; done:
+            );};
+        }
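+
+        // Rough Rust rendering of what a generated check blob does at run time
+        // (an illustrative sketch only; names are hypothetical and the bit
+        // layout is fixed by the Allocator's shadow-memory encoding):
+        //
+        //     let shadow = (1usize << shadow_bit) + (address >> 3);
+        //     let granule = unsafe { *(shadow as *const u16) };
+        //     // rev16/rbit/lsr reshuffle `granule` so that consecutive bytes
+        //     // of the 8-byte granule map to consecutive bits, starting at
+        //     // bit (address & 7); if the bit for this access width is clear,
+        //     // control falls through to the report stub above.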
+        macro_rules! shadow_check_exact {
+            ($ops:ident, $val:expr) => {dynasm!($ops
+                ; .arch aarch64
+                ; b >skip_report
+
+                ; report:
+                ; stp x29, x30, [sp, #-0x10]!
+                ; mov x29, sp
+
+                ; ldr x0, >self_regs_addr
+                ; stp x2, x3, [x0, #0x10]
+                ; stp x4, x5, [x0, #0x20]
+                ; stp x6, x7, [x0, #0x30]
+                ; stp x8, x9, [x0, #0x40]
+                ; stp x10, x11, [x0, #0x50]
+                ; stp x12, x13, [x0, #0x60]
+                ; stp x14, x15, [x0, #0x70]
+                ; stp x16, x17, [x0, #0x80]
+                ; stp x18, x19, [x0, #0x90]
+                ; stp x20, x21, [x0, #0xa0]
+                ; stp x22, x23, [x0, #0xb0]
+                ; stp x24, x25, [x0, #0xc0]
+                ; stp x26, x27, [x0, #0xd0]
+                ; stp x28, x29, [x0, #0xe0]
+                ; stp x30, xzr, [x0, #0xf0]
+                ; mov x28, x0
+                ; .dword (0xd53b4218u32 as i32) // mrs x24, nzcv
+                ; ldp x0, x1, [sp, 0x10]
+                ; stp x0, x1, [x28]
+
+                ; adr x25, >done
+                ; add x25, x25, 4
+                ; str x25, [x28, 0xf8]
+
+                ; adr x25, >eh_frame_fde
+                ; adr x27, >fde_address
+                ; ldr w26, [x27]
+                ; cmp w26, #0x0
+                ; b.ne >skip_register
+                ; sub x25, x25, x27
+                ; str w25, [x27]
+                ; ldr x1, >register_frame_func
+                //; brk #11
+                ; blr x1
+                ; skip_register:
+                ; ldr x0, >self_addr
+                ; ldr x1, >trap_func
+                ; blr x1
+
+                ; .dword (0xd51b4218u32 as i32) // msr nzcv, x24
+                ; ldr x0, >self_regs_addr
+                ; ldp x2, x3, [x0, #0x10]
+                ; ldp x4, x5, [x0, #0x20]
+                ; ldp x6, x7, [x0, #0x30]
+                ; ldp x8, x9, [x0, #0x40]
+                ; ldp x10, x11, [x0, #0x50]
+                ; ldp x12, x13, [x0, #0x60]
+                ; ldp x14, x15, [x0, #0x70]
+                ; ldp x16, x17, [x0, #0x80]
+                ; ldp x18, x19, [x0, #0x90]
+                ; ldp x20, x21, [x0, #0xa0]
+                ; ldp x22, x23, [x0, #0xb0]
+                ; ldp x24, x25, [x0, #0xc0]
+                ; ldp x26, x27, [x0, #0xd0]
+                ; ldp x28, x29, [x0, #0xe0]
+                ; ldp x30, xzr, [x0, #0xf0]
+
+                ; ldp x29, x30, [sp], #0x10
+                ; b >done
+                ; self_addr:
+                ; .qword self as *mut _ as *mut c_void as i64
+                ; self_regs_addr:
+                ; .qword &mut self.regs as *mut _ as *mut c_void as i64
+                ; trap_func:
+                ; .qword AsanRuntime::handle_trap as *mut c_void as i64
+                ; register_frame_func:
+                ; .qword __register_frame as *mut c_void as i64
+                ; eh_frame_cie:
+                ; .dword 0x14
+                ; .dword 0x00
+                ; .dword 0x00527a01
+                ; .dword 0x011e7c01
+                ; .dword 0x001f0c1b
+                ; eh_frame_fde:
+                ; .dword 0x14
+                ; .dword 0x18
+                ; fde_address:
+                ; .dword 0x0 // <-- address offset goes here
+                ; .dword 0x104
+                //advance_loc 12
+                //def_cfa r29 (x29) at offset 16
+                //offset r30 (x30) at cfa-8
+                //offset r29 (x29) at cfa-16
+                ; .dword 0x1d0c4c00
+                ; .dword (0x9d029e10 as u32 as i32)
+                ; .dword 0x04
+                // empty next FDE:
+                ; .dword 0x0
+                ; .dword 0x0
+
+
+                ; skip_report:
+                ; mov x1, #1
+                ; add x1, xzr, x1, lsl #shadow_bit
+                ; add x1, x1, x0, lsr #3
+                ; ubfx x1, x1, #0, #(shadow_bit + 1)
+                ; ldrh w1, [x1, #0]
+                ; and x0, x0, #7
+                ; rev16 w1, w1
+                ; rbit w1, w1
+                ; lsr x1, x1, #16
+                ; lsr x1, x1, x0
+                ; .dword -717536768 // 0xd53b4200 //mrs x0, NZCV
+                ; and x1, x1, #$val
+                ; cmp x1, #$val
+                ; b.eq >done
+                ; b <report
+
+                ; done:
+            );};
+        }
+
+        let mut ops_check_mem_byte =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check!(ops_check_mem_byte, 0);
+        self.blob_check_mem_byte = Some(ops_check_mem_byte.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_halfword =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check!(ops_check_mem_halfword, 1);
+        self.blob_check_mem_halfword = Some(
+            ops_check_mem_halfword
+                .finalize()
+                .unwrap()
+                .into_boxed_slice(),
+        );
+
+        let mut ops_check_mem_dword =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check!(ops_check_mem_dword, 2);
+        self.blob_check_mem_dword =
+            Some(ops_check_mem_dword.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_qword =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check!(ops_check_mem_qword, 3);
+        self.blob_check_mem_qword =
+            Some(ops_check_mem_qword.finalize().unwrap().into_boxed_slice());
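+
+        // The widths below mirror what get_instruction_width() in helper.rs
+        // can report. Sizes testable with a single shadow bit use
+        // shadow_check! (1/2/4/8/16 bytes); multi-register and SIMD accesses
+        // (3/6/12/24/32/48/64 bytes) use shadow_check_exact!, which compares
+        // a full shadow mask instead of testing one bit.
+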
+        let mut ops_check_mem_16bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check!(ops_check_mem_16bytes, 4);
+        self.blob_check_mem_16bytes =
+            Some(ops_check_mem_16bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_3bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_3bytes, 3);
+        self.blob_check_mem_3bytes =
+            Some(ops_check_mem_3bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_6bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_6bytes, 6);
+        self.blob_check_mem_6bytes =
+            Some(ops_check_mem_6bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_12bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_12bytes, 12);
+        self.blob_check_mem_12bytes =
+            Some(ops_check_mem_12bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_24bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_24bytes, 24);
+        self.blob_check_mem_24bytes =
+            Some(ops_check_mem_24bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_32bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_32bytes, 32);
+        self.blob_check_mem_32bytes =
+            Some(ops_check_mem_32bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_48bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_48bytes, 48);
+        self.blob_check_mem_48bytes =
+            Some(ops_check_mem_48bytes.finalize().unwrap().into_boxed_slice());
+
+        let mut ops_check_mem_64bytes =
+            dynasmrt::VecAssembler::<dynasmrt::aarch64::Aarch64Relocation>::new(0);
+        shadow_check_exact!(ops_check_mem_64bytes, 64);
+        self.blob_check_mem_64bytes =
+            Some(ops_check_mem_64bytes.finalize().unwrap().into_boxed_slice());
+    }
+
+    /// Get the blob which checks a byte access
+    #[inline]
+    pub fn blob_check_mem_byte(&self) -> &[u8] {
+        self.blob_check_mem_byte.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a halfword access
+    #[inline]
+    pub fn blob_check_mem_halfword(&self) -> &[u8] {
+        self.blob_check_mem_halfword.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a dword access
+    #[inline]
+    pub fn blob_check_mem_dword(&self) -> &[u8] {
+        self.blob_check_mem_dword.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a qword access
+    #[inline]
+    pub fn blob_check_mem_qword(&self) -> &[u8] {
+        self.blob_check_mem_qword.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 16 byte access
+    #[inline]
+    pub fn blob_check_mem_16bytes(&self) -> &[u8] {
+        self.blob_check_mem_16bytes.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 3 byte access
+    #[inline]
+    pub fn blob_check_mem_3bytes(&self) -> &[u8] {
+        self.blob_check_mem_3bytes.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 6 byte access
+    #[inline]
+    pub fn blob_check_mem_6bytes(&self) -> &[u8] {
+        self.blob_check_mem_6bytes.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 12 byte access
+    #[inline]
+    pub fn blob_check_mem_12bytes(&self) -> &[u8] {
+        self.blob_check_mem_12bytes.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 24 byte access
+    #[inline]
+    pub fn blob_check_mem_24bytes(&self) -> &[u8] {
+        self.blob_check_mem_24bytes.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 32 byte access
+    #[inline]
+    pub fn blob_check_mem_32bytes(&self) -> &[u8] {
+        self.blob_check_mem_32bytes.as_ref().unwrap()
+    }
+
+    /// Get the blob which checks a 48 byte access
+    #[inline]
+    pub fn blob_check_mem_48bytes(&self) -> &[u8] {
+        self.blob_check_mem_48bytes.as_ref().unwrap()
+    }
+    /// Get the blob which checks a 64 byte access
+    #[inline]
+    pub fn blob_check_mem_64bytes(&self) -> &[u8] {
+        self.blob_check_mem_64bytes.as_ref().unwrap()
+    }
+}
+
+pub static mut ASAN_ERRORS: Option<AsanErrors> = None;
+
+#[derive(Serialize, Deserialize)]
+pub struct AsanErrorsObserver {
+    errors: OwnedPtr<Option<AsanErrors>>,
+}
+
+impl Observer for AsanErrorsObserver {
+    fn pre_exec(&mut self) -> Result<(), Error> {
+        unsafe {
+            if ASAN_ERRORS.is_some() {
+                ASAN_ERRORS.as_mut().unwrap().clear();
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl Named for AsanErrorsObserver {
+    #[inline]
+    fn name(&self) -> &str {
+        "AsanErrorsObserver"
+    }
+}
+
+impl AsanErrorsObserver {
+    pub fn new(errors: &'static Option<AsanErrors>) -> Self {
+        Self {
+            errors: OwnedPtr::Ptr(errors as *const Option<AsanErrors>),
+        }
+    }
+
+    pub fn new_owned(errors: Option<AsanErrors>) -> Self {
+        Self {
+            errors: OwnedPtr::Owned(Box::new(errors)),
+        }
+    }
+
+    pub fn new_from_ptr(errors: *const Option<AsanErrors>) -> Self {
+        Self {
+            errors: OwnedPtr::Ptr(errors),
+        }
+    }
+
+    pub fn errors(&self) -> Option<&AsanErrors> {
+        match &self.errors {
+            OwnedPtr::Ptr(p) => unsafe { p.as_ref().unwrap().as_ref() },
+            OwnedPtr::Owned(b) => b.as_ref().as_ref(),
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct AsanErrorsFeedback {
+    errors: Option<AsanErrors>,
+}
+
+impl<I> Feedback<I> for AsanErrorsFeedback
+where
+    I: Input + HasTargetBytes,
+{
+    fn is_interesting<OT: ObserversTuple>(
+        &mut self,
+        _input: &I,
+        observers: &OT,
+        _exit_kind: &ExitKind,
+    ) -> Result<u32, Error> {
+        let observer = observers
+            .match_first_type::<AsanErrorsObserver>()
+            .expect("An AsanErrorsFeedback needs an AsanErrorsObserver");
+        match observer.errors() {
+            None => Ok(0),
+            Some(errors) => {
+                if !errors.errors.is_empty() {
+                    self.errors = Some(errors.clone());
+                    Ok(1)
+                } else {
+                    Ok(0)
+                }
+            }
+        }
+    }
+
+    fn append_metadata(&mut self, testcase: &mut Testcase<I>) -> Result<(), Error> {
+        if let Some(errors) = &self.errors {
+            testcase.add_metadata(errors.clone());
+        }
+
+        Ok(())
+    }
+
+    fn discard_metadata(&mut self, _input: &I) -> Result<(), Error> {
+        self.errors = None;
+        Ok(())
+    }
+}
+
+impl Named for AsanErrorsFeedback {
+    #[inline]
+    fn name(&self) -> &str {
+        "AsanErrorsFeedback"
+    }
+}
+
+impl AsanErrorsFeedback {
+    pub fn new() -> Self {
+        Self { errors: None }
+    }
+}
+
+impl Default for AsanErrorsFeedback {
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/libafl_frida/src/gettls.c b/libafl_frida/src/gettls.c
new file mode 100644
index 0000000000..ba14df7489
--- /dev/null
+++ b/libafl_frida/src/gettls.c
@@ -0,0 +1,9 @@
+#ifdef _MSC_VER
+__declspec( thread ) int i = 0;
+#else
+__thread int i = 0;
+#endif
+
+void * get_tls_ptr() {
+    return (void*)&i;
+}
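+
+/* Taking the address of a thread-local variable yields a pointer into the
+ * calling thread's TLS block. This is a small portability shim that the Rust
+ * side can use, for example, to locate the TLS region when registering a
+ * thread with the ASAN runtime (an illustrative note; the actual call sites
+ * live in asan_rt.rs). */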
diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs
new file mode 100644
index 0000000000..f3e14fa672
--- /dev/null
+++ b/libafl_frida/src/helper.rs
@@ -0,0 +1,617 @@
+use libafl::inputs::{HasTargetBytes, Input};
+
+#[cfg(any(target_os = "linux", target_os = "android"))]
+use libafl::utils::find_mapping_for_path;
+
+use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter};
+
+#[cfg(target_arch = "aarch64")]
+use capstone::arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand};
+use capstone::{
+    arch::{self, BuildsCapstone},
+    Capstone, Insn,
+};
+
+use core::cell::RefCell;
+#[cfg(target_arch = "x86_64")]
+use frida_gum::instruction_writer::X86Register;
+#[cfg(target_arch = "aarch64")]
+use frida_gum::instruction_writer::{Aarch64Register, IndexMode};
+use frida_gum::{
+    instruction_writer::InstructionWriter,
+    stalker::{StalkerOutput, Transformer},
+    CpuContext,
+};
+use frida_gum::{Gum, Module, PageProtection};
+use num_traits::cast::FromPrimitive;
+
+use rangemap::RangeMap;
+use std::rc::Rc;
+
+use crate::{asan_rt::AsanRuntime, FridaOptions};
+
+/// A helper that feeds FridaInProcessExecutor with user-supplied instrumentation
+pub trait FridaHelper<'a> {
+    fn transformer(&self) -> &Transformer<'a>;
+
+    fn register_thread(&self);
+
+    fn pre_exec<I: Input + HasTargetBytes>(&mut self, input: &I);
+
+    fn post_exec<I: Input + HasTargetBytes>(&mut self, input: &I);
+
+    fn stalker_enabled(&self) -> bool;
+
+    fn map_ptr(&mut self) -> *mut u8;
+}
+
+pub const MAP_SIZE: usize = 64 * 1024;
+
+/// A helper that feeds FridaInProcessExecutor with edge-coverage instrumentation
+pub struct FridaInstrumentationHelper<'a> {
+    map: [u8; MAP_SIZE],
+    previous_pc: [u64; 1],
+    current_log_impl: u64,
+    /// Transformer that has to be passed to FridaInProcessExecutor
+    transformer: Option<Transformer<'a>>,
+    capstone: Capstone,
+    asan_runtime: Rc<RefCell<AsanRuntime>>,
+    ranges: RangeMap<usize, (u16, &'a str)>,
+    options: FridaOptions,
+    drcov_basic_blocks: Vec<DrCovBasicBlock>,
+}
+
+impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> {
+    fn transformer(&self) -> &Transformer<'a> {
+        self.transformer.as_ref().unwrap()
+    }
+
+    /// Register the current thread with the FridaInstrumentationHelper
+    fn register_thread(&self) {
+        self.asan_runtime.borrow().register_thread();
+    }
+
+    fn pre_exec<I: Input + HasTargetBytes>(&mut self, input: &I) {
+        let target_bytes = input.target_bytes();
+        let slice = target_bytes.as_slice();
+        //println!("target_bytes: {:02x?}", slice);
+        self.asan_runtime
+            .borrow()
+            .unpoison(slice.as_ptr() as usize, slice.len());
+    }
+
+    fn post_exec<I: Input + HasTargetBytes>(&mut self, input: &I) {
+        if self.options.drcov_enabled() {
+            let filename = format!(
+                "./coverage/{:016x}.drcov",
+                seahash::hash(input.target_bytes().as_slice())
+            );
+            DrCovWriter::new(&filename, &self.ranges, &mut self.drcov_basic_blocks).write();
+        }
+
+        if self.options.asan_enabled() {
+            if self.options.asan_detect_leaks() {
+                self.asan_runtime.borrow_mut().check_for_leaks();
+            }
+            self.asan_runtime.borrow_mut().reset_allocations();
+        }
+    }
+
+    fn stalker_enabled(&self) -> bool {
+        self.options.stalker_enabled()
+    }
+
+    fn map_ptr(&mut self) -> *mut u8 {
+        self.map.as_mut_ptr()
+    }
+}
+
+/// Helper function to get the size of a module's CODE section from frida
+pub fn get_module_size(module_name: &str) -> usize {
+    let mut code_size = 0;
+    let code_size_ref = &mut code_size;
+    Module::enumerate_ranges(module_name, PageProtection::ReadExecute, move |details| {
+        *code_size_ref = details.memory_range().size() as usize;
+        true
+    });
+
+    code_size
+}
+
+/// A minimal maybe_log implementation. We insert this into the transformed instruction stream
+/// every time we need a copy that is within a direct branch of the start of the transformed basic
+/// block.
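+// In plain Rust, the bookkeeping MAYBE_LOG_CODE performs for every executed
+// basic block is roughly (an illustrative sketch; `map` and `previous_pc` are
+// the fields of FridaInstrumentationHelper above):
+//
+//     let idx = (current_pc ^ previous_pc[0]) & (MAP_SIZE as u64 - 1);
+//     map[idx as usize] += 1;
+//     previous_pc[0] = current_pc >> 1;
+//
+// where `current_pc` is the pre-hashed block id that emit_coverage_mapping()
+// loads into the argument register before branching here.
+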
+#[cfg(target_arch = "x86_64")] +const MAYBE_LOG_CODE: [u8; 47] = [ + 0x9c, /* pushfq */ + 0x50, /* push rax */ + 0x51, /* push rcx */ + 0x52, /* push rdx */ + 0x48, 0x8d, 0x05, 0x24, 0x00, 0x00, 0x00, /* lea rax, sym._afl_area_ptr_ptr */ + 0x48, 0x8b, 0x00, /* mov rax, qword [rax] */ + 0x48, 0x8d, 0x0d, 0x22, 0x00, 0x00, 0x00, /* lea rcx, sym.previous_pc */ + 0x48, 0x8b, 0x11, /* mov rdx, qword [rcx] */ + 0x48, 0x8b, 0x12, /* mov rdx, qword [rdx] */ + 0x48, 0x31, 0xfa, /* xor rdx, rdi */ + 0xfe, 0x04, 0x10, /* inc byte [rax + rdx] */ + 0x48, 0xd1, 0xef, /* shr rdi, 1 */ + 0x48, 0x8b, 0x01, /* mov rax, qword [rcx] */ + 0x48, 0x89, 0x38, /* mov qword [rax], rdi */ + 0x5a, /* pop rdx */ + 0x59, /* pop rcx */ + 0x58, /* pop rax */ + 0x9d, /* popfq */ + 0xc3, /* ret */ + + /* Read-only data goes here: */ + /* uint8_t* afl_area_ptr */ + /* uint64_t* afl_prev_loc_ptr */ +]; + +#[cfg(target_arch = "aarch64")] +const MAYBE_LOG_CODE: [u8; 60] = [ + // __afl_area_ptr[current_pc ^ previous_pc]++; + // previous_pc = current_pc >> 1; + 0xE1, 0x0B, 0xBF, 0xA9, // stp x1, x2, [sp, -0x10]! + 0xE3, 0x13, 0xBF, 0xA9, // stp x3, x4, [sp, -0x10]! + // x0 = current_pc + 0xa1, 0x01, 0x00, 0x58, // ldr x1, #0x30, =__afl_area_ptr + 0x82, 0x01, 0x00, 0x58, // ldr x2, #0x38, =&previous_pc + 0x44, 0x00, 0x40, 0xf9, // ldr x4, [x2] (=previous_pc) + // __afl_area_ptr[current_pc ^ previous_pc]++; + 0x84, 0x00, 0x00, 0xca, // eor x4, x4, x0 + 0x84, 0x3c, 0x40, 0x92, // and x4, x4, 0xffff (=MAP_SIZE - 1) + //0x20, 0x13, 0x20, 0xd4, + 0x23, 0x68, 0x64, 0xf8, // ldr x3, [x1, x4] + 0x63, 0x04, 0x00, 0x91, // add x3, x3, #1 + 0x23, 0x68, 0x24, 0xf8, // str x3, [x1, x4] + // previous_pc = current_pc >> 1; + 0xe0, 0x07, 0x40, 0x8b, // add x0, xzr, x0, LSR #1 + 0x40, 0x00, 0x00, 0xf9, // str x0, [x2] + 0xE3, 0x13, 0xc1, 0xA8, // ldp x3, x4, [sp], #0x10 + 0xE1, 0x0B, 0xc1, 0xA8, // ldp x1, x2, [sp], #0x10 + 0xC0, 0x03, 0x5F, 0xD6, // ret + + // &afl_area_ptr + // &afl_prev_loc_ptr +]; + +#[cfg(target_arch = "aarch64")] +fn get_pc(context: &CpuContext) -> usize { + context.pc() as usize +} + +#[cfg(target_arch = "x86_64")] +fn get_pc(context: &CpuContext) -> usize { + context.rip() as usize +} + +/// The implementation of the FridaInstrumentationHelper +impl<'a> FridaInstrumentationHelper<'a> { + /// Constructor function to create a new FridaInstrumentationHelper, given a module_name. 
+ pub fn new( + gum: &'a Gum, + options: FridaOptions, + _harness_module_name: &str, + modules_to_instrument: &'a Vec<&str>, + ) -> Self { + let mut helper = Self { + map: [0u8; MAP_SIZE], + previous_pc: [0u64; 1], + current_log_impl: 0, + transformer: None, + capstone: Capstone::new() + .arm64() + .mode(arch::arm64::ArchMode::Arm) + .detail(true) + .build() + .expect("Failed to create Capstone object"), + asan_runtime: AsanRuntime::new(options), + ranges: RangeMap::new(), + options, + drcov_basic_blocks: vec![], + }; + + if options.stalker_enabled() { + for (id, module_name) in modules_to_instrument.iter().enumerate() { + let (lib_start, lib_end) = find_mapping_for_path(module_name); + println!("including range {:x}-{:x}", lib_start, lib_end); + helper + .ranges + .insert(lib_start..lib_end, (id as u16, module_name)); + } + + if helper.options.drcov_enabled() { + std::fs::create_dir_all("./coverage") + .expect("failed to create directory for coverage files"); + } + + let transformer = Transformer::from_callback(gum, |basic_block, output| { + let mut first = true; + for instruction in basic_block { + let instr = instruction.instr(); + let address = instr.address(); + //println!("address: {:x} contains: {:?}", address, helper.ranges.contains(&(address as usize))); + if helper.ranges.contains_key(&(address as usize)) { + if first { + first = false; + //println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); + if helper.options.coverage_enabled() { + helper.emit_coverage_mapping(address, &output); + } + if helper.options.drcov_enabled() { + instruction.put_callout(|context| { + let real_address = match helper + .asan_runtime + .borrow() + .real_address_for_stalked(get_pc(&context)) + { + Some(address) => *address, + _ => get_pc(&context), + }; + //let (range, (id, name)) = helper.ranges.get_key_value(&real_address).unwrap(); + //println!("{}:0x{:016x}", name, real_address - range.start); + helper + .drcov_basic_blocks + .push(DrCovBasicBlock::new(real_address, real_address + 4)); + }) + } + } + + if helper.options.asan_enabled() { + #[cfg(not(target_arch = "aarch64"))] + todo!("Implement ASAN for non-aarch64 targets"); + #[cfg(target_arch = "aarch64")] + if let Ok((basereg, indexreg, displacement, width)) = + helper.is_interesting_instruction(address, instr) + { + helper.emit_shadow_check( + address, + &output, + basereg, + indexreg, + displacement, + width, + ); + } + } + if helper.options.asan_enabled() || helper.options.drcov_enabled() { + helper.asan_runtime.borrow_mut().add_stalked_address( + output.writer().pc() as usize - 4, + address as usize, + ); + } + } + instruction.keep() + } + }); + helper.transformer = Some(transformer); + if helper.options.asan_enabled() || helper.options.drcov_enabled() { + helper.asan_runtime.borrow_mut().init(modules_to_instrument); + } + } + helper + } + + #[cfg(target_arch = "aarch64")] + #[inline] + fn get_writer_register(&self, reg: capstone::RegId) -> Aarch64Register { + let regint: u16 = reg.0; + Aarch64Register::from_u32(regint as u32).unwrap() + } + + #[cfg(target_arch = "aarch64")] + #[inline] + fn emit_shadow_check( + &self, + _address: u64, + output: &StalkerOutput, + basereg: capstone::RegId, + indexreg: capstone::RegId, + displacement: i32, + width: u32, + ) { + let writer = output.writer(); + + let basereg = self.get_writer_register(basereg); + let indexreg = if indexreg.0 != 0 { + Some(self.get_writer_register(indexreg)) + } else { + None + }; + + //writer.put_brk_imm(1); + + // Preserve x0, x1: + 
+        writer.put_stp_reg_reg_reg_offset(
+            Aarch64Register::X0,
+            Aarch64Register::X1,
+            Aarch64Register::Sp,
+            -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
+            IndexMode::PreAdjust,
+        );
+
+        // Make sure the base register is copied into x0
+        match basereg {
+            Aarch64Register::X0 | Aarch64Register::W0 => {}
+            Aarch64Register::X1 | Aarch64Register::W1 => {
+                writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1);
+            }
+            _ => {
+                if !writer.put_mov_reg_reg(Aarch64Register::X0, basereg) {
+                    writer.put_mov_reg_reg(Aarch64Register::W0, basereg);
+                }
+            }
+        }
+
+        // Make sure the index register is copied into x1
+        if indexreg.is_some() {
+            if let Some(indexreg) = indexreg {
+                match indexreg {
+                    Aarch64Register::X0 | Aarch64Register::W0 => {
+                        writer.put_ldr_reg_reg_offset(
+                            Aarch64Register::X1,
+                            Aarch64Register::Sp,
+                            0u64,
+                        );
+                    }
+                    Aarch64Register::X1 | Aarch64Register::W1 => {}
+                    _ => {
+                        if !writer.put_mov_reg_reg(Aarch64Register::X1, indexreg) {
+                            writer.put_mov_reg_reg(Aarch64Register::W1, indexreg);
+                        }
+                    }
+                }
+            }
+            writer.put_add_reg_reg_reg(
+                Aarch64Register::X0,
+                Aarch64Register::X0,
+                Aarch64Register::X1,
+            );
+        }
+
+        let displacement = displacement
+            + if basereg == Aarch64Register::Sp {
+                16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32
+            } else {
+                0
+            };
+
+        #[allow(clippy::comparison_chain)]
+        if displacement < 0 {
+            if displacement > -4096 {
+                // Subtract the displacement into x0
+                writer.put_sub_reg_reg_imm(
+                    Aarch64Register::X0,
+                    Aarch64Register::X0,
+                    displacement.abs() as u64,
+                );
+            } else {
+                let displacement_hi = displacement.abs() / 4096;
+                let displacement_lo = displacement.abs() % 4096;
+                writer.put_bytes(&(0xd1400000u32 | ((displacement_hi as u32) << 10)).to_le_bytes());
+                writer.put_sub_reg_reg_imm(
+                    Aarch64Register::X0,
+                    Aarch64Register::X0,
+                    displacement_lo as u64,
+                );
+            }
+        } else if displacement > 0 {
+            if displacement < 4096 {
+                // Add the displacement into x0
+                writer.put_add_reg_reg_imm(
+                    Aarch64Register::X0,
+                    Aarch64Register::X0,
+                    displacement as u64,
+                );
+            } else {
+                let displacement_hi = displacement / 4096;
+                let displacement_lo = displacement % 4096;
+                writer.put_bytes(&(0x91400000u32 | ((displacement_hi as u32) << 10)).to_le_bytes());
+                writer.put_add_reg_reg_imm(
+                    Aarch64Register::X0,
+                    Aarch64Register::X0,
+                    displacement_lo as u64,
+                );
+            }
+        }
+        // Insert the check_shadow_mem code blob
+        match width {
+            1 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_byte()),
+            2 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_halfword()),
+            3 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_3bytes()),
+            4 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_dword()),
+            6 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_6bytes()),
+            8 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_qword()),
+            12 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_12bytes()),
+            16 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_16bytes()),
+            24 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_24bytes()),
+            32 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_32bytes()),
+            _ => false,
+        };
+
+        // Restore x0, x1
+        assert!(writer.put_ldp_reg_reg_reg_offset(
+            Aarch64Register::X0,
+            Aarch64Register::X1,
+            Aarch64Register::Sp,
+            16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64,
+            IndexMode::PostAdjust,
+        ));
+    }
+
+    #[cfg(target_arch = "aarch64")]
+    #[inline]
+    fn get_instruction_width(&self, instr: &Insn, operands: &Vec<arch::ArchOperand>) -> u32 {
use capstone::arch::arm64::Arm64Insn as I; + use capstone::arch::arm64::Arm64Reg as R; + use capstone::arch::arm64::Arm64Vas as V; + + let num_registers = match instr.id().0.into() { + I::ARM64_INS_STP + | I::ARM64_INS_STXP + | I::ARM64_INS_STNP + | I::ARM64_INS_STLXP + | I::ARM64_INS_LDP + | I::ARM64_INS_LDXP + | I::ARM64_INS_LDNP => 2, + _ => 1, + }; + + let mnemonic = instr.mnemonic().unwrap(); + match mnemonic.as_bytes().last().unwrap() { + b'b' => return 1, + b'h' => return 2, + b'w' => return 4 * num_registers, + _ => (), + } + + if let Arm64Operand(operand) = operands.first().unwrap() { + if operand.vas != V::ARM64_VAS_INVALID { + let count_byte: u32 = if mnemonic.starts_with("st") || mnemonic.starts_with("ld") { + mnemonic.chars().nth(2).unwrap().to_digit(10).unwrap() + } else { + 1 + }; + + return match operand.vas { + V::ARM64_VAS_1B => 1 * count_byte, + V::ARM64_VAS_1H => 2 * count_byte, + V::ARM64_VAS_4B | V::ARM64_VAS_1S | V::ARM64_VAS_1D | V::ARM64_VAS_2H => { + 4 * count_byte + } + V::ARM64_VAS_8B + | V::ARM64_VAS_4H + | V::ARM64_VAS_2S + | V::ARM64_VAS_2D + | V::ARM64_VAS_1Q => 8 * count_byte, + V::ARM64_VAS_8H | V::ARM64_VAS_4S | V::ARM64_VAS_16B => 16 * count_byte, + V::ARM64_VAS_INVALID => { + panic!("should not be reached"); + } + }; + } else if let Arm64OperandType::Reg(operand) = operand.op_type { + match operand.0 as u32 { + R::ARM64_REG_W0..=R::ARM64_REG_W30 + | R::ARM64_REG_WZR + | R::ARM64_REG_WSP + | R::ARM64_REG_S0..=R::ARM64_REG_S31 => return 4 * num_registers, + R::ARM64_REG_D0..=R::ARM64_REG_D31 => return 8 * num_registers, + R::ARM64_REG_Q0..=R::ARM64_REG_Q31 => return 16, + _ => (), + } + }; + }; + + 8 * num_registers + } + + #[cfg(target_arch = "aarch64")] + #[inline] + fn is_interesting_instruction( + &self, + _address: u64, + instr: &Insn, + ) -> Result<(capstone::RegId, capstone::RegId, i32, u32), ()> { + // We have to ignore these instructions. Simulating them with their side effects is + // complex, to say the least. 
+        match instr.mnemonic().unwrap() {
+            "ldaxr" | "stlxr" | "ldxr" | "stxr" | "ldar" | "stlr" | "ldarb" | "ldarh" | "ldaxp"
+            | "ldaxrb" | "ldaxrh" | "stlrb" | "stlrh" | "stlxp" | "stlxrb" | "stlxrh" | "ldxrb"
+            | "ldxrh" | "stxrb" | "stxrh" => return Err(()),
+            _ => (),
+        }
+
+        let operands = self
+            .capstone
+            .insn_detail(instr)
+            .unwrap()
+            .arch_detail()
+            .operands();
+        if operands.len() < 2 {
+            return Err(());
+        }
+
+        if let Arm64Operand(arm64operand) = operands.last().unwrap() {
+            if let Arm64OperandType::Mem(opmem) = arm64operand.op_type {
+                return Ok((
+                    opmem.base(),
+                    opmem.index(),
+                    opmem.disp(),
+                    self.get_instruction_width(instr, &operands),
+                ));
+            }
+        }
+
+        Err(())
+    }
+
+    #[inline]
+    fn emit_coverage_mapping(&mut self, address: u64, output: &StalkerOutput) {
+        let writer = output.writer();
+        if self.current_log_impl == 0
+            || !writer.can_branch_directly_to(self.current_log_impl)
+            || !writer.can_branch_directly_between(writer.pc() + 128, self.current_log_impl)
+        {
+            let after_log_impl = writer.code_offset() + 1;
+
+            #[cfg(target_arch = "x86_64")]
+            writer.put_jmp_near_label(after_log_impl);
+            #[cfg(target_arch = "aarch64")]
+            writer.put_b_label(after_log_impl);
+
+            self.current_log_impl = writer.pc();
+            writer.put_bytes(&MAYBE_LOG_CODE);
+            let prev_loc_pointer = self.previous_pc.as_ptr() as usize;
+            let map_pointer = self.map.as_ptr() as usize;
+
+            writer.put_bytes(&map_pointer.to_ne_bytes());
+            writer.put_bytes(&prev_loc_pointer.to_ne_bytes());
+
+            writer.put_label(after_log_impl);
+        }
+        #[cfg(target_arch = "x86_64")]
+        {
+            writer.put_lea_reg_reg_offset(
+                X86Register::Rsp,
+                X86Register::Rsp,
+                -(frida_gum_sys::GUM_RED_ZONE_SIZE as i32),
+            );
+            writer.put_push_reg(X86Register::Rdi);
+            writer.put_mov_reg_address(
+                X86Register::Rdi,
+                ((address >> 4) ^ (address << 8)) & (MAP_SIZE - 1) as u64,
+            );
+            writer.put_call_address(self.current_log_impl);
+            writer.put_pop_reg(X86Register::Rdi);
+            writer.put_lea_reg_reg_offset(
+                X86Register::Rsp,
+                X86Register::Rsp,
+                frida_gum_sys::GUM_RED_ZONE_SIZE as i32,
+            );
+        }
+        #[cfg(target_arch = "aarch64")]
+        {
+            writer.put_stp_reg_reg_reg_offset(
+                Aarch64Register::Lr,
+                Aarch64Register::X0,
+                Aarch64Register::Sp,
+                -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
+                IndexMode::PreAdjust,
+            );
+            writer.put_ldr_reg_u64(
+                Aarch64Register::X0,
+                ((address >> 4) ^ (address << 8)) & (MAP_SIZE - 1) as u64,
+            );
+            writer.put_bl_imm(self.current_log_impl);
+            writer.put_ldp_reg_reg_reg_offset(
+                Aarch64Register::Lr,
+                Aarch64Register::X0,
+                Aarch64Register::Sp,
+                16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64,
+                IndexMode::PostAdjust,
+            );
+        }
+    }
+}
diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs
new file mode 100644
index 0000000000..8f2940ed26
--- /dev/null
+++ b/libafl_frida/src/lib.rs
@@ -0,0 +1,122 @@
+pub mod asan_rt;
+pub mod helper;
+
+/// A representation of the various Frida options
+#[derive(Clone, Copy, Debug)]
+pub struct FridaOptions {
+    enable_asan: bool,
+    enable_asan_leak_detection: bool,
+    enable_asan_continue_after_error: bool,
+    enable_asan_allocation_backtraces: bool,
+    enable_coverage: bool,
+    enable_drcov: bool,
+}
+
+impl FridaOptions {
+    /// Parse the frida options from the LIBAFL_FRIDA_OPTIONS environment variable.
+    ///
+    /// Options are ':' separated, and each option is a 'name=value' string.
+ pub fn parse_env_options() -> Self { + let mut options = Self::default(); + + if let Ok(env_options) = std::env::var("LIBAFL_FRIDA_OPTIONS") { + for option in env_options.trim().to_lowercase().split(':') { + let (name, mut value) = + option.split_at(option.find('=').expect("Expected a '=' in option string")); + value = value.get(1..).unwrap(); + match name { + "asan" => { + options.enable_asan = value.parse().unwrap(); + + #[cfg(not(target_arch = "aarch64"))] + if options.enable_asan { + panic!("ASAN is not currently supported on targets other than aarch64"); + } + } + "asan-detect-leaks" => { + options.enable_asan_leak_detection = value.parse().unwrap(); + } + "asan-continue-after-error" => { + options.enable_asan_continue_after_error = value.parse().unwrap(); + } + "asan-allocation-backtraces" => { + options.enable_asan_allocation_backtraces = value.parse().unwrap(); + } + "coverage" => { + options.enable_coverage = value.parse().unwrap(); + } + "drcov" => { + options.enable_drcov = value.parse().unwrap(); + #[cfg(not(target_arch = "aarch64"))] + if options.enable_drcov { + panic!( + "DrCov is not currently supported on targets other than aarch64" + ); + } + } + _ => { + panic!("unknown FRIDA option: '{}'", option); + } + } + } + } + + options + } + + /// Is ASAN enabled? + #[inline] + pub fn asan_enabled(&self) -> bool { + self.enable_asan + } + + /// Is coverage enabled? + #[inline] + pub fn coverage_enabled(&self) -> bool { + self.enable_coverage + } + + /// Is DrCov enabled? + #[inline] + pub fn drcov_enabled(&self) -> bool { + self.enable_drcov + } + + /// Should ASAN detect leaks + #[inline] + pub fn asan_detect_leaks(&self) -> bool { + self.enable_asan_leak_detection + } + + /// Should ASAN continue after a memory error is detected + #[inline] + pub fn asan_continue_after_error(&self) -> bool { + self.enable_asan_continue_after_error + } + + /// Should ASAN gather (and report) allocation-/free-site backtraces + #[inline] + pub fn asan_allocation_backtraces(&self) -> bool { + self.enable_asan_allocation_backtraces + } + + /// Whether stalker should be enabled. I.e. whether at least one stalker requiring option is + /// enabled. 
+    #[inline]
+    pub fn stalker_enabled(&self) -> bool {
+        self.enable_asan || self.enable_coverage || self.enable_drcov
+    }
+}
+
+impl Default for FridaOptions {
+    fn default() -> Self {
+        Self {
+            enable_asan: false,
+            enable_asan_leak_detection: false,
+            enable_asan_continue_after_error: false,
+            enable_asan_allocation_backtraces: true,
+            enable_coverage: true,
+            enable_drcov: false,
+        }
+    }
+}
diff --git a/libafl_targets/Cargo.toml b/libafl_targets/Cargo.toml
index c2da88266d..41cbc55236 100644
--- a/libafl_targets/Cargo.toml
+++ b/libafl_targets/Cargo.toml
@@ -22,3 +22,4 @@ pcguard = ["pcguard_hitcounts"]
 cc = { version = "1.0", features = ["parallel"] }
 
 [dependencies]
+rangemap = "0.1.10"
diff --git a/libafl_targets/src/drcov.rs b/libafl_targets/src/drcov.rs
new file mode 100644
index 0000000000..5b65e1549d
--- /dev/null
+++ b/libafl_targets/src/drcov.rs
@@ -0,0 +1,90 @@
+use rangemap::RangeMap;
+use std::{
+    fs::File,
+    io::{BufWriter, Write},
+};
+
+#[derive(Clone, Copy)]
+pub struct DrCovBasicBlock {
+    start: usize,
+    end: usize,
+}
+
+pub struct DrCovWriter<'a> {
+    writer: BufWriter<File>,
+    module_mapping: &'a RangeMap<usize, (u16, &'a str)>,
+    basic_blocks: &'a mut Vec<DrCovBasicBlock>,
+}
+
+#[repr(C)]
+struct DrCovBasicBlockEntry {
+    start: u32,
+    size: u16,
+    mod_id: u16,
+}
+
+impl DrCovBasicBlock {
+    pub fn new(start: usize, end: usize) -> Self {
+        Self { start, end }
+    }
+}
+impl<'a> DrCovWriter<'a> {
+    pub fn new(
+        path: &str,
+        module_mapping: &'a RangeMap<usize, (u16, &'a str)>,
+        basic_blocks: &'a mut Vec<DrCovBasicBlock>,
+    ) -> Self {
+        Self {
+            writer: BufWriter::new(
+                File::create(path).expect("unable to create file for coverage data"),
+            ),
+            module_mapping,
+            basic_blocks,
+        }
+    }
+
+    pub fn write(&mut self) {
+        self.writer
+            .write_all(b"DRCOV VERSION: 2\nDRCOV FLAVOR: libafl\n")
+            .unwrap();
+
+        let modules: Vec<(&std::ops::Range<usize>, &(u16, &str))> =
+            self.module_mapping.iter().collect();
+        self.writer
+            .write_all(format!("Module Table: version 2, count {}\n", modules.len()).as_bytes())
+            .unwrap();
+        self.writer
+            .write_all(b"Columns: id, base, end, entry, checksum, timestamp, path\n")
+            .unwrap();
+        for module in modules {
+            let (range, (id, path)) = module;
+            self.writer
+                .write_all(
+                    format!(
+                        "{:03}, 0x{:x}, 0x{:x}, 0x00000000, 0x00000000, 0x00000000, {}\n",
+                        id, range.start, range.end, path
+                    )
+                    .as_bytes(),
+                )
+                .unwrap();
+        }
+        self.writer
+            .write_all(format!("BB Table: {} bbs\n", self.basic_blocks.len()).as_bytes())
+            .unwrap();
+        for block in self.basic_blocks.drain(0..) {
+            let (range, (id, _)) = self.module_mapping.get_key_value(&block.start).unwrap();
+            let basic_block = DrCovBasicBlockEntry {
+                start: (block.start - range.start) as u32,
+                size: (block.end - block.start) as u16,
+                mod_id: *id,
+            };
+            self.writer
+                .write_all(unsafe {
+                    std::slice::from_raw_parts(&basic_block as *const _ as *const u8, 8)
+                })
+                .unwrap();
+        }
+
+        self.writer.flush().unwrap();
+    }
+}
diff --git a/libafl_targets/src/lib.rs b/libafl_targets/src/lib.rs
index 9ab270f050..03a0a0e1e0 100644
--- a/libafl_targets/src/lib.rs
+++ b/libafl_targets/src/lib.rs
@@ -19,3 +19,5 @@ pub use libfuzzer::*;
 pub mod cmplog;
 #[cfg(feature = "cmplog")]
 pub use cmplog::*;
+
+pub mod drcov;
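
A hypothetical invocation, to illustrate the option format that
FridaOptions::parse_env_options() expects (':'-separated 'name=value' pairs;
boolean values go through str::parse::<bool>(), so 'true'/'false' are the
accepted spellings; the fuzzer binary and harness names here are illustrative):

    LIBAFL_FRIDA_OPTIONS="asan=true:asan-continue-after-error=true:drcov=true" \
        ./frida_fuzzer ./libpng-harness.so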