From 3a21ad59a402af59ca8e18f46a00cd4f2be86502 Mon Sep 17 00:00:00 2001 From: s1341 Date: Tue, 25 May 2021 14:45:06 +0300 Subject: [PATCH] Hook using frida gum interceptor instead of gothook (#112) * Move from gothook to frida-based hooks * Force link against libc++ * Clippy + cleanup prints * exclude ranges * Add back guard pages; Implement libc hooks * Bump frida-rust version * Add hooks for mmap/munmap, as per issue #105 * Refactor to get rid of global allocator singleton * Cleanup imports; Fix free out-of-range; Move to fixed addresses for asan allocatoins * use frida-rust from crates.io now that it has caught up * cargo fmt * Clippy fixes * Better clippy fix * More clippy fix * Formatting * Review changes --- fuzzers/frida_libpng/Cargo.toml | 5 +- fuzzers/frida_libpng/src/fuzzer.rs | 34 +- libafl/src/events/llmp.rs | 4 - libafl/src/executors/inprocess.rs | 4 +- libafl_frida/Cargo.toml | 10 +- libafl_frida/build.rs | 3 + libafl_frida/src/alloc.rs | 408 ++++ libafl_frida/src/asan_errors.rs | 569 ++++++ libafl_frida/src/asan_rt.rs | 2816 ++++++++++++++++------------ libafl_frida/src/helper.rs | 53 +- libafl_frida/src/lib.rs | 7 +- 11 files changed, 2664 insertions(+), 1249 deletions(-) create mode 100644 libafl_frida/src/alloc.rs create mode 100644 libafl_frida/src/asan_errors.rs diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml index 9dfcfb3a4f..a68944cdaf 100644 --- a/fuzzers/frida_libpng/Cargo.toml +++ b/fuzzers/frida_libpng/Cargo.toml @@ -21,11 +21,10 @@ num_cpus = "1.0" which = "4.1" [target.'cfg(unix)'.dependencies] -libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", "llmp_bind_public" ] } #, "llmp_small_maps", "llmp_debug"]} +libafl = { path = "../../libafl/", features = [ "std", "llmp_bind_public" ] } #, "llmp_small_maps", "llmp_debug"]} libafl_frida = { path = "../../libafl_frida" } capstone = "0.8.0" -frida-gum = { version = "0.5.0", git = "https://github.com/frida/frida-rust", features = [ 
"auto-download", "backtrace", "event-sink", "invocation-listener"], rev = "69f5b8236ab4b66296507803b4b7bfec79700e84" } -#frida-gum = { version = "0.5.0", path = "../../../frida-rust/frida-gum", features = [ "auto-download", "event-sink", "invocation-listener"] } +frida-gum = { version = "0.5.1", features = [ "auto-download", "backtrace", "event-sink", "invocation-listener"] } lazy_static = "1.4.0" libc = "0.2" libloading = "0.7.0" diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index 3add7b1596..e35436cde3 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -40,7 +40,7 @@ use libafl::{ use frida_gum::{ stalker::{NoneEventSink, Stalker}, - Gum, NativePointer, + Gum, MemoryRange, NativePointer, }; use std::{ @@ -53,7 +53,7 @@ use std::{ }; use libafl_frida::{ - asan_rt::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS}, + asan_errors::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS}, helper::{FridaHelper, FridaInstrumentationHelper, MAP_SIZE}, FridaOptions, }; @@ -190,17 +190,15 @@ where helper: &'c mut FH, timeout: Duration, ) -> Self { - let stalker = Stalker::new(gum); + let mut stalker = Stalker::new(gum); - // Let's exclude the main module and libc.so at least: - //stalker.exclude(&MemoryRange::new( - //Module::find_base_address(&env::args().next().unwrap()), - //get_module_size(&env::args().next().unwrap()), - //)); - //stalker.exclude(&MemoryRange::new( - //Module::find_base_address("libc.so"), - //get_module_size("libc.so"), - //)); + for range in helper.ranges().gaps(&(0..usize::MAX)) { + println!("excluding range: {:x}-{:x}", range.start, range.end); + stalker.exclude(&MemoryRange::new( + NativePointer(range.start as *mut c_void), + range.end - range.start, + )); + } Self { base: TimeoutExecutor::new(base, timeout), @@ -385,7 +383,8 @@ unsafe fn fuzz( // RNG StdRand::with_seed(current_nanos()), // Corpus that will be evolved, we keep it in memory for performance - 
InMemoryCorpus::new(), + OnDiskCorpus::new(PathBuf::from("./corpus_discovered")).unwrap(), + //InMemoryCorpus::new(), // Corpus in which we store solutions (crashes in this example), // on disk so the user can get them after stopping the fuzzer OnDiskCorpus::new_save_meta( @@ -440,15 +439,6 @@ unsafe fn fuzz( &mut frida_helper, Duration::new(10, 0), ); - // Let's exclude the main module and libc.so at least: - //executor.stalker.exclude(&MemoryRange::new( - //Module::find_base_address(&env::args().next().unwrap()), - //get_module_size(&env::args().next().unwrap()), - //)); - //executor.stalker.exclude(&MemoryRange::new( - //Module::find_base_address("libc.so"), - //get_module_size("libc.so"), - //)); // In case the corpus is empty (on first run), reset if state.corpus().count() < 1 { diff --git a/libafl/src/events/llmp.rs b/libafl/src/events/llmp.rs index 97b5c59ab2..738b5de3d8 100644 --- a/libafl/src/events/llmp.rs +++ b/libafl/src/events/llmp.rs @@ -851,10 +851,6 @@ where println!("We're a client, let's fuzz :)"); - for (var, val) in std::env::vars() { - println!("ENV VARS: {:?}: {:?}", var, val); - } - // If we're restarting, deserialize the old state. let (state, mut mgr) = match receiver.recv_buf()? 
{ None => { diff --git a/libafl/src/executors/inprocess.rs b/libafl/src/executors/inprocess.rs index 8dd3e59112..a6058e2f96 100644 --- a/libafl/src/executors/inprocess.rs +++ b/libafl/src/executors/inprocess.rs @@ -430,8 +430,7 @@ mod unix_signal_handler { { println!("Double crash\n"); #[cfg(target_os = "android")] - let si_addr = - { ((_info._pad[0] as usize) | ((_info._pad[1] as usize) << 32)) as usize }; + let si_addr = (_info._pad[0] as i64) | ((_info._pad[1] as i64) << 32); #[cfg(not(target_os = "android"))] let si_addr = { _info.si_addr() as usize }; @@ -466,6 +465,7 @@ mod unix_signal_handler { #[cfg(feature = "std")] println!("Child crashed!"); + #[allow(clippy::non_ascii_literal)] #[cfg(all( feature = "std", any(target_os = "linux", target_os = "android"), diff --git a/libafl_frida/Cargo.toml b/libafl_frida/Cargo.toml index bf64178fd0..712f906017 100644 --- a/libafl_frida/Cargo.toml +++ b/libafl_frida/Cargo.toml @@ -21,10 +21,8 @@ libc = "0.2.92" hashbrown = "0.11" libloading = "0.7.0" rangemap = "0.1.10" -frida-gum = { version = "0.5.0", git = "https://github.com/frida/frida-rust", features = [ "auto-download", "backtrace", "event-sink", "invocation-listener"], rev = "69f5b8236ab4b66296507803b4b7bfec79700e84" } -frida-gum-sys = { version = "0.3.0", git = "https://github.com/frida/frida-rust", features = [ "auto-download", "event-sink", "invocation-listener"], rev = "69f5b8236ab4b66296507803b4b7bfec79700e84" } -#frida-gum = { version = "0.5.0", path = "../../frida-rust/frida-gum", features = [ "auto-download", "backtrace", "event-sink", "invocation-listener"] } -#frida-gum-sys = { version = "0.3.0", path = "../../frida-rust/frida-gum-sys", features = [ "auto-download", "event-sink", "invocation-listener"] } +frida-gum-sys = { version = "0.3", features = [ "auto-download", "event-sink", "invocation-listener"] } +frida-gum = { version = "0.5.1", features = [ "auto-download", "backtrace", "event-sink", "invocation-listener"] } core_affinity = { version = 
"0.5", git = "https://github.com/s1341/core_affinity_rs" } regex = "1.4" dynasmrt = "1.0.1" @@ -35,6 +33,4 @@ serde = "1.0" backtrace = { version = "0.3.58", default-features = false, features = ["std", "serde"] } num-traits = "0.2.14" ahash = "0.7" - -[target.'cfg(unix)'.dependencies] -gothook = { version = "0.1" } +paste = "1.0" diff --git a/libafl_frida/build.rs b/libafl_frida/build.rs index ebd77b48cc..3dbcc85dee 100644 --- a/libafl_frida/build.rs +++ b/libafl_frida/build.rs @@ -2,4 +2,7 @@ fn main() { cc::Build::new().file("src/gettls.c").compile("libgettls.a"); + + // Force linking against libc++ + println!("cargo:rustc-link-lib=dylib=c++"); } diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs new file mode 100644 index 0000000000..ca5a440121 --- /dev/null +++ b/libafl_frida/src/alloc.rs @@ -0,0 +1,408 @@ +use hashbrown::HashMap; +use libafl::bolts::os::walk_self_maps; +use nix::{ + libc::memset, + sys::mman::{mmap, MapFlags, ProtFlags}, +}; + +use backtrace::Backtrace; +#[cfg(unix)] +use libc::{sysconf, _SC_PAGESIZE}; +use rangemap::RangeSet; +use serde::{Deserialize, Serialize}; +use std::{ffi::c_void, io}; + +use crate::{ + asan_errors::{AsanError, AsanErrors}, + FridaOptions, +}; + +pub(crate) struct Allocator { + options: FridaOptions, + page_size: usize, + shadow_offset: usize, + shadow_bit: usize, + pre_allocated_shadow: bool, + allocations: HashMap, + shadow_pages: RangeSet, + allocation_queue: HashMap>, + largest_allocation: usize, + base_mapping_addr: usize, + current_mapping_addr: usize, +} + +macro_rules! 
map_to_shadow { + ($self:expr, $address:expr) => { + (($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1) + }; +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub(crate) struct AllocationMetadata { + pub address: usize, + pub size: usize, + pub actual_size: usize, + pub allocation_site_backtrace: Option, + pub release_site_backtrace: Option, + pub freed: bool, + pub is_malloc_zero: bool, +} + +impl Allocator { + pub fn new(options: FridaOptions) -> Self { + let ret = unsafe { sysconf(_SC_PAGESIZE) }; + if ret < 0 { + panic!("Failed to read pagesize {:?}", io::Error::last_os_error()); + } + #[allow(clippy::cast_sign_loss)] + let page_size = ret as usize; + // probe to find a usable shadow bit: + let mut shadow_bit: usize = 0; + for try_shadow_bit in &[46usize, 36usize] { + let addr: usize = 1 << try_shadow_bit; + if unsafe { + mmap( + addr as *mut c_void, + page_size, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_PRIVATE + | MapFlags::MAP_ANONYMOUS + | MapFlags::MAP_FIXED + | MapFlags::MAP_NORESERVE, + -1, + 0, + ) + } + .is_ok() + { + shadow_bit = *try_shadow_bit; + break; + } + } + assert!(shadow_bit != 0); + + // attempt to pre-map the entire shadow-memory space + let addr: usize = 1 << shadow_bit; + let pre_allocated_shadow = unsafe { + mmap( + addr as *mut c_void, + addr + addr, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_ANONYMOUS + | MapFlags::MAP_FIXED + | MapFlags::MAP_PRIVATE + | MapFlags::MAP_NORESERVE, + -1, + 0, + ) + } + .is_ok(); + + Self { + options, + page_size, + pre_allocated_shadow, + shadow_offset: 1 << shadow_bit, + shadow_bit, + allocations: HashMap::new(), + shadow_pages: RangeSet::new(), + allocation_queue: HashMap::new(), + largest_allocation: 0, + base_mapping_addr: addr + addr + addr, + current_mapping_addr: addr + addr + addr, + } + } + + /// Retreive the shadow bit used by this allocator. 
+ pub fn shadow_bit(&self) -> u32 { + self.shadow_bit as u32 + } + + #[inline] + fn round_up_to_page(&self, size: usize) -> usize { + ((size + self.page_size) / self.page_size) * self.page_size + } + + #[inline] + fn round_down_to_page(&self, value: usize) -> usize { + (value / self.page_size) * self.page_size + } + + fn find_smallest_fit(&mut self, size: usize) -> Option { + let mut current_size = size; + while current_size <= self.largest_allocation { + if self.allocation_queue.contains_key(¤t_size) { + if let Some(metadata) = self.allocation_queue.entry(current_size).or_default().pop() + { + return Some(metadata); + } + } + current_size *= 2; + } + None + } + + pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void { + let mut is_malloc_zero = false; + let size = if size == 0 { + println!("zero-sized allocation!"); + is_malloc_zero = true; + 16 + } else { + size + }; + if size > (1 << 30) { + panic!("Allocation is too large: 0x{:x}", size); + } + let rounded_up_size = self.round_up_to_page(size) + 2 * self.page_size; + + let metadata = if let Some(mut metadata) = self.find_smallest_fit(rounded_up_size) { + //println!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size); + metadata.is_malloc_zero = is_malloc_zero; + metadata.size = size; + if self.options.enable_asan_allocation_backtraces { + metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); + } + metadata + } else { + let mapping = match mmap( + self.current_mapping_addr as *mut c_void, + rounded_up_size, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_ANONYMOUS + | MapFlags::MAP_PRIVATE + | MapFlags::MAP_FIXED + | MapFlags::MAP_NORESERVE, + -1, + 0, + ) { + Ok(mapping) => mapping as usize, + Err(err) => { + println!("An error occurred while mapping memory: {:?}", err); + return std::ptr::null_mut(); + } + }; + self.current_mapping_addr += rounded_up_size; + + 
self.map_shadow_for_region(mapping, mapping + rounded_up_size, false); + + let mut metadata = AllocationMetadata { + address: mapping, + size, + actual_size: rounded_up_size, + ..AllocationMetadata::default() + }; + + if self.options.enable_asan_allocation_backtraces { + metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); + } + + metadata + }; + + self.largest_allocation = std::cmp::max(self.largest_allocation, metadata.actual_size); + // unpoison the shadow memory for the allocation itself + Self::unpoison( + map_to_shadow!(self, metadata.address + self.page_size), + size, + ); + let address = (metadata.address + self.page_size) as *mut c_void; + + self.allocations + .insert(metadata.address + self.page_size, metadata); + //println!("serving address: {:?}, size: {:x}", address, size); + address + } + + pub unsafe fn release(&mut self, ptr: *mut c_void) { + let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) { + metadata + } else { + if !ptr.is_null() { + AsanErrors::get_mut().report_error( + AsanError::UnallocatedFree((ptr as usize, Backtrace::new())), + None, + ); + } + return; + }; + + if metadata.freed { + AsanErrors::get_mut().report_error( + AsanError::DoubleFree((ptr as usize, metadata.clone(), Backtrace::new())), + None, + ); + } + let shadow_mapping_start = map_to_shadow!(self, ptr as usize); + + metadata.freed = true; + if self.options.enable_asan_allocation_backtraces { + metadata.release_site_backtrace = Some(Backtrace::new_unresolved()); + } + + // poison the shadow memory for the allocation + Self::poison(shadow_mapping_start, metadata.size); + } + + pub fn find_metadata( + &mut self, + ptr: usize, + hint_base: usize, + ) -> Option<&mut AllocationMetadata> { + let mut metadatas: Vec<&mut AllocationMetadata> = self.allocations.values_mut().collect(); + metadatas.sort_by(|a, b| a.address.cmp(&b.address)); + let mut offset_to_closest = i64::max_value(); + let mut closest = None; + for metadata in 
metadatas {
+            let new_offset = if hint_base == metadata.address {
+                (ptr as i64 - metadata.address as i64).abs()
+            } else {
+                std::cmp::min(
+                    offset_to_closest,
+                    (ptr as i64 - metadata.address as i64).abs(),
+                )
+            };
+            if new_offset < offset_to_closest {
+                offset_to_closest = new_offset;
+                closest = Some(metadata);
+            }
+        }
+        closest
+    }
+
+    pub fn reset(&mut self) {
+        for (address, mut allocation) in self.allocations.drain() {
+            // First poison the memory.
+            Self::poison(map_to_shadow!(self, address), allocation.size);
+
+            // Reset the allocation metadata object
+            allocation.size = 0;
+            allocation.freed = false;
+            allocation.allocation_site_backtrace = None;
+            allocation.release_site_backtrace = None;
+
+            // Move the allocation from the allocations to the to-be-allocated queues
+            self.allocation_queue
+                .entry(allocation.actual_size)
+                .or_default()
+                .push(allocation);
+        }
+    }
+
+    pub fn get_usable_size(&self, ptr: *mut c_void) -> usize {
+        match self.allocations.get(&(ptr as usize)) {
+            Some(metadata) => metadata.size,
+            None => {
+                panic!(
+                    "Attempted to get_usable_size on a pointer ({:?}) which was not allocated!",
+                    ptr
+                );
+            }
+        }
+    }
+
+    fn unpoison(start: usize, size: usize) {
+        //println!("unpoisoning {:x} for {:x}", start, size / 8 + 1);
+        unsafe {
+            //println!("memset: {:?}", start as *mut c_void);
+            memset(start as *mut c_void, 0xff, size / 8);
+
+            let remainder = size % 8;
+            if remainder > 0 {
+                //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
+                memset(
+                    (start + size / 8) as *mut c_void,
+                    (0xff << (8 - remainder)) & 0xff,
+                    1,
+                );
+            }
+        }
+    }
+
+    pub fn poison(start: usize, size: usize) {
+        //println!("poisoning {:x} for {:x}", start, size / 8 + 1);
+        unsafe {
+            //println!("memset: {:?}", start as *mut c_void);
+            memset(start as *mut c_void, 0x00, size / 8);
+
+            let remainder = size % 8;
+            if remainder > 0 {
+                //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
+                memset((start + size / 8) as *mut 
c_void, 0x00, 1); + } + } + } + + /// Map shadow memory for a region, and optionally unpoison it + pub fn map_shadow_for_region( + &mut self, + start: usize, + end: usize, + unpoison: bool, + ) -> (usize, usize) { + //println!("start: {:x}, end {:x}, size {:x}", start, end, end - start); + + let shadow_mapping_start = map_to_shadow!(self, start); + + if !self.pre_allocated_shadow { + let shadow_start = self.round_down_to_page(shadow_mapping_start); + let shadow_end = + self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start; + for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) { + //println!("range: {:x}-{:x}, pagesize: {}", range.start, range.end, self.page_size); + unsafe { + mmap( + range.start as *mut c_void, + range.end - range.start, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE, + -1, + 0, + ) + .expect("An error occurred while mapping shadow memory"); + } + } + + self.shadow_pages.insert(shadow_start..shadow_end); + } + + //println!("shadow_mapping_start: {:x}, shadow_size: {:x}", shadow_mapping_start, (end - start) / 8); + if unpoison { + Self::unpoison(shadow_mapping_start, end - start); + } + + (shadow_mapping_start, (end - start) / 8) + } + + pub fn map_to_shadow(&self, start: usize) -> usize { + map_to_shadow!(self, start) + } + + #[inline] + pub fn is_managed(&self, ptr: *mut c_void) -> bool { + //self.allocations.contains_key(&(ptr as usize)) + self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr + } + + pub fn check_for_leaks(&self) { + for metadata in self.allocations.values() { + if !metadata.freed { + AsanErrors::get_mut() + .report_error(AsanError::Leak((metadata.address, metadata.clone())), None); + } + } + } + + /// Unpoison all the memory that is currently mapped with read/write permissions. 
+ pub fn unpoison_all_existing_memory(&mut self) { + walk_self_maps(&mut |start, end, permissions, _path| { + if permissions.as_bytes()[0] == b'r' || permissions.as_bytes()[1] == b'w' { + if self.pre_allocated_shadow && start == 1 << self.shadow_bit { + return false; + } + self.map_shadow_for_region(start, end, true); + } + false + }); + } +} diff --git a/libafl_frida/src/asan_errors.rs b/libafl_frida/src/asan_errors.rs new file mode 100644 index 0000000000..81f0ce5d4e --- /dev/null +++ b/libafl_frida/src/asan_errors.rs @@ -0,0 +1,569 @@ +use backtrace::Backtrace; +use capstone::{arch::BuildsCapstone, Capstone}; +use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity}; +use frida_gum::interceptor::Interceptor; +use libafl::{ + bolts::{os::find_mapping_for_address, ownedref::OwnedPtr, tuples::Named}, + corpus::Testcase, + events::EventFirer, + executors::{ExitKind, HasExecHooks}, + feedbacks::Feedback, + inputs::{HasTargetBytes, Input}, + observers::{Observer, ObserversTuple}, + state::HasMetadata, + Error, SerdeAny, +}; +use rangemap::RangeMap; +use serde::{Deserialize, Serialize}; +use std::io::Write; +use termcolor::{Color, ColorSpec, WriteColor}; + +use crate::{alloc::AllocationMetadata, FridaOptions}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct AsanReadWriteError { + pub registers: [usize; 32], + pub pc: usize, + pub fault: (u16, u16, usize, usize), + pub metadata: AllocationMetadata, + pub backtrace: Backtrace, +} + +#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] +pub(crate) enum AsanError { + OobRead(AsanReadWriteError), + OobWrite(AsanReadWriteError), + ReadAfterFree(AsanReadWriteError), + WriteAfterFree(AsanReadWriteError), + DoubleFree((usize, AllocationMetadata, Backtrace)), + UnallocatedFree((usize, Backtrace)), + Unknown(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + Leak((usize, AllocationMetadata)), + StackOobRead(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + 
StackOobWrite(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + BadFuncArgRead((String, usize, usize, Backtrace)), + BadFuncArgWrite((String, usize, usize, Backtrace)), +} + +impl AsanError { + fn description(&self) -> &str { + match self { + AsanError::OobRead(_) => "heap out-of-bounds read", + AsanError::OobWrite(_) => "heap out-of-bounds write", + AsanError::DoubleFree(_) => "double-free", + AsanError::UnallocatedFree(_) => "unallocated-free", + AsanError::WriteAfterFree(_) => "heap use-after-free write", + AsanError::ReadAfterFree(_) => "heap use-after-free read", + AsanError::Unknown(_) => "heap unknown", + AsanError::Leak(_) => "memory-leak", + AsanError::StackOobRead(_) => "stack out-of-bounds read", + AsanError::StackOobWrite(_) => "stack out-of-bounds write", + AsanError::BadFuncArgRead(_) => "function arg resulting in bad read", + AsanError::BadFuncArgWrite(_) => "function arg resulting in bad write", + } + } +} + +/// A struct holding errors that occurred during frida address sanitizer runs +#[allow(clippy::unsafe_derive_deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] +pub struct AsanErrors { + options: FridaOptions, + errors: Vec, +} + +impl AsanErrors { + /// Creates a new `AsanErrors` struct + #[must_use] + pub fn new(options: FridaOptions) -> Self { + Self { + options, + errors: Vec::new(), + } + } + + /// Clears this `AsanErrors` struct + pub fn clear(&mut self) { + self.errors.clear() + } + + /// Gets the amount of `AsanErrors` in this struct + #[must_use] + pub fn len(&self) -> usize { + self.errors.len() + } + + /// Returns `true` if no errors occurred + #[must_use] + pub fn is_empty(&self) -> bool { + self.errors.is_empty() + } + + /// Get a mutable reference to the global [`AsanErrors`] object + pub fn get_mut<'a>() -> &'a mut Self { + unsafe { ASAN_ERRORS.as_mut().unwrap() } + } + + /// Report an error + #[allow(clippy::too_many_lines)] + pub(crate) fn report_error( + &mut self, + error: AsanError, + 
instrumented_ranges: Option<&RangeMap>, + ) { + self.errors.push(error.clone()); + + let mut out_stream = default_output_stream(); + let output = out_stream.as_mut(); + + let backtrace_printer = BacktracePrinter::new() + .clear_frame_filters() + .print_addresses(true) + .verbosity(Verbosity::Full) + .add_frame_filter(Box::new(|frames| { + frames.retain( + |x| matches!(&x.name, Some(n) if !n.starts_with("libafl_frida::asan_rt::")), + ) + })); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " Memory error detected! ").unwrap(); + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + write!(output, "{}", error.description()).unwrap(); + match error { + AsanError::OobRead(mut error) + | AsanError::OobWrite(mut error) + | AsanError::ReadAfterFree(mut error) + | AsanError::WriteAfterFree(mut error) => { + let (basereg, indexreg, _displacement, fault_address) = error.fault; + + if let Some((range, path)) = instrumented_ranges.unwrap().get_key_value(&error.pc) { + writeln!( + output, + " at 0x{:x} ({}@0x{:04x}), faulting address 0x{:x}", + error.pc, + path, + error.pc - range.start, + fault_address + ) + .unwrap(); + } else { + writeln!( + output, + " at 0x{:x}, faulting address 0x{:x}", + error.pc, fault_address + ) + .unwrap(); + } + output.reset().unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + for reg in 0..=30 { + if reg == basereg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if reg == indexreg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!( + output, + "x{:02}: 0x{:016x} ", + reg, error.registers[reg as usize] + ) + .unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + writeln!(output, "pc : 0x{:016x} ", error.pc).unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " CODE ").unwrap(); + let mut cs = 
Capstone::new() + .arm64() + .mode(capstone::arch::arm64::ArchMode::Arm) + .build() + .unwrap(); + cs.set_skipdata(true).expect("failed to set skipdata"); + + let start_pc = error.pc - 4 * 5; + for insn in cs + .disasm_count( + unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, + start_pc as u64, + 11, + ) + .expect("failed to disassemble instructions") + .iter() + { + if insn.address() as usize == error.pc { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + writeln!(output, "\t => {}", insn).unwrap(); + output.reset().unwrap(); + } else { + writeln!(output, "\t {}", insn).unwrap(); + } + } + backtrace_printer + .print_trace(&error.backtrace, output) + .unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); + let offset: i64 = fault_address as i64 - error.metadata.address as i64; + let direction = if offset > 0 { "right" } else { "left" }; + writeln!( + output, + "access is {} to the {} of the 0x{:x} byte allocation at 0x{:x}", + offset, direction, error.metadata.size, error.metadata.address + ) + .unwrap(); + + if error.metadata.is_malloc_zero { + writeln!(output, "allocation was zero-sized").unwrap(); + } + + if let Some(backtrace) = error.metadata.allocation_site_backtrace.as_mut() { + writeln!(output, "allocation site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + + if error.metadata.freed { + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " FREE INFO ").unwrap(); + if let Some(backtrace) = error.metadata.release_site_backtrace.as_mut() { + writeln!(output, "free site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + } + } + AsanError::BadFuncArgRead((name, address, size, backtrace)) + | AsanError::BadFuncArgWrite((name, address, size, backtrace)) => { + writeln!( + output, + " in call to {}, argument {:#016x}, 
size: {:#x}", + name, address, size + ) + .unwrap(); + let invocation = Interceptor::current_invocation(); + let cpu_context = invocation.cpu_context(); + #[cfg(target_arch = "aarch64")] + { + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + for reg in 0..29 { + let val = cpu_context.reg(reg); + if val as usize == address { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } + write!(output, "x{:02}: 0x{:016x} ", reg, val).unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + write!(output, "sp : 0x{:016x} ", cpu_context.sp()).unwrap(); + write!(output, "lr : 0x{:016x} ", cpu_context.lr()).unwrap(); + writeln!(output, "pc : 0x{:016x} ", cpu_context.pc()).unwrap(); + } + + backtrace_printer.print_trace(&backtrace, output).unwrap(); + } + AsanError::DoubleFree((ptr, mut metadata, backtrace)) => { + writeln!(output, " of {:?}", ptr).unwrap(); + output.reset().unwrap(); + backtrace_printer.print_trace(&backtrace, output).unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); + writeln!( + output, + "allocation at 0x{:x}, with size 0x{:x}", + metadata.address, metadata.size + ) + .unwrap(); + if metadata.is_malloc_zero { + writeln!(output, "allocation was zero-sized").unwrap(); + } + + if let Some(backtrace) = metadata.allocation_site_backtrace.as_mut() { + writeln!(output, "allocation site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " FREE INFO ").unwrap(); + if let Some(backtrace) = metadata.release_site_backtrace.as_mut() { + writeln!(output, "previous free site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + } + AsanError::UnallocatedFree((ptr, backtrace)) => { + writeln!(output, " of {:#016x}", 
ptr).unwrap(); + output.reset().unwrap(); + backtrace_printer.print_trace(&backtrace, output).unwrap(); + } + AsanError::Leak((ptr, mut metadata)) => { + writeln!(output, " of {:#016x}", ptr).unwrap(); + output.reset().unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); + writeln!( + output, + "allocation at 0x{:x}, with size 0x{:x}", + metadata.address, metadata.size + ) + .unwrap(); + if metadata.is_malloc_zero { + writeln!(output, "allocation was zero-sized").unwrap(); + } + + if let Some(backtrace) = metadata.allocation_site_backtrace.as_mut() { + writeln!(output, "allocation site backtrace:").unwrap(); + backtrace.resolve(); + backtrace_printer.print_trace(backtrace, output).unwrap(); + } + } + AsanError::Unknown((registers, pc, fault, backtrace)) + | AsanError::StackOobRead((registers, pc, fault, backtrace)) + | AsanError::StackOobWrite((registers, pc, fault, backtrace)) => { + let (basereg, indexreg, _displacement, fault_address) = fault; + + if let Ok((start, _, _, path)) = find_mapping_for_address(pc) { + writeln!( + output, + " at 0x{:x} ({}:0x{:04x}), faulting address 0x{:x}", + pc, + path, + pc - start, + fault_address + ) + .unwrap(); + } else { + writeln!( + output, + " at 0x{:x}, faulting address 0x{:x}", + pc, fault_address + ) + .unwrap(); + } + output.reset().unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + for reg in 0..=30 { + if reg == basereg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if reg == indexreg { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!(output, "x{:02}: 0x{:016x} ", reg, registers[reg as usize]).unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + writeln!(output, "pc : 0x{:016x} ", pc).unwrap(); + + #[allow(clippy::non_ascii_literal)] + writeln!(output, "{:━^100}", " CODE 
").unwrap(); + let mut cs = Capstone::new() + .arm64() + .mode(capstone::arch::arm64::ArchMode::Arm) + .build() + .unwrap(); + cs.set_skipdata(true).expect("failed to set skipdata"); + + let start_pc = pc - 4 * 5; + for insn in cs + .disasm_count( + unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, + start_pc as u64, + 11, + ) + .expect("failed to disassemble instructions") + .iter() + { + if insn.address() as usize == pc { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + writeln!(output, "\t => {}", insn).unwrap(); + output.reset().unwrap(); + } else { + writeln!(output, "\t {}", insn).unwrap(); + } + } + backtrace_printer.print_trace(&backtrace, output).unwrap(); + } + }; + + if !self.options.asan_continue_after_error() { + panic!("Crashing target!"); + } + } +} + +/// static field for `AsanErrors` for a run +pub static mut ASAN_ERRORS: Option = None; + +/// An observer for frida address sanitizer `AsanError`s for a frida executor run +#[derive(Serialize, Deserialize)] +#[allow(clippy::unsafe_derive_deserialize)] +pub struct AsanErrorsObserver { + errors: OwnedPtr>, +} + +impl Observer for AsanErrorsObserver {} + +impl HasExecHooks for AsanErrorsObserver { + fn pre_exec( + &mut self, + _fuzzer: &mut Z, + _state: &mut S, + _mgr: &mut EM, + _input: &I, + ) -> Result<(), Error> { + unsafe { + if ASAN_ERRORS.is_some() { + ASAN_ERRORS.as_mut().unwrap().clear(); + } + } + + Ok(()) + } +} + +impl Named for AsanErrorsObserver { + #[inline] + fn name(&self) -> &str { + "AsanErrors" + } +} + +impl AsanErrorsObserver { + /// Creates a new `AsanErrorsObserver`, pointing to a constant `AsanErrors` field + #[must_use] + pub fn new(errors: &'static Option) -> Self { + Self { + errors: OwnedPtr::Ptr(errors as *const Option), + } + } + + /// Creates a new `AsanErrorsObserver`, owning the `AsanErrors` + #[must_use] + pub fn new_owned(errors: Option) -> Self { + Self { + errors: OwnedPtr::Owned(Box::new(errors)), + } + } + + /// 
Creates a new `AsanErrorsObserver` from a raw ptr + #[must_use] + pub fn new_from_ptr(errors: *const Option) -> Self { + Self { + errors: OwnedPtr::Ptr(errors), + } + } + + /// gets the [`AsanErrors`] from the previous run + #[must_use] + pub fn errors(&self) -> Option<&AsanErrors> { + match &self.errors { + OwnedPtr::Ptr(p) => unsafe { p.as_ref().unwrap().as_ref() }, + OwnedPtr::Owned(b) => b.as_ref().as_ref(), + } + } +} + +/// A feedback reporting potential [`AsanErrors`] from an `AsanErrorsObserver` +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct AsanErrorsFeedback { + errors: Option, +} + +impl Feedback for AsanErrorsFeedback +where + I: Input + HasTargetBytes, +{ + fn is_interesting( + &mut self, + _state: &mut S, + _manager: &mut EM, + _input: &I, + observers: &OT, + _exit_kind: &ExitKind, + ) -> Result + where + EM: EventFirer, + OT: ObserversTuple, + { + let observer = observers + .match_name::("AsanErrors") + .expect("An AsanErrorsFeedback needs an AsanErrorsObserver"); + match observer.errors() { + None => Ok(false), + Some(errors) => { + if errors.errors.is_empty() { + Ok(false) + } else { + self.errors = Some(errors.clone()); + Ok(true) + } + } + } + } + + fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase) -> Result<(), Error> { + if let Some(errors) = &self.errors { + testcase.add_metadata(errors.clone()); + } + + Ok(()) + } + + fn discard_metadata(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { + self.errors = None; + Ok(()) + } +} + +impl Named for AsanErrorsFeedback { + #[inline] + fn name(&self) -> &str { + "AsanErrors" + } +} + +impl AsanErrorsFeedback { + /// Create a new `AsanErrorsFeedback` + #[must_use] + pub fn new() -> Self { + Self { errors: None } + } +} + +impl Default for AsanErrorsFeedback { + fn default() -> Self { + Self::new() + } +} diff --git a/libafl_frida/src/asan_rt.rs b/libafl_frida/src/asan_rt.rs index d83e9b883d..168b90f2ee 100644 --- a/libafl_frida/src/asan_rt.rs +++ 
b/libafl_frida/src/asan_rt.rs @@ -6,24 +6,12 @@ even if the target would not have crashed under normal conditions. this helps finding mem errors early. */ +use frida_gum::NativePointer; use hashbrown::HashMap; -use libafl::{ - bolts::{ - os::{find_mapping_for_address, find_mapping_for_path, walk_self_maps}, - ownedref::OwnedPtr, - tuples::Named, - }, - corpus::Testcase, - events::EventFirer, - executors::{CustomExitKind, ExitKind, HasExecHooks}, - feedbacks::Feedback, - inputs::{HasTargetBytes, Input}, - observers::{Observer, ObserversTuple}, - state::HasMetadata, - Error, SerdeAny, -}; +use libafl::bolts::os::{find_mapping_for_address, find_mapping_for_path}; + use nix::{ - libc::{memmove, memset}, + libc::memset, sys::mman::{mmap, MapFlags, ProtFlags}, }; @@ -32,587 +20,23 @@ use capstone::{ arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone}, Capstone, Insn, }; -use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity}; use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi}; +use frida_gum::{interceptor::Interceptor, Gum, ModuleMap}; #[cfg(unix)] -use gothook::GotHookLibrary; -use libc::{getrlimit64, rlimit64, sysconf, _SC_PAGESIZE}; +use libc::{c_char, getrlimit64, rlimit64, wchar_t}; use rangemap::RangeMap; -use rangemap::RangeSet; -use serde::{Deserialize, Serialize}; -use std::{ - cell::{RefCell, RefMut}, - ffi::c_void, - io::{self, Write}, - path::PathBuf, - rc::Rc, -}; -use termcolor::{Color, ColorSpec, WriteColor}; +use std::{ffi::c_void, path::PathBuf}; -use crate::FridaOptions; +use crate::{ + alloc::Allocator, + asan_errors::{AsanError, AsanErrors, AsanReadWriteError, ASAN_ERRORS}, + FridaOptions, +}; extern "C" { fn __register_frame(begin: *mut c_void); } -static mut ALLOCATOR_SINGLETON: Option> = None; - -struct Allocator { - runtime: Rc>, - page_size: usize, - shadow_offset: usize, - shadow_bit: usize, - pre_allocated_shadow: bool, - allocations: HashMap, - shadow_pages: RangeSet, - allocation_queue: HashMap>, - 
largest_allocation: usize, -} - -macro_rules! map_to_shadow { - ($self:expr, $address:expr) => { - (($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1) - }; -} - -#[derive(Clone, Debug, Default, Serialize, Deserialize)] -struct AllocationMetadata { - address: usize, - size: usize, - actual_size: usize, - allocation_site_backtrace: Option, - release_site_backtrace: Option, - freed: bool, - is_malloc_zero: bool, -} - -impl Allocator { - fn setup(runtime: Rc>) { - let ret = unsafe { sysconf(_SC_PAGESIZE) }; - if ret < 0 { - panic!("Failed to read pagesize {:?}", io::Error::last_os_error()); - } - #[allow(clippy::cast_sign_loss)] - let page_size = ret as usize; - // probe to find a usable shadow bit: - let mut shadow_bit: usize = 0; - for try_shadow_bit in &[46usize, 36usize] { - let addr: usize = 1 << try_shadow_bit; - if unsafe { - mmap( - addr as *mut c_void, - page_size, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_PRIVATE - | MapFlags::MAP_ANONYMOUS - | MapFlags::MAP_FIXED - | MapFlags::MAP_NORESERVE, - -1, - 0, - ) - } - .is_ok() - { - shadow_bit = *try_shadow_bit; - break; - } - } - assert!(shadow_bit != 0); - - // attempt to pre-map the entire shadow-memory space - let addr: usize = 1 << shadow_bit; - let pre_allocated_shadow = unsafe { - mmap( - addr as *mut c_void, - addr + addr, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_ANONYMOUS - | MapFlags::MAP_FIXED - | MapFlags::MAP_PRIVATE - | MapFlags::MAP_NORESERVE, - -1, - 0, - ) - } - .is_ok(); - - let allocator = Self { - runtime, - page_size, - pre_allocated_shadow, - shadow_offset: 1 << shadow_bit, - shadow_bit, - allocations: HashMap::new(), - shadow_pages: RangeSet::new(), - allocation_queue: HashMap::new(), - largest_allocation: 0, - }; - unsafe { - ALLOCATOR_SINGLETON = Some(RefCell::new(allocator)); - } - } - - pub fn get() -> RefMut<'static, Allocator> { - unsafe { - ALLOCATOR_SINGLETON - .as_mut() - .unwrap() - .try_borrow_mut() - .unwrap() 
- } - } - - pub fn init(runtime: Rc>) { - Self::setup(runtime); - } - - #[inline] - fn round_up_to_page(&self, size: usize) -> usize { - ((size + self.page_size) / self.page_size) * self.page_size - } - - #[inline] - fn round_down_to_page(&self, value: usize) -> usize { - (value / self.page_size) * self.page_size - } - - fn find_smallest_fit(&mut self, size: usize) -> Option { - let mut current_size = size; - while current_size <= self.largest_allocation { - if self.allocation_queue.contains_key(¤t_size) { - if let Some(metadata) = self.allocation_queue.entry(current_size).or_default().pop() - { - return Some(metadata); - } - } - current_size *= 2; - } - None - } - - pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void { - let mut is_malloc_zero = false; - let size = if size == 0 { - println!("zero-sized allocation!"); - is_malloc_zero = true; - 16 - } else { - size - }; - if size > (1 << 30) { - panic!("Allocation is too large: 0x{:x}", size); - } - let rounded_up_size = self.round_up_to_page(size); - - let metadata = if let Some(mut metadata) = self.find_smallest_fit(rounded_up_size) { - //println!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size); - metadata.is_malloc_zero = is_malloc_zero; - metadata.size = size; - if self - .runtime - .borrow() - .options - .enable_asan_allocation_backtraces - { - metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); - } - metadata - } else { - let mapping = match mmap( - std::ptr::null_mut(), - rounded_up_size, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE, - -1, - 0, - ) { - Ok(mapping) => mapping as usize, - Err(err) => { - println!("An error occurred while mapping memory: {:?}", err); - return std::ptr::null_mut(); - } - }; - - self.map_shadow_for_region(mapping, mapping + rounded_up_size, false); - - let mut metadata = AllocationMetadata { - 
address: mapping, - size, - actual_size: rounded_up_size, - ..AllocationMetadata::default() - }; - - if self - .runtime - .borrow() - .options - .enable_asan_allocation_backtraces - { - metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); - } - - metadata - }; - - self.largest_allocation = std::cmp::max(self.largest_allocation, metadata.actual_size); - // unpoison the shadow memory for the allocation itself - Self::unpoison(map_to_shadow!(self, metadata.address), size); - let address = metadata.address as *mut c_void; - - self.allocations.insert(metadata.address, metadata); - //println!("serving address: {:?}, size: {:x}", address, size); - address - } - - pub unsafe fn release(&mut self, ptr: *mut c_void) { - let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) { - metadata - } else { - if !ptr.is_null() { - // TODO: report this as an observer - self.runtime - .borrow_mut() - .report_error(AsanError::UnallocatedFree((ptr as usize, Backtrace::new()))); - } - return; - }; - - if metadata.freed { - self.runtime - .borrow_mut() - .report_error(AsanError::DoubleFree(( - ptr as usize, - metadata.clone(), - Backtrace::new(), - ))); - } - let shadow_mapping_start = map_to_shadow!(self, ptr as usize); - - metadata.freed = true; - if self - .runtime - .borrow() - .options - .enable_asan_allocation_backtraces - { - metadata.release_site_backtrace = Some(Backtrace::new_unresolved()); - } - - // poison the shadow memory for the allocation - Self::poison(shadow_mapping_start, metadata.size); - } - - pub fn find_metadata( - &mut self, - ptr: usize, - hint_base: usize, - ) -> Option<&mut AllocationMetadata> { - let mut metadatas: Vec<&mut AllocationMetadata> = self.allocations.values_mut().collect(); - metadatas.sort_by(|a, b| a.address.cmp(&b.address)); - let mut offset_to_closest = i64::max_value(); - let mut closest = None; - for metadata in metadatas { - let new_offset = if hint_base == metadata.address { - (ptr as i64 - 
metadata.address as i64).abs() - } else { - std::cmp::min( - offset_to_closest, - (ptr as i64 - metadata.address as i64).abs(), - ) - }; - if new_offset < offset_to_closest { - offset_to_closest = new_offset; - closest = Some(metadata); - } - } - closest - } - - pub fn reset(&mut self) { - for (address, mut allocation) in self.allocations.drain() { - // First poison the memory. - Self::poison(map_to_shadow!(self, address), allocation.size); - - // Reset the allocaiton metadata object - allocation.size = 0; - allocation.freed = false; - allocation.allocation_site_backtrace = None; - allocation.release_site_backtrace = None; - - // Move the allocation from the allocations to the to-be-allocated queues - self.allocation_queue - .entry(allocation.actual_size) - .or_default() - .push(allocation); - } - } - - pub fn get_usable_size(&self, ptr: *mut c_void) -> usize { - match self.allocations.get(&(ptr as usize)) { - Some(metadata) => metadata.size, - None => { - panic!( - "Attempted to get_usable_size on a pointer ({:?}) which was not allocated!", - ptr - ); - } - } - } - - fn unpoison(start: usize, size: usize) { - //println!("unpoisoning {:x} for {:x}", start, size / 8 + 1); - unsafe { - //println!("memset: {:?}", start as *mut c_void); - memset(start as *mut c_void, 0xff, size / 8); - - let remainder = size % 8; - if remainder > 0 { - //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); - memset( - (start + size / 8) as *mut c_void, - (0xff << (8 - remainder)) & 0xff, - 1, - ); - } - } - } - - fn poison(start: usize, size: usize) { - //println!("poisoning {:x} for {:x}", start, size / 8 + 1); - unsafe { - //println!("memset: {:?}", start as *mut c_void); - memset(start as *mut c_void, 0x00, size / 8); - - let remainder = size % 8; - if remainder > 0 { - //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); - memset((start + size / 8) as *mut c_void, 0x00, 1); - } - } - } - - /// Map shadow memory for a region, and optionally 
unpoison it - pub fn map_shadow_for_region( - &mut self, - start: usize, - end: usize, - unpoison: bool, - ) -> (usize, usize) { - //println!("start: {:x}, end {:x}, size {:x}", start, end, end - start); - - let shadow_mapping_start = map_to_shadow!(self, start); - - if !self.pre_allocated_shadow { - let shadow_start = self.round_down_to_page(shadow_mapping_start); - let shadow_end = - self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start; - for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) { - //println!("range: {:x}-{:x}, pagesize: {}", range.start, range.end, self.page_size); - unsafe { - mmap( - range.start as *mut c_void, - range.end - range.start, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE, - -1, - 0, - ) - .expect("An error occurred while mapping shadow memory"); - } - } - - self.shadow_pages.insert(shadow_start..shadow_end); - } - - //println!("shadow_mapping_start: {:x}, shadow_size: {:x}", shadow_mapping_start, (end - start) / 8); - if unpoison { - Self::unpoison(shadow_mapping_start, end - start); - } - - (shadow_mapping_start, (end - start) / 8) - } -} - -/// Hook for malloc. -#[must_use] -pub extern "C" fn asan_malloc(size: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size, 0x8) } -} - -/// Hook for new. -#[must_use] -pub extern "C" fn asan_new(size: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size, 0x8) } -} - -/// Hook for new. -#[must_use] -pub extern "C" fn asan_new_nothrow(size: usize, _nothrow: *const c_void) -> *mut c_void { - unsafe { Allocator::get().alloc(size, 0x8) } -} - -/// Hook for new with alignment. -#[must_use] -pub extern "C" fn asan_new_aligned(size: usize, alignment: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size, alignment) } -} - -/// Hook for new with alignment. 
-#[must_use] -pub extern "C" fn asan_new_aligned_nothrow( - size: usize, - alignment: usize, - _nothrow: *const c_void, -) -> *mut c_void { - unsafe { Allocator::get().alloc(size, alignment) } -} - -/// Hook for pvalloc -#[must_use] -pub extern "C" fn asan_pvalloc(size: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size, 0x8) } -} - -/// Hook for valloc -#[must_use] -pub extern "C" fn asan_valloc(size: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size, 0x8) } -} - -/// Hook for calloc -#[must_use] -pub extern "C" fn asan_calloc(nmemb: usize, size: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size * nmemb, 0x8) } -} - -/// Hook for realloc -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -#[must_use] -pub unsafe extern "C" fn asan_realloc(ptr: *mut c_void, size: usize) -> *mut c_void { - let mut allocator = Allocator::get(); - let ret = allocator.alloc(size, 0x8); - if ptr != std::ptr::null_mut() { - memmove(ret, ptr, allocator.get_usable_size(ptr)); - } - allocator.release(ptr); - ret -} - -/// Hook for free -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_free(ptr: *mut c_void) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for delete -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_delete(ptr: *mut c_void) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for delete -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_delete_ulong(ptr: *mut c_void, _ulong: u64) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for delete -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_delete_ulong_aligned( - ptr: *mut 
c_void, - _ulong: u64, - _nothrow: *const c_void, -) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for delete -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_delete_aligned(ptr: *mut c_void, _alignment: usize) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for delete -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_delete_nothrow(ptr: *mut c_void, _nothrow: *const c_void) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for `delete` -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -pub unsafe extern "C" fn asan_delete_aligned_nothrow( - ptr: *mut c_void, - _alignment: usize, - _nothrow: *const c_void, -) { - if ptr != std::ptr::null_mut() { - Allocator::get().release(ptr); - } -} - -/// Hook for `malloc_usable_size` -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -#[must_use] -pub unsafe extern "C" fn asan_malloc_usable_size(ptr: *mut c_void) -> usize { - Allocator::get().get_usable_size(ptr) -} - -/// Hook for `memalign` -#[must_use] -pub extern "C" fn asan_memalign(size: usize, alignment: usize) -> *mut c_void { - unsafe { Allocator::get().alloc(size, alignment) } -} - -/// Hook for `posix_memalign` -/// -/// # Safety -/// This function is inherently unsafe, as it takes a raw pointer -#[must_use] -pub unsafe extern "C" fn asan_posix_memalign( - pptr: *mut *mut c_void, - size: usize, - alignment: usize, -) -> i32 { - *pptr = Allocator::get().alloc(size, alignment); - 0 -} - -/// Hook for mallinfo -#[must_use] -pub extern "C" fn asan_mallinfo() -> *mut c_void { - std::ptr::null_mut() -} - /// Get the current thread's TLS address extern "C" { fn tls_ptr() -> *const c_void; @@ -624,6 +48,7 @@ extern "C" { /// even if the target would 
not have crashed under normal conditions. /// this helps finding mem errors early. pub struct AsanRuntime { + allocator: Allocator, regs: [usize; 32], blob_report: Option>, blob_check_mem_byte: Option>, @@ -641,85 +66,16 @@ pub struct AsanRuntime { stalked_addresses: HashMap, options: FridaOptions, instrumented_ranges: RangeMap, + module_map: Option, + shadow_check_func: Option bool>, } -#[derive(Debug, Clone, Serialize, Deserialize)] -struct AsanReadWriteError { - registers: [usize; 32], - pc: usize, - fault: (u16, u16, usize, usize), - metadata: AllocationMetadata, - backtrace: Backtrace, -} - -#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] -enum AsanError { - OobRead(AsanReadWriteError), - OobWrite(AsanReadWriteError), - ReadAfterFree(AsanReadWriteError), - WriteAfterFree(AsanReadWriteError), - DoubleFree((usize, AllocationMetadata, Backtrace)), - UnallocatedFree((usize, Backtrace)), - Unknown(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), - Leak((usize, AllocationMetadata)), - StackOobRead(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), - StackOobWrite(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), -} - -impl AsanError { - fn description(&self) -> &str { - match self { - AsanError::OobRead(_) => "heap out-of-bounds read", - AsanError::OobWrite(_) => "heap out-of-bounds write", - AsanError::DoubleFree(_) => "double-free", - AsanError::UnallocatedFree(_) => "unallocated-free", - AsanError::WriteAfterFree(_) => "heap use-after-free write", - AsanError::ReadAfterFree(_) => "heap use-after-free read", - AsanError::Unknown(_) => "heap unknown", - AsanError::Leak(_) => "memory-leak", - AsanError::StackOobRead(_) => "stack out-of-bounds read", - AsanError::StackOobWrite(_) => "stack out-of-bounds write", - } - } -} - -/// A struct holding errors that occurred during frida address sanitizer runs -#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] -pub struct AsanErrors { - errors: Vec, -} - -impl AsanErrors { - /// 
Creates a new `AsanErrors` struct - #[must_use] - fn new() -> Self { - Self { errors: Vec::new() } - } - - /// Clears this `AsanErrors` struct - pub fn clear(&mut self) { - self.errors.clear() - } - - /// Gets the amount of `AsanErrors` in this struct - #[must_use] - pub fn len(&self) -> usize { - self.errors.len() - } - - /// Returns `true` if no errors occurred - #[must_use] - pub fn is_empty(&self) -> bool { - self.errors.is_empty() - } -} -impl CustomExitKind for AsanErrors {} - impl AsanRuntime { /// Create a new `AsanRuntime` #[must_use] - pub fn new(options: FridaOptions) -> Rc> { - let res = Rc::new(RefCell::new(Self { + pub fn new(options: FridaOptions) -> AsanRuntime { + Self { + allocator: Allocator::new(options.clone()), regs: [0; 32], blob_report: None, blob_check_mem_byte: None, @@ -737,42 +93,58 @@ impl AsanRuntime { stalked_addresses: HashMap::new(), options, instrumented_ranges: RangeMap::new(), - })); - Allocator::init(res.clone()); - res + module_map: None, + shadow_check_func: None, + } } /// Initialize the runtime so that it is read for action. Take care not to move the runtime /// instance after this function has been called, as the generated blobs would become /// invalid! 
- pub fn init(&mut self, modules_to_instrument: &[PathBuf]) { + pub fn init(&mut self, gum: &Gum, modules_to_instrument: &[PathBuf]) { unsafe { - ASAN_ERRORS = Some(AsanErrors::new()); + ASAN_ERRORS = Some(AsanErrors::new(self.options.clone())); } self.generate_instrumentation_blobs(); + self.generate_shadow_check_function(); self.unpoison_all_existing_memory(); + for module_name in modules_to_instrument { let (start, end) = find_mapping_for_path(module_name.to_str().unwrap()); self.instrumented_ranges .insert(start..end, module_name.to_str().unwrap().to_string()); - #[cfg(unix)] - self.hook_library(module_name.to_str().unwrap()); } + let module_names: Vec<&str> = modules_to_instrument + .iter() + .map(|modname| modname.to_str().unwrap()) + .collect(); + self.module_map = Some(ModuleMap::new_from_names(&module_names)); + self.hook_functions(gum); + //unsafe { + //let mem = self.allocator.alloc(0xac + 2, 8); + + //unsafe {mprotect((self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC)}; + //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0xac)); + //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2) as *const c_void, 0xac)); + //assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 3) as *const c_void, 0xac)); + //assert!(!(self.shadow_check_func.unwrap())(((mem as isize) + -1) as *const c_void, 0xac)); + //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa4) as *const c_void, 8)); + //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa6) as *const c_void, 6)); + //assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 6)); + //assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 0xac)); + //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 4 + 0xa8) as *const c_void, 0x1)); + //} } /// Reset all 
allocations so that they can be reused for new allocation requests. #[allow(clippy::unused_self)] - pub fn reset_allocations(&self) { - Allocator::get().reset(); + pub fn reset_allocations(&mut self) { + self.allocator.reset(); } /// Check if the test leaked any memory and report it if so. pub fn check_for_leaks(&mut self) { - for metadata in Allocator::get().allocations.values_mut() { - if !metadata.freed { - self.report_error(AsanError::Leak((metadata.address, metadata.clone()))); - } - } + self.allocator.check_for_leaks() } /// Returns the `AsanErrors` from the recent run @@ -783,12 +155,13 @@ impl AsanRuntime { /// Make sure the specified memory is unpoisoned #[allow(clippy::unused_self)] - pub fn unpoison(&self, address: usize, size: usize) { - Allocator::get().map_shadow_for_region(address, address + size, true); + pub fn unpoison(&mut self, address: usize, size: usize) { + self.allocator + .map_shadow_for_region(address, address + size, true); } /// Add a stalked address to real address mapping. - //#[inline] + #[inline] pub fn add_stalked_address(&mut self, stalked: usize, real: usize) { self.stalked_addresses.insert(stalked, real); } @@ -801,29 +174,21 @@ impl AsanRuntime { /// Unpoison all the memory that is currently mapped with read/write permissions. #[allow(clippy::unused_self)] - fn unpoison_all_existing_memory(&self) { - let mut allocator = Allocator::get(); - walk_self_maps(&mut |start, end, permissions, _path| { - if permissions.as_bytes()[0] == b'r' || permissions.as_bytes()[1] == b'w' { - if allocator.pre_allocated_shadow && start == 1 << allocator.shadow_bit { - return false; - } - allocator.map_shadow_for_region(start, end, true); - } - false - }); + fn unpoison_all_existing_memory(&mut self) { + self.allocator.unpoison_all_existing_memory() } /// Register the current thread with the runtime, implementing shadow memory for its stack and /// tls mappings. 
#[allow(clippy::unused_self)] - pub fn register_thread(&self) { - let mut allocator = Allocator::get(); + pub fn register_thread(&mut self) { let (stack_start, stack_end) = Self::current_stack(); - allocator.map_shadow_for_region(stack_start, stack_end, true); + self.allocator + .map_shadow_for_region(stack_start, stack_end, true); let (tls_start, tls_end) = Self::current_tls(); - allocator.map_shadow_for_region(tls_start, tls_end, true); + self.allocator + .map_shadow_for_region(tls_start, tls_end, true); println!( "registering thread with stack {:x}:{:x} and tls {:x}:{:x}", stack_start as usize, stack_end as usize, tls_start as usize, tls_end as usize @@ -870,7 +235,7 @@ impl AsanRuntime { /// Determine the tls start, end for the currently running thread fn current_tls() -> (usize, usize) { let tls_address = unsafe { tls_ptr() } as usize; - // we need to mask off the highest byte, due to 'High Byte Ignore" + #[cfg(target_os = "android")] let tls_address = tls_address & 0xffffffffffffff; @@ -878,77 +243,1463 @@ impl AsanRuntime { (start, end) } - /// Locate the target library and hook it's memory allocation functions - #[cfg(unix)] - #[allow(clippy::unused_self)] - fn hook_library(&mut self, path: &str) { - let target_lib = GotHookLibrary::new(path, false); + #[inline] + fn hook_malloc(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator.alloc(size, 8) } + } - // shadow the library itself, allowing all accesses - Allocator::get().map_shadow_for_region(target_lib.start(), target_lib.end(), true); + #[allow(non_snake_case)] + #[inline] + fn hook__Znam(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator.alloc(size, 8) } + } + #[allow(non_snake_case)] + #[inline] + fn hook__ZnamRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void { + unsafe { self.allocator.alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZnamSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { + 
unsafe { self.allocator.alloc(size, alignment) } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZnamSt11align_val_tRKSt9nothrow_t( + &mut self, + size: usize, + alignment: usize, + _nothrow: *const c_void, + ) -> *mut c_void { + unsafe { self.allocator.alloc(size, alignment) } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__Znwm(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator.alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZnwmRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void { + unsafe { self.allocator.alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZnwmSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { + unsafe { self.allocator.alloc(size, alignment) } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZnwmSt11align_val_tRKSt9nothrow_t( + &mut self, + size: usize, + alignment: usize, + _nothrow: *const c_void, + ) -> *mut c_void { + unsafe { self.allocator.alloc(size, alignment) } + } + + #[inline] + fn hook_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void { + let ret = unsafe { self.allocator.alloc(size * nmemb, 8) }; unsafe { - // Hook all the memory allocator functions - target_lib.hook_function("malloc", asan_malloc as *const c_void); - target_lib.hook_function("_Znam", asan_new as *const c_void); - target_lib.hook_function("_ZnamRKSt9nothrow_t", asan_new_nothrow as *const c_void); - target_lib.hook_function("_ZnamSt11align_val_t", asan_new_aligned as *const c_void); - target_lib.hook_function( - "_ZnamSt11align_val_tRKSt9nothrow_t", - asan_new_aligned_nothrow as *const c_void, - ); - target_lib.hook_function("_Znwm", asan_new as *const c_void); - target_lib.hook_function("_ZnwmRKSt9nothrow_t", asan_new_nothrow as *const c_void); - target_lib.hook_function("_ZnwmSt11align_val_t", asan_new_aligned as *const c_void); - target_lib.hook_function( - "_ZnwmSt11align_val_tRKSt9nothrow_t", - 
asan_new_aligned_nothrow as *const c_void, - ); + memset(ret, 0, size * nmemb); + } + ret + } - target_lib.hook_function("_ZdaPv", asan_delete as *const c_void); - target_lib.hook_function("_ZdaPvm", asan_delete_ulong as *const c_void); - target_lib.hook_function( - "_ZdaPvmSt11align_val_t", - asan_delete_ulong_aligned as *const c_void, - ); - target_lib.hook_function("_ZdaPvRKSt9nothrow_t", asan_delete_nothrow as *const c_void); - target_lib.hook_function( - "_ZdaPvSt11align_val_t", - asan_delete_aligned as *const c_void, - ); - target_lib.hook_function( - "_ZdaPvSt11align_val_tRKSt9nothrow_t", - asan_delete_aligned_nothrow as *const c_void, - ); + #[inline] + fn hook_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void { + unsafe { + let ret = self.allocator.alloc(size, 0x8); + if ptr != std::ptr::null_mut() { + (ptr as *mut u8).copy_to(ret as *mut u8, self.allocator.get_usable_size(ptr)); + } + self.allocator.release(ptr); + ret + } + } - target_lib.hook_function("_ZdlPv", asan_delete as *const c_void); - target_lib.hook_function("_ZdlPvm", asan_delete_ulong as *const c_void); - target_lib.hook_function( - "_ZdlPvmSt11align_val_t", - asan_delete_ulong_aligned as *const c_void, - ); - target_lib.hook_function("_ZdlPvRKSt9nothrow_t", asan_delete_nothrow as *const c_void); - target_lib.hook_function( - "_ZdlPvSt11align_val_t", - asan_delete_aligned as *const c_void, - ); - target_lib.hook_function( - "_ZdlPvSt11align_val_tRKSt9nothrow_t", - asan_delete_aligned_nothrow as *const c_void, - ); + #[inline] + fn hook_check_free(&mut self, ptr: *mut c_void) -> bool { + self.allocator.is_managed(ptr) + } - target_lib.hook_function("calloc", asan_calloc as *const c_void); - target_lib.hook_function("pvalloc", asan_pvalloc as *const c_void); - target_lib.hook_function("valloc", asan_valloc as *const c_void); - target_lib.hook_function("realloc", asan_realloc as *const c_void); - target_lib.hook_function("free", asan_free as *const c_void); - 
target_lib.hook_function("memalign", asan_memalign as *const c_void); - target_lib.hook_function("posix_memalign", asan_posix_memalign as *const c_void); - target_lib.hook_function( - "malloc_usable_size", - asan_malloc_usable_size as *const c_void, + #[inline] + fn hook_free(&mut self, ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[inline] + fn hook_memalign(&mut self, size: usize, alignment: usize) -> *mut c_void { + unsafe { self.allocator.alloc(size, alignment) } + } + + #[inline] + fn hook_posix_memalign( + &mut self, + pptr: *mut *mut c_void, + size: usize, + alignment: usize, + ) -> i32 { + unsafe { + *pptr = self.allocator.alloc(size, alignment); + } + 0 + } + + #[inline] + fn hook_malloc_usable_size(&mut self, ptr: *mut c_void) -> usize { + self.allocator.get_usable_size(ptr) + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdaPv(&mut self, ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdaPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdaPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdaPvSt11align_val_tRKSt9nothrow_t( + &mut self, + ptr: *mut c_void, + _alignment: usize, + _nothrow: *const c_void, + ) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdaPvSt11align_val_t(&mut self, 
ptr: *mut c_void, _alignment: usize) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdlPv(&mut self, ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdlPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdlPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdlPvSt11align_val_tRKSt9nothrow_t( + &mut self, + ptr: *mut c_void, + _alignment: usize, + _nothrow: *const c_void, + ) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + #[allow(non_snake_case)] + #[inline] + fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator.release(ptr) } + } + } + + fn hook_mmap( + &mut self, + addr: *const c_void, + length: usize, + prot: i32, + flags: i32, + fd: i32, + offset: usize, + ) -> *mut c_void { + extern "C" { + fn mmap( + addr: *const c_void, + length: usize, + prot: i32, + flags: i32, + fd: i32, + offset: usize, + ) -> *mut c_void; + } + let res = unsafe { mmap(addr, length, prot, flags, fd, offset) }; + if res != (-1_isize as *mut c_void) { + self.allocator + .map_shadow_for_region(res as usize, res as usize + length, true); + } + res + } + + fn hook_munmap(&mut self, addr: *const c_void, length: usize) -> i32 { + extern "C" { + fn munmap(addr: *const 
c_void, length: usize) -> i32; + } + let res = unsafe { munmap(addr, length) }; + if res != -1 { + Allocator::poison(self.allocator.map_to_shadow(addr as usize), length); + } + res + } + + #[inline] + fn hook_write(&mut self, fd: i32, buf: *const c_void, count: usize) -> usize { + extern "C" { + fn write(fd: i32, buf: *const c_void, count: usize) -> usize; + } + if !(self.shadow_check_func.unwrap())(buf, count) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "write".to_string(), + buf as usize, + count, + Backtrace::new(), + )), + None, ); } + unsafe { write(fd, buf, count) } + } + + #[inline] + fn hook_read(&mut self, fd: i32, buf: *mut c_void, count: usize) -> usize { + extern "C" { + fn read(fd: i32, buf: *mut c_void, count: usize) -> usize; + } + if !(self.shadow_check_func.unwrap())(buf, count) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "read".to_string(), + buf as usize, + count, + Backtrace::new(), + )), + None, + ); + } + unsafe { read(fd, buf, count) } + } + + #[inline] + fn hook_fgets(&mut self, s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void { + extern "C" { + fn fgets(s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(s, size as usize) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "fgets".to_string(), + s as usize, + size as usize, + Backtrace::new(), + )), + None, + ); + } + unsafe { fgets(s, size, stream) } + } + + #[inline] + fn hook_memcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { + extern "C" { + fn memcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; + } + if !(self.shadow_check_func.unwrap())(s1, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(("memcmp".to_string(), s1 as usize, n, Backtrace::new())), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2, n) { + AsanErrors::get_mut().report_error( + 
AsanError::BadFuncArgRead(("memcmp".to_string(), s2 as usize, n, Backtrace::new())), + None, + ); + } + unsafe { memcmp(s1, s2, n) } + } + + #[inline] + fn hook_memcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { + extern "C" { + fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(dest, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "memcpy".to_string(), + dest as usize, + n, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "memcpy".to_string(), + src as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { memcpy(dest, src, n) } + } + + #[inline] + fn hook_mempcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { + extern "C" { + fn mempcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(dest, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "mempcpy".to_string(), + dest as usize, + n, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "mempcpy".to_string(), + src as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { mempcpy(dest, src, n) } + } + + #[inline] + fn hook_memmove(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { + extern "C" { + fn memmove(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(dest, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "memmove".to_string(), + dest as usize, + n, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + 
"memmove".to_string(), + src as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { memmove(dest, src, n) } + } + + #[inline] + fn hook_memset(&mut self, dest: *mut c_void, c: i32, n: usize) -> *mut c_void { + extern "C" { + fn memset(dest: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(dest, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "memset".to_string(), + dest as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { memset(dest, c, n) } + } + + #[inline] + fn hook_memchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { + extern "C" { + fn memchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(s, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(("memchr".to_string(), s as usize, n, Backtrace::new())), + None, + ); + } + unsafe { memchr(s, c, n) } + } + + #[inline] + fn hook_memrchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { + extern "C" { + fn memrchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(s, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(("memrchr".to_string(), s as usize, n, Backtrace::new())), + None, + ); + } + unsafe { memrchr(s, c, n) } + } + + #[inline] + fn hook_memmem( + &mut self, + haystack: *const c_void, + haystacklen: usize, + needle: *const c_void, + needlelen: usize, + ) -> *mut c_void { + extern "C" { + fn memmem( + haystack: *const c_void, + haystacklen: usize, + needle: *const c_void, + needlelen: usize, + ) -> *mut c_void; + } + if !(self.shadow_check_func.unwrap())(haystack, haystacklen) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "memmem".to_string(), + haystack as usize, + haystacklen, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(needle, needlelen) { + AsanErrors::get_mut().report_error( + 
AsanError::BadFuncArgRead(( + "memmem".to_string(), + needle as usize, + needlelen, + Backtrace::new(), + )), + None, + ); + } + unsafe { memmem(haystack, haystacklen, needle, needlelen) } + } + + #[cfg(not(target_os = "android"))] + #[inline] + fn hook_bzero(&mut self, s: *mut c_void, n: usize) { + extern "C" { + fn bzero(s: *mut c_void, n: usize); + } + if !(self.shadow_check_func.unwrap())(s, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(("bzero".to_string(), s as usize, n, Backtrace::new())), + None, + ); + } + unsafe { bzero(s, n) } + } + + #[cfg(not(target_os = "android"))] + #[inline] + fn hook_explicit_bzero(&mut self, s: *mut c_void, n: usize) { + extern "C" { + fn explicit_bzero(s: *mut c_void, n: usize); + } + if !(self.shadow_check_func.unwrap())(s, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "explicit_bzero".to_string(), + s as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { explicit_bzero(s, n) } + } + + #[cfg(not(target_os = "android"))] + #[inline] + fn hook_bcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { + extern "C" { + fn bcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; + } + if !(self.shadow_check_func.unwrap())(s1, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(("bcmp".to_string(), s1 as usize, n, Backtrace::new())), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(("bcmp".to_string(), s2 as usize, n, Backtrace::new())), + None, + ); + } + unsafe { bcmp(s1, s2, n) } + } + + #[inline] + fn hook_strchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { + extern "C" { + fn strchr(s: *mut c_char, c: i32) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + 
"strchr".to_string(), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strchr(s, c) } + } + + #[inline] + fn hook_strrchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { + extern "C" { + fn strrchr(s: *mut c_char, c: i32) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strrchr".to_string(), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strrchr(s, c) } + } + + #[inline] + fn hook_strcasecmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { + extern "C" { + fn strcasecmp(s1: *const c_char, s2: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcasecmp".to_string(), + s1 as usize, + unsafe { strlen(s1) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcasecmp".to_string(), + s2 as usize, + unsafe { strlen(s2) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strcasecmp(s1, s2) } + } + + #[inline] + fn hook_strncasecmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { + extern "C" { + fn strncasecmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; + } + if !(self.shadow_check_func.unwrap())(s1 as *const c_void, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strncasecmp".to_string(), + s1 as usize, + n, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2 as *const c_void, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strncasecmp".to_string(), + s2 as usize, + 
n, + Backtrace::new(), + )), + None, + ); + } + unsafe { strncasecmp(s1, s2, n) } + } + + #[inline] + fn hook_strcat(&mut self, s1: *mut c_char, s2: *const c_char) -> *mut c_char { + extern "C" { + fn strcat(s1: *mut c_char, s2: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcat".to_string(), + s1 as usize, + unsafe { strlen(s1) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcat".to_string(), + s2 as usize, + unsafe { strlen(s2) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strcat(s1, s2) } + } + + #[inline] + fn hook_strcmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { + extern "C" { + fn strcmp(s1: *const c_char, s2: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcmp".to_string(), + s1 as usize, + unsafe { strlen(s1) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcmp".to_string(), + s2 as usize, + unsafe { strlen(s2) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strcmp(s1, s2) } + } + + #[inline] + fn hook_strncmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { + extern "C" { + fn strncmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; + } + if !(self.shadow_check_func.unwrap())(s1 as *const c_void, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strncmp".to_string(), + s1 as usize, + n, + 
Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2 as *const c_void, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strncmp".to_string(), + s2 as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { strncmp(s1, s2, n) } + } + + #[inline] + fn hook_strcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { + extern "C" { + fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "strcpy".to_string(), + dest as usize, + unsafe { strlen(src) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcpy".to_string(), + src as usize, + unsafe { strlen(src) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strcpy(dest, src) } + } + + #[inline] + fn hook_strncpy(&mut self, dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char { + extern "C" { + fn strncpy(dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char; + } + if !(self.shadow_check_func.unwrap())(dest as *const c_void, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "strncpy".to_string(), + dest as usize, + n, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src as *const c_void, n) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strncpy".to_string(), + src as usize, + n, + Backtrace::new(), + )), + None, + ); + } + unsafe { strncpy(dest, src, n) } + } + + #[inline] + fn hook_stpcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { + extern "C" { + fn stpcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> 
usize; + } + if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "stpcpy".to_string(), + dest as usize, + unsafe { strlen(src) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "stpcpy".to_string(), + src as usize, + unsafe { strlen(src) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { stpcpy(dest, src) } + } + + #[inline] + fn hook_strdup(&mut self, s: *const c_char) -> *mut c_char { + extern "C" { + fn strdup(s: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strdup".to_string(), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strdup(s) } + } + + #[inline] + fn hook_strlen(&mut self, s: *const c_char) -> usize { + extern "C" { + fn strlen(s: *const c_char) -> usize; + } + let size = unsafe { strlen(s) }; + if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strlen".to_string(), + s as usize, + size, + Backtrace::new(), + )), + None, + ); + } + size + } + + #[inline] + fn hook_strnlen(&mut self, s: *const c_char, n: usize) -> usize { + extern "C" { + fn strnlen(s: *const c_char, n: usize) -> usize; + } + let size = unsafe { strnlen(s, n) }; + if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strnlen".to_string(), + s as usize, + size, + Backtrace::new(), + )), + None, + ); + } + size + } + + #[inline] + fn hook_strstr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { + extern "C" { + 
fn strstr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(haystack as *const c_void, unsafe { + strlen(haystack) + }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strstr".to_string(), + haystack as usize, + unsafe { strlen(haystack) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(needle as *const c_void, unsafe { strlen(needle) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strstr".to_string(), + needle as usize, + unsafe { strlen(needle) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strstr(haystack, needle) } + } + + #[inline] + fn hook_strcasestr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { + extern "C" { + fn strcasestr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(haystack as *const c_void, unsafe { + strlen(haystack) + }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcasestr".to_string(), + haystack as usize, + unsafe { strlen(haystack) }, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(needle as *const c_void, unsafe { strlen(needle) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "strcasestr".to_string(), + needle as usize, + unsafe { strlen(needle) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { strcasestr(haystack, needle) } + } + + #[inline] + fn hook_atoi(&mut self, s: *const c_char) -> i32 { + extern "C" { + fn atoi(s: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "atoi".to_string(), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + )), + 
None, + ); + } + unsafe { atoi(s) } + } + + #[inline] + fn hook_atol(&mut self, s: *const c_char) -> i32 { + extern "C" { + fn atol(s: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "atol".to_string(), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { atol(s) } + } + + #[inline] + fn hook_atoll(&mut self, s: *const c_char) -> i64 { + extern "C" { + fn atoll(s: *const c_char) -> i64; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "atoll".to_string(), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + )), + None, + ); + } + unsafe { atoll(s) } + } + + #[inline] + fn hook_wcslen(&mut self, s: *const wchar_t) -> usize { + extern "C" { + fn wcslen(s: *const wchar_t) -> usize; + } + let size = unsafe { wcslen(s) }; + if !(self.shadow_check_func.unwrap())(s as *const c_void, (size + 1) * 2) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "wcslen".to_string(), + s as usize, + (size + 1) * 2, + Backtrace::new(), + )), + None, + ); + } + size + } + + #[inline] + fn hook_wcscpy(&mut self, dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t { + extern "C" { + fn wcscpy(dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t; + fn wcslen(s: *const wchar_t) -> usize; + } + if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { + (wcslen(src) + 1) * 2 + }) { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgWrite(( + "wcscpy".to_string(), + dest as usize, + (unsafe { wcslen(src) } + 1) * 2, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { + (wcslen(src) + 1) * 2 + }) { + 
AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "wcscpy".to_string(), + src as usize, + (unsafe { wcslen(src) } + 1) * 2, + Backtrace::new(), + )), + None, + ); + } + unsafe { wcscpy(dest, src) } + } + + #[inline] + fn hook_wcscmp(&mut self, s1: *const wchar_t, s2: *const wchar_t) -> i32 { + extern "C" { + fn wcscmp(s1: *const wchar_t, s2: *const wchar_t) -> i32; + fn wcslen(s: *const wchar_t) -> usize; + } + if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { (wcslen(s1) + 1) * 2 }) + { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "wcscmp".to_string(), + s1 as usize, + (unsafe { wcslen(s1) } + 1) * 2, + Backtrace::new(), + )), + None, + ); + } + if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { (wcslen(s2) + 1) * 2 }) + { + AsanErrors::get_mut().report_error( + AsanError::BadFuncArgRead(( + "wcscmp".to_string(), + s2 as usize, + (unsafe { wcslen(s2) } + 1) * 2, + Backtrace::new(), + )), + None, + ); + } + unsafe { wcscmp(s1, s2) } + } + + /// Hook all functions required for ASAN to function, replacing them with our own + /// implementations. + #[allow(clippy::items_after_statements)] + fn hook_functions(&mut self, gum: &Gum) { + let mut interceptor = frida_gum::interceptor::Interceptor::obtain(gum); + + macro_rules! hook_func { + ($lib:expr, $name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { + paste::paste! 
{ + extern "C" { + fn $name($($param: $param_type),*) -> $return_type; + } + #[allow(non_snake_case)] + unsafe extern "C" fn []($($param: $param_type),*) -> $return_type { + let mut invocation = Interceptor::current_invocation(); + let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); + if this.module_map.as_ref().unwrap().find(invocation.return_addr() as u64).is_some() { + this.[]($($param),*) + } else { + $name($($param),*) + } + } + interceptor.replace( + frida_gum::Module::find_export_by_name($lib, stringify!($name)).expect("Failed to find function"), + NativePointer([] as *mut c_void), + NativePointer(self as *mut _ as *mut c_void) + ).ok(); + } + } + } + + macro_rules! hook_func_with_check { + ($lib:expr, $name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { + paste::paste! { + extern "C" { + fn $name($($param: $param_type),*) -> $return_type; + } + #[allow(non_snake_case)] + unsafe extern "C" fn []($($param: $param_type),*) -> $return_type { + let mut invocation = Interceptor::current_invocation(); + let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); + if this.[]($($param),*) { + this.[]($($param),*) + } else { + $name($($param),*) + } + } + interceptor.replace( + frida_gum::Module::find_export_by_name($lib, stringify!($name)).expect("Failed to find function"), + NativePointer([] as *mut c_void), + NativePointer(self as *mut _ as *mut c_void) + ).ok(); + } + } + } + + // Hook the memory allocator functions + hook_func!(None, malloc, (size: usize), *mut c_void); + hook_func!(None, calloc, (nmemb: usize, size: usize), *mut c_void); + hook_func!(None, realloc, (ptr: *mut c_void, size: usize), *mut c_void); + hook_func_with_check!(None, free, (ptr: *mut c_void), ()); + hook_func!(None, memalign, (size: usize, alignment: usize), *mut c_void); + hook_func!( + None, + posix_memalign, + (pptr: *mut *mut c_void, size: usize, alignment: usize), + i32 + ); + hook_func!(None, malloc_usable_size, 
(ptr: *mut c_void), usize); + hook_func!(None, _Znam, (size: usize), *mut c_void); + hook_func!( + None, + _ZnamRKSt9nothrow_t, + (size: usize, _nothrow: *const c_void), + *mut c_void + ); + hook_func!( + None, + _ZnamSt11align_val_t, + (size: usize, alignment: usize), + *mut c_void + ); + hook_func!( + None, + _ZnamSt11align_val_tRKSt9nothrow_t, + (size: usize, alignment: usize, _nothrow: *const c_void), + *mut c_void + ); + hook_func!(None, _Znwm, (size: usize), *mut c_void); + hook_func!( + None, + _ZnwmRKSt9nothrow_t, + (size: usize, _nothrow: *const c_void), + *mut c_void + ); + hook_func!( + None, + _ZnwmSt11align_val_t, + (size: usize, alignment: usize), + *mut c_void + ); + hook_func!( + None, + _ZnwmSt11align_val_tRKSt9nothrow_t, + (size: usize, alignment: usize, _nothrow: *const c_void), + *mut c_void + ); + hook_func!(None, _ZdaPv, (ptr: *mut c_void), ()); + hook_func!(None, _ZdaPvm, (ptr: *mut c_void, _ulong: u64), ()); + hook_func!( + None, + _ZdaPvmSt11align_val_t, + (ptr: *mut c_void, _ulong: u64, _alignment: usize), + () + ); + hook_func!( + None, + _ZdaPvRKSt9nothrow_t, + (ptr: *mut c_void, _nothrow: *const c_void), + () + ); + hook_func!( + None, + _ZdaPvSt11align_val_t, + (ptr: *mut c_void, _alignment: usize), + () + ); + hook_func!( + None, + _ZdaPvSt11align_val_tRKSt9nothrow_t, + (ptr: *mut c_void, _alignment: usize, _nothrow: *const c_void), + () + ); + hook_func!(None, _ZdlPv, (ptr: *mut c_void), ()); + hook_func!(None, _ZdlPvm, (ptr: *mut c_void, _ulong: u64), ()); + hook_func!( + None, + _ZdlPvmSt11align_val_t, + (ptr: *mut c_void, _ulong: u64, _alignment: usize), + () + ); + hook_func!( + None, + _ZdlPvRKSt9nothrow_t, + (ptr: *mut c_void, _nothrow: *const c_void), + () + ); + hook_func!( + None, + _ZdlPvSt11align_val_t, + (ptr: *mut c_void, _alignment: usize), + () + ); + hook_func!( + None, + _ZdlPvSt11align_val_tRKSt9nothrow_t, + (ptr: *mut c_void, _alignment: usize, _nothrow: *const c_void), + () + ); + + hook_func!( + None, + mmap, + ( 
+ addr: *const c_void, + length: usize, + prot: i32, + flags: i32, + fd: i32, + offset: usize + ), + *mut c_void + ); + hook_func!(None, munmap, (addr: *const c_void, length: usize), i32); + + // Hook libc functions which may access allocated memory + hook_func!( + None, + write, + (fd: i32, buf: *const c_void, count: usize), + usize + ); + hook_func!(None, read, (fd: i32, buf: *mut c_void, count: usize), usize); + hook_func!( + None, + fgets, + (s: *mut c_void, size: u32, stream: *mut c_void), + *mut c_void + ); + hook_func!( + None, + memcmp, + (s1: *const c_void, s2: *const c_void, n: usize), + i32 + ); + hook_func!( + None, + memcpy, + (dest: *mut c_void, src: *const c_void, n: usize), + *mut c_void + ); + hook_func!( + None, + mempcpy, + (dest: *mut c_void, src: *const c_void, n: usize), + *mut c_void + ); + hook_func!( + None, + memmove, + (dest: *mut c_void, src: *const c_void, n: usize), + *mut c_void + ); + hook_func!( + None, + memset, + (s: *mut c_void, c: i32, n: usize), + *mut c_void + ); + hook_func!( + None, + memchr, + (s: *mut c_void, c: i32, n: usize), + *mut c_void + ); + hook_func!( + None, + memrchr, + (s: *mut c_void, c: i32, n: usize), + *mut c_void + ); + hook_func!( + None, + memmem, + ( + haystack: *const c_void, + haystacklen: usize, + needle: *const c_void, + needlelen: usize + ), + *mut c_void + ); + #[cfg(not(target_os = "android"))] + hook_func!(None, bzero, (s: *mut c_void, n: usize), ()); + #[cfg(not(target_os = "android"))] + hook_func!(None, explicit_bzero, (s: *mut c_void, n: usize), ()); + #[cfg(not(target_os = "android"))] + hook_func!( + None, + bcmp, + (s1: *const c_void, s2: *const c_void, n: usize), + i32 + ); + hook_func!(None, strchr, (s: *mut c_char, c: i32), *mut c_char); + hook_func!(None, strrchr, (s: *mut c_char, c: i32), *mut c_char); + hook_func!( + None, + strcasecmp, + (s1: *const c_char, s2: *const c_char), + i32 + ); + hook_func!( + None, + strncasecmp, + (s1: *const c_char, s2: *const c_char, n: usize), + i32 
+ ); + hook_func!( + None, + strcat, + (dest: *mut c_char, src: *const c_char), + *mut c_char + ); + hook_func!(None, strcmp, (s1: *const c_char, s2: *const c_char), i32); + hook_func!( + None, + strncmp, + (s1: *const c_char, s2: *const c_char, n: usize), + i32 + ); + hook_func!( + None, + strcpy, + (dest: *mut c_char, src: *const c_char), + *mut c_char + ); + hook_func!( + None, + strncpy, + (dest: *mut c_char, src: *const c_char, n: usize), + *mut c_char + ); + hook_func!( + None, + stpcpy, + (dest: *mut c_char, src: *const c_char), + *mut c_char + ); + hook_func!(None, strdup, (s: *const c_char), *mut c_char); + hook_func!(None, strlen, (s: *const c_char), usize); + hook_func!(None, strnlen, (s: *const c_char, n: usize), usize); + hook_func!( + None, + strstr, + (haystack: *const c_char, needle: *const c_char), + *mut c_char + ); + hook_func!( + None, + strcasestr, + (haystack: *const c_char, needle: *const c_char), + *mut c_char + ); + hook_func!(None, atoi, (nptr: *const c_char), i32); + hook_func!(None, atol, (nptr: *const c_char), i32); + hook_func!(None, atoll, (nptr: *const c_char), i64); + hook_func!(None, wcslen, (s: *const wchar_t), usize); + hook_func!( + None, + wcscpy, + (dest: *mut wchar_t, src: *const wchar_t), + *mut wchar_t + ); + hook_func!(None, wcscmp, (s1: *const wchar_t, s2: *const wchar_t), i32); } #[allow(clippy::cast_sign_loss)] // for displacement @@ -1071,10 +1822,10 @@ impl AsanRuntime { )) } } else { - let mut allocator = Allocator::get(); #[allow(clippy::option_if_let_else)] - if let Some(metadata) = - allocator.find_metadata(fault_address, self.regs[base_reg as usize]) + if let Some(metadata) = self + .allocator + .find_metadata(fault_address, self.regs[base_reg as usize]) { let asan_readwrite_error = AsanReadWriteError { registers: self.regs, @@ -1103,293 +1854,133 @@ impl AsanRuntime { )) } }; - self.report_error(error); + AsanErrors::get_mut().report_error(error, Some(&self.instrumented_ranges)); } - 
#[allow(clippy::too_many_lines)] - fn report_error(&mut self, error: AsanError) { + #[allow(clippy::unused_self)] + fn generate_shadow_check_function(&mut self) { + let shadow_bit = self.allocator.shadow_bit(); + let mut ops = dynasmrt::VecAssembler::::new(0); + dynasm!(ops + ; .arch aarch64 + + // calculate the shadow address + ; mov x5, #1 + ; add x5, xzr, x5, lsl #shadow_bit + ; add x5, x5, x0, lsr #3 + ; ubfx x5, x5, #0, #(shadow_bit + 2) + + ; cmp x1, #0 + ; b.eq >return_success + // check if the ptr is not aligned to 8 bytes + ; ands x6, x0, #7 + ; b.eq >no_start_offset + + // we need to test the high bits from the first shadow byte + ; ldrh w7, [x5, #0] + ; rev16 w7, w7 + ; rbit w7, w7 + ; lsr x7, x7, #16 + ; lsr x7, x7, x6 + + ; cmp x1, #8 + ; b.lt >dont_fill_to_8 + ; mov x2, #8 + ; sub x6, x2, x6 + ; b >check_bits + ; dont_fill_to_8: + ; mov x6, x1 + ; check_bits: + ; mov x2, #1 + ; lsl x2, x2, x6 + ; sub x4, x2, #1 + + // if shadow_bits & size_to_test != size_to_test: fail + ; and x7, x7, x4 + ; cmp x7, x4 + ; b.ne >return_failure + + // size -= size_to_test + ; sub x1, x1, x6 + // shadow_addr += 1 (we consumed the initial byte in the above test) + ; add x5, x5, 1 + + ; no_start_offset: + // num_shadow_bytes = size / 8 + ; lsr x4, x1, #3 + ; eor x3, x3, x3 + ; sub x3, x3, #1 + + // if num_shadow_bytes < 8; then goto check_bytes; else check_8_shadow_bytes + ; check_8_shadow_bytes: + ; cmp x4, #0x8 + ; b.lt >less_than_8_shadow_bytes_remaining + ; ldr x7, [x5], #8 + ; cmp x7, x3 + ; b.ne >return_failure + ; sub x4, x4, #8 + ; sub x1, x1, #64 + ; b check_trailing_bits + ; ldrb w7, [x5], #1 + ; cmp w7, #0xff + ; b.ne >return_failure + ; sub x4, x4, #1 + ; sub x1, x1, #8 + ; b return_success + + ; and x4, x1, #7 + ; mov x2, #1 + ; lsl x2, x2, x4 + ; sub x4, x2, #1 + + ; ldrh w7, [x5, #0] + ; rev16 w7, w7 + ; rbit w7, w7 + ; lsr x7, x7, #16 + ; and x7, x7, x4 + ; cmp x7, x4 + ; b.ne >return_failure + + ; return_success: + ; mov x0, #1 + ; b >prologue + + ; 
return_failure: + ; mov x0, #0 + + + ; prologue: + ; ret + ); + + let blob = ops.finalize().unwrap(); unsafe { - ASAN_ERRORS.as_mut().unwrap().errors.push(error.clone()); - } - - let mut out_stream = default_output_stream(); - let output = out_stream.as_mut(); - - let backtrace_printer = BacktracePrinter::new() - .clear_frame_filters() - .print_addresses(true) - .verbosity(Verbosity::Full) - .add_frame_filter(Box::new(|frames| { - frames.retain( - |x| matches!(&x.name, Some(n) if !n.starts_with("libafl_frida::asan_rt::")), - ) - })); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " Memory error detected! ").unwrap(); - output - .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + let mapping = mmap( + std::ptr::null_mut(), + 0x1000, + ProtFlags::all(), + MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE, + -1, + 0, + ) .unwrap(); - write!(output, "{}", error.description()).unwrap(); - match error { - AsanError::OobRead(mut error) - | AsanError::OobWrite(mut error) - | AsanError::ReadAfterFree(mut error) - | AsanError::WriteAfterFree(mut error) => { - let (basereg, indexreg, _displacement, fault_address) = error.fault; - - if let Some((range, path)) = self.instrumented_ranges.get_key_value(&error.pc) { - writeln!( - output, - " at 0x{:x} ({}@0x{:04x}), faulting address 0x{:x}", - error.pc, - path, - error.pc - range.start, - fault_address - ) - .unwrap(); - } else { - writeln!( - output, - " at 0x{:x}, faulting address 0x{:x}", - error.pc, fault_address - ) - .unwrap(); - } - output.reset().unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); - for reg in 0..=30 { - if reg == basereg { - output - .set_color(ColorSpec::new().set_fg(Some(Color::Red))) - .unwrap(); - } else if reg == indexreg { - output - .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) - .unwrap(); - } - write!( - output, - "x{:02}: 0x{:016x} ", - reg, error.registers[reg as usize] - ) - .unwrap(); - 
output.reset().unwrap(); - if reg % 4 == 3 { - writeln!(output).unwrap(); - } - } - writeln!(output, "pc : 0x{:016x} ", error.pc).unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " CODE ").unwrap(); - let mut cs = Capstone::new() - .arm64() - .mode(capstone::arch::arm64::ArchMode::Arm) - .build() - .unwrap(); - cs.set_skipdata(true).expect("failed to set skipdata"); - - let start_pc = error.pc - 4 * 5; - for insn in cs - .disasm_count( - unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, - start_pc as u64, - 11, - ) - .expect("failed to disassemble instructions") - .iter() - { - if insn.address() as usize == error.pc { - output - .set_color(ColorSpec::new().set_fg(Some(Color::Red))) - .unwrap(); - writeln!(output, "\t => {}", insn).unwrap(); - output.reset().unwrap(); - } else { - writeln!(output, "\t {}", insn).unwrap(); - } - } - backtrace_printer - .print_trace(&error.backtrace, output) - .unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); - let offset: i64 = fault_address as i64 - error.metadata.address as i64; - let direction = if offset > 0 { "right" } else { "left" }; - writeln!( - output, - "access is {} to the {} of the 0x{:x} byte allocation at 0x{:x}", - offset, direction, error.metadata.size, error.metadata.address - ) - .unwrap(); - - if error.metadata.is_malloc_zero { - writeln!(output, "allocation was zero-sized").unwrap(); - } - - if let Some(backtrace) = error.metadata.allocation_site_backtrace.as_mut() { - writeln!(output, "allocation site backtrace:").unwrap(); - backtrace.resolve(); - backtrace_printer.print_trace(backtrace, output).unwrap(); - } - - if error.metadata.freed { - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " FREE INFO ").unwrap(); - if let Some(backtrace) = error.metadata.release_site_backtrace.as_mut() { - writeln!(output, "free site backtrace:").unwrap(); - backtrace.resolve(); - 
backtrace_printer.print_trace(backtrace, output).unwrap(); - } - } - } - AsanError::DoubleFree((ptr, mut metadata, backtrace)) => { - writeln!(output, " of {:?}", ptr).unwrap(); - output.reset().unwrap(); - backtrace_printer.print_trace(&backtrace, output).unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); - writeln!( - output, - "allocation at 0x{:x}, with size 0x{:x}", - metadata.address, metadata.size - ) - .unwrap(); - if metadata.is_malloc_zero { - writeln!(output, "allocation was zero-sized").unwrap(); - } - - if let Some(backtrace) = metadata.allocation_site_backtrace.as_mut() { - writeln!(output, "allocation site backtrace:").unwrap(); - backtrace.resolve(); - backtrace_printer.print_trace(backtrace, output).unwrap(); - } - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " FREE INFO ").unwrap(); - if let Some(backtrace) = metadata.release_site_backtrace.as_mut() { - writeln!(output, "previous free site backtrace:").unwrap(); - backtrace.resolve(); - backtrace_printer.print_trace(backtrace, output).unwrap(); - } - } - AsanError::UnallocatedFree((ptr, backtrace)) => { - writeln!(output, " of {:#016x}", ptr).unwrap(); - output.reset().unwrap(); - backtrace_printer.print_trace(&backtrace, output).unwrap(); - } - AsanError::Leak((ptr, mut metadata)) => { - writeln!(output, " of {:#016x}", ptr).unwrap(); - output.reset().unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap(); - writeln!( - output, - "allocation at 0x{:x}, with size 0x{:x}", - metadata.address, metadata.size - ) - .unwrap(); - if metadata.is_malloc_zero { - writeln!(output, "allocation was zero-sized").unwrap(); - } - - if let Some(backtrace) = metadata.allocation_site_backtrace.as_mut() { - writeln!(output, "allocation site backtrace:").unwrap(); - backtrace.resolve(); - backtrace_printer.print_trace(backtrace, output).unwrap(); - } - } - 
AsanError::Unknown((registers, pc, fault, backtrace)) - | AsanError::StackOobRead((registers, pc, fault, backtrace)) - | AsanError::StackOobWrite((registers, pc, fault, backtrace)) => { - let (basereg, indexreg, _displacement, fault_address) = fault; - - if let Ok((start, _, _, path)) = find_mapping_for_address(pc) { - writeln!( - output, - " at 0x{:x} ({}:0x{:04x}), faulting address 0x{:x}", - pc, - path, - pc - start, - fault_address - ) - .unwrap(); - } else { - writeln!( - output, - " at 0x{:x}, faulting address 0x{:x}", - pc, fault_address - ) - .unwrap(); - } - output.reset().unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); - for reg in 0..=30 { - if reg == basereg { - output - .set_color(ColorSpec::new().set_fg(Some(Color::Red))) - .unwrap(); - } else if reg == indexreg { - output - .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) - .unwrap(); - } - write!(output, "x{:02}: 0x{:016x} ", reg, registers[reg as usize]).unwrap(); - output.reset().unwrap(); - if reg % 4 == 3 { - writeln!(output).unwrap(); - } - } - writeln!(output, "pc : 0x{:016x} ", pc).unwrap(); - - #[allow(clippy::non_ascii_literal)] - writeln!(output, "{:━^100}", " CODE ").unwrap(); - let mut cs = Capstone::new() - .arm64() - .mode(capstone::arch::arm64::ArchMode::Arm) - .build() - .unwrap(); - cs.set_skipdata(true).expect("failed to set skipdata"); - - let start_pc = pc - 4 * 5; - for insn in cs - .disasm_count( - unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) }, - start_pc as u64, - 11, - ) - .expect("failed to disassemble instructions") - .iter() - { - if insn.address() as usize == pc { - output - .set_color(ColorSpec::new().set_fg(Some(Color::Red))) - .unwrap(); - writeln!(output, "\t => {}", insn).unwrap(); - output.reset().unwrap(); - } else { - writeln!(output, "\t {}", insn).unwrap(); - } - } - backtrace_printer.print_trace(&backtrace, output).unwrap(); - } - }; - - if 
!self.options.asan_continue_after_error() { - panic!("Crashing target!"); + blob.as_ptr() + .copy_to_nonoverlapping(mapping as *mut u8, blob.len()); + self.shadow_check_func = Some(std::mem::transmute(mapping as *mut u8)); } } #[allow(clippy::unused_self)] fn generate_shadow_check_blob(&mut self, bit: u32) -> Box<[u8]> { - let shadow_bit = Allocator::get().shadow_bit as u32; + let shadow_bit = self.allocator.shadow_bit(); macro_rules! shadow_check { ($ops:ident, $bit:expr) => {dynasm!($ops ; .arch aarch64 @@ -1420,7 +2011,7 @@ impl AsanRuntime { #[allow(clippy::unused_self)] fn generate_shadow_check_exact_blob(&mut self, val: u64) -> Box<[u8]> { - let shadow_bit = Allocator::get().shadow_bit as u32; + let shadow_bit = self.allocator.shadow_bit(); macro_rules! shadow_check_exact { ($ops:ident, $val:expr) => {dynasm!($ops ; .arch aarch64 @@ -1673,148 +2264,3 @@ impl AsanRuntime { self.blob_check_mem_64bytes.as_ref().unwrap() } } - -/// static field for `AsanErrors` for a run -pub static mut ASAN_ERRORS: Option = None; - -/// An observer for frida address sanitizer `AsanError`s for a frida executor run -#[derive(Serialize, Deserialize)] -#[allow(clippy::unsafe_derive_deserialize)] -pub struct AsanErrorsObserver { - errors: OwnedPtr>, -} - -impl Observer for AsanErrorsObserver {} - -impl HasExecHooks for AsanErrorsObserver { - fn pre_exec( - &mut self, - _fuzzer: &mut Z, - _state: &mut S, - _mgr: &mut EM, - _input: &I, - ) -> Result<(), Error> { - unsafe { - if ASAN_ERRORS.is_some() { - ASAN_ERRORS.as_mut().unwrap().clear(); - } - } - - Ok(()) - } -} - -impl Named for AsanErrorsObserver { - #[inline] - fn name(&self) -> &str { - "AsanErrors" - } -} - -impl AsanErrorsObserver { - /// Creates a new `AsanErrorsObserver`, pointing to a constant `AsanErrors` field - #[must_use] - pub fn new(errors: &'static Option) -> Self { - Self { - errors: OwnedPtr::Ptr(errors as *const Option), - } - } - - /// Creates a new `AsanErrorsObserver`, owning the `AsanErrors` - #[must_use] - 
pub fn new_owned(errors: Option) -> Self { - Self { - errors: OwnedPtr::Owned(Box::new(errors)), - } - } - - /// Creates a new `AsanErrorsObserver` from a raw ptr - #[must_use] - pub fn new_from_ptr(errors: *const Option) -> Self { - Self { - errors: OwnedPtr::Ptr(errors), - } - } - - /// gets the [`AsanErrors`] from the previous run - #[must_use] - pub fn errors(&self) -> Option<&AsanErrors> { - match &self.errors { - OwnedPtr::Ptr(p) => unsafe { p.as_ref().unwrap().as_ref() }, - OwnedPtr::Owned(b) => b.as_ref().as_ref(), - } - } -} - -/// A feedback reporting potential [`AsanErrors`] from an `AsanErrorsObserver` -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct AsanErrorsFeedback { - errors: Option, -} - -impl Feedback for AsanErrorsFeedback -where - I: Input + HasTargetBytes, -{ - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &I, - observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - let observer = observers - .match_name::("AsanErrors") - .expect("An AsanErrorsFeedback needs an AsanErrorsObserver"); - match observer.errors() { - None => Ok(false), - Some(errors) => { - if errors.errors.is_empty() { - Ok(false) - } else { - self.errors = Some(errors.clone()); - Ok(true) - } - } - } - } - - fn append_metadata(&mut self, _state: &mut S, testcase: &mut Testcase) -> Result<(), Error> { - if let Some(errors) = &self.errors { - testcase.add_metadata(errors.clone()); - } - - Ok(()) - } - - fn discard_metadata(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { - self.errors = None; - Ok(()) - } -} - -impl Named for AsanErrorsFeedback { - #[inline] - fn name(&self) -> &str { - "AsanErrors" - } -} - -impl AsanErrorsFeedback { - /// Create a new `AsanErrorsFeedback` - #[must_use] - pub fn new() -> Self { - Self { errors: None } - } -} - -impl Default for AsanErrorsFeedback { - fn default() -> Self { - Self::new() - } -} diff --git a/libafl_frida/src/helper.rs 
b/libafl_frida/src/helper.rs index b6d0be139b..0df4f21dcf 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -19,7 +19,6 @@ use capstone::{ Capstone, Insn, }; -use core::cell::RefCell; #[cfg(target_arch = "x86_64")] use frida_gum::instruction_writer::X86Register; #[cfg(target_arch = "aarch64")] @@ -34,7 +33,7 @@ use frida_gum::{Gum, Module, PageProtection}; use num_traits::cast::FromPrimitive; use rangemap::RangeMap; -use std::{path::PathBuf, rc::Rc}; +use std::path::PathBuf; use nix::sys::mman::{mmap, MapFlags, ProtFlags}; @@ -46,7 +45,7 @@ pub trait FridaHelper<'a> { fn transformer(&self) -> &Transformer<'a>; /// Register a new thread with this `FridaHelper` - fn register_thread(&self); + fn register_thread(&mut self); /// Called prior to execution of an input fn pre_exec(&mut self, input: &I); @@ -59,6 +58,8 @@ pub trait FridaHelper<'a> { /// pointer to the frida coverage map fn map_ptr(&mut self) -> *mut u8; + + fn ranges(&self) -> &RangeMap; } /// (Default) map size for frida coverage reporting @@ -75,7 +76,7 @@ pub struct FridaInstrumentationHelper<'a> { transformer: Option>, #[cfg(target_arch = "aarch64")] capstone: Capstone, - asan_runtime: Rc>, + asan_runtime: AsanRuntime, ranges: RangeMap, options: &'a FridaOptions, drcov_basic_blocks: Vec, @@ -87,8 +88,8 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { } /// Register the current thread with the [`FridaInstrumentationHelper`] - fn register_thread(&self) { - self.asan_runtime.borrow().register_thread(); + fn register_thread(&mut self) { + self.asan_runtime.register_thread(); } fn pre_exec(&mut self, input: &I) { @@ -96,7 +97,6 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { let slice = target_bytes.as_slice(); //println!("target_bytes: {:02x?}", slice); self.asan_runtime - .borrow() .unpoison(slice.as_ptr() as usize, slice.len()); } @@ -111,9 +111,9 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { if self.options.asan_enabled() { if 
self.options.asan_detect_leaks() { - self.asan_runtime.borrow_mut().check_for_leaks(); + self.asan_runtime.check_for_leaks(); } - self.asan_runtime.borrow_mut().reset_allocations(); + self.asan_runtime.reset_allocations(); } } @@ -124,6 +124,10 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { fn map_ptr(&mut self) -> *mut u8 { self.map.as_mut_ptr() } + + fn ranges(&self) -> &RangeMap { + &self.ranges + } } /// Helper function to get the size of a module's CODE section from frida @@ -312,7 +316,6 @@ impl<'a> FridaInstrumentationHelper<'a> { instruction.put_callout(|context| { let real_address = match helper .asan_runtime - .borrow() .real_address_for_stalked(pc(&context)) { Some(address) => *address, @@ -347,7 +350,7 @@ impl<'a> FridaInstrumentationHelper<'a> { } } if helper.options().asan_enabled() || helper.options().drcov_enabled() { - helper.asan_runtime.borrow_mut().add_stalked_address( + helper.asan_runtime.add_stalked_address( output.writer().pc() as usize - 4, address as usize, ); @@ -358,7 +361,7 @@ impl<'a> FridaInstrumentationHelper<'a> { }); helper.transformer = Some(transformer); if helper.options().asan_enabled() || helper.options().drcov_enabled() { - helper.asan_runtime.borrow_mut().init(modules_to_instrument); + helper.asan_runtime.init(gum, modules_to_instrument); } } helper @@ -410,7 +413,7 @@ impl<'a> FridaInstrumentationHelper<'a> { writer.put_b_label(after_report_impl); self.current_report_impl = writer.pc(); - writer.put_bytes(self.asan_runtime.borrow().blob_report()); + writer.put_bytes(self.asan_runtime.blob_report()); writer.put_label(after_report_impl); } @@ -547,18 +550,18 @@ impl<'a> FridaInstrumentationHelper<'a> { } // Insert the check_shadow_mem code blob match width { - 1 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_byte()), - 2 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_halfword()), - 3 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_3bytes()), - 4 => 
writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_dword()), - 6 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_6bytes()), - 8 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_qword()), - 12 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_12bytes()), - 16 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_16bytes()), - 24 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_24bytes()), - 32 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_32bytes()), - 48 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_48bytes()), - 64 => writer.put_bytes(&self.asan_runtime.borrow().blob_check_mem_64bytes()), + 1 => writer.put_bytes(&self.asan_runtime.blob_check_mem_byte()), + 2 => writer.put_bytes(&self.asan_runtime.blob_check_mem_halfword()), + 3 => writer.put_bytes(&self.asan_runtime.blob_check_mem_3bytes()), + 4 => writer.put_bytes(&self.asan_runtime.blob_check_mem_dword()), + 6 => writer.put_bytes(&self.asan_runtime.blob_check_mem_6bytes()), + 8 => writer.put_bytes(&self.asan_runtime.blob_check_mem_qword()), + 12 => writer.put_bytes(&self.asan_runtime.blob_check_mem_12bytes()), + 16 => writer.put_bytes(&self.asan_runtime.blob_check_mem_16bytes()), + 24 => writer.put_bytes(&self.asan_runtime.blob_check_mem_24bytes()), + 32 => writer.put_bytes(&self.asan_runtime.blob_check_mem_32bytes()), + 48 => writer.put_bytes(&self.asan_runtime.blob_check_mem_48bytes()), + 64 => writer.put_bytes(&self.asan_runtime.blob_check_mem_64bytes()), _ => false, }; diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index d14d38bc88..c6f0f47b06 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -3,17 +3,22 @@ The frida executor is a binary-only mode for `LibAFL`. It can report coverage and, on supported architecutres, even reports memory access errors. 
*/ +/// The frida-asan allocator +pub mod alloc; +/// Handling of ASAN errors +pub mod asan_errors; /// The frida address sanitizer runtime pub mod asan_rt; /// The `LibAFL` frida helper pub mod helper; + // for parsing asan cores use libafl::bolts::os::parse_core_bind_arg; // for getting current core_id use core_affinity::get_core_ids; /// A representation of the various Frida options -#[derive(Clone, Debug)] +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] #[allow(clippy::struct_excessive_bools)] pub struct FridaOptions { enable_asan: bool,