diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml index ca6acb30a8..3b83db753c 100644 --- a/fuzzers/frida_libpng/Cargo.toml +++ b/fuzzers/frida_libpng/Cargo.toml @@ -29,8 +29,8 @@ reqwest = { version = "0.11.4", features = ["blocking"] } [dependencies] libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", "llmp_bind_public" ] } #, "llmp_small_maps", "llmp_debug"]} -capstone = "0.8.0" -frida-gum = { version = "0.5.2", features = [ "auto-download", "event-sink", "invocation-listener"] } +capstone = "0.10.0" +frida-gum = { version = "0.6.1", features = [ "auto-download", "event-sink", "invocation-listener"] } libafl_frida = { path = "../../libafl_frida", version = "0.6.1", features = ["cmplog"] } libafl_targets = { path = "../../libafl_targets", version = "0.6.1" , features = ["sancov_cmplog"] } lazy_static = "1.4.0" diff --git a/fuzzers/frida_libpng/harness.cc b/fuzzers/frida_libpng/harness.cc index 0a257baa00..ff9e16f62a 100644 --- a/fuzzers/frida_libpng/harness.cc +++ b/fuzzers/frida_libpng/harness.cc @@ -133,7 +133,7 @@ HARNESS_EXPORTS extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_ } - func1(); + // func1(); std::vector v(data, data + size); if (png_sig_cmp(v.data(), 0, kPngHeaderSize)) { diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index 413db5c36b..74de045130 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -156,6 +156,7 @@ where ) -> Self { let mut stalker = Stalker::new(gum); + #[cfg(all(not(debug_assertions), target_arch = "x86_64"))] for range in helper.ranges().gaps(&(0..usize::MAX)) { println!("excluding range: {:x}-{:x}", range.start, range.end); stalker.exclude(&MemoryRange::new( diff --git a/libafl_frida/Cargo.toml b/libafl_frida/Cargo.toml index 72cb0f7e8a..cc096d1907 100644 --- a/libafl_frida/Cargo.toml +++ b/libafl_frida/Cargo.toml @@ -27,11 +27,11 @@ hashbrown = "0.11" libloading = "0.7.0" rangemap = 
"0.1.10" frida-gum-sys = { version = "0.3", features = [ "auto-download", "event-sink", "invocation-listener"] } -frida-gum = { version = "0.5.2", features = [ "auto-download", "event-sink", "invocation-listener"] } +frida-gum = { version = "0.6.1", features = [ "auto-download", "event-sink", "invocation-listener"] } core_affinity = { version = "0.5", git = "https://github.com/s1341/core_affinity_rs", rev = "6648a7a" } regex = "1.4" dynasmrt = "1.0.1" -capstone = "0.8.0" +capstone = "0.10.0" color-backtrace ={ version = "0.5", features = [ "resolve-modules" ] } termcolor = "1.1.2" serde = "1.0" diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index f65048542d..c05fd0db00 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -27,11 +27,8 @@ pub(crate) struct Allocator { allocations: HashMap, shadow_pages: RangeSet, allocation_queue: HashMap>, - #[cfg(target_arch = "aarch64")] largest_allocation: usize, - #[cfg(target_arch = "aarch64")] base_mapping_addr: usize, - #[cfg(target_arch = "aarch64")] current_mapping_addr: usize, } @@ -68,6 +65,7 @@ impl Allocator { // probe to find a usable shadow bit: let mut shadow_bit: usize = 0; + #[cfg(target_arch = "aarch64")] for try_shadow_bit in &[46usize, 36usize] { let addr: usize = 1 << try_shadow_bit; if unsafe { @@ -89,9 +87,36 @@ impl Allocator { break; } } + + // x86_64's userspace's up to 0x7fff-ffff-ffff so 46 is not available. (0x4000-0000-0000 - 0xc000-0000-0000) + // we'd also want to avoid 0x5555-xxxx-xxxx because programs are mapped there. so 45 is not available either (0x2000-0000-0000 - 0x6000-0000-0000). + // This memory map is for amd64 linux. 
+ #[cfg(all(target_arch = "x86_64", target_os = "linux"))] + { + let try_shadow_bit: usize = 44; + let addr: usize = 1 << try_shadow_bit; + if unsafe { + mmap( + addr as *mut c_void, + page_size, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_PRIVATE + | ANONYMOUS_FLAG + | MapFlags::MAP_FIXED + | MapFlags::MAP_NORESERVE, + -1, + 0, + ) + } + .is_ok() + { + shadow_bit = try_shadow_bit; + } + } assert!(shadow_bit != 0); // attempt to pre-map the entire shadow-memory space + let addr: usize = 1 << shadow_bit; let pre_allocated_shadow = unsafe { mmap( @@ -117,11 +142,8 @@ impl Allocator { allocations: HashMap::new(), shadow_pages: RangeSet::new(), allocation_queue: HashMap::new(), - #[cfg(target_arch = "aarch64")] largest_allocation: 0, - #[cfg(target_arch = "aarch64")] base_mapping_addr: addr + addr + addr, - #[cfg(target_arch = "aarch64")] current_mapping_addr: addr + addr + addr, } } @@ -144,7 +166,6 @@ impl Allocator { (value / self.page_size) * self.page_size } - #[cfg(target_arch = "aarch64")] fn find_smallest_fit(&mut self, size: usize) -> Option { let mut current_size = size; while current_size <= self.largest_allocation { @@ -159,12 +180,11 @@ impl Allocator { None } - #[cfg(target_arch = "aarch64")] #[must_use] pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void { let mut is_malloc_zero = false; let size = if size == 0 { - println!("zero-sized allocation!"); + // println!("zero-sized allocation!"); is_malloc_zero = true; 16 } else { @@ -187,6 +207,7 @@ impl Allocator { } metadata } else { + // println!("{:x}, {:x}", self.current_mapping_addr, rounded_up_size); let mapping = match mmap( self.current_mapping_addr as *mut c_void, rounded_up_size, @@ -214,7 +235,6 @@ impl Allocator { actual_size: rounded_up_size, ..AllocationMetadata::default() }; - if self.options.enable_asan_allocation_backtraces { metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved()); } @@ -232,11 +252,10 @@ impl Allocator { 
self.allocations .insert(metadata.address + self.page_size, metadata); - //println!("serving address: {:?}, size: {:x}", address, size); + // println!("serving address: {:?}, size: {:x}", address, size); address } - #[cfg(target_arch = "aarch64")] pub unsafe fn release(&mut self, ptr: *mut c_void) { let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) { metadata @@ -320,7 +339,6 @@ impl Allocator { } } - #[cfg(target_arch = "aarch64")] pub fn get_usable_size(&self, ptr: *mut c_void) -> usize { match self.allocations.get(&(ptr as usize)) { Some(metadata) => metadata.size, @@ -334,14 +352,14 @@ impl Allocator { } fn unpoison(start: usize, size: usize) { - //println!("unpoisoning {:x} for {:x}", start, size / 8 + 1); + // println!("unpoisoning {:x} for {:x}", start, size / 8 + 1); unsafe { - //println!("memset: {:?}", start as *mut c_void); + // println!("memset: {:?}", start as *mut c_void); memset(start as *mut c_void, 0xff, size / 8); let remainder = size % 8; if remainder > 0 { - //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); + // println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); memset( (start + size / 8) as *mut c_void, (0xff << (8 - remainder)) & 0xff, @@ -352,14 +370,14 @@ impl Allocator { } pub fn poison(start: usize, size: usize) { - //println!("poisoning {:x} for {:x}", start, size / 8 + 1); + // println!("poisoning {:x} for {:x}", start, size / 8 + 1); unsafe { - //println!("memset: {:?}", start as *mut c_void); + // println!("memset: {:?}", start as *mut c_void); memset(start as *mut c_void, 0x00, size / 8); let remainder = size % 8; if remainder > 0 { - //println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); + // println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); memset((start + size / 8) as *mut c_void, 0x00, 1); } } @@ -381,7 +399,12 @@ impl Allocator { let shadow_end = self.round_up_to_page((end - start) / 8) + self.page_size + 
shadow_start; for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) { - //println!("range: {:x}-{:x}, pagesize: {}", range.start, range.end, self.page_size); + /* + println!( + "range: {:x}-{:x}, pagesize: {}", + range.start, range.end, self.page_size + ); + */ unsafe { mmap( range.start as *mut c_void, @@ -406,12 +429,10 @@ impl Allocator { (shadow_mapping_start, (end - start) / 8) } - #[cfg(target_arch = "aarch64")] pub fn map_to_shadow(&self, start: usize) -> usize { map_to_shadow!(self, start) } - #[cfg(target_arch = "aarch64")] #[inline] pub fn is_managed(&self, ptr: *mut c_void) -> bool { //self.allocations.contains_key(&(ptr as usize)) diff --git a/libafl_frida/src/asan_errors.rs b/libafl_frida/src/asan_errors.rs index c4db043d65..c7baabe2fc 100644 --- a/libafl_frida/src/asan_errors.rs +++ b/libafl_frida/src/asan_errors.rs @@ -20,17 +20,21 @@ use serde::{Deserialize, Serialize}; use std::io::Write; use termcolor::{Color, ColorSpec, WriteColor}; -use crate::{alloc::AllocationMetadata, FridaOptions}; +#[cfg(target_arch = "x86_64")] +use crate::asan_rt::ASAN_SAVE_REGISTER_NAMES; + +use crate::{alloc::AllocationMetadata, asan_rt::ASAN_SAVE_REGISTER_COUNT, FridaOptions}; #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct AsanReadWriteError { - pub registers: [usize; 32], + pub registers: [usize; ASAN_SAVE_REGISTER_COUNT], pub pc: usize, - pub fault: (u16, u16, usize, usize), + pub fault: (Option, Option, usize, usize), pub metadata: AllocationMetadata, pub backtrace: Backtrace, } +#[allow(clippy::type_complexity)] #[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] pub(crate) enum AsanError { OobRead(AsanReadWriteError), @@ -39,10 +43,31 @@ pub(crate) enum AsanError { WriteAfterFree(AsanReadWriteError), DoubleFree((usize, AllocationMetadata, Backtrace)), UnallocatedFree((usize, Backtrace)), - Unknown(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + Unknown( + ( + [usize; ASAN_SAVE_REGISTER_COUNT], + usize, + (Option, 
Option, usize, usize), + Backtrace, + ), + ), Leak((usize, AllocationMetadata)), - StackOobRead(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), - StackOobWrite(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)), + StackOobRead( + ( + [usize; ASAN_SAVE_REGISTER_COUNT], + usize, + (Option, Option, usize, usize), + Backtrace, + ), + ), + StackOobWrite( + ( + [usize; ASAN_SAVE_REGISTER_COUNT], + usize, + (Option, Option, usize, usize), + Backtrace, + ), + ), BadFuncArgRead((String, usize, usize, usize, Backtrace)), BadFuncArgWrite((String, usize, usize, usize, Backtrace)), } @@ -160,12 +185,13 @@ impl AsanErrors { #[allow(clippy::non_ascii_literal)] writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + #[cfg(target_arch = "aarch64")] for reg in 0..=30 { - if reg == basereg { + if basereg.is_some() && reg == basereg.unwrap() as usize { output .set_color(ColorSpec::new().set_fg(Some(Color::Red))) .unwrap(); - } else if reg == indexreg { + } else if indexreg.is_some() && reg == indexreg.unwrap() as usize { output .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) .unwrap(); @@ -181,15 +207,52 @@ impl AsanErrors { writeln!(output).unwrap(); } } + #[cfg(target_arch = "aarch64")] writeln!(output, "pc : 0x{:016x} ", error.pc).unwrap(); + #[cfg(target_arch = "x86_64")] + for (reg, name) in ASAN_SAVE_REGISTER_NAMES + .iter() + .enumerate() + .take(ASAN_SAVE_REGISTER_COUNT) + { + if basereg.is_some() && reg == basereg.unwrap() as usize { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if indexreg.is_some() && reg == indexreg.unwrap() as usize { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!(output, "{}: 0x{:016x} ", name, error.registers[reg]).unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + + #[cfg(target_arch = "x86_64")] + writeln!(output, "rip: 0x{:016x}", error.pc).unwrap(); + #[allow(clippy::non_ascii_literal)] 
writeln!(output, "{:━^100}", " CODE ").unwrap(); + + #[cfg(target_arch = "aarch64")] let mut cs = Capstone::new() .arm64() .mode(capstone::arch::arm64::ArchMode::Arm) .build() .unwrap(); + + #[cfg(target_arch = "x86_64")] + let mut cs = Capstone::new() + .x86() + .mode(capstone::arch::x86::ArchMode::Mode64) + .detail(true) + .build() + .expect("Failed to create Capstone object"); + cs.set_skipdata(true).expect("failed to set skipdata"); let start_pc = error.pc - 4 * 5; @@ -380,12 +443,14 @@ impl AsanErrors { #[allow(clippy::non_ascii_literal)] writeln!(output, "{:━^100}", " REGISTERS ").unwrap(); + + #[cfg(target_arch = "aarch64")] for reg in 0..=30 { - if reg == basereg { + if basereg.is_some() && reg == basereg.unwrap() as usize { output .set_color(ColorSpec::new().set_fg(Some(Color::Red))) .unwrap(); - } else if reg == indexreg { + } else if indexreg.is_some() && reg == indexreg.unwrap() as usize { output .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) .unwrap(); @@ -396,15 +461,53 @@ impl AsanErrors { writeln!(output).unwrap(); } } + #[cfg(target_arch = "aarch64")] writeln!(output, "pc : 0x{:016x} ", pc).unwrap(); + #[cfg(target_arch = "x86_64")] + for reg in 0..ASAN_SAVE_REGISTER_COUNT { + if basereg.is_some() && reg == basereg.unwrap() as usize { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Red))) + .unwrap(); + } else if indexreg.is_some() && reg == indexreg.unwrap() as usize { + output + .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) + .unwrap(); + } + write!( + output, + "{}: 0x{:016x} ", + ASAN_SAVE_REGISTER_NAMES[reg], registers[reg] + ) + .unwrap(); + output.reset().unwrap(); + if reg % 4 == 3 { + writeln!(output).unwrap(); + } + } + + #[cfg(target_arch = "x86_64")] + writeln!(output, "Rip: 0x{:016x}", pc).unwrap(); + #[allow(clippy::non_ascii_literal)] writeln!(output, "{:━^100}", " CODE ").unwrap(); + + #[cfg(target_arch = "aarch64")] let mut cs = Capstone::new() .arm64() .mode(capstone::arch::arm64::ArchMode::Arm) 
.build() .unwrap(); + + #[cfg(target_arch = "x86_64")] + let mut cs = Capstone::new() + .x86() + .mode(capstone::arch::x86::ArchMode::Mode64) + .detail(true) + .build() + .expect("Failed to create Capstone object"); + cs.set_skipdata(true).expect("failed to set skipdata"); let start_pc = pc - 4 * 5; diff --git a/libafl_frida/src/asan_rt.rs b/libafl_frida/src/asan_rt.rs index 2386637901..93ef16c77e 100644 --- a/libafl_frida/src/asan_rt.rs +++ b/libafl_frida/src/asan_rt.rs @@ -6,28 +6,33 @@ even if the target would not have crashed under normal conditions. this helps finding mem errors early. */ -#[cfg(target_arch = "aarch64")] use frida_gum::NativePointer; use frida_gum::RangeDetails; use hashbrown::HashMap; use nix::sys::mman::{mmap, MapFlags, ProtFlags}; -#[cfg(target_arch = "aarch64")] use nix::libc::memset; use backtrace::Backtrace; + +#[cfg(target_arch = "aarch64")] use capstone::{ arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone}, Capstone, Insn, }; + +#[cfg(target_arch = "x86_64")] +use capstone::{ + arch::{self, x86::X86OperandType, ArchOperand::X86Operand, BuildsCapstone}, + Capstone, RegAccessType, RegId, +}; + use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi}; -#[cfg(target_arch = "aarch64")] use frida_gum::interceptor::Interceptor; use frida_gum::{Gum, ModuleMap}; #[cfg(unix)] use libc::RLIMIT_STACK; -#[cfg(target_arch = "aarch64")] use libc::{c_char, wchar_t}; #[cfg(target_vendor = "apple")] use libc::{getrlimit, rlimit}; @@ -55,6 +60,35 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; #[cfg(not(target_vendor = "apple"))] const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS; +// sixteen general purpose registers are put in this order, rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi, r8-r15, plus instrumented rip, accessed memory addr and true rip +#[cfg(target_arch = "x86_64")] +pub const ASAN_SAVE_REGISTER_COUNT: usize = 19; + +#[cfg(target_arch = "x86_64")] pub const ASAN_SAVE_REGISTER_NAMES: [&str; ASAN_SAVE_REGISTER_COUNT] = [ + "rax", + "rbx", + "rcx", 
+ "rdx", + "rbp", + "rsp", + "rsi", + "rdi", + "r8", + "r9", + "r10", + "r11", + "r12", + "r13", + "r14", + "r15", + "instrumented rip", + "fault address", + "actual rip", +]; + +#[cfg(target_arch = "aarch64")] +pub const ASAN_SAVE_REGISTER_COUNT: usize = 32; + /// The frida address sanitizer runtime, providing address sanitization. /// When executing in `ASAN`, each memory access will get checked, using frida stalker under the hood. /// The runtime can report memory errors that occurred during execution, /// this helps finding mem errors early. pub struct AsanRuntime { allocator: Allocator, - regs: [usize; 32], + regs: [usize; ASAN_SAVE_REGISTER_COUNT], blob_report: Option>, blob_check_mem_byte: Option>, blob_check_mem_halfword: Option>, @@ -88,7 +122,7 @@ impl AsanRuntime { pub fn new(options: FridaOptions) -> AsanRuntime { Self { allocator: Allocator::new(options.clone()), - regs: [0; 32], + regs: [0; ASAN_SAVE_REGISTER_COUNT], blob_report: None, blob_check_mem_byte: None, blob_check_mem_halfword: None, @@ -117,27 +151,52 @@ impl AsanRuntime { } self.generate_instrumentation_blobs(); + self.generate_shadow_check_function(); self.unpoison_all_existing_memory(); self.module_map = Some(ModuleMap::new_from_names(modules_to_instrument)); - #[cfg(target_arch = "aarch64")] self.hook_functions(_gum); - //unsafe { - //let mem = self.allocator.alloc(0xac + 2, 8); - //unsafe {mprotect((self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC)}; - //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0xac)); - //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2) as *const c_void, 0xac)); - //assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 3) as *const c_void, 0xac)); - //assert!(!(self.shadow_check_func.unwrap())(((mem as isize) + -1) as *const 
c_void, 0xac)); - //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa4) as *const c_void, 8)); - //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa6) as *const c_void, 6)); - //assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 6)); - //assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 0xac)); - //assert!((self.shadow_check_func.unwrap())(((mem as usize) + 4 + 0xa8) as *const c_void, 0x1)); - //} + /* + + unsafe { + let mem = self.allocator.alloc(0xac + 2, 8); + unsafe {mprotect((self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC)}; + println!("Test0"); + /* + 0x555555916ce9 je libafl_frida::asan_rt::AsanRuntime::init+14852 + 0x555555916cef mov rdi, r15 <0x555558392338> + */ + assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0x00)); + println!("Test1"); + assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0xac)); + println!("Test2"); + assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2) as *const c_void, 0xac)); + println!("Test3"); + assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 3) as *const c_void, 0xac)); + println!("Test4"); + assert!(!(self.shadow_check_func.unwrap())(((mem as isize) + -1) as *const c_void, 0xac)); + println!("Test5"); + assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa4) as *const c_void, 8)); + println!("Test6"); + assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa6) as *const c_void, 6)); + println!("Test7"); + assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 6)); + println!("Test8"); + assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 0xac)); + println!("Test9"); + assert!((self.shadow_check_func.unwrap())(((mem as usize) + 4 + 
0xa8) as *const c_void, 0x1)); + println!("FIN"); + + for i in 0..0xad { + assert!((self.shadow_check_func.unwrap())(((mem as usize) + i) as *const c_void, 0x01)); + } + // assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4)); + } + + */ } /// Reset all allocations so that they can be reused for new allocation requests. @@ -165,7 +224,6 @@ impl AsanRuntime { } /// Make sure the specified memory is poisoned - #[cfg(target_arch = "aarch64")] pub fn poison(&mut self, address: usize, size: usize) { Allocator::poison(self.allocator.map_to_shadow(address), size); } @@ -289,33 +347,40 @@ impl AsanRuntime { } #[cfg(target_arch = "aarch64")] + #[inline] + fn pc() -> usize { + Interceptor::current_invocation().cpu_context().pc() as usize + } + + #[cfg(target_arch = "x86_64")] + #[inline] + fn pc() -> usize { + Interceptor::current_invocation().cpu_context().rip() as usize + } + #[inline] fn hook_malloc(&mut self, size: usize) -> *mut c_void { unsafe { self.allocator.alloc(size, 8) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__Znam(&mut self, size: usize) -> *mut c_void { unsafe { self.allocator.alloc(size, 8) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__ZnamRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void { unsafe { self.allocator.alloc(size, 8) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__ZnamSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { unsafe { self.allocator.alloc(size, alignment) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__ZnamSt11align_val_tRKSt9nothrow_t( @@ -327,28 +392,24 @@ impl AsanRuntime { unsafe { self.allocator.alloc(size, alignment) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__Znwm(&mut self, size: usize) -> *mut c_void { unsafe { self.allocator.alloc(size, 8) } } - #[cfg(target_arch 
= "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__ZnwmRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void { unsafe { self.allocator.alloc(size, 8) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__ZnwmSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { unsafe { self.allocator.alloc(size, alignment) } } - #[cfg(target_arch = "aarch64")] #[allow(non_snake_case)] #[inline] fn hook__ZnwmSt11align_val_tRKSt9nothrow_t( @@ -360,7 +421,6 @@ impl AsanRuntime { unsafe { self.allocator.alloc(size, alignment) } } - #[cfg(target_arch = "aarch64")] #[inline] fn hook_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void { let ret = unsafe { self.allocator.alloc(size * nmemb, 8) }; @@ -370,8 +430,8 @@ impl AsanRuntime { ret } - #[cfg(target_arch = "aarch64")] #[inline] + #[allow(clippy::cmp_null)] fn hook_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void { unsafe { let ret = self.allocator.alloc(size, 0x8); @@ -385,27 +445,25 @@ impl AsanRuntime { } } - #[cfg(target_arch = "aarch64")] #[inline] fn hook_check_free(&mut self, ptr: *mut c_void) -> bool { self.allocator.is_managed(ptr) } - #[cfg(target_arch = "aarch64")] #[inline] + #[allow(clippy::cmp_null)] fn hook_free(&mut self, ptr: *mut c_void) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } } } - #[cfg(all(target_arch = "aarch64", not(target_vendor = "apple")))] + #[cfg(not(target_vendor = "apple"))] #[inline] fn hook_memalign(&mut self, alignment: usize, size: usize) -> *mut c_void { unsafe { self.allocator.alloc(size, alignment) } } - #[cfg(target_arch = "aarch64")] #[inline] fn hook_posix_memalign( &mut self, @@ -420,14 +478,14 @@ impl AsanRuntime { } #[inline] - #[cfg(all(target_arch = "aarch64", not(target_vendor = "apple")))] + #[cfg(all(not(target_vendor = "apple")))] fn hook_malloc_usable_size(&mut self, ptr: *mut c_void) -> usize { self.allocator.get_usable_size(ptr) } 
#[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdaPv(&mut self, ptr: *mut c_void) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -435,8 +493,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -444,8 +502,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdaPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -453,8 +511,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdaPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -462,8 +520,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdaPvSt11align_val_tRKSt9nothrow_t( &mut self, ptr: *mut c_void, @@ -476,8 +534,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdaPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -485,8 +543,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdlPv(&mut self, ptr: *mut c_void) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -494,8 +552,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn 
hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -503,8 +561,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdlPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -512,8 +570,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdlPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -521,8 +579,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdlPvSt11align_val_tRKSt9nothrow_t( &mut self, ptr: *mut c_void, @@ -535,8 +593,8 @@ impl AsanRuntime { } #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] #[inline] - #[cfg(target_arch = "aarch64")] fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { if ptr != std::ptr::null_mut() { unsafe { self.allocator.release(ptr) } @@ -544,7 +602,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_mmap( &mut self, addr: *const c_void, @@ -573,7 +630,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_munmap(&mut self, addr: *const c_void, length: usize) -> i32 { extern "C" { fn munmap(addr: *const c_void, length: usize) -> i32; @@ -586,7 +642,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_write(&mut self, fd: i32, buf: *const c_void, count: usize) -> usize { extern "C" { fn write(fd: i32, buf: *const c_void, count: usize) -> usize; @@ -594,9 +649,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(buf, count) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( 
"write".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), buf as usize, count, Backtrace::new(), @@ -606,7 +659,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_read(&mut self, fd: i32, buf: *mut c_void, count: usize) -> usize { extern "C" { fn read(fd: i32, buf: *mut c_void, count: usize) -> usize; @@ -614,9 +666,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(buf, count) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "read".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), buf as usize, count, Backtrace::new(), @@ -626,7 +676,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_fgets(&mut self, s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void { extern "C" { fn fgets(s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void; @@ -634,9 +683,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s, size as usize) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "fgets".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, size as usize, Backtrace::new(), @@ -646,7 +693,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_memcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { extern "C" { fn memcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; @@ -654,9 +700,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s1, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memcmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + 
self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, n, Backtrace::new(), @@ -665,9 +709,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memcmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, n, Backtrace::new(), @@ -677,7 +719,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_memcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { extern "C" { fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; @@ -685,9 +726,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "memcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, n, Backtrace::new(), @@ -696,9 +735,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(src, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, n, Backtrace::new(), @@ -708,7 +745,7 @@ impl AsanRuntime { } #[inline] - #[cfg(all(target_arch = "aarch64", not(target_vendor = "apple")))] + #[cfg(not(target_vendor = "apple"))] fn hook_mempcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { extern "C" { fn mempcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; @@ -716,9 +753,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "mempcpy".to_string(), - self.real_address_for_stalked( - 
Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, n, Backtrace::new(), @@ -727,9 +762,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(src, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "mempcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, n, Backtrace::new(), @@ -739,7 +772,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_memmove(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { extern "C" { fn memmove(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; @@ -747,9 +779,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "memmove".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, n, Backtrace::new(), @@ -758,9 +788,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(src, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memmove".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, n, Backtrace::new(), @@ -770,7 +798,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_memset(&mut self, dest: *mut c_void, c: i32, n: usize) -> *mut c_void { extern "C" { fn memset(dest: *mut c_void, c: i32, n: usize) -> *mut c_void; @@ -778,9 +805,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "memset".to_string(), - self.real_address_for_stalked( - 
Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, n, Backtrace::new(), @@ -790,7 +815,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_memchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { extern "C" { fn memchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; @@ -798,9 +822,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memchr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, n, Backtrace::new(), @@ -810,7 +832,7 @@ impl AsanRuntime { } #[inline] - #[cfg(all(target_arch = "aarch64", not(target_vendor = "apple")))] + #[cfg(not(target_vendor = "apple"))] fn hook_memrchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { extern "C" { fn memrchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; @@ -818,9 +840,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memrchr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, n, Backtrace::new(), @@ -830,7 +850,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_memmem( &mut self, haystack: *const c_void, @@ -849,9 +868,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(haystack, haystacklen) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memmem".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), haystack as usize, haystacklen, Backtrace::new(), @@ -860,9 +877,7 @@ impl AsanRuntime { if 
!(self.shadow_check_func.unwrap())(needle, needlelen) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "memmem".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), needle as usize, needlelen, Backtrace::new(), @@ -871,7 +886,7 @@ impl AsanRuntime { unsafe { memmem(haystack, haystacklen, needle, needlelen) } } - #[cfg(all(not(target_os = "android"), target_arch = "aarch64"))] + #[cfg(all(not(target_os = "android")))] #[inline] fn hook_bzero(&mut self, s: *mut c_void, n: usize) { extern "C" { @@ -880,9 +895,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "bzero".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, n, Backtrace::new(), @@ -891,11 +904,7 @@ impl AsanRuntime { unsafe { bzero(s, n) } } - #[cfg(all( - not(target_os = "android"), - target_arch = "aarch64", - not(target_vendor = "apple") - ))] + #[cfg(all(not(target_os = "android"), not(target_vendor = "apple")))] #[inline] fn hook_explicit_bzero(&mut self, s: *mut c_void, n: usize) { extern "C" { @@ -904,9 +913,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "explicit_bzero".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, n, Backtrace::new(), @@ -915,7 +922,7 @@ impl AsanRuntime { unsafe { explicit_bzero(s, n) } } - #[cfg(all(not(target_os = "android"), target_arch = "aarch64"))] + #[cfg(all(not(target_os = "android")))] #[inline] fn hook_bcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { extern "C" { @@ -924,9 +931,7 @@ impl 
AsanRuntime { if !(self.shadow_check_func.unwrap())(s1, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "bcmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, n, Backtrace::new(), @@ -935,9 +940,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "bcmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, n, Backtrace::new(), @@ -947,7 +950,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { extern "C" { fn strchr(s: *mut c_char, c: i32) -> *mut c_char; @@ -956,9 +958,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strchr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -968,7 +968,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strrchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { extern "C" { fn strrchr(s: *mut c_char, c: i32) -> *mut c_char; @@ -977,9 +976,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strrchr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -989,7 +986,6 @@ impl AsanRuntime { } #[inline] - 
#[cfg(target_arch = "aarch64")] fn hook_strcasecmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { extern "C" { fn strcasecmp(s1: *const c_char, s2: *const c_char) -> i32; @@ -998,9 +994,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcasecmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, unsafe { strlen(s1) }, Backtrace::new(), @@ -1009,9 +1003,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcasecmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, unsafe { strlen(s2) }, Backtrace::new(), @@ -1021,7 +1013,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strncasecmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { extern "C" { fn strncasecmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; @@ -1029,9 +1020,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s1 as *const c_void, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncasecmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, n, Backtrace::new(), @@ -1040,9 +1029,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2 as *const c_void, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncasecmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + 
self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, n, Backtrace::new(), @@ -1052,7 +1039,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strcat(&mut self, s1: *mut c_char, s2: *const c_char) -> *mut c_char { extern "C" { fn strcat(s1: *mut c_char, s2: *const c_char) -> *mut c_char; @@ -1061,9 +1047,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcat".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, unsafe { strlen(s1) }, Backtrace::new(), @@ -1072,9 +1056,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcat".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, unsafe { strlen(s2) }, Backtrace::new(), @@ -1084,7 +1066,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strcmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { extern "C" { fn strcmp(s1: *const c_char, s2: *const c_char) -> i32; @@ -1093,9 +1074,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, unsafe { strlen(s1) }, Backtrace::new(), @@ -1104,9 +1083,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( 
"strcmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, unsafe { strlen(s2) }, Backtrace::new(), @@ -1116,7 +1093,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strncmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { extern "C" { fn strncmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; @@ -1124,9 +1100,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s1 as *const c_void, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, n, Backtrace::new(), @@ -1135,9 +1109,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s2 as *const c_void, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, n, Backtrace::new(), @@ -1147,7 +1119,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { extern "C" { fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; @@ -1156,9 +1127,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { strlen(src) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "strcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, unsafe { strlen(src) }, Backtrace::new(), @@ -1167,9 +1136,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(src as 
*const c_void, unsafe { strlen(src) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, unsafe { strlen(src) }, Backtrace::new(), @@ -1179,7 +1146,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strncpy(&mut self, dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char { extern "C" { fn strncpy(dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char; @@ -1187,9 +1153,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest as *const c_void, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "strncpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, n, Backtrace::new(), @@ -1198,9 +1162,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(src as *const c_void, n) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, n, Backtrace::new(), @@ -1210,7 +1172,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_stpcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { extern "C" { fn stpcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; @@ -1219,9 +1180,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { strlen(src) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "stpcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, 
unsafe { strlen(src) }, Backtrace::new(), @@ -1230,9 +1189,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { strlen(src) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "stpcpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, unsafe { strlen(src) }, Backtrace::new(), @@ -1242,7 +1199,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strdup(&mut self, s: *const c_char) -> *mut c_char { extern "C" { fn strlen(s: *const c_char) -> usize; @@ -1252,9 +1208,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strdup".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1269,7 +1223,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strlen(&mut self, s: *const c_char) -> usize { extern "C" { fn strlen(s: *const c_char) -> usize; @@ -1278,9 +1231,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strlen".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, size, Backtrace::new(), @@ -1290,7 +1241,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strnlen(&mut self, s: *const c_char, n: usize) -> usize { extern "C" { fn strnlen(s: *const c_char, n: usize) -> usize; @@ -1299,9 +1249,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { 
AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strnlen".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, size, Backtrace::new(), @@ -1311,7 +1259,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strstr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { extern "C" { fn strstr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; @@ -1322,9 +1269,7 @@ impl AsanRuntime { }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strstr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), haystack as usize, unsafe { strlen(haystack) }, Backtrace::new(), @@ -1333,9 +1278,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(needle as *const c_void, unsafe { strlen(needle) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strstr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), needle as usize, unsafe { strlen(needle) }, Backtrace::new(), @@ -1345,7 +1288,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_strcasestr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { extern "C" { fn strcasestr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; @@ -1356,9 +1298,7 @@ impl AsanRuntime { }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcasestr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), haystack as usize, unsafe { strlen(haystack) }, Backtrace::new(), @@ -1367,9 +1307,7 @@ impl AsanRuntime { if 
!(self.shadow_check_func.unwrap())(needle as *const c_void, unsafe { strlen(needle) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strcasestr".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), needle as usize, unsafe { strlen(needle) }, Backtrace::new(), @@ -1379,7 +1317,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_atoi(&mut self, s: *const c_char) -> i32 { extern "C" { fn atoi(s: *const c_char) -> i32; @@ -1388,9 +1325,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "atoi".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1400,7 +1335,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_atol(&mut self, s: *const c_char) -> i32 { extern "C" { fn atol(s: *const c_char) -> i32; @@ -1409,9 +1343,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "atol".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1421,7 +1353,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_atoll(&mut self, s: *const c_char) -> i64 { extern "C" { fn atoll(s: *const c_char) -> i64; @@ -1430,9 +1361,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "atoll".to_string(), - 
self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1442,7 +1371,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_wcslen(&mut self, s: *const wchar_t) -> usize { extern "C" { fn wcslen(s: *const wchar_t) -> usize; @@ -1451,9 +1379,7 @@ impl AsanRuntime { if !(self.shadow_check_func.unwrap())(s as *const c_void, (size + 1) * 2) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "wcslen".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s as usize, (size + 1) * 2, Backtrace::new(), @@ -1463,7 +1389,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_wcscpy(&mut self, dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t { extern "C" { fn wcscpy(dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t; @@ -1474,9 +1399,7 @@ impl AsanRuntime { }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( "wcscpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), dest as usize, (unsafe { wcslen(src) } + 1) * 2, Backtrace::new(), @@ -1487,9 +1410,7 @@ impl AsanRuntime { }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "wcscpy".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), src as usize, (unsafe { wcslen(src) } + 1) * 2, Backtrace::new(), @@ -1499,7 +1420,6 @@ impl AsanRuntime { } #[inline] - #[cfg(target_arch = "aarch64")] fn hook_wcscmp(&mut self, s1: *const wchar_t, s2: *const wchar_t) -> i32 { extern "C" { fn wcscmp(s1: *const wchar_t, s2: *const wchar_t) -> i32; @@ 
-1509,9 +1429,7 @@ impl AsanRuntime { { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "wcscmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s1 as usize, (unsafe { wcslen(s1) } + 1) * 2, Backtrace::new(), @@ -1521,9 +1439,7 @@ impl AsanRuntime { { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "wcscmp".to_string(), - self.real_address_for_stalked( - Interceptor::current_invocation().cpu_context().pc() as usize - ), + self.real_address_for_stalked(AsanRuntime::pc()), s2 as usize, (unsafe { wcslen(s2) } + 1) * 2, Backtrace::new(), @@ -1535,7 +1451,7 @@ impl AsanRuntime { /// Hook all functions required for ASAN to function, replacing them with our own /// implementations. #[allow(clippy::items_after_statements)] - #[cfg(target_arch = "aarch64")] + #[allow(clippy::too_many_lines)] fn hook_functions(&mut self, gum: &Gum) { let mut interceptor = frida_gum::interceptor::Interceptor::obtain(gum); @@ -1863,6 +1779,163 @@ impl AsanRuntime { hook_func!(None, wcscmp, (s1: *const wchar_t, s2: *const wchar_t), i32); } + #[cfg(target_arch = "x86_64")] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::too_many_lines)] + extern "C" fn handle_trap(&mut self) { + self.dump_registers(); + + let fault_address = self.regs[17]; + let actual_pc = self.regs[18]; + + let cs = Capstone::new() + .x86() + .mode(arch::x86::ArchMode::Mode64) + .detail(true) + .build() + .expect("Failed to create Capstone object"); + + let instructions = cs + .disasm_count( + unsafe { std::slice::from_raw_parts(actual_pc as *mut u8, 24) }, + actual_pc as u64, + 3, + ) + .expect("Failed to disassmeble"); + + let insn = instructions.as_ref().first().unwrap(); // This is the very instruction that has triggered fault + println!("{:#?}", insn); + let operands = cs.insn_detail(insn).unwrap().arch_detail().operands(); + + let mut access_type: Option = None; + let 
mut regs: Option<(RegId, RegId, i64)> = None; + for operand in operands { + if let X86Operand(x86operand) = operand { + if let X86OperandType::Mem(mem) = x86operand.op_type { + access_type = x86operand.access; + regs = Some((mem.base(), mem.index(), mem.disp())); + } + } + } + + let backtrace = Backtrace::new(); + let (stack_start, stack_end) = Self::current_stack(); + + if let Some(r) = regs { + let (base_idx, size) = self.register_idx(r.0); // safe to unwrap + let (index_idx, _) = self.register_idx(r.1); + let disp = r.2; + + // from capstone register id to self.regs's index + let base_value = match base_idx { + Some(base) => match size { + Some(sz) => { + if sz == 64 { + Some(self.regs[base as usize]) + } else { + Some(self.regs[base as usize] & 0xffffffff) + } + } + _ => None, + }, + _ => None, + }; + + // println!("{:x}", base_value); + #[allow(clippy::option_if_let_else)] + let error = if fault_address >= stack_start && fault_address < stack_end { + match access_type { + Some(typ) => match typ { + RegAccessType::ReadOnly => AsanError::StackOobRead(( + self.regs, + actual_pc, + (base_idx, index_idx, disp as usize, fault_address), + backtrace, + )), + _ => AsanError::StackOobWrite(( + self.regs, + actual_pc, + (base_idx, index_idx, disp as usize, fault_address), + backtrace, + )), + }, + None => AsanError::Unknown(( + self.regs, + actual_pc, + (base_idx, index_idx, disp as usize, fault_address), + backtrace, + )), + } + } else if base_value.is_some() { + if let Some(metadata) = self + .allocator + .find_metadata(fault_address, base_value.unwrap()) + { + match access_type { + Some(typ) => { + let asan_readwrite_error = AsanReadWriteError { + registers: self.regs, + pc: actual_pc, + fault: (base_idx, index_idx, disp as usize, fault_address), + metadata: metadata.clone(), + backtrace, + }; + match typ { + RegAccessType::ReadOnly => { + if metadata.freed { + AsanError::ReadAfterFree(asan_readwrite_error) + } else { + AsanError::OobRead(asan_readwrite_error) + } + } 
+ _ => { + if metadata.freed { + AsanError::WriteAfterFree(asan_readwrite_error) + } else { + AsanError::OobWrite(asan_readwrite_error) + } + } + } + } + None => AsanError::Unknown(( + self.regs, + actual_pc, + (base_idx, index_idx, disp as usize, fault_address), + backtrace, + )), + } + } else { + AsanError::Unknown(( + self.regs, + actual_pc, + (base_idx, index_idx, disp as usize, fault_address), + backtrace, + )) + } + } else { + AsanError::Unknown(( + self.regs, + actual_pc, + (base_idx, index_idx, disp as usize, fault_address), + backtrace, + )) + }; + AsanErrors::get_mut().report_error(error); + + // This is not even a mem instruction?? + } else { + AsanErrors::get_mut().report_error(AsanError::Unknown(( + self.regs, + actual_pc, + (None, None, 0, fault_address), + backtrace, + ))); + } + + // self.dump_registers(); + } + + #[cfg(target_arch = "aarch64")] #[allow(clippy::cast_sign_loss)] // for displacement #[allow(clippy::too_many_lines)] extern "C" fn handle_trap(&mut self) { @@ -1972,14 +2045,24 @@ impl AsanRuntime { AsanError::StackOobRead(( self.regs, actual_pc, - (base_reg, index_reg, displacement as usize, fault_address), + ( + Some(base_reg), + Some(index_reg), + displacement as usize, + fault_address, + ), backtrace, )) } else { AsanError::StackOobWrite(( self.regs, actual_pc, - (base_reg, index_reg, displacement as usize, fault_address), + ( + Some(base_reg), + Some(index_reg), + displacement as usize, + fault_address, + ), backtrace, )) } @@ -1990,7 +2073,12 @@ impl AsanRuntime { let asan_readwrite_error = AsanReadWriteError { registers: self.regs, pc: actual_pc, - fault: (base_reg, index_reg, displacement as usize, fault_address), + fault: ( + Some(base_reg), + Some(index_reg), + displacement as usize, + fault_address, + ), metadata: metadata.clone(), backtrace, }; @@ -2009,13 +2097,334 @@ impl AsanRuntime { AsanError::Unknown(( self.regs, actual_pc, - (base_reg, index_reg, displacement as usize, fault_address), + ( + Some(base_reg), + 
Some(index_reg), + displacement as usize, + fault_address, + ), backtrace, )) }; AsanErrors::get_mut().report_error(error); } + #[cfg(target_arch = "x86_64")] + #[allow(clippy::unused_self)] + fn register_idx(&self, capid: RegId) -> (Option, Option) { + match capid.0 { + 19 => (Some(0), Some(32)), + 22 => (Some(2), Some(32)), + 24 => (Some(3), Some(32)), + 21 => (Some(1), Some(32)), + 30 => (Some(5), Some(32)), + 20 => (Some(4), Some(32)), + 29 => (Some(6), Some(32)), + 23 => (Some(7), Some(32)), + 226 => (Some(8), Some(32)), + 227 => (Some(9), Some(32)), + 228 => (Some(10), Some(32)), + 229 => (Some(11), Some(32)), + 230 => (Some(12), Some(32)), + 231 => (Some(13), Some(32)), + 232 => (Some(14), Some(32)), + 233 => (Some(15), Some(32)), + 26 => (Some(18), Some(32)), + 35 => (Some(0), Some(64)), + 38 => (Some(2), Some(64)), + 40 => (Some(3), Some(64)), + 37 => (Some(1), Some(64)), + 44 => (Some(5), Some(64)), + 36 => (Some(4), Some(64)), + 43 => (Some(6), Some(64)), + 39 => (Some(7), Some(64)), + 106 => (Some(8), Some(64)), + 107 => (Some(9), Some(64)), + 108 => (Some(10), Some(64)), + 109 => (Some(11), Some(64)), + 110 => (Some(12), Some(64)), + 111 => (Some(13), Some(64)), + 112 => (Some(14), Some(64)), + 113 => (Some(15), Some(64)), + 41 => (Some(18), Some(64)), + _ => (None, None), + } + } + + #[cfg(target_arch = "x86_64")] + fn dump_registers(&self) { + println!("rax: {:x}", self.regs[0]); + println!("rbx: {:x}", self.regs[1]); + println!("rcx: {:x}", self.regs[2]); + println!("rdx: {:x}", self.regs[3]); + println!("rbp: {:x}", self.regs[4]); + println!("rsp: {:x}", self.regs[5]); + println!("rsi: {:x}", self.regs[6]); + println!("rdi: {:x}", self.regs[7]); + println!("r8: {:x}", self.regs[8]); + println!("r9: {:x}", self.regs[9]); + println!("r10: {:x}", self.regs[10]); + println!("r11: {:x}", self.regs[11]); + println!("r12: {:x}", self.regs[12]); + println!("r13: {:x}", self.regs[13]); + println!("r14: {:x}", self.regs[14]); + println!("r15: {:x}", 
self.regs[15]); + println!("instrumented rip: {:x}", self.regs[16]); + println!("fault address: {:x}", self.regs[17]); + println!("actual rip: {:x}", self.regs[18]); + } + + // https://godbolt.org/z/Y87PYGd69 + /* + #include + #include + uint8_t shadow_bit = 44; + + uint64_t generate_shadow_check_function(uint64_t start, uint64_t size){ + // calculate the shadow address + uint64_t addr = 1; + addr = addr << shadow_bit; + addr = addr + (start >> 3); + uint64_t mask = (1ULL << (shadow_bit + 1)) - 1; + addr = addr & mask; + + if(size == 0){ + // goto return_success + return 1; + } + else{ + // check if the ptr is not aligned to 8 bytes + uint8_t remainder = start & 0b111; + if(remainder != 0){ + // we need to test the high bits from the first shadow byte + uint8_t shift; + if(size < 8){ + shift = size; + } + else{ + shift = 8 - remainder; + } + // goto check_bits + uint8_t mask = (1 << shift) - 1; + + // bitwise reverse for amd64 :< + // https://gist.github.com/yantonov/4359090 + // we need 16bit number here, (not 8bit) + uint16_t val = *(uint16_t *)addr; + val = (val & 0xff00) >> 8 | (val & 0x00ff) << 8; + val = (val & 0xf0f0) >> 4 | (val & 0x0f0f) << 4; + val = (val & 0xcccc) >> 2 | (val & 0x3333) << 2; + val = (val & 0xaaaa) >> 1 | (val & 0x5555) << 1; + val = (val >> 8) | (val << 8); // swap the byte + val = (val >> remainder); + if((val & mask) != mask){ + // goto return failure + return 0; + } + + size = size - shift; + addr += 1; + } + + // no_start_offset + uint64_t num_shadow_bytes = size >> 3; + uint64_t mask = -1; + + while(true){ + if(num_shadow_bytes < 8){ + // goto less_than_8_shadow_bytes_remaining + break; + } + else{ + uint64_t val = *(uint64_t *)addr; + addr += 8; + if(val != mask){ + // goto return failure + return 0; + } + num_shadow_bytes -= 8; + size -= 64; + } + } + + while(true){ + if(num_shadow_bytes < 1){ + // goto check_trailing_bits + break; + } + else{ + uint8_t val = *(uint8_t *)addr; + addr += 1; + if(val != 0xff){ + // goto return 
failure + return 0; + } + num_shadow_bytes -= 1; + size -= 8; + } + } + + if(size == 0){ + // goto return success + return 1; + } + + uint8_t mask2 = ((1 << (size & 0b111)) - 1); + uint8_t val = *(uint8_t *)addr; + val = (val & 0xf0) >> 4 | (val & 0x0f) << 4; + val = (val & 0xff) >> 2 | (val & 0x33) << 2; + val = (val & 0xaa) >> 1 | (val & 0x55) << 1; + + if((val & mask2) != mask2){ + // goto return failure + return 0; + } + return 1; + } + } + */ + #[cfg(target_arch = "x86_64")] + #[allow(clippy::unused_self, clippy::identity_op)] + #[allow(clippy::too_many_lines)] + fn generate_shadow_check_function(&mut self) { + let shadow_bit = self.allocator.shadow_bit(); + let mut ops = dynasmrt::VecAssembler::::new(0); + + // Rdi start, Rsi size + dynasm!(ops + ; .arch x64 + ; mov cl, shadow_bit as i8 + ; mov eax, 1 + ; mov edx, 1 + ; shl rdx, cl + ; mov r10d, 2 + ; shl r10, cl + ; test rsi, rsi + ; je >LBB0_15 + ; mov rcx, rdi + ; shr rcx, 3 + ; add rdx, rcx + ; add r10, -1 + ; and r10, rdx + ; and edi, 7 + ; je >LBB0_4 + ; mov cl, 8 + ; sub cl, dil + ; cmp rsi, 8 + ; movzx ecx, cl + ; mov r8d, esi + ; cmovae r8d, ecx + ; mov r9d, -1 + ; mov ecx, r8d + ; shl r9d, cl + ; movzx ecx, WORD [r10] + ; rol cx, 8 + ; mov edx, ecx + ; shr edx, 4 + ; and edx, 3855 + ; shl ecx, 4 + ; and ecx, -3856 + ; or ecx, edx + ; mov edx, ecx + ; shr edx, 2 + ; and edx, 13107 + ; and ecx, -3277 + ; lea ecx, [rdx + 4*rcx] + ; mov edx, ecx + ; shr edx, 1 + ; and edx, 21845 + ; and ecx, -10923 + ; lea ecx, [rdx + 2*rcx] + ; rol cx, 8 + ; movzx edx, cx + ; mov ecx, edi + ; shr edx, cl + ; not r9d + ; movzx ecx, r9b + ; and edx, ecx + ; cmp edx, ecx + ; jne >LBB0_11 + ; movzx ecx, r8b + ; sub rsi, rcx + ; add r10, 1 + ;LBB0_4: + ; mov r8, rsi + ; shr r8, 3 + ; mov r9, r8 + ; and r9, -8 + ; mov edi, r8d + ; and edi, 7 + ; add r9, r10 + ; and esi, 63 + ; mov rdx, r8 + ; mov rcx, r10 + ;LBB0_5: + ; cmp rdx, 7 + ; jbe >LBB0_8 + ; add rdx, -8 + ; cmp QWORD [rcx], -1 + ; lea rcx, [rcx + 8] + ; je LBB0_11 + 
;LBB0_8: + ; lea rcx, [8*rdi] + ; sub rsi, rcx + ;LBB0_9: + ; test rdi, rdi + ; je >LBB0_13 + ; add rdi, -1 + ; cmp BYTE [r9], -1 + ; lea r9, [r9 + 1] + ; je LBB0_15 + ; and sil, 7 + ; mov dl, -1 + ; mov ecx, esi + ; shl dl, cl + ; not dl + ; mov cl, BYTE [r8 + r10] + ; rol cl, 4 + ; mov eax, ecx + ; shr al, 2 + ; shl cl, 2 + ; and cl, -52 + ; or cl, al + ; mov eax, ecx + ; shr al, 1 + ; and al, 85 + ; add cl, cl + ; and cl, -86 + ; or cl, al + ; and cl, dl + ; xor eax, eax + ; cmp cl, dl + ; sete al + ;LBB0_15: + ; ret + ); + let blob = ops.finalize().unwrap(); + unsafe { + let mapping = mmap( + std::ptr::null_mut(), + 0x1000, + ProtFlags::all(), + MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE, + -1, + 0, + ) + .unwrap(); + blob.as_ptr() + .copy_to_nonoverlapping(mapping as *mut u8, blob.len()); + self.shadow_check_func = Some(std::mem::transmute(mapping as *mut u8)); + } + } + + #[cfg(target_arch = "aarch64")] #[allow(clippy::unused_self, clippy::identity_op)] // identity_op appears to be a false positive in ubfx fn generate_shadow_check_function(&mut self) { let shadow_bit = self.allocator.shadow_bit(); @@ -2137,6 +2546,110 @@ impl AsanRuntime { } } + // https://godbolt.org/z/cqEKf63e1 + /* + #include + #include + uint8_t shadow_bit = 8; + uint8_t bit = 3; + uint64_t generate_shadow_check_blob(uint64_t start){ + uint64_t addr = 1; + addr = addr << shadow_bit; + addr = addr + (start >> 3); + uint64_t mask = (1ULL << (shadow_bit + 1)) - 1; + addr = addr & mask; + + uint8_t remainder = start & 0b111; + uint16_t val = *(uint16_t *)addr; + val = (val & 0xff00) >> 8 | (val & 0x00ff) << 8; + val = (val & 0xf0f0) >> 4 | (val & 0x0f0f) << 4; + val = (val & 0xcccc) >> 2 | (val & 0x3333) << 2; + val = (val & 0xaaaa) >> 1 | (val & 0x5555) << 1; + val = (val >> 8) | (val << 8); // swap the byte + val = (val >> remainder); + + uint8_t mask2 = (1 << bit) - 1; + if((val & mask2) == mask2){ + // success + return 0; + } + else{ + // failure + return 1; + } + } + */ + 
#[cfg(target_arch = "x86_64")] + #[allow(clippy::unused_self)] + fn generate_shadow_check_blob(&mut self, bit: u32) -> Box<[u8]> { + let shadow_bit = self.allocator.shadow_bit(); + // Rcx, Rax, Rdi, Rdx, Rsi are used, so we save them in emit_shadow_check + macro_rules! shadow_check{ + ($ops:ident, $bit:expr) => {dynasm!($ops + ; .arch x64 + ; mov cl, shadow_bit as i8 + ; mov eax, 1 + ; shl rax, cl + ; mov rdx, rdi + ; mov esi, 2 + ; shl rsi, cl + ; shr rdx, 3 + ; add rdx, rax + ; add rsi, -1 + ; and rsi, rdx + ; movzx eax, WORD [rsi] + ; rol ax, 8 + ; mov ecx, eax + ; shr ecx, 4 + ; and ecx, 3855 + ; shl eax, 4 + ; and eax, -3856 + ; or eax, ecx + ; mov ecx, eax + ; shr ecx, 2 + ; and ecx, 13107 + ; and eax, -3277 + ; lea eax, [rcx + 4*rax] + ; mov ecx, eax + ; shr ecx, 1 + ; and ecx, 21845 + ; and eax, -10923 + ; lea eax, [rcx + 2*rax] + ; rol ax, 8 + ; movzx edx, ax + ; and dil, 7 + ; mov ecx, edi + ; shr edx, cl + ; mov cl, BYTE bit as i8 + ; mov eax, -1 + ; shl eax, cl + ; not eax + ; movzx ecx, al + ; and edx, ecx + ; xor eax, eax + ; cmp edx, ecx + ; je >done + ; lea rsi, [>done] // leap 10 bytes forward + ; nop // jmp takes 10 bytes at most so we want to allocate 10 bytes buffer (?) + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ;done: + );}; + } + let mut ops = dynasmrt::VecAssembler::::new(0); + shadow_check!(ops, bit); + let ops_vec = ops.finalize().unwrap(); + ops_vec[..ops_vec.len() - 10].to_vec().into_boxed_slice() //???? 
+ } + + #[cfg(target_arch = "aarch64")] #[allow(clippy::unused_self)] fn generate_shadow_check_blob(&mut self, bit: u32) -> Box<[u8]> { let shadow_bit = self.allocator.shadow_bit(); @@ -2168,6 +2681,7 @@ impl AsanRuntime { ops_vec[..ops_vec.len() - 4].to_vec().into_boxed_slice() } + #[cfg(target_arch = "aarch64")] #[allow(clippy::unused_self)] fn generate_shadow_check_exact_blob(&mut self, val: u64) -> Box<[u8]> { let shadow_bit = self.allocator.shadow_bit(); @@ -2204,8 +2718,99 @@ impl AsanRuntime { ops_vec[..ops_vec.len() - 4].to_vec().into_boxed_slice() } + // Save registers into self_regs_addr + // Five registers, Rdi, Rsi, Rdx, Rcx, Rax are saved in emit_shadow_check before entering this function + // So we retrieve them after saving other registers + #[cfg(target_arch = "x86_64")] + #[allow(clippy::similar_names)] + #[allow(clippy::cast_possible_wrap)] + #[allow(clippy::too_many_lines)] + fn generate_instrumentation_blobs(&mut self) { + let mut ops_report = dynasmrt::VecAssembler::::new(0); + dynasm!(ops_report + ; .arch x64 + ; report: + ; mov rdi, [>self_regs_addr] // load self.regs into rdi + ; mov [rdi + 0x80], rsi // return address is loaded into rsi in generate_shadow_check_blob + ; mov [rdi + 0x8], rbx + ; mov [rdi + 0x20], rbp + ; mov [rdi + 0x28], rsp + ; mov [rdi + 0x40], r8 + ; mov [rdi + 0x48], r9 + ; mov [rdi + 0x50], r10 + ; mov [rdi + 0x58], r11 + ; mov [rdi + 0x60], r12 + ; mov [rdi + 0x68], r13 + ; mov [rdi + 0x70], r14 + ; mov [rdi + 0x78], r15 + ; mov rax, [rsp + 0x10] + ; mov [rdi + 0x0], rax + ; mov rcx, [rsp + 0x18] + ; mov [rdi + 0x10], rcx + ; mov rdx, [rsp + 0x20] + ; mov [rdi + 0x18], rdx + ; mov rsi, [rsp + 0x28] + ; mov [rdi + 0x30], rsi + + ; mov rsi, [rsp + 0x0] // access_addr + ; mov [rdi + 0x88], rsi + ; mov rsi, [rsp + 0x8] // true_rip + ; mov [rdi + 0x90], rsi + + ; mov rsi, rdi // we want to save rdi, but we have to copy the address of self.regs into another register + ; mov rdi, [rsp + 0x30] + ; mov [rsi + 0x38], rdi + + ; 
mov rdi, [>self_addr] + ; mov rsi, [>trap_func] + + // Align the rsp to a 16-byte boundary + // This adds either -8 or -16 to the current rsp. + // rsp is restored later from self.regs + ; add rsp, -8 + ; and rsp, -16 + + ; call rsi + + ; mov rdi, [>self_regs_addr] + // restore rbx to r15 + ; mov rbx, [rdi + 0x8] + ; mov rbp, [rdi + 0x20] + ; mov rsp, [rdi + 0x28] + ; mov r8, [rdi + 0x40] + ; mov r9, [rdi + 0x48] + ; mov r10, [rdi + 0x50] + ; mov r11, [rdi + 0x58] + ; mov r12, [rdi + 0x60] + ; mov r13, [rdi + 0x68] + ; mov r14, [rdi + 0x70] + ; mov r15, [rdi + 0x78] + ; mov rsi, [rdi + 0x80] // load back >done into rsi + ; jmp rsi + + // Ignore eh_frame_cie for amd64 + // See discussions https://github.com/AFLplusplus/LibAFL/pull/331 + ;->accessed_address: + ; .dword 0x0 + ; self_addr: + ; .qword self as *mut _ as *mut c_void as i64 + ; self_regs_addr: + ; .qword &mut self.regs as *mut _ as *mut c_void as i64 + ; trap_func: + ; .qword AsanRuntime::handle_trap as *mut c_void as i64 + ); + self.blob_report = Some(ops_report.finalize().unwrap().into_boxed_slice()); + + self.blob_check_mem_byte = Some(self.generate_shadow_check_blob(0)); + self.blob_check_mem_halfword = Some(self.generate_shadow_check_blob(1)); + self.blob_check_mem_dword = Some(self.generate_shadow_check_blob(2)); + self.blob_check_mem_qword = Some(self.generate_shadow_check_blob(3)); + self.blob_check_mem_16bytes = Some(self.generate_shadow_check_blob(4)); + } + /// + /// Generate the instrumentation blobs for the current arch. 
+ #[cfg(target_arch = "aarch64")] #[allow(clippy::similar_names)] // We allow things like dword and qword #[allow(clippy::cast_possible_wrap)] #[allow(clippy::too_many_lines)] diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index eadd62dc06..d5c80c2b46 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -16,6 +16,15 @@ use capstone::{ Capstone, Insn, }; +#[cfg(target_arch = "x86_64")] +use capstone::{ + arch::{self, x86::X86OperandType, ArchOperand::X86Operand, BuildsCapstone}, + Capstone, Insn, RegId, +}; + +#[cfg(target_arch = "aarch64")] +use num_traits::cast::FromPrimitive; + #[cfg(target_arch = "x86_64")] use frida_gum::instruction_writer::X86Register; #[cfg(target_arch = "aarch64")] @@ -30,8 +39,6 @@ use frida_gum::{ use frida_gum::CpuContext; use frida_gum::{Gum, Module, PageProtection}; -#[cfg(target_arch = "aarch64")] -use num_traits::cast::FromPrimitive; use rangemap::RangeMap; @@ -98,11 +105,9 @@ pub struct FridaInstrumentationHelper<'a> { map: [u8; MAP_SIZE], previous_pc: [u64; 1], current_log_impl: u64, - #[cfg(target_arch = "aarch64")] current_report_impl: u64, /// Transformer that has to be passed to FridaInProcessExecutor transformer: Option>, - #[cfg(target_arch = "aarch64")] capstone: Capstone, #[cfg(unix)] asan_runtime: AsanRuntime, @@ -125,16 +130,10 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { self.asan_runtime.register_thread(); } - #[cfg(not(target_arch = "aarch64"))] - fn pre_exec(&mut self, _input: &I) {} - - #[cfg(target_arch = "aarch64")] fn pre_exec(&mut self, input: &I) { - #[cfg(target_arch = "aarch64")] let target_bytes = input.target_bytes(); let slice = target_bytes.as_slice(); //println!("target_bytes: {:#x}: {:02x?}", slice.as_ptr() as usize, slice); - #[cfg(target_arch = "aarch64")] if self.options.asan_enabled() { self.asan_runtime .unpoison(slice.as_ptr() as usize, slice.len()); @@ -150,7 +149,6 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { 
DrCovWriter::new(&filename, &self.ranges, &mut self.drcov_basic_blocks).write(); } - #[cfg(target_arch = "aarch64")] if self.options.asan_enabled() { if self.options.asan_detect_leaks() { self.asan_runtime.check_for_leaks(); @@ -298,7 +296,6 @@ impl<'a> FridaInstrumentationHelper<'a> { map: [0u8; MAP_SIZE], previous_pc: [0u64; 1], current_log_impl: 0, - #[cfg(target_arch = "aarch64")] current_report_impl: 0, transformer: None, #[cfg(target_arch = "aarch64")] @@ -308,6 +305,13 @@ impl<'a> FridaInstrumentationHelper<'a> { .detail(true) .build() .expect("Failed to create Capstone object"), + #[cfg(target_arch = "x86_64")] + capstone: Capstone::new() + .x86() + .mode(arch::x86::ArchMode::Mode64) + .detail(true) + .build() + .expect("Failed to create Capstone object"), #[cfg(not(windows))] asan_runtime: AsanRuntime::new(options.clone()), #[cfg(feature = "cmplog")] @@ -322,6 +326,7 @@ impl<'a> FridaInstrumentationHelper<'a> { for (i, module) in helper.module_map.values().iter().enumerate() { let range = module.range(); let start = range.base_address().0 as usize; + // println!("start: {:x}", start); helper .ranges .insert(start..(start + range.size()), (i as u16, module.path())); @@ -330,7 +335,7 @@ impl<'a> FridaInstrumentationHelper<'a> { for (module_name, offset) in suppressed_specifiers { let module_details = ModuleDetails::with_name(module_name).unwrap(); let lib_start = module_details.range().base_address().0 as usize; - println!("removing address: {:#x}", lib_start + offset); + // println!("removing address: {:#x}", lib_start + offset); helper .ranges .remove((lib_start + offset)..(lib_start + offset + 4)); @@ -347,12 +352,19 @@ impl<'a> FridaInstrumentationHelper<'a> { for instruction in basic_block { let instr = instruction.instr(); let address = instr.address(); - //println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); - //println!("address: {:x} contains: {:?}", address, helper.ranges.contains_key(&(address as usize))); + // 
println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); + /* + println!( + "address: {:x} contains: {:?}", + address, + helper.ranges.contains_key(&(address as usize)) + ); + */ + // println!("Ranges: {:#?}", helper.ranges); if helper.ranges.contains_key(&(address as usize)) { if first { first = false; - //println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); + // println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); if helper.options().coverage_enabled() { helper.emit_coverage_mapping(address, &output); } @@ -371,8 +383,15 @@ impl<'a> FridaInstrumentationHelper<'a> { } if helper.options().asan_enabled() { - #[cfg(not(target_arch = "aarch64"))] - todo!("Implement ASAN for non-aarch64 targets"); + #[cfg(target_arch = "x86_64")] + if let Ok((segment, width, basereg, indexreg, scale, disp)) = + helper.asan_is_interesting_instruction(address, instr) + { + helper.emit_shadow_check( + address, &output, segment, width, basereg, indexreg, scale, + disp, + ); + } #[cfg(target_arch = "aarch64")] if let Ok((basereg, indexreg, displacement, width, shift, extender)) = helper.asan_is_interesting_instruction(address, instr) @@ -444,6 +463,53 @@ impl<'a> FridaInstrumentationHelper<'a> { Aarch64Register::from_u32(regint as u32).unwrap() } + // frida registers: https://docs.rs/frida-gum/0.4.0/frida_gum/instruction_writer/enum.X86Register.html + // capstone registers: https://docs.rs/capstone-sys/0.14.0/capstone_sys/x86_reg/index.html + #[cfg(target_arch = "x86_64")] + #[must_use] + #[inline] + #[allow(clippy::unused_self)] + pub fn writer_register(&self, reg: RegId) -> X86Register { + let regint: u16 = reg.0; + match regint { + 19 => X86Register::Eax, + 22 => X86Register::Ecx, + 24 => X86Register::Edx, + 21 => X86Register::Ebx, + 30 => X86Register::Esp, + 20 => X86Register::Ebp, + 29 => X86Register::Esi, + 23 => X86Register::Edi, + 226 => X86Register::R8d, + 227 => X86Register::R9d, + 228 => X86Register::R10d, + 229 
=> X86Register::R11d, + 230 => X86Register::R12d, + 231 => X86Register::R13d, + 232 => X86Register::R14d, + 233 => X86Register::R15d, + 26 => X86Register::Eip, + 35 => X86Register::Rax, + 38 => X86Register::Rcx, + 40 => X86Register::Rdx, + 37 => X86Register::Rbx, + 44 => X86Register::Rsp, + 36 => X86Register::Rbp, + 43 => X86Register::Rsi, + 39 => X86Register::Rdi, + 106 => X86Register::R8, + 107 => X86Register::R9, + 108 => X86Register::R10, + 109 => X86Register::R11, + 110 => X86Register::R12, + 111 => X86Register::R13, + 112 => X86Register::R14, + 113 => X86Register::R15, + 41 => X86Register::Rip, + _ => X86Register::None, // Ignore Xax..Xip + } + } + #[cfg(all(feature = "cmplog", target_arch = "aarch64"))] #[inline] /// Emit the instrumentation code which is responsible for opernads value extraction and cmplog map population @@ -762,6 +828,160 @@ impl<'a> FridaInstrumentationHelper<'a> { )); } + #[inline] + #[allow(clippy::too_many_lines)] + #[allow(clippy::too_many_arguments)] + pub fn emit_shadow_check( + &mut self, + address: u64, + output: &StalkerOutput, + _segment: RegId, + width: u8, + basereg: RegId, + indexreg: RegId, + scale: i32, + disp: i64, + ) { + let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE); + let writer = output.writer(); + let true_rip = address; + + let basereg = if basereg.0 == 0 { + None + } else { + let reg = self.writer_register(basereg); + Some(reg) + }; + + let indexreg = if indexreg.0 == 0 { + None + } else { + let reg = self.writer_register(indexreg); + Some(reg) + }; + + let scale = match scale { + 2 => 1, + 4 => 2, + 8 => 3, + _ => 0, + }; + if self.current_report_impl == 0 + || !writer.can_branch_directly_to(self.current_report_impl) + || !writer.can_branch_directly_between(writer.pc() + 128, self.current_report_impl) + { + let after_report_impl = writer.code_offset() + 2; + + #[cfg(target_arch = "x86_64")] + writer.put_jmp_near_label(after_report_impl); + #[cfg(target_arch = "aarch64")] + 
writer.put_b_label(after_report_impl); + + self.current_report_impl = writer.pc(); + #[cfg(unix)] + writer.put_bytes(self.asan_runtime.blob_report()); + + writer.put_label(after_report_impl); + } + + /* Save registers that we'll use later in shadow_check_blob + | addr | rip | + | Rcx | Rax | + | Rsi | Rdx | + Old Rsp - (redzone_size) -> | flags | Rdi | + | | | + Old Rsp -> | | | + */ + writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, -(redzone_size)); + writer.put_pushfx(); + writer.put_push_reg(X86Register::Rdi); + writer.put_push_reg(X86Register::Rsi); + writer.put_push_reg(X86Register::Rdx); + writer.put_push_reg(X86Register::Rcx); + writer.put_push_reg(X86Register::Rax); + + /* + Things are a bit different when Rip is either base register or index register. + Suppose we have an instruction like + `bnd jmp qword ptr [rip + 0x2e4b5]` + We can't just emit code like + `mov rdi, rip` to get RIP loaded into RDI, + because this RIP is NOT the original RIP (which is usually within .text) anymore, rather it is pointing to the memory allocated by the frida stalker. + Please refer to https://frida.re/docs/stalker/ for details. 
+ */ + // Init Rdi + match basereg { + Some(reg) => match reg { + X86Register::Rip => { + writer.put_mov_reg_address(X86Register::Rdi, true_rip); + } + _ => { + writer.put_mov_reg_reg(X86Register::Rdi, basereg.unwrap()); + } + }, + None => { + writer.put_xor_reg_reg(X86Register::Rdi, X86Register::Rdi); + } + } + + match indexreg { + Some(reg) => match reg { + X86Register::Rip => { + writer.put_mov_reg_address(X86Register::Rsi, true_rip); + } + _ => { + writer.put_mov_reg_reg(X86Register::Rsi, indexreg.unwrap()); + } + }, + None => { + writer.put_xor_reg_reg(X86Register::Rsi, X86Register::Rsi); + } + } + + // Scale + if scale > 0 { + writer.put_shl_reg_u8(X86Register::Rsi, scale); + } + + // Finally set Rdi to base + index * scale + disp + writer.put_add_reg_reg(X86Register::Rdi, X86Register::Rsi); + writer.put_lea_reg_reg_offset(X86Register::Rdi, X86Register::Rdi, disp); + + writer.put_mov_reg_address(X86Register::Rsi, true_rip); // load true_rip into rsi in case we need them in handle_trap + writer.put_push_reg(X86Register::Rsi); // save true_rip + writer.put_push_reg(X86Register::Rdi); // save accessed_address + + #[cfg(unix)] + let checked: bool = match width { + 1 => writer.put_bytes(self.asan_runtime.blob_check_mem_byte()), + 2 => writer.put_bytes(self.asan_runtime.blob_check_mem_halfword()), + 4 => writer.put_bytes(self.asan_runtime.blob_check_mem_dword()), + 8 => writer.put_bytes(self.asan_runtime.blob_check_mem_qword()), + 16 => writer.put_bytes(self.asan_runtime.blob_check_mem_16bytes()), + _ => false, + }; + + if checked { + writer.put_jmp_address(self.current_report_impl); + for _ in 0..10 { + // shadow_check_blob's done will land somewhere in these nops + // on amd64 jump can takes 10 bytes at most, so that's why I put 10 bytes. 
+ writer.put_nop(); + } + } + + writer.put_pop_reg(X86Register::Rdi); + writer.put_pop_reg(X86Register::Rsi); + + writer.put_pop_reg(X86Register::Rax); + writer.put_pop_reg(X86Register::Rcx); + writer.put_pop_reg(X86Register::Rdx); + writer.put_pop_reg(X86Register::Rsi); + writer.put_pop_reg(X86Register::Rdi); + writer.put_popfx(); + writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, redzone_size); + } + #[cfg(target_arch = "aarch64")] #[inline] fn emit_shadow_check( @@ -1092,6 +1312,68 @@ impl<'a> FridaInstrumentationHelper<'a> { Err(()) } + #[cfg(target_arch = "x86_64")] + #[inline] + fn asan_is_interesting_instruction( + &self, + _address: u64, + instr: &Insn, + ) -> Result<(RegId, u8, RegId, RegId, i32, i64), ()> { + let operands = self
            .capstone
            .insn_detail(instr)
            .unwrap()
            .arch_detail()
            .operands(); + + // Ignore the lea instruction + // put nop into the white-list so that instructions + // like `nop dword [rax + rax]` do not get caught. + match instr.mnemonic().unwrap() { + "lea" | "nop" => return Err(()), + + _ => (), + } + + // This is a TODO! In this case, both the src and the dst are mem operands + // so we would need to return two operands? 
+ if instr.mnemonic().unwrap().starts_with("rep") { + return Err(()); + } + + for operand in operands { + if let X86Operand(x86operand) = operand { + if let X86OperandType::Mem(opmem) = x86operand.op_type { + /* + println!( + "insn: {:#?} {:#?} width: {}, segment: {:#?}, base: {:#?}, index: {:#?}, scale: {}, disp: {}", + insn_id, + instr, + x86operand.size, + opmem.segment(), + opmem.base(), + opmem.index(), + opmem.scale(), + opmem.disp(), + ); + */ + if opmem.segment() == RegId(0) { + return Ok(( + opmem.segment(), + x86operand.size, + opmem.base(), + opmem.index(), + opmem.scale(), + opmem.disp(), + )); + } + } + } + } + + Err(()) + } + #[cfg(all(feature = "cmplog", target_arch = "aarch64"))] #[inline] /// Check if the current instruction is cmplog relevant one(any opcode which sets the flags) @@ -1185,7 +1467,7 @@ impl<'a> FridaInstrumentationHelper<'a> { fn emit_coverage_mapping(&mut self, address: u64, output: &StalkerOutput) { let writer = output.writer(); #[allow(clippy::cast_possible_wrap)] // gum redzone size is u32, we need an offset as i32. - let redzone_size = frida_gum_sys::GUM_RED_ZONE_SIZE as i32; + let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE); if self.current_log_impl == 0 || !writer.can_branch_directly_to(self.current_log_impl) || !writer.can_branch_directly_between(writer.pc() + 128, self.current_log_impl) diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index 57699ec684..4a6f341531 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -63,10 +63,6 @@ impl FridaOptions { match name { "asan" => { options.enable_asan = value.parse().unwrap(); - #[cfg(not(target_arch = "aarch64"))] - if options.enable_asan { - panic!("ASAN is not currently supported on targets other than aarch64"); - } } "asan-detect-leaks" => { options.enable_asan_leak_detection = value.parse().unwrap();