diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs
index a1c8c2e69b..376a35cae4 100644
--- a/fuzzers/frida_libpng/src/fuzzer.rs
+++ b/fuzzers/frida_libpng/src/fuzzer.rs
@@ -41,8 +41,6 @@ use libafl::{
     Error,
 };
 
-#[cfg(unix)]
-use libafl_frida::asan_errors::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS};
 use libafl_frida::{
     coverage_rt::MAP_SIZE,
     executor::FridaInProcessExecutor,
@@ -51,6 +49,8 @@ use libafl_frida::{
 };
 use libafl_targets::cmplog::{CmpLogObserver, CMPLOG_MAP};
 
+#[cfg(unix)]
+use libafl_frida::asan::errors::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS};
 fn timeout_from_millis_str(time: &str) -> Result<Duration, Error> {
     Ok(Duration::from_millis(time.parse()?))
 }
diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs
index 87b259d875..adefb2276b 100644
--- a/libafl_frida/src/alloc.rs
+++ b/libafl_frida/src/alloc.rs
@@ -13,11 +13,11 @@ use serde::{Deserialize, Serialize};
 use std::{ffi::c_void, io};
 
 use crate::{
-    asan_errors::{AsanError, AsanErrors},
+    asan::errors::{AsanError, AsanErrors},
     FridaOptions,
 };
 
-pub(crate) struct Allocator {
+pub struct Allocator {
     #[allow(dead_code)]
     options: FridaOptions,
     page_size: usize,
@@ -44,7 +44,7 @@ macro_rules! map_to_shadow {
 }
 
 #[derive(Clone, Debug, Default, Serialize, Deserialize)]
-pub(crate) struct AllocationMetadata {
+pub struct AllocationMetadata {
     pub address: usize,
     pub size: usize,
     pub actual_size: usize,
@@ -55,6 +55,7 @@ pub(crate) struct AllocationMetadata {
 }
 
 impl Allocator {
+    #[must_use]
     pub fn new(options: FridaOptions) -> Self {
         let ret = unsafe { sysconf(_SC_PAGESIZE) };
         assert!(
@@ -193,6 +194,7 @@ impl Allocator {
     }
 
     #[must_use]
+    #[allow(clippy::missing_safety_doc)]
     pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void {
         let mut is_malloc_zero = false;
         let size = if size == 0 {
@@ -270,6 +272,7 @@ impl Allocator {
         address
     }
 
+    #[allow(clippy::missing_safety_doc)]
    pub unsafe fn release(&mut self, ptr: *mut c_void) {
         let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) {
             metadata
@@ -443,6 +446,7 @@ impl Allocator {
         (shadow_mapping_start, (end - start) / 8)
     }
 
+    #[must_use]
     pub fn map_to_shadow(&self, start: usize) -> usize {
         map_to_shadow!(self, start)
     }
diff --git a/libafl_frida/src/asan_rt.rs b/libafl_frida/src/asan/asan_rt.rs
similarity index 65%
rename from libafl_frida/src/asan_rt.rs
rename to libafl_frida/src/asan/asan_rt.rs
index fdbc275baa..ea3b49e45c 100644
--- a/libafl_frida/src/asan_rt.rs
+++ b/libafl_frida/src/asan/asan_rt.rs
@@ -12,25 +12,37 @@ use hashbrown::HashMap;
 use nix::sys::mman::{mmap, MapFlags, ProtFlags};
 
-use nix::libc::memset;
-
 use backtrace::Backtrace;
 
+use crate::helper::FridaInstrumentationHelper;
+
 #[cfg(target_arch = "aarch64")]
 use capstone::{
-    arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone},
+    arch::{
+        arm64::{Arm64Extender, Arm64OperandType, Arm64Shift},
+        ArchOperand::Arm64Operand,
+        BuildsCapstone,
+    },
     Capstone, Insn,
 };
 
+#[cfg(target_arch = "aarch64")]
+use frida_gum::instruction_writer::{Aarch64Register, IndexMode};
+
 #[cfg(target_arch = "x86_64")]
 use capstone::{
     arch::{self, x86::X86OperandType, ArchOperand::X86Operand, BuildsCapstone},
-    Capstone, RegAccessType, RegId,
+    Capstone, Insn, RegAccessType, RegId,
 };
 
+#[cfg(target_arch = "x86_64")]
+use frida_gum::instruction_writer::X86Register;
+
 use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};
 
 use frida_gum::interceptor::Interceptor;
-use frida_gum::{Gum, Module, ModuleMap};
+use frida_gum::{
+    instruction_writer::InstructionWriter, stalker::StalkerOutput, Gum, Module, ModuleMap,
+};
 #[cfg(unix)]
 use libc::RLIMIT_STACK;
 use libc::{c_char, wchar_t};
@@ -42,7 +54,7 @@ use std::{ffi::c_void, ptr::write_volatile};
 
 use crate::{
     alloc::Allocator,
-    asan_errors::{AsanError, AsanErrors, AsanReadWriteError, ASAN_ERRORS},
+    asan::errors::{AsanError, AsanErrors, AsanReadWriteError, ASAN_ERRORS},
     FridaOptions,
 };
 
@@ -96,6 +108,7 @@ pub const ASAN_SAVE_REGISTER_COUNT: usize = 32;
 /// even if the target would not have crashed under normal conditions.
 /// This helps find memory errors early.
 pub struct AsanRuntime {
+    current_report_impl: u64,
     allocator: Allocator,
     regs: [usize; ASAN_SAVE_REGISTER_COUNT],
     blob_report: Option<Box<[u8]>>,
@@ -123,6 +136,7 @@ impl AsanRuntime {
     #[must_use]
     pub fn new(options: FridaOptions) -> AsanRuntime {
         Self {
+            current_report_impl: 0,
             allocator: Allocator::new(options.clone()),
             regs: [0; ASAN_SAVE_REGISTER_COUNT],
             blob_report: None,
@@ -215,6 +229,20 @@ impl AsanRuntime {
         self.allocator.reset();
     }
 
+    #[must_use]
+    pub fn allocator(&self) -> &Allocator {
+        &self.allocator
+    }
+
+    pub fn allocator_mut(&mut self) -> &mut Allocator {
+        &mut self.allocator
+    }
+
+    #[must_use]
+    pub fn shadow_check_func(&self) -> &Option<extern "C" fn(*const c_void, usize) -> bool> {
+        &self.shadow_check_func
+    }
+
     /// Check if the test leaked any memory and report it if so.
     pub fn check_for_leaks(&mut self) {
         self.allocator.check_for_leaks();
@@ -357,1107 +385,19 @@ impl AsanRuntime {
     }
 
     #[cfg(target_arch = "aarch64")]
+    #[must_use]
     #[inline]
-    fn pc() -> usize {
+    pub fn pc() -> usize {
         Interceptor::current_invocation().cpu_context().pc() as usize
     }
 
     #[cfg(target_arch = "x86_64")]
+    #[must_use]
     #[inline]
-    fn pc() -> usize {
+    pub fn pc() -> usize {
         Interceptor::current_invocation().cpu_context().rip() as usize
     }
 
-    #[inline]
-    fn hook_malloc(&mut self, size: usize) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, 8) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__Znam(&mut self, size: usize) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, 8) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__ZnamRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, 8) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__ZnamSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, alignment) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__ZnamSt11align_val_tRKSt9nothrow_t(
-        &mut self,
-        size: usize,
-        alignment: usize,
-        _nothrow: *const c_void,
-    ) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, alignment) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__Znwm(&mut self, size: usize) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, 8) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__ZnwmRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, 8) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__ZnwmSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, alignment) }
-    }
-
-    #[allow(non_snake_case)]
-    #[inline]
-    fn hook__ZnwmSt11align_val_tRKSt9nothrow_t(
-        &mut self,
-        size: usize,
-        alignment: usize,
-        _nothrow: *const c_void,
-    ) -> *mut c_void {
-        unsafe { self.allocator.alloc(size, alignment) }
-    }
-
-    #[inline]
-    fn hook_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void {
-        let ret = unsafe { 
self.allocator.alloc(size * nmemb, 8) }; - unsafe { - memset(ret, 0, size * nmemb); - } - ret - } - - #[inline] - #[allow(clippy::cmp_null)] - fn hook_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void { - unsafe { - let ret = self.allocator.alloc(size, 0x8); - if ptr != std::ptr::null_mut() && ret != std::ptr::null_mut() { - let old_size = self.allocator.get_usable_size(ptr); - let copy_size = if size < old_size { size } else { old_size }; - (ptr as *mut u8).copy_to(ret as *mut u8, copy_size); - } - self.allocator.release(ptr); - ret - } - } - - #[inline] - fn hook_check_free(&mut self, ptr: *mut c_void) -> bool { - self.allocator.is_managed(ptr) - } - - #[inline] - #[allow(clippy::cmp_null)] - fn hook_free(&mut self, ptr: *mut c_void) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[cfg(not(target_vendor = "apple"))] - #[inline] - fn hook_memalign(&mut self, alignment: usize, size: usize) -> *mut c_void { - unsafe { self.allocator.alloc(size, alignment) } - } - - #[inline] - fn hook_posix_memalign( - &mut self, - pptr: *mut *mut c_void, - alignment: usize, - size: usize, - ) -> i32 { - unsafe { - *pptr = self.allocator.alloc(size, alignment); - } - 0 - } - - #[inline] - #[cfg(all(not(target_vendor = "apple")))] - fn hook_malloc_usable_size(&mut self, ptr: *mut c_void) -> usize { - self.allocator.get_usable_size(ptr) - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdaPv(&mut self, ptr: *mut c_void) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdaPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdaPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdaPvSt11align_val_tRKSt9nothrow_t( - &mut self, - ptr: *mut c_void, - _alignment: usize, - _nothrow: *const c_void, - ) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdaPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdlPv(&mut self, ptr: *mut c_void) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdlPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - 
#[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdlPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdlPvSt11align_val_tRKSt9nothrow_t( - &mut self, - ptr: *mut c_void, - _alignment: usize, - _nothrow: *const c_void, - ) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[allow(non_snake_case)] - #[allow(clippy::cmp_null)] - #[inline] - fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { - if ptr != std::ptr::null_mut() { - unsafe { self.allocator.release(ptr) } - } - } - - #[inline] - fn hook_mmap( - &mut self, - addr: *const c_void, - length: usize, - prot: i32, - flags: i32, - fd: i32, - offset: usize, - ) -> *mut c_void { - extern "C" { - fn mmap( - addr: *const c_void, - length: usize, - prot: i32, - flags: i32, - fd: i32, - offset: usize, - ) -> *mut c_void; - } - let res = unsafe { mmap(addr, length, prot, flags, fd, offset) }; - if res != (-1_isize as *mut c_void) { - self.allocator - .map_shadow_for_region(res as usize, res as usize + length, true); - } - res - } - - #[inline] - fn hook_munmap(&mut self, addr: *const c_void, length: usize) -> i32 { - extern "C" { - fn munmap(addr: *const c_void, length: usize) -> i32; - } - let res = unsafe { munmap(addr, length) }; - if res != -1 { - Allocator::poison(self.allocator.map_to_shadow(addr as usize), length); - } - res - } - - #[inline] - fn hook_write(&mut self, fd: i32, buf: *const c_void, count: usize) -> usize { - extern "C" { - fn write(fd: i32, buf: *const c_void, count: usize) -> usize; - } - if !(self.shadow_check_func.unwrap())(buf, count) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "write".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - buf as usize, - count, - Backtrace::new(), - ))); - } - unsafe { write(fd, buf, count) } - } - - #[inline] - fn hook_read(&mut self, fd: i32, buf: *mut c_void, count: usize) -> usize { - extern "C" { - fn read(fd: i32, buf: *mut c_void, count: usize) -> usize; - } - if !(self.shadow_check_func.unwrap())(buf, count) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "read".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - buf as usize, - count, - Backtrace::new(), - ))); - } - unsafe { read(fd, buf, count) } - } - - #[inline] - fn hook_fgets(&mut self, s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void { - extern "C" { - fn fgets(s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(s, size as usize) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "fgets".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - size as usize, - Backtrace::new(), - ))); - } - unsafe { fgets(s, size, stream) } - } - - #[inline] - fn hook_memcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { - extern "C" { - fn memcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; - } - if !(self.shadow_check_func.unwrap())(s1, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memcmp".to_string(), - 
self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - n, - Backtrace::new(), - ))); - } - unsafe { memcmp(s1, s2, n) } - } - - #[inline] - fn hook_memcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { - extern "C" { - fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(dest, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "memcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - n, - Backtrace::new(), - ))); - } - unsafe { memcpy(dest, src, n) } - } - - #[inline] - #[cfg(not(target_vendor = "apple"))] - fn hook_mempcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { - extern "C" { - fn mempcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(dest, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "mempcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "mempcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - n, - Backtrace::new(), - ))); - } - unsafe { mempcpy(dest, src, n) } - } - - #[inline] - fn hook_memmove(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { - extern "C" { - fn memmove(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(dest, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "memmove".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memmove".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - n, - Backtrace::new(), - ))); - } - unsafe { memmove(dest, src, n) } - } - - #[inline] - fn hook_memset(&mut self, dest: *mut c_void, c: i32, n: usize) -> *mut c_void { - extern "C" { - fn memset(dest: *mut c_void, c: i32, n: usize) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(dest, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "memset".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - n, - Backtrace::new(), - ))); - } - unsafe { memset(dest, c, n) } - } - - #[inline] - fn hook_memchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { - extern "C" { - fn memchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(s, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - n, - Backtrace::new(), - ))); - } - unsafe { memchr(s, c, n) } - } - - #[inline] - #[cfg(not(target_vendor = "apple"))] - fn hook_memrchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { - extern "C" { - fn memrchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(s, n) { - 
AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memrchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - n, - Backtrace::new(), - ))); - } - unsafe { memrchr(s, c, n) } - } - - #[inline] - fn hook_memmem( - &mut self, - haystack: *const c_void, - haystacklen: usize, - needle: *const c_void, - needlelen: usize, - ) -> *mut c_void { - extern "C" { - fn memmem( - haystack: *const c_void, - haystacklen: usize, - needle: *const c_void, - needlelen: usize, - ) -> *mut c_void; - } - if !(self.shadow_check_func.unwrap())(haystack, haystacklen) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memmem".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - haystack as usize, - haystacklen, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(needle, needlelen) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "memmem".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - needle as usize, - needlelen, - Backtrace::new(), - ))); - } - unsafe { memmem(haystack, haystacklen, needle, needlelen) } - } - - #[cfg(all(not(target_os = "android")))] - #[inline] - fn hook_bzero(&mut self, s: *mut c_void, n: usize) { - extern "C" { - fn bzero(s: *mut c_void, n: usize); - } - if !(self.shadow_check_func.unwrap())(s, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "bzero".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - n, - Backtrace::new(), - ))); - } - unsafe { bzero(s, n) } - } - - #[cfg(all(not(target_os = "android"), not(target_vendor = "apple")))] - #[inline] - fn hook_explicit_bzero(&mut self, s: *mut c_void, n: usize) { - extern "C" { - fn explicit_bzero(s: *mut c_void, n: usize); - } - if !(self.shadow_check_func.unwrap())(s, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "explicit_bzero".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - n, - Backtrace::new(), - ))); - } - unsafe { explicit_bzero(s, n) } - } - - #[cfg(all(not(target_os = "android")))] - #[inline] - fn hook_bcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { - extern "C" { - fn bcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; - } - if !(self.shadow_check_func.unwrap())(s1, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "bcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "bcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - n, - Backtrace::new(), - ))); - } - unsafe { bcmp(s1, s2, n) } - } - - #[inline] - fn hook_strchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { - extern "C" { - fn strchr(s: *mut c_char, c: i32) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - unsafe { strlen(s) }, - Backtrace::new(), - ))); - } - unsafe { strchr(s, c) } - } - - #[inline] - fn hook_strrchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { - extern "C" { - fn strrchr(s: *mut c_char, c: i32) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if 
!(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strrchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - unsafe { strlen(s) }, - Backtrace::new(), - ))); - } - unsafe { strrchr(s, c) } - } - - #[inline] - fn hook_strcasecmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { - extern "C" { - fn strcasecmp(s1: *const c_char, s2: *const c_char) -> i32; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - unsafe { strlen(s1) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - unsafe { strlen(s2) }, - Backtrace::new(), - ))); - } - unsafe { strcasecmp(s1, s2) } - } - - #[inline] - fn hook_strncasecmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { - extern "C" { - fn strncasecmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; - } - if !(self.shadow_check_func.unwrap())(s1 as *const c_void, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strncasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2 as *const c_void, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strncasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - n, - Backtrace::new(), - ))); - } - unsafe { strncasecmp(s1, s2, n) } - } - - #[inline] - fn hook_strcat(&mut self, s1: *mut c_char, s2: *const c_char) -> *mut c_char { - extern "C" { - fn strcat(s1: *mut c_char, s2: *const c_char) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcat".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - unsafe { strlen(s1) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcat".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - unsafe { strlen(s2) }, - Backtrace::new(), - ))); - } - unsafe { strcat(s1, s2) } - } - - #[inline] - fn hook_strcmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { - extern "C" { - fn strcmp(s1: *const c_char, s2: *const c_char) -> i32; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - unsafe { strlen(s1) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - unsafe { strlen(s2) }, 
- Backtrace::new(), - ))); - } - unsafe { strcmp(s1, s2) } - } - - #[inline] - fn hook_strncmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { - extern "C" { - fn strncmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; - } - if !(self.shadow_check_func.unwrap())(s1 as *const c_void, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strncmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2 as *const c_void, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strncmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - n, - Backtrace::new(), - ))); - } - unsafe { strncmp(s1, s2, n) } - } - - #[inline] - fn hook_strcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { - extern "C" { - fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { strlen(src) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "strcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - unsafe { strlen(src) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { strlen(src) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - unsafe { strlen(src) }, - Backtrace::new(), - ))); - } - unsafe { strcpy(dest, src) } - } - - #[inline] - fn hook_strncpy(&mut self, dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char { - extern "C" { - fn strncpy(dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char; - } - if !(self.shadow_check_func.unwrap())(dest as *const c_void, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "strncpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - n, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src as *const c_void, n) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strncpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - n, - Backtrace::new(), - ))); - } - unsafe { strncpy(dest, src, n) } - } - - #[inline] - fn hook_stpcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { - extern "C" { - fn stpcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { strlen(src) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "stpcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - unsafe { strlen(src) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { strlen(src) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "stpcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - unsafe { strlen(src) }, - Backtrace::new(), - ))); - } - unsafe { stpcpy(dest, src) } - } - - #[inline] - fn hook_strdup(&mut self, s: *const c_char) -> *mut c_char { - extern "C" { - fn strlen(s: *const c_char) -> usize; - fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; - } - let size = unsafe { strlen(s) }; - if 
!(self.shadow_check_func.unwrap())(s as *const c_void, size) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strdup".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - unsafe { strlen(s) }, - Backtrace::new(), - ))); - } - - unsafe { - let ret = self.allocator.alloc(size, 8) as *mut c_char; - strcpy(ret, s); - ret - } - } - - #[inline] - fn hook_strlen(&mut self, s: *const c_char) -> usize { - extern "C" { - fn strlen(s: *const c_char) -> usize; - } - let size = unsafe { strlen(s) }; - if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strlen".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - size, - Backtrace::new(), - ))); - } - size - } - - #[inline] - fn hook_strnlen(&mut self, s: *const c_char, n: usize) -> usize { - extern "C" { - fn strnlen(s: *const c_char, n: usize) -> usize; - } - let size = unsafe { strnlen(s, n) }; - if !(self.shadow_check_func.unwrap())(s as *const c_void, size) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strnlen".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - size, - Backtrace::new(), - ))); - } - size - } - - #[inline] - fn hook_strstr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { - extern "C" { - fn strstr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(haystack as *const c_void, unsafe { - strlen(haystack) - }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strstr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - haystack as usize, - unsafe { strlen(haystack) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(needle as *const c_void, unsafe { strlen(needle) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strstr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - needle as usize, - unsafe { strlen(needle) }, - Backtrace::new(), - ))); - } - unsafe { strstr(haystack, needle) } - } - - #[inline] - fn hook_strcasestr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { - extern "C" { - fn strcasestr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(haystack as *const c_void, unsafe { - strlen(haystack) - }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcasestr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - haystack as usize, - unsafe { strlen(haystack) }, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(needle as *const c_void, unsafe { strlen(needle) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "strcasestr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - needle as usize, - unsafe { strlen(needle) }, - Backtrace::new(), - ))); - } - unsafe { strcasestr(haystack, needle) } - } - - #[inline] - fn hook_atoi(&mut self, s: *const c_char) -> i32 { - extern "C" { - fn atoi(s: *const c_char) -> i32; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "atoi".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - unsafe 
{ strlen(s) }, - Backtrace::new(), - ))); - } - unsafe { atoi(s) } - } - - #[inline] - fn hook_atol(&mut self, s: *const c_char) -> i32 { - extern "C" { - fn atol(s: *const c_char) -> i32; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "atol".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - unsafe { strlen(s) }, - Backtrace::new(), - ))); - } - unsafe { atol(s) } - } - - #[inline] - fn hook_atoll(&mut self, s: *const c_char) -> i64 { - extern "C" { - fn atoll(s: *const c_char) -> i64; - fn strlen(s: *const c_char) -> usize; - } - if !(self.shadow_check_func.unwrap())(s as *const c_void, unsafe { strlen(s) }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "atoll".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - unsafe { strlen(s) }, - Backtrace::new(), - ))); - } - unsafe { atoll(s) } - } - - #[inline] - fn hook_wcslen(&mut self, s: *const wchar_t) -> usize { - extern "C" { - fn wcslen(s: *const wchar_t) -> usize; - } - let size = unsafe { wcslen(s) }; - if !(self.shadow_check_func.unwrap())(s as *const c_void, (size + 1) * 2) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "wcslen".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s as usize, - (size + 1) * 2, - Backtrace::new(), - ))); - } - size - } - - #[inline] - fn hook_wcscpy(&mut self, dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t { - extern "C" { - fn wcscpy(dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t; - fn wcslen(s: *const wchar_t) -> usize; - } - if !(self.shadow_check_func.unwrap())(dest as *const c_void, unsafe { - (wcslen(src) + 1) * 2 - }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( - "wcscpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - dest as usize, - (unsafe { wcslen(src) } + 1) * 2, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(src as *const c_void, unsafe { - (wcslen(src) + 1) * 2 - }) { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "wcscpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - src as usize, - (unsafe { wcslen(src) } + 1) * 2, - Backtrace::new(), - ))); - } - unsafe { wcscpy(dest, src) } - } - - #[inline] - fn hook_wcscmp(&mut self, s1: *const wchar_t, s2: *const wchar_t) -> i32 { - extern "C" { - fn wcscmp(s1: *const wchar_t, s2: *const wchar_t) -> i32; - fn wcslen(s: *const wchar_t) -> usize; - } - if !(self.shadow_check_func.unwrap())(s1 as *const c_void, unsafe { (wcslen(s1) + 1) * 2 }) - { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "wcscmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s1 as usize, - (unsafe { wcslen(s1) } + 1) * 2, - Backtrace::new(), - ))); - } - if !(self.shadow_check_func.unwrap())(s2 as *const c_void, unsafe { (wcslen(s2) + 1) * 2 }) - { - AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( - "wcscmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), - s2 as usize, - (unsafe { wcslen(s2) } + 1) * 2, - Backtrace::new(), - ))); - } - unsafe { wcscmp(s1, s2) } - } - /// Hook all functions required for ASAN to function, replacing them with our own /// implementations. 
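+    ///
+    /// A minimal usage sketch (illustrative only, not part of this patch; it
+    /// assumes the caller already holds a `Gum` instance and `FridaOptions`,
+    /// e.g. via `Gum::obtain()` and `FridaOptions::parse_env_options()`):
+    ///
+    /// ```ignore
+    /// let gum = Gum::obtain();
+    /// let mut runtime = AsanRuntime::new(FridaOptions::parse_env_options());
+    /// runtime.hook_functions(&gum);
+    /// ```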
 #[allow(clippy::items_after_statements)]
@@ -3086,4 +2026,487 @@ impl AsanRuntime {
     pub fn blob_check_mem_64bytes(&self) -> &[u8] {
         self.blob_check_mem_64bytes.as_ref().unwrap()
     }
+
+    #[cfg(target_arch = "aarch64")]
+    #[inline]
+    pub fn asan_is_interesting_instruction(
+        &self,
+        capstone: &Capstone,
+        _address: u64,
+        instr: &Insn,
+    ) -> Result<
+        (
+            capstone::RegId,
+            capstone::RegId,
+            i32,
+            u32,
+            Arm64Shift,
+            Arm64Extender,
+        ),
+        (),
+    > {
+        // We have to ignore these instructions. Simulating them with their side effects is
+        // complex, to say the least.
+        match instr.mnemonic().unwrap() {
+            "ldaxr" | "stlxr" | "ldxr" | "stxr" | "ldar" | "stlr" | "ldarb" | "ldarh" | "ldaxp"
+            | "ldaxrb" | "ldaxrh" | "stlrb" | "stlrh" | "stlxp" | "stlxrb" | "stlxrh" | "ldxrb"
+            | "ldxrh" | "stxrb" | "stxrh" => return Err(()),
+            _ => (),
+        }
+
+        let operands = capstone
+            .insn_detail(instr)
+            .unwrap()
+            .arch_detail()
+            .operands();
+        if operands.len() < 2 {
+            return Err(());
+        }
+
+        if let Arm64Operand(arm64operand) = operands.last().unwrap() {
+            if let Arm64OperandType::Mem(opmem) = arm64operand.op_type {
+                return Ok((
+                    opmem.base(),
+                    opmem.index(),
+                    opmem.disp(),
+                    FridaInstrumentationHelper::instruction_width(instr, &operands),
+                    arm64operand.shift,
+                    arm64operand.ext,
+                ));
+            }
+        }
+
+        Err(())
+    }
+
+    #[cfg(all(target_arch = "x86_64", unix))]
+    #[inline]
+    #[allow(clippy::unused_self)]
+    #[allow(clippy::result_unit_err)]
+    pub fn asan_is_interesting_instruction(
+        &self,
+        capstone: &Capstone,
+        _address: u64,
+        instr: &Insn,
+    ) -> Result<(RegId, u8, RegId, RegId, i32, i64), ()> {
+        let operands = capstone
+            .insn_detail(instr)
+            .unwrap()
+            .arch_detail()
+            .operands();
+
+        // Ignore the `lea` instruction, and put `nop` on the white-list so that
+        // instructions like `nop dword [rax + rax]` do not get caught.
+        match instr.mnemonic().unwrap() {
+            "lea" | "nop" => return Err(()),
+
+            _ => (),
+        }
+
+        // This is a TODO! In this case, both the src and the dst are memory operands,
+        // so we would need to return two operands?
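+        // For example (hypothetical case): `rep movsq` reads through rsi and
+        // writes through rdi in a single instruction, so one (base, index,
+        // scale, disp) tuple cannot describe it; such instructions are skipped.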
+        if instr.mnemonic().unwrap().starts_with("rep") {
+            return Err(());
+        }
+
+        for operand in operands {
+            if let X86Operand(x86operand) = operand {
+                if let X86OperandType::Mem(opmem) = x86operand.op_type {
+                    /*
+                    println!(
+                        "insn: {:#?} {:#?} width: {}, segment: {:#?}, base: {:#?}, index: {:#?}, scale: {}, disp: {}",
+                        insn_id,
+                        instr,
+                        x86operand.size,
+                        opmem.segment(),
+                        opmem.base(),
+                        opmem.index(),
+                        opmem.scale(),
+                        opmem.disp(),
+                    );
+                    */
+                    if opmem.segment() == RegId(0) {
+                        return Ok((
+                            opmem.segment(),
+                            x86operand.size,
+                            opmem.base(),
+                            opmem.index(),
+                            opmem.scale(),
+                            opmem.disp(),
+                        ));
+                    }
+                }
+            }
+        }
+
+        Err(())
+    }
+
+    #[inline]
+    #[allow(clippy::too_many_lines)]
+    #[allow(clippy::too_many_arguments)]
+    #[cfg(all(target_arch = "x86_64", unix))]
+    pub fn emit_shadow_check(
+        &mut self,
+        address: u64,
+        output: &StalkerOutput,
+        _segment: RegId,
+        width: u8,
+        basereg: RegId,
+        indexreg: RegId,
+        scale: i32,
+        disp: i64,
+    ) {
+        let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE);
+        let writer = output.writer();
+        let true_rip = address;
+
+        let basereg = if basereg.0 == 0 {
+            None
+        } else {
+            let reg = FridaInstrumentationHelper::writer_register(basereg);
+            Some(reg)
+        };
+
+        let indexreg = if indexreg.0 == 0 {
+            None
+        } else {
+            let reg = FridaInstrumentationHelper::writer_register(indexreg);
+            Some(reg)
+        };
+
+        let scale = match scale {
+            2 => 1,
+            4 => 2,
+            8 => 3,
+            _ => 0,
+        };
+        if self.current_report_impl == 0
+            || !writer.can_branch_directly_to(self.current_report_impl)
+            || !writer.can_branch_directly_between(writer.pc() + 128, self.current_report_impl)
+        {
+            let after_report_impl = writer.code_offset() + 2;
+
+            #[cfg(target_arch = "x86_64")]
+            writer.put_jmp_near_label(after_report_impl);
+            #[cfg(target_arch = "aarch64")]
+            writer.put_b_label(after_report_impl);
+
+            self.current_report_impl = writer.pc();
+            #[cfg(unix)]
+            writer.put_bytes(self.blob_report());
+
+            writer.put_label(after_report_impl);
+        }
+
+        /* Save the registers that we'll use later in shadow_check_blob
+                                           | addr  | rip   |
+                                           | Rcx   | Rax   |
+                                           | Rsi   | Rdx   |
+           Old Rsp - (redzone_size)     -> | flags | Rdi   |
+                                           |       |       |
+           Old Rsp                      -> |       |       |
+        */
+        writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, -(redzone_size));
+        writer.put_pushfx();
+        writer.put_push_reg(X86Register::Rdi);
+        writer.put_push_reg(X86Register::Rsi);
+        writer.put_push_reg(X86Register::Rdx);
+        writer.put_push_reg(X86Register::Rcx);
+        writer.put_push_reg(X86Register::Rax);
+
+        /*
+        Things are a bit different when Rip is either the base register or the index register.
+        Suppose we have an instruction like
+        `bnd jmp qword ptr [rip + 0x2e4b5]`
+        We can't just emit code like
+        `mov rdi, rip` to get RIP loaded into RDI,
+        because this RIP is NOT the original RIP (which usually lies within .text) anymore;
+        rather, it points into the memory allocated by the Frida Stalker.
+        See https://frida.re/docs/stalker/ for details.
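+
+        A concrete sketch (hypothetical numbers): for the `bnd jmp` above, the code
+        below loads `true_rip` (the address of the original, un-stalked instruction)
+        instead of reading RIP, so the address handed to the shadow check is computed
+        against the original .text rather than against Stalker's rewritten copy.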
+        */
+        // Init Rdi
+        match basereg {
+            Some(reg) => match reg {
+                X86Register::Rip => {
+                    writer.put_mov_reg_address(X86Register::Rdi, true_rip);
+                }
+                _ => {
+                    writer.put_mov_reg_reg(X86Register::Rdi, basereg.unwrap());
+                }
+            },
+            None => {
+                writer.put_xor_reg_reg(X86Register::Rdi, X86Register::Rdi);
+            }
+        }
+
+        match indexreg {
+            Some(reg) => match reg {
+                X86Register::Rip => {
+                    writer.put_mov_reg_address(X86Register::Rsi, true_rip);
+                }
+                _ => {
+                    writer.put_mov_reg_reg(X86Register::Rsi, indexreg.unwrap());
+                }
+            },
+            None => {
+                writer.put_xor_reg_reg(X86Register::Rsi, X86Register::Rsi);
+            }
+        }
+
+        // Scale
+        if scale > 0 {
+            writer.put_shl_reg_u8(X86Register::Rsi, scale);
+        }
+
+        // Finally set Rdi to base + index * scale + disp
+        writer.put_add_reg_reg(X86Register::Rdi, X86Register::Rsi);
+        writer.put_lea_reg_reg_offset(X86Register::Rdi, X86Register::Rdi, disp);
+
+        writer.put_mov_reg_address(X86Register::Rsi, true_rip); // load true_rip into rsi in case we need it in handle_trap
+        writer.put_push_reg(X86Register::Rsi); // save true_rip
+        writer.put_push_reg(X86Register::Rdi); // save accessed_address
+
+        #[cfg(unix)]
+        let checked: bool = match width {
+            1 => writer.put_bytes(self.blob_check_mem_byte()),
+            2 => writer.put_bytes(self.blob_check_mem_halfword()),
+            4 => writer.put_bytes(self.blob_check_mem_dword()),
+            8 => writer.put_bytes(self.blob_check_mem_qword()),
+            16 => writer.put_bytes(self.blob_check_mem_16bytes()),
+            _ => false,
+        };
+
+        if checked {
+            writer.put_jmp_address(self.current_report_impl);
+            for _ in 0..10 {
+                // shadow_check_blob's `done` branch will land somewhere in these nops;
+                // on amd64 a jump can take 10 bytes at most, so that's why I put 10 bytes.
+                writer.put_nop();
+            }
+        }
+
+        writer.put_pop_reg(X86Register::Rdi);
+        writer.put_pop_reg(X86Register::Rsi);
+
+        writer.put_pop_reg(X86Register::Rax);
+        writer.put_pop_reg(X86Register::Rcx);
+        writer.put_pop_reg(X86Register::Rdx);
+        writer.put_pop_reg(X86Register::Rsi);
+        writer.put_pop_reg(X86Register::Rdi);
+        writer.put_popfx();
+        writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, redzone_size);
+    }
+
+    #[cfg(target_arch = "aarch64")]
+    #[inline]
+    pub fn emit_shadow_check(
+        &mut self,
+        _address: u64,
+        output: &StalkerOutput,
+        basereg: capstone::RegId,
+        indexreg: capstone::RegId,
+        displacement: i32,
+        width: u32,
+        shift: Arm64Shift,
+        extender: Arm64Extender,
+    ) {
+        let redzone_size = frida_gum_sys::GUM_RED_ZONE_SIZE as i32;
+        let writer = output.writer();
+
+        let basereg = FridaInstrumentationHelper::writer_register(basereg);
+        let indexreg = if indexreg.0 != 0 {
+            Some(FridaInstrumentationHelper::writer_register(indexreg))
+        } else {
+            None
+        };
+
+        if self.current_report_impl == 0
+            || !writer.can_branch_directly_to(self.current_report_impl)
+            || !writer.can_branch_directly_between(writer.pc() + 128, self.current_report_impl)
+        {
+            let after_report_impl = writer.code_offset() + 2;
+
+            #[cfg(target_arch = "x86_64")]
+            writer.put_jmp_near_label(after_report_impl);
+            #[cfg(target_arch = "aarch64")]
+            writer.put_b_label(after_report_impl);
+
+            self.current_report_impl = writer.pc();
+
+            #[cfg(unix)]
+            writer.put_bytes(self.blob_report());
+
+            writer.put_label(after_report_impl);
+        }
+        //writer.put_brk_imm(1);
+
+        // Preserve x0, x1:
+        writer.put_stp_reg_reg_reg_offset(
+            Aarch64Register::X0,
+            Aarch64Register::X1,
+            Aarch64Register::Sp,
+            -(16 + redzone_size) as i64,
+            IndexMode::PreAdjust,
+        );
+
+        // Make sure the base register is copied into x0
+        match basereg {
+            Aarch64Register::X0 | 
Aarch64Register::W0 => {} + Aarch64Register::X1 | Aarch64Register::W1 => { + writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1); + } + _ => { + if !writer.put_mov_reg_reg(Aarch64Register::X0, basereg) { + writer.put_mov_reg_reg(Aarch64Register::W0, basereg); + } + } + } + + // Make sure the index register is copied into x1 + if indexreg.is_some() { + if let Some(indexreg) = indexreg { + match indexreg { + Aarch64Register::X0 | Aarch64Register::W0 => { + writer.put_ldr_reg_reg_offset( + Aarch64Register::X1, + Aarch64Register::Sp, + 0u64, + ); + } + Aarch64Register::X1 | Aarch64Register::W1 => {} + _ => { + if !writer.put_mov_reg_reg(Aarch64Register::X1, indexreg) { + writer.put_mov_reg_reg(Aarch64Register::W1, indexreg); + } + } + } + } + + if let (Arm64Extender::ARM64_EXT_INVALID, Arm64Shift::Invalid) = (extender, shift) { + writer.put_add_reg_reg_reg( + Aarch64Register::X0, + Aarch64Register::X0, + Aarch64Register::X1, + ); + } else { + let extender_encoding: i32 = match extender { + Arm64Extender::ARM64_EXT_UXTB => 0b000, + Arm64Extender::ARM64_EXT_UXTH => 0b001, + Arm64Extender::ARM64_EXT_UXTW => 0b010, + Arm64Extender::ARM64_EXT_UXTX => 0b011, + Arm64Extender::ARM64_EXT_SXTB => 0b100, + Arm64Extender::ARM64_EXT_SXTH => 0b101, + Arm64Extender::ARM64_EXT_SXTW => 0b110, + Arm64Extender::ARM64_EXT_SXTX => 0b111, + _ => -1, + }; + let (shift_encoding, shift_amount): (i32, u32) = match shift { + Arm64Shift::Lsl(amount) => (0b00, amount), + Arm64Shift::Lsr(amount) => (0b01, amount), + Arm64Shift::Asr(amount) => (0b10, amount), + _ => (-1, 0), + }; + + if extender_encoding != -1 && shift_amount < 0b1000 { + // emit add extended register: https://developer.arm.com/documentation/ddi0602/latest/Base-Instructions/ADD--extended-register---Add--extended-register-- + writer.put_bytes( + &(0x8b210000 | ((extender_encoding as u32) << 13) | (shift_amount << 10)) + .to_le_bytes(), + ); + } else if shift_encoding != -1 { + writer.put_bytes( + &(0x8b010000 | ((shift_encoding as u32) << 22) | (shift_amount << 10)) + .to_le_bytes(), + ); + } else { + panic!("extender: {:?}, shift: {:?}", extender, shift); + } + }; + } + + let displacement = displacement + + if basereg == Aarch64Register::Sp { + 16 + redzone_size + } else { + 0 + }; + + #[allow(clippy::comparison_chain)] + if displacement < 0 { + if displacement > -4096 { + // Subtract the displacement into x0 + writer.put_sub_reg_reg_imm( + Aarch64Register::X0, + Aarch64Register::X0, + displacement.abs() as u64, + ); + } else { + let displacement_hi = displacement.abs() / 4096; + let displacement_lo = displacement.abs() % 4096; + writer.put_bytes(&(0xd1400000u32 | ((displacement_hi as u32) << 10)).to_le_bytes()); + writer.put_sub_reg_reg_imm( + Aarch64Register::X0, + Aarch64Register::X0, + displacement_lo as u64, + ); + } + } else if displacement > 0 { + if displacement < 4096 { + // Add the displacement into x0 + writer.put_add_reg_reg_imm( + Aarch64Register::X0, + Aarch64Register::X0, + displacement as u64, + ); + } else { + let displacement_hi = displacement / 4096; + let displacement_lo = displacement % 4096; + writer.put_bytes(&(0x91400000u32 | ((displacement_hi as u32) << 10)).to_le_bytes()); + writer.put_add_reg_reg_imm( + Aarch64Register::X0, + Aarch64Register::X0, + displacement_lo as u64, + ); + } + } + // Insert the check_shadow_mem code blob + #[cfg(unix)] + match width { + 1 => writer.put_bytes(&self.blob_check_mem_byte()), + 2 => writer.put_bytes(&self.blob_check_mem_halfword()), + 3 => 
writer.put_bytes(&self.blob_check_mem_3bytes()), + 4 => writer.put_bytes(&self.blob_check_mem_dword()), + 6 => writer.put_bytes(&self.blob_check_mem_6bytes()), + 8 => writer.put_bytes(&self.blob_check_mem_qword()), + 12 => writer.put_bytes(&self.blob_check_mem_12bytes()), + 16 => writer.put_bytes(&self.blob_check_mem_16bytes()), + 24 => writer.put_bytes(&self.blob_check_mem_24bytes()), + 32 => writer.put_bytes(&self.blob_check_mem_32bytes()), + 48 => writer.put_bytes(&self.blob_check_mem_48bytes()), + 64 => writer.put_bytes(&self.blob_check_mem_64bytes()), + _ => false, + }; + + // Add the branch to report + //writer.put_brk_imm(0x12); + writer.put_branch_address(self.current_report_impl); + + match width { + 3 | 6 | 12 | 24 | 32 | 48 | 64 => { + let msr_nvcz_x0: u32 = 0xd51b4200; + writer.put_bytes(&msr_nvcz_x0.to_le_bytes()); + } + _ => (), + } + + // Restore x0, x1 + assert!(writer.put_ldp_reg_reg_reg_offset( + Aarch64Register::X0, + Aarch64Register::X1, + Aarch64Register::Sp, + 16 + redzone_size as i64, + IndexMode::PostAdjust, + )); + } } diff --git a/libafl_frida/src/asan_errors.rs b/libafl_frida/src/asan/errors.rs similarity index 99% rename from libafl_frida/src/asan_errors.rs rename to libafl_frida/src/asan/errors.rs index 7abb4de91a..455de66168 100644 --- a/libafl_frida/src/asan_errors.rs +++ b/libafl_frida/src/asan/errors.rs @@ -21,9 +21,9 @@ use std::io::Write; use termcolor::{Color, ColorSpec, WriteColor}; #[cfg(target_arch = "x86_64")] -use crate::asan_rt::ASAN_SAVE_REGISTER_NAMES; +use crate::asan::asan_rt::ASAN_SAVE_REGISTER_NAMES; -use crate::{alloc::AllocationMetadata, asan_rt::ASAN_SAVE_REGISTER_COUNT, FridaOptions}; +use crate::{alloc::AllocationMetadata, asan::asan_rt::ASAN_SAVE_REGISTER_COUNT, FridaOptions}; #[derive(Debug, Clone, Serialize, Deserialize)] pub(crate) struct AsanReadWriteError { diff --git a/libafl_frida/src/asan/hook_funcs.rs b/libafl_frida/src/asan/hook_funcs.rs new file mode 100644 index 0000000000..5f6cbc4b62 --- /dev/null +++ b/libafl_frida/src/asan/hook_funcs.rs @@ -0,0 +1,1130 @@ +use crate::{ + alloc::Allocator, + asan::{ + asan_rt::AsanRuntime, + errors::{AsanError, AsanErrors}, + }, +}; +use backtrace::Backtrace; +use libc::{c_char, wchar_t}; +use nix::libc::memset; +use std::ffi::c_void; + +#[allow(clippy::not_unsafe_ptr_arg_deref)] +impl AsanRuntime { + #[inline] + pub fn hook_malloc(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__Znam(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__ZnamRKSt9nothrow_t( + &mut self, + size: usize, + _nothrow: *const c_void, + ) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__ZnamSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, alignment) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__ZnamSt11align_val_tRKSt9nothrow_t( + &mut self, + size: usize, + alignment: usize, + _nothrow: *const c_void, + ) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, alignment) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__Znwm(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__ZnwmRKSt9nothrow_t( + &mut self, + size: usize, + _nothrow: *const 
c_void, + ) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__ZnwmSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, alignment) } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook__ZnwmSt11align_val_tRKSt9nothrow_t( + &mut self, + size: usize, + alignment: usize, + _nothrow: *const c_void, + ) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, alignment) } + } + + #[inline] + pub fn hook_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void { + let ret = unsafe { self.allocator_mut().alloc(size * nmemb, 8) }; + unsafe { + memset(ret, 0, size * nmemb); + } + ret + } + + #[inline] + #[allow(clippy::cmp_null)] + pub fn hook_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void { + unsafe { + let ret = self.allocator_mut().alloc(size, 0x8); + if ptr != std::ptr::null_mut() && ret != std::ptr::null_mut() { + let old_size = self.allocator_mut().get_usable_size(ptr); + let copy_size = if size < old_size { size } else { old_size }; + (ptr as *mut u8).copy_to(ret as *mut u8, copy_size); + } + self.allocator_mut().release(ptr); + ret + } + } + + #[inline] + pub fn hook_check_free(&mut self, ptr: *mut c_void) -> bool { + self.allocator_mut().is_managed(ptr) + } + + #[inline] + #[allow(clippy::cmp_null)] + pub fn hook_free(&mut self, ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[cfg(not(target_vendor = "apple"))] + #[inline] + pub fn hook_memalign(&mut self, alignment: usize, size: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, alignment) } + } + + #[inline] + pub fn hook_posix_memalign( + &mut self, + pptr: *mut *mut c_void, + alignment: usize, + size: usize, + ) -> i32 { + unsafe { + *pptr = self.allocator_mut().alloc(size, alignment); + } + 0 + } + + #[inline] + #[cfg(all(not(target_vendor = "apple")))] + pub fn hook_malloc_usable_size(&mut self, ptr: *mut c_void) -> usize { + self.allocator_mut().get_usable_size(ptr) + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdaPv(&mut self, ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdaPvmSt11align_val_t( + &mut self, + ptr: *mut c_void, + _ulong: u64, + _alignment: usize, + ) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdaPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdaPvSt11align_val_tRKSt9nothrow_t( + &mut self, + ptr: *mut c_void, + _alignment: usize, + _nothrow: *const c_void, + ) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdaPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { + if ptr != 
std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdlPv(&mut self, ptr: *mut c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdlPvmSt11align_val_t( + &mut self, + ptr: *mut c_void, + _ulong: u64, + _alignment: usize, + ) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdlPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdlPvSt11align_val_tRKSt9nothrow_t( + &mut self, + ptr: *mut c_void, + _alignment: usize, + _nothrow: *const c_void, + ) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[allow(non_snake_case)] + #[allow(clippy::cmp_null)] + #[inline] + pub fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + } + + #[inline] + pub fn hook_mmap( + &mut self, + addr: *const c_void, + length: usize, + prot: i32, + flags: i32, + fd: i32, + offset: usize, + ) -> *mut c_void { + extern "C" { + fn mmap( + addr: *const c_void, + length: usize, + prot: i32, + flags: i32, + fd: i32, + offset: usize, + ) -> *mut c_void; + } + let res = unsafe { mmap(addr, length, prot, flags, fd, offset) }; + if res != (-1_isize as *mut c_void) { + self.allocator_mut() + .map_shadow_for_region(res as usize, res as usize + length, true); + } + res + } + + #[inline] + pub fn hook_munmap(&mut self, addr: *const c_void, length: usize) -> i32 { + extern "C" { + fn munmap(addr: *const c_void, length: usize) -> i32; + } + let res = unsafe { munmap(addr, length) }; + if res != -1 { + Allocator::poison(self.allocator_mut().map_to_shadow(addr as usize), length); + } + res + } + + #[inline] + pub fn hook_write(&mut self, fd: i32, buf: *const c_void, count: usize) -> usize { + extern "C" { + fn write(fd: i32, buf: *const c_void, count: usize) -> usize; + } + if !(self.shadow_check_func().unwrap())(buf, count) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "write".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + buf as usize, + count, + Backtrace::new(), + ))); + } + unsafe { write(fd, buf, count) } + } + + #[inline] + pub fn hook_read(&mut self, fd: i32, buf: *mut c_void, count: usize) -> usize { + extern "C" { + fn read(fd: i32, buf: *mut c_void, count: usize) -> usize; + } + if !(self.shadow_check_func().unwrap())(buf, count) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "read".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + buf as usize, + count, + Backtrace::new(), + ))); + } + unsafe { read(fd, buf, count) } + } + + #[inline] + pub fn hook_fgets(&mut self, s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void { + extern "C" { + fn fgets(s: *mut c_void, size: u32, stream: *mut c_void) -> 
*mut c_void; + } + if !(self.shadow_check_func().unwrap())(s, size as usize) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "fgets".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + size as usize, + Backtrace::new(), + ))); + } + unsafe { fgets(s, size, stream) } + } + + #[inline] + pub fn hook_memcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { + extern "C" { + fn memcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; + } + if !(self.shadow_check_func().unwrap())(s1, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memcmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(s2, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memcmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + n, + Backtrace::new(), + ))); + } + unsafe { memcmp(s1, s2, n) } + } + + #[inline] + pub fn hook_memcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { + extern "C" { + fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(dest, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "memcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + n, + Backtrace::new(), + ))); + } + unsafe { memcpy(dest, src, n) } + } + + #[inline] + #[cfg(not(target_vendor = "apple"))] + pub fn hook_mempcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { + extern "C" { + fn mempcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(dest, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "mempcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "mempcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + n, + Backtrace::new(), + ))); + } + unsafe { mempcpy(dest, src, n) } + } + + #[inline] + pub fn hook_memmove(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { + extern "C" { + fn memmove(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(dest, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "memmove".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memmove".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + n, + Backtrace::new(), + ))); + } + unsafe { memmove(dest, src, n) } + } + + #[inline] + pub fn hook_memset(&mut self, dest: *mut c_void, c: i32, n: usize) -> *mut c_void { + extern "C" { + fn memset(dest: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(dest, n) { + 
AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "memset".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + n, + Backtrace::new(), + ))); + } + unsafe { memset(dest, c, n) } + } + + #[inline] + pub fn hook_memchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { + extern "C" { + fn memchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(s, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memchr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + n, + Backtrace::new(), + ))); + } + unsafe { memchr(s, c, n) } + } + + #[inline] + #[cfg(not(target_vendor = "apple"))] + pub fn hook_memrchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { + extern "C" { + fn memrchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(s, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memrchr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + n, + Backtrace::new(), + ))); + } + unsafe { memrchr(s, c, n) } + } + + #[inline] + pub fn hook_memmem( + &mut self, + haystack: *const c_void, + haystacklen: usize, + needle: *const c_void, + needlelen: usize, + ) -> *mut c_void { + extern "C" { + fn memmem( + haystack: *const c_void, + haystacklen: usize, + needle: *const c_void, + needlelen: usize, + ) -> *mut c_void; + } + if !(self.shadow_check_func().unwrap())(haystack, haystacklen) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memmem".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + haystack as usize, + haystacklen, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(needle, needlelen) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "memmem".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + needle as usize, + needlelen, + Backtrace::new(), + ))); + } + unsafe { memmem(haystack, haystacklen, needle, needlelen) } + } + + #[cfg(all(not(target_os = "android")))] + #[inline] + pub fn hook_bzero(&mut self, s: *mut c_void, n: usize) { + extern "C" { + fn bzero(s: *mut c_void, n: usize); + } + if !(self.shadow_check_func().unwrap())(s, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "bzero".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + n, + Backtrace::new(), + ))); + } + unsafe { bzero(s, n) } + } + + #[cfg(all(not(target_os = "android"), not(target_vendor = "apple")))] + #[inline] + pub fn hook_explicit_bzero(&mut self, s: *mut c_void, n: usize) { + extern "C" { + fn explicit_bzero(s: *mut c_void, n: usize); + } + if !(self.shadow_check_func().unwrap())(s, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "explicit_bzero".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + n, + Backtrace::new(), + ))); + } + unsafe { explicit_bzero(s, n) } + } + + #[cfg(all(not(target_os = "android")))] + #[inline] + pub fn hook_bcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { + extern "C" { + fn bcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; + } + if !(self.shadow_check_func().unwrap())(s1, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "bcmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + n, + Backtrace::new(), + ))); + } + if 
!(self.shadow_check_func().unwrap())(s2, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "bcmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + n, + Backtrace::new(), + ))); + } + unsafe { bcmp(s1, s2, n) } + } + + #[inline] + pub fn hook_strchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { + extern "C" { + fn strchr(s: *mut c_char, c: i32) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strchr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + ))); + } + unsafe { strchr(s, c) } + } + + #[inline] + pub fn hook_strrchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { + extern "C" { + fn strrchr(s: *mut c_char, c: i32) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strrchr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + ))); + } + unsafe { strrchr(s, c) } + } + + #[inline] + pub fn hook_strcasecmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { + extern "C" { + fn strcasecmp(s1: *const c_char, s2: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcasecmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + unsafe { strlen(s1) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcasecmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + unsafe { strlen(s2) }, + Backtrace::new(), + ))); + } + unsafe { strcasecmp(s1, s2) } + } + + #[inline] + pub fn hook_strncasecmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { + extern "C" { + fn strncasecmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; + } + if !(self.shadow_check_func().unwrap())(s1 as *const c_void, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strncasecmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(s2 as *const c_void, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strncasecmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + n, + Backtrace::new(), + ))); + } + unsafe { strncasecmp(s1, s2, n) } + } + + #[inline] + pub fn hook_strcat(&mut self, s1: *mut c_char, s2: *const c_char) -> *mut c_char { + extern "C" { + fn strcat(s1: *mut c_char, s2: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcat".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + unsafe { strlen(s1) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { 
strlen(s2) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcat".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + unsafe { strlen(s2) }, + Backtrace::new(), + ))); + } + unsafe { strcat(s1, s2) } + } + + #[inline] + pub fn hook_strcmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { + extern "C" { + fn strcmp(s1: *const c_char, s2: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + unsafe { strlen(s1) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + unsafe { strlen(s2) }, + Backtrace::new(), + ))); + } + unsafe { strcmp(s1, s2) } + } + + #[inline] + pub fn hook_strncmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { + extern "C" { + fn strncmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; + } + if !(self.shadow_check_func().unwrap())(s1 as *const c_void, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strncmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s1 as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(s2 as *const c_void, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strncmp".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s2 as usize, + n, + Backtrace::new(), + ))); + } + unsafe { strncmp(s1, s2, n) } + } + + #[inline] + pub fn hook_strcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { + extern "C" { + fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(dest as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "strcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + unsafe { strlen(src) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + unsafe { strlen(src) }, + Backtrace::new(), + ))); + } + unsafe { strcpy(dest, src) } + } + + #[inline] + pub fn hook_strncpy(&mut self, dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char { + extern "C" { + fn strncpy(dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char; + } + if !(self.shadow_check_func().unwrap())(dest as *const c_void, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "strncpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + n, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src as *const c_void, n) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strncpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + n, + Backtrace::new(), + ))); + } + unsafe { strncpy(dest, src, n) } + } + + #[inline] + pub fn hook_stpcpy(&mut self, 
dest: *mut c_char, src: *const c_char) -> *mut c_char { + extern "C" { + fn stpcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(dest as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "stpcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + unsafe { strlen(src) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src as *const c_void, unsafe { strlen(src) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "stpcpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + unsafe { strlen(src) }, + Backtrace::new(), + ))); + } + unsafe { stpcpy(dest, src) } + } + + #[inline] + pub fn hook_strdup(&mut self, s: *const c_char) -> *mut c_char { + extern "C" { + fn strlen(s: *const c_char) -> usize; + fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; + } + let size = unsafe { strlen(s) }; + if !(self.shadow_check_func().unwrap())(s as *const c_void, size) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strdup".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + ))); + } + + unsafe { + let ret = self.allocator_mut().alloc(size, 8) as *mut c_char; + strcpy(ret, s); + ret + } + } + + #[inline] + pub fn hook_strlen(&mut self, s: *const c_char) -> usize { + extern "C" { + fn strlen(s: *const c_char) -> usize; + } + let size = unsafe { strlen(s) }; + if !(self.shadow_check_func().unwrap())(s as *const c_void, size) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strlen".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + size, + Backtrace::new(), + ))); + } + size + } + + #[inline] + pub fn hook_strnlen(&mut self, s: *const c_char, n: usize) -> usize { + extern "C" { + fn strnlen(s: *const c_char, n: usize) -> usize; + } + let size = unsafe { strnlen(s, n) }; + if !(self.shadow_check_func().unwrap())(s as *const c_void, size) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strnlen".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + size, + Backtrace::new(), + ))); + } + size + } + + #[inline] + pub fn hook_strstr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { + extern "C" { + fn strstr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(haystack as *const c_void, unsafe { + strlen(haystack) + }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strstr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + haystack as usize, + unsafe { strlen(haystack) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(needle as *const c_void, unsafe { strlen(needle) }) + { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strstr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + needle as usize, + unsafe { strlen(needle) }, + Backtrace::new(), + ))); + } + unsafe { strstr(haystack, needle) } + } + + #[inline] + pub fn hook_strcasestr( + &mut self, + haystack: *const c_char, + needle: *const c_char, + ) -> *mut c_char { + extern "C" { + fn strcasestr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; + fn strlen(s: *const 
c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(haystack as *const c_void, unsafe { + strlen(haystack) + }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcasestr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + haystack as usize, + unsafe { strlen(haystack) }, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(needle as *const c_void, unsafe { strlen(needle) }) + { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "strcasestr".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + needle as usize, + unsafe { strlen(needle) }, + Backtrace::new(), + ))); + } + unsafe { strcasestr(haystack, needle) } + } + + #[inline] + pub fn hook_atoi(&mut self, s: *const c_char) -> i32 { + extern "C" { + fn atoi(s: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "atoi".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + ))); + } + unsafe { atoi(s) } + } + + #[inline] + pub fn hook_atol(&mut self, s: *const c_char) -> i32 { + extern "C" { + fn atol(s: *const c_char) -> i32; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "atol".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + ))); + } + unsafe { atol(s) } + } + + #[inline] + pub fn hook_atoll(&mut self, s: *const c_char) -> i64 { + extern "C" { + fn atoll(s: *const c_char) -> i64; + fn strlen(s: *const c_char) -> usize; + } + if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "atoll".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + unsafe { strlen(s) }, + Backtrace::new(), + ))); + } + unsafe { atoll(s) } + } + + #[inline] + pub fn hook_wcslen(&mut self, s: *const wchar_t) -> usize { + extern "C" { + fn wcslen(s: *const wchar_t) -> usize; + } + let size = unsafe { wcslen(s) }; + if !(self.shadow_check_func().unwrap())(s as *const c_void, (size + 1) * 2) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "wcslen".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + s as usize, + (size + 1) * 2, + Backtrace::new(), + ))); + } + size + } + + #[inline] + pub fn hook_wcscpy(&mut self, dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t { + extern "C" { + fn wcscpy(dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t; + fn wcslen(s: *const wchar_t) -> usize; + } + if !(self.shadow_check_func().unwrap())(dest as *const c_void, unsafe { + (wcslen(src) + 1) * 2 + }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgWrite(( + "wcscpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + dest as usize, + (unsafe { wcslen(src) } + 1) * 2, + Backtrace::new(), + ))); + } + if !(self.shadow_check_func().unwrap())(src as *const c_void, unsafe { + (wcslen(src) + 1) * 2 + }) { + AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( + "wcscpy".to_string(), + self.real_address_for_stalked(AsanRuntime::pc()), + src as usize, + (unsafe { wcslen(src) } + 1) * 2, + Backtrace::new(), + ))); + } + unsafe { 
wcscpy(dest, src) }
+    }
+
+    #[inline]
+    pub fn hook_wcscmp(&mut self, s1: *const wchar_t, s2: *const wchar_t) -> i32 {
+        extern "C" {
+            fn wcscmp(s1: *const wchar_t, s2: *const wchar_t) -> i32;
+            fn wcslen(s: *const wchar_t) -> usize;
+        }
+        if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe {
+            (wcslen(s1) + 1) * 2
+        }) {
+            AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead((
+                "wcscmp".to_string(),
+                self.real_address_for_stalked(AsanRuntime::pc()),
+                s1 as usize,
+                (unsafe { wcslen(s1) } + 1) * 2,
+                Backtrace::new(),
+            )));
+        }
+        if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe {
+            (wcslen(s2) + 1) * 2
+        }) {
+            AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead((
+                "wcscmp".to_string(),
+                self.real_address_for_stalked(AsanRuntime::pc()),
+                s2 as usize,
+                (unsafe { wcslen(s2) } + 1) * 2,
+                Backtrace::new(),
+            )));
+        }
+        unsafe { wcscmp(s1, s2) }
+    }
+}
diff --git a/libafl_frida/src/asan/mod.rs b/libafl_frida/src/asan/mod.rs
new file mode 100644
index 0000000000..acf7989985
--- /dev/null
+++ b/libafl_frida/src/asan/mod.rs
@@ -0,0 +1,3 @@
+pub mod asan_rt;
+pub mod errors;
+pub mod hook_funcs;
diff --git a/libafl_frida/src/cmplog_rt.rs b/libafl_frida/src/cmplog_rt.rs
index bc0bee6d90..5a37774f85 100644
--- a/libafl_frida/src/cmplog_rt.rs
+++ b/libafl_frida/src/cmplog_rt.rs
@@ -7,12 +7,40 @@ extern "C" {
     pub fn __libafl_targets_cmplog_instructions(k: u64, shape: u8, arg1: u64, arg2: u64);
 }
 
+#[cfg(target_arch = "aarch64")]
+use frida_gum::{
+    instruction_writer::{Aarch64Register, IndexMode, InstructionWriter},
+    stalker::StalkerOutput,
+};
+
+#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
+use crate::helper::FridaInstrumentationHelper;
+
+pub enum SpecialCmpLogCase {
+    Tbz,
+    Tbnz,
+}
+
+#[cfg(target_arch = "aarch64")]
+use capstone::{
+    arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand},
+    Capstone, Insn,
+};
+
 pub struct CmpLogRuntime {
     ops_save_register_and_blr_to_populate: Option<Box<[u8]>>,
     ops_handle_tbz_masking: Option<Box<[u8]>>,
     ops_handle_tbnz_masking: Option<Box<[u8]>>,
 }
 
+#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
+pub enum CmplogOperandType {
+    Regid(capstone::RegId),
+    Imm(u64),
+    Cimm(u64),
+    Mem(capstone::RegId, capstone::RegId, i32, u32),
+}
+
 impl CmpLogRuntime {
     #[must_use]
     pub fn new() -> CmpLogRuntime {
@@ -175,6 +203,426 @@ impl CmpLogRuntime {
     pub fn ops_handle_tbnz_masking(&self) -> &[u8] {
         self.ops_handle_tbnz_masking.as_ref().unwrap()
     }
+
+    #[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
+    #[inline]
+    /// Emit the instrumentation code which is responsible for operand value extraction and cmplog map population
+    pub fn emit_comparison_handling(
+        &self,
+        _address: u64,
+        output: &StalkerOutput,
+        op1: CmplogOperandType,
+        op2: CmplogOperandType,
+        special_case: Option<SpecialCmpLogCase>,
+    ) {
+        let writer = output.writer();
+
+        // Preserve x0, x1:
+        writer.put_stp_reg_reg_reg_offset(
+            Aarch64Register::X0,
+            Aarch64Register::X1,
+            Aarch64Register::Sp,
+            -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
+            IndexMode::PreAdjust,
+        );
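
[Editor's aside, not part of the patch: a hedged, std-only sketch of how the classification above plays out for one concrete instruction. For `cmp x3, #0x41`, op1 becomes Regid(x3) and op2 becomes Imm(0x41), and the stub emitted below ultimately forwards both runtime values to the `__libafl_targets_cmplog_instructions(k, shape, arg1, arg2)` extern declared at the top of this file. The `classify_cmp_reg_imm` helper and the example address are hypothetical, for illustration only.]

enum Operand {
    Regid(u16), // mirrors CmplogOperandType::Regid(capstone::RegId)
    Imm(u64),   // mirrors CmplogOperandType::Imm
}

// `cmp x3, #0x41` compares the value of register x3 against the immediate 0x41
fn classify_cmp_reg_imm(reg: u16, imm: u64) -> (Operand, Operand) {
    (Operand::Regid(reg), Operand::Imm(imm))
}

fn main() {
    let (op1, op2) = classify_cmp_reg_imm(3, 0x41);
    let address: u64 = 0x0040_1000; // hypothetical instruction address
    if let (Operand::Regid(r), Operand::Imm(v)) = (&op1, &op2) {
        // the populate blob then does the moral equivalent of:
        // __libafl_targets_cmplog_instructions(address, 8, value_of_x(r), 0x41)
        println!("cmplog populate @ {address:#x}: x{r} vs {v:#x}");
    }
}
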
+
+        // make sure operand1 value is saved into x0
+        match op1 {
+            CmplogOperandType::Imm(value) | CmplogOperandType::Cimm(value) => {
+                writer.put_ldr_reg_u64(Aarch64Register::X0, value);
+            }
+            CmplogOperandType::Regid(reg) => {
+                let reg = FridaInstrumentationHelper::writer_register(reg);
+                match reg {
+                    Aarch64Register::X0 | Aarch64Register::W0 => {}
+                    Aarch64Register::X1 | Aarch64Register::W1 => {
+                        writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1);
+                    }
+                    _ => {
+                        if !writer.put_mov_reg_reg(Aarch64Register::X0, reg) {
+                            writer.put_mov_reg_reg(Aarch64Register::W0, reg);
+                        }
+                    }
+                }
+            }
+            CmplogOperandType::Mem(basereg, indexreg, displacement, _width) => {
+                let basereg = FridaInstrumentationHelper::writer_register(basereg);
+                let indexreg = if indexreg.0 != 0 {
+                    Some(FridaInstrumentationHelper::writer_register(indexreg))
+                } else {
+                    None
+                };
+
+                // calculate base+index+displacement into x0
+                let displacement = displacement
+                    + if basereg == Aarch64Register::Sp {
+                        16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32
+                    } else {
+                        0
+                    };
+
+                if indexreg.is_some() {
+                    if let Some(indexreg) = indexreg {
+                        writer.put_add_reg_reg_reg(Aarch64Register::X0, basereg, indexreg);
+                    }
+                } else {
+                    match basereg {
+                        Aarch64Register::X0 | Aarch64Register::W0 => {}
+                        Aarch64Register::X1 | Aarch64Register::W1 => {
+                            writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1);
+                        }
+                        _ => {
+                            if !writer.put_mov_reg_reg(Aarch64Register::X0, basereg) {
+                                writer.put_mov_reg_reg(Aarch64Register::W0, basereg);
+                            }
+                        }
+                    }
+                }
+
+                //add displacement
+                writer.put_add_reg_reg_imm(
+                    Aarch64Register::X0,
+                    Aarch64Register::X0,
+                    displacement as u64,
+                );
+
+                //deref into x0 to get the real value
+                writer.put_ldr_reg_reg_offset(Aarch64Register::X0, Aarch64Register::X0, 0u64);
+            }
+        }
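
[Editor's aside, not part of the patch: a hedged sketch of the stack arithmetic used throughout this function, assuming frida-gum-sys' GUM_RED_ZONE_SIZE is 128 on AArch64. The stp above pre-adjusts SP down past the red zone, the ldp at the end post-adjusts it back by the same amount, and this is also why the Mem arms add 16 + GUM_RED_ZONE_SIZE to the displacement whenever the base register is Sp: by the time the stub runs, SP has already moved.]

const GUM_RED_ZONE_SIZE: i64 = 128; // assumed value, for illustration only

fn main() {
    let spill = -(16 + GUM_RED_ZONE_SIZE); // stp x0, x1, [sp, #-144]!
    let reload = 16 + GUM_RED_ZONE_SIZE; // ldp x0, x1, [sp], #144
    assert_eq!(spill + reload, 0); // SP is balanced across the instrumented site
    // an SP-based operand must be rebased by the same bias:
    let rebased = 8 + reload; // the original `[sp, #8]` now lives at `[sp, #152]`
    println!("spill {spill}, reload {reload}, rebased displacement {rebased}");
}
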
+
+        // make sure operand2 value is saved into x1
+        match op2 {
+            CmplogOperandType::Imm(value) | CmplogOperandType::Cimm(value) => {
+                writer.put_ldr_reg_u64(Aarch64Register::X1, value);
+                match special_case {
+                    Some(inst) => match inst {
+                        SpecialCmpLogCase::Tbz => {
+                            writer.put_bytes(&self.ops_handle_tbz_masking());
+                        }
+                        SpecialCmpLogCase::Tbnz => {
+                            writer.put_bytes(&self.ops_handle_tbnz_masking());
+                        }
+                    },
+                    None => (),
+                }
+            }
+            CmplogOperandType::Regid(reg) => {
+                let reg = FridaInstrumentationHelper::writer_register(reg);
+                match reg {
+                    Aarch64Register::X1 | Aarch64Register::W1 => {}
+                    Aarch64Register::X0 | Aarch64Register::W0 => {
+                        writer.put_ldr_reg_reg_offset(
+                            Aarch64Register::X1,
+                            Aarch64Register::Sp,
+                            0u64,
+                        );
+                    }
+                    _ => {
+                        if !writer.put_mov_reg_reg(Aarch64Register::X1, reg) {
+                            writer.put_mov_reg_reg(Aarch64Register::W1, reg);
+                        }
+                    }
+                }
+            }
+            CmplogOperandType::Mem(basereg, indexreg, displacement, _width) => {
+                let basereg = FridaInstrumentationHelper::writer_register(basereg);
+                let indexreg = if indexreg.0 != 0 {
+                    Some(FridaInstrumentationHelper::writer_register(indexreg))
+                } else {
+                    None
+                };
+
+                // calculate base+index+displacement into x1
+                let displacement = displacement
+                    + if basereg == Aarch64Register::Sp {
+                        16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32
+                    } else {
+                        0
+                    };
+
+                if indexreg.is_some() {
+                    if let Some(indexreg) = indexreg {
+                        match indexreg {
+                            Aarch64Register::X0 | Aarch64Register::W0 => {
+                                match basereg {
+                                    Aarch64Register::X1 | Aarch64Register::W1 => {
+                                        // x0 (the indexreg) was overwritten by the op1 value;
+                                        // x1 is basereg
+
+                                        // Preserve x2, x3:
+                                        writer.put_stp_reg_reg_reg_offset(
+                                            Aarch64Register::X2,
+                                            Aarch64Register::X3,
+                                            Aarch64Register::Sp,
+                                            -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
+                                            IndexMode::PreAdjust,
+                                        );
+
+                                        //reload indexreg to x2
+                                        writer.put_ldr_reg_reg_offset(
+                                            Aarch64Register::X2,
+                                            Aarch64Register::Sp,
+                                            0u64,
+                                        );
+                                        //add them into basereg==x1
+                                        writer.put_add_reg_reg_reg(
+                                            basereg,
+                                            basereg,
+                                            Aarch64Register::X2,
+                                        );
+
+                                        // Restore x2, x3
+                                        assert!(writer.put_ldp_reg_reg_reg_offset(
+                                            Aarch64Register::X2,
+                                            Aarch64Register::X3,
+                                            Aarch64Register::Sp,
+                                            16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64,
+                                            IndexMode::PostAdjust,
+                                        ));
+                                    }
+                                    _ => {
+                                        // x0 (the indexreg) was overwritten by the op1 value;
+                                        // basereg is not x1 nor x0
+
+                                        //reload indexreg to x1
+                                        writer.put_ldr_reg_reg_offset(
+                                            Aarch64Register::X1,
+                                            Aarch64Register::Sp,
+                                            0u64,
+                                        );
+                                        //add basereg into indexreg==x1
+                                        writer.put_add_reg_reg_reg(
+                                            Aarch64Register::X1,
+                                            basereg,
+                                            Aarch64Register::X1,
+                                        );
+                                    }
+                                }
+                            }
+                            Aarch64Register::X1 | Aarch64Register::W1 => {
+                                match basereg {
+                                    Aarch64Register::X0 | Aarch64Register::W0 => {
+                                        // x0 (the basereg) was overwritten by the op1 value;
+                                        // x1 is indexreg
+
+                                        // Preserve x2, x3:
+                                        writer.put_stp_reg_reg_reg_offset(
+                                            Aarch64Register::X2,
+                                            Aarch64Register::X3,
+                                            Aarch64Register::Sp,
+                                            -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
+                                            IndexMode::PreAdjust,
+                                        );
+
+                                        //reload basereg to x2
+                                        writer.put_ldr_reg_reg_offset(
+                                            Aarch64Register::X2,
+                                            Aarch64Register::Sp,
+                                            0u64,
+                                        );
+                                        //add basereg into indexreg==x1
+                                        writer.put_add_reg_reg_reg(
+                                            indexreg,
+                                            Aarch64Register::X2,
+                                            indexreg,
+                                        );
+
+                                        // Restore x2, x3
+                                        assert!(writer.put_ldp_reg_reg_reg_offset(
+                                            Aarch64Register::X2,
+                                            Aarch64Register::X3,
+                                            Aarch64Register::Sp,
+                                            16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64,
+                                            IndexMode::PostAdjust,
+                                        ));
+                                    }
+                                    _ => {
+                                        // indexreg is x1
+                                        // basereg is not x0 and not x1
+
+                                        //add them into x1
+                                        writer.put_add_reg_reg_reg(indexreg, basereg, indexreg);
+                                    }
+                                }
+                            }
+                            _ => {
+                                match basereg {
+                                    Aarch64Register::X0 | Aarch64Register::W0 => {
+                                        //basereg was overwritten by the op1 value
+                                        //index reg is not x0 nor x1
+
+                                        //reload basereg to x1
+                                        writer.put_ldr_reg_reg_offset(
+                                            Aarch64Register::X1,
+                                            Aarch64Register::Sp,
+                                            0u64,
+                                        );
+                                        //add indexreg to basereg==x1
+                                        writer.put_add_reg_reg_reg(
+                                            Aarch64Register::X1,
+                                            Aarch64Register::X1,
+                                            indexreg,
+                                        );
+                                    }
+                                    _ => {
+                                        //basereg is not x0, can be x1
+                                        //index reg is not x0 nor x1
+
+                                        //add them into x1
+                                        writer.put_add_reg_reg_reg(
+                                            Aarch64Register::X1,
+                                            basereg,
+                                            indexreg,
+                                        );
+                                    }
+                                }
+                            }
+                        }
+                    }
+                } else {
+                    match basereg {
+                        Aarch64Register::X1 | Aarch64Register::W1 => {}
+                        Aarch64Register::X0 | Aarch64Register::W0 => {
+                            // x0 (the basereg) was overwritten by the op1 value;
+                            //reload basereg to x1
+                            writer.put_ldr_reg_reg_offset(
+                                Aarch64Register::X1,
+                                Aarch64Register::Sp,
+                                0u64,
+                            );
+                        }
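
[Editor's aside, not part of the patch: a hedged, std-only sketch of the case analysis in the Mem arms above. Once op1 has been materialized in x0, any op2 base or index register that originally lived in x0 is stale and must be reloaded from the spill slot at [sp]; other registers can be read directly, and x2/x3 are only borrowed (and themselves spilled) when both x0 and x1 are already occupied. Plain register numbers stand in for Aarch64Register values.]

fn needs_reload_from_spill(reg: u8) -> bool {
    reg == 0 // x0/w0 were clobbered by op1; their saved copy sits at [sp]
}

fn main() {
    // (base, index) register pairs of a hypothetical op2 memory operand
    for (base, index) in [(1u8, 0u8), (0, 1), (0, 2), (2, 3)] {
        let stale: Vec<&str> = [("base", base), ("index", index)]
            .iter()
            .filter(|&&(_, r)| needs_reload_from_spill(r))
            .map(|&(name, _)| name)
            .collect();
        println!("base=x{base} index=x{index} -> reload {stale:?} from [sp]");
    }
}
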
+                        _ => {
+                            writer.put_mov_reg_reg(Aarch64Register::W1, basereg);
+                        }
+                    }
+                }
+
+                // add displacement
+                writer.put_add_reg_reg_imm(
+                    Aarch64Register::X1,
+                    Aarch64Register::X1,
+                    displacement as u64,
+                );
+                //deref into x1 to get the real value
+                writer.put_ldr_reg_reg_offset(Aarch64Register::X1, Aarch64Register::X1, 0u64);
+            }
+        }
+
+        //call cmplog runtime to populate the values map
+        writer.put_bytes(&self.ops_save_register_and_blr_to_populate());
+
+        // Restore x0, x1
+        assert!(writer.put_ldp_reg_reg_reg_offset(
+            Aarch64Register::X0,
+            Aarch64Register::X1,
+            Aarch64Register::Sp,
+            16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64,
+            IndexMode::PostAdjust,
+        ));
+    }
+
+    #[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
+    #[inline]
+    /// Check if the current instruction is a cmplog-relevant one (any opcode which sets the flags)
+    pub fn cmplog_is_interesting_instruction(
+        &self,
+        capstone: &Capstone,
+        _address: u64,
+        instr: &Insn,
+    ) -> Result<
+        (
+            CmplogOperandType,
+            CmplogOperandType,
+            Option<SpecialCmpLogCase>,
+        ),
+        (),
+    > {
+        // We only care for compare instructions - aka instructions which set the flags
+        match instr.mnemonic().unwrap() {
+            "cmp" | "ands" | "subs" | "adds" | "negs" | "ngcs" | "sbcs" | "bics" | "cbz"
+            | "cbnz" | "tbz" | "tbnz" | "adcs" => (),
+            _ => return Err(()),
+        }
+        let mut operands = capstone
+            .insn_detail(instr)
+            .unwrap()
+            .arch_detail()
+            .operands();
+
+        // cbz - 1 operand, tbz - 3 operands
+        let special_case = [
+            "cbz", "cbnz", "tbz", "tbnz", "subs", "adds", "ands", "sbcs", "bics", "adcs",
+        ]
+        .contains(&instr.mnemonic().unwrap());
+        if operands.len() != 2 && !special_case {
+            return Err(());
+        }
+
+        // handle the special opcodes which have 3 operands, but whose 1st (dest) operand is not important to us
+        if ["subs", "adds", "ands", "sbcs", "bics", "adcs"].contains(&instr.mnemonic().unwrap()) {
+            //remove the dest operand from the list
+            operands.remove(0);
+        }
+
+        // cbz marked as special since there is only 1 operand
+        let special_case = match instr.mnemonic().unwrap() {
+            "cbz" | "cbnz" => true,
+            _ => false,
+        };
+
+        let operand1 = if let Arm64Operand(arm64operand) = operands.first().unwrap() {
+            match arm64operand.op_type {
+                Arm64OperandType::Reg(regid) => Some(CmplogOperandType::Regid(regid)),
+                Arm64OperandType::Imm(val) => Some(CmplogOperandType::Imm(val as u64)),
+                Arm64OperandType::Mem(opmem) => Some(CmplogOperandType::Mem(
+                    opmem.base(),
+                    opmem.index(),
+                    opmem.disp(),
+                    FridaInstrumentationHelper::instruction_width(instr, &operands),
+                )),
+                Arm64OperandType::Cimm(val) => Some(CmplogOperandType::Cimm(val as u64)),
+                _ => return Err(()),
+            }
+        } else {
+            None
+        };
+
+        let operand2 = match special_case {
+            true => Some(CmplogOperandType::Imm(0)),
+            false => {
+                if let Arm64Operand(arm64operand2) = &operands[1] {
+                    match arm64operand2.op_type {
+                        Arm64OperandType::Reg(regid) => Some(CmplogOperandType::Regid(regid)),
+                        Arm64OperandType::Imm(val) => Some(CmplogOperandType::Imm(val as u64)),
+                        Arm64OperandType::Mem(opmem) => Some(CmplogOperandType::Mem(
+                            opmem.base(),
+                            opmem.index(),
+                            opmem.disp(),
+                            FridaInstrumentationHelper::instruction_width(instr, &operands),
+                        )),
+                        Arm64OperandType::Cimm(val) => Some(CmplogOperandType::Cimm(val as u64)),
+                        _ => return Err(()),
+                    }
+                } else {
+                    None
+                }
+            }
+        };
+
+        // tbz will need special handling at emit time (masking operand1's value with operand2)
+        let special_case = match
instr.mnemonic().unwrap() { + "tbz" => Some(SpecialCmpLogCase::Tbz), + "tbnz" => Some(SpecialCmpLogCase::Tbnz), + _ => None, + }; + + if operand1.is_some() && operand2.is_some() { + Ok((operand1.unwrap(), operand2.unwrap(), special_case)) + } else { + Err(()) + } + } } impl Default for CmpLogRuntime { diff --git a/libafl_frida/src/executor.rs b/libafl_frida/src/executor.rs index cdf601e80f..209432becb 100644 --- a/libafl_frida/src/executor.rs +++ b/libafl_frida/src/executor.rs @@ -18,7 +18,7 @@ use libafl::{ }; #[cfg(unix)] -use crate::asan_errors::ASAN_ERRORS; +use crate::asan::errors::ASAN_ERRORS; #[cfg(windows)] use libafl::executors::inprocess::{HasInProcessHandlers, InProcessHandlers}; @@ -116,7 +116,7 @@ where #[cfg(all(not(debug_assertions), target_arch = "x86_64"))] let mut stalker = Stalker::new(gum); - #[cfg(all(not(debug_assertions), target_arch = "x86_64"))] + #[cfg(not(all(debug_assertions, target_arch = "x86_64")))] for range in helper.ranges().gaps(&(0..usize::MAX)) { println!("excluding range: {:x}-{:x}", range.start, range.end); stalker.exclude(&MemoryRange::new( diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index fa4805fc19..bfa096162b 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -7,32 +7,27 @@ use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; #[cfg(target_arch = "aarch64")] use capstone::{ - arch::{ - self, - arm64::{Arm64Extender, Arm64OperandType, Arm64Shift}, - ArchOperand::Arm64Operand, - BuildsCapstone, - }, + arch::{self, arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone}, Capstone, Insn, }; #[cfg(all(target_arch = "x86_64", unix))] use capstone::{ - arch::{self, x86::X86OperandType, ArchOperand::X86Operand, BuildsCapstone}, - Capstone, Insn, RegId, + arch::{self, BuildsCapstone}, + Capstone, RegId, }; #[cfg(target_arch = "aarch64")] use num_traits::cast::FromPrimitive; +#[cfg(target_arch = "aarch64")] +use frida_gum::instruction_writer::Aarch64Register; + #[cfg(target_arch = "x86_64")] use frida_gum::instruction_writer::X86Register; -#[cfg(target_arch = "aarch64")] -use frida_gum::instruction_writer::{Aarch64Register, IndexMode}; + use frida_gum::{ - instruction_writer::InstructionWriter, - stalker::{StalkerOutput, Transformer}, - ModuleDetails, ModuleMap, + instruction_writer::InstructionWriter, stalker::Transformer, ModuleDetails, ModuleMap, }; #[cfg(unix)] @@ -46,7 +41,7 @@ use rangemap::RangeMap; use nix::sys::mman::{mmap, MapFlags, ProtFlags}; #[cfg(unix)] -use crate::{asan_rt::AsanRuntime, FridaOptions}; +use crate::{asan::asan_rt::AsanRuntime, FridaOptions}; #[cfg(windows)] use crate::FridaOptions; @@ -56,20 +51,6 @@ use crate::coverage_rt::CoverageRuntime; #[cfg(feature = "cmplog")] use crate::cmplog_rt::CmpLogRuntime; -#[cfg(all(feature = "cmplog", target_arch = "aarch64"))] -enum CmplogOperandType { - Regid(capstone::RegId), - Imm(u64), - Cimm(u64), - Mem(capstone::RegId, capstone::RegId, i32, u32), -} - -#[cfg(all(feature = "cmplog", target_arch = "aarch64"))] -enum SpecialCmpLogCase { - Tbz, - Tbnz, -} - #[cfg(any(target_vendor = "apple"))] const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; #[cfg(not(any(target_vendor = "apple", target_os = "windows")))] @@ -102,8 +83,6 @@ pub trait FridaHelper<'a> { /// An helper that feeds `FridaInProcessExecutor` with edge-coverage instrumentation pub struct FridaInstrumentationHelper<'a> { coverage_rt: CoverageRuntime, - #[cfg(unix)] - current_report_impl: u64, /// Transformer that has to be passed to FridaInProcessExecutor 
 transformer: Option<Transformer<'a>>,
 #[cfg(unix)]
@@ -240,8 +219,6 @@ impl<'a> FridaInstrumentationHelper<'a> {
         let mut helper = Self {
             coverage_rt: CoverageRuntime::new(),
-            #[cfg(unix)]
-            current_report_impl: 0,
             transformer: None,
             #[cfg(target_arch = "aarch64")]
             capstone: Capstone::new()
@@ -333,19 +310,24 @@ impl<'a> FridaInstrumentationHelper<'a> {
                     if helper.options().asan_enabled() {
                         #[cfg(all(target_arch = "x86_64", unix))]
-                        if let Ok((segment, width, basereg, indexreg, scale, disp)) =
-                            helper.asan_is_interesting_instruction(address, instr)
+                        if let Ok((segment, width, basereg, indexreg, scale, disp)) = helper
+                            .asan_runtime
+                            .asan_is_interesting_instruction(&helper.capstone, address, instr)
                         {
-                            helper.emit_shadow_check(
+                            helper.asan_runtime.emit_shadow_check(
                                 address, &output, segment, width, basereg, indexreg, scale, disp,
                             );
                         }
                         #[cfg(target_arch = "aarch64")]
                         if let Ok((basereg, indexreg, displacement, width, shift, extender)) =
-                            helper.asan_is_interesting_instruction(address, instr)
+                            helper.asan_runtime.asan_is_interesting_instruction(
+                                &helper.capstone,
+                                address,
+                                instr,
+                            )
                         {
-                            helper.emit_shadow_check(
+                            helper.asan_runtime.emit_shadow_check(
                                 address,
                                 &output,
                                 basereg,
@@ -362,11 +344,12 @@ impl<'a> FridaInstrumentationHelper<'a> {
                         todo!("Implement cmplog for non-aarch64 targets");
                         #[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
                         // check if this instruction is a compare instruction and if so save the registers values
-                        if let Ok((op1, op2, special_case)) =
-                            helper.cmplog_is_interesting_instruction(address, instr)
+                        if let Ok((op1, op2, special_case)) = helper
+                            .cmplog_runtime
+                            .cmplog_is_interesting_instruction(&helper.capstone, address, instr)
                         {
                             //emit code that saves the relevant data in runtime(passes it to x0, x1)
-                            helper.emit_comparison_handling(
+                            helper.cmplog_runtime.emit_comparison_handling(
                                 address,
                                 &output,
                                 op1,
                                 op2,
@@ -405,748 +388,10 @@ impl<'a> FridaInstrumentationHelper<'a> {
     fn options(&self) -> &FridaOptions {
         self.options
     }
-    #[cfg(target_arch = "aarch64")]
-    #[inline]
-    fn writer_register(&self, reg: capstone::RegId) -> Aarch64Register {
-        let regint: u16 = reg.0;
-        Aarch64Register::from_u32(regint as u32).unwrap()
-    }
-
-    // frida registers: https://docs.rs/frida-gum/0.4.0/frida_gum/instruction_writer/enum.X86Register.html
-    // capstone registers: https://docs.rs/capstone-sys/0.14.0/capstone_sys/x86_reg/index.html
-    #[cfg(all(target_arch = "x86_64", unix))]
-    #[must_use]
-    #[inline]
-    #[allow(clippy::unused_self)]
-    pub fn writer_register(&self, reg: RegId) -> X86Register {
-        let regint: u16 = reg.0;
-        match regint {
-            19 => X86Register::Eax,
-            22 => X86Register::Ecx,
-            24 => X86Register::Edx,
-            21 => X86Register::Ebx,
-            30 => X86Register::Esp,
-            20 => X86Register::Ebp,
-            29 => X86Register::Esi,
-            23 => X86Register::Edi,
-            226 => X86Register::R8d,
-            227 => X86Register::R9d,
-            228 => X86Register::R10d,
-            229 => X86Register::R11d,
-            230 => X86Register::R12d,
-            231 => X86Register::R13d,
-            232 => X86Register::R14d,
-            233 => X86Register::R15d,
-            26 => X86Register::Eip,
-            35 => X86Register::Rax,
-            38 => X86Register::Rcx,
-            40 => X86Register::Rdx,
-            37 => X86Register::Rbx,
-            44 => X86Register::Rsp,
-            36 => X86Register::Rbp,
-            43 => X86Register::Rsi,
-            39 => X86Register::Rdi,
-            106 => X86Register::R8,
-            107 => X86Register::R9,
-            108 => X86Register::R10,
-            109 => X86Register::R11,
-            110 => X86Register::R12,
-            111 => X86Register::R13,
-            112 => X86Register::R14,
-            113 => X86Register::R15,
-            41 => X86Register::Rip,
-            _ => X86Register::None, // Ignore Xax..Xip
-        }
-    }
-
-    #[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
-    #[inline]
-    /// Emit the instrumentation code which is responsible for opernads value extraction and cmplog map population
-    fn emit_comparison_handling(
-        &self,
-        _address: u64,
-        output: &StalkerOutput,
-        op1: CmplogOperandType,
-        op2: CmplogOperandType,
-        special_case: Option<SpecialCmpLogCase>,
-    ) {
-        let writer = output.writer();
-
-        // Preserve x0, x1:
-        writer.put_stp_reg_reg_reg_offset(
-            Aarch64Register::X0,
-            Aarch64Register::X1,
-            Aarch64Register::Sp,
-            -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64,
-            IndexMode::PreAdjust,
-        );
-
-        // make sure operand1 value is saved into x0
-        match op1 {
-            CmplogOperandType::Imm(value) | CmplogOperandType::Cimm(value) => {
-                writer.put_ldr_reg_u64(Aarch64Register::X0, value);
-            }
-            CmplogOperandType::Regid(reg) => {
-                let reg = self.writer_register(reg);
-                match reg {
-                    Aarch64Register::X0 | Aarch64Register::W0 => {}
-                    Aarch64Register::X1 | Aarch64Register::W1 => {
-                        writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1);
-                    }
-                    _ => {
-                        if !writer.put_mov_reg_reg(Aarch64Register::X0, reg) {
-                            writer.put_mov_reg_reg(Aarch64Register::W0, reg);
-                        }
-                    }
-                }
-            }
-            CmplogOperandType::Mem(basereg, indexreg, displacement, _width) => {
-                let basereg = self.writer_register(basereg);
-                let indexreg = if indexreg.0 != 0 {
-                    Some(self.writer_register(indexreg))
-                } else {
-                    None
-                };
-
-                // calculate base+index+displacment into x0
-                let displacement = displacement
-                    + if basereg ==
Aarch64Register::Sp { - 16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32 - } else { - 0 - }; - - if indexreg.is_some() { - if let Some(indexreg) = indexreg { - match indexreg { - Aarch64Register::X0 | Aarch64Register::W0 => { - match basereg { - Aarch64Register::X1 | Aarch64Register::W1 => { - // x0 is overwrittern indexreg by op1 value. - // x1 is basereg - - // Preserve x2, x3: - writer.put_stp_reg_reg_reg_offset( - Aarch64Register::X2, - Aarch64Register::X3, - Aarch64Register::Sp, - -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64, - IndexMode::PreAdjust, - ); - - //reload indexreg to x2 - writer.put_ldr_reg_reg_offset( - Aarch64Register::X2, - Aarch64Register::Sp, - 0u64, - ); - //add them into basereg==x1 - writer.put_add_reg_reg_reg( - basereg, - basereg, - Aarch64Register::X2, - ); - - // Restore x2, x3 - assert!(writer.put_ldp_reg_reg_reg_offset( - Aarch64Register::X2, - Aarch64Register::X3, - Aarch64Register::Sp, - 16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64, - IndexMode::PostAdjust, - )); - } - _ => { - // x0 is overwrittern indexreg by op1 value. - // basereg is not x1 nor x0 - - //reload indexreg to x1 - writer.put_ldr_reg_reg_offset( - Aarch64Register::X1, - Aarch64Register::Sp, - 0u64, - ); - //add basereg into indexreg==x1 - writer.put_add_reg_reg_reg( - Aarch64Register::X1, - basereg, - Aarch64Register::X1, - ); - } - } - } - Aarch64Register::X1 | Aarch64Register::W1 => { - match basereg { - Aarch64Register::X0 | Aarch64Register::W0 => { - // x0 is overwrittern basereg by op1 value. - // x1 is indexreg - - // Preserve x2, x3: - writer.put_stp_reg_reg_reg_offset( - Aarch64Register::X2, - Aarch64Register::X3, - Aarch64Register::Sp, - -(16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i32) as i64, - IndexMode::PreAdjust, - ); - - //reload basereg to x2 - writer.put_ldr_reg_reg_offset( - Aarch64Register::X2, - Aarch64Register::Sp, - 0u64, - ); - //add basereg into indexreg==x1 - writer.put_add_reg_reg_reg( - indexreg, - Aarch64Register::X2, - indexreg, - ); - - // Restore x2, x3 - assert!(writer.put_ldp_reg_reg_reg_offset( - Aarch64Register::X2, - Aarch64Register::X3, - Aarch64Register::Sp, - 16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64, - IndexMode::PostAdjust, - )); - } - _ => { - // indexreg is x1 - // basereg is not x0 and not x1 - - //add them into x1 - writer.put_add_reg_reg_reg(indexreg, basereg, indexreg); - } - } - } - _ => { - match basereg { - Aarch64Register::X0 | Aarch64Register::W0 => { - //basereg is overwrittern by op1 value - //index reg is not x0 nor x1 - - //reload basereg to x1 - writer.put_ldr_reg_reg_offset( - Aarch64Register::X1, - Aarch64Register::Sp, - 0u64, - ); - //add indexreg to basereg==x1 - writer.put_add_reg_reg_reg( - Aarch64Register::X1, - Aarch64Register::X1, - indexreg, - ); - } - _ => { - //basereg is not x0, can be x1 - //index reg is not x0 nor x1 - - //add them into x1 - writer.put_add_reg_reg_reg( - Aarch64Register::X1, - basereg, - indexreg, - ); - } - } - } - } - } - } else { - match basereg { - Aarch64Register::X1 | Aarch64Register::W1 => {} - Aarch64Register::X0 | Aarch64Register::W0 => { - // x0 is overwrittern basereg by op1 value. 
- //reload basereg to x1 - writer.put_ldr_reg_reg_offset( - Aarch64Register::X1, - Aarch64Register::Sp, - 0u64, - ); - } - _ => { - writer.put_mov_reg_reg(Aarch64Register::W1, basereg); - } - } - } - - // add displacement - writer.put_add_reg_reg_imm( - Aarch64Register::X1, - Aarch64Register::X1, - displacement as u64, - ); - //deref into x1 to get the real value - writer.put_ldr_reg_reg_offset(Aarch64Register::X1, Aarch64Register::X1, 0u64); - } - } - - //call cmplog runtime to populate the values map - writer.put_bytes(&self.cmplog_runtime.ops_save_register_and_blr_to_populate()); - - // Restore x0, x1 - assert!(writer.put_ldp_reg_reg_reg_offset( - Aarch64Register::X0, - Aarch64Register::X1, - Aarch64Register::Sp, - 16 + frida_gum_sys::GUM_RED_ZONE_SIZE as i64, - IndexMode::PostAdjust, - )); - } - - #[inline] - #[allow(clippy::too_many_lines)] - #[allow(clippy::too_many_arguments)] - #[cfg(all(target_arch = "x86_64", unix))] - pub fn emit_shadow_check( - &mut self, - address: u64, - output: &StalkerOutput, - _segment: RegId, - width: u8, - basereg: RegId, - indexreg: RegId, - scale: i32, - disp: i64, - ) { - let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE); - let writer = output.writer(); - let true_rip = address; - - let basereg = if basereg.0 == 0 { - None - } else { - let reg = self.writer_register(basereg); - Some(reg) - }; - - let indexreg = if indexreg.0 == 0 { - None - } else { - let reg = self.writer_register(indexreg); - Some(reg) - }; - - let scale = match scale { - 2 => 1, - 4 => 2, - 8 => 3, - _ => 0, - }; - if self.current_report_impl == 0 - || !writer.can_branch_directly_to(self.current_report_impl) - || !writer.can_branch_directly_between(writer.pc() + 128, self.current_report_impl) - { - let after_report_impl = writer.code_offset() + 2; - - #[cfg(target_arch = "x86_64")] - writer.put_jmp_near_label(after_report_impl); - #[cfg(target_arch = "aarch64")] - writer.put_b_label(after_report_impl); - - self.current_report_impl = writer.pc(); - #[cfg(unix)] - writer.put_bytes(self.asan_runtime.blob_report()); - - writer.put_label(after_report_impl); - } - - /* Save registers that we'll use later in shadow_check_blob - | addr | rip | - | Rcx | Rax | - | Rsi | Rdx | - Old Rsp - (redsone_size) -> | flags | Rdi | - | | | - Old Rsp -> | | | - */ - writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, -(redzone_size)); - writer.put_pushfx(); - writer.put_push_reg(X86Register::Rdi); - writer.put_push_reg(X86Register::Rsi); - writer.put_push_reg(X86Register::Rdx); - writer.put_push_reg(X86Register::Rcx); - writer.put_push_reg(X86Register::Rax); - - /* - Things are a bit different when Rip is either base register or index register. - Suppose we have an instruction like - `bnd jmp qword ptr [rip + 0x2e4b5]` - We can't just emit code like - `mov rdi, rip` to get RIP loaded into RDI, - because this RIP is NOT the orginal RIP (, which is usually within .text) anymore, rather it is pointing to the memory allocated by the frida stalker. - Please confer https://frida.re/docs/stalker/ for details. 
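
[Editor's note, extending the comment above with a hedged, concrete illustration; the addresses are made up. With Stalker active, the copy of `bnd jmp qword ptr [rip + 0x2e4b5]` that actually executes lives in a recompiled buffer, so reading RIP at run time would yield something like 0x7f00_0000_0042 rather than the original 0x5555_5555_1000, and the computed memory target would be wrong. That is why the `Init Rdi` code below loads `true_rip` as an immediate, via writer.put_mov_reg_address(X86Register::Rdi, true_rip), whenever RIP is the base or index register.]
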
- */ - // Init Rdi - match basereg { - Some(reg) => match reg { - X86Register::Rip => { - writer.put_mov_reg_address(X86Register::Rdi, true_rip); - } - _ => { - writer.put_mov_reg_reg(X86Register::Rdi, basereg.unwrap()); - } - }, - None => { - writer.put_xor_reg_reg(X86Register::Rdi, X86Register::Rdi); - } - } - - match indexreg { - Some(reg) => match reg { - X86Register::Rip => { - writer.put_mov_reg_address(X86Register::Rsi, true_rip); - } - _ => { - writer.put_mov_reg_reg(X86Register::Rsi, indexreg.unwrap()); - } - }, - None => { - writer.put_xor_reg_reg(X86Register::Rsi, X86Register::Rsi); - } - } - - // Scale - if scale > 0 { - writer.put_shl_reg_u8(X86Register::Rsi, scale); - } - - // Finally set Rdi to base + index * scale + disp - writer.put_add_reg_reg(X86Register::Rdi, X86Register::Rsi); - writer.put_lea_reg_reg_offset(X86Register::Rdi, X86Register::Rdi, disp); - - writer.put_mov_reg_address(X86Register::Rsi, true_rip); // load true_rip into rsi in case we need them in handle_trap - writer.put_push_reg(X86Register::Rsi); // save true_rip - writer.put_push_reg(X86Register::Rdi); // save accessed_address - - #[cfg(unix)] - let checked: bool = match width { - 1 => writer.put_bytes(self.asan_runtime.blob_check_mem_byte()), - 2 => writer.put_bytes(self.asan_runtime.blob_check_mem_halfword()), - 4 => writer.put_bytes(self.asan_runtime.blob_check_mem_dword()), - 8 => writer.put_bytes(self.asan_runtime.blob_check_mem_qword()), - 16 => writer.put_bytes(self.asan_runtime.blob_check_mem_16bytes()), - _ => false, - }; - - if checked { - writer.put_jmp_address(self.current_report_impl); - for _ in 0..10 { - // shadow_check_blob's done will land somewhere in these nops - // on amd64 jump can takes 10 bytes at most, so that's why I put 10 bytes. - writer.put_nop(); - } - } - - writer.put_pop_reg(X86Register::Rdi); - writer.put_pop_reg(X86Register::Rsi); - - writer.put_pop_reg(X86Register::Rax); - writer.put_pop_reg(X86Register::Rcx); - writer.put_pop_reg(X86Register::Rdx); - writer.put_pop_reg(X86Register::Rsi); - writer.put_pop_reg(X86Register::Rdi); - writer.put_popfx(); - writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, redzone_size); - } #[cfg(target_arch = "aarch64")] #[inline] - fn emit_shadow_check( - &mut self, - _address: u64, - output: &StalkerOutput, - basereg: capstone::RegId, - indexreg: capstone::RegId, - displacement: i32, - width: u32, - shift: Arm64Shift, - extender: Arm64Extender, - ) { - let redzone_size = frida_gum_sys::GUM_RED_ZONE_SIZE as i32; - let writer = output.writer(); - - let basereg = self.writer_register(basereg); - let indexreg = if indexreg.0 != 0 { - Some(self.writer_register(indexreg)) - } else { - None - }; - - if self.current_report_impl == 0 - || !writer.can_branch_directly_to(self.current_report_impl) - || !writer.can_branch_directly_between(writer.pc() + 128, self.current_report_impl) - { - let after_report_impl = writer.code_offset() + 2; - - #[cfg(target_arch = "x86_64")] - writer.put_jmp_near_label(after_report_impl); - #[cfg(target_arch = "aarch64")] - writer.put_b_label(after_report_impl); - - self.current_report_impl = writer.pc(); - - #[cfg(unix)] - writer.put_bytes(self.asan_runtime.blob_report()); - - writer.put_label(after_report_impl); - } - //writer.put_brk_imm(1); - - // Preserve x0, x1: - writer.put_stp_reg_reg_reg_offset( - Aarch64Register::X0, - Aarch64Register::X1, - Aarch64Register::Sp, - -(16 + redzone_size) as i64, - IndexMode::PreAdjust, - ); - - // Make sure the base register is copied into x0 - match basereg { - 
Aarch64Register::X0 | Aarch64Register::W0 => {} - Aarch64Register::X1 | Aarch64Register::W1 => { - writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1); - } - _ => { - if !writer.put_mov_reg_reg(Aarch64Register::X0, basereg) { - writer.put_mov_reg_reg(Aarch64Register::W0, basereg); - } - } - } - - // Make sure the index register is copied into x1 - if indexreg.is_some() { - if let Some(indexreg) = indexreg { - match indexreg { - Aarch64Register::X0 | Aarch64Register::W0 => { - writer.put_ldr_reg_reg_offset( - Aarch64Register::X1, - Aarch64Register::Sp, - 0u64, - ); - } - Aarch64Register::X1 | Aarch64Register::W1 => {} - _ => { - if !writer.put_mov_reg_reg(Aarch64Register::X1, indexreg) { - writer.put_mov_reg_reg(Aarch64Register::W1, indexreg); - } - } - } - } - - if let (Arm64Extender::ARM64_EXT_INVALID, Arm64Shift::Invalid) = (extender, shift) { - writer.put_add_reg_reg_reg( - Aarch64Register::X0, - Aarch64Register::X0, - Aarch64Register::X1, - ); - } else { - let extender_encoding: i32 = match extender { - Arm64Extender::ARM64_EXT_UXTB => 0b000, - Arm64Extender::ARM64_EXT_UXTH => 0b001, - Arm64Extender::ARM64_EXT_UXTW => 0b010, - Arm64Extender::ARM64_EXT_UXTX => 0b011, - Arm64Extender::ARM64_EXT_SXTB => 0b100, - Arm64Extender::ARM64_EXT_SXTH => 0b101, - Arm64Extender::ARM64_EXT_SXTW => 0b110, - Arm64Extender::ARM64_EXT_SXTX => 0b111, - _ => -1, - }; - let (shift_encoding, shift_amount): (i32, u32) = match shift { - Arm64Shift::Lsl(amount) => (0b00, amount), - Arm64Shift::Lsr(amount) => (0b01, amount), - Arm64Shift::Asr(amount) => (0b10, amount), - _ => (-1, 0), - }; - - if extender_encoding != -1 && shift_amount < 0b1000 { - // emit add extended register: https://developer.arm.com/documentation/ddi0602/latest/Base-Instructions/ADD--extended-register---Add--extended-register-- - writer.put_bytes( - &(0x8b210000 | ((extender_encoding as u32) << 13) | (shift_amount << 10)) - .to_le_bytes(), - ); - } else if shift_encoding != -1 { - writer.put_bytes( - &(0x8b010000 | ((shift_encoding as u32) << 22) | (shift_amount << 10)) - .to_le_bytes(), - ); - } else { - panic!("extender: {:?}, shift: {:?}", extender, shift); - } - }; - } - - let displacement = displacement - + if basereg == Aarch64Register::Sp { - 16 + redzone_size - } else { - 0 - }; - - #[allow(clippy::comparison_chain)] - if displacement < 0 { - if displacement > -4096 { - // Subtract the displacement into x0 - writer.put_sub_reg_reg_imm( - Aarch64Register::X0, - Aarch64Register::X0, - displacement.abs() as u64, - ); - } else { - let displacement_hi = displacement.abs() / 4096; - let displacement_lo = displacement.abs() % 4096; - writer.put_bytes(&(0xd1400000u32 | ((displacement_hi as u32) << 10)).to_le_bytes()); - writer.put_sub_reg_reg_imm( - Aarch64Register::X0, - Aarch64Register::X0, - displacement_lo as u64, - ); - } - } else if displacement > 0 { - if displacement < 4096 { - // Add the displacement into x0 - writer.put_add_reg_reg_imm( - Aarch64Register::X0, - Aarch64Register::X0, - displacement as u64, - ); - } else { - let displacement_hi = displacement / 4096; - let displacement_lo = displacement % 4096; - writer.put_bytes(&(0x91400000u32 | ((displacement_hi as u32) << 10)).to_le_bytes()); - writer.put_add_reg_reg_imm( - Aarch64Register::X0, - Aarch64Register::X0, - displacement_lo as u64, - ); - } - } - // Insert the check_shadow_mem code blob - #[cfg(unix)] - match width { - 1 => writer.put_bytes(&self.asan_runtime.blob_check_mem_byte()), - 2 => writer.put_bytes(&self.asan_runtime.blob_check_mem_halfword()), - 3 
-    #[cfg(target_arch = "aarch64")]
-    #[inline]
-    fn instruction_width(&self, instr: &Insn, operands: &Vec<capstone::arch::ArchOperand>) -> u32 {
+    pub fn instruction_width(instr: &Insn, operands: &Vec<capstone::arch::ArchOperand>) -> u32 {
         use capstone::arch::arm64::Arm64Insn as I;
         use capstone::arch::arm64::Arm64Reg as R;
         use capstone::arch::arm64::Arm64Vas as V;
@@ -1212,217 +457,55 @@ impl<'a> FridaInstrumentationHelper<'a> {
     #[cfg(target_arch = "aarch64")]
     #[inline]
-    fn asan_is_interesting_instruction(
-        &self,
-        _address: u64,
-        instr: &Insn,
-    ) -> Result<
-        (
-            capstone::RegId,
-            capstone::RegId,
-            i32,
-            u32,
-            Arm64Shift,
-            Arm64Extender,
-        ),
-        (),
-    > {
-        // We have to ignore these instructions. Simulating them with their side effects is
-        // complex, to say the least.
-        match instr.mnemonic().unwrap() {
-            "ldaxr" | "stlxr" | "ldxr" | "stxr" | "ldar" | "stlr" | "ldarb" | "ldarh" | "ldaxp"
-            | "ldaxrb" | "ldaxrh" | "stlrb" | "stlrh" | "stlxp" | "stlxrb" | "stlxrh" | "ldxrb"
-            | "ldxrh" | "stxrb" | "stxrh" => return Err(()),
-            _ => (),
-        }
-
-        let operands = self
-            .capstone
-            .insn_detail(instr)
-            .unwrap()
-            .arch_detail()
-            .operands();
-        if operands.len() < 2 {
-            return Err(());
-        }
-
-        if let Arm64Operand(arm64operand) = operands.last().unwrap() {
-            if let Arm64OperandType::Mem(opmem) = arm64operand.op_type {
-                return Ok((
-                    opmem.base(),
-                    opmem.index(),
-                    opmem.disp(),
-                    self.instruction_width(instr, &operands),
-                    arm64operand.shift,
-                    arm64operand.ext,
-                ));
-            }
-        }
-
-        Err(())
+    pub fn writer_register(reg: capstone::RegId) -> Aarch64Register {
+        let regint: u16 = reg.0;
+        Aarch64Register::from_u32(regint as u32).unwrap()
     }
 
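
For context, the removed `asan_is_interesting_instruction` above is a thin filter over capstone's operand detail. A self-contained sketch of that query pattern (not part of this diff; the instruction bytes are an illustrative test vector for `ldr x0, [x1, x2]`):

    use capstone::{
        arch::{self, arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone},
        Capstone,
    };

    fn main() -> Result<(), capstone::Error> {
        let capstone = Capstone::new()
            .arm64()
            .mode(arch::arm64::ArchMode::Arm)
            .detail(true) // operand detail is what the filter is built on
            .build()?;
        // 0xf8626820 == `ldr x0, [x1, x2]`, in little-endian byte order below
        let instructions = capstone.disasm_all(&[0x20, 0x68, 0x62, 0xf8], 0x1000)?;
        for instr in instructions.iter() {
            let operands = capstone.insn_detail(&instr)?.arch_detail().operands();
            // As in the removed code, the memory operand of a load/store is last.
            if let Some(Arm64Operand(arm64operand)) = operands.last() {
                if let Arm64OperandType::Mem(opmem) = arm64operand.op_type {
                    println!(
                        "base: {:?}, index: {:?}, disp: {}",
                        opmem.base(),
                        opmem.index(),
                        opmem.disp()
                    );
                }
            }
        }
        Ok(())
    }
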
+    // frida registers: https://docs.rs/frida-gum/0.4.0/frida_gum/instruction_writer/enum.X86Register.html
+    // capstone registers: https://docs.rs/capstone-sys/0.14.0/capstone_sys/x86_reg/index.html
     #[cfg(all(target_arch = "x86_64", unix))]
+    #[must_use]
     #[inline]
-    fn asan_is_interesting_instruction(
-        &self,
-        _address: u64,
-        instr: &Insn,
-    ) -> Result<(RegId, u8, RegId, RegId, i32, i64), ()> {
-        let operands = self
-            .capstone
-            .insn_detail(instr)
-            .unwrap()
-            .arch_detail()
-            .operands();
-
-        // Ignore lea instruction
-        // put nop into the white-list so that instructions like
-        // like `nop dword [rax + rax]` does not get caught.
-        match instr.mnemonic().unwrap() {
-            "lea" | "nop" => return Err(()),
-
-            _ => (),
-        }
-
-        // This is a TODO! In this case, both the src and the dst are mem operand
-        // so we would need to return two operadns?
-        if instr.mnemonic().unwrap().starts_with("rep") {
-            return Err(());
-        }
-
-        for operand in operands {
-            if let X86Operand(x86operand) = operand {
-                if let X86OperandType::Mem(opmem) = x86operand.op_type {
-                    /*
-                    println!(
-                        "insn: {:#?} {:#?} width: {}, segment: {:#?}, base: {:#?}, index: {:#?}, scale: {}, disp: {}",
-                        insn_id,
-                        instr,
-                        x86operand.size,
-                        opmem.segment(),
-                        opmem.base(),
-                        opmem.index(),
-                        opmem.scale(),
-                        opmem.disp(),
-                    );
-                    */
-                    if opmem.segment() == RegId(0) {
-                        return Ok((
-                            opmem.segment(),
-                            x86operand.size,
-                            opmem.base(),
-                            opmem.index(),
-                            opmem.scale(),
-                            opmem.disp(),
-                        ));
-                    }
-                }
-            }
-        }
-
-        Err(())
-    }
-
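
The cmplog hunk below applies three normalizations before a comparison is logged; condensed as a sketch (not part of this diff; the enums are trimmed to the variants these rules need):

    use capstone::RegId;

    #[allow(dead_code)]
    enum CmplogOperandType {
        Regid(RegId),
        Imm(u64),
    }

    enum SpecialCmpLogCase {
        Tbz,
        Tbnz,
    }

    /// Three-operand flag-setters drop their destination (it does not affect
    /// the flags), `cbz`/`cbnz` gain an implicit zero as second operand, and
    /// `tbz`/`tbnz` are marked so the emitter can mask operand1 at emit time.
    fn normalize(
        mnemonic: &str,
        operands: &mut Vec<CmplogOperandType>,
    ) -> Option<SpecialCmpLogCase> {
        if ["subs", "adds", "ands", "sbcs", "bics", "adcs"].contains(&mnemonic) {
            operands.remove(0);
        }
        if ["cbz", "cbnz"].contains(&mnemonic) {
            operands.push(CmplogOperandType::Imm(0));
        }
        match mnemonic {
            "tbz" => Some(SpecialCmpLogCase::Tbz),
            "tbnz" => Some(SpecialCmpLogCase::Tbnz),
            _ => None,
        }
    }
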
-    #[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
-    #[inline]
-    /// Check if the current instruction is cmplog relevant one(any opcode which sets the flags)
-    fn cmplog_is_interesting_instruction(
-        &self,
-        _address: u64,
-        instr: &Insn,
-    ) -> Result<
-        (
-            CmplogOperandType,
-            CmplogOperandType,
-            Option<SpecialCmpLogCase>,
-        ),
-        (),
-    > {
-        // We only care for compare instrunctions - aka instructions which set the flags
-        match instr.mnemonic().unwrap() {
-            "cmp" | "ands" | "subs" | "adds" | "negs" | "ngcs" | "sbcs" | "bics" | "cbz"
-            | "cbnz" | "tbz" | "tbnz" | "adcs" => (),
-            _ => return Err(()),
-        }
-        let mut operands = self
-            .capstone
-            .insn_detail(instr)
-            .unwrap()
-            .arch_detail()
-            .operands();
-
-        // cbz - 1 operand, tbz - 3 operands
-        let special_case = [
-            "cbz", "cbnz", "tbz", "tbnz", "subs", "adds", "ands", "sbcs", "bics", "adcs",
-        ]
-        .contains(&instr.mnemonic().unwrap());
-        if operands.len() != 2 && !special_case {
-            return Err(());
-        }
-
-        // handle special opcodes case which have 3 operands, but the 1st(dest) is not important to us
-        if ["subs", "adds", "ands", "sbcs", "bics", "adcs"].contains(&instr.mnemonic().unwrap()) {
-            //remove the dest operand from the list
-            operands.remove(0);
-        }
-
-        // cbz marked as special since there is only 1 operand
-        let special_case = match instr.mnemonic().unwrap() {
-            "cbz" | "cbnz" => true,
-            _ => false,
-        };
-
-        let operand1 = if let Arm64Operand(arm64operand) = operands.first().unwrap() {
-            match arm64operand.op_type {
-                Arm64OperandType::Reg(regid) => Some(CmplogOperandType::Regid(regid)),
-                Arm64OperandType::Imm(val) => Some(CmplogOperandType::Imm(val as u64)),
-                Arm64OperandType::Mem(opmem) => Some(CmplogOperandType::Mem(
-                    opmem.base(),
-                    opmem.index(),
-                    opmem.disp(),
-                    self.instruction_width(instr, &operands),
-                )),
-                Arm64OperandType::Cimm(val) => Some(CmplogOperandType::Cimm(val as u64)),
-                _ => return Err(()),
-            }
-        } else {
-            None
-        };
-
-        let operand2 = match special_case {
-            true => Some(CmplogOperandType::Imm(0)),
-            false => {
-                if let Arm64Operand(arm64operand2) = &operands[1] {
-                    match arm64operand2.op_type {
-                        Arm64OperandType::Reg(regid) => Some(CmplogOperandType::Regid(regid)),
-                        Arm64OperandType::Imm(val) => Some(CmplogOperandType::Imm(val as u64)),
-                        Arm64OperandType::Mem(opmem) => Some(CmplogOperandType::Mem(
-                            opmem.base(),
-                            opmem.index(),
-                            opmem.disp(),
-                            self.instruction_width(instr, &operands),
-                        )),
-                        Arm64OperandType::Cimm(val) => Some(CmplogOperandType::Cimm(val as u64)),
-                        _ => return Err(()),
-                    }
-                } else {
-                    None
-                }
-            }
-        };
-
-        // tbz will need to have special handling at emit time(masking operand1 value with operand2)
-        let special_case = match instr.mnemonic().unwrap() {
-            "tbz" => Some(SpecialCmpLogCase::Tbz),
-            "tbnz" => Some(SpecialCmpLogCase::Tbnz),
-            _ => None,
-        };
-
-        if operand1.is_some() && operand2.is_some() {
-            Ok((operand1.unwrap(), operand2.unwrap(), special_case))
-        } else {
-            Err(())
+    #[allow(clippy::unused_self)]
+    pub fn writer_register(reg: RegId) -> X86Register {
+        let regint: u16 = reg.0;
+        match regint {
+            19 => X86Register::Eax,
+            22 => X86Register::Ecx,
+            24 => X86Register::Edx,
+            21 => X86Register::Ebx,
+            30 => X86Register::Esp,
+            20 => X86Register::Ebp,
+            29 => X86Register::Esi,
+            23 => X86Register::Edi,
+            226 => X86Register::R8d,
+            227 => X86Register::R9d,
+            228 => X86Register::R10d,
+            229 => X86Register::R11d,
+            230 => X86Register::R12d,
+            231 => X86Register::R13d,
+            232 => X86Register::R14d,
+            233 => X86Register::R15d,
+            26 => X86Register::Eip,
+            35 => X86Register::Rax,
+            38 => X86Register::Rcx,
+            40 => X86Register::Rdx,
+            37 => X86Register::Rbx,
+            44 => X86Register::Rsp,
+            36 => X86Register::Rbp,
+            43 => X86Register::Rsi,
+            39 => X86Register::Rdi,
+            106 => X86Register::R8,
+            107 => X86Register::R9,
+            108 => X86Register::R10,
+            109 => X86Register::R11,
+            110 => X86Register::R12,
+            111 => X86Register::R13,
+            112 => X86Register::R14,
+            113 => X86Register::R15,
+            41 => X86Register::Rip,
+            _ => X86Register::None, // Ignore Xax..Xip
         }
     }
 }
diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs
index 6973e7f8c0..4c1d8f805c 100644
--- a/libafl_frida/src/lib.rs
+++ b/libafl_frida/src/lib.rs
@@ -6,12 +6,9 @@ It can report coverage and, on supported architecutres, even reports memory acce
 /// The frida-asan allocator
 #[cfg(unix)]
 pub mod alloc;
-/// Handling of ASAN errors
+
 #[cfg(unix)]
-pub mod asan_errors;
-/// The frida address sanitizer runtime
-#[cfg(unix)]
-pub mod asan_rt;
+pub mod asan;
 pub mod coverage_rt;
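
The lib.rs change above collapses the two crate-root ASAN modules into a single `asan` module. For downstream code the visible effect is the import path; a sketch of the migration (the submodule names `errors` and `asan_rt` are assumed from the renamed files, not spelled out in this hunk):

    // Before: both modules lived at the crate root.
    // use libafl_frida::asan_errors::AsanErrorsObserver;
    // use libafl_frida::asan_rt::AsanRuntime;

    // After: both hang off the new `asan` module.
    #[cfg(unix)]
    use libafl_frida::asan::{asan_rt::AsanRuntime, errors::AsanErrorsObserver};
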