MacOS frida ASAN fix (#2510)

* Add asan fix

* fmt

* ASAN linux fix

* Revert fmt

* Fix libafl_cc

* do fmt

* revert libaflcc

* clippy + fmt

* clippy

* change assert

* fix unpoison + other issues

* fmt

* format toml

* explore submaps

* fmt
Sharad Khanna 2024-09-24 00:56:27 -04:00 committed by GitHub
parent 72893797b4
commit 629a560f3b
4 changed files with 120 additions and 77 deletions


@@ -104,6 +104,8 @@ document-features = { version = "0.2", optional = true } # Document all features
 [target.'cfg(windows)'.dependencies]
 winsafe = { version = "0.0.21", features = ["kernel"] }
+[target.'cfg(target_vendor="apple")'.dependencies]
+mach-sys = { version = "0.5.4" }
 [dev-dependencies]
 serial_test = { version = "3", default-features = false, features = [
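
The new mach-sys dependency provides raw bindings to the Mach kernel APIs used by the allocator changes below (mach_task_self, mach_vm_region_recurse, and the vm_region types). A minimal, hedged sketch to confirm the bindings link on a macOS target - only calls that actually appear in this commit are used:

use mach_sys::traps::mach_task_self;

fn main() {
    // mach_task_self() returns the Mach port for the calling task; the
    // allocator below passes this port to mach_vm_region_recurse().
    let task = unsafe { mach_task_self() };
    println!("task port: {task}");
}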


@@ -1,3 +1,5 @@
+#[cfg(target_vendor = "apple")]
+use std::ptr::addr_of_mut;
 #[cfg(any(
     windows,
     target_os = "linux",
@@ -13,6 +15,16 @@ use backtrace::Backtrace;
 use frida_gum::{PageProtection, RangeDetails};
 use hashbrown::HashMap;
 use libafl_bolts::cli::FuzzerOptions;
+#[cfg(target_vendor = "apple")]
+use mach_sys::{
+    kern_return::KERN_SUCCESS,
+    message::mach_msg_type_number_t,
+    traps::mach_task_self,
+    vm::mach_vm_region_recurse,
+    vm_prot::VM_PROT_READ,
+    vm_region::{vm_region_recurse_info_t, vm_region_submap_info_64},
+    vm_types::{mach_vm_address_t, mach_vm_size_t, natural_t},
+};
 #[cfg(any(
     windows,
     target_os = "linux",
@@ -28,6 +40,9 @@ use serde::{Deserialize, Serialize};
 use crate::asan::errors::{AsanError, AsanErrors};
+
+#[cfg(target_vendor = "apple")]
+const VM_REGION_SUBMAP_INFO_COUNT_64: mach_msg_type_number_t = 19;
 /// An allocator wrapper with binary-only address sanitization
 #[derive(Debug)]
 pub struct Allocator {
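
About the hardcoded 19: the Mach headers define VM_REGION_SUBMAP_INFO_COUNT_64 as the size of the submap-info struct counted in 32-bit natural_t words, which tells the kernel which revision of the struct the caller expects. A hedged sketch of that derivation - pinning the literal instead, as the patch does, avoids depending on the exact struct revision the crate ships:

use core::mem::size_of;
use mach_sys::{vm_region::vm_region_submap_info_64, vm_types::natural_t};

fn main() {
    // "struct size in 32-bit words", as the C headers compute it; this may
    // differ from 19 if the crate's struct tracks a newer header revision.
    let derived = size_of::<vm_region_submap_info_64>() / size_of::<natural_t>();
    println!("derived count: {derived}");
}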
@@ -236,7 +251,11 @@ impl Allocator {
         let address = (metadata.address + self.page_size) as *mut c_void;
         self.allocations.insert(address as usize, metadata);
-        log::trace!("serving address: {:?}, size: {:x}", address, size);
+        log::trace!(
+            "serving address: {:#x}, size: {:#x}",
+            address as usize,
+            size
+        );
         address
     }
@@ -478,14 +497,11 @@ impl Allocator {
         //4. The aligned check is where the address and the size is 8 byte aligned. Use check_shadow_aligned to check it
         //5. The post-alignment is the same as pre-alignment except it is the qword following the aligned portion. Use a specialized check to ensure that [end & ~7, end) is valid.
-        if size == 0
-        /*|| !self.is_managed(address as *mut c_void)*/
-        {
+        if size == 0 {
             return true;
         }
         if !self.is_managed(address as *mut c_void) {
-            log::trace!("unmanaged address to check_shadow: {:?}, {size:x}", address);
             return true;
         }
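
The numbered comments above split an arbitrary [address, address + size) check into an unaligned head, an 8-byte-aligned body, and an unaligned tail. A hedged, self-contained sketch of that decomposition (names are illustrative, not the crate's):

/// Split [address, address + size) into head (up to the next 8-byte
/// boundary), aligned body (checked qword-at-a-time), and tail.
fn split_for_shadow_check(address: usize, size: usize) -> [(usize, usize); 3] {
    let (start, end) = (address, address + size);
    let aligned_start = (start + 7) & !7; // round up to 8
    let aligned_end = end & !7; // round down to 8
    if aligned_start > aligned_end {
        // Tiny range inside a single qword: no aligned body or tail.
        return [(start, end), (0, 0), (0, 0)];
    }
    [(start, aligned_start), (aligned_start, aligned_end), (aligned_end, end)]
}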
@@ -544,11 +560,11 @@ impl Allocator {
         map_to_shadow!(self, start)
     }
-    /// Checks if the currennt address is one of ours
+    /// Checks if the current address is one of ours - is this address in the allocator region
     #[inline]
     pub fn is_managed(&self, ptr: *mut c_void) -> bool {
         //self.allocations.contains_key(&(ptr as usize))
-        self.shadow_offset <= ptr as usize && (ptr as usize) < self.current_mapping_addr
+        self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr
     }
     /// Checks if any of the allocations has not been freed
@@ -562,14 +578,70 @@ impl Allocator {
     }
     /// Unpoison all the memory that is currently mapped with read permissions.
+    #[cfg(target_vendor = "apple")]
+    pub fn unpoison_all_existing_memory(&mut self) {
+        let task = unsafe { mach_task_self() };
+        let mut address: mach_vm_address_t = 0;
+        let mut size: mach_vm_size_t = 0;
+        let mut depth: natural_t = 0;
+        loop {
+            let mut kr;
+            let mut info_count: mach_msg_type_number_t = VM_REGION_SUBMAP_INFO_COUNT_64;
+            let mut info = vm_region_submap_info_64::default();
+            loop {
+                kr = unsafe {
+                    mach_vm_region_recurse(
+                        task,
+                        addr_of_mut!(address),
+                        addr_of_mut!(size),
+                        addr_of_mut!(depth),
+                        addr_of_mut!(info) as vm_region_recurse_info_t,
+                        addr_of_mut!(info_count),
+                    )
+                };
+                if kr != KERN_SUCCESS {
+                    break;
+                }
+                if info.is_submap != 0 {
+                    depth += 1;
+                    continue;
+                } else {
+                    break;
+                }
+            }
+            if kr != KERN_SUCCESS {
+                break;
+            }
+            let start = address as usize;
+            let end = (address + size) as usize;
+            if info.protection & VM_PROT_READ == VM_PROT_READ {
+                //if its at least readable
+                if self.shadow_offset <= start && end <= self.current_mapping_addr {
+                    log::trace!("Reached the shadow/allocator region - skipping");
+                } else {
+                    log::trace!("Unpoisoning: {:#x}:{:#x}", address, address + size);
+                    self.map_shadow_for_region(start, end, true);
+                }
+            }
+            address += size;
+            size = 0;
+        }
+    }
+
+    #[cfg(not(target_vendor = "apple"))]
     pub fn unpoison_all_existing_memory(&mut self) {
         RangeDetails::enumerate_with_prot(
             PageProtection::Read,
             &mut |range: &RangeDetails| -> bool {
                 let start = range.memory_range().base_address().0 as usize;
                 let end = start + range.memory_range().size();
-                if self.is_managed(start as *mut c_void) {
-                    log::trace!("Not unpoisoning: {:#x}-{:#x}, is_managed", start, end);
+                if self.shadow_offset <= start && end <= self.current_mapping_addr {
+                    log::trace!("Reached the shadow/allocator region - skipping");
                 } else {
                     log::trace!("Unpoisoning: {:#x}-{:#x}", start, end);
                     self.map_shadow_for_region(start, end, true);
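
The Apple path replaces Frida's RangeDetails enumeration because macOS nests regions inside submaps (e.g. the dyld shared cache), and mach_vm_region_recurse only reports leaf regions if the caller descends: the inner loop above re-queries the same address with an incremented depth until is_submap is clear. A condensed, hedged sketch of just that walking pattern:

#[cfg(target_vendor = "apple")]
fn for_each_leaf_region(mut visit: impl FnMut(u64, u64, i32)) {
    use std::ptr::addr_of_mut;

    use mach_sys::{
        kern_return::KERN_SUCCESS,
        message::mach_msg_type_number_t,
        traps::mach_task_self,
        vm::mach_vm_region_recurse,
        vm_region::{vm_region_recurse_info_t, vm_region_submap_info_64},
        vm_types::{mach_vm_address_t, mach_vm_size_t, natural_t},
    };

    let task = unsafe { mach_task_self() };
    let mut addr: mach_vm_address_t = 0;
    let mut size: mach_vm_size_t = 0;
    let mut depth: natural_t = 0;
    loop {
        let mut count: mach_msg_type_number_t = 19; // VM_REGION_SUBMAP_INFO_COUNT_64
        let mut info = vm_region_submap_info_64::default();
        let kr = unsafe {
            mach_vm_region_recurse(
                task,
                addr_of_mut!(addr),
                addr_of_mut!(size),
                addr_of_mut!(depth),
                addr_of_mut!(info) as vm_region_recurse_info_t,
                addr_of_mut!(count),
            )
        };
        if kr != KERN_SUCCESS {
            break; // no more regions
        }
        if info.is_submap != 0 {
            depth += 1; // descend: re-query the same address one level deeper
            continue;
        }
        visit(addr, size, info.protection);
        addr += size; // advance past the leaf just reported
    }
}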


@@ -60,7 +60,7 @@ extern "C" {
     fn __register_frame(begin: *mut c_void);
 }
-#[cfg(not(target_os = "ios"))]
+#[cfg(not(target_vendor = "apple"))]
 extern "C" {
     fn tls_ptr() -> *const c_void;
 }
@@ -186,7 +186,6 @@ impl FridaRuntime for AsanRuntime {
         self.register_hooks(gum);
         self.generate_instrumentation_blobs();
         self.unpoison_all_existing_memory();
-
         self.register_thread();
     }
@@ -320,7 +319,7 @@ impl AsanRuntime {
     /// Register the current thread with the runtime, implementing shadow memory for its stack and
     /// tls mappings.
     #[allow(clippy::unused_self)]
-    #[cfg(not(target_os = "ios"))]
+    #[cfg(not(target_vendor = "apple"))]
     pub fn register_thread(&mut self) {
         let (stack_start, stack_end) = Self::current_stack();
         let (tls_start, tls_end) = Self::current_tls();
@@ -337,7 +336,7 @@
     /// Register the current thread with the runtime, implementing shadow memory for its stack mapping.
     #[allow(clippy::unused_self)]
-    #[cfg(target_os = "ios")]
+    #[cfg(target_vendor = "apple")]
     pub fn register_thread(&mut self) {
         let (stack_start, stack_end) = Self::current_stack();
         self.allocator
@@ -379,13 +378,17 @@ impl AsanRuntime {
     fn range_for_address(address: usize) -> (usize, usize) {
         let mut start = 0;
         let mut end = 0;
-        RangeDetails::enumerate_with_prot(PageProtection::NoAccess, &mut |range: &RangeDetails| {
+        RangeDetails::enumerate_with_prot(PageProtection::Read, &mut |range: &RangeDetails| {
             let range_start = range.memory_range().base_address().0 as usize;
             let range_end = range_start + range.memory_range().size();
             if range_start <= address && range_end >= address {
                 start = range_start;
                 end = range_end;
-                // I want to stop iteration here
+                return false;
+            }
+            if address < start {
+                //if the address is less than the start then we cannot find it
                 return false;
             }
             true
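
range_for_address now terminates the walk early: returning false from the enumeration closure stops frida-gum's iteration, either once the containing range is found or once (per the new comment's intent, since ranges arrive in ascending order) it can no longer appear. A hedged model of that control flow over a plain slice (enumerate_with_prot needs a live Gum instance, so the enumerator is simulated here):

// Models the closure convention above: keep iterating while the visitor
// returns true, stop on the first false.
fn range_for(ranges: &[(usize, usize)], address: usize) -> (usize, usize) {
    let (mut start, mut end) = (0, 0);
    for &(range_start, range_end) in ranges {
        if range_start <= address && address <= range_end {
            start = range_start;
            end = range_end;
            break; // the closure's first `return false`
        }
        if address < range_start {
            break; // ascending order: the address cannot appear later
        }
    }
    (start, end)
}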
@@ -410,51 +413,21 @@ impl AsanRuntime {
         let stack_address = addr_of_mut!(stack_var) as usize;
         // let range_details = RangeDetails::with_address(stack_address as u64).unwrap();
         // Write something to (hopefully) make sure the val isn't optimized out
         unsafe {
             write_volatile(&mut stack_var, 0xfadbeef);
         }
-        let mut range = None;
-        for area in mmap_rs::MemoryAreas::open(None).unwrap() {
-            let area_ref = area.as_ref().unwrap();
-            if area_ref.start() <= stack_address && stack_address <= area_ref.end() {
-                range = Some((area_ref.end() - 1024 * 1024, area_ref.end()));
-                break;
-            }
-        }
-        if let Some((start, end)) = range {
-            // #[cfg(unix)]
-            // {
-            //     let max_start = end - Self::max_stack_size();
-            //
-            //     let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE;
-            //     #[cfg(not(target_vendor = "apple"))]
-            //     let flags = flags | MapFlags::MAP_STACK;
-            //
-            //     if start != max_start {
-            //         let mapping = unsafe {
-            //             mmap(
-            //                 NonZeroUsize::new(max_start),
-            //                 NonZeroUsize::new(start - max_start).unwrap(),
-            //                 ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-            //                 flags,
-            //                 -1,
-            //                 0,
-            //             )
-            //         };
-            //         assert!(mapping.unwrap() as usize == max_start);
-            //     }
-            //     (max_start, end)
-            // }
-            // #[cfg(windows)]
-            (start, end)
-        } else {
-            panic!("Couldn't find stack mapping!");
-        }
+        let range = Self::range_for_address(stack_address);
+        assert_ne!(range.0, 0, "Couldn't find stack mapping!");
+        (range.1 - 1024 * 1024, range.1)
     }
     /// Determine the tls start, end for the currently running thread
     #[must_use]
-    #[cfg(not(target_os = "ios"))]
+    #[cfg(not(target_vendor = "apple"))]
     fn current_tls() -> (usize, usize) {
         let tls_address = unsafe { tls_ptr() } as usize;
@@ -514,18 +487,14 @@
                 //is this necessary? The stalked return address will always be the real return address
                 // let real_address = this.real_address_for_stalked(invocation.return_addr());
                 let original = [<$name:snake:upper _PTR>].get().unwrap();
-                if this.hooks_enabled {
-                    let previous_hook_state = this.hooks_enabled;
-                    this.hooks_enabled = false;
+                if !ASAN_IN_HOOK.get() && this.hooks_enabled {
+                    ASAN_IN_HOOK.set(true);
                     let ret = this.[<hook_ $name>](*original, $($param),*);
-                    this.hooks_enabled = previous_hook_state;
+                    ASAN_IN_HOOK.set(false);
                     ret
                 } else {
-                    let previous_hook_state = this.hooks_enabled;
-                    this.hooks_enabled = false;
                     let ret = (original)($($param),*);
-                    this.hooks_enabled = previous_hook_state;
                     ret
                 }
             }
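
The previous-hook-state juggling is replaced by ASAN_IN_HOOK, a reentrancy guard: while one hook body runs, any hooked function it calls internally falls through to the original. A hedged sketch of the pattern, assuming ASAN_IN_HOOK is a thread-local Cell<bool> (the commit only shows its get/set calls):

use std::cell::Cell;

thread_local! {
    // Stand-in for ASAN_IN_HOOK.
    static IN_HOOK: Cell<bool> = Cell::new(false);
}

// Run `hooked` unless a hook is already active on this thread; otherwise
// fall through to `original`, like the macro expansion above.
fn with_reentrancy_guard<R>(hooked: impl FnOnce() -> R, original: impl FnOnce() -> R) -> R {
    if IN_HOOK.with(Cell::get) {
        return original();
    }
    IN_HOOK.with(|f| f.set(true));
    let ret = hooked();
    IN_HOOK.with(|f| f.set(false));
    ret
}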
@@ -600,8 +569,9 @@
                 let mut invocation = Interceptor::current_invocation();
                 let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime);
                 let original = [<$name:snake:upper _PTR>].get().unwrap();
-                if !ASAN_IN_HOOK.get() && this.hooks_enabled && this.[<hook_check_ $name>]($($param),*){
+                //don't check if hooks are enabled as there are certain cases where we want to run the hook even if we are out of the program
+                //For example, sometimes libafl will allocate certain things during the run and free them after the run. This results in a bug where a buffer will come from libafl-frida alloc and be freed in the normal allocator.
+                if !ASAN_IN_HOOK.get() && this.[<hook_check_ $name>]($($param),*){
                     ASAN_IN_HOOK.set(true);
                     let ret = this.[<hook_ $name>](*original, $($param),*);
                     ASAN_IN_HOOK.set(false);
@@ -639,8 +609,9 @@
                 let mut invocation = Interceptor::current_invocation();
                 let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime);
                 let original = [<$lib_ident:snake:upper _ $name:snake:upper _PTR>].get().unwrap();
-                if !ASAN_IN_HOOK.get() && this.hooks_enabled && this.[<hook_check_ $name>]($($param),*){
+                //don't check if hooks are enabled as there are certain cases where we want to run the hook even if we are out of the program
+                //For example, sometimes libafl will allocate certain things during the run and free them after the run. This results in a bug where a buffer will come from libafl-frida alloc and be freed in the normal allocator.
+                if !ASAN_IN_HOOK.get() && this.[<hook_check_ $name>]($($param),*){
                     ASAN_IN_HOOK.set(true);
                     let ret = this.[<hook_ $name>](*original, $($param),*);
                     ASAN_IN_HOOK.set(false);
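
The new comments explain dropping the hooks_enabled test here: memory served by the libafl-frida allocator while hooks were on must still be routed through the hook when it is freed later, even after hooks are switched off, or it would reach the system allocator. A hedged sketch of the kind of predicate a hook_check_ function could use for free (the crate's actual check may differ):

use core::ffi::c_void;

// Illustrative allocator window, mirroring is_managed from this commit.
struct Allocator {
    base_mapping_addr: usize,
    current_mapping_addr: usize,
}

impl Allocator {
    fn is_managed(&self, ptr: *mut c_void) -> bool {
        self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr
    }
}

// free() must be intercepted whenever the pointer is ours, independent of
// whether hooks are currently enabled.
fn should_hook_free(alloc: &Allocator, ptr: *mut c_void) -> bool {
    alloc.is_managed(ptr)
}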
@@ -1767,7 +1738,7 @@ impl AsanRuntime {
         ; add x1, x1, x0, lsr #3
         ; ubfx x1, x1, #0, #(shadow_bit + 1)
         ; mov x2, #1
-        ; add x1, x1, x2, lsl #shadow_bit
+        ; add x1, x1, x2, lsl #shadow_bit //x1 contains the offset of the shadow byte
         ; ldr w1, [x1, #0] //w1 contains our shadow check
         ; and x0, x0, #7 //x0 is the offset for unaligned accesses
         ; rev32 x1, x1
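
For reference, the shadow-byte address those instructions compute can be written out in plain Rust. A hedged transcription (shadow_base stands for the value x1 holds on entry, which this excerpt does not show):

//   add  x1, x1, x0, lsr #3            => shadow_base + (addr >> 3)
//   ubfx x1, x1, #0, #(shadow_bit + 1) => keep the low shadow_bit + 1 bits
//   mov x2, #1 ; add x1, x1, x2, lsl #shadow_bit => add the shadow-region bit
fn shadow_byte_address(addr: u64, shadow_base: u64, shadow_bit: u32) -> u64 {
    let summed = shadow_base.wrapping_add(addr >> 3);
    let masked = summed & ((1u64 << (shadow_bit + 1)) - 1);
    masked + (1u64 << shadow_bit)
}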


@@ -4,7 +4,8 @@
 #ifdef _MSC_VER
   #include <windows.h>
+  #include <winnt.h>
+  #include <winternl.h>
 BOOL APIENTRY DllMain(HANDLE hModule, DWORD ul_reason_for_call,
                       LPVOID lpReserved) {
   (void)hModule;
@@ -37,9 +38,6 @@ EXTERN int heap_uaf_write(const uint8_t *_data, size_t _size) {
   return 0;
 }
-  #include <winnt.h>
-  #include <winternl.h>
 static volatile bool stop = false;
 EXTERN int heap_oob_read(const uint8_t *_data, size_t _size) {