frida-asan: move to mmap-rs (#1570)

s1341 2023-09-28 16:35:54 +03:00 committed by GitHub
parent 9c3f8f4511
commit 78fd4e0d39
2 changed files with 97 additions and 111 deletions

File 1 of 2:

@@ -72,6 +72,7 @@ num-traits = "0.2"
 ahash = "0.8"
 paste = "1.0"
 log = "0.4.20"
+mmap-rs = "0.6.0"

 [dev-dependencies]
 serial_test = { version = "2", default-features = false, features = ["logging"] }
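For context on the dependency added above: the port replaces raw nix/libc mmap calls with mmap-rs's builder API. A minimal, self-contained sketch of that API, assuming mmap-rs 0.6 (the page-sized length and the byte write are purely illustrative):

    // A small anonymous read/write mapping through the builder API used in the diff below.
    use mmap_rs::MmapOptions;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // One page, mirroring the allocator's `page_size` field.
        let size = MmapOptions::page_size();
        let mut mapping = MmapOptions::new(size)?.map_mut()?;

        // MmapMut dereferences to a mutable byte slice, so it can be written directly.
        mapping[0] = 0xAA;
        println!("mapped {} bytes at {:p}", mapping.len(), mapping.as_ptr());
        Ok(())
    }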

File 2 of 2:

@@ -3,8 +3,7 @@
     target_vendor = "apple",
     all(target_arch = "aarch64", target_os = "android")
 ))]
-use std::io;
-use std::{collections::BTreeMap, ffi::c_void, num::NonZeroUsize};
+use std::{collections::BTreeMap, ffi::c_void};

 use backtrace::Backtrace;
 use frida_gum::{PageProtection, RangeDetails};
@@ -15,11 +14,8 @@ use libafl_bolts::cli::FuzzerOptions;
     target_vendor = "apple",
     all(target_arch = "aarch64", target_os = "android")
 ))]
-use libc::{sysconf, _SC_PAGESIZE};
-use nix::{
-    libc::memset,
-    sys::mman::{mmap, MapFlags, ProtFlags},
-};
+use mmap_rs::{MemoryAreas, MmapFlags, MmapMut, MmapOptions, ReservedMut};
+use nix::libc::memset;

 use rangemap::RangeSet;
 use serde::{Deserialize, Serialize};
@@ -38,10 +34,12 @@ pub struct Allocator {
     shadow_offset: usize,
     /// The shadow bit
    shadow_bit: usize,
-    /// If the shadow is pre-allocated
-    pre_allocated_shadow: bool,
+    /// The reserved (pre-allocated) shadow mapping
+    pre_allocated_shadow_mappings: HashMap<(usize, usize), ReservedMut>,
     /// All tracked allocations
     allocations: HashMap<usize, AllocationMetadata>,
+    /// All mappings
+    mappings: HashMap<usize, MmapMut>,
     /// The shadow memory pages
     shadow_pages: RangeSet<usize>,
     /// A list of allocations
@@ -56,11 +54,6 @@ pub struct Allocator {
     current_mapping_addr: usize,
 }

-#[cfg(target_vendor = "apple")]
-const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
-#[cfg(not(target_vendor = "apple"))]
-const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
-
 macro_rules! map_to_shadow {
     ($self:expr, $address:expr) => {
         $self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
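The map_to_shadow! macro above turns an application address into its shadow address: shift right by three (one shadow byte covers eight application bytes), mask into the shadow window, and add the shadow offset. A worked sketch of that arithmetic; the shadow_bit/shadow_offset values are illustrative, the real ones are probed later in this diff:

    // Stand-alone version of the `map_to_shadow!` arithmetic, for illustration only.
    fn map_to_shadow(shadow_offset: usize, shadow_bit: usize, address: usize) -> usize {
        shadow_offset + ((address >> 3) & ((1usize << (shadow_bit + 1)) - 1))
    }

    fn main() {
        let shadow_bit = 36usize;            // illustrative; chosen by the probing loop further below
        let shadow_offset = 1 << shadow_bit; // the shadow region starts at 1 << shadow_bit
        let addr: usize = 0x7f00_dead_b000;
        println!("{addr:#x} -> shadow {:#x}", map_to_shadow(shadow_offset, shadow_bit, addr));
    }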
@@ -89,6 +82,7 @@ pub struct AllocationMetadata {
 impl Allocator {
     /// Creates a new [`Allocator`] (not supported on this platform!)
     #[cfg(not(any(
+        windows,
         target_os = "linux",
         target_vendor = "apple",
         all(target_arch = "aarch64", target_os = "android")
@@ -100,6 +94,7 @@ impl Allocator {
     /// Creates a new [`Allocator`]
     #[cfg(any(
+        windows,
         target_os = "linux",
         target_vendor = "apple",
         all(target_arch = "aarch64", target_os = "android")
@@ -181,29 +176,32 @@ impl Allocator {
             metadata
         } else {
             // log::trace!("{:x}, {:x}", self.current_mapping_addr, rounded_up_size);
-            let mapping = match mmap(
-                NonZeroUsize::new(self.current_mapping_addr),
-                NonZeroUsize::new_unchecked(rounded_up_size),
-                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                ANONYMOUS_FLAG
-                    | MapFlags::MAP_PRIVATE
-                    | MapFlags::MAP_FIXED
-                    | MapFlags::MAP_NORESERVE,
-                -1,
-                0,
-            ) {
-                Ok(mapping) => mapping as usize,
+            let mapping = match MmapOptions::new(rounded_up_size)
+                .unwrap()
+                .with_address(self.current_mapping_addr)
+                .map_mut()
+            {
+                Ok(mapping) => mapping,
                 Err(err) => {
                     log::error!("An error occurred while mapping memory: {err:?}");
                     return std::ptr::null_mut();
                 }
             };
-            self.current_mapping_addr += rounded_up_size;
+            self.current_mapping_addr += ((rounded_up_size
+                + MmapOptions::allocation_granularity())
+                / MmapOptions::allocation_granularity())
+                * MmapOptions::allocation_granularity();

-            self.map_shadow_for_region(mapping, mapping + rounded_up_size, false);
+            self.map_shadow_for_region(
+                mapping.as_ptr() as usize,
+                mapping.as_ptr().add(rounded_up_size) as usize,
+                false,
+            );
+
+            let address = mapping.as_ptr() as usize;
+            self.mappings.insert(address, mapping);

             let mut metadata = AllocationMetadata {
-                address: mapping,
+                address,
                 size,
                 actual_size: rounded_up_size,
                 ..AllocationMetadata::default()
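The new cursor bump above advances current_mapping_addr by rounded_up_size rounded to a multiple of MmapOptions::allocation_granularity() (typically 64 KiB on Windows, the page size elsewhere), so the next mapping request always lands on a granularity boundary. A small sketch of that arithmetic; the helper name and sample numbers are illustrative:

    use mmap_rs::MmapOptions;

    // Hypothetical helper mirroring the cursor-advance expression in the hunk above.
    fn next_mapping_addr(current: usize, rounded_up_size: usize) -> usize {
        let gran = MmapOptions::allocation_granularity();
        current + ((rounded_up_size + gran) / gran) * gran
    }

    fn main() {
        let gran = MmapOptions::allocation_granularity();
        let next = next_mapping_addr(0x10_0000_0000, 4096);
        // The starting address is granularity-aligned, so the cursor stays aligned.
        assert_eq!(next % gran, 0);
        println!("granularity = {gran:#x}, next mapping cursor = {next:#x}");
    }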
@@ -223,8 +221,7 @@ impl Allocator {
         );
         let address = (metadata.address + self.page_size) as *mut c_void;

-        self.allocations
-            .insert(metadata.address + self.page_size, metadata);
+        self.allocations.insert(address as usize, metadata);
         // log::trace!("serving address: {:?}, size: {:x}", address, size);
         address
     }
@@ -373,10 +370,9 @@ impl Allocator {
         let shadow_mapping_start = map_to_shadow!(self, start);

-        if !self.pre_allocated_shadow {
-            let shadow_start = self.round_down_to_page(shadow_mapping_start);
-            let shadow_end =
-                self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
+        let shadow_start = self.round_down_to_page(shadow_mapping_start);
+        let shadow_end = self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
+        if self.pre_allocated_shadow_mappings.is_empty() {
             for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
                 /*
                 log::trace!(
@@ -384,20 +380,40 @@ impl Allocator {
                     range.start, range.end, self.page_size
                 );
                 */
-                unsafe {
-                    mmap(
-                        NonZeroUsize::new(range.start),
-                        NonZeroUsize::new(range.end - range.start).unwrap(),
-                        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                        ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
-                        -1,
-                        0,
-                    )
-                    .expect("An error occurred while mapping shadow memory");
-                }
+                let mapping = MmapOptions::new(range.end - range.start - 1)
+                    .unwrap()
+                    .with_address(range.start)
+                    .map_mut()
+                    .expect("An error occurred while mapping shadow memory");
+                self.mappings.insert(range.start, mapping);
             }

             self.shadow_pages.insert(shadow_start..shadow_end);
+        } else {
+            let mut new_shadow_mappings = Vec::new();
+            for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
+                for ((start, end), shadow_mapping) in &mut self.pre_allocated_shadow_mappings {
+                    if *start <= range.start && range.start < *start + shadow_mapping.len() {
+                        let mut start_mapping =
+                            shadow_mapping.split_off(range.start - *start).unwrap();
+                        let end_mapping = start_mapping
+                            .split_off(range.end - (range.start - *start))
+                            .unwrap();
+                        new_shadow_mappings.push(((range.end, *end), end_mapping));
+                        self.mappings
+                            .insert(range.start, start_mapping.try_into().unwrap());
+                        break;
+                    }
+                }
+            }
+            for new_shadow_mapping in new_shadow_mappings {
+                self.pre_allocated_shadow_mappings
+                    .insert(new_shadow_mapping.0, new_shadow_mapping.1);
+                self.shadow_pages
+                    .insert(new_shadow_mapping.0 .0..new_shadow_mapping.0 .1);
+            }
         }

         // log::trace!("shadow_mapping_start: {:x}, shadow_size: {:x}", shadow_mapping_start, (end - start) / 8);
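Both branches above lean on RangeSet::gaps() from the rangemap crate (already a dependency) to find the parts of the requested shadow window that shadow_pages does not yet cover. A minimal sketch of that pattern, with illustrative addresses:

    use rangemap::RangeSet;

    fn main() {
        let mut shadow_pages: RangeSet<usize> = RangeSet::new();
        shadow_pages.insert(0x1000..0x3000); // shadow already mapped for this range

        // Only the uncovered part of the query window is reported.
        for gap in shadow_pages.gaps(&(0x2000..0x5000)) {
            println!("still need shadow for {:#x}..{:#x}", gap.start, gap.end);
        }

        // Once the gaps are mapped, record the whole window as covered.
        shadow_pages.insert(0x2000..0x5000);
    }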
@@ -438,7 +454,7 @@ impl Allocator {
         if range.protection() as u32 & PageProtection::ReadWrite as u32 != 0 {
             let start = range.memory_range().base_address().0 as usize;
             let end = start + range.memory_range().size();
-            if self.pre_allocated_shadow && start == 1 << self.shadow_bit {
+            if !self.pre_allocated_shadow_mappings.is_empty() && start == 1 << self.shadow_bit {
                 return true;
             }
             self.map_shadow_for_region(start, end, true);
@@ -461,31 +477,28 @@ impl Allocator {
         let mut userspace_max: usize = 0;

         // Enumerate memory ranges that are already occupied.
-        for prot in [
-            PageProtection::Read,
-            PageProtection::Write,
-            PageProtection::Execute,
-        ] {
-            RangeDetails::enumerate_with_prot(prot, &mut |details| {
-                let start = details.memory_range().base_address().0 as usize;
-                let end = start + details.memory_range().size();
+        for area in MemoryAreas::open(None).unwrap() {
+            let start = area.as_ref().unwrap().start();
+            let end = area.unwrap().end();
             occupied_ranges.push((start, end));
             log::trace!("{:x} {:x}", start, end);
             let base: usize = 2;
             // On x64, if end > 2**48, then that's in vsyscall or something.
-            #[cfg(target_arch = "x86_64")]
+            #[cfg(all(unix, target_arch = "x86_64"))]
             if end <= base.pow(48) && end > userspace_max {
                 userspace_max = end;
             }

-            // On x64, if end > 2**52, then range is not in userspace
+            #[cfg(all(not(unix), target_arch = "x86_64"))]
+            if (end >> 3) <= base.pow(44) && (end >> 3) > userspace_max {
+                userspace_max = end >> 3;
+            }
+
+            // On aarch64, if end > 2**52, then range is not in userspace
             #[cfg(target_arch = "aarch64")]
             if end <= base.pow(52) && end > userspace_max {
                 userspace_max = end;
             }
-            true
-            });
         }

         let mut maxbit = 0;
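The occupied-range scan in the hunk above now comes from mmap-rs rather than frida-gum's RangeDetails: MemoryAreas::open(None) iterates every mapping of the current process, yielding Result<MemoryArea> items. A minimal sketch, assuming mmap-rs 0.6:

    use mmap_rs::MemoryAreas;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        for area in MemoryAreas::open(None)? {
            let area = area?;
            println!("{:#x}..{:#x}", area.start(), area.end());
        }
        Ok(())
    }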
@@ -498,7 +511,7 @@ impl Allocator {
         }

         {
-            for try_shadow_bit in &[maxbit - 4, maxbit - 3, maxbit - 2] {
+            for try_shadow_bit in &[maxbit, maxbit - 4, maxbit - 3, maxbit - 2] {
                 let addr: usize = 1 << try_shadow_bit;
                 let shadow_start = addr;
                 let shadow_end = addr + addr + addr;
@@ -512,48 +525,27 @@ impl Allocator {
                     }
                 }

-                if unsafe {
-                    mmap(
-                        NonZeroUsize::new(addr),
-                        NonZeroUsize::new_unchecked(self.page_size),
-                        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                        MapFlags::MAP_PRIVATE
-                            | ANONYMOUS_FLAG
-                            | MapFlags::MAP_FIXED
-                            | MapFlags::MAP_NORESERVE,
-                        -1,
-                        0,
-                    )
-                }
-                .is_ok()
+                if let Ok(mapping) = MmapOptions::new(1 << (*try_shadow_bit + 1))
+                    .unwrap()
+                    .with_flags(MmapFlags::NO_RESERVE)
+                    .with_address(addr)
+                    .reserve_mut()
                 {
                     shadow_bit = (*try_shadow_bit).try_into().unwrap();
+                    log::warn!("shadow_bit {shadow_bit:x} is suitable");
+                    self.pre_allocated_shadow_mappings
+                        .insert((addr, (addr + (1 << shadow_bit))), mapping);
                     break;
                 }
             }
         }

-        log::warn!("shadow_bit {shadow_bit:x} is suitable");
         // assert!(shadow_bit != 0);
         // attempt to pre-map the entire shadow-memory space

         let addr: usize = 1 << shadow_bit;
-        let pre_allocated_shadow = unsafe {
-            mmap(
-                NonZeroUsize::new(addr),
-                NonZeroUsize::new_unchecked(addr + addr),
-                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                ANONYMOUS_FLAG
-                    | MapFlags::MAP_FIXED
-                    | MapFlags::MAP_PRIVATE
-                    | MapFlags::MAP_NORESERVE,
-                -1,
-                0,
-            )
-        }
-        .is_ok();
-        self.pre_allocated_shadow = pre_allocated_shadow;
         self.shadow_offset = 1 << shadow_bit;
         self.shadow_bit = shadow_bit;
         self.base_mapping_addr = addr + addr + addr;
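The probe in the hunk above reserves (rather than commits) a large NO_RESERVE region at each candidate shadow base; the first reservation that succeeds both proves the shadow_bit is usable and holds the address range so nothing else can map into it. A sketch of that pattern; the candidate bits and sizes are illustrative:

    use mmap_rs::{MmapFlags, MmapOptions, ReservedMut};

    // Try to reserve `size` bytes of address space at `addr` without committing memory.
    fn try_reserve_shadow(addr: usize, size: usize) -> Option<ReservedMut> {
        MmapOptions::new(size)
            .ok()?
            .with_flags(MmapFlags::NO_RESERVE)
            .with_address(addr)
            .reserve_mut()
            .ok()
    }

    fn main() {
        for try_shadow_bit in [46usize, 36, 37, 38] {
            let addr = 1usize << try_shadow_bit;
            if let Some(reservation) = try_reserve_shadow(addr, 1 << (try_shadow_bit + 1)) {
                println!("shadow_bit {try_shadow_bit} is usable ({} bytes reserved)", reservation.len());
                break;
            }
        }
    }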
@@ -564,6 +556,7 @@ impl Allocator {
 impl Default for Allocator {
     /// Creates a new [`Allocator`] (not supported on this platform!)
     #[cfg(not(any(
+        windows,
         target_os = "linux",
         target_vendor = "apple",
         all(target_arch = "aarch64", target_os = "android")
@@ -572,17 +565,8 @@ impl Default for Allocator {
         todo!("Shadow region not yet supported for this platform!");
     }

-    #[allow(clippy::too_many_lines)]
     fn default() -> Self {
-        let ret = unsafe { sysconf(_SC_PAGESIZE) };
-        assert!(
-            ret >= 0,
-            "Failed to read pagesize {:?}",
-            io::Error::last_os_error()
-        );
-        #[allow(clippy::cast_sign_loss)]
-        let page_size = ret as usize;
+        let page_size = MmapOptions::page_size();

         Self {
             max_allocation: 1 << 30,
@@ -590,7 +574,8 @@ impl Default for Allocator {
             max_total_allocation: 1 << 32,
             allocation_backtraces: false,
             page_size,
-            pre_allocated_shadow: false,
+            pre_allocated_shadow_mappings: HashMap::new(),
+            mappings: HashMap::new(),
             shadow_offset: 0,
             shadow_bit: 0,
             allocations: HashMap::new(),