frida-asan: move to mmap-rs (#1570)

Author: s1341, 2023-09-28 16:35:54 +03:00 (committed by GitHub)
parent 9c3f8f4511
commit 78fd4e0d39
2 changed files with 97 additions and 111 deletions


@@ -72,6 +72,7 @@ num-traits = "0.2"
ahash = "0.8"
paste = "1.0"
log = "0.4.20"
mmap-rs = "0.6.0"
[dev-dependencies]
serial_test = { version = "2", default-features = false, features = ["logging"] }
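
Note (not part of the diff): the new mmap-rs dependency replaces the direct nix::sys::mman::mmap calls in the allocator below with a builder-style, RAII API. A minimal sketch of that API, assuming mmap-rs 0.6 as pinned above; the demo function is illustrative and error handling is trimmed:

    use mmap_rs::MmapOptions;

    fn demo() -> Result<(), mmap_rs::Error> {
        // Allocate one page of anonymous, read/write memory.
        let mut map = MmapOptions::new(MmapOptions::page_size())?.map_mut()?;
        // MmapMut dereferences to a mutable byte slice; the region is
        // unmapped automatically when `map` is dropped.
        map[0] = 0xAA;
        Ok(())
    }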


@@ -3,8 +3,7 @@
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
use std::io;
use std::{collections::BTreeMap, ffi::c_void, num::NonZeroUsize};
use std::{collections::BTreeMap, ffi::c_void};
use backtrace::Backtrace;
use frida_gum::{PageProtection, RangeDetails};
@@ -15,11 +14,8 @@ use libafl_bolts::cli::FuzzerOptions;
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
use libc::{sysconf, _SC_PAGESIZE};
use nix::{
libc::memset,
sys::mman::{mmap, MapFlags, ProtFlags},
};
use mmap_rs::{MemoryAreas, MmapFlags, MmapMut, MmapOptions, ReservedMut};
use nix::libc::memset;
use rangemap::RangeSet;
use serde::{Deserialize, Serialize};
@@ -38,10 +34,12 @@ pub struct Allocator {
shadow_offset: usize,
/// The shadow bit
shadow_bit: usize,
/// If the shadow is pre-allocated
pre_allocated_shadow: bool,
/// The reserved (pre-allocated) shadow mapping
pre_allocated_shadow_mappings: HashMap<(usize, usize), ReservedMut>,
/// All tracked allocations
allocations: HashMap<usize, AllocationMetadata>,
/// All mappings
mappings: HashMap<usize, MmapMut>,
/// The shadow memory pages
shadow_pages: RangeSet<usize>,
/// A list of allocations
@@ -56,11 +54,6 @@ pub struct Allocator {
current_mapping_addr: usize,
}
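
Note (not part of the diff): the single pre_allocated_shadow flag becomes owned mmap-rs handles, because mmap-rs mappings are released when their handle is dropped. A rough sketch of the two handle types the struct now holds, assuming mmap-rs 0.6 semantics; handles() is a made-up helper:

    use mmap_rs::{MmapMut, MmapOptions, ReservedMut};

    fn handles() -> (MmapMut, ReservedMut) {
        // A committed, readable/writable mapping.
        let committed: MmapMut = MmapOptions::new(MmapOptions::page_size())
            .unwrap()
            .map_mut()
            .unwrap();
        // Address space that is only reserved, not yet committed; it can
        // later be split up and converted into real mappings.
        let reserved: ReservedMut = MmapOptions::new(16 * MmapOptions::page_size())
            .unwrap()
            .reserve_mut()
            .unwrap();
        (committed, reserved)
    }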
#[cfg(target_vendor = "apple")]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(target_vendor = "apple"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
macro_rules! map_to_shadow {
($self:expr, $address:expr) => {
$self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
@@ -89,6 +82,7 @@ pub struct AllocationMetadata {
impl Allocator {
/// Creates a new [`Allocator`] (not supported on this platform!)
#[cfg(not(any(
windows,
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
@@ -100,6 +94,7 @@ impl Allocator {
/// Creates a new [`Allocator`]
#[cfg(any(
windows,
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
@@ -181,29 +176,32 @@ impl Allocator {
metadata
} else {
// log::trace!("{:x}, {:x}", self.current_mapping_addr, rounded_up_size);
let mapping = match mmap(
NonZeroUsize::new(self.current_mapping_addr),
NonZeroUsize::new_unchecked(rounded_up_size),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
) {
Ok(mapping) => mapping as usize,
let mapping = match MmapOptions::new(rounded_up_size)
.unwrap()
.with_address(self.current_mapping_addr)
.map_mut()
{
Ok(mapping) => mapping,
Err(err) => {
log::error!("An error occurred while mapping memory: {err:?}");
return std::ptr::null_mut();
}
};
self.current_mapping_addr += rounded_up_size;
self.current_mapping_addr += ((rounded_up_size
+ MmapOptions::allocation_granularity())
/ MmapOptions::allocation_granularity())
* MmapOptions::allocation_granularity();
self.map_shadow_for_region(mapping, mapping + rounded_up_size, false);
self.map_shadow_for_region(
mapping.as_ptr() as usize,
mapping.as_ptr().add(rounded_up_size) as usize,
false,
);
let address = mapping.as_ptr() as usize;
self.mappings.insert(address, mapping);
let mut metadata = AllocationMetadata {
address: mapping,
address,
size,
actual_size: rounded_up_size,
..AllocationMetadata::default()
@@ -223,8 +221,7 @@ impl Allocator {
);
let address = (metadata.address + self.page_size) as *mut c_void;
self.allocations
.insert(metadata.address + self.page_size, metadata);
self.allocations.insert(address as usize, metadata);
// log::trace!("serving address: {:?}, size: {:x}", address, size);
address
}
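
Note (not part of the diff): unlike the raw mmap call it replaces, an MmapMut unmaps its region on drop, so the new code inserts every mapping into self.mappings to keep it alive. A hedged sketch of that pattern; map_at and its signature are illustrative:

    use std::collections::HashMap;

    use mmap_rs::{MmapMut, MmapOptions};

    fn map_at(addr: usize, size: usize, mappings: &mut HashMap<usize, MmapMut>) -> Option<usize> {
        // Request a read/write mapping at (or near) the given address.
        let mapping = MmapOptions::new(size).ok()?.with_address(addr).map_mut().ok()?;
        let base = mapping.as_ptr() as usize;
        // Store the handle: dropping it would unmap the region.
        mappings.insert(base, mapping);
        Some(base)
    }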
@@ -373,10 +370,9 @@ impl Allocator {
let shadow_mapping_start = map_to_shadow!(self, start);
if !self.pre_allocated_shadow {
let shadow_start = self.round_down_to_page(shadow_mapping_start);
let shadow_end =
self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
let shadow_end = self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
if self.pre_allocated_shadow_mappings.is_empty() {
for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
/*
log::trace!(
@@ -384,20 +380,40 @@ impl Allocator {
range.start, range.end, self.page_size
);
*/
unsafe {
mmap(
NonZeroUsize::new(range.start),
NonZeroUsize::new(range.end - range.start).unwrap(),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
-1,
0,
)
let mapping = MmapOptions::new(range.end - range.start - 1)
.unwrap()
.with_address(range.start)
.map_mut()
.expect("An error occurred while mapping shadow memory");
}
self.mappings.insert(range.start, mapping);
}
self.shadow_pages.insert(shadow_start..shadow_end);
} else {
let mut new_shadow_mappings = Vec::new();
for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
for ((start, end), shadow_mapping) in &mut self.pre_allocated_shadow_mappings {
if *start <= range.start && range.start < *start + shadow_mapping.len() {
let mut start_mapping =
shadow_mapping.split_off(range.start - *start).unwrap();
let end_mapping = start_mapping
.split_off(range.end - (range.start - *start))
.unwrap();
new_shadow_mappings.push(((range.end, *end), end_mapping));
self.mappings
.insert(range.start, start_mapping.try_into().unwrap());
break;
}
}
}
for new_shadow_mapping in new_shadow_mappings {
self.pre_allocated_shadow_mappings
.insert(new_shadow_mapping.0, new_shadow_mapping.1);
self.shadow_pages
.insert(new_shadow_mapping.0 .0..new_shadow_mapping.0 .1);
}
}
// log::trace!("shadow_mapping_start: {:x}, shadow_size: {:x}", shadow_mapping_start, (end - start) / 8);
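
Note (not part of the diff): when the shadow was pre-reserved, the code above carves the needed page range out of the ReservedMut with split_off and converts it to a live mapping with try_into. A sketch assuming Vec-like split_off semantics (self keeps the head, the tail is returned); commit_middle and its offsets are illustrative:

    use mmap_rs::{MmapMut, ReservedMut};

    fn commit_middle(reserved: &mut ReservedMut, offset: usize, len: usize) -> (MmapMut, ReservedMut) {
        // After this call `reserved` keeps [0, offset) and `middle` holds the tail.
        let mut middle = reserved.split_off(offset).unwrap();
        // Trim `middle` down to `len` bytes; `rest` keeps the remainder reserved.
        let rest = middle.split_off(len).unwrap();
        // Convert the reserved slice into an actual read/write mapping.
        (middle.try_into().unwrap(), rest)
    }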
@@ -438,7 +454,7 @@ impl Allocator {
if range.protection() as u32 & PageProtection::ReadWrite as u32 != 0 {
let start = range.memory_range().base_address().0 as usize;
let end = start + range.memory_range().size();
if self.pre_allocated_shadow && start == 1 << self.shadow_bit {
if !self.pre_allocated_shadow_mappings.is_empty() && start == 1 << self.shadow_bit {
return true;
}
self.map_shadow_for_region(start, end, true);
@@ -461,31 +477,28 @@ impl Allocator {
let mut userspace_max: usize = 0;
// Enumerate memory ranges that are already occupied.
for prot in [
PageProtection::Read,
PageProtection::Write,
PageProtection::Execute,
] {
RangeDetails::enumerate_with_prot(prot, &mut |details| {
let start = details.memory_range().base_address().0 as usize;
let end = start + details.memory_range().size();
for area in MemoryAreas::open(None).unwrap() {
let start = area.as_ref().unwrap().start();
let end = area.unwrap().end();
occupied_ranges.push((start, end));
log::trace!("{:x} {:x}", start, end);
let base: usize = 2;
// On x64, if end > 2**48, then that's in vsyscall or something.
#[cfg(target_arch = "x86_64")]
#[cfg(all(unix, target_arch = "x86_64"))]
if end <= base.pow(48) && end > userspace_max {
userspace_max = end;
}
// On x64, if end > 2**52, then range is not in userspace
#[cfg(all(not(unix), target_arch = "x86_64"))]
if (end >> 3) <= base.pow(44) && (end >> 3) > userspace_max {
userspace_max = end >> 3;
}
// On aarch64, if end > 2**52, then range is not in userspace
#[cfg(target_arch = "aarch64")]
if end <= base.pow(52) && end > userspace_max {
userspace_max = end;
}
true
});
}
let mut maxbit = 0;
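
Note (not part of the diff): occupied address ranges are now read from the OS via mmap_rs::MemoryAreas rather than frida_gum::RangeDetails::enumerate_with_prot. A minimal sketch of that iteration; occupied_ranges is a hypothetical helper and error handling is simplified:

    use mmap_rs::MemoryAreas;

    fn occupied_ranges() -> Vec<(usize, usize)> {
        let mut ranges = Vec::new();
        // `None` selects the current process; each item is a Result<MemoryArea, _>.
        for area in MemoryAreas::open(None).unwrap() {
            let area = area.unwrap();
            ranges.push((area.start(), area.end()));
        }
        ranges
    }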
@@ -498,7 +511,7 @@ impl Allocator {
}
{
for try_shadow_bit in &[maxbit - 4, maxbit - 3, maxbit - 2] {
for try_shadow_bit in &[maxbit, maxbit - 4, maxbit - 3, maxbit - 2] {
let addr: usize = 1 << try_shadow_bit;
let shadow_start = addr;
let shadow_end = addr + addr + addr;
@@ -512,48 +525,27 @@ impl Allocator {
}
}
if unsafe {
mmap(
NonZeroUsize::new(addr),
NonZeroUsize::new_unchecked(self.page_size),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_PRIVATE
| ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok()
if let Ok(mapping) = MmapOptions::new(1 << (*try_shadow_bit + 1))
.unwrap()
.with_flags(MmapFlags::NO_RESERVE)
.with_address(addr)
.reserve_mut()
{
shadow_bit = (*try_shadow_bit).try_into().unwrap();
log::warn!("shadow_bit {shadow_bit:x} is suitable");
self.pre_allocated_shadow_mappings
.insert((addr, (addr + (1 << shadow_bit))), mapping);
break;
}
}
}
log::warn!("shadow_bit {shadow_bit:x} is suitable");
// assert!(shadow_bit != 0);
// attempt to pre-map the entire shadow-memory space
let addr: usize = 1 << shadow_bit;
let pre_allocated_shadow = unsafe {
mmap(
NonZeroUsize::new(addr),
NonZeroUsize::new_unchecked(addr + addr),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok();
self.pre_allocated_shadow = pre_allocated_shadow;
self.shadow_offset = 1 << shadow_bit;
self.shadow_bit = shadow_bit;
self.base_mapping_addr = addr + addr + addr;
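
Note (not part of the diff): instead of eagerly mapping the whole shadow space, the loop above only reserves it (with NO_RESERVE) via reserve_mut and stores the reservation in pre_allocated_shadow_mappings. A sketch of that probe; try_reserve_shadow is an illustrative helper:

    use mmap_rs::{MmapFlags, MmapOptions, ReservedMut};

    fn try_reserve_shadow(try_shadow_bit: usize) -> Option<ReservedMut> {
        let addr: usize = 1 << try_shadow_bit;
        // Reserve 2 * addr bytes of address space at `addr` without committing
        // any backing memory (the surrounding loop first checks the range is free).
        MmapOptions::new(1 << (try_shadow_bit + 1))
            .ok()?
            .with_flags(MmapFlags::NO_RESERVE)
            .with_address(addr)
            .reserve_mut()
            .ok()
    }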
@@ -564,6 +556,7 @@ impl Allocator {
impl Default for Allocator {
/// Creates a new [`Allocator`] (not supported on this platform!)
#[cfg(not(any(
windows,
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
@@ -572,17 +565,8 @@ impl Default for Allocator {
todo!("Shadow region not yet supported for this platform!");
}
#[allow(clippy::too_many_lines)]
fn default() -> Self {
let ret = unsafe { sysconf(_SC_PAGESIZE) };
assert!(
ret >= 0,
"Failed to read pagesize {:?}",
io::Error::last_os_error()
);
#[allow(clippy::cast_sign_loss)]
let page_size = ret as usize;
let page_size = MmapOptions::page_size();
Self {
max_allocation: 1 << 30,
@@ -590,7 +574,8 @@ impl Default for Allocator {
max_total_allocation: 1 << 32,
allocation_backtraces: false,
page_size,
pre_allocated_shadow: false,
pre_allocated_shadow_mappings: HashMap::new(),
mappings: HashMap::new(),
shadow_offset: 0,
shadow_bit: 0,
allocations: HashMap::new(),