libafl_frida: Add tests for ASan for Unix platforms (#1781)
* First draft of the ASan tests. As of now, Unix-only. This is a WIP, because 1) destroying Gum causes a segmentation fault, so only a single test is supported, using a static Gum object; ideally this should be fixed and a new Gum instance created for each test, and 2) 70 identical errors are reported by ASan instead of a single one. Apart from that, the draft fixes a number of errors found in ASan.
* Fmt fixes
* PR comments addressed
* Not crashing upon ASan errors while testing
* More PR comments: removing env_logger, renaming harness to test_harness
* Revert "More PR comments: removing env_logger, renaming harness to test_harness". This reverts commit 2d3494b3f56e0a5ef23566cb9a884e8c57867b57.
* More PR comments: removing env_logger, renaming harness to test_harness
* Checking for clang presence and failing the test if the harness is not found
* Fmt
* Running multiple ASan tests
* Cpp Fmt
* clang-format
* More clippy complaints and Apple compilation
* Last clippy complaints (ran scripts/clippy.sh)
* Fixing unused MacOS function
* Fixing unused MacOS imports
parent aaeeead574, commit 6a72f8a1ad
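Note on the single-Gum limitation described above: the new test module keeps one process-wide Gum in a static OnceLock and funnels every ASan check through a single #[test] function. The serial_test crate in [dev-dependencies] offers another way to cope with shared global state. A hedged sketch, assuming the standard #[serial] attribute from serial_test (not something this diff itself applies):

```rust
// Sketch only: serializing tests that touch the shared, process-wide Gum.
// The test names are hypothetical; this PR instead keeps everything inside
// one #[test] (run_test_asan) that loops over the harness functions.
#[cfg(test)]
mod serial_sketch {
    use serial_test::serial;

    #[test]
    #[serial] // runs exclusively, so the static Gum is never used concurrently
    fn asan_heap_oob() { /* ... */ }

    #[test]
    #[serial] // starts only after the previous #[serial] test has finished
    fn asan_heap_uaf() { /* ... */ }
}
```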
@@ -84,3 +84,5 @@ yaxpeax-arch = "0.2.7"
 
 [dev-dependencies]
 serial_test = { version = "2", default-features = false, features = ["logging"] }
+clap = {version = "4.0", features = ["derive"]}
+libloading = "0.7"
@@ -9,4 +9,31 @@ fn main() {
     // Force linking against libc++
     #[cfg(unix)]
     println!("cargo:rustc-link-lib=dylib=c++");
+
+    // Build the test harness
+    // clang++ -shared -fPIC -O0 -o test_harness.so test_harness.cpp
+    #[cfg(unix)]
+    {
+        // Check if we have clang++ installed
+        let clangpp = std::process::Command::new("clang++")
+            .arg("--version")
+            .output();
+
+        match clangpp {
+            Ok(_) => {
+                std::process::Command::new("clang++")
+                    .arg("-shared")
+                    .arg("-fPIC")
+                    .arg("-O0")
+                    .arg("-o")
+                    .arg("test_harness.so")
+                    .arg("test_harness.cpp")
+                    .status()
+                    .expect("Failed to build test harness");
+            }
+            Err(_) => {
+                println!("cargo:warning=clang++ not found, skipping test harness build");
+            }
+        }
+    }
 }
@@ -378,12 +378,20 @@ impl Allocator {
        end: usize,
        unpoison: bool,
    ) -> (usize, usize) {
-        // log::trace!("start: {:x}, end {:x}, size {:x}", start, end, end - start);
 
        let shadow_mapping_start = map_to_shadow!(self, start);
 
        let shadow_start = self.round_down_to_page(shadow_mapping_start);
+        // I'm not sure this works as planned. The same address appearing as start and end is mapped to
+        // different addresses.
        let shadow_end = self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
+        log::trace!(
+            "map_shadow_for_region start: {:x}, end {:x}, size {:x}, shadow {:x}-{:x}",
+            start,
+            end,
+            end - start,
+            shadow_start,
+            shadow_end
+        );
        if self.pre_allocated_shadow_mappings.is_empty() {
            for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
                /*
@@ -401,28 +409,46 @@ impl Allocator {
                self.mappings.insert(range.start, mapping);
            }
 
+            log::trace!("adding shadow pages {:x} - {:x}", shadow_start, shadow_end);
            self.shadow_pages.insert(shadow_start..shadow_end);
        } else {
            let mut new_shadow_mappings = Vec::new();
-            for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
-                for ((start, end), shadow_mapping) in &mut self.pre_allocated_shadow_mappings {
-                    if *start <= range.start && range.start < *start + shadow_mapping.len() {
+            for gap in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
+                for ((pa_start, pa_end), shadow_mapping) in &mut self.pre_allocated_shadow_mappings
+                {
+                    if *pa_start <= gap.start && gap.start < *pa_start + shadow_mapping.len() {
+                        log::trace!("pa_start: {:x}, pa_end {:x}, gap.start {:x}, shadow_mapping.ptr {:x}, shadow_mapping.len {:x}",
+                            *pa_start, *pa_end, gap.start, shadow_mapping.as_ptr() as usize, shadow_mapping.len());
+
+                        // Split the preallocated mapping into two parts, keeping the
+                        // part before the gap and returning the part starting with the gap as a new mapping
                        let mut start_mapping =
-                            shadow_mapping.split_off(range.start - *start).unwrap();
-                        let end_mapping = start_mapping
-                            .split_off(range.end - (range.start - *start))
-                            .unwrap();
-                        new_shadow_mappings.push(((range.end, *end), end_mapping));
+                            shadow_mapping.split_off(gap.start - *pa_start).unwrap();
+
+                        // Split the new mapping into two parts,
+                        // keeping the part holding the gap and returning the part starting after the gap as a new mapping
+                        let end_mapping = start_mapping.split_off(gap.end - gap.start).unwrap();
+
+                        //Push the new after-the-gap mapping to the list of mappings to be added
+                        new_shadow_mappings.push(((gap.end, *pa_end), end_mapping));
+
+                        // Insert the new gap mapping into the list of mappings
                        self.mappings
-                            .insert(range.start, start_mapping.try_into().unwrap());
+                            .insert(gap.start, start_mapping.try_into().unwrap());
+
                        break;
                    }
                }
            }
            for new_shadow_mapping in new_shadow_mappings {
+                log::trace!(
+                    "adding pre_allocated_shadow_mappings and shadow pages {:x} - {:x}",
+                    new_shadow_mapping.0 .0,
+                    new_shadow_mapping.0 .1
+                );
                self.pre_allocated_shadow_mappings
                    .insert(new_shadow_mapping.0, new_shadow_mapping.1);
 
                self.shadow_pages
                    .insert(new_shadow_mapping.0 .0..new_shadow_mapping.0 .1);
            }
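To make the gap-splitting bookkeeping above easier to follow, here is a small self-contained sketch of the same arithmetic on plain address pairs instead of real mappings (function and variable names are made up for illustration; the real code performs the equivalent splits on mmap-rs mappings via split_off):

```rust
// Illustrative only: model the bookkeeping of carving a gap
// [gap_start, gap_end) out of a pre-allocated region [pa_start, pa_end),
// mirroring what the updated map_shadow_for_region does with real mappings.
fn split_around_gap(
    pa_start: usize,
    pa_end: usize,
    gap_start: usize,
    gap_end: usize,
) -> ((usize, usize), (usize, usize)) {
    assert!(pa_start <= gap_start && gap_end <= pa_end);
    // Part that now backs the gap itself (goes into `mappings`).
    let gap_part = (gap_start, gap_end);
    // Remainder after the gap (goes back into `pre_allocated_shadow_mappings`).
    let tail_part = (gap_end, pa_end);
    (gap_part, tail_part)
}

fn main() {
    let (gap, tail) = split_around_gap(0x1000, 0x9000, 0x3000, 0x5000);
    assert_eq!(gap, (0x3000, 0x5000));
    assert_eq!(tail, (0x5000, 0x9000));
    println!("gap mapping: {gap:#x?}, remaining pre-allocated: {tail:#x?}");
}
```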
@@ -493,7 +519,7 @@ impl Allocator {
            let start = area.as_ref().unwrap().start();
            let end = area.unwrap().end();
            occupied_ranges.push((start, end));
-            log::trace!("{:x} {:x}", start, end);
+            // log::trace!("Occupied {:x} {:x}", start, end);
            let base: usize = 2;
            // On x64, if end > 2**48, then that's in vsyscall or something.
            #[cfg(all(unix, target_arch = "x86_64"))]
@@ -527,28 +553,56 @@ impl Allocator {
            let addr: usize = 1 << try_shadow_bit;
            let shadow_start = addr;
            let shadow_end = addr + addr + addr;
+            let mut good_candidate = true;
            // check if the proposed shadow bit overlaps with occupied ranges.
            for (start, end) in &occupied_ranges {
+                // log::trace!("{:x} {:x}, {:x} {:x} -> {:x} - {:x}", shadow_start, shadow_end, start, end,
+                // shadow_start + ((start >> 3) & ((1 << (try_shadow_bit + 1)) - 1)),
+                // shadow_start + ((end >> 3) & ((1 << (try_shadow_bit + 1)) - 1))
+                // );
                if (shadow_start <= *end) && (*start <= shadow_end) {
                    log::trace!("{:x} {:x}, {:x} {:x}", shadow_start, shadow_end, start, end);
                    log::warn!("shadow_bit {try_shadow_bit:x} is not suitable");
+                    good_candidate = false;
+                    break;
+                }
+                //check that the entire range's shadow is within the candidate shadow memory space
+                if (shadow_start + ((start >> 3) & ((1 << (try_shadow_bit + 1)) - 1))
+                    > shadow_end)
+                    || (shadow_start + (((end >> 3) & ((1 << (try_shadow_bit + 1)) - 1)) + 1)
+                        > shadow_end)
+                {
+                    log::warn!(
+                        "shadow_bit {try_shadow_bit:x} is not suitable (shadow out of range)"
+                    );
+                    good_candidate = false;
                    break;
                }
            }
 
-            if let Ok(mapping) = MmapOptions::new(1 << (*try_shadow_bit + 1))
-                .unwrap()
-                .with_flags(MmapFlags::NO_RESERVE)
-                .with_address(addr)
-                .reserve_mut()
-            {
-                shadow_bit = (*try_shadow_bit).try_into().unwrap();
-
-                log::warn!("shadow_bit {shadow_bit:x} is suitable");
-                self.pre_allocated_shadow_mappings
-                    .insert((addr, (addr + (1 << shadow_bit))), mapping);
-                break;
+            if good_candidate {
+                // We reserve the shadow memory space of size addr*2, but don't commit it.
+                if let Ok(mapping) = MmapOptions::new(1 << (*try_shadow_bit + 1))
+                    .unwrap()
+                    .with_flags(MmapFlags::NO_RESERVE)
+                    .with_address(addr)
+                    .reserve_mut()
+                {
+                    shadow_bit = (*try_shadow_bit).try_into().unwrap();
+
+                    log::warn!("shadow_bit {shadow_bit:x} is suitable");
+                    log::trace!(
+                        "adding pre_allocated_shadow_mappings {:x} - {:x} with size {:}",
+                        addr,
+                        (addr + (1 << (shadow_bit + 1))),
+                        mapping.len()
+                    );
+
+                    self.pre_allocated_shadow_mappings
+                        .insert((addr, (addr + (1 << (shadow_bit + 1)))), mapping);
+                    break;
+                }
+                log::warn!("shadow_bit {try_shadow_bit:x} is not suitable - failed to allocate shadow memory");
            }
        }
    }
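Two pieces of arithmetic drive the candidate check above: the interval-overlap test against occupied ranges, and the address-to-shadow translation (addr >> 3, masked into a window of size 1 << (shadow_bit + 1)). A minimal self-contained sketch of both, with arbitrarily chosen constants (assumes a 64-bit target and is not the crate's actual API):

```rust
// Illustrative sketch of the two per-candidate checks.
fn overlaps(a: (usize, usize), b: (usize, usize)) -> bool {
    // Two closed ranges overlap iff each one starts before the other ends.
    a.0 <= b.1 && b.0 <= a.1
}

fn to_shadow(addr: usize, shadow_start: usize, shadow_bit: usize) -> usize {
    // One shadow byte covers 8 application bytes; the mask keeps the offset
    // inside the (1 << (shadow_bit + 1))-sized shadow window.
    shadow_start + ((addr >> 3) & ((1 << (shadow_bit + 1)) - 1))
}

fn main() {
    let shadow_bit: usize = 36;
    let shadow_start = 1usize << shadow_bit;
    let shadow_end = shadow_start + 2 * shadow_start;

    // A mapping that collides with the proposed shadow region disqualifies it.
    assert!(overlaps(
        (shadow_start, shadow_end),
        (shadow_start + 0x1000, shadow_start + 0x2000)
    ));
    // A low mapping does not collide, and its shadow lands inside the window.
    assert!(!overlaps((shadow_start, shadow_end), (0x10000, 0x20000)));
    assert!(to_shadow(0x20000, shadow_start, shadow_bit) <= shadow_end);
}
```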
@@ -20,18 +20,18 @@ use frida_gum::instruction_writer::X86Register;
 use frida_gum::instruction_writer::{Aarch64Register, IndexMode};
 use frida_gum::{
     instruction_writer::InstructionWriter, interceptor::Interceptor, stalker::StalkerOutput, Gum,
-    Module, ModuleDetails, ModuleMap, NativePointer, RangeDetails,
+    Module, ModuleDetails, ModuleMap, NativePointer, PageProtection, RangeDetails,
 };
 use frida_gum_sys::Insn;
 use hashbrown::HashMap;
 use libafl_bolts::{cli::FuzzerOptions, AsSlice};
-#[cfg(unix)]
-use libc::RLIMIT_STACK;
+// #[cfg(target_vendor = "apple")]
+// use libc::RLIMIT_STACK;
 use libc::{c_char, wchar_t};
-#[cfg(target_vendor = "apple")]
-use libc::{getrlimit, rlimit};
-#[cfg(all(unix, not(target_vendor = "apple")))]
-use libc::{getrlimit64, rlimit64};
+// #[cfg(target_vendor = "apple")]
+// use libc::{getrlimit, rlimit};
+// #[cfg(all(unix, not(target_vendor = "apple")))]
+// use libc::{getrlimit64, rlimit64};
 use nix::sys::mman::{mmap, MapFlags, ProtFlags};
 use rangemap::RangeMap;
 #[cfg(target_arch = "aarch64")]
@@ -63,10 +63,10 @@ extern "C" {
     fn tls_ptr() -> *const c_void;
 }
 
-#[cfg(target_vendor = "apple")]
-const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
-#[cfg(not(target_vendor = "apple"))]
-const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
+// #[cfg(target_vendor = "apple")]
+// const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
+// #[cfg(not(target_vendor = "apple"))]
+// const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
 
 /// The count of registers that need to be saved by the asan runtime
 /// sixteen general purpose registers are put in this order, rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi, r8-r15, plus instrumented rip, accessed memory addr and true rip
@@ -403,29 +403,57 @@ impl AsanRuntime {
    }
 
    /// Get the maximum stack size for the current stack
-    #[must_use]
-    #[cfg(target_vendor = "apple")]
-    fn max_stack_size() -> usize {
-        let mut stack_rlimit = rlimit {
-            rlim_cur: 0,
-            rlim_max: 0,
-        };
-        assert!(unsafe { getrlimit(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0);
-
-        stack_rlimit.rlim_cur as usize
-    }
+    // #[must_use]
+    // #[cfg(target_vendor = "apple")]
+    // fn max_stack_size() -> usize {
+    //     let mut stack_rlimit = rlimit {
+    //         rlim_cur: 0,
+    //         rlim_max: 0,
+    //     };
+    //     assert!(unsafe { getrlimit(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0);
+
+    //     stack_rlimit.rlim_cur as usize
+    // }
 
    /// Get the maximum stack size for the current stack
-    #[must_use]
-    #[cfg(all(unix, not(target_vendor = "apple")))]
-    fn max_stack_size() -> usize {
-        let mut stack_rlimit = rlimit64 {
-            rlim_cur: 0,
-            rlim_max: 0,
-        };
-        assert!(unsafe { getrlimit64(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0);
-
-        stack_rlimit.rlim_cur as usize
+    // #[must_use]
+    // #[cfg(all(unix, not(target_vendor = "apple")))]
+    // fn max_stack_size() -> usize {
+    //     let mut stack_rlimit = rlimit64 {
+    //         rlim_cur: 0,
+    //         rlim_max: 0,
+    //     };
+    //     assert!(unsafe { getrlimit64(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0);
+
+    //     stack_rlimit.rlim_cur as usize
+    // }
+
+    /// Get the start and end of the memory region containing the given address
+    /// Uses `RangeDetails::enumerate_with_prot` as `RangeDetails::with_address` has
+    /// a [bug](https://github.com/frida/frida-rust/issues/120)
+    /// Returns (start, end)
+    fn range_for_address(address: usize) -> (usize, usize) {
+        let mut start = 0;
+        let mut end = 0;
+        RangeDetails::enumerate_with_prot(PageProtection::NoAccess, &mut |range: &RangeDetails| {
+            let range_start = range.memory_range().base_address().0 as usize;
+            let range_end = range_start + range.memory_range().size();
+            if range_start <= address && range_end >= address {
+                start = range_start;
+                end = range_end;
+                // I want to stop iteration here
+                return false;
+            }
+            true
+        });
+
+        if start == 0 {
+            log::error!(
+                "range_for_address: no range found for address {:#x}",
+                address
+            );
+        }
+        (start, end)
    }
 
    /// Determine the stack start, end for the currently running thread
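The new range_for_address above works around the frida-rust RangeDetails::with_address bug by enumerating ranges and returning false from the callback to stop as soon as a match is found. A dependency-free sketch of that enumerate-until-found pattern (hypothetical names, plain tuples in place of RangeDetails):

```rust
// Illustrative only: find the range containing `address` by walking ranges
// with a callback that returns `false` to stop, as range_for_address does.
fn find_containing(ranges: &[(usize, usize)], address: usize) -> (usize, usize) {
    let (mut start, mut end) = (0, 0);
    let mut visit = |r: &(usize, usize)| -> bool {
        if r.0 <= address && address <= r.1 {
            start = r.0;
            end = r.1;
            return false; // stop the enumeration once a match is found
        }
        true // keep walking
    };
    for r in ranges {
        if !visit(r) {
            break;
        }
    }
    (start, end)
}

fn main() {
    let ranges = [(0x1000, 0x1fff), (0x7000, 0x7fff)];
    assert_eq!(find_containing(&ranges, 0x7123), (0x7000, 0x7fff));
}
```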
@@ -436,35 +464,37 @@ impl AsanRuntime {
    pub fn current_stack() -> (usize, usize) {
        let mut stack_var = 0xeadbeef;
        let stack_address = addr_of_mut!(stack_var) as usize;
-        let range_details = RangeDetails::with_address(stack_address as u64).unwrap();
+        // let range_details = RangeDetails::with_address(stack_address as u64).unwrap();
        // Write something to (hopefully) make sure the val isn't optimized out
        unsafe {
            write_volatile(&mut stack_var, 0xfadbeef);
        }
 
-        let start = range_details.memory_range().base_address().0 as usize;
-        let end = start + range_details.memory_range().size();
-
-        let max_start = end - Self::max_stack_size();
-
-        let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE;
-        #[cfg(not(target_vendor = "apple"))]
-        let flags = flags | MapFlags::MAP_STACK;
-
-        if start != max_start {
-            let mapping = unsafe {
-                mmap(
-                    NonZeroUsize::new(max_start),
-                    NonZeroUsize::new(start - max_start).unwrap(),
-                    ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                    flags,
-                    -1,
-                    0,
-                )
-            };
-            assert!(mapping.unwrap() as usize == max_start);
-        }
-        (max_start, end)
+        // let start = range_details.memory_range().base_address().0 as usize;
+        // let end = start + range_details.memory_range().size();
+        // (start, end)
+        Self::range_for_address(stack_address)
+
+        // let max_start = end - Self::max_stack_size();
+
+        // let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE;
+        // #[cfg(not(target_vendor = "apple"))]
+        // let flags = flags | MapFlags::MAP_STACK;
+
+        // if start != max_start {
+        //     let mapping = unsafe {
+        //         mmap(
+        //             NonZeroUsize::new(max_start),
+        //             NonZeroUsize::new(start - max_start).unwrap(),
+        //             ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+        //             flags,
+        //             -1,
+        //             0,
+        //         )
+        //     };
+        //     assert!(mapping.unwrap() as usize == max_start);
+        // }
+        // (max_start, end)
    }
 
    /// Determine the tls start, end for the currently running thread
@@ -477,10 +507,13 @@ impl AsanRuntime {
        // Strip off the top byte, as scudo allocates buffers with top-byte set to 0xb4
        let tls_address = tls_address & 0xffffffffffffff;
 
-        let range_details = RangeDetails::with_address(tls_address as u64).unwrap();
-        let start = range_details.memory_range().base_address().0 as usize;
-        let end = start + range_details.memory_range().size();
-        (start, end)
+        // let range_details = RangeDetails::with_address(tls_address as u64).unwrap();
+        // log::info!("tls address: {:#x}, range_details {:x} size {:x}", tls_address,
+        // range_details.memory_range().base_address().0 as usize, range_details.memory_range().size());
+        // let start = range_details.memory_range().base_address().0 as usize;
+        // let end = start + range_details.memory_range().size();
+        // (start, end)
+        Self::range_for_address(tls_address)
    }
 
    /// Gets the current instruction pointer
@@ -509,6 +542,7 @@ impl AsanRuntime {
    macro_rules! hook_func {
        ($lib:expr, $name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => {
            paste::paste! {
+                log::trace!("Hooking {}", stringify!($name));
                extern "C" {
                    fn $name($($param: $param_type),*) -> $return_type;
                }
@@ -574,6 +608,7 @@ impl AsanRuntime {
        hook_func!(None, malloc_usable_size, (ptr: *mut c_void), usize);
 
        for libname in ["libc++.so", "libc++.so.1", "libc++_shared.so"] {
+            log::info!("Hooking c++ functions in {}", libname);
            for export in Module::enumerate_exports(libname) {
                match &export.name[..] {
                    "_Znam" => {
@@ -710,7 +745,7 @@ impl AsanRuntime {
                }
            }
        }
-
+        log::info!("Hooking libc functions");
        hook_func!(
            None,
            mmap,
@@ -926,10 +961,14 @@ impl AsanRuntime {
        for operand_idx in 0..operand_count {
            let operand = insn.operand(operand_idx);
            if operand.is_memory() {
+                // The order is like in Intel, not AT&T
+                // So a memory read looks like
+                // mov edx,DWORD PTR [rax+0x14]
+                // not mov 0x14(%rax),%edx
                access_type = if operand_idx == 0 {
-                    Some(AccessType::Read)
-                } else {
                    Some(AccessType::Write)
+                } else {
+                    Some(AccessType::Read)
                };
                if let Some((basereg, indexreg, _, disp)) = operand_details(&operand) {
                    regs = Some((basereg, indexreg, disp));
@@ -1053,6 +1092,10 @@ impl AsanRuntime {
        )));
        }
+
+        // log::info!("ASAN Error, attach the debugger!");
+        // // Sleep for 1 minute to give the user time to attach a debugger
+        // std::thread::sleep(std::time::Duration::from_secs(60));
 
        // self.dump_registers();
    }
 
@@ -1480,7 +1523,7 @@ impl AsanRuntime {
        unsafe {
            let mapping = mmap(
                None,
-                std::num::NonZeroUsize::new_unchecked(0x1000),
+                NonZeroUsize::new_unchecked(0x1000),
                ProtFlags::all(),
                MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE,
                -1,
@@ -18,6 +18,7 @@ use libafl::{
    Error,
 };
 
+#[cfg(not(test))]
 #[cfg(unix)]
 use crate::asan::errors::ASAN_ERRORS;
 use crate::helper::{FridaInstrumentationHelper, FridaRuntimeTuple};
@@ -31,7 +32,7 @@ where
    S::Input: HasTargetBytes,
    S: State,
    OT: ObserversTuple<S>,
-    'a: 'b,
+    'b: 'a,
 {
    base: InProcessExecutor<'a, H, OT, S>,
    // thread_id for the Stalker
@@ -102,6 +103,8 @@ where
        if self.helper.stalker_enabled() {
            self.stalker.deactivate();
        }
+
+        #[cfg(not(test))]
        #[cfg(unix)]
        unsafe {
            if ASAN_ERRORS.is_some() && !ASAN_ERRORS.as_ref().unwrap().is_empty() {
@@ -344,3 +344,181 @@ impl Default for FridaOptions {
        }
    }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::sync::OnceLock;
+
+    use clap::Parser;
+    use frida_gum::Gum;
+    use libafl::{
+        corpus::{Corpus, InMemoryCorpus, Testcase},
+        events::NopEventManager,
+        executors::{ExitKind, InProcessExecutor},
+        feedback_and_fast, feedback_or_fast,
+        feedbacks::ConstFeedback,
+        inputs::{BytesInput, HasTargetBytes},
+        mutators::{mutations::BitFlipMutator, StdScheduledMutator},
+        schedulers::StdScheduler,
+        stages::StdMutationalStage,
+        state::{HasSolutions, StdState},
+        Fuzzer, StdFuzzer,
+    };
+    use libafl_bolts::{
+        cli::FuzzerOptions, rands::StdRand, tuples::tuple_list, AsSlice, SimpleStdoutLogger,
+    };
+
+    use crate::{
+        asan::{
+            asan_rt::AsanRuntime,
+            errors::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS},
+        },
+        coverage_rt::CoverageRuntime,
+        executor::FridaInProcessExecutor,
+        helper::FridaInstrumentationHelper,
+    };
+
+    static GUM: OnceLock<Gum> = OnceLock::new();
+
+    unsafe fn test_asan(options: &FuzzerOptions) {
+        // The names of the functions to run
+        let tests = vec![
+            ("LLVMFuzzerTestOneInput", 0),
+            ("heap_oob_read", 1),
+            ("heap_oob_write", 1),
+            ("heap_uaf_write", 1),
+            ("heap_uaf_read", 1),
+            ("malloc_heap_oob_read", 1),
+            ("malloc_heap_oob_write", 1),
+            ("malloc_heap_uaf_write", 1),
+            ("malloc_heap_uaf_read", 1),
+        ];
+
+        let lib = libloading::Library::new(options.clone().harness.unwrap()).unwrap();
+
+        let coverage = CoverageRuntime::new();
+        let asan = AsanRuntime::new(options);
+        let mut frida_helper = FridaInstrumentationHelper::new(
+            GUM.get().expect("Gum uninitialized"),
+            options,
+            tuple_list!(coverage, asan),
+        );
+
+        // Run the tests for each function
+        for test in tests {
+            let (function_name, err_cnt) = test;
+            log::info!("Testing with harness function {}", function_name);
+
+            let mut corpus = InMemoryCorpus::<BytesInput>::new();
+
+            //TODO - make sure we use the right one
+            let testcase = Testcase::new(vec![0; 4].into());
+            corpus.add(testcase).unwrap();
+
+            let rand = StdRand::with_seed(0);
+
+            let mut feedback = ConstFeedback::new(false);
+
+            // Feedbacks to recognize an input as solution
+            let mut objective = feedback_or_fast!(
+                // true enables the AsanErrorFeedback
+                feedback_and_fast!(ConstFeedback::from(true), AsanErrorsFeedback::new())
+            );
+
+            let mut state = StdState::new(
+                rand,
+                corpus,
+                InMemoryCorpus::<BytesInput>::new(),
+                &mut feedback,
+                &mut objective,
+            )
+            .unwrap();
+
+            let mut event_manager = NopEventManager::new();
+
+            let mut fuzzer = StdFuzzer::new(StdScheduler::new(), feedback, objective);
+
+            let observers = tuple_list!(
+                AsanErrorsObserver::new(&ASAN_ERRORS) //,
+            );
+
+            {
+                let target_func: libloading::Symbol<
+                    unsafe extern "C" fn(data: *const u8, size: usize) -> i32,
+                > = lib.get(function_name.as_bytes()).unwrap();
+
+                let mut harness = |input: &BytesInput| {
+                    let target = input.target_bytes();
+                    let buf = target.as_slice();
+                    (target_func)(buf.as_ptr(), buf.len());
+                    ExitKind::Ok
+                };
+
+                let mut executor = FridaInProcessExecutor::new(
+                    GUM.get().expect("Gum uninitialized"),
+                    InProcessExecutor::new(
+                        &mut harness,
+                        observers, // tuple_list!(),
+                        &mut fuzzer,
+                        &mut state,
+                        &mut event_manager,
+                    )
+                    .unwrap(),
+                    &mut frida_helper,
+                );
+
+                let mutator = StdScheduledMutator::new(tuple_list!(BitFlipMutator::new()));
+                let mut stages = tuple_list!(StdMutationalStage::with_max_iterations(mutator, 1));
+
+                // log::info!("Starting fuzzing!");
+                fuzzer
+                    .fuzz_one(&mut stages, &mut executor, &mut state, &mut event_manager)
+                    .unwrap_or_else(|_| panic!("Error in fuzz_one"));
+
+                log::info!("Done fuzzing! Got {} solutions", state.solutions().count());
+            }
+            assert_eq!(state.solutions().count(), err_cnt);
+        }
+    }
+
+    #[test]
+    #[cfg(unix)]
+    fn run_test_asan() {
+        // Read RUST_LOG from the environment and set the log level accordingly (not using env_logger)
+        // Note that in cargo test, the output of successful tests is suppressed by default,
+        // both those sent to stdout and stderr. To see the output, run `cargo test -- --nocapture`.
+        if let Ok(value) = std::env::var("RUST_LOG") {
+            match value.as_str() {
+                "off" => log::set_max_level(log::LevelFilter::Off),
+                "error" => log::set_max_level(log::LevelFilter::Error),
+                "warn" => log::set_max_level(log::LevelFilter::Warn),
+                "info" => log::set_max_level(log::LevelFilter::Info),
+                "debug" => log::set_max_level(log::LevelFilter::Debug),
+                "trace" => log::set_max_level(log::LevelFilter::Trace),
+                _ => panic!("Unknown RUST_LOG level: {value}"),
+            }
+        }
+
+        SimpleStdoutLogger::set_logger().unwrap();
+
+        // Check if the harness dynamic library is present, if not - skip the test
+        let test_harness = "test_harness.so";
+        assert!(
+            std::path::Path::new(test_harness).exists(),
+            "Skipping test, {test_harness} not found"
+        );
+
+        GUM.set(unsafe { Gum::obtain() })
+            .unwrap_or_else(|_| panic!("Failed to initialize Gum"));
+        let simulated_args = vec![
+            "libafl_frida_test",
+            "-A",
+            "--disable-excludes",
+            "--continue-on-error",
+            "-H",
+            test_harness,
+        ];
+        let options: FuzzerOptions = FuzzerOptions::try_parse_from(simulated_args).unwrap();
+        unsafe { test_asan(&options) }
+    }
+}
libafl_frida/test_harness.cpp (new file, 63 lines)
@@ -0,0 +1,63 @@
+#include <stdint.h>
+#include <stdlib.h>
+#include <string>
+
+extern "C" int heap_uaf_read(const uint8_t *_data, size_t _size) {
+  int *array = new int[100];
+  delete[] array;
+  fprintf(stdout, "%d\n", array[5]);
+  return 0;
+}
+
+extern "C" int heap_uaf_write(const uint8_t *_data, size_t _size) {
+  int *array = new int[100];
+  delete[] array;
+  array[5] = 1;
+  return 0;
+}
+
+extern "C" int heap_oob_read(const uint8_t *_data, size_t _size) {
+  int *array = new int[100];
+  fprintf(stdout, "%d\n", array[100]);
+  delete[] array;
+  return 0;
+}
+
+extern "C" int heap_oob_write(const uint8_t *_data, size_t _size) {
+  int *array = new int[100];
+  array[100] = 1;
+  delete[] array;
+  return 0;
+}
+extern "C" int malloc_heap_uaf_read(const uint8_t *_data, size_t _size) {
+  int *array = static_cast<int *>(malloc(100 * sizeof(int)));
+  free(array);
+  fprintf(stdout, "%d\n", array[5]);
+  return 0;
+}
+
+extern "C" int malloc_heap_uaf_write(const uint8_t *_data, size_t _size) {
+  int *array = static_cast<int *>(malloc(100 * sizeof(int)));
+  free(array);
+  array[5] = 1;
+  return 0;
+}
+
+extern "C" int malloc_heap_oob_read(const uint8_t *_data, size_t _size) {
+  int *array = static_cast<int *>(malloc(100 * sizeof(int)));
+  fprintf(stdout, "%d\n", array[100]);
+  free(array);
+  return 0;
+}
+
+extern "C" int malloc_heap_oob_write(const uint8_t *_data, size_t _size) {
+  int *array = static_cast<int *>(malloc(100 * sizeof(int)));
+  array[100] = 1;
+  free(array);
+  return 0;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  // abort();
+  return 0;
+}