Added MacOS CI (#131)
* added macos ci
* running tests on macos
* some macos fixes
* fmt
* some must_use infos
* trying to fix MacOS testcases
* no main in test
* fixed MacOS testcases
* tried to fix build errors
* unified shmem_limits
* Revert "unified shmem_limits"
  This reverts commit 8c6bb8d6a2cec71d72bb181b5b491737a771298e.
* hopefully fixed macos testcase
* removed unneeded values
This commit is contained in:
parent 11771c3323
commit 3b2ee4bb70
.github/workflows/build_and_test.yml (vendored): 16 changed lines
@@ -13,7 +13,7 @@ jobs:
lint:
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
os: [ubuntu-latest, windows-latest, macOS-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2

@@ -70,6 +70,20 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: clippy
macos:
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: MacOS Build
run: cargo build --verbose
- name: Run clippy
run: ./scripts/clippy.sh
- name: Build fuzzers
run: ./scripts/build_all_fuzzers.sh
- name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh
- name: Run Tests
run: cargo test
# TODO: Figure out how to properly build stuff with clang
#- name: Add clang path to $PATH env
# if: runner.os == 'Windows'

@@ -6,10 +6,10 @@ It has been tested on Linux.

## Build

To build this example, run `cargo build --example libfuzzer_libpng --release`.
To build this example, run `cargo build --release` in this folder.
This will call (the build.rs)[./build.rs], which in turn downloads a libpng archive from the web.
Then, it will link (the fuzzer)[./src/fuzzer.rs] against (the C++ harness)[./harness.cc] and the instrumented `libpng`.
Afterwards, the fuzzer will be ready to run, from `../../target/examples/libfuzzer_libpng`.
Afterwards, the fuzzer will be ready to run, from `target/frida_libpng`.

### Build For Android
When building for android using a cross-compiler, make sure you have a _standalone toolchain_, and then add the following:

@@ -22,13 +22,9 @@ When building for android using a cross-compiler, make sure you have a _standalo

## Run

The first time you run the binary, the broker will open a tcp port (currently on port `1337`), waiting for fuzzer clients to connect. This port is local and only used for the initial handshake. All further communication happens via shared map, to be independent of the kernel.

Each following execution will run a fuzzer client.
As this example uses in-process fuzzing, we added a Restarting Event Manager (`setup_restarting_mgr`).
This means each client will start itself again to listen for crashes and timeouts.
This example uses in-process-fuzzing, using the `launcher` feature, in combination with a Restarting Event Manager.
This means running --cores each client will start itself again to listen for crashes and timeouts.
By restarting the actual fuzzer, it can recover from these exit conditions.

In any real-world scenario, you should use `taskset` to pin each client to an empty CPU core, the lib does not pick an empty core automatically (yet).

For convenience, you may just run `./test.sh` in this folder to test it.
After building the libpng-harness, too, you can run `find . -name libpng-harness.so` to find the location of your harness, then run
`./target/release/frida_libpng ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0`

@@ -1,12 +1,33 @@
PWD=`pwd`
FUZZER_NAME="fuzzer_libpng"

all:
PHONY: all clean

all: fuzzer

libpng-1.6.37:
wget https://deac-fra.dl.sourceforge.net/project/libpng/libpng16/1.6.37/libpng-1.6.37.tar.xz
tar -xvf libpng-1.6.37.tar.xz

target/release/libafl_cxx: src/* src/bin/*
# Build the libpng libfuzzer library
cargo build --release

libafl_cxx: target/release/libafl_cxx

libafl_cc: target/release/libafl_cxx

libpng-1.6.37/.libs/libpng16.a: libpng-1.6.37 libafl_cc
cd libpng-1.6.37 && ./configure && cd ..
$(MAKE) -C libpng-1.6.37 CC=$(realpath target/release/libafl_cc) CXX=$(realpath target/release/libafl_cxx)


fuzzer: libpng-1.6.37/.libs/libpng16.a libafl_cxx
# Build the libpng libfuzzer library
cargo build --release

# Build the libpng harness
$(PWD)/target/release/libafl_cxx \
target/release/libafl_cxx \
$(PWD)/harness.cc \
$(PWD)/libpng-1.6.37/.libs/libpng16.a \
-I$(PWD)/libpng-1.6.37/ \

@@ -29,6 +29,7 @@ use libafl::{
use libafl_targets::{libfuzzer_initialize, libfuzzer_test_one_input, EDGES_MAP, MAX_EDGES_NUM};

/// The main fn, `no_mangle` as it is a C main
#[cfg(not(test))]
#[no_mangle]
pub fn main() {
// Registry the metadata types used in this fuzzer

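The `#[cfg(not(test))]` added above keeps the fuzzer's exported entry point out of `cargo test` builds, so the test harness can supply its own `main` (the "no main in test" item from the commit message). A minimal, self-contained sketch of that gating pattern; the function body here is invented for illustration and is not the real fuzzer entry point:

```rust
// Hypothetical main.rs: illustrates the cfg-gating only, not the real fuzzer code.
// In a normal build this `main` exists; under `cargo test` it is compiled out and
// the auto-generated test harness provides the entry point instead.
#[cfg(not(test))]
fn main() {
    println!("fuzzer entry point");
}

#[cfg(test)]
mod tests {
    #[test]
    fn builds_under_cargo_test() {
        assert_eq!(1 + 1, 2);
    }
}
```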
@@ -366,6 +366,7 @@ macro_rules! create_serde_registry_for_trait {
where
T: Any,
{
#[allow(clippy::manual_map)]
match self.map.get(&unpack_type_id(TypeId::of::<T>())) {
None => None,
Some(h) => {

@@ -386,6 +387,7 @@ macro_rules! create_serde_registry_for_trait {
fn(&Box<dyn $trait_name>) -> &dyn $trait_name,
>,
> {
#[allow(clippy::manual_map)]
match self.map.get(&unpack_type_id(*typeid)) {
None => None,
Some(h) => Some(h.values().map(|x| x.as_ref())),

@@ -405,6 +407,7 @@ macro_rules! create_serde_registry_for_trait {
where
T: Any,
{
#[allow(clippy::manual_map)]
match self.map.get_mut(&unpack_type_id(TypeId::of::<T>())) {
None => None,
Some(h) => Some(

@@ -425,6 +428,7 @@ macro_rules! create_serde_registry_for_trait {
fn(&mut Box<dyn $trait_name>) -> &mut dyn $trait_name,
>,
> {
#[allow(clippy::manual_map)]
match self.map.get_mut(&unpack_type_id(*typeid)) {
None => None,
Some(h) => Some(h.values_mut().map(|x| x.as_mut())),

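The `#[allow(clippy::manual_map)]` attributes above silence clippy's suggestion to rewrite these explicit `match`es as `Option::map`, which the registry keeps for clarity. A standalone illustration of the lint (the names are invented; this is not the registry code):

```rust
// What clippy::manual_map flags: an explicit match that could be `.map(...)`.
#[allow(clippy::manual_map)]
fn name_length_manual(name: Option<&str>) -> Option<usize> {
    match name {
        None => None,
        Some(s) => Some(s.len()),
    }
}

// The rewrite clippy would suggest; both functions behave identically.
fn name_length_idiomatic(name: Option<&str>) -> Option<usize> {
    name.map(|s| s.len())
}

fn main() {
    assert_eq!(name_length_manual(Some("abc")), Some(3));
    assert_eq!(name_length_idiomatic(None), None);
}
```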
@@ -289,14 +289,14 @@ impl Forkserver {
&mut copy,
)?;
if sret > 0 {
if let Err(_) = self.st_pipe.read_exact(&mut buf) {
return Err(Error::Forkserver(
"Unable to communicate with fork server (OOM?)".to_string(),
));
}

if self.st_pipe.read_exact(&mut buf).is_ok() {
let val: i32 = i32::from_ne_bytes(buf);
Ok(Some(val))
} else {
Err(Error::Forkserver(
"Unable to communicate with fork server (OOM?)".to_string(),
))
}
} else {
Ok(None)
}

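The rewritten status read above folds the error path into a single `read_exact(..).is_ok()` branch instead of an `if let Err(_)` plus early return. A self-contained sketch of the same pattern against an in-memory reader; the function name and setup are hypothetical and not the forkserver API:

```rust
use std::io::Read;

// Any `Read` source stands in for the status pipe here.
fn read_status(mut pipe: impl Read) -> Option<i32> {
    let mut buf = [0u8; 4];
    if pipe.read_exact(&mut buf).is_ok() {
        // A native-endian 32-bit status value, as in the forkserver protocol.
        Some(i32::from_ne_bytes(buf))
    } else {
        None
    }
}

fn main() {
    let bytes = 42i32.to_ne_bytes();
    assert_eq!(read_status(&bytes[..]), Some(42));
    // A short read hits the error branch.
    assert_eq!(read_status(&[0u8, 1][..]), None);
}
```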
@@ -18,6 +18,7 @@ use crate::{
};

pub(crate) struct Allocator {
#[allow(dead_code)]
options: FridaOptions,
page_size: usize,
shadow_offset: usize,

@@ -26,11 +27,19 @@ pub(crate) struct Allocator {
allocations: HashMap<usize, AllocationMetadata>,
shadow_pages: RangeSet<usize>,
allocation_queue: HashMap<usize, Vec<AllocationMetadata>>,
#[cfg(target_arch = "aarch64")]
largest_allocation: usize,
#[cfg(target_arch = "aarch64")]
base_mapping_addr: usize,
#[cfg(target_arch = "aarch64")]
current_mapping_addr: usize,
}

#[cfg(any(target_os = "macos", target_os = "ios"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;

macro_rules! map_to_shadow {
($self:expr, $address:expr) => {
(($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1)

@@ -58,6 +67,7 @@ impl Allocator {
let page_size = ret as usize;
// probe to find a usable shadow bit:
let mut shadow_bit: usize = 0;

for try_shadow_bit in &[46usize, 36usize] {
let addr: usize = 1 << try_shadow_bit;
if unsafe {

@@ -66,7 +76,7 @@ impl Allocator {
page_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_PRIVATE
| MapFlags::MAP_ANONYMOUS
| ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,

@@ -88,7 +98,7 @@ impl Allocator {
addr as *mut c_void,
addr + addr,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_ANONYMOUS
ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_NORESERVE,

@@ -107,27 +117,34 @@ impl Allocator {
allocations: HashMap::new(),
shadow_pages: RangeSet::new(),
allocation_queue: HashMap::new(),
#[cfg(target_arch = "aarch64")]
largest_allocation: 0,
#[cfg(target_arch = "aarch64")]
base_mapping_addr: addr + addr + addr,
#[cfg(target_arch = "aarch64")]
current_mapping_addr: addr + addr + addr,
}
}

/// Retreive the shadow bit used by this allocator.
#[must_use]
pub fn shadow_bit(&self) -> u32 {
self.shadow_bit as u32
}

#[inline]
#[must_use]
fn round_up_to_page(&self, size: usize) -> usize {
((size + self.page_size) / self.page_size) * self.page_size
}

#[inline]
#[must_use]
fn round_down_to_page(&self, value: usize) -> usize {
(value / self.page_size) * self.page_size
}

#[cfg(target_arch = "aarch64")]
fn find_smallest_fit(&mut self, size: usize) -> Option<AllocationMetadata> {
let mut current_size = size;
while current_size <= self.largest_allocation {

@@ -142,6 +159,8 @@ impl Allocator {
None
}

#[cfg(target_arch = "aarch64")]
#[must_use]
pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void {
let mut is_malloc_zero = false;
let size = if size == 0 {

@@ -169,7 +188,7 @@ impl Allocator {
self.current_mapping_addr as *mut c_void,
rounded_up_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_ANONYMOUS
ANONYMOUS_FLAG
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,

@@ -214,6 +233,7 @@ impl Allocator {
address
}

#[cfg(target_arch = "aarch64")]
pub unsafe fn release(&mut self, ptr: *mut c_void) {
let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) {
metadata

@@ -288,6 +308,7 @@ impl Allocator {
}
}

#[cfg(target_arch = "aarch64")]
pub fn get_usable_size(&self, ptr: *mut c_void) -> usize {
match self.allocations.get(&(ptr as usize)) {
Some(metadata) => metadata.size,

@@ -354,7 +375,7 @@ impl Allocator {
range.start as *mut c_void,
range.end - range.start,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
-1,
0,
)

@@ -373,10 +394,12 @@ impl Allocator {
(shadow_mapping_start, (end - start) / 8)
}

#[cfg(target_arch = "aarch64")]
pub fn map_to_shadow(&self, start: usize) -> usize {
map_to_shadow!(self, start)
}

#[cfg(target_arch = "aarch64")]
#[inline]
pub fn is_managed(&self, ptr: *mut c_void) -> bool {
//self.allocations.contains_key(&(ptr as usize))

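macOS and iOS spell the anonymous-mapping flag `MAP_ANON` rather than `MAP_ANONYMOUS`, which is why the allocator above introduces a cfg-gated `ANONYMOUS_FLAG` constant and swaps it into its `mmap` calls. A minimal sketch of just that flag selection, assuming the `nix` crate the diff already depends on (no mapping is actually created here):

```rust
use nix::sys::mman::MapFlags;

// On Apple targets the flag is MAP_ANON; elsewhere it is MAP_ANONYMOUS.
#[cfg(any(target_os = "macos", target_os = "ios"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;

fn main() {
    // The same expression now compiles on Linux, Android, macOS and iOS.
    let flags = ANONYMOUS_FLAG | MapFlags::MAP_PRIVATE;
    println!("anonymous mapping flags: {:?}", flags);
}
```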
@@ -246,8 +246,8 @@ impl AsanErrors {
}
}
}
AsanError::BadFuncArgRead((name, pc, address, size, backtrace))
| AsanError::BadFuncArgWrite((name, pc, address, size, backtrace)) => {
AsanError::BadFuncArgRead((name, _pc, address, size, backtrace))
| AsanError::BadFuncArgWrite((name, _pc, address, size, backtrace)) => {
writeln!(
output,
" in call to {}, argument {:#016x}, size: {:#x}",

@@ -260,17 +260,17 @@ impl AsanErrors {
{
let invocation = Interceptor::current_invocation();
let cpu_context = invocation.cpu_context();
if let Some(module_details) = ModuleDetails::with_address(pc as u64) {
if let Some(module_details) = ModuleDetails::with_address(_pc as u64) {
writeln!(
output,
" at 0x{:x} ({}@0x{:04x})",
pc,
_pc,
module_details.path(),
pc - module_details.range().base_address().0 as usize,
_pc - module_details.range().base_address().0 as usize,
)
.unwrap();
} else {
writeln!(output, " at 0x{:x}", pc).unwrap();
writeln!(output, " at 0x{:x}", _pc).unwrap();
}

#[allow(clippy::non_ascii_literal)]

@@ -6,13 +6,15 @@ even if the target would not have crashed under normal conditions.
this helps finding mem errors early.
*/

use frida_gum::{NativePointer, RangeDetails};
#[cfg(target_arch = "aarch64")]
use frida_gum::NativePointer;
use frida_gum::RangeDetails;
use hashbrown::HashMap;

use nix::{
libc::memset,
sys::mman::{mmap, MapFlags, ProtFlags},
};
use nix::sys::mman::{mmap, MapFlags, ProtFlags};

#[cfg(target_arch = "aarch64")]
use nix::libc::memset;

use backtrace::Backtrace;
use capstone::{

@@ -20,10 +22,18 @@ use capstone::{
Capstone, Insn,
};
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};
use frida_gum::{interceptor::Interceptor, Gum, ModuleMap};
#[cfg(target_arch = "aarch64")]
use frida_gum::interceptor::Interceptor;
use frida_gum::{Gum, ModuleMap};
#[cfg(unix)]
use libc::{c_char, getrlimit64, rlimit64, wchar_t};
use std::ffi::c_void;
use libc::RLIMIT_STACK;
#[cfg(target_arch = "aarch64")]
use libc::{c_char, wchar_t};
#[cfg(any(target_os = "macos", target_os = "ios"))]
use libc::{getrlimit, rlimit};
#[cfg(all(unix, not(any(target_os = "macos", target_os = "ios"))))]
use libc::{getrlimit64, rlimit64};
use std::{ffi::c_void, ptr::write_volatile};

use crate::{
alloc::Allocator,

@@ -40,6 +50,11 @@ extern "C" {
fn tls_ptr() -> *const c_void;
}

#[cfg(any(target_os = "macos", target_os = "ios"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;

/// The frida address sanitizer runtime, providing address sanitization.
/// When executing in `ASAN`, each memory access will get checked, using frida stalker under the hood.
/// The runtime can report memory errors that occurred during execution,

@@ -189,25 +204,52 @@ impl AsanRuntime {
);
}

/// Get the maximum stack size for the current stack
#[must_use]
#[cfg(any(target_os = "macos", target_os = "ios"))]
fn max_stack_size() -> usize {
let mut stack_rlimit = rlimit {
rlim_cur: 0,
rlim_max: 0,
};
assert!(unsafe { getrlimit(RLIMIT_STACK, &mut stack_rlimit as *mut rlimit) } == 0);

stack_rlimit.rlim_max as usize
}

/// Get the maximum stack size for the current stack
#[must_use]
#[cfg(all(unix, not(any(target_os = "macos", target_os = "ios"))))]
fn max_stack_size() -> usize {
let mut stack_rlimit = rlimit64 {
rlim_cur: 0,
rlim_max: 0,
};
assert!(unsafe { getrlimit64(RLIMIT_STACK, &mut stack_rlimit as *mut rlimit64) } == 0);

stack_rlimit.rlim_max as usize
}

/// Determine the stack start, end for the currently running thread
///
/// # Panics
/// Panics, if no mapping for the `stack_address` at `0xeadbeef` could be found.
#[must_use]
pub fn current_stack() -> (usize, usize) {
let stack_var = 0xeadbeef;
let stack_address = &stack_var as *const _ as *const c_void as usize;
let mut stack_var = 0xeadbeef;
let stack_address = &mut stack_var as *mut _ as *mut c_void as usize;
let range_details = RangeDetails::with_address(stack_address as u64).unwrap();
// Write something to (hopefully) make sure the val isn't optimized out
unsafe { write_volatile(&mut stack_var, 0xfadbeef) };

let start = range_details.memory_range().base_address().0 as usize;
let end = start + range_details.memory_range().size();

let mut stack_rlimit = rlimit64 {
rlim_cur: 0,
rlim_max: 0,
};
assert!(unsafe { getrlimit64(3, &mut stack_rlimit as *mut rlimit64) } == 0);
let max_start = end - Self::max_stack_size();

let max_start = end - stack_rlimit.rlim_cur as usize;
let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE;
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
let flags = flags | MapFlags::MAP_STACK;

if start != max_start {
let mapping = unsafe {

@@ -215,10 +257,7 @@ impl AsanRuntime {
max_start as *mut c_void,
start - max_start,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_ANONYMOUS
| MapFlags::MAP_FIXED
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_STACK,
flags,
-1,
0,
)

@@ -229,10 +268,12 @@ impl AsanRuntime {
}

/// Determine the tls start, end for the currently running thread
#[must_use]
fn current_tls() -> (usize, usize) {
let tls_address = unsafe { tls_ptr() } as usize;

#[cfg(target_os = "android")]
// Strip off the top byte, as scudo allocates buffers with top-byte set to 0xb4
let tls_address = tls_address & 0xffffffffffffff;

let range_details = RangeDetails::with_address(tls_address as u64).unwrap();
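`current_stack()` above now makes the probe variable mutable and writes to it with `write_volatile`, so the compiler cannot optimize the local off the stack before its address is used. A tiny self-contained sketch of that trick (illustration only, not the runtime's code):

```rust
use std::ptr::write_volatile;

// Keep a local alive on the stack by writing to it volatilely, then use its address.
fn stack_probe_address() -> usize {
    let mut probe = 0xeadbeef_usize;
    let address = &mut probe as *mut usize as usize;
    // A volatile write is never elided, so `probe` really lives in this frame.
    unsafe { write_volatile(&mut probe, 0xfadbeef) };
    address
}

fn main() {
    println!("an address inside the current stack frame: {:#x}", stack_probe_address());
}
```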
@@ -241,29 +282,34 @@ impl AsanRuntime {
(start, end)
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_malloc(&mut self, size: usize) -> *mut c_void {
unsafe { self.allocator.alloc(size, 8) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__Znam(&mut self, size: usize) -> *mut c_void {
unsafe { self.allocator.alloc(size, 8) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__ZnamRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void {
unsafe { self.allocator.alloc(size, 8) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__ZnamSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void {
unsafe { self.allocator.alloc(size, alignment) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__ZnamSt11align_val_tRKSt9nothrow_t(

@@ -275,24 +321,28 @@ impl AsanRuntime {
unsafe { self.allocator.alloc(size, alignment) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__Znwm(&mut self, size: usize) -> *mut c_void {
unsafe { self.allocator.alloc(size, 8) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__ZnwmRKSt9nothrow_t(&mut self, size: usize, _nothrow: *const c_void) -> *mut c_void {
unsafe { self.allocator.alloc(size, 8) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__ZnwmSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void {
unsafe { self.allocator.alloc(size, alignment) }
}

#[cfg(target_arch = "aarch64")]
#[allow(non_snake_case)]
#[inline]
fn hook__ZnwmSt11align_val_tRKSt9nothrow_t(

@@ -304,6 +354,7 @@ impl AsanRuntime {
unsafe { self.allocator.alloc(size, alignment) }
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void {
let ret = unsafe { self.allocator.alloc(size * nmemb, 8) };

@@ -313,6 +364,7 @@ impl AsanRuntime {
ret
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void {
unsafe {

@@ -327,11 +379,13 @@ impl AsanRuntime {
}
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_check_free(&mut self, ptr: *mut c_void) -> bool {
self.allocator.is_managed(ptr)
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_free(&mut self, ptr: *mut c_void) {
if ptr != std::ptr::null_mut() {

@@ -339,11 +393,13 @@ impl AsanRuntime {
}
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_memalign(&mut self, alignment: usize, size: usize) -> *mut c_void {
unsafe { self.allocator.alloc(size, alignment) }
}

#[cfg(target_arch = "aarch64")]
#[inline]
fn hook_posix_memalign(
&mut self,

@@ -358,12 +414,14 @@ impl AsanRuntime {
}

#[inline]
#[cfg(target_arch = "aarch64")]
fn hook_malloc_usable_size(&mut self, ptr: *mut c_void) -> usize {
self.allocator.get_usable_size(ptr)
}

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdaPv(&mut self, ptr: *mut c_void) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -372,6 +430,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -380,6 +439,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdaPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -388,6 +448,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdaPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -396,6 +457,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdaPvSt11align_val_tRKSt9nothrow_t(
&mut self,
ptr: *mut c_void,

@@ -409,6 +471,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdaPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -417,6 +480,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdlPv(&mut self, ptr: *mut c_void) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -425,6 +489,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -433,6 +498,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdlPvmSt11align_val_t(&mut self, ptr: *mut c_void, _ulong: u64, _alignment: usize) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -441,6 +507,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdlPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -449,6 +516,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdlPvSt11align_val_tRKSt9nothrow_t(
&mut self,
ptr: *mut c_void,

@@ -462,6 +530,7 @@ impl AsanRuntime {

#[allow(non_snake_case)]
#[inline]
#[cfg(target_arch = "aarch64")]
fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) {
if ptr != std::ptr::null_mut() {
unsafe { self.allocator.release(ptr) }

@@ -469,6 +538,7 @@ impl AsanRuntime {
}

#[inline]
#[cfg(target_arch = "aarch64")]
fn hook_mmap(
&mut self,
addr: *const c_void,

@@ -497,6 +567,7 @@ impl AsanRuntime {
}

#[inline]
#[cfg(target_arch = "aarch64")]
fn hook_munmap(&mut self, addr: *const c_void, length: usize) -> i32 {
extern "C" {
fn munmap(addr: *const c_void, length: usize) -> i32;

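Most of the remaining churn above is the `#[cfg(target_arch = "aarch64")]` attribute added to each allocator hook, so non-AArch64 targets (such as the x86_64 macOS CI runner) simply compile those hooks out. A generic sketch of that gating pattern; the names are invented, and unlike the diff it adds a stub fallback so the example runs on any architecture:

```rust
// Illustrative only: the real diff just omits the hooks on other targets,
// while this sketch adds a stub so it stays runnable everywhere.
#[cfg(target_arch = "aarch64")]
fn allocator_hooks_compiled_in() -> bool {
    true
}

#[cfg(not(target_arch = "aarch64"))]
fn allocator_hooks_compiled_in() -> bool {
    false
}

fn main() {
    println!("allocator hooks compiled in: {}", allocator_hooks_compiled_in());
}
```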
@@ -33,7 +33,14 @@ use rangemap::RangeMap;

use nix::sys::mman::{mmap, MapFlags, ProtFlags};

use crate::{asan_rt::AsanRuntime, FridaOptions};
use crate::FridaOptions;

use crate::asan_rt::AsanRuntime;

#[cfg(any(target_os = "macos", target_os = "ios"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;

/// An helper that feeds [`FridaInProcessExecutor`] with user-supplied instrumentation
pub trait FridaHelper<'a> {

@@ -226,7 +233,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
std::ptr::null_mut(),
128 * 1024,
ProtFlags::PROT_NONE,
MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE | MapFlags::MAP_NORESERVE,
ANONYMOUS_FLAG | MapFlags::MAP_PRIVATE | MapFlags::MAP_NORESERVE,
-1,
0,
)

@@ -235,7 +242,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
std::ptr::null_mut(),
4 * 1024 * 1024,
ProtFlags::PROT_NONE,
MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE | MapFlags::MAP_NORESERVE,
ANONYMOUS_FLAG | MapFlags::MAP_PRIVATE | MapFlags::MAP_NORESERVE,
-1,
0,
)

@@ -51,13 +51,21 @@

#define EXPORT_FN

#if defined(__APPLE__)
// TODO: Find a proper way to deal with weak fns on Apple!
#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
RETURN_TYPE NAME FUNC_SIG __attribute__((weak_import)) { return 0; }
#else
// Declare these symbols as weak to allow them to be optionally defined.
#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
__attribute__((weak, visibility("default"))) RETURN_TYPE NAME FUNC_SIG
#endif

#define CHECK_WEAK_FN(Name) (Name != NULL)
#endif

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
EXT_FUNC(LLVMFuzzerInitialize, int, (int *argc, char ***argv), false);
EXT_FUNC(LLVMFuzzerCustomMutator, size_t,
(uint8_t *Data, size_t Size, size_t MaxSize, unsigned int Seed),

@@ -67,6 +75,7 @@ EXT_FUNC(LLVMFuzzerCustomCrossOver, size_t,
const uint8_t *Data2, size_t Size2,
uint8_t *Out, size_t MaxOutSize, unsigned int Seed),
false);
#pragma GCC diagnostic pop

#undef EXT_FUNC

@@ -9,10 +9,27 @@
#endif

#if defined(__APPLE__)
#pragma weak __sanitizer_cov_trace_const_cmp1 = __sanitizer_cov_trace_cmp1
#pragma weak __sanitizer_cov_trace_const_cmp2 = __sanitizer_cov_trace_cmp2
#pragma weak __sanitizer_cov_trace_const_cmp4 = __sanitizer_cov_trace_cmp4
#pragma weak __sanitizer_cov_trace_const_cmp8 = __sanitizer_cov_trace_cmp8

void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2);
void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2) {
__sanitizer_cov_trace_cmp1(arg1, arg2);
}

void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2);
void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2) {
__sanitizer_cov_trace_cmp2(arg1, arg2);
}

void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2);
void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2) {
__sanitizer_cov_trace_cmp4(arg1, arg2);
}

void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2);
void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2) {
__sanitizer_cov_trace_cmp8(arg1, arg2);
}

#elif defined(_MSC_VER)
#pragma comment(linker, "/alternatename:__sanitizer_cov_trace_const_cmp1=__sanitizer_cov_trace_cmp1")
#pragma comment(linker, "/alternatename:__sanitizer_cov_trace_const_cmp2=__sanitizer_cov_trace_cmp2")

@@ -129,3 +146,4 @@ void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) {
}

}

@@ -2,9 +2,6 @@

sudo sysctl -w kern.sysv.shmmax=524288000
sudo sysctl -w kern.sysv.shmmin=1
sudo sysctl -w kern.sysv.shmmni=64
sudo sysctl -w kern.sysv.shmseg=16
sudo sysctl -w kern.sysv.semmns=130
sudo sysctl -w kern.sysv.shmall=131072000
sudo sysctl -w kern.sysv.maxproc=2048
sudo sysctl -w kern.maxprocperuid=512