Various fixes for frida-asan on aarch64-linux-android (#64)

Author: s1341, 2021-04-29 20:31:36 +03:00 (committed by GitHub)
Parent: f3b4305dac
Commit: e62f4de6b5
3 changed files with 107 additions and 33 deletions

File 1 of 3:

@@ -317,6 +317,8 @@ unsafe fn fuzz(
let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new());
let mut fuzzer = StdFuzzer::new(tuple_list!(stage));
+frida_helper.register_thread();
// Create the executor for an in-process function with just one observer for edge coverage
let mut executor = FridaInProcessExecutor::new(
&gum,
@@ -351,7 +353,6 @@ unsafe fn fuzz(
println!("We imported {} inputs from disk.", state.corpus().count());
}
-//executor.helper.register_thread();
fuzzer.fuzz_loop(&mut state, &mut executor, &mut restarting_mgr, &scheduler)?;
// Never reached

File 2 of 3:

@@ -24,7 +24,7 @@ use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity};
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};
#[cfg(unix)]
use gothook::GotHookLibrary;
-use libc::{sysconf, _SC_PAGESIZE};
+use libc::{_SC_PAGESIZE, getrlimit64, rlimit64, sysconf};
use rangemap::RangeSet;
use serde::{Deserialize, Serialize};
use std::{
@@ -52,6 +52,7 @@ struct Allocator {
allocations: HashMap<usize, AllocationMetadata>,
shadow_pages: RangeSet<usize>,
allocation_queue: HashMap<usize, Vec<AllocationMetadata>>,
+largest_allocation: usize,
}
macro_rules! map_to_shadow {
@@ -88,7 +89,7 @@ impl Allocator {
addr as *mut c_void,
page_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED,
+MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_NORESERVE,
-1,
0,
)
@@ -108,7 +109,7 @@ impl Allocator {
addr as *mut c_void,
addr + addr,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED,
+MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE | MapFlags::MAP_NORESERVE,
-1,
0,
)
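Both shadow mappings above now pass MAP_NORESERVE as well, so the large and mostly sparse shadow region no longer reserves swap up front; physical pages are committed only when a shadow byte is first touched. A minimal sketch of a call with that shape, assuming the nix crate's mmap (which matches the ProtFlags/MapFlags types used here) and with addr/len as placeholder parameters:

use libc::c_void;
use nix::sys::mman::{mmap, MapFlags, ProtFlags};

// Illustrative only: reserve a fixed-address, lazily committed anonymous
// mapping, shaped like the shadow setup above.
unsafe fn map_shadow(addr: usize, len: usize) -> nix::Result<*mut c_void> {
    mmap(
        addr as *mut c_void,
        len,
        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
        MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_NORESERVE,
        -1,
        0,
    )
}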
@@ -124,6 +125,7 @@ impl Allocator {
allocations: HashMap::new(),
shadow_pages: RangeSet::new(),
allocation_queue: HashMap::new(),
+largest_allocation: 0,
};
unsafe {
ALLOCATOR_SINGLETON = Some(RefCell::new(allocator));
@@ -154,6 +156,19 @@ impl Allocator {
(value / self.page_size) * self.page_size
}
+fn find_smallest_fit(&mut self, size: usize) -> Option<AllocationMetadata> {
+let mut current_size = size;
+while current_size <= self.largest_allocation {
+if self.allocation_queue.contains_key(&current_size) {
+if let Some(metadata) = self.allocation_queue.entry(current_size).or_default().pop() {
+return Some(metadata);
+}
+}
+current_size *= 2;
+}
+None
+}
pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void {
let mut is_malloc_zero = false;
let size = if size == 0 {
@@ -168,11 +183,7 @@ impl Allocator {
}
let rounded_up_size = self.round_up_to_page(size);
-let metadata = if let Some(mut metadata) = self
-.allocation_queue
-.entry(rounded_up_size)
-.or_default()
-.pop()
+let metadata = if let Some(mut metadata) = self.find_smallest_fit(rounded_up_size)
{
//println!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size);
metadata.is_malloc_zero = is_malloc_zero;
@@ -189,7 +200,7 @@ impl Allocator {
} else {
let mapping = match mmap(
std::ptr::null_mut(),
-rounded_up_size + 2 * self.page_size,
+rounded_up_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_ANONYMOUS | MapFlags::MAP_PRIVATE,
-1,
@@ -204,12 +215,12 @@ impl Allocator {
self.map_shadow_for_region(
mapping,
-mapping + rounded_up_size + 2 * self.page_size,
+mapping + rounded_up_size,
false,
);
let mut metadata = AllocationMetadata {
-address: mapping + self.page_size,
+address: mapping,
size,
actual_size: rounded_up_size,
..AllocationMetadata::default()
@@ -227,6 +238,7 @@ impl Allocator {
metadata
};
+self.largest_allocation = std::cmp::max(self.largest_allocation, metadata.actual_size);
// unpoison the shadow memory for the allocation itself
Self::unpoison(map_to_shadow!(self, metadata.address), size);
let address = metadata.address as *mut c_void;
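The reuse path introduced above keys freed mappings by their page-rounded size, and find_smallest_fit probes that size class and then each doubling of it, up to the largest allocation seen so far (tracked via largest_allocation). A standalone toy model of the lookup, with hypothetical names rather than the crate's API:

use std::collections::HashMap;

// Toy model of find_smallest_fit's size-class probing (illustrative only).
struct FreeQueue {
    by_size: HashMap<usize, Vec<usize>>, // rounded size -> freed mapping bases
    largest_allocation: usize,
}

impl FreeQueue {
    fn find_smallest_fit(&mut self, size: usize) -> Option<usize> {
        let mut current = size;
        while current <= self.largest_allocation {
            if let Some(base) = self.by_size.get_mut(&current).and_then(|v| v.pop()) {
                return Some(base);
            }
            current *= 2;
        }
        None
    }
}

fn main() {
    let mut q = FreeQueue { by_size: HashMap::new(), largest_allocation: 0x8000 };
    q.by_size.insert(0x4000, vec![0xdead_0000]); // one freed 16 KiB mapping
    // 4 KiB request: probes 0x1000, 0x2000, 0x4000 and reuses the 16 KiB class.
    assert_eq!(q.find_smallest_fit(0x1000), Some(0xdead_0000));
    // 12 KiB request: probes 0x3000, 0x6000, then exceeds largest_allocation.
    assert_eq!(q.find_smallest_fit(0x3000), None);
}

Note that only exact power-of-two multiples of the request are probed, which is why the freed 0x4000-byte class cannot serve the 0x3000-byte request in the model above.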
@@ -774,23 +786,44 @@ impl AsanRuntime {
pub fn register_thread(&self) {
let mut allocator = Allocator::get();
let (stack_start, stack_end) = Self::current_stack();
println!("current stack: {:#016x}-{:#016x}", stack_start, stack_end);
allocator.map_shadow_for_region(stack_start, stack_end, true);
-let (tls_start, tls_end) = Self::current_tls();
-allocator.map_shadow_for_region(tls_start, tls_end, true);
-println!(
-"registering thread with stack {:x}:{:x} and tls {:x}:{:x}",
-stack_start as usize, stack_end as usize, tls_start as usize, tls_end as usize
-);
+//let (tls_start, tls_end) = Self::current_tls();
+//allocator.map_shadow_for_region(tls_start, tls_end, true);
+//println!(
+//"registering thread with stack {:x}:{:x} and tls {:x}:{:x}",
+//stack_start as usize, stack_end as usize, tls_start as usize, tls_end as usize
+//);
}
/// Determine the stack start, end for the currently running thread
pub fn current_stack() -> (usize, usize) {
let stack_var = 0xeadbeef;
let stack_address = &stack_var as *const _ as *const c_void as usize;
let (start, end, _, _) = find_mapping_for_address(stack_address).unwrap();
-(start, end)
+let mut stack_rlimit = rlimit64 { rlim_cur: 0, rlim_max: 0 };
+assert!(unsafe { getrlimit64(3, &mut stack_rlimit as *mut rlimit64 ) } == 0);
+println!("stack_rlimit: {:?}", stack_rlimit);
+let max_start = end - stack_rlimit.rlim_cur as usize;
+if start != max_start {
+let mapping = unsafe {
+mmap(
+max_start as *mut c_void,
+start - max_start,
+ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+MapFlags::MAP_ANONYMOUS | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE | MapFlags::MAP_STACK,
+-1,
+0,
+)
+};
+assert!(mapping.unwrap() as usize == max_start);
+}
+(max_start, end)
}
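In the rewritten current_stack, the bare 3 passed to getrlimit64 is RLIMIT_STACK on Linux and Android; end - rlim_cur then gives the lowest address the stack may ever grow to, and any not-yet-committed gap below the current mapping is mapped eagerly (the kernel grows the main-thread stack on demand, so its lowest pages may not exist yet and would otherwise get no shadow). A small sketch of the same query using libc's named constant rather than the literal:

use libc::{getrlimit64, rlimit64, RLIMIT_STACK};

// Sketch: the soft stack limit queried by the new current_stack().
// RLIMIT_STACK is the named form of the literal `3` used above.
fn stack_soft_limit() -> u64 {
    let mut rl = rlimit64 { rlim_cur: 0, rlim_max: 0 };
    // Safety: `rl` is a valid, writable rlimit64.
    assert_eq!(unsafe { getrlimit64(RLIMIT_STACK, &mut rl) }, 0);
    rl.rlim_cur
}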
/// Determine the tls start, end for the currently running thread
@@ -1269,12 +1302,12 @@ impl AsanRuntime {
}
}
AsanError::UnallocatedFree((ptr, backtrace)) => {
-writeln!(output, " of {:?}", ptr).unwrap();
+writeln!(output, " of {:#016x}", ptr).unwrap();
output.reset().unwrap();
backtrace_printer.print_trace(&backtrace, output).unwrap();
}
AsanError::Leak((ptr, mut metadata)) => {
-writeln!(output, " of {:?}", ptr).unwrap();
+writeln!(output, " of {:#016x}", ptr).unwrap();
output.reset().unwrap();
#[allow(clippy::non_ascii_literal)]
@@ -1388,6 +1421,7 @@ impl AsanRuntime {
; b >skip_report
; report:
+; brk 0x11
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
@@ -1487,7 +1521,7 @@ impl AsanRuntime {
; mov x1, #1
; add x1, xzr, x1, lsl #shadow_bit
; add x1, x1, x0, lsr #3
-; ubfx x1, x1, #0, #(shadow_bit + 1)
+; ubfx x1, x1, #0, #(shadow_bit + 2)
; ldrh w1, [x1, #0]
; and x0, x0, #7
; rev16 w1, w1
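Reading the emitted sequence: the shadow address of the 8-byte granule holding x0 is (1 << shadow_bit) + (x0 >> 3), and the ubfx now keeps the low shadow_bit + 2 bits instead of shadow_bit + 1, presumably because the sum can carry past bit shadow_bit and the narrower mask would drop that carry. The same widening recurs in the second checker below. As plain Rust, the address math reads roughly as follows (a model of the assembly above, not code from the crate):

// Rust model of the dynasm shadow-address computation (illustrative).
fn shadow_address(shadow_bit: u32, addr: u64) -> u64 {
    let base = 1u64 << shadow_bit;            // mov x1, #1 ; add x1, xzr, x1, lsl #shadow_bit
    let sum = base + (addr >> 3);             // add x1, x1, x0, lsr #3
    sum & ((1u64 << (shadow_bit + 2)) - 1)    // ubfx x1, x1, #0, #(shadow_bit + 2)
}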
@@ -1507,6 +1541,7 @@ impl AsanRuntime {
; b >skip_report
; report:
+; brk 0x22
; stp x29, x30, [sp, #-0x10]!
; mov x29, sp
@@ -1607,7 +1642,7 @@ impl AsanRuntime {
; mov x1, #1
; add x1, xzr, x1, lsl #shadow_bit
; add x1, x1, x0, lsr #3
-; ubfx x1, x1, #0, #(shadow_bit + 1)
+; ubfx x1, x1, #0, #(shadow_bit + 2)
; ldrh w1, [x1, #0]
; and x0, x0, #7
; rev16 w1, w1

File 3 of 3:

@@ -6,7 +6,7 @@ use libafl::utils::find_mapping_for_path;
use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter};
#[cfg(target_arch = "aarch64")]
-use capstone::arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand};
+use capstone::arch::{arm64::{Arm64OperandType, Arm64Extender, Arm64Shift}, ArchOperand::Arm64Operand};
use capstone::{
arch::{self, BuildsCapstone},
Capstone, Insn,
@@ -215,7 +215,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
if options.stalker_enabled() {
for (id, module_name) in modules_to_instrument.iter().enumerate() {
let (lib_start, lib_end) = find_mapping_for_path(module_name);
println!("including range {:x}-{:x}", lib_start, lib_end);
println!("including range {:x}-{:x} for {}", lib_start, lib_end, module_name);
helper
.ranges
.insert(lib_start..lib_end, (id as u16, module_name));
@@ -262,7 +262,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
#[cfg(not(target_arch = "aarch64"))]
todo!("Implement ASAN for non-aarch64 targets");
#[cfg(target_arch = "aarch64")]
-if let Ok((basereg, indexreg, displacement, width)) =
+if let Ok((basereg, indexreg, displacement, width, shift, extender)) =
helper.is_interesting_instruction(address, instr)
{
helper.emit_shadow_check(
@@ -272,6 +272,8 @@ impl<'a> FridaInstrumentationHelper<'a> {
indexreg,
displacement,
width,
+shift,
+extender,
);
}
}
@@ -310,6 +312,8 @@ impl<'a> FridaInstrumentationHelper<'a> {
indexreg: capstone::RegId,
displacement: i32,
width: u32,
+shift: Arm64Shift,
+extender: Arm64Extender,
) {
let writer = output.writer();
@@ -363,11 +367,43 @@ impl<'a> FridaInstrumentationHelper<'a> {
}
}
}
+if let (Arm64Extender::ARM64_EXT_INVALID, Arm64Shift::Invalid) = (extender, shift) {
+writer.put_add_reg_reg_reg(
+Aarch64Register::X0,
+Aarch64Register::X0,
+Aarch64Register::X1,
+);
+} else {
+let extender_encoding: i32 = match extender {
+Arm64Extender::ARM64_EXT_UXTB => 0b000,
+Arm64Extender::ARM64_EXT_UXTH => 0b001,
+Arm64Extender::ARM64_EXT_UXTW => 0b010,
+Arm64Extender::ARM64_EXT_UXTX => 0b011,
+Arm64Extender::ARM64_EXT_SXTB => 0b100,
+Arm64Extender::ARM64_EXT_SXTH => 0b101,
+Arm64Extender::ARM64_EXT_SXTW => 0b110,
+Arm64Extender::ARM64_EXT_SXTX => 0b111,
+_ => -1,
+};
+let (shift_encoding, shift_amount): (i32, u32) = match shift {
+Arm64Shift::Lsl(amount) => (0b00, amount),
+Arm64Shift::Lsr(amount) => (0b01, amount),
+Arm64Shift::Asr(amount) => (0b10, amount),
+_ => (-1, 0),
+};
+if extender_encoding != -1 && shift_amount < 0b1000 {
+// emit add extended register: https://developer.arm.com/documentation/ddi0602/latest/Base-Instructions/ADD--extended-register---Add--extended-register--
+writer.put_bytes(&(0x8b210000 | ((extender_encoding as u32) << 13) | (shift_amount << 10)).to_le_bytes());
+} else if shift_encoding != -1 {
+writer.put_bytes(&(0x8b010000 | ((shift_encoding as u32) << 22) | (shift_amount << 10)).to_le_bytes());
+} else {
+panic!("extender: {:?}, shift: {:?}", extender, shift);
+}
+};
}
let displacement = displacement
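The two base constants above are the fixed bits of the A64 ADD instructions with Rd = x0, Rn = x0, Rm = x1: 0x8b210000 is ADD (extended register), whose option field occupies bits 13-15 and whose left-shift amount occupies the 3-bit imm3 field at bits 10-12 (hence the shift_amount < 0b1000 guard), while 0x8b010000 is ADD (shifted register), with the shift type at bits 22-23 and a 6-bit amount at bits 10-15. The registers are hard-coded because the base and index values were already staged into x0 and x1, as the put_add_reg_reg_reg fallback shows. A standalone check of the field packing (an illustrative encoder mirroring the expressions above):

// Encode `add x0, x0, x1, <ext|shift>` as the put_bytes calls above do.
fn add_extended(option: u32, amount: u32) -> u32 {
    assert!(option < 8 && amount < 8); // option: bits 13-15, imm3: bits 10-12
    0x8b21_0000 | (option << 13) | (amount << 10)
}

fn add_shifted(shift_type: u32, amount: u32) -> u32 {
    assert!(shift_type < 4 && amount < 64); // shift: bits 22-23, imm6: bits 10-15
    0x8b01_0000 | (shift_type << 22) | (amount << 10)
}

fn main() {
    assert_eq!(add_extended(0b010, 0), 0x8b21_4000); // add x0, x0, w1, uxtw
    assert_eq!(add_shifted(0b00, 3), 0x8b01_0c00);   // add x0, x0, x1, lsl #3
}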
@@ -512,7 +548,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
&self,
_address: u64,
instr: &Insn,
-) -> Result<(capstone::RegId, capstone::RegId, i32, u32), ()> {
+) -> Result<(capstone::RegId, capstone::RegId, i32, u32, Arm64Shift, Arm64Extender), ()> {
// We have to ignore these instructions. Simulating them with their side effects is
// complex, to say the least.
match instr.mnemonic().unwrap() {
@@ -539,6 +575,8 @@ impl<'a> FridaInstrumentationHelper<'a> {
opmem.index(),
opmem.disp(),
self.get_instruction_width(instr, &operands),
+arm64operand.shift,
+arm64operand.ext,
));
}
}
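is_interesting_instruction now also hands back the memory operand's shift and extender, read straight off capstone's arch detail. A minimal sketch of extracting those fields from an AArch64 operand, assuming the capstone crate's Arm64Operand layout implied by the code above:

use capstone::arch::arm64::{Arm64Extender, Arm64Operand, Arm64OperandType, Arm64Shift};
use capstone::RegId;

// Illustrative: the per-operand pieces returned for an "interesting" access
// (the width is computed separately, via get_instruction_width above).
fn mem_operand_parts(op: &Arm64Operand) -> Option<(RegId, RegId, i32, Arm64Shift, Arm64Extender)> {
    if let Arm64OperandType::Mem(opmem) = &op.op_type {
        Some((opmem.base(), opmem.index(), opmem.disp(), op.shift, op.ext))
    } else {
        None
    }
}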