Frida Address Sanitizer for x86_64 (#331)

* remove libafl_tests

* fmt

* fix

* fix

* fix

* first

* width

* start working on runtime side

* experimental c code for generate_shadow_check_function

* generate shadow_check_blob

* add

* debugging

* fix

* passes assert tests

* cargo fmt

* generate_shadow_check_blob, untested

* save flags

* add

* make register numbers a const

* register frames?

* comment

* debugging memcpy

* fix a bug, more to come

* finally error removed

* finally working function hooking & clean up

* fix for arm & update stub

* fix

* blob

* blob_check_mem works? (at least no errors) & fmt

* add a link to show how the asm code is generated

* put probe code for aarch64 back & clippy

* fmt

* still blob emitting errors

* fmt

* now that blob works?

* stack alignment

* testing speed with hook_function only

* comment some printlns out

* small fix: ignore rep, jmp to current_report_impl iff blob_check_mems are emitted

* make rip accessible by pc()

* Program counter accessors for both architectures

* fmt

* fix

* fix offset

* retrieve accessed memory addr, r/w rip

* inspect the fault triggering instruction

* AsanError Classification

* clippy fixes

* pass basereg/indexreg/disp to AsanErrors

* update asanerrors for amd64

* clippy

* fmt

* use frida/frida-rust

* just use 44

* fix debug build

* fix

* fix

* crates.io

* change

* fmt
Toka 2021-11-05 14:37:28 +09:00 committed by GitHub
parent f0daeb377e
commit bf67b6ca76
9 changed files with 1321 additions and 313 deletions


@ -29,8 +29,8 @@ reqwest = { version = "0.11.4", features = ["blocking"] }
[dependencies]
libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", "llmp_bind_public" ] } #, "llmp_small_maps", "llmp_debug"]}
-capstone = "0.8.0"
+capstone = "0.10.0"
-frida-gum = { version = "0.5.2", features = [ "auto-download", "event-sink", "invocation-listener"] }
+frida-gum = { version = "0.6.1", features = [ "auto-download", "event-sink", "invocation-listener"] }
libafl_frida = { path = "../../libafl_frida", version = "0.6.1", features = ["cmplog"] }
libafl_targets = { path = "../../libafl_targets", version = "0.6.1" , features = ["sancov_cmplog"] }
lazy_static = "1.4.0"


@ -133,7 +133,7 @@ HARNESS_EXPORTS extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_
}
-func1();
+// func1();
std::vector<unsigned char> v(data, data + size);
if (png_sig_cmp(v.data(), 0, kPngHeaderSize)) {


@ -156,6 +156,7 @@ where
) -> Self {
let mut stalker = Stalker::new(gum);
#[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
for range in helper.ranges().gaps(&(0..usize::MAX)) {
println!("excluding range: {:x}-{:x}", range.start, range.end);
stalker.exclude(&MemoryRange::new(

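For context, the loop above excludes from the Stalker every address range that is not covered by an instrumented module, by iterating the gaps of the helper's range map. A minimal standalone sketch of that gaps() behaviour, with made-up ranges (not code from this commit):

use rangemap::RangeMap;

fn main() {
    // Pretend a single instrumented module occupies 0x1000..0x2000.
    let mut ranges: RangeMap<usize, u16> = RangeMap::new();
    ranges.insert(0x1000..0x2000, 0);
    // gaps() yields every sub-range of 0..0x4000 not covered by the map;
    // those are the ranges the executor excludes from stalking.
    for gap in ranges.gaps(&(0..0x4000)) {
        println!("excluding range: {:x}-{:x}", gap.start, gap.end);
    }
}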

@ -27,11 +27,11 @@ hashbrown = "0.11"
libloading = "0.7.0"
rangemap = "0.1.10"
frida-gum-sys = { version = "0.3", features = [ "auto-download", "event-sink", "invocation-listener"] }
-frida-gum = { version = "0.5.2", features = [ "auto-download", "event-sink", "invocation-listener"] }
+frida-gum = { version = "0.6.1", features = [ "auto-download", "event-sink", "invocation-listener"] }
core_affinity = { version = "0.5", git = "https://github.com/s1341/core_affinity_rs", rev = "6648a7a" }
regex = "1.4"
dynasmrt = "1.0.1"
-capstone = "0.8.0"
+capstone = "0.10.0"
color-backtrace ={ version = "0.5", features = [ "resolve-modules" ] }
termcolor = "1.1.2"
serde = "1.0"


@ -27,11 +27,8 @@ pub(crate) struct Allocator {
allocations: HashMap<usize, AllocationMetadata>,
shadow_pages: RangeSet<usize>,
allocation_queue: HashMap<usize, Vec<AllocationMetadata>>,
-#[cfg(target_arch = "aarch64")]
largest_allocation: usize,
-#[cfg(target_arch = "aarch64")]
base_mapping_addr: usize,
-#[cfg(target_arch = "aarch64")]
current_mapping_addr: usize,
}
@ -68,6 +65,7 @@ impl Allocator {
// probe to find a usable shadow bit:
let mut shadow_bit: usize = 0;
#[cfg(target_arch = "aarch64")]
for try_shadow_bit in &[46usize, 36usize] {
let addr: usize = 1 << try_shadow_bit;
if unsafe {
@ -89,9 +87,36 @@ impl Allocator {
break;
}
}
// x86_64 userspace only goes up to 0x7fff-ffff-ffff, so shadow bit 46 is not available: it would need 0x4000-0000-0000 - 0xc000-0000-0000.
// We also want to avoid 0x5555-xxxx-xxxx, where programs are mapped, so 45 is not available either: it would need 0x2000-0000-0000 - 0x6000-0000-0000.
// This memory map is for amd64 Linux.
#[cfg(all(target_arch = "x86_64", target_os = "linux"))]
{
let try_shadow_bit: usize = 44;
let addr: usize = 1 << try_shadow_bit;
if unsafe {
mmap(
addr as *mut c_void,
page_size,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_PRIVATE
| ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok()
{
shadow_bit = try_shadow_bit;
}
}
assert!(shadow_bit != 0);
// attempt to pre-map the entire shadow-memory space
let addr: usize = 1 << shadow_bit;
let pre_allocated_shadow = unsafe {
mmap(
@ -117,11 +142,8 @@ impl Allocator {
allocations: HashMap::new(),
shadow_pages: RangeSet::new(),
allocation_queue: HashMap::new(),
-#[cfg(target_arch = "aarch64")]
largest_allocation: 0,
-#[cfg(target_arch = "aarch64")]
base_mapping_addr: addr + addr + addr,
-#[cfg(target_arch = "aarch64")]
current_mapping_addr: addr + addr + addr,
}
}
@ -144,7 +166,6 @@ impl Allocator {
(value / self.page_size) * self.page_size
}
-#[cfg(target_arch = "aarch64")]
fn find_smallest_fit(&mut self, size: usize) -> Option<AllocationMetadata> {
let mut current_size = size;
while current_size <= self.largest_allocation {
@ -159,12 +180,11 @@ impl Allocator {
None
}
-#[cfg(target_arch = "aarch64")]
#[must_use]
pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void {
let mut is_malloc_zero = false;
let size = if size == 0 {
-println!("zero-sized allocation!");
+// println!("zero-sized allocation!");
is_malloc_zero = true;
16
} else {
@ -187,6 +207,7 @@ impl Allocator {
}
metadata
} else {
// println!("{:x}, {:x}", self.current_mapping_addr, rounded_up_size);
let mapping = match mmap(
self.current_mapping_addr as *mut c_void,
rounded_up_size,
@ -214,7 +235,6 @@ impl Allocator {
actual_size: rounded_up_size,
..AllocationMetadata::default()
};
if self.options.enable_asan_allocation_backtraces {
metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved());
}
@ -232,11 +252,10 @@ impl Allocator {
self.allocations
.insert(metadata.address + self.page_size, metadata);
-//println!("serving address: {:?}, size: {:x}", address, size);
+// println!("serving address: {:?}, size: {:x}", address, size);
address
}
-#[cfg(target_arch = "aarch64")]
pub unsafe fn release(&mut self, ptr: *mut c_void) {
let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) {
metadata
@ -320,7 +339,6 @@ impl Allocator {
}
}
-#[cfg(target_arch = "aarch64")]
pub fn get_usable_size(&self, ptr: *mut c_void) -> usize {
match self.allocations.get(&(ptr as usize)) {
Some(metadata) => metadata.size,
@ -334,14 +352,14 @@ impl Allocator {
}
fn unpoison(start: usize, size: usize) {
-//println!("unpoisoning {:x} for {:x}", start, size / 8 + 1);
+// println!("unpoisoning {:x} for {:x}", start, size / 8 + 1);
unsafe {
-//println!("memset: {:?}", start as *mut c_void);
+// println!("memset: {:?}", start as *mut c_void);
memset(start as *mut c_void, 0xff, size / 8);
let remainder = size % 8;
if remainder > 0 {
-//println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
+// println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
memset(
(start + size / 8) as *mut c_void,
(0xff << (8 - remainder)) & 0xff,
@ -352,14 +370,14 @@ impl Allocator {
}
pub fn poison(start: usize, size: usize) {
-//println!("poisoning {:x} for {:x}", start, size / 8 + 1);
+// println!("poisoning {:x} for {:x}", start, size / 8 + 1);
unsafe {
-//println!("memset: {:?}", start as *mut c_void);
+// println!("memset: {:?}", start as *mut c_void);
memset(start as *mut c_void, 0x00, size / 8);
let remainder = size % 8;
if remainder > 0 {
-//println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
+// println!("remainder: {:x}, offset: {:x}", remainder, start + size / 8);
memset((start + size / 8) as *mut c_void, 0x00, 1);
}
}
@ -381,7 +399,12 @@ impl Allocator {
let shadow_end =
self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
-//println!("range: {:x}-{:x}, pagesize: {}", range.start, range.end, self.page_size);
/*
println!(
"range: {:x}-{:x}, pagesize: {}",
range.start, range.end, self.page_size
);
*/
unsafe {
mmap(
range.start as *mut c_void,
@ -406,12 +429,10 @@ impl Allocator {
(shadow_mapping_start, (end - start) / 8)
}
-#[cfg(target_arch = "aarch64")]
pub fn map_to_shadow(&self, start: usize) -> usize {
map_to_shadow!(self, start)
}
-#[cfg(target_arch = "aarch64")]
#[inline]
pub fn is_managed(&self, ptr: *mut c_void) -> bool {
//self.allocations.contains_key(&(ptr as usize))

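To make the shadow-bit comment above concrete, here is a small standalone sketch of the address arithmetic (the real mapping is done by the map_to_shadow! macro in alloc.rs; the one-shadow-byte-per-8-application-bytes formula below is the usual ASan convention and is only illustrative):

fn main() {
    // With shadow_bit = 44 the shadow region starts at 1 << 44 = 0x1000_0000_0000,
    // below the typical PIE load area around 0x5555_5555_0000 and well under the
    // 0x7fff_ffff_ffff top of x86_64 Linux userspace, so a MAP_FIXED probe there can succeed.
    let shadow_bit: usize = 44;
    let shadow_base: usize = 1 << shadow_bit;
    // Illustrative ASan-style mapping: one shadow byte tracks 8 application bytes.
    let app_addr: usize = 0x5555_5555_9000;
    let shadow_addr = shadow_base + (app_addr >> 3);
    println!("shadow base: {:#x}", shadow_base);
    println!("app {:#x} -> shadow {:#x}", app_addr, shadow_addr);
}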

@ -20,17 +20,21 @@ use serde::{Deserialize, Serialize};
use std::io::Write;
use termcolor::{Color, ColorSpec, WriteColor};
-use crate::{alloc::AllocationMetadata, FridaOptions};
#[cfg(target_arch = "x86_64")]
use crate::asan_rt::ASAN_SAVE_REGISTER_NAMES;
use crate::{alloc::AllocationMetadata, asan_rt::ASAN_SAVE_REGISTER_COUNT, FridaOptions};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct AsanReadWriteError {
-pub registers: [usize; 32],
+pub registers: [usize; ASAN_SAVE_REGISTER_COUNT],
pub pc: usize,
-pub fault: (u16, u16, usize, usize),
+pub fault: (Option<u16>, Option<u16>, usize, usize),
pub metadata: AllocationMetadata,
pub backtrace: Backtrace,
}
#[allow(clippy::type_complexity)]
#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)]
pub(crate) enum AsanError {
OobRead(AsanReadWriteError),
@ -39,10 +43,31 @@ pub(crate) enum AsanError {
WriteAfterFree(AsanReadWriteError),
DoubleFree((usize, AllocationMetadata, Backtrace)),
UnallocatedFree((usize, Backtrace)),
-Unknown(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)),
Unknown(
(
[usize; ASAN_SAVE_REGISTER_COUNT],
usize,
(Option<u16>, Option<u16>, usize, usize),
Backtrace,
),
),
Leak((usize, AllocationMetadata)),
-StackOobRead(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)),
-StackOobWrite(([usize; 32], usize, (u16, u16, usize, usize), Backtrace)),
StackOobRead(
(
[usize; ASAN_SAVE_REGISTER_COUNT],
usize,
(Option<u16>, Option<u16>, usize, usize),
Backtrace,
),
),
StackOobWrite(
(
[usize; ASAN_SAVE_REGISTER_COUNT],
usize,
(Option<u16>, Option<u16>, usize, usize),
Backtrace,
),
),
BadFuncArgRead((String, usize, usize, usize, Backtrace)),
BadFuncArgWrite((String, usize, usize, usize, Backtrace)),
}
@ -160,12 +185,13 @@ impl AsanErrors {
#[allow(clippy::non_ascii_literal)]
writeln!(output, "{:━^100}", " REGISTERS ").unwrap();
#[cfg(target_arch = "aarch64")]
for reg in 0..=30 {
-if reg == basereg {
+if basereg.is_some() && reg == basereg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Red)))
.unwrap();
-} else if reg == indexreg {
+} else if indexreg.is_some() && reg == indexreg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))
.unwrap();
@ -181,15 +207,52 @@ impl AsanErrors {
writeln!(output).unwrap();
}
}
#[cfg(target_arch = "aarch64")]
writeln!(output, "pc : 0x{:016x} ", error.pc).unwrap();
#[cfg(target_arch = "x86_64")]
for (reg, name) in ASAN_SAVE_REGISTER_NAMES
.iter()
.enumerate()
.take(ASAN_SAVE_REGISTER_COUNT)
{
if basereg.is_some() && reg == basereg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Red)))
.unwrap();
} else if indexreg.is_some() && reg == indexreg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))
.unwrap();
}
write!(output, "{}: 0x{:016x} ", name, error.registers[reg]).unwrap();
output.reset().unwrap();
if reg % 4 == 3 {
writeln!(output).unwrap();
}
}
#[cfg(target_arch = "x86_64")]
writeln!(output, "rip: 0x{:016x}", error.pc).unwrap();
#[allow(clippy::non_ascii_literal)]
writeln!(output, "{:━^100}", " CODE ").unwrap();
#[cfg(target_arch = "aarch64")]
let mut cs = Capstone::new()
.arm64()
.mode(capstone::arch::arm64::ArchMode::Arm)
.build()
.unwrap();
#[cfg(target_arch = "x86_64")]
let mut cs = Capstone::new()
.x86()
.mode(capstone::arch::x86::ArchMode::Mode64)
.detail(true)
.build()
.expect("Failed to create Capstone object");
cs.set_skipdata(true).expect("failed to set skipdata");
let start_pc = error.pc - 4 * 5;
@ -380,12 +443,14 @@ impl AsanErrors {
#[allow(clippy::non_ascii_literal)]
writeln!(output, "{:━^100}", " REGISTERS ").unwrap();
#[cfg(target_arch = "aarch64")]
for reg in 0..=30 {
-if reg == basereg {
+if basereg.is_some() && reg == basereg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Red)))
.unwrap();
-} else if reg == indexreg {
+} else if indexreg.is_some() && reg == indexreg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))
.unwrap();
@ -396,15 +461,53 @@ impl AsanErrors {
writeln!(output).unwrap();
}
}
#[cfg(target_arch = "aarch64")]
writeln!(output, "pc : 0x{:016x} ", pc).unwrap();
#[cfg(target_arch = "x86_64")]
for reg in 0..ASAN_SAVE_REGISTER_COUNT {
if basereg.is_some() && reg == basereg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Red)))
.unwrap();
} else if indexreg.is_some() && reg == indexreg.unwrap() as usize {
output
.set_color(ColorSpec::new().set_fg(Some(Color::Yellow)))
.unwrap();
}
write!(
output,
"{}: 0x{:016x} ",
ASAN_SAVE_REGISTER_NAMES[reg], registers[reg]
)
.unwrap();
output.reset().unwrap();
if reg % 4 == 3 {
writeln!(output).unwrap();
}
}
#[cfg(target_arch = "x86_64")]
writeln!(output, "Rip: 0x{:016x}", pc).unwrap();
#[allow(clippy::non_ascii_literal)]
writeln!(output, "{:━^100}", " CODE ").unwrap();
#[cfg(target_arch = "aarch64")]
let mut cs = Capstone::new()
.arm64()
.mode(capstone::arch::arm64::ArchMode::Arm)
.build()
.unwrap();
#[cfg(target_arch = "x86_64")]
let mut cs = Capstone::new()
.x86()
.mode(capstone::arch::x86::ArchMode::Mode64)
.detail(true)
.build()
.expect("Failed to create Capstone object");
cs.set_skipdata(true).expect("failed to set skipdata");
let start_pc = pc - 4 * 5;

File diff suppressed because it is too large.


@ -16,6 +16,15 @@ use capstone::{
Capstone, Insn,
};
#[cfg(target_arch = "x86_64")]
use capstone::{
arch::{self, x86::X86OperandType, ArchOperand::X86Operand, BuildsCapstone},
Capstone, Insn, RegId,
};
#[cfg(target_arch = "aarch64")]
use num_traits::cast::FromPrimitive;
#[cfg(target_arch = "x86_64")]
use frida_gum::instruction_writer::X86Register;
#[cfg(target_arch = "aarch64")]
@ -30,8 +39,6 @@ use frida_gum::{
use frida_gum::CpuContext;
use frida_gum::{Gum, Module, PageProtection};
-#[cfg(target_arch = "aarch64")]
-use num_traits::cast::FromPrimitive;
use rangemap::RangeMap;
@ -98,11 +105,9 @@ pub struct FridaInstrumentationHelper<'a> {
map: [u8; MAP_SIZE],
previous_pc: [u64; 1],
current_log_impl: u64,
-#[cfg(target_arch = "aarch64")]
current_report_impl: u64,
/// Transformer that has to be passed to FridaInProcessExecutor
transformer: Option<Transformer<'a>>,
-#[cfg(target_arch = "aarch64")]
capstone: Capstone,
#[cfg(unix)]
asan_runtime: AsanRuntime,
@ -125,16 +130,10 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> {
self.asan_runtime.register_thread();
}
-#[cfg(not(target_arch = "aarch64"))]
-fn pre_exec<I: Input + HasTargetBytes>(&mut self, _input: &I) {}
-#[cfg(target_arch = "aarch64")]
fn pre_exec<I: Input + HasTargetBytes>(&mut self, input: &I) {
-#[cfg(target_arch = "aarch64")]
let target_bytes = input.target_bytes();
let slice = target_bytes.as_slice();
//println!("target_bytes: {:#x}: {:02x?}", slice.as_ptr() as usize, slice);
-#[cfg(target_arch = "aarch64")]
if self.options.asan_enabled() {
self.asan_runtime
.unpoison(slice.as_ptr() as usize, slice.len());
@ -150,7 +149,6 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> {
DrCovWriter::new(&filename, &self.ranges, &mut self.drcov_basic_blocks).write();
}
-#[cfg(target_arch = "aarch64")]
if self.options.asan_enabled() {
if self.options.asan_detect_leaks() {
self.asan_runtime.check_for_leaks();
@ -298,7 +296,6 @@ impl<'a> FridaInstrumentationHelper<'a> {
map: [0u8; MAP_SIZE],
previous_pc: [0u64; 1],
current_log_impl: 0,
-#[cfg(target_arch = "aarch64")]
current_report_impl: 0,
transformer: None,
#[cfg(target_arch = "aarch64")]
@ -308,6 +305,13 @@ impl<'a> FridaInstrumentationHelper<'a> {
.detail(true)
.build()
.expect("Failed to create Capstone object"),
#[cfg(target_arch = "x86_64")]
capstone: Capstone::new()
.x86()
.mode(arch::x86::ArchMode::Mode64)
.detail(true)
.build()
.expect("Failed to create Capstone object"),
#[cfg(not(windows))]
asan_runtime: AsanRuntime::new(options.clone()),
#[cfg(feature = "cmplog")]
@ -322,6 +326,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
for (i, module) in helper.module_map.values().iter().enumerate() {
let range = module.range();
let start = range.base_address().0 as usize;
// println!("start: {:x}", start);
helper
.ranges
.insert(start..(start + range.size()), (i as u16, module.path()));
@ -330,7 +335,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
for (module_name, offset) in suppressed_specifiers {
let module_details = ModuleDetails::with_name(module_name).unwrap();
let lib_start = module_details.range().base_address().0 as usize;
-println!("removing address: {:#x}", lib_start + offset);
+// println!("removing address: {:#x}", lib_start + offset);
helper
.ranges
.remove((lib_start + offset)..(lib_start + offset + 4));
@ -347,12 +352,19 @@ impl<'a> FridaInstrumentationHelper<'a> {
for instruction in basic_block {
let instr = instruction.instr();
let address = instr.address();
-//println!("block @ {:x} transformed to {:x}", address, output.writer().pc());
+// println!("block @ {:x} transformed to {:x}", address, output.writer().pc());
-//println!("address: {:x} contains: {:?}", address, helper.ranges.contains_key(&(address as usize)));
/*
println!(
"address: {:x} contains: {:?}",
address,
helper.ranges.contains_key(&(address as usize))
);
*/
// println!("Ranges: {:#?}", helper.ranges);
if helper.ranges.contains_key(&(address as usize)) {
if first {
first = false;
-//println!("block @ {:x} transformed to {:x}", address, output.writer().pc());
+// println!("block @ {:x} transformed to {:x}", address, output.writer().pc());
if helper.options().coverage_enabled() {
helper.emit_coverage_mapping(address, &output);
}
@ -371,8 +383,15 @@ impl<'a> FridaInstrumentationHelper<'a> {
}
if helper.options().asan_enabled() {
-#[cfg(not(target_arch = "aarch64"))]
-todo!("Implement ASAN for non-aarch64 targets");
#[cfg(target_arch = "x86_64")]
if let Ok((segment, width, basereg, indexreg, scale, disp)) =
helper.asan_is_interesting_instruction(address, instr)
{
helper.emit_shadow_check(
address, &output, segment, width, basereg, indexreg, scale,
disp,
);
}
#[cfg(target_arch = "aarch64")]
if let Ok((basereg, indexreg, displacement, width, shift, extender)) =
helper.asan_is_interesting_instruction(address, instr)
@ -444,6 +463,53 @@ impl<'a> FridaInstrumentationHelper<'a> {
Aarch64Register::from_u32(regint as u32).unwrap()
}
// frida registers: https://docs.rs/frida-gum/0.4.0/frida_gum/instruction_writer/enum.X86Register.html
// capstone registers: https://docs.rs/capstone-sys/0.14.0/capstone_sys/x86_reg/index.html
#[cfg(target_arch = "x86_64")]
#[must_use]
#[inline]
#[allow(clippy::unused_self)]
pub fn writer_register(&self, reg: RegId) -> X86Register {
let regint: u16 = reg.0;
match regint {
19 => X86Register::Eax,
22 => X86Register::Ecx,
24 => X86Register::Edx,
21 => X86Register::Ebx,
30 => X86Register::Esp,
20 => X86Register::Ebp,
29 => X86Register::Esi,
23 => X86Register::Edi,
226 => X86Register::R8d,
227 => X86Register::R9d,
228 => X86Register::R10d,
229 => X86Register::R11d,
230 => X86Register::R12d,
231 => X86Register::R13d,
232 => X86Register::R14d,
233 => X86Register::R15d,
26 => X86Register::Eip,
35 => X86Register::Rax,
38 => X86Register::Rcx,
40 => X86Register::Rdx,
37 => X86Register::Rbx,
44 => X86Register::Rsp,
36 => X86Register::Rbp,
43 => X86Register::Rsi,
39 => X86Register::Rdi,
106 => X86Register::R8,
107 => X86Register::R9,
108 => X86Register::R10,
109 => X86Register::R11,
110 => X86Register::R12,
111 => X86Register::R13,
112 => X86Register::R14,
113 => X86Register::R15,
41 => X86Register::Rip,
_ => X86Register::None, // Ignore Xax..Xip
}
}
#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
#[inline]
/// Emit the instrumentation code which is responsible for operand value extraction and cmplog map population
@ -762,6 +828,160 @@ impl<'a> FridaInstrumentationHelper<'a> {
));
}
#[inline]
#[allow(clippy::too_many_lines)]
#[allow(clippy::too_many_arguments)]
pub fn emit_shadow_check(
&mut self,
address: u64,
output: &StalkerOutput,
_segment: RegId,
width: u8,
basereg: RegId,
indexreg: RegId,
scale: i32,
disp: i64,
) {
let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE);
let writer = output.writer();
let true_rip = address;
let basereg = if basereg.0 == 0 {
None
} else {
let reg = self.writer_register(basereg);
Some(reg)
};
let indexreg = if indexreg.0 == 0 {
None
} else {
let reg = self.writer_register(indexreg);
Some(reg)
};
let scale = match scale {
2 => 1,
4 => 2,
8 => 3,
_ => 0,
};
if self.current_report_impl == 0
|| !writer.can_branch_directly_to(self.current_report_impl)
|| !writer.can_branch_directly_between(writer.pc() + 128, self.current_report_impl)
{
let after_report_impl = writer.code_offset() + 2;
#[cfg(target_arch = "x86_64")]
writer.put_jmp_near_label(after_report_impl);
#[cfg(target_arch = "aarch64")]
writer.put_b_label(after_report_impl);
self.current_report_impl = writer.pc();
#[cfg(unix)]
writer.put_bytes(self.asan_runtime.blob_report());
writer.put_label(after_report_impl);
}
/* Save registers that we'll use later in shadow_check_blob
| addr | rip |
| Rcx | Rax |
| Rsi | Rdx |
Old Rsp - (redzone_size) -> | flags | Rdi |
| | |
Old Rsp -> | | |
*/
writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, -(redzone_size));
writer.put_pushfx();
writer.put_push_reg(X86Register::Rdi);
writer.put_push_reg(X86Register::Rsi);
writer.put_push_reg(X86Register::Rdx);
writer.put_push_reg(X86Register::Rcx);
writer.put_push_reg(X86Register::Rax);
/*
Things are a bit different when Rip is either base register or index register.
Suppose we have an instruction like
`bnd jmp qword ptr [rip + 0x2e4b5]`
We can't just emit code like
`mov rdi, rip` to get RIP loaded into RDI,
because this RIP is NOT the original RIP (which is usually within .text) anymore; rather, it points to the memory allocated by the Frida Stalker.
Please refer to https://frida.re/docs/stalker/ for details.
*/
// Init Rdi
match basereg {
Some(reg) => match reg {
X86Register::Rip => {
writer.put_mov_reg_address(X86Register::Rdi, true_rip);
}
_ => {
writer.put_mov_reg_reg(X86Register::Rdi, basereg.unwrap());
}
},
None => {
writer.put_xor_reg_reg(X86Register::Rdi, X86Register::Rdi);
}
}
match indexreg {
Some(reg) => match reg {
X86Register::Rip => {
writer.put_mov_reg_address(X86Register::Rsi, true_rip);
}
_ => {
writer.put_mov_reg_reg(X86Register::Rsi, indexreg.unwrap());
}
},
None => {
writer.put_xor_reg_reg(X86Register::Rsi, X86Register::Rsi);
}
}
// Scale
if scale > 0 {
writer.put_shl_reg_u8(X86Register::Rsi, scale);
}
// Finally set Rdi to base + index * scale + disp
writer.put_add_reg_reg(X86Register::Rdi, X86Register::Rsi);
writer.put_lea_reg_reg_offset(X86Register::Rdi, X86Register::Rdi, disp);
writer.put_mov_reg_address(X86Register::Rsi, true_rip); // load true_rip into rsi in case we need it in handle_trap
writer.put_push_reg(X86Register::Rsi); // save true_rip
writer.put_push_reg(X86Register::Rdi); // save accessed_address
#[cfg(unix)]
let checked: bool = match width {
1 => writer.put_bytes(self.asan_runtime.blob_check_mem_byte()),
2 => writer.put_bytes(self.asan_runtime.blob_check_mem_halfword()),
4 => writer.put_bytes(self.asan_runtime.blob_check_mem_dword()),
8 => writer.put_bytes(self.asan_runtime.blob_check_mem_qword()),
16 => writer.put_bytes(self.asan_runtime.blob_check_mem_16bytes()),
_ => false,
};
if checked {
writer.put_jmp_address(self.current_report_impl);
for _ in 0..10 {
// shadow_check_blob's done label will land somewhere in these nops
// on amd64 a jump can take 10 bytes at most, which is why 10 bytes of nops are emitted
writer.put_nop();
}
}
writer.put_pop_reg(X86Register::Rdi);
writer.put_pop_reg(X86Register::Rsi);
writer.put_pop_reg(X86Register::Rax);
writer.put_pop_reg(X86Register::Rcx);
writer.put_pop_reg(X86Register::Rdx);
writer.put_pop_reg(X86Register::Rsi);
writer.put_pop_reg(X86Register::Rdi);
writer.put_popfx();
writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, redzone_size);
}
#[cfg(target_arch = "aarch64")]
#[inline]
fn emit_shadow_check(
@ -1092,6 +1312,68 @@ impl<'a> FridaInstrumentationHelper<'a> {
Err(())
}
#[cfg(target_arch = "x86_64")]
#[inline]
fn asan_is_interesting_instruction(
&self,
_address: u64,
instr: &Insn,
) -> Result<(RegId, u8, RegId, RegId, i32, i64), ()> {
let operands = self
.capstone
.insn_detail(instr)
.unwrap()
.arch_detail()
.operands();
// Ignore lea instruction
// put nop into the white-list so that instructions
// like `nop dword [rax + rax]` do not get caught.
match instr.mnemonic().unwrap() {
"lea" | "nop" => return Err(()),
_ => (),
}
// This is a TODO! In this case, both the src and the dst are mem operands,
// so we would need to return two operands?
if instr.mnemonic().unwrap().starts_with("rep") {
return Err(());
}
for operand in operands {
if let X86Operand(x86operand) = operand {
if let X86OperandType::Mem(opmem) = x86operand.op_type {
/*
println!(
"insn: {:#?} {:#?} width: {}, segment: {:#?}, base: {:#?}, index: {:#?}, scale: {}, disp: {}",
insn_id,
instr,
x86operand.size,
opmem.segment(),
opmem.base(),
opmem.index(),
opmem.scale(),
opmem.disp(),
);
*/
if opmem.segment() == RegId(0) {
return Ok((
opmem.segment(),
x86operand.size,
opmem.base(),
opmem.index(),
opmem.scale(),
opmem.disp(),
));
}
}
}
}
Err(())
}
#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
#[inline]
/// Check if the current instruction is a cmplog-relevant one (any opcode which sets the flags)
@ -1185,7 +1467,7 @@ impl<'a> FridaInstrumentationHelper<'a> {
fn emit_coverage_mapping(&mut self, address: u64, output: &StalkerOutput) {
let writer = output.writer();
#[allow(clippy::cast_possible_wrap)] // gum redzone size is u32, we need an offset as i32.
-let redzone_size = frida_gum_sys::GUM_RED_ZONE_SIZE as i32;
+let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE);
if self.current_log_impl == 0
|| !writer.can_branch_directly_to(self.current_log_impl)
|| !writer.can_branch_directly_between(writer.pc() + 128, self.current_log_impl)

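For readers following emit_shadow_check above: before jumping into the check blob, the emitted code rebuilds the accessed address as base + index * scale + disp, encoding the scale as the shift amount passed to put_shl_reg_u8. A plain-Rust sketch of that computation (the function and the values are hypothetical, for illustration only):

// Sketch of the effective-address computation that the emitted mov/shl/add/lea
// sequence performs before pushing the address for the shadow-check blob.
fn effective_address(base: u64, index: u64, scale: i32, disp: i64) -> u64 {
    // The helper maps scale 2/4/8 to shift counts 1/2/3; anything else means no shift.
    let shift = match scale {
        2 => 1,
        4 => 2,
        8 => 3,
        _ => 0,
    };
    base.wrapping_add(index << shift).wrapping_add(disp as u64)
}

fn main() {
    // e.g. mov rax, qword ptr [rdi + rsi*8 + 0x10] with rdi = 0x7fff_0000 and rsi = 4
    let addr = effective_address(0x7fff_0000, 4, 8, 0x10);
    println!("accessed address: {:#x}", addr);
}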

@ -63,10 +63,6 @@ impl FridaOptions {
match name {
"asan" => {
options.enable_asan = value.parse().unwrap();
-#[cfg(not(target_arch = "aarch64"))]
-if options.enable_asan {
-panic!("ASAN is not currently supported on targets other than aarch64");
-}
}
"asan-detect-leaks" => {
options.enable_asan_leak_detection = value.parse().unwrap();