Frida shadow fix (#425)

* map_to_shadow

* fix map_to_shadow

* aarch64 change?

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* use

* revert

* s1341's change

* Fix shadow calculation in instrumented code

* Fix asan error output to be more accurate

Co-authored-by: s1341 <github@shmarya.net>
Author: Dongjia Zhang, 2021-12-20 18:51:45 +09:00 (committed by GitHub)
parent 1f24ad0b65
commit 2aa0ca5ef1
4 changed files with 222 additions and 184 deletions


@@ -39,7 +39,7 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;

 macro_rules! map_to_shadow {
     ($self:expr, $address:expr) => {
-        (($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1)
+        $self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
     };
 }
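
Note: the old macro applied the `(1 << (shadow_bit + 1)) - 1` mask *after* adding `shadow_offset`, so for pointers whose `address >> 3` exceeds the mask width the shadow base could be stripped away; the new form masks only the scaled address and then adds the offset. A quick standalone illustration (example values only — the real `shadow_bit`/`shadow_offset` come from the probing code below, and `shadow_offset == 1 << shadow_bit` is assumed here):

```rust
// Illustrative sketch: SHADOW_BIT/SHADOW_OFFSET are assumed example values,
// not the ones the allocator actually probes at runtime.
const SHADOW_BIT: usize = 36;
const SHADOW_OFFSET: usize = 1 << SHADOW_BIT;

// Old behaviour: the mask is applied after adding the offset.
fn map_to_shadow_old(address: usize) -> usize {
    ((address >> 3) + SHADOW_OFFSET) & ((1usize << (SHADOW_BIT + 1)) - 1)
}

// New behaviour: only the scaled address is masked, the offset stays intact.
fn map_to_shadow_new(address: usize) -> usize {
    SHADOW_OFFSET + ((address >> 3) & ((1usize << (SHADOW_BIT + 1)) - 1))
}

fn main() {
    // For a high user-space pointer, (address >> 3) already overflows the
    // 37-bit mask, and the two forms disagree.
    let address: usize = 0x7fff_1234_5678;
    println!("old: {:#x}", map_to_shadow_old(address)); // 0xfe2468acf: falls below SHADOW_OFFSET
    println!("new: {:#x}", map_to_shadow_new(address)); // 0x2fe2468acf: shadow base kept intact
}
```
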
@@ -67,18 +67,9 @@ impl Allocator {
         #[allow(clippy::cast_sign_loss)]
         let page_size = ret as usize;
         // probe to find a usable shadow bit:
-        #[cfg(any(
-            target_arch = "aarch64",
-            all(target_arch = "x86_64", target_os = "linux")
-        ))]
-        let mut shadow_bit: usize = 0;
-        #[cfg(not(any(
-            target_arch = "aarch64",
-            all(target_arch = "x86_64", target_os = "linux")
-        )))]
-        let shadow_bit = 0;
+        let mut shadow_bit = 0;

-        #[cfg(target_arch = "aarch64")]
+        #[cfg(all(target_arch = "aarch64", target_os = "android"))]
         for try_shadow_bit in &[46usize, 36usize] {
             let addr: usize = 1 << try_shadow_bit;
             if unsafe {
@@ -104,7 +95,7 @@ impl Allocator {
         // x86_64's userspace's up to 0x7fff-ffff-ffff so 46 is not available. (0x4000-0000-0000 - 0xc000-0000-0000)
         // we'd also want to avoid 0x5555-xxxx-xxxx because programs are mapped there. so 45 is not available either (0x2000-0000-0000 - 0x6000-0000-0000).
         // This memory map is for amd64 linux.
-        #[cfg(all(target_arch = "x86_64", target_os = "linux"))]
+        #[cfg(target_os = "linux")]
         {
             let try_shadow_bit: usize = 44;
             let addr: usize = 1 << try_shadow_bit;
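
Note: the probe keeps the first candidate `shadow_bit` for which a mapping at `1 << try_shadow_bit` can actually be created. A rough sketch of that idea (Linux-only; `probe_shadow_bit` and the exact mmap flags are assumptions of this sketch — the real allocator goes through `nix` with its own flag set):

```rust
// Hypothetical probe: try to place a one-page anonymous mapping at
// 1 << try_shadow_bit and keep the first candidate the kernel accepts.
use libc::{
    c_void, mmap, munmap, MAP_ANONYMOUS, MAP_FAILED, MAP_FIXED_NOREPLACE, MAP_NORESERVE,
    MAP_PRIVATE, PROT_READ, PROT_WRITE,
};

fn probe_shadow_bit(candidates: &[usize], page_size: usize) -> Option<usize> {
    for &try_shadow_bit in candidates {
        let addr = (1usize << try_shadow_bit) as *mut c_void;
        let ret = unsafe {
            mmap(
                addr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE | MAP_NORESERVE,
                -1,
                0,
            )
        };
        if ret != MAP_FAILED && ret == addr {
            // The range is usable; release the probe mapping again.
            unsafe { munmap(ret, page_size) };
            return Some(try_shadow_bit);
        }
    }
    None
}

fn main() {
    // 46 and 36 mirror the aarch64/Android candidates above; 44 is the amd64 Linux choice.
    println!("{:?}", probe_shadow_bit(&[46, 36, 44], 0x1000));
}
```
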


@@ -10,7 +10,7 @@ use frida_gum::NativePointer;
 use frida_gum::{ModuleDetails, RangeDetails};
 use hashbrown::HashMap;
-use nix::sys::mman::{mmap, MapFlags, ProtFlags};
+use nix::sys::mman::{mmap, mprotect, MapFlags, ProtFlags};

 use backtrace::Backtrace;
@@ -183,44 +183,79 @@ impl AsanRuntime {
         self.hook_functions(_gum);
-        /*
         unsafe {
             let mem = self.allocator.alloc(0xac + 2, 8);
-            unsafe {mprotect((self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC)};
+            unsafe {
+                mprotect(
+                    (self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void,
+                    0x1000,
+                    ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC,
+                )
+            };
             println!("Test0");
             /*
             0x555555916ce9 <libafl_frida::asan_rt::AsanRuntime::init+13033> je libafl_frida::asan_rt::AsanRuntime::init+14852 <libafl_frida::asan_rt::AsanRuntime::init+14852>
             0x555555916cef <libafl_frida::asan_rt::AsanRuntime::init+13039> mov rdi, r15 <0x555558392338>
             */
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0x00));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 0) as *const c_void,
+                0x00
+            ));
             println!("Test1");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0xac));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 0) as *const c_void,
+                0xac
+            ));
             println!("Test2");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2) as *const c_void, 0xac));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2) as *const c_void,
+                0xac
+            ));
             println!("Test3");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 3) as *const c_void, 0xac));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as usize) + 3) as *const c_void,
+                0xac
+            ));
             println!("Test4");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as isize) + -1) as *const c_void, 0xac));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as isize) + -1) as *const c_void,
+                0xac
+            ));
             println!("Test5");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa4) as *const c_void, 8));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa4) as *const c_void,
+                8
+            ));
             println!("Test6");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa6) as *const c_void, 6));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa6) as *const c_void,
+                6
+            ));
             println!("Test7");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 6));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa8) as *const c_void,
+                6
+            ));
             println!("Test8");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 0xac));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa8) as *const c_void,
+                0xac
+            ));
             println!("Test9");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 4 + 0xa8) as *const c_void, 0x1));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 4 + 0xa8) as *const c_void,
+                0x1
+            ));
             println!("FIN");

             for i in 0..0xad {
-                assert!((self.shadow_check_func.unwrap())(((mem as usize) + i) as *const c_void, 0x01));
+                assert!((self.shadow_check_func.unwrap())(
+                    ((mem as usize) + i) as *const c_void,
+                    0x01
+                ));
             }
             // assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4));
         }
-        */
     }

     /// Reset all allocations so that they can be reused for new allocation requests.
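
Note: the now-enabled self-tests pin down what `shadow_check_func` is supposed to answer: an access is valid iff every byte it touches lies inside the unpoisoned allocation, and zero-sized accesses always pass. A pure-Rust model with made-up constants reproduces the expected Test0–Test9 results, assuming `alloc(0xac + 2, 8)` unpoisons exactly 0xae bytes starting at `mem` (which is what the asserts imply):

```rust
// Pure-Rust model of the expected shadow_check_func behaviour above.
// MEM/LEN are made-up values standing in for `mem` and 0xac + 2.
const MEM: isize = 0x10000;
const LEN: isize = 0xac + 2;

fn model_check(start: isize, size: isize) -> bool {
    size == 0 || (start >= MEM && start + size <= MEM + LEN)
}

fn main() {
    assert!(model_check(MEM, 0x00)); // Test0: zero-sized access always passes
    assert!(model_check(MEM, 0xac)); // Test1
    assert!(model_check(MEM + 2, 0xac)); // Test2: 2 + 0xac == 0xae still fits
    assert!(!model_check(MEM + 3, 0xac)); // Test3: last byte is one past the end
    assert!(!model_check(MEM - 1, 0xac)); // Test4: starts in the front red zone
    assert!(model_check(MEM + 2 + 0xa4, 8)); // Test5
    assert!(model_check(MEM + 2 + 0xa6, 6)); // Test6
    assert!(!model_check(MEM + 2 + 0xa8, 6)); // Test7: overruns by one byte
    assert!(!model_check(MEM + 2 + 0xa8, 0xac)); // Test8
    assert!(model_check(MEM + 4 + 0xa8, 0x1)); // Test9: last valid byte
    for i in 0..0xad {
        assert!(model_check(MEM + i, 0x01));
    }
    println!("model matches the expected Test0..Test9 results");
}
```
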
@@ -1173,7 +1208,7 @@ impl AsanRuntime {
         println!("actual rip: {:x}", self.regs[18]);
     }

-    // https://godbolt.org/z/Y87PYGd69
+    // https://godbolt.org/z/oajhcP5sv
     /*
     #include <stdio.h>
     #include <stdint.h>
@@ -1181,11 +1216,11 @@ impl AsanRuntime {
     uint64_t generate_shadow_check_function(uint64_t start, uint64_t size){
         // calculate the shadow address
-        uint64_t addr = 1;
-        addr = addr << shadow_bit;
+        uint64_t addr = 0;
         addr = addr + (start >> 3);
         uint64_t mask = (1ULL << (shadow_bit + 1)) - 1;
         addr = addr & mask;
+        addr = addr + (1ULL << shadow_bit);

         if(size == 0){
             // goto return_success
@@ -1292,19 +1327,19 @@ impl AsanRuntime {
         // Rdi start, Rsi size
         dynasm!(ops
             ; .arch x64
-            ; mov cl, shadow_bit as i8
+            ; mov cl, BYTE shadow_bit as i8
+            ; mov r10, -2
+            ; shl r10, cl
             ; mov eax, 1
             ; mov edx, 1
             ; shl rdx, cl
-            ; mov r10d, 2
-            ; shl r10, cl
             ; test rsi, rsi
             ; je >LBB0_15
             ; mov rcx, rdi
             ; shr rcx, 3
-            ; add rdx, rcx
-            ; add r10, -1
-            ; and r10, rdx
+            ; not r10
+            ; and r10, rcx
+            ; add r10, rdx
             ; and edi, 7
             ; je >LBB0_4
             ; mov cl, 8
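
Note: the rewritten x86-64 sequence derives the mask from `-2 << shadow_bit` (inverted with `not`), ANDs it with `start >> 3`, and only then adds the `1 << shadow_bit` base held in `rdx` — i.e. it follows the corrected `map_to_shadow!` formula, assuming `shadow_offset == 1 << shadow_bit`. A small bit-twiddling check of that equivalence, with an example `shadow_bit`:

```rust
// Check that the rewritten x86-64 sequence computes
// shadow_base + ((start >> 3) & mask). The shadow_bit value is just an example.
fn shadow_addr_like_asm(start: u64, shadow_bit: u32) -> u64 {
    let r10 = ((-2i64) as u64) << shadow_bit; // mov r10, -2 ; shl r10, cl
    let rdx = 1u64 << shadow_bit; // mov edx, 1 ; shl rdx, cl
    let rcx = start >> 3; // mov rcx, rdi ; shr rcx, 3
    (!r10 & rcx) + rdx // not r10 ; and r10, rcx ; add r10, rdx
}

fn shadow_addr_formula(start: u64, shadow_bit: u32) -> u64 {
    (1u64 << shadow_bit) + ((start >> 3) & ((1u64 << (shadow_bit + 1)) - 1))
}

fn main() {
    let shadow_bit = 44;
    for &start in &[0u64, 0x7fff_1234_5678, 0x5555_5555_0000, u64::MAX >> 16] {
        assert_eq!(
            shadow_addr_like_asm(start, shadow_bit),
            shadow_addr_formula(start, shadow_bit)
        );
    }
    println!("asm-style and formula-style shadow addresses agree");
}
```
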
@@ -1432,10 +1467,12 @@ impl AsanRuntime {
             ; .arch aarch64
             // calculate the shadow address
-            ; mov x5, #1
-            ; add x5, xzr, x5, lsl #shadow_bit
+            ; mov x5, #0
+            // ; add x5, xzr, x5, lsl #shadow_bit
             ; add x5, x5, x0, lsr #3
             ; ubfx x5, x5, #0, #(shadow_bit + 1)
+            ; mov x6, #1
+            ; add x5, x5, x6, lsl #shadow_bit

             ; cmp x1, #0
             ; b.eq >return_success
@@ -1545,18 +1582,18 @@ impl AsanRuntime {
         }
     }

-    // https://godbolt.org/z/cqEKf63e1
+    // https://godbolt.org/z/ah8vG8sWo
     /*
     #include <stdio.h>
     #include <stdint.h>
     uint8_t shadow_bit = 8;
     uint8_t bit = 3;
     uint64_t generate_shadow_check_blob(uint64_t start){
-        uint64_t addr = 1;
-        addr = addr << shadow_bit;
+        uint64_t addr = 0;
         addr = addr + (start >> 3);
         uint64_t mask = (1ULL << (shadow_bit + 1)) - 1;
         addr = addr & mask;
+        addr = addr + (1ULL << shadow_bit);

         uint8_t remainder = start & 0b111;
         uint16_t val = *(uint16_t *)addr;
@@ -1586,17 +1623,16 @@ impl AsanRuntime {
         macro_rules! shadow_check{
             ($ops:ident, $bit:expr) => {dynasm!($ops
                 ; .arch x64
-                ; mov cl, shadow_bit as i8
-                ; mov eax, 1
+                ; mov cl, BYTE shadow_bit as i8
+                ; mov rax, -2
                 ; shl rax, cl
                 ; mov rdx, rdi
-                ; mov esi, 2
-                ; shl rsi, cl
                 ; shr rdx, 3
-                ; add rdx, rax
-                ; add rsi, -1
-                ; and rsi, rdx
-                ; movzx eax, WORD [rsi]
+                ; not rax
+                ; and rax, rdx
+                ; mov edx, 1
+                ; shl rdx, cl
+                ; movzx eax, WORD [rax + rdx]
                 ; rol ax, 8
                 ; mov ecx, eax
                 ; shr ecx, 4
@@ -1656,16 +1692,20 @@ impl AsanRuntime {
             ($ops:ident, $bit:expr) => {dynasm!($ops
                 ; .arch aarch64
-                ; mov x1, #1
-                ; add x1, xzr, x1, lsl #shadow_bit
+                ; stp x2, x3, [sp, #-0x10]!
+                ; mov x1, #0
+                // ; add x1, xzr, x1, lsl #shadow_bit
                 ; add x1, x1, x0, lsr #3
                 ; ubfx x1, x1, #0, #(shadow_bit + 1)
+                ; mov x2, #1
+                ; add x1, x1, x2, lsl #shadow_bit
                 ; ldrh w1, [x1, #0]
                 ; and x0, x0, #7
                 ; rev16 w1, w1
                 ; rbit w1, w1
                 ; lsr x1, x1, #16
                 ; lsr x1, x1, x0
+                ; ldp x2, x3, [sp], 0x10

                 ; tbnz x1, #$bit, >done
                 ; adr x1, >done
@@ -1688,10 +1728,13 @@ impl AsanRuntime {
             ($ops:ident, $val:expr) => {dynasm!($ops
                 ; .arch aarch64
-                ; mov x1, #1
-                ; add x1, xzr, x1, lsl #shadow_bit
+                ; stp x2, x3, [sp, #-0x10]!
+                ; mov x1, #0
+                // ; add x1, xzr, x1, lsl #shadow_bit
                 ; add x1, x1, x0, lsr #3
                 ; ubfx x1, x1, #0, #(shadow_bit + 1)
+                ; mov x2, #1
+                ; add x1, x1, x2, lsl #shadow_bit
                 ; ldrh w1, [x1, #0]
                 ; and x0, x0, #7
                 ; rev16 w1, w1
@@ -1699,7 +1742,6 @@ impl AsanRuntime {
                 ; lsr x1, x1, #16
                 ; lsr x1, x1, x0
                 ; .dword -717536768 // 0xd53b4200 //mrs x0, NZCV
-                ; stp x2, x3, [sp, #-0x10]!
                 ; mov x2, $val
                 ; ands x1, x1, x2
                 ; ldp x2, x3, [sp], 0x10


@@ -281,12 +281,15 @@ impl AsanErrors {
             #[allow(clippy::non_ascii_literal)]
             writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap();
-            let offset: i64 = fault_address as i64 - error.metadata.address as i64;
+            let offset: i64 = fault_address as i64 - (error.metadata.address + 0x1000) as i64;
             let direction = if offset > 0 { "right" } else { "left" };
             writeln!(
                 output,
-                "access is {} to the {} of the 0x{:x} byte allocation at 0x{:x}",
-                offset, direction, error.metadata.size, error.metadata.address
+                "access is {:#x} to the {} of the {:#x} byte allocation at {:#x}",
+                offset,
+                direction,
+                error.metadata.size,
+                error.metadata.address + 0x1000
             )
             .unwrap();
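
Note: the report now measures the offset from `metadata.address + 0x1000` rather than from the raw mapping base, and prints it in hex; the extra 0x1000 is presumably the leading red-zone/metadata page in front of the user data, so the printed base matches the pointer the target actually received. A sketch of the adjusted arithmetic with simplified, hypothetical parameters:

```rust
// Sketch of the adjusted report arithmetic. The 0x1000 is assumed to be the
// leading red-zone page in front of the user data; the real code reads these
// values from AllocationMetadata.
fn describe_access(fault_address: usize, mapping_base: usize, size: usize) -> String {
    let user_base = mapping_base + 0x1000;
    let offset: i64 = fault_address as i64 - user_base as i64;
    let direction = if offset > 0 { "right" } else { "left" };
    format!(
        "access is {:#x} to the {} of the {:#x} byte allocation at {:#x}",
        offset, direction, size, user_base
    )
}

fn main() {
    // One byte past the end of a 0x20-byte allocation whose mapping starts at 0x7000_0000.
    println!("{}", describe_access(0x7000_1020, 0x7000_0000, 0x20));
}
```
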
@@ -369,7 +372,8 @@ impl AsanErrors {
             writeln!(
                 output,
                 "allocation at 0x{:x}, with size 0x{:x}",
-                metadata.address, metadata.size
+                metadata.address + 0x1000,
+                metadata.size
             )
             .unwrap();
             if metadata.is_malloc_zero {
@@ -403,7 +407,8 @@ impl AsanErrors {
             writeln!(
                 output,
                 "allocation at 0x{:x}, with size 0x{:x}",
-                metadata.address, metadata.size
+                metadata.address + 0x1000,
+                metadata.size
             )
             .unwrap();
             if metadata.is_malloc_zero {


@@ -7,7 +7,7 @@ use frida_gum::{
     Gum, NativePointer,
 };
-#[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
+#[cfg(any(debug_assertions, target_arch = "aarch64"))]
 use frida_gum::MemoryRange;

 use libafl::{
@@ -111,12 +111,12 @@ where
     OT: ObserversTuple<I, S>,
 {
     pub fn new(gum: &'a Gum, base: InProcessExecutor<'a, H, I, OT, S>, helper: &'c mut FH) -> Self {
-        #[cfg(not(all(not(debug_assertions), target_arch = "x86_64")))]
-        let stalker = Stalker::new(gum);
         #[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
+        let stalker = Stalker::new(gum);
+        #[cfg(any(debug_assertions, target_arch = "aarch64"))]
         let mut stalker = Stalker::new(gum);

-        #[cfg(not(all(debug_assertions, target_arch = "x86_64")))]
+        #[cfg(any(debug_assertions, target_arch = "aarch64"))]
         for range in helper.ranges().gaps(&(0..usize::MAX)) {
             println!("excluding range: {:x}-{:x}", range.start, range.end);
             stalker.exclude(&MemoryRange::new(
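
Note: the twin `cfg` declarations exist because only the configurations that later run the exclusion loop need a mutable `stalker`; the release-x86_64 build keeps it immutable and avoids an `unused_mut` warning. A minimal sketch of the same pattern, using `debug_assertions` alone as a stand-in for the executor's real gating:

```rust
// Minimal sketch of the cfg pattern used for `stalker` above: only the
// configuration that later mutates the binding declares it `mut`, so the
// other configuration compiles without an unused_mut warning.
fn collect_ranges() -> Vec<(usize, usize)> {
    #[cfg(not(debug_assertions))]
    let ranges = Vec::new();
    #[cfg(debug_assertions)]
    let mut ranges = Vec::new();

    #[cfg(debug_assertions)]
    ranges.push((0x1000, 0x2000)); // stand-in for stalker.exclude(...)

    ranges
}

fn main() {
    println!("{:?}", collect_ranges());
}
```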