Frida shadow fix (#425)

* map_to_shadow

* fix map_to_shadow

* aarch64 change?

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* use

* revert

* s1341's change

* Fix shadow calculation in instrumented code

* Fix asan error output to be more accurate

Co-authored-by: s1341 <github@shmarya.net>
Authored by Dongjia Zhang on 2021-12-20 18:51:45 +09:00, committed by GitHub
parent 1f24ad0b65
commit 2aa0ca5ef1
4 changed files with 222 additions and 184 deletions


@@ -39,7 +39,7 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
 macro_rules! map_to_shadow {
     ($self:expr, $address:expr) => {
-        (($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1)
+        $self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
     };
 }
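
This hunk is the heart of the fix: the old macro applied the mask after adding the shadow offset, so the offset bit could be masked away again whenever `(address >> 3)` already had that bit set, while the new form masks first and only then rebases into the shadow region. A toy comparison of the two formulas (a sketch, not the crate's code, assuming `shadow_offset == 1 << shadow_bit` and borrowing the small `shadow_bit = 8` from the C reference comments further down):

```rust
// Sketch only; shadow_offset is assumed to be 1 << shadow_bit.
fn map_to_shadow_old(address: usize, shadow_bit: usize) -> usize {
    let shadow_offset = 1usize << shadow_bit;
    ((address >> 3) + shadow_offset) & ((1 << (shadow_bit + 1)) - 1)
}

fn map_to_shadow_new(address: usize, shadow_bit: usize) -> usize {
    let shadow_offset = 1usize << shadow_bit;
    shadow_offset + ((address >> 3) & ((1 << (shadow_bit + 1)) - 1))
}

fn main() {
    let bit = 8; // toy value: the shadow region is then [0x100, 0x300)
    let addr = 0x900; // (addr >> 3) == 0x120 already has bit 8 set
    assert_eq!(map_to_shadow_old(addr, bit), 0x20); // wraps below the shadow base
    assert_eq!(map_to_shadow_new(addr, bit), 0x220); // stays inside the shadow region
}
```
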
@@ -67,18 +67,9 @@ impl Allocator {
         #[allow(clippy::cast_sign_loss)]
         let page_size = ret as usize;
         // probe to find a usable shadow bit:
-        #[cfg(any(
-            target_arch = "aarch64",
-            all(target_arch = "x86_64", target_os = "linux")
-        ))]
-        let mut shadow_bit: usize = 0;
-        #[cfg(not(any(
-            target_arch = "aarch64",
-            all(target_arch = "x86_64", target_os = "linux")
-        )))]
-        let shadow_bit = 0;
+        let mut shadow_bit = 0;

-        #[cfg(target_arch = "aarch64")]
+        #[cfg(all(target_arch = "aarch64", target_os = "android"))]
         for try_shadow_bit in &[46usize, 36usize] {
             let addr: usize = 1 << try_shadow_bit;
             if unsafe {
@@ -104,7 +95,7 @@ impl Allocator {
         // x86_64's userspace's up to 0x7fff-ffff-ffff so 46 is not available. (0x4000-0000-0000 - 0xc000-0000-0000)
         // we'd also want to avoid 0x5555-xxxx-xxxx because programs are mapped there. so 45 is not available either (0x2000-0000-0000 - 0x6000-0000-0000).
         // This memory map is for amd64 linux.
-        #[cfg(all(target_arch = "x86_64", target_os = "linux"))]
+        #[cfg(target_os = "linux")]
         {
             let try_shadow_bit: usize = 44;
             let addr: usize = 1 << try_shadow_bit;
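
The address ranges quoted in the comment above are just `[1 << bit, 3 << bit)`: the shadow base plus the `1 << (bit + 1)` bytes that the mask admits. A quick sanity check of the three candidate bits (a sketch, not part of the crate):

```rust
fn shadow_region(bit: u32) -> (u64, u64) {
    // shadow base, and the base plus the 1 << (bit + 1) bytes the mask allows
    (1u64 << bit, 3u64 << bit)
}

fn main() {
    assert_eq!(shadow_region(46), (0x4000_0000_0000, 0xc000_0000_0000)); // ends above the 0x7fff_ffff_ffff userspace ceiling
    assert_eq!(shadow_region(45), (0x2000_0000_0000, 0x6000_0000_0000)); // swallows the 0x5555_xxxx_xxxx program mappings
    assert_eq!(shadow_region(44), (0x1000_0000_0000, 0x3000_0000_0000)); // the bit the Linux branch settles on
}
```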


@@ -10,7 +10,7 @@ use frida_gum::NativePointer;
 use frida_gum::{ModuleDetails, RangeDetails};
 use hashbrown::HashMap;
-use nix::sys::mman::{mmap, MapFlags, ProtFlags};
+use nix::sys::mman::{mmap, mprotect, MapFlags, ProtFlags};

 use backtrace::Backtrace;
@@ -183,44 +183,79 @@ impl AsanRuntime {
         self.hook_functions(_gum);
-        /*
         unsafe {
             let mem = self.allocator.alloc(0xac + 2, 8);
-            unsafe {mprotect((self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC)};
+            unsafe {
+                mprotect(
+                    (self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void,
+                    0x1000,
+                    ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC,
+                )
+            };
             println!("Test0");
             /*
             0x555555916ce9 <libafl_frida::asan_rt::AsanRuntime::init+13033> je libafl_frida::asan_rt::AsanRuntime::init+14852 <libafl_frida::asan_rt::AsanRuntime::init+14852>
             0x555555916cef <libafl_frida::asan_rt::AsanRuntime::init+13039> mov rdi, r15 <0x555558392338>
             */
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0x00));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 0) as *const c_void,
+                0x00
+            ));
             println!("Test1");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0xac));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 0) as *const c_void,
+                0xac
+            ));
             println!("Test2");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2) as *const c_void, 0xac));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2) as *const c_void,
+                0xac
+            ));
             println!("Test3");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 3) as *const c_void, 0xac));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as usize) + 3) as *const c_void,
+                0xac
+            ));
             println!("Test4");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as isize) + -1) as *const c_void, 0xac));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as isize) + -1) as *const c_void,
+                0xac
+            ));
             println!("Test5");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa4) as *const c_void, 8));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa4) as *const c_void,
+                8
+            ));
             println!("Test6");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa6) as *const c_void, 6));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa6) as *const c_void,
+                6
+            ));
             println!("Test7");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 6));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa8) as *const c_void,
+                6
+            ));
             println!("Test8");
-            assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 0xac));
+            assert!(!(self.shadow_check_func.unwrap())(
+                ((mem as usize) + 2 + 0xa8) as *const c_void,
+                0xac
+            ));
             println!("Test9");
-            assert!((self.shadow_check_func.unwrap())(((mem as usize) + 4 + 0xa8) as *const c_void, 0x1));
+            assert!((self.shadow_check_func.unwrap())(
+                ((mem as usize) + 4 + 0xa8) as *const c_void,
+                0x1
+            ));
             println!("FIN");
             for i in 0..0xad {
-                assert!((self.shadow_check_func.unwrap())(((mem as usize) + i) as *const c_void, 0x01));
+                assert!((self.shadow_check_func.unwrap())(
+                    ((mem as usize) + i) as *const c_void,
+                    0x01
+                ));
             }
             // assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4));
         }
-        */
     }

     /// Reset all allocations so that they can be reused for new allocation requests.
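
The re-enabled self-test is easier to follow once you note what its asserts imply: `alloc(0xac + 2, 8)` leaves exactly `0xae` accessible bytes starting at `mem`, so a check of `len` bytes at offset `off` should succeed precisely when `off + len <= 0xae`. A sketch of that arithmetic (not part of the crate):

```rust
fn should_pass(off: usize, len: usize) -> bool {
    off + len <= 0xac + 2 // the size handed to alloc() in the test above
}

fn main() {
    assert!(should_pass(2, 0xac)); // Test2: ends exactly at mem + 0xae
    assert!(!should_pass(3, 0xac)); // Test3: overruns the allocation by one byte
    assert!(should_pass(2 + 0xa6, 6)); // Test6: also ends exactly at mem + 0xae
    assert!(!should_pass(2 + 0xa8, 6)); // Test7: two bytes past the end
    assert!(should_pass(4 + 0xa8, 0x1)); // Test9: the last accessible byte
}
```
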
@@ -1173,7 +1208,7 @@ impl AsanRuntime {
             println!("actual rip: {:x}", self.regs[18]);
         }

-    // https://godbolt.org/z/Y87PYGd69
+    // https://godbolt.org/z/oajhcP5sv
     /*
     #include <stdio.h>
     #include <stdint.h>
@@ -1181,11 +1216,11 @@ impl AsanRuntime {
     uint64_t generate_shadow_check_function(uint64_t start, uint64_t size){
         // calculate the shadow address
-        uint64_t addr = 1;
-        addr = addr << shadow_bit;
+        uint64_t addr = 0;
         addr = addr + (start >> 3);
         uint64_t mask = (1ULL << (shadow_bit + 1)) - 1;
         addr = addr & mask;
+        addr = addr + (1ULL << shadow_bit);

         if(size == 0){
             // goto return_success
@@ -1292,117 +1327,117 @@ impl AsanRuntime {
         // Rdi start, Rsi size
         dynasm!(ops
             ; .arch x64
-            ; mov cl, shadow_bit as i8
-            ; mov eax, 1
-            ; mov edx, 1
-            ; shl rdx, cl
-            ; mov r10d, 2
-            ; shl r10, cl
+            ; mov cl, BYTE shadow_bit as i8
+            ; mov r10, -2
+            ; shl r10, cl
+            ; mov eax, 1
+            ; mov edx, 1
+            ; shl rdx, cl
             ; test rsi, rsi
             ; je >LBB0_15
             ; mov rcx, rdi
             ; shr rcx, 3
-            ; add rdx, rcx
-            ; add r10, -1
-            ; and r10, rdx
+            ; not r10
+            ; and r10, rcx
+            ; add r10, rdx
             ; and edi, 7
             ; je >LBB0_4
             ; mov cl, 8
             ; sub cl, dil
             ; cmp rsi, 8
             ; movzx ecx, cl
             ; mov r8d, esi
             ; cmovae r8d, ecx
             ; mov r9d, -1
             ; mov ecx, r8d
             ; shl r9d, cl
             ; movzx ecx, WORD [r10]
             ; rol cx, 8
             ; mov edx, ecx
             ; shr edx, 4
             ; and edx, 3855
             ; shl ecx, 4
             ; and ecx, -3856
             ; or ecx, edx
             ; mov edx, ecx
             ; shr edx, 2
             ; and edx, 13107
             ; and ecx, -3277
             ; lea ecx, [rdx + 4*rcx]
             ; mov edx, ecx
             ; shr edx, 1
             ; and edx, 21845
             ; and ecx, -10923
             ; lea ecx, [rdx + 2*rcx]
             ; rol cx, 8
             ; movzx edx, cx
             ; mov ecx, edi
             ; shr edx, cl
             ; not r9d
             ; movzx ecx, r9b
             ; and edx, ecx
             ; cmp edx, ecx
             ; jne >LBB0_11
             ; movzx ecx, r8b
             ; sub rsi, rcx
             ; add r10, 1
             ;LBB0_4:
             ; mov r8, rsi
             ; shr r8, 3
             ; mov r9, r8
             ; and r9, -8
             ; mov edi, r8d
             ; and edi, 7
             ; add r9, r10
             ; and esi, 63
             ; mov rdx, r8
             ; mov rcx, r10
             ;LBB0_5:
             ; cmp rdx, 7
             ; jbe >LBB0_8
             ; add rdx, -8
             ; cmp QWORD [rcx], -1
             ; lea rcx, [rcx + 8]
             ; je <LBB0_5
             ; jmp >LBB0_11
             ;LBB0_8:
             ; lea rcx, [8*rdi]
             ; sub rsi, rcx
             ;LBB0_9:
             ; test rdi, rdi
             ; je >LBB0_13
             ; add rdi, -1
             ; cmp BYTE [r9], -1
             ; lea r9, [r9 + 1]
             ; je <LBB0_9
             ;LBB0_11:
             ; xor eax, eax
             ; ret
             ;LBB0_13:
             ; test rsi, rsi
             ; je >LBB0_15
             ; and sil, 7
             ; mov dl, -1
             ; mov ecx, esi
             ; shl dl, cl
             ; not dl
             ; mov cl, BYTE [r8 + r10]
             ; rol cl, 4
             ; mov eax, ecx
             ; shr al, 2
             ; shl cl, 2
             ; and cl, -52
             ; or cl, al
             ; mov eax, ecx
             ; shr al, 1
             ; and al, 85
             ; add cl, cl
             ; and cl, -86
             ; or cl, al
             ; and cl, dl
             ; xor eax, eax
             ; cmp cl, dl
             ; sete al
             ;LBB0_15:
             ; ret
         );
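
One detail of the regenerated x86_64 prologue worth spelling out: instead of building the `(1 << (shadow_bit + 1)) - 1` mask from a second shifted constant, the new code shifts `-2` left and later negates it, relying on the identity `!(-2 << bit) == (1 << (bit + 1)) - 1`. A quick check of that identity (a sketch, not the emitted code):

```rust
fn mask_via_not(shadow_bit: u32) -> u64 {
    // mov r10, -2 ; shl r10, cl ; ... ; not r10  leaves the mask in r10
    !((-2i64 << shadow_bit) as u64)
}

fn main() {
    for bit in [8u32, 36, 44, 46] {
        assert_eq!(mask_via_not(bit), (1u64 << (bit + 1)) - 1);
    }
}
```
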
@@ -1432,10 +1467,12 @@ impl AsanRuntime {
             ; .arch aarch64

             // calculate the shadow address
-            ; mov x5, #1
-            ; add x5, xzr, x5, lsl #shadow_bit
+            ; mov x5, #0
+            // ; add x5, xzr, x5, lsl #shadow_bit
             ; add x5, x5, x0, lsr #3
             ; ubfx x5, x5, #0, #(shadow_bit + 1)
+            ; mov x6, #1
+            ; add x5, x5, x6, lsl #shadow_bit

             ; cmp x1, #0
             ; b.eq >return_success
@@ -1545,18 +1582,18 @@ impl AsanRuntime {
         }
     }

-    // https://godbolt.org/z/cqEKf63e1
+    // https://godbolt.org/z/ah8vG8sWo
     /*
     #include <stdio.h>
     #include <stdint.h>
     uint8_t shadow_bit = 8;
     uint8_t bit = 3;
     uint64_t generate_shadow_check_blob(uint64_t start){
-        uint64_t addr = 1;
-        addr = addr << shadow_bit;
+        uint64_t addr = 0;
         addr = addr + (start >> 3);
         uint64_t mask = (1ULL << (shadow_bit + 1)) - 1;
         addr = addr & mask;
+        addr = addr + (1ULL << shadow_bit);

         uint8_t remainder = start & 0b111;
         uint16_t val = *(uint16_t *)addr;
@@ -1586,17 +1623,16 @@ impl AsanRuntime {
         macro_rules! shadow_check{
             ($ops:ident, $bit:expr) => {dynasm!($ops
                 ; .arch x64
-                ; mov cl, shadow_bit as i8
-                ; mov eax, 1
+                ; mov cl, BYTE shadow_bit as i8
+                ; mov rax, -2
                 ; shl rax, cl
                 ; mov rdx, rdi
-                ; mov esi, 2
-                ; shl rsi, cl
                 ; shr rdx, 3
-                ; add rdx, rax
-                ; add rsi, -1
-                ; and rsi, rdx
-                ; movzx eax, WORD [rsi]
+                ; not rax
+                ; and rax, rdx
+                ; mov edx, 1
+                ; shl rdx, cl
+                ; movzx eax, WORD [rax + rdx]
                 ; rol ax, 8
                 ; mov ecx, eax
                 ; shr ecx, 4
@@ -1656,16 +1692,20 @@ impl AsanRuntime {
             ($ops:ident, $bit:expr) => {dynasm!($ops
                 ; .arch aarch64

-                ; mov x1, #1
-                ; add x1, xzr, x1, lsl #shadow_bit
+                ; stp x2, x3, [sp, #-0x10]!
+                ; mov x1, #0
+                // ; add x1, xzr, x1, lsl #shadow_bit
                 ; add x1, x1, x0, lsr #3
                 ; ubfx x1, x1, #0, #(shadow_bit + 1)
+                ; mov x2, #1
+                ; add x1, x1, x2, lsl #shadow_bit
                 ; ldrh w1, [x1, #0]
                 ; and x0, x0, #7
                 ; rev16 w1, w1
                 ; rbit w1, w1
                 ; lsr x1, x1, #16
                 ; lsr x1, x1, x0
+                ; ldp x2, x3, [sp], 0x10
                 ; tbnz x1, #$bit, >done

                 ; adr x1, >done
@@ -1688,10 +1728,13 @@ impl AsanRuntime {
             ($ops:ident, $val:expr) => {dynasm!($ops
                 ; .arch aarch64

-                ; mov x1, #1
-                ; add x1, xzr, x1, lsl #shadow_bit
+                ; stp x2, x3, [sp, #-0x10]!
+                ; mov x1, #0
+                // ; add x1, xzr, x1, lsl #shadow_bit
                 ; add x1, x1, x0, lsr #3
                 ; ubfx x1, x1, #0, #(shadow_bit + 1)
+                ; mov x2, #1
+                ; add x1, x1, x2, lsl #shadow_bit
                 ; ldrh w1, [x1, #0]
                 ; and x0, x0, #7
                 ; rev16 w1, w1
@@ -1699,7 +1742,6 @@ impl AsanRuntime {
                 ; lsr x1, x1, #16
                 ; lsr x1, x1, x0
                 ; .dword -717536768 // 0xd53b4200 //mrs x0, NZCV
-                ; stp x2, x3, [sp, #-0x10]!
                 ; mov x2, $val
                 ; ands x1, x1, x2
                 ; ldp x2, x3, [sp], 0x10


@@ -281,12 +281,15 @@ impl AsanErrors {
                 #[allow(clippy::non_ascii_literal)]
                 writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap();
-                let offset: i64 = fault_address as i64 - error.metadata.address as i64;
+                let offset: i64 = fault_address as i64 - (error.metadata.address + 0x1000) as i64;
                 let direction = if offset > 0 { "right" } else { "left" };
                 writeln!(
                     output,
-                    "access is {} to the {} of the 0x{:x} byte allocation at 0x{:x}",
-                    offset, direction, error.metadata.size, error.metadata.address
+                    "access is {:#x} to the {} of the {:#x} byte allocation at {:#x}",
+                    offset,
+                    direction,
+                    error.metadata.size,
+                    error.metadata.address + 0x1000
                 )
                 .unwrap();
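
Here and in the two allocation-listing hunks below, both the offset and the reported base are shifted by `0x1000`, presumably so the message refers to the pointer the caller actually received rather than to the start of the backing mapping (an assumption, not something this diff states; the constant itself comes straight from the diff). A toy version of the adjusted arithmetic, with made-up addresses:

```rust
// Hypothetical values purely for illustration; only the +0x1000 shift and the
// format string mirror the diff.
fn report(fault_address: i64, metadata_address: i64, size: i64) -> String {
    let offset = fault_address - (metadata_address + 0x1000);
    let direction = if offset > 0 { "right" } else { "left" };
    format!(
        "access is {:#x} to the {} of the {:#x} byte allocation at {:#x}",
        offset,
        direction,
        size,
        metadata_address + 0x1000
    )
}

fn main() {
    // A fault one byte past the end of a 0x20-byte allocation mapped at 0x7000:
    println!("{}", report(0x7000 + 0x1000 + 0x20, 0x7000, 0x20));
}
```
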
@@ -369,7 +372,8 @@ impl AsanErrors {
                 writeln!(
                     output,
                     "allocation at 0x{:x}, with size 0x{:x}",
-                    metadata.address, metadata.size
+                    metadata.address + 0x1000,
+                    metadata.size
                 )
                 .unwrap();
                 if metadata.is_malloc_zero {
@@ -403,7 +407,8 @@ impl AsanErrors {
                 writeln!(
                     output,
                     "allocation at 0x{:x}, with size 0x{:x}",
-                    metadata.address, metadata.size
+                    metadata.address + 0x1000,
+                    metadata.size
                 )
                 .unwrap();
                 if metadata.is_malloc_zero {


@@ -7,7 +7,7 @@ use frida_gum::{
     Gum, NativePointer,
 };

-#[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
+#[cfg(any(debug_assertions, target_arch = "aarch64"))]
 use frida_gum::MemoryRange;

 use libafl::{
@@ -111,12 +111,12 @@ where
     OT: ObserversTuple<I, S>,
 {
     pub fn new(gum: &'a Gum, base: InProcessExecutor<'a, H, I, OT, S>, helper: &'c mut FH) -> Self {
-        #[cfg(not(all(not(debug_assertions), target_arch = "x86_64")))]
-        let stalker = Stalker::new(gum);
         #[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
+        let stalker = Stalker::new(gum);
+        #[cfg(any(debug_assertions, target_arch = "aarch64"))]
         let mut stalker = Stalker::new(gum);

-        #[cfg(not(all(debug_assertions, target_arch = "x86_64")))]
+        #[cfg(any(debug_assertions, target_arch = "aarch64"))]
         for range in helper.ranges().gaps(&(0..usize::MAX)) {
             println!("excluding range: {:x}-{:x}", range.start, range.end);
             stalker.exclude(&MemoryRange::new(
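
Taken together, the two executor hunks make the cfg story consistent with the import change above: release x86_64 builds keep an immutable `Stalker` and never touch `MemoryRange`, while debug builds and aarch64 builds get a mutable `Stalker` plus the range-exclusion loop, which is why `use frida_gum::MemoryRange;` is now gated on `any(debug_assertions, target_arch = "aarch64")`. The compile-time predicate, restated as a plain function (a sketch; the real selection happens via `#[cfg]`):

```rust
fn excludes_uninstrumented_ranges(debug_assertions: bool, target_arch: &str) -> bool {
    // Mirrors #[cfg(any(debug_assertions, target_arch = "aarch64"))]
    debug_assertions || target_arch == "aarch64"
}

fn main() {
    assert!(!excludes_uninstrumented_ranges(false, "x86_64")); // release x86_64: plain Stalker, no exclusions
    assert!(excludes_uninstrumented_ranges(true, "x86_64")); // debug x86_64: excludes non-instrumented ranges
    assert!(excludes_uninstrumented_ranges(false, "aarch64")); // aarch64: always excludes
}
```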