Frida shadow fix (#425)

* map_to_shadow

* fix map_to_shadow

* aarch64 change?

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* use

* revert

* s1341's change

* Fix shadow calculation in instrumented code

* Fix asan error output to be more accurate

Co-authored-by: s1341 <github@shmarya.net>
Dongjia Zhang 2021-12-20 18:51:45 +09:00 committed by GitHub
parent 1f24ad0b65
commit 2aa0ca5ef1
4 changed files with 222 additions and 184 deletions


@@ -39,7 +39,7 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
macro_rules! map_to_shadow {
($self:expr, $address:expr) => {
(($address >> 3) + $self.shadow_offset) & ((1 << ($self.shadow_bit + 1)) - 1)
$self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
};
}
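
Spelled out as plain Rust, and assuming shadow_offset is 1 << shadow_bit (which is what the updated check blobs below add), the two expressions differ only in where the masking happens; a minimal sketch with hypothetical constants:

// Hypothetical values for illustration only.
const SHADOW_BIT: usize = 44;
const SHADOW_OFFSET: usize = 1 << SHADOW_BIT;

// Old mapping: the offset is added before masking, so once address >> 3
// reaches 1 << SHADOW_BIT the sum wraps modulo 2^(SHADOW_BIT + 1) and can
// land below the shadow base.
fn map_to_shadow_old(address: usize) -> usize {
    ((address >> 3) + SHADOW_OFFSET) & ((1 << (SHADOW_BIT + 1)) - 1)
}

// New mapping: only the scaled address is masked, then the fixed offset is
// added, so every result stays inside the shadow region.
fn map_to_shadow_new(address: usize) -> usize {
    SHADOW_OFFSET + ((address >> 3) & ((1 << (SHADOW_BIT + 1)) - 1))
}
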
@@ -67,18 +67,9 @@ impl Allocator {
#[allow(clippy::cast_sign_loss)]
let page_size = ret as usize;
// probe to find a usable shadow bit:
#[cfg(any(
target_arch = "aarch64",
all(target_arch = "x86_64", target_os = "linux")
))]
let mut shadow_bit: usize = 0;
#[cfg(not(any(
target_arch = "aarch64",
all(target_arch = "x86_64", target_os = "linux")
)))]
let shadow_bit = 0;
let mut shadow_bit = 0;
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", target_os = "android"))]
for try_shadow_bit in &[46usize, 36usize] {
let addr: usize = 1 << try_shadow_bit;
if unsafe {
@@ -104,7 +95,7 @@ impl Allocator {
// x86_64's userspace's up to 0x7fff-ffff-ffff so 46 is not available. (0x4000-0000-0000 - 0xc000-0000-0000)
// we'd also want to avoid 0x5555-xxxx-xxxx because programs are mapped there. so 45 is not available either (0x2000-0000-0000 - 0x6000-0000-0000).
// This memory map is for amd64 linux.
#[cfg(all(target_arch = "x86_64", target_os = "linux"))]
#[cfg(target_os = "linux")]
{
let try_shadow_bit: usize = 44;
let addr: usize = 1 << try_shadow_bit;
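
The probing loop (truncated in this hunk) asks the kernel for a page at 1 << try_shadow_bit and treats success as "this bit is usable". A minimal sketch of that idea, assuming a nix 0.23-style mmap signature and a hint-then-compare probe rather than the exact flags the allocator passes:

use nix::sys::mman::{mmap, munmap, MapFlags, ProtFlags};
use std::os::raw::c_void;

// Hypothetical helper: returns the first candidate bit for which the kernel
// hands back a page at exactly 1 << bit; the probe page is unmapped again.
fn probe_shadow_bit(candidates: &[usize], page_size: usize) -> Option<usize> {
    for &bit in candidates {
        let want = 1usize << bit;
        let got = unsafe {
            mmap(
                want as *mut c_void,
                page_size,
                ProtFlags::PROT_NONE,
                MapFlags::MAP_PRIVATE | MapFlags::MAP_ANONYMOUS | MapFlags::MAP_NORESERVE,
                -1,
                0,
            )
        };
        if let Ok(ptr) = got {
            let usable = ptr as usize == want;
            let _ = unsafe { munmap(ptr, page_size) };
            if usable {
                return Some(bit);
            }
        }
    }
    None
}
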


@@ -10,7 +10,7 @@ use frida_gum::NativePointer;
use frida_gum::{ModuleDetails, RangeDetails};
use hashbrown::HashMap;
use nix::sys::mman::{mmap, MapFlags, ProtFlags};
use nix::sys::mman::{mmap, mprotect, MapFlags, ProtFlags};
use backtrace::Backtrace;
@@ -183,44 +183,79 @@ impl AsanRuntime {
self.hook_functions(_gum);
/*
unsafe {
let mem = self.allocator.alloc(0xac + 2, 8);
unsafe {mprotect((self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC)};
println!("Test0");
/*
0x555555916ce9 <libafl_frida::asan_rt::AsanRuntime::init+13033> je libafl_frida::asan_rt::AsanRuntime::init+14852 <libafl_frida::asan_rt::AsanRuntime::init+14852>
0x555555916cef <libafl_frida::asan_rt::AsanRuntime::init+13039> mov rdi, r15 <0x555558392338>
*/
assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0x00));
println!("Test1");
assert!((self.shadow_check_func.unwrap())(((mem as usize) + 0) as *const c_void, 0xac));
println!("Test2");
assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2) as *const c_void, 0xac));
println!("Test3");
assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 3) as *const c_void, 0xac));
println!("Test4");
assert!(!(self.shadow_check_func.unwrap())(((mem as isize) + -1) as *const c_void, 0xac));
println!("Test5");
assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa4) as *const c_void, 8));
println!("Test6");
assert!((self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa6) as *const c_void, 6));
println!("Test7");
assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 6));
println!("Test8");
assert!(!(self.shadow_check_func.unwrap())(((mem as usize) + 2 + 0xa8) as *const c_void, 0xac));
println!("Test9");
assert!((self.shadow_check_func.unwrap())(((mem as usize) + 4 + 0xa8) as *const c_void, 0x1));
println!("FIN");
let mem = self.allocator.alloc(0xac + 2, 8);
unsafe {
mprotect(
(self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void,
0x1000,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC,
)
};
println!("Test0");
/*
0x555555916ce9 <libafl_frida::asan_rt::AsanRuntime::init+13033> je libafl_frida::asan_rt::AsanRuntime::init+14852 <libafl_frida::asan_rt::AsanRuntime::init+14852>
0x555555916cef <libafl_frida::asan_rt::AsanRuntime::init+13039> mov rdi, r15 <0x555558392338>
*/
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + 0) as *const c_void,
0x00
));
println!("Test1");
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + 0) as *const c_void,
0xac
));
println!("Test2");
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + 2) as *const c_void,
0xac
));
println!("Test3");
assert!(!(self.shadow_check_func.unwrap())(
((mem as usize) + 3) as *const c_void,
0xac
));
println!("Test4");
assert!(!(self.shadow_check_func.unwrap())(
((mem as isize) + -1) as *const c_void,
0xac
));
println!("Test5");
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + 2 + 0xa4) as *const c_void,
8
));
println!("Test6");
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + 2 + 0xa6) as *const c_void,
6
));
println!("Test7");
assert!(!(self.shadow_check_func.unwrap())(
((mem as usize) + 2 + 0xa8) as *const c_void,
6
));
println!("Test8");
assert!(!(self.shadow_check_func.unwrap())(
((mem as usize) + 2 + 0xa8) as *const c_void,
0xac
));
println!("Test9");
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + 4 + 0xa8) as *const c_void,
0x1
));
println!("FIN");
for i in 0..0xad {
assert!((self.shadow_check_func.unwrap())(((mem as usize) + i) as *const c_void, 0x01));
for i in 0..0xad {
assert!((self.shadow_check_func.unwrap())(
((mem as usize) + i) as *const c_void,
0x01
));
}
// assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4));
}
// assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4));
}
*/
}
/// Reset all allocations so that they can be reused for new allocation requests.
@@ -1173,7 +1208,7 @@ impl AsanRuntime {
println!("actual rip: {:x}", self.regs[18]);
}
// https://godbolt.org/z/Y87PYGd69
// https://godbolt.org/z/oajhcP5sv
/*
#include <stdio.h>
#include <stdint.h>
@@ -1181,11 +1216,11 @@ impl AsanRuntime {
uint64_t generate_shadow_check_function(uint64_t start, uint64_t size){
// calculate the shadow address
uint64_t addr = 1;
addr = addr << shadow_bit;
uint64_t addr = 0;
addr = addr + (start >> 3);
uint64_t mask = (1ULL << (shadow_bit + 1)) - 1;
addr = addr & mask;
addr = addr + (1ULL << shadow_bit);
if(size == 0){
// goto return_success
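
For reference, the whole generated function boils down to a slow per-byte walk over the shadow bits. The sketch below is a hypothetical helper, not the emitted code, and the in-byte bit order is an assumption (the optimized blob byte-swaps and bit-reverses a 16-bit load before testing):

// One shadow bit per application byte; the range [start, start + size)
// passes the check only if every corresponding bit is set.
fn shadow_check_reference(shadow_bit: usize, start: usize, size: usize) -> bool {
    let mask = (1usize << (shadow_bit + 1)) - 1;
    (start..start + size).all(|byte| {
        let shadow = (((byte >> 3) & mask) + (1usize << shadow_bit)) as *const u8;
        (unsafe { *shadow } >> (byte & 7)) & 1 != 0 // assumed LSB-first layout
    })
}
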
@@ -1292,117 +1327,117 @@ impl AsanRuntime {
// Rdi start, Rsi size
dynasm!(ops
; .arch x64
; mov cl, shadow_bit as i8
; mov eax, 1
; mov edx, 1
; shl rdx, cl
; mov r10d, 2
; shl r10, cl
; test rsi, rsi
; je >LBB0_15
; mov rcx, rdi
; shr rcx, 3
; add rdx, rcx
; add r10, -1
; and r10, rdx
; and edi, 7
; je >LBB0_4
; mov cl, 8
; sub cl, dil
; cmp rsi, 8
; movzx ecx, cl
; mov r8d, esi
; cmovae r8d, ecx
; mov r9d, -1
; mov ecx, r8d
; shl r9d, cl
; movzx ecx, WORD [r10]
; rol cx, 8
; mov edx, ecx
; shr edx, 4
; and edx, 3855
; shl ecx, 4
; and ecx, -3856
; or ecx, edx
; mov edx, ecx
; shr edx, 2
; and edx, 13107
; and ecx, -3277
; lea ecx, [rdx + 4*rcx]
; mov edx, ecx
; shr edx, 1
; and edx, 21845
; and ecx, -10923
; lea ecx, [rdx + 2*rcx]
; rol cx, 8
; movzx edx, cx
; mov ecx, edi
; shr edx, cl
; not r9d
; movzx ecx, r9b
; and edx, ecx
; cmp edx, ecx
; jne >LBB0_11
; movzx ecx, r8b
; sub rsi, rcx
; add r10, 1
; mov cl, BYTE shadow_bit as i8
; mov r10, -2
; shl r10, cl
; mov eax, 1
; mov edx, 1
; shl rdx, cl
; test rsi, rsi
; je >LBB0_15
; mov rcx, rdi
; shr rcx, 3
; not r10
; and r10, rcx
; add r10, rdx
; and edi, 7
; je >LBB0_4
; mov cl, 8
; sub cl, dil
; cmp rsi, 8
; movzx ecx, cl
; mov r8d, esi
; cmovae r8d, ecx
; mov r9d, -1
; mov ecx, r8d
; shl r9d, cl
; movzx ecx, WORD [r10]
; rol cx, 8
; mov edx, ecx
; shr edx, 4
; and edx, 3855
; shl ecx, 4
; and ecx, -3856
; or ecx, edx
; mov edx, ecx
; shr edx, 2
; and edx, 13107
; and ecx, -3277
; lea ecx, [rdx + 4*rcx]
; mov edx, ecx
; shr edx, 1
; and edx, 21845
; and ecx, -10923
; lea ecx, [rdx + 2*rcx]
; rol cx, 8
; movzx edx, cx
; mov ecx, edi
; shr edx, cl
; not r9d
; movzx ecx, r9b
; and edx, ecx
; cmp edx, ecx
; jne >LBB0_11
; movzx ecx, r8b
; sub rsi, rcx
; add r10, 1
;LBB0_4:
; mov r8, rsi
; shr r8, 3
; mov r9, r8
; and r9, -8
; mov edi, r8d
; and edi, 7
; add r9, r10
; and esi, 63
; mov rdx, r8
; mov rcx, r10
; mov r8, rsi
; shr r8, 3
; mov r9, r8
; and r9, -8
; mov edi, r8d
; and edi, 7
; add r9, r10
; and esi, 63
; mov rdx, r8
; mov rcx, r10
;LBB0_5:
; cmp rdx, 7
; jbe >LBB0_8
; add rdx, -8
; cmp QWORD [rcx], -1
; lea rcx, [rcx + 8]
; je <LBB0_5
; jmp >LBB0_11
; cmp rdx, 7
; jbe >LBB0_8
; add rdx, -8
; cmp QWORD [rcx], -1
; lea rcx, [rcx + 8]
; je <LBB0_5
; jmp >LBB0_11
;LBB0_8:
; lea rcx, [8*rdi]
; sub rsi, rcx
; lea rcx, [8*rdi]
; sub rsi, rcx
;LBB0_9:
; test rdi, rdi
; je >LBB0_13
; add rdi, -1
; cmp BYTE [r9], -1
; lea r9, [r9 + 1]
; je <LBB0_9
; test rdi, rdi
; je >LBB0_13
; add rdi, -1
; cmp BYTE [r9], -1
; lea r9, [r9 + 1]
; je <LBB0_9
;LBB0_11:
; xor eax, eax
; ret
; xor eax, eax
; ret
;LBB0_13:
; test rsi, rsi
; je >LBB0_15
; and sil, 7
; mov dl, -1
; mov ecx, esi
; shl dl, cl
; not dl
; mov cl, BYTE [r8 + r10]
; rol cl, 4
; mov eax, ecx
; shr al, 2
; shl cl, 2
; and cl, -52
; or cl, al
; mov eax, ecx
; shr al, 1
; and al, 85
; add cl, cl
; and cl, -86
; or cl, al
; and cl, dl
; xor eax, eax
; cmp cl, dl
; sete al
; test rsi, rsi
; je >LBB0_15
; and sil, 7
; mov dl, -1
; mov ecx, esi
; shl dl, cl
; not dl
; mov cl, BYTE [r8 + r10]
; rol cl, 4
; mov eax, ecx
; shr al, 2
; shl cl, 2
; and cl, -52
; or cl, al
; mov eax, ecx
; shr al, 1
; and al, 85
; add cl, cl
; and cl, -86
; or cl, al
; and cl, dl
; xor eax, eax
; cmp cl, dl
; sete al
;LBB0_15:
; ret
);
@@ -1432,10 +1467,12 @@ impl AsanRuntime {
; .arch aarch64
// calculate the shadow address
; mov x5, #1
; add x5, xzr, x5, lsl #shadow_bit
; mov x5, #0
// ; add x5, xzr, x5, lsl #shadow_bit
; add x5, x5, x0, lsr #3
; ubfx x5, x5, #0, #(shadow_bit + 1)
; mov x6, #1
; add x5, x5, x6, lsl #shadow_bit
; cmp x1, #0
; b.eq >return_success
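
Read back into arithmetic, the new x5 sequence (lsr #3, ubfx to keep the low shadow_bit + 1 bits, then adding 1 << shadow_bit) computes the same mapping as the fixed map_to_shadow! macro; a sketch, with x0 standing for the checked address:

// What x5 holds after the sequence above, assuming shadow_offset == 1 << shadow_bit.
fn aarch64_shadow_address(x0: u64, shadow_bit: u32) -> u64 {
    ((x0 >> 3) & ((1u64 << (shadow_bit + 1)) - 1)) + (1u64 << shadow_bit)
}
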
@@ -1545,18 +1582,18 @@ impl AsanRuntime {
}
}
// https://godbolt.org/z/cqEKf63e1
// https://godbolt.org/z/ah8vG8sWo
/*
#include <stdio.h>
#include <stdint.h>
uint8_t shadow_bit = 8;
uint8_t bit = 3;
uint64_t generate_shadow_check_blob(uint64_t start){
uint64_t addr = 1;
addr = addr << shadow_bit;
uint64_t addr = 0;
addr = addr + (start >> 3);
uint64_t mask = (1ULL << (shadow_bit + 1)) - 1;
addr = addr & mask;
addr = addr + (1ULL << shadow_bit);
uint8_t remainder = start & 0b111;
uint16_t val = *(uint16_t *)addr;
@@ -1586,17 +1623,16 @@ impl AsanRuntime {
macro_rules! shadow_check{
($ops:ident, $bit:expr) => {dynasm!($ops
; .arch x64
; mov cl, shadow_bit as i8
; mov eax, 1
; mov cl, BYTE shadow_bit as i8
; mov rax, -2
; shl rax, cl
; mov rdx, rdi
; mov esi, 2
; shl rsi, cl
; shr rdx, 3
; add rdx, rax
; add rsi, -1
; and rsi, rdx
; movzx eax, WORD [rsi]
; not rax
; and rax, rdx
; mov edx, 1
; shl rdx, cl
; movzx eax, WORD [rax + rdx]
; rol ax, 8
; mov ecx, eax
; shr ecx, 4
@@ -1656,16 +1692,20 @@ impl AsanRuntime {
($ops:ident, $bit:expr) => {dynasm!($ops
; .arch aarch64
; mov x1, #1
; add x1, xzr, x1, lsl #shadow_bit
; stp x2, x3, [sp, #-0x10]!
; mov x1, #0
// ; add x1, xzr, x1, lsl #shadow_bit
; add x1, x1, x0, lsr #3
; ubfx x1, x1, #0, #(shadow_bit + 1)
; mov x2, #1
; add x1, x1, x2, lsl #shadow_bit
; ldrh w1, [x1, #0]
; and x0, x0, #7
; rev16 w1, w1
; rbit w1, w1
; lsr x1, x1, #16
; lsr x1, x1, x0
; ldp x2, x3, [sp], 0x10
; tbnz x1, #$bit, >done
; adr x1, >done
@@ -1688,10 +1728,13 @@ impl AsanRuntime {
($ops:ident, $val:expr) => {dynasm!($ops
; .arch aarch64
; mov x1, #1
; add x1, xzr, x1, lsl #shadow_bit
; stp x2, x3, [sp, #-0x10]!
; mov x1, #0
// ; add x1, xzr, x1, lsl #shadow_bit
; add x1, x1, x0, lsr #3
; ubfx x1, x1, #0, #(shadow_bit + 1)
; mov x2, #1
; add x1, x1, x2, lsl #shadow_bit
; ldrh w1, [x1, #0]
; and x0, x0, #7
; rev16 w1, w1
@@ -1699,7 +1742,6 @@ impl AsanRuntime {
; lsr x1, x1, #16
; lsr x1, x1, x0
; .dword -717536768 // 0xd53b4200 //mrs x0, NZCV
; stp x2, x3, [sp, #-0x10]!
; mov x2, $val
; ands x1, x1, x2
; ldp x2, x3, [sp], 0x10


@@ -281,12 +281,15 @@ impl AsanErrors {
#[allow(clippy::non_ascii_literal)]
writeln!(output, "{:━^100}", " ALLOCATION INFO ").unwrap();
let offset: i64 = fault_address as i64 - error.metadata.address as i64;
let offset: i64 = fault_address as i64 - (error.metadata.address + 0x1000) as i64;
let direction = if offset > 0 { "right" } else { "left" };
writeln!(
output,
"access is {} to the {} of the 0x{:x} byte allocation at 0x{:x}",
offset, direction, error.metadata.size, error.metadata.address
"access is {:#x} to the {} of the {:#x} byte allocation at {:#x}",
offset,
direction,
error.metadata.size,
error.metadata.address + 0x1000
)
.unwrap();
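
The + 0x1000 adjustments report the fault relative to what appears to be the start of the user allocation (the metadata address plus an assumed 0x1000-byte leading red zone). A free-standing sketch of the same computation, with the struct fields flattened into parameters:

// Hypothetical helper mirroring the report line above.
fn describe_access(fault_address: usize, metadata_address: usize, metadata_size: usize) -> String {
    let alloc_start = metadata_address + 0x1000; // assumed leading red zone
    let offset = fault_address as i64 - alloc_start as i64;
    let direction = if offset > 0 { "right" } else { "left" };
    format!(
        "access is {:#x} to the {} of the {:#x} byte allocation at {:#x}",
        offset, direction, metadata_size, alloc_start
    )
}
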
@@ -369,7 +372,8 @@ impl AsanErrors {
writeln!(
output,
"allocation at 0x{:x}, with size 0x{:x}",
metadata.address, metadata.size
metadata.address + 0x1000,
metadata.size
)
.unwrap();
if metadata.is_malloc_zero {
@@ -403,7 +407,8 @@ impl AsanErrors {
writeln!(
output,
"allocation at 0x{:x}, with size 0x{:x}",
metadata.address, metadata.size
metadata.address + 0x1000,
metadata.size
)
.unwrap();
if metadata.is_malloc_zero {


@@ -7,7 +7,7 @@ use frida_gum::{
Gum, NativePointer,
};
#[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
#[cfg(any(debug_assertions, target_arch = "aarch64"))]
use frida_gum::MemoryRange;
use libafl::{
@@ -111,12 +111,12 @@ where
OT: ObserversTuple<I, S>,
{
pub fn new(gum: &'a Gum, base: InProcessExecutor<'a, H, I, OT, S>, helper: &'c mut FH) -> Self {
#[cfg(not(all(not(debug_assertions), target_arch = "x86_64")))]
let stalker = Stalker::new(gum);
#[cfg(all(not(debug_assertions), target_arch = "x86_64"))]
let stalker = Stalker::new(gum);
#[cfg(any(debug_assertions, target_arch = "aarch64"))]
let mut stalker = Stalker::new(gum);
#[cfg(not(all(debug_assertions, target_arch = "x86_64")))]
#[cfg(any(debug_assertions, target_arch = "aarch64"))]
for range in helper.ranges().gaps(&(0..usize::MAX)) {
println!("excluding range: {:x}-{:x}", range.start, range.end);
stalker.exclude(&MemoryRange::new(