Remove capstone from frida [aarch64] (#1723)

* Partially finish ASAN and CmpLog changes

* Fix handle_trap, report_error, and remove capstone

* Fix a few bugs. Can now detect UAFs properly

* Some small changes

* Make API more consistent with x86

* Fix printing

* Remove unneeded inputs, final changes

* formatting

* Fix x86 build

* Formatting
This commit is contained in:
Sharad Khanna 2023-12-16 02:10:40 -05:00 committed by GitHub
parent a0a4dd60bb
commit fce5fd9a2b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 423 additions and 609 deletions

View File

@ -29,11 +29,10 @@ serdeany_autoreg = ["libafl_bolts/serdeany_autoreg"]
cc = { version = "1.0", features = ["parallel"] }
[target.'cfg(target_arch = "aarch64")'.dependencies]
capstone = "0.11.0"
yaxpeax-arm = "0.2.4"
[target.'cfg(target_arch = "x86_64")'.dependencies]
yaxpeax-x86 = { git = "https://github.com/tokatoka/yaxpeax-x86/" } # replace this with origin later
yaxpeax-arch = "0.2.7"
[dependencies]
libafl = { path = "../libafl", default-features = false, version = "0.11.1", features = [
@ -67,6 +66,7 @@ frida-gum = { version = "0.13.2", features = [
"module-names",
] }
dynasmrt = "2"
color-backtrace = { version = "0.6", features = ["resolve-modules"] }
termcolor = "1.1.3"
serde = "1.0"
@ -80,5 +80,7 @@ paste = "1.0"
log = "0.4.20"
mmap-rs = "0.6.0"
yaxpeax-arch = "0.2.7"
[dev-dependencies]
serial_test = { version = "2", default-features = false, features = ["logging"] }

View File

@ -13,15 +13,6 @@ use core::{
use std::{ffi::c_void, num::NonZeroUsize, ptr::write_volatile, rc::Rc};
use backtrace::Backtrace;
#[cfg(target_arch = "aarch64")]
use capstone::{
arch::{
arm64::{Arm64Extender, Arm64OperandType, Arm64Shift},
ArchOperand::Arm64Operand,
BuildsCapstone,
},
Capstone,
};
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};
#[cfg(target_arch = "x86_64")]
use frida_gum::instruction_writer::X86Register;
@ -31,7 +22,6 @@ use frida_gum::{
instruction_writer::InstructionWriter, interceptor::Interceptor, stalker::StalkerOutput, Gum,
Module, ModuleDetails, ModuleMap, NativePointer, RangeDetails,
};
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
use frida_gum_sys::Insn;
use hashbrown::HashMap;
use libafl_bolts::{cli::FuzzerOptions, AsSlice};
@ -44,20 +34,24 @@ use libc::{getrlimit, rlimit};
use libc::{getrlimit64, rlimit64};
use nix::sys::mman::{mmap, MapFlags, ProtFlags};
use rangemap::RangeMap;
#[cfg(target_arch = "aarch64")]
use yaxpeax_arch::Arch;
#[cfg(target_arch = "aarch64")]
use yaxpeax_arm::armv8::a64::{ARMv8, InstDecoder, Opcode, Operand, ShiftStyle, SizeCode};
#[cfg(target_arch = "x86_64")]
use yaxpeax_x86::amd64::{InstDecoder, Instruction, Opcode};
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
#[cfg(any(target_arch = "x86_64"))]
use crate::utils::frida_to_cs;
#[cfg(target_arch = "aarch64")]
use crate::utils::instruction_width;
use crate::utils::{instruction_width, writer_register};
#[cfg(target_arch = "x86_64")]
use crate::utils::operand_details;
use crate::utils::{operand_details, AccessType};
use crate::{
alloc::Allocator,
asan::errors::{AsanError, AsanErrors, AsanReadWriteError, ASAN_ERRORS},
helper::{FridaRuntime, SkipRange},
utils::{disas_count, AccessType},
utils::disas_count,
};
extern "C" {
@ -193,15 +187,9 @@ impl FridaRuntime for AsanRuntime {
}));
self.hook_functions(gum);
/*
unsafe {
/* unsafe {
let mem = self.allocator.alloc(0xac + 2, 8);
mprotect(
(self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void,
0x1000,
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC,
)
.unwrap();
log::info!("Test0");
/*
0x555555916ce9 <libafl_frida::asan_rt::AsanRuntime::init+13033> je libafl_frida::asan_rt::AsanRuntime::init+14852 <libafl_frida::asan_rt::AsanRuntime::init+14852>
@ -265,8 +253,8 @@ impl FridaRuntime for AsanRuntime {
));
}
// assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4));
}
*/
}*/
self.register_thread();
}
fn pre_exec<I: libafl::inputs::Input + libafl::inputs::HasTargetBytes>(
@ -1074,113 +1062,61 @@ impl AsanRuntime {
extern "C" fn handle_trap(&mut self) {
let mut actual_pc = self.regs[31];
actual_pc = match self.stalked_addresses.get(&actual_pc) {
//get the pc associated with the trapped insn
Some(addr) => *addr,
None => actual_pc,
};
let cs = Capstone::new()
.arm64()
.mode(capstone::arch::arm64::ArchMode::Arm)
.detail(true)
.build()
.unwrap();
let decoder = <ARMv8 as Arch>::Decoder::default();
let instructions = cs
.disasm_count(
unsafe { std::slice::from_raw_parts(actual_pc as *mut u8, 24) },
actual_pc as u64,
3,
)
.unwrap();
let instructions = instructions.iter().collect::<Vec<&capstone::Insn>>();
let mut insn = instructions.first().unwrap();
if insn.mnemonic().unwrap() == "msr" && insn.op_str().unwrap() == "nzcv, x0" {
insn = instructions.get(2).unwrap();
actual_pc = insn.address() as usize;
let insn = disas_count(
&decoder,
unsafe { std::slice::from_raw_parts(actual_pc as *mut u8, 4) },
1,
)[0];
if insn.opcode == Opcode::MSR && insn.operands[0] == Operand::SystemReg(23056) { //the first operand is nzcv
//What case is this for??
/*insn = instructions.get(2).unwrap();
actual_pc = insn.address() as usize;*/
}
let detail = cs.insn_detail(insn).unwrap();
let arch_detail = detail.arch_detail();
let (mut base_reg, mut index_reg, displacement) =
if let Arm64Operand(arm64operand) = arch_detail.operands().last().unwrap() {
if let Arm64OperandType::Mem(opmem) = arm64operand.op_type {
(opmem.base().0, opmem.index().0, opmem.disp())
} else {
(0, 0, 0)
let operands_len = insn
.operands
.iter()
.position(|item| *item == Operand::Nothing)
.unwrap_or_else(|| 4);
//the memory operand is always the last operand in aarch64
let (base_reg, index_reg, displacement) = match insn.operands[operands_len - 1] {
Operand::RegRegOffset(reg1, reg2, _, _, _) => (reg1, Some(reg2), 0),
Operand::RegPreIndex(reg, disp, _) => (reg, None, disp),
Operand::RegPostIndex(reg, _) => {
//in post index the disp is applied after so it doesn't matter for this memory access
(reg, None, 0)
}
Operand::RegPostIndexReg(reg, _) => (reg, None, 0),
_ => {
return;
}
} else {
(0, 0, 0)
};
if capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16 <= base_reg
&& base_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_X28 as u16
{
base_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16;
} else if base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X29 as u16 {
base_reg = 29u16;
} else if base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X30 as u16 {
base_reg = 30u16;
} else if base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_SP as u16
|| base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WSP as u16
|| base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_XZR as u16
|| base_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WZR as u16
{
base_reg = 31u16;
} else if capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16 <= base_reg
&& base_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_W30 as u16
{
base_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16;
} else if capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16 <= base_reg
&& base_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_S31 as u16
{
base_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16;
}
#[allow(clippy::cast_possible_wrap)]
let mut fault_address =
let fault_address =
(self.regs[base_reg as usize] as isize + displacement as isize) as usize;
if index_reg == 0 {
index_reg = 0xffff;
} else {
if capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16 <= index_reg
&& index_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_X28 as u16
{
index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_X0 as u16;
} else if index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X29 as u16 {
index_reg = 29u16;
} else if index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_X30 as u16 {
index_reg = 30u16;
} else if index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_SP as u16
|| index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WSP as u16
|| index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_XZR as u16
|| index_reg == capstone::arch::arm64::Arm64Reg::ARM64_REG_WZR as u16
{
index_reg = 31u16;
} else if capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16 <= index_reg
&& index_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_W30 as u16
{
index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_W0 as u16;
} else if capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16 <= index_reg
&& index_reg <= capstone::arch::arm64::Arm64Reg::ARM64_REG_S31 as u16
{
index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16;
}
fault_address += self.regs[index_reg as usize];
}
let backtrace = Backtrace::new();
let (stack_start, stack_end) = Self::current_stack();
#[allow(clippy::option_if_let_else)]
let error = if fault_address >= stack_start && fault_address < stack_end {
if insn.mnemonic().unwrap().starts_with('l') {
if insn.opcode.to_string().starts_with('l') {
AsanError::StackOobRead((
self.regs,
actual_pc,
(
Some(base_reg),
Some(index_reg),
Some(index_reg.unwrap_or_else(|| 0xffff)),
displacement as usize,
fault_address,
),
@ -1192,7 +1128,7 @@ impl AsanRuntime {
actual_pc,
(
Some(base_reg),
Some(index_reg),
Some(index_reg.unwrap_or_else(|| 0xffff)),
displacement as usize,
fault_address,
),
@ -1208,14 +1144,14 @@ impl AsanRuntime {
pc: actual_pc,
fault: (
Some(base_reg),
Some(index_reg),
Some(index_reg.unwrap_or_else(|| 0xffff)),
displacement as usize,
fault_address,
),
metadata: metadata.clone(),
backtrace,
};
if insn.mnemonic().unwrap().starts_with('l') {
if insn.opcode.to_string().starts_with('l') {
if metadata.freed {
AsanError::ReadAfterFree(asan_readwrite_error)
} else {
@ -1232,7 +1168,7 @@ impl AsanRuntime {
actual_pc,
(
Some(base_reg),
Some(index_reg),
Some(index_reg.unwrap_or_else(|| 0xffff)),
displacement as usize,
fault_address,
),
@ -2192,53 +2128,101 @@ impl AsanRuntime {
#[must_use]
#[inline]
pub fn asan_is_interesting_instruction(
capstone: &Capstone,
decoder: InstDecoder,
_address: u64,
instr: &Insn,
) -> Option<(
capstone::RegId,
capstone::RegId,
i32,
u32,
Arm64Shift,
Arm64Extender,
u16, //reg1
Option<(u16, SizeCode)>, //size of reg2. This needs to be an option in the case that we don't have one
i32, //displacement.
u32, //load/store size
Option<(ShiftStyle, u8)>, //(shift type, shift size)
)> {
// We need to re-decode frida-internal capstone values to upstream capstone
let cs_instr = frida_to_cs(capstone, instr);
let cs_instr = cs_instr.first().unwrap();
let instr = disas_count(&decoder, instr.bytes(), 1)[0];
// We have to ignore these instructions. Simulating them with their side effects is
// complex, to say the least.
match cs_instr.mnemonic().unwrap() {
"ldaxr" | "stlxr" | "ldxr" | "stxr" | "ldar" | "stlr" | "ldarb" | "ldarh" | "ldaxp"
| "ldaxrb" | "ldaxrh" | "stlrb" | "stlrh" | "stlxp" | "stlxrb" | "stlxrh" | "ldxrb"
| "ldxrh" | "stxrb" | "stxrh" => return None,
match instr.opcode {
Opcode::LDAXR
| Opcode::STLXR
| Opcode::LDXR
| Opcode::LDAR
| Opcode::STLR
| Opcode::LDARB
| Opcode::LDAXP
| Opcode::LDAXRB
| Opcode::LDAXRH
| Opcode::STLRB
| Opcode::STLRH
| Opcode::STLXP
| Opcode::STLXRB
| Opcode::STLXRH
| Opcode::LDXRB
| Opcode::LDXRH
| Opcode::STXRB
| Opcode::STXRH => {
return None;
}
_ => (),
}
let operands = capstone
.insn_detail(cs_instr)
.unwrap()
.arch_detail()
.operands();
if operands.len() < 2 {
//we need to do this convoluted operation because operands in yaxpeax are in a constant slice of size 4,
//and any unused operands are Operand::Nothing
let operands_len = instr
.operands
.iter()
.position(|item| *item == Operand::Nothing)
.unwrap_or_else(|| 4);
if operands_len < 2 {
return None;
}
if let Arm64Operand(arm64operand) = operands.last().unwrap() {
if let Arm64OperandType::Mem(opmem) = arm64operand.op_type {
return Some((
opmem.base(),
opmem.index(),
opmem.disp(),
instruction_width(cs_instr, &operands),
arm64operand.shift,
arm64operand.ext,
));
/*if instr.opcode == Opcode::LDRSW || instr.opcode == Opcode::LDR {
//this is a special case for pc-relative loads. The only two opcodes capable of this are LDR and LDRSW
// For more information on this, look up "literal" loads in the ARM docs.
match instr.operands[1] {
//this is safe because an ldr is guaranteed to have at least 3 operands
Operand::PCOffset(off) => {
return Some((32, None, off, memory_access_size, None));
}
_ => (),
}
}*/
None
// println!("{:?} {}", instr, memory_access_size);
//abuse the fact that the last operand is always the mem operand
match instr.operands[operands_len - 1] {
Operand::RegRegOffset(reg1, reg2, size, shift, shift_size) => {
let ret = Some((
reg1,
Some((reg2, size)),
0,
instruction_width(&instr),
Some((shift, shift_size)),
));
// log::trace!("Interesting instruction: {}, {:?}", instr.to_string(), ret);
return ret;
}
Operand::RegPreIndex(reg, disp, _) => {
let ret = Some((reg, None, disp, instruction_width(&instr), None));
// log::trace!("Interesting instruction: {}, {:?}", instr.to_string(), ret);
return ret;
}
Operand::RegPostIndex(reg, _) => {
//in post index the disp is applied after so it doesn't matter for this memory access
let ret = Some((reg, None, 0, instruction_width(&instr), None));
// log::trace!("Interesting instruction: {}, {:?}", instr.to_string(), ret);
return ret;
}
Operand::RegPostIndexReg(reg, _) => {
let ret = Some((reg, None, 0, instruction_width(&instr), None));
// log::trace!("Interesting instruction: {}, {:?}", instr.to_string(), ret);
return ret;
}
_ => {
return None;
}
}
}
/// Checks if the current instruction is interesting for address sanitization.
@ -2474,12 +2458,11 @@ impl AsanRuntime {
&mut self,
_address: u64,
output: &StalkerOutput,
basereg: capstone::RegId,
indexreg: capstone::RegId,
basereg: u16,
indexreg: Option<(u16, SizeCode)>,
displacement: i32,
width: u32,
shift: Arm64Shift,
extender: Arm64Extender,
shift: Option<(ShiftStyle, u8)>,
) {
debug_assert!(
i32::try_from(frida_gum_sys::GUM_RED_ZONE_SIZE).is_ok(),
@ -2489,11 +2472,11 @@ impl AsanRuntime {
let redzone_size = frida_gum_sys::GUM_RED_ZONE_SIZE as i32;
let writer = output.writer();
let basereg = writer_register(basereg);
let indexreg = if indexreg.0 == 0 {
None
let basereg = writer_register(basereg, SizeCode::X, false); //the writer register can never be zr and is always 64 bit
let indexreg = if let Some((reg, sizecode)) = indexreg {
Some(writer_register(reg, sizecode, true)) //the index register can be zr
} else {
Some(writer_register(indexreg))
None
};
if self.current_report_impl == 0
@ -2502,8 +2485,6 @@ impl AsanRuntime {
{
let after_report_impl = writer.code_offset() + 2;
#[cfg(target_arch = "x86_64")]
writer.put_jmp_near_label(after_report_impl);
#[cfg(target_arch = "aarch64")]
writer.put_b_label(after_report_impl);
@ -2558,28 +2539,22 @@ impl AsanRuntime {
}
}
if let (Arm64Extender::ARM64_EXT_INVALID, Arm64Shift::Invalid) = (extender, shift) {
writer.put_add_reg_reg_reg(
Aarch64Register::X0,
Aarch64Register::X0,
Aarch64Register::X1,
);
} else {
let extender_encoding: i32 = match extender {
Arm64Extender::ARM64_EXT_UXTB => 0b000,
Arm64Extender::ARM64_EXT_UXTH => 0b001,
Arm64Extender::ARM64_EXT_UXTW => 0b010,
Arm64Extender::ARM64_EXT_UXTX => 0b011,
Arm64Extender::ARM64_EXT_SXTB => 0b100,
Arm64Extender::ARM64_EXT_SXTH => 0b101,
Arm64Extender::ARM64_EXT_SXTW => 0b110,
Arm64Extender::ARM64_EXT_SXTX => 0b111,
Arm64Extender::ARM64_EXT_INVALID => -1,
if let Some((shift_type, amount)) = shift {
let extender_encoding: i32 = match shift_type {
ShiftStyle::UXTB => 0b000,
ShiftStyle::UXTH => 0b001,
ShiftStyle::UXTW => 0b010,
ShiftStyle::UXTX => 0b011,
ShiftStyle::SXTB => 0b100,
ShiftStyle::SXTH => 0b101,
ShiftStyle::SXTW => 0b110,
ShiftStyle::SXTX => 0b111,
_ => -1,
};
let (shift_encoding, shift_amount): (i32, u32) = match shift {
Arm64Shift::Lsl(amount) => (0b00, amount),
Arm64Shift::Lsr(amount) => (0b01, amount),
Arm64Shift::Asr(amount) => (0b10, amount),
let (shift_encoding, shift_amount): (i32, u32) = match shift_type {
ShiftStyle::LSL => (0b00, amount as u32),
ShiftStyle::LSR => (0b01, amount as u32),
ShiftStyle::ASR => (0b10, amount as u32),
_ => (-1, 0),
};
@ -2589,16 +2564,22 @@ impl AsanRuntime {
writer.put_bytes(
&(0x8b210000 | ((extender_encoding as u32) << 13) | (shift_amount << 10))
.to_le_bytes(),
);
); //add x0, x0, w1, [shift] #[amount]
} else if shift_encoding != -1 {
#[allow(clippy::cast_sign_loss)]
writer.put_bytes(
&(0x8b010000 | ((shift_encoding as u32) << 22) | (shift_amount << 10))
.to_le_bytes(),
);
); //add x0, x0, x1, [shift] #[amount]
} else {
panic!("extender: {extender:?}, shift: {shift:?}");
panic!("shift_type: {shift_type:?}, shift: {shift:?}");
}
} else {
writer.put_add_reg_reg_reg(
Aarch64Register::X0,
Aarch64Register::X0,
Aarch64Register::X1,
);
};
}
@ -2625,12 +2606,12 @@ impl AsanRuntime {
let displacement = displacement.unsigned_abs();
let displacement_hi = displacement / 4096;
let displacement_lo = displacement % 4096;
writer.put_bytes(&(0xd1400000u32 | (displacement_hi << 10)).to_le_bytes());
writer.put_bytes(&(0xd1400000u32 | (displacement_hi << 10)).to_le_bytes()); //sub x0, x0, #[displacement / 4096] LSL#12
writer.put_sub_reg_reg_imm(
Aarch64Register::X0,
Aarch64Register::X0,
u64::from(displacement_lo),
);
); //sub x0, x0, #[displacement & 4095]
}
} else if displacement > 0 {
#[allow(clippy::cast_sign_loss)]
@ -2670,7 +2651,7 @@ impl AsanRuntime {
64 => writer.put_bytes(self.blob_check_mem_64bytes()),
_ => false,
};
//Shouldn't there be some manipulation of the code_offset here?
// Add the branch to report
//writer.put_brk_imm(0x12);
writer.put_branch_address(self.current_report_impl);

View File

@ -19,8 +19,11 @@ use libafl::{
use libafl_bolts::{ownedref::OwnedPtr, Named, SerdeAny};
use serde::{Deserialize, Serialize};
use termcolor::{Color, ColorSpec, WriteColor};
#[cfg(target_arch = "x86_64")]
#[cfg(target_arch = "aarch64")]
use yaxpeax_arch::Arch;
use yaxpeax_arch::LengthedInstruction;
#[cfg(target_arch = "aarch64")]
use yaxpeax_arm::armv8::a64::ARMv8;
#[cfg(target_arch = "x86_64")]
use yaxpeax_x86::amd64::InstDecoder;
@ -239,21 +242,26 @@ impl AsanErrors {
writeln!(output, "{:━^100}", " CODE ").unwrap();
#[cfg(target_arch = "aarch64")]
let mut cs = Capstone::new()
.arm64()
.mode(capstone::arch::arm64::ArchMode::Arm)
.build()
.unwrap();
let decoder = <ARMv8 as Arch>::Decoder::default();
#[cfg(target_arch = "x86_64")]
let decoder = InstDecoder::minimal();
let start_pc = error.pc - 4 * 5;
#[cfg(target_arch = "x86_64")]
let insts = disas_count(
&decoder,
unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 15 * 11) },
11,
);
#[cfg(target_arch = "aarch64")]
let insts = disas_count(
&decoder,
unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) },
11,
);
let mut inst_address = start_pc;
for insn in insts {
@ -489,22 +497,27 @@ impl AsanErrors {
writeln!(output, "{:━^100}", " CODE ").unwrap();
#[cfg(target_arch = "aarch64")]
let mut cs = Capstone::new()
.arm64()
.mode(capstone::arch::arm64::ArchMode::Arm)
.build()
.unwrap();
let decoder = <ARMv8 as Arch>::Decoder::default();
#[cfg(target_arch = "x86_64")]
let decoder = InstDecoder::minimal();
let start_pc = pc;
#[cfg(target_arch = "x86_64")]
let insts = disas_count(
&decoder,
unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 15 * 11) },
11,
);
#[cfg(target_arch = "aarch64")]
let insts = disas_count(
&decoder,
unsafe { std::slice::from_raw_parts(start_pc as *mut u8, 4 * 11) },
11,
);
let mut inst_address = start_pc;
for insn in insts {
if inst_address == pc {

View File

@ -6,6 +6,8 @@
use std::ffi::c_void;
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi};
#[cfg(target_arch = "aarch64")]
use frida_gum_sys::Insn;
use libafl::{
inputs::{HasTargetBytes, Input},
Error,
@ -27,11 +29,9 @@ use frida_gum::{
instruction_writer::{Aarch64Register, IndexMode, InstructionWriter},
stalker::StalkerOutput,
};
#[cfg(target_arch = "aarch64")]
use frida_gum_sys::Insn;
#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
use crate::utils::{frida_to_cs, instruction_width, writer_register};
use crate::utils::{disas_count, writer_register};
#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
/// Special `CmpLog` Cases for `aarch64`
@ -44,10 +44,7 @@ pub enum SpecialCmpLogCase {
}
#[cfg(target_arch = "aarch64")]
use capstone::{
arch::{arm64::Arm64OperandType, ArchOperand::Arm64Operand},
Capstone,
};
use yaxpeax_arm::armv8::a64::{InstDecoder, Opcode, Operand, ShiftStyle};
/// The [`frida_gum_sys::GUM_RED_ZONE_SIZE`] casted to [`i32`]
///
@ -68,13 +65,12 @@ fn gum_red_zone_size_i32() -> i32 {
#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
pub enum CmplogOperandType {
/// A Register
Regid(capstone::RegId),
Regid(Aarch64Register),
/// An immediate value
Imm(u64),
/// A constant immediate value
Cimm(u64),
/// A memory operand
Mem(capstone::RegId, capstone::RegId, i32, u32),
// We don't need a memory type because you cannot directly compare with memory
}
/// `Frida`-based binary-only instrumentation that logs compares to the fuzzer
@ -276,8 +272,9 @@ impl CmpLogRuntime {
&self,
_address: u64,
output: &StalkerOutput,
op1: &CmplogOperandType,
op2: &CmplogOperandType,
op1: &CmplogOperandType, //first operand of the comparison
op2: &CmplogOperandType, //second operand of the comparison
_shift: Option<(ShiftStyle, u8)>,
special_case: Option<SpecialCmpLogCase>,
) {
let writer = output.writer();
@ -296,67 +293,17 @@ impl CmpLogRuntime {
CmplogOperandType::Imm(value) | CmplogOperandType::Cimm(value) => {
writer.put_ldr_reg_u64(Aarch64Register::X0, *value);
}
CmplogOperandType::Regid(reg) => {
let reg = writer_register(*reg);
match reg {
CmplogOperandType::Regid(reg) => match *reg {
Aarch64Register::X0 | Aarch64Register::W0 => {}
Aarch64Register::X1 | Aarch64Register::W1 => {
writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1);
}
_ => {
if !writer.put_mov_reg_reg(Aarch64Register::X0, reg) {
writer.put_mov_reg_reg(Aarch64Register::W0, reg);
if !writer.put_mov_reg_reg(Aarch64Register::X0, *reg) {
writer.put_mov_reg_reg(Aarch64Register::W0, *reg);
}
}
}
}
CmplogOperandType::Mem(basereg, indexreg, displacement, _width) => {
let basereg = writer_register(*basereg);
let indexreg = if indexreg.0 == 0 {
None
} else {
Some(writer_register(*indexreg))
};
// calculate base+index+displacment into x0
let displacement = displacement
+ if basereg == Aarch64Register::Sp {
16 + gum_red_zone_size_i32()
} else {
0
};
if indexreg.is_some() {
if let Some(indexreg) = indexreg {
writer.put_add_reg_reg_reg(Aarch64Register::X0, basereg, indexreg);
}
} else {
match basereg {
Aarch64Register::X0 | Aarch64Register::W0 => {}
Aarch64Register::X1 | Aarch64Register::W1 => {
writer.put_mov_reg_reg(Aarch64Register::X0, Aarch64Register::X1);
}
_ => {
if !writer.put_mov_reg_reg(Aarch64Register::X0, basereg) {
writer.put_mov_reg_reg(Aarch64Register::W0, basereg);
}
}
}
}
debug_assert!(displacement >= 0);
//add displacement
#[allow(clippy::cast_sign_loss)]
writer.put_add_reg_reg_imm(
Aarch64Register::X0,
Aarch64Register::X0,
displacement as u64,
);
//deref into x0 to get the real value
writer.put_ldr_reg_reg_offset(Aarch64Register::X0, Aarch64Register::X0, 0u64);
}
},
}
// make sure operand2 value is saved into x1
@ -374,207 +321,17 @@ impl CmpLogRuntime {
}
}
}
CmplogOperandType::Regid(reg) => {
let reg = writer_register(*reg);
match reg {
CmplogOperandType::Regid(reg) => match *reg {
Aarch64Register::X1 | Aarch64Register::W1 => {}
Aarch64Register::X0 | Aarch64Register::W0 => {
writer.put_ldr_reg_reg_offset(
Aarch64Register::X1,
Aarch64Register::Sp,
0u64,
);
writer.put_ldr_reg_reg_offset(Aarch64Register::X1, Aarch64Register::Sp, 0u64);
}
_ => {
if !writer.put_mov_reg_reg(Aarch64Register::X1, reg) {
writer.put_mov_reg_reg(Aarch64Register::W1, reg);
if !writer.put_mov_reg_reg(Aarch64Register::X1, *reg) {
writer.put_mov_reg_reg(Aarch64Register::W1, *reg);
}
}
}
}
CmplogOperandType::Mem(basereg, indexreg, displacement, _width) => {
let basereg = writer_register(*basereg);
let indexreg = if indexreg.0 == 0 {
None
} else {
Some(writer_register(*indexreg))
};
// calculate base+index+displacement into x1
let displacement = displacement
+ if basereg == Aarch64Register::Sp {
16 + gum_red_zone_size_i32()
} else {
0
};
if indexreg.is_some() {
if let Some(indexreg) = indexreg {
match indexreg {
Aarch64Register::X0 | Aarch64Register::W0 => {
match basereg {
Aarch64Register::X1 | Aarch64Register::W1 => {
// x0 is overwritten indexreg by op1 value.
// x1 is basereg
// Preserve x2, x3:
writer.put_stp_reg_reg_reg_offset(
Aarch64Register::X2,
Aarch64Register::X3,
Aarch64Register::Sp,
i64::from(-(16 + gum_red_zone_size_i32())),
IndexMode::PreAdjust,
);
//reload indexreg to x2
writer.put_ldr_reg_reg_offset(
Aarch64Register::X2,
Aarch64Register::Sp,
0u64,
);
//add them into basereg==x1
writer.put_add_reg_reg_reg(
basereg,
basereg,
Aarch64Register::X2,
);
// Restore x2, x3
assert!(writer.put_ldp_reg_reg_reg_offset(
Aarch64Register::X2,
Aarch64Register::X3,
Aarch64Register::Sp,
16 + i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE),
IndexMode::PostAdjust,
));
}
_ => {
// x0 is overwritten indexreg by op1 value.
// basereg is not x1 nor x0
//reload indexreg to x1
writer.put_ldr_reg_reg_offset(
Aarch64Register::X1,
Aarch64Register::Sp,
0u64,
);
//add basereg into indexreg==x1
writer.put_add_reg_reg_reg(
Aarch64Register::X1,
basereg,
Aarch64Register::X1,
);
}
}
}
Aarch64Register::X1 | Aarch64Register::W1 => {
match basereg {
Aarch64Register::X0 | Aarch64Register::W0 => {
// x0 is overwritten basereg by op1 value.
// x1 is indexreg
// Preserve x2, x3:
writer.put_stp_reg_reg_reg_offset(
Aarch64Register::X2,
Aarch64Register::X3,
Aarch64Register::Sp,
i64::from(-(16 + gum_red_zone_size_i32())),
IndexMode::PreAdjust,
);
//reload basereg to x2
writer.put_ldr_reg_reg_offset(
Aarch64Register::X2,
Aarch64Register::Sp,
0u64,
);
//add basereg into indexreg==x1
writer.put_add_reg_reg_reg(
indexreg,
Aarch64Register::X2,
indexreg,
);
// Restore x2, x3
assert!(writer.put_ldp_reg_reg_reg_offset(
Aarch64Register::X2,
Aarch64Register::X3,
Aarch64Register::Sp,
16 + i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE),
IndexMode::PostAdjust,
));
}
_ => {
// indexreg is x1
// basereg is not x0 and not x1
//add them into x1
writer.put_add_reg_reg_reg(indexreg, basereg, indexreg);
}
}
}
_ => {
match basereg {
Aarch64Register::X0 | Aarch64Register::W0 => {
//basereg is overwritten by op1 value
//index reg is not x0 nor x1
//reload basereg to x1
writer.put_ldr_reg_reg_offset(
Aarch64Register::X1,
Aarch64Register::Sp,
0u64,
);
//add indexreg to basereg==x1
writer.put_add_reg_reg_reg(
Aarch64Register::X1,
Aarch64Register::X1,
indexreg,
);
}
_ => {
//basereg is not x0, can be x1
//index reg is not x0 nor x1
//add them into x1
writer.put_add_reg_reg_reg(
Aarch64Register::X1,
basereg,
indexreg,
);
}
}
}
}
}
} else {
match basereg {
Aarch64Register::X1 | Aarch64Register::W1 => {}
Aarch64Register::X0 | Aarch64Register::W0 => {
// x0 is overwritten basereg by op1 value.
//reload basereg to x1
writer.put_ldr_reg_reg_offset(
Aarch64Register::X1,
Aarch64Register::Sp,
0u64,
);
}
_ => {
writer.put_mov_reg_reg(Aarch64Register::W1, basereg);
}
}
}
// add displacement
#[allow(clippy::cast_sign_loss)]
writer.put_add_reg_reg_imm(
Aarch64Register::X1,
Aarch64Register::X1,
displacement as u64,
);
//deref into x1 to get the real value
writer.put_ldr_reg_reg_offset(Aarch64Register::X1, Aarch64Register::X1, 0u64);
}
},
}
//call cmplog runtime to populate the values map
@ -596,97 +353,122 @@ impl CmpLogRuntime {
/// Check if the current instruction is cmplog relevant one(any opcode which sets the flags)
#[must_use]
pub fn cmplog_is_interesting_instruction(
capstone: &Capstone,
decoder: InstDecoder,
_address: u64,
instr: &Insn,
) -> Option<(
CmplogOperandType,
CmplogOperandType,
Option<(ShiftStyle, u8)>, //possible shifts: everything except MSL
Option<SpecialCmpLogCase>,
)> {
// We need to re-decode frida-internal capstone values to upstream capstone
let cs_instr = frida_to_cs(capstone, instr);
let cs_instr = cs_instr.first().unwrap();
let mut instr = disas_count(&decoder, instr.bytes(), 1)[0];
let operands_len = instr
.operands
.iter()
.position(|item| *item == Operand::Nothing)
.unwrap_or_else(|| 4);
// "cmp" | "ands" | "subs" | "adds" | "negs" | "ngcs" | "sbcs" | "bics" | "cbz"
// | "cbnz" | "tbz" | "tbnz" | "adcs" - yaxpeax aliases insns (i.e., cmp -> subs)
// We only care for compare instructions - aka instructions which set the flags
match cs_instr.mnemonic().unwrap() {
"cmp" | "ands" | "subs" | "adds" | "negs" | "ngcs" | "sbcs" | "bics" | "cbz"
| "cbnz" | "tbz" | "tbnz" | "adcs" => (),
match instr.opcode {
Opcode::SUBS
| Opcode::ANDS
| Opcode::ADDS
| Opcode::SBCS
| Opcode::BICS
| Opcode::CBZ
| Opcode::CBNZ
| Opcode::TBZ
| Opcode::TBNZ
| Opcode::ADC => (),
_ => return None,
}
let mut operands = capstone
.insn_detail(cs_instr)
.unwrap()
.arch_detail()
.operands();
// cbz - 1 operand, tbz - 3 operands
// cbz - 1 operand, everything else - 3 operands
let special_case = [
"cbz", "cbnz", "tbz", "tbnz", "subs", "adds", "ands", "sbcs", "bics", "adcs",
Opcode::CBZ,
Opcode::CBNZ,
Opcode::TBZ,
Opcode::TBNZ,
Opcode::SUBS,
Opcode::ADDS,
Opcode::ANDS,
Opcode::SBCS,
Opcode::BICS,
Opcode::ADCS,
]
.contains(&cs_instr.mnemonic().unwrap());
if operands.len() != 2 && !special_case {
.contains(&instr.opcode);
//this check is to ensure that there are the right number of operands
if operands_len != 2 && !special_case {
return None;
}
// handle special opcodes case which have 3 operands, but the 1st(dest) is not important to us
if ["subs", "adds", "ands", "sbcs", "bics", "adcs"].contains(&cs_instr.mnemonic().unwrap())
////subs", "adds", "ands", "sbcs", "bics", "adcs"
if [
Opcode::SUBS,
Opcode::ADDS,
Opcode::ANDS,
Opcode::SBCS,
Opcode::BICS,
Opcode::ADCS,
]
.contains(&instr.opcode)
{
//remove the dest operand from the list
operands.remove(0);
instr.operands.rotate_left(1);
instr.operands[3] = Operand::Nothing;
}
// cbz marked as special since there is only 1 operand
#[allow(clippy::cast_sign_loss)]
let special_case = matches!(cs_instr.mnemonic().unwrap(), "cbz" | "cbnz");
let special_case = matches!(instr.opcode, Opcode::CBZ | Opcode::CBNZ);
#[allow(clippy::cast_sign_loss, clippy::similar_names)]
let operand1 = if let Arm64Operand(arm64operand) = operands.first().unwrap() {
match arm64operand.op_type {
Arm64OperandType::Reg(regid) => Some(CmplogOperandType::Regid(regid)),
Arm64OperandType::Imm(val) => Some(CmplogOperandType::Imm(val as u64)),
Arm64OperandType::Mem(opmem) => Some(CmplogOperandType::Mem(
opmem.base(),
opmem.index(),
opmem.disp(),
instruction_width(cs_instr, &operands),
let operand1 = match instr.operands[0] {
//the only possibilities are registers for the first operand
//precompute the aarch64 frida register because it is ambiguous if register=31 means xzr or sp in yaxpeax
Operand::Register(sizecode, reg) => Some(CmplogOperandType::Regid(writer_register(
reg, sizecode, true,
))),
Operand::RegisterOrSP(sizecode, reg) => Some(CmplogOperandType::Regid(
writer_register(reg, sizecode, false),
)),
Arm64OperandType::Cimm(val) => Some(CmplogOperandType::Cimm(val as u64)),
_ => return None,
}
} else {
None
_ => panic!("First argument is not a register"), //this should never be possible in arm64
};
#[allow(clippy::cast_sign_loss)]
let operand2 = if special_case {
Some(CmplogOperandType::Imm(0))
} else if let Arm64Operand(arm64operand2) = &operands[1] {
match arm64operand2.op_type {
Arm64OperandType::Reg(regid) => Some(CmplogOperandType::Regid(regid)),
Arm64OperandType::Imm(val) => Some(CmplogOperandType::Imm(val as u64)),
Arm64OperandType::Mem(opmem) => Some(CmplogOperandType::Mem(
opmem.base(),
opmem.index(),
opmem.disp(),
instruction_width(cs_instr, &operands),
)),
Arm64OperandType::Cimm(val) => Some(CmplogOperandType::Cimm(val as u64)),
_ => return None,
}
Some((CmplogOperandType::Imm(0), None))
} else {
None
match instr.operands[1] {
Operand::Register(sizecode, reg) => Some((
CmplogOperandType::Regid(writer_register(reg, sizecode, true)),
None,
)),
Operand::ImmShift(imm, shift) => {
Some((CmplogOperandType::Imm((imm as u64) << shift), None))
} //precalculate the shift
Operand::RegShift(shiftstyle, amount, regsize, reg) => {
let reg = CmplogOperandType::Regid(writer_register(reg, regsize, true));
let shift = (shiftstyle, amount);
Some((reg, Some(shift)))
}
Operand::Immediate(imm) => Some((CmplogOperandType::Imm(imm as u64), None)),
_ => panic!("Second argument could not be decoded"),
}
};
// tbz will need to have special handling at emit time(masking operand1 value with operand2)
let special_case = match cs_instr.mnemonic().unwrap() {
"tbz" => Some(SpecialCmpLogCase::Tbz),
"tbnz" => Some(SpecialCmpLogCase::Tbnz),
let special_case = match instr.opcode {
Opcode::TBZ => Some(SpecialCmpLogCase::Tbz),
Opcode::TBNZ => Some(SpecialCmpLogCase::Tbnz),
_ => None,
};
if let Some(op1) = operand1 {
operand2.map(|op2| (op1, op2, special_case))
operand2.map(|op2| (op1, op2.0, op2.1, special_case))
} else {
None
}

View File

@ -6,11 +6,6 @@ use std::{
rc::Rc,
};
#[cfg(target_arch = "aarch64")]
use capstone::{
arch::{self, BuildsCapstone},
Capstone,
};
#[cfg(unix)]
use frida_gum::instruction_writer::InstructionWriter;
use frida_gum::{
@ -27,6 +22,10 @@ use libafl_targets::drcov::DrCovBasicBlock;
#[cfg(unix)]
use nix::sys::mman::{mmap, MapFlags, ProtFlags};
use rangemap::RangeMap;
#[cfg(target_arch = "aarch64")]
use yaxpeax_arch::Arch;
#[cfg(all(target_arch = "aarch64", unix))]
use yaxpeax_arm::armv8::a64::{ARMv8, InstDecoder};
#[cfg(target_arch = "x86_64")]
use yaxpeax_x86::amd64::InstDecoder;
@ -445,28 +444,14 @@ where
let ranges = Rc::clone(ranges);
let runtimes = Rc::clone(runtimes);
#[cfg(target_arch = "aarch64")]
let capstone = Capstone::new()
.arm64()
.mode(arch::arm64::ArchMode::Arm)
.detail(true)
.build()
.expect("Failed to create Capstone object");
#[cfg(target_arch = "x86_64")]
let decoder = InstDecoder::minimal();
Transformer::from_callback(gum, move |basic_block, output| {
Self::transform(
basic_block,
&output,
&ranges,
&runtimes,
#[cfg(target_arch = "aarch64")]
&capstone,
#[cfg(target_arch = "x86_64")]
decoder,
);
let decoder = <ARMv8 as Arch>::Decoder::default();
Transformer::from_callback(gum, move |basic_block, output| {
Self::transform(basic_block, &output, &ranges, &runtimes, decoder);
})
}
@ -523,7 +508,7 @@ where
}
#[cfg(target_arch = "aarch64")]
if let Some((basereg, indexreg, displacement, width, shift, extender)) = res {
if let Some((basereg, indexreg, displacement, width, shift)) = res {
if let Some(rt) = runtimes.match_first_type_mut::<AsanRuntime>() {
rt.emit_shadow_check(
address,
@ -533,18 +518,25 @@ where
displacement,
width,
shift,
extender,
);
}
}
#[cfg(all(feature = "cmplog", target_arch = "aarch64"))]
if let Some(rt) = runtimes.match_first_type_mut::<CmpLogRuntime>() {
if let Some((op1, op2, special_case)) =
CmpLogRuntime::cmplog_is_interesting_instruction(&capstone, address, instr)
if let Some((op1, op2, shift, special_case)) =
CmpLogRuntime::cmplog_is_interesting_instruction(decoder, address, instr)
//change this as well
{
//emit code that saves the relevant data in runtime(passes it to x0, x1)
rt.emit_comparison_handling(address, &output, &op1, &op2, special_case);
rt.emit_comparison_handling(
address,
&output,
&op1,
&op2,
shift,
special_case,
);
}
}

View File

@ -1,100 +1,128 @@
#[cfg(target_arch = "aarch64")]
use capstone::Capstone;
#[cfg(target_arch = "aarch64")]
use capstone::{
arch::{self, arm64::Arm64OperandType, ArchOperand::Arm64Operand},
Insn,
};
#[cfg(target_arch = "aarch64")]
use frida_gum::instruction_writer::Aarch64Register;
#[cfg(target_arch = "x86_64")]
use frida_gum::instruction_writer::X86Register;
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
#[cfg(any(target_arch = "x86_64"))]
use frida_gum_sys;
#[cfg(target_arch = "aarch64")]
use num_traits::cast::FromPrimitive;
#[cfg(target_arch = "x86_64")]
use yaxpeax_arch::LengthedInstruction;
#[cfg(target_arch = "aarch64")]
use yaxpeax_arch::{Decoder, ReaderBuilder};
#[cfg(target_arch = "aarch64")]
use yaxpeax_arm::armv8::a64::{InstDecoder, Instruction, Opcode, Operand, SIMDSizeCode, SizeCode};
#[cfg(target_arch = "x86_64")]
use yaxpeax_x86::amd64::Operand;
#[cfg(target_arch = "x86_64")]
use yaxpeax_x86::amd64::{InstDecoder, Instruction, RegSpec};
/// Determine the size of an SIMD register
#[cfg(target_arch = "aarch64")]
#[inline]
#[must_use]
pub fn get_simd_size(sizecode: SIMDSizeCode) -> u32 {
    // Each SIMD size code maps directly to its width in bytes.
    use SIMDSizeCode::{B, D, H, Q, S};
    match sizecode {
        B => 1,
        H => 2,
        S => 4,
        D => 8,
        Q => 16,
    }
}
/// Determine the size of a normal register
#[cfg(target_arch = "aarch64")]
#[inline]
#[must_use]
pub fn get_reg_size(sizecode: SizeCode) -> u32 {
    // X registers are 64-bit. W always means 4 here: byte/halfword accesses
    // (strb/ldrb, strh/ldrh) are resolved earlier by `instruction_width`, so
    // they never reach this function.
    match sizecode {
        SizeCode::X => 8,
        SizeCode::W => 4,
    }
}
/// Determine the width, in bytes, of the memory access performed by `instr`.
///
/// Used by the ASAN shadow-check instrumentation to know how many bytes a
/// load/store touches.
///
/// # Panics
/// Panics if `instr` has no operands (this is only called on load/store
/// instructions, which always have at least one).
#[cfg(target_arch = "aarch64")]
#[inline]
#[must_use]
pub fn instruction_width(instr: &Instruction) -> u32 {
    // Pair instructions (stp/ldp and friends) move two registers at once.
    let num_registers = match instr.opcode {
        Opcode::STP
        | Opcode::STXP
        | Opcode::STNP
        | Opcode::STLXP
        | Opcode::LDP
        | Opcode::LDXP
        | Opcode::LDNP => 2,
        _ => 1,
    };

    // Sized mnemonics encode the access width in their suffix:
    // strb/ldrb -> 1 byte, strh/ldrh -> 2 bytes, "...w" -> 4 bytes/register.
    match instr.opcode.to_string().as_bytes().last().unwrap() {
        b'b' => return 1,
        b'h' => return 2,
        b'w' => return 4 * num_registers,
        _ => (),
    }

    // Otherwise the width is determined by the first operand.
    let size = match instr.operands.first().unwrap() {
        Operand::Register(sizecode, _) => {
            // Standard loads/stores: ldr, str, ldp, etc.
            get_reg_size(*sizecode)
        }
        Operand::RegisterPair(sizecode, _) => {
            // Not sure where this is used, but it is possible in yaxpeax.
            get_reg_size(*sizecode)
        }
        Operand::SIMDRegister(sizecode, _) => {
            // Used in cases like `ldr q0, [sp]`.
            get_simd_size(*sizecode)
        }
        Operand::SIMDRegisterGroup(sizecode, _, _, num) => {
            // Used for cases such as `ld4 {v1.2s, v2.2s, v3.2s, v4.2s}, [x0]`.
            // `sizecode` is the size of each SIMD structure (can only be D or
            // Q); `num` is how many of them there are (ld4 -> 4).
            get_simd_size(*sizecode) * *num as u32
        }
        Operand::SIMDRegisterGroupLane(_, sizecode, num, _) => {
            // Used for cases such as `ld4 {v0.s, v1.s, v2.s, v3.s}[0], [x0]`.
            // `sizecode` is the size of each lane; `num` is the lane count.
            get_simd_size(*sizecode) * *num as u32
        }
        _ => {
            // Not a memory-width-bearing operand.
            return 0;
        }
    };

    num_registers * size
}
/// Convert from a capstone register id to a frida `InstructionWriter` register index
/// Convert from a yaxpeax register to frida gum's register state
#[cfg(target_arch = "aarch64")]
#[must_use]
#[inline]
pub fn writer_register(reg: capstone::RegId) -> Aarch64Register {
let regint: u16 = reg.0;
Aarch64Register::from_u32(u32::from(regint)).unwrap()
pub fn writer_register(reg: u16, sizecode: SizeCode, zr: bool) -> Aarch64Register {
    // Both yaxpeax and the A64 encoding reuse register number 31 for either
    // the stack pointer or the zero register depending on the instruction;
    // the `zr` flag disambiguates (true -> XZR/WZR, false -> SP/WSP).
    if reg == 31 {
        return match (sizecode, zr) {
            (SizeCode::X, true) => Aarch64Register::Xzr,
            (SizeCode::W, true) => Aarch64Register::Wzr,
            (SizeCode::X, false) => Aarch64Register::Sp,
            (SizeCode::W, false) => Aarch64Register::Wsp,
        };
    }
    match sizecode {
        SizeCode::X => match reg {
            // x0..x28 map contiguously onto frida's X0-based enum values.
            0..=28 => {
                Aarch64Register::from_u32(Aarch64Register::X0 as u32 + u32::from(reg)).unwrap()
            }
            // x29/x30 have dedicated frame-pointer/link-register names.
            29 => Aarch64Register::Fp,
            30 => Aarch64Register::Lr,
            _ => panic!("Failed to get writer register"),
        },
        SizeCode::W => match reg {
            // w0..w30 map contiguously onto frida's W0-based enum values.
            0..=30 => {
                Aarch64Register::from_u32(Aarch64Register::W0 as u32 + u32::from(reg)).unwrap()
            }
            _ => panic!("Failed to get writer register"),
        },
    }
}
/// Translate from `RegSpec` to `X86Register`
#[cfg(all(target_arch = "x86_64", unix))]
const X86_64_REGS: [(RegSpec, X86Register); 34] = [
(RegSpec::eax(), X86Register::Eax),
(RegSpec::ecx(), X86Register::Ecx),
@ -151,7 +179,7 @@ pub fn writer_register(reg: RegSpec) -> X86Register {
/// Decodes a frida instruction's bytes into a yaxpeax [`Instruction`].
///
/// # Panics
/// Panics if the bytes of `frida_insn` cannot be decoded — they come from
/// frida's stalker, so they are expected to always be a valid instruction.
#[cfg(target_arch = "x86_64")]
pub(crate) fn frida_to_cs(decoder: InstDecoder, frida_insn: &frida_gum_sys::Insn) -> Instruction {
    decoder.decode_slice(frida_insn.bytes()).unwrap()
}
@ -229,3 +257,19 @@ pub fn disas_count(decoder: &InstDecoder, data: &[u8], count: usize) -> Vec<Inst
counter -= 1;
}
}
#[cfg(target_arch = "aarch64")]
/// Disassemble up to `count` instructions from `data`.
///
/// Stops early (returning fewer instructions) if the decoder fails or the
/// buffer is exhausted. Mirrors the x86_64 variant, which also honors `count`;
/// the previous implementation ignored `count` and decoded the whole buffer.
pub fn disas_count(decoder: &InstDecoder, data: &[u8], count: usize) -> Vec<Instruction> {
    let mut ret = vec![];
    let mut reader = ReaderBuilder::<u64, u8>::read_from(data);
    while ret.len() < count {
        match decoder.decode(&mut reader) {
            Ok(insn) => ret.push(insn),
            Err(_) => break,
        }
    }
    ret
}