Compile-time edge module compilation check, native support for ConstMapObserver (#2592)

* compile-time edge module compilation trick

* clippy

* possible since rust 1.79

* split edge module in submodules

* Update frida to 0.14.0 (#2596)

* update frida crate to the latest version

* adapt libafl_frida to the latest version of frida

* tracers and generators private modules

* do not use star export.

* same for drcov

* forgot a file...

* first draft of generic-based edge module for ConstantLengthMapObserver.

* integration of OwnedSizedSlice.

Replaced `OwnedSlice` in `ConstMapObserver` with the new `OwnedSizedSlice`.

* fix serde stuff

* no std

* import

* fixed qemu_cmin with new constant map abstraction.

* fix const map

* fix clippy from another pr...

* fix non-null usage

* fix ci?

* new feature stuff

* fixes

* minor fixes

* fmt

* non null

* im stupid

* fmt

* fix fuzzer

* fix fuzzers

* sized slice

* fuzzer fixes

* ptr::NonNull -> NonNull

* shorter trait length

* fmt
This commit is contained in:
Romain Malmain 2024-11-04 14:34:52 +01:00 committed by GitHub
parent 56a5463ae4
commit 49ea0b03a6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
20 changed files with 1467 additions and 894 deletions

View File

@ -5,7 +5,7 @@ runs:
steps:
- name: Install QEMU deps
shell: bash
run: apt-get update && apt-get install -y qemu-utils sudo python3-msgpack python3-jinja2 curl
run: apt-get update && apt-get install -y qemu-utils sudo python3-msgpack python3-jinja2 curl python3-dev
- uses: dtolnay/rust-toolchain@stable
- name: enable mult-thread for `make`
shell: bash

View File

@ -1,4 +1,4 @@
use std::{path::PathBuf, time::Duration};
use std::{path::PathBuf, ptr::NonNull, time::Duration};
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
@ -46,7 +46,12 @@ pub fn main() {
libafl::executors::ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = unsafe { ConstMapObserver::<u8, 3>::from_mut_ptr("signals", map_ptr) };
let observer = unsafe {
ConstMapObserver::<u8, 3>::from_mut_ptr(
"signals",
NonNull::new(map_ptr).expect("map ptr is null."),
)
};
// Create a stacktrace observer
let mut bt = shmem_provider.new_on_shmem::<Option<u64>>(None).unwrap();
let bt_observer = BacktraceObserver::new(

View File

@ -1,4 +1,4 @@
use std::path::PathBuf;
use std::{path::PathBuf, ptr::NonNull};
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
@ -35,7 +35,12 @@ pub fn main() {
libafl::executors::ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = unsafe { ConstMapObserver::<u8, 3>::from_mut_ptr("signals", array_ptr) };
let observer = unsafe {
ConstMapObserver::<u8, 3>::from_mut_ptr(
"signals",
NonNull::new(array_ptr).expect("map ptr is null"),
)
};
// Create a stacktrace observer
let bt_observer = BacktraceObserver::owned(
"BacktraceObserver",

View File

@ -42,7 +42,10 @@ pub fn main() {
let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap();
//let the forkserver know the shmid
shmem.write_to_env("__AFL_SHM_ID").unwrap();
let shmem_map = shmem.as_slice_mut();
let shmem_map: &mut [u8; MAP_SIZE] = shmem
.as_slice_mut()
.try_into()
.expect("could not convert slice to sized slice.");
// Create an observation channel using the signals map
let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new(

View File

@ -9,6 +9,7 @@ use std::{
io::{self, Write},
path::PathBuf,
process,
ptr::NonNull,
time::Duration,
};
@ -160,14 +161,14 @@ fn fuzz(
let mut edges_observer = unsafe {
HitcountsMapObserver::new(ConstMapObserver::<_, EDGES_MAP_DEFAULT_SIZE>::from_mut_ptr(
"edges",
edges.as_mut_ptr(),
NonNull::new(edges.as_mut_ptr()).expect("map ptr is null."),
))
.track_indices()
};
let emulator_modules = tuple_list!(
StdEdgeCoverageChildModule::builder()
.map_observer(edges_observer.as_mut())
.const_map_observer(edges_observer.as_mut())
.build()?,
CmpLogChildModule::default(),
);
@ -199,7 +200,8 @@ fn fuzz(
let stack_ptr: u64 = qemu.read_reg(Regs::Sp).unwrap();
let mut ret_addr = [0; 8];
unsafe { qemu.read_mem(stack_ptr, &mut ret_addr) };
qemu.read_mem(stack_ptr, &mut ret_addr)
.expect("qemu read failed");
let ret_addr = u64::from_le_bytes(ret_addr);
println!("Stack pointer = {stack_ptr:#x}");
@ -323,7 +325,7 @@ fn fuzz(
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// The wrapped harness function, calling out to the LLVM-style harness
let mut harness = |emulator: &mut Emulator<_, _, _, _, _>, input: &BytesInput| {
let mut harness = |_emulator: &mut Emulator<_, _, _, _, _>, input: &BytesInput| {
let target = input.target_bytes();
let mut buf = target.as_slice();
let mut len = buf.len();
@ -333,7 +335,7 @@ fn fuzz(
}
unsafe {
qemu.write_mem(input_addr, buf);
qemu.write_mem_unchecked(input_addr, buf);
qemu.write_reg(Regs::Rdi, input_addr).unwrap();
qemu.write_reg(Regs::Rsi, len as GuestReg).unwrap();
@ -394,8 +396,8 @@ fn fuzz(
#[cfg(unix)]
{
let null_fd = file_null.as_raw_fd();
// dup2(null_fd, io::stdout().as_raw_fd())?;
// dup2(null_fd, io::stderr().as_raw_fd())?;
dup2(null_fd, io::stdout().as_raw_fd())?;
dup2(null_fd, io::stderr().as_raw_fd())?;
}
// reopen file to make sure we're at the end
log.replace(

View File

@ -2,7 +2,7 @@
//!
#[cfg(feature = "i386")]
use core::mem::size_of;
use std::{env, io, path::PathBuf, process};
use std::{env, io, path::PathBuf, process, ptr::NonNull};
use clap::{builder::Str, Parser};
use libafl::{
@ -162,7 +162,7 @@ pub fn fuzz() -> Result<(), Error> {
let mut edges_observer = unsafe {
HitcountsMapObserver::new(ConstMapObserver::<_, EDGES_MAP_DEFAULT_SIZE>::from_mut_ptr(
"edges",
edges.as_mut_ptr(),
NonNull::new(edges.as_mut_ptr()).expect("The edge map pointer is null."),
))
};
@ -196,7 +196,7 @@ pub fn fuzz() -> Result<(), Error> {
let len = len as GuestReg;
unsafe {
qemu.write_mem(input_addr, buf);
qemu.write_mem(input_addr, buf).expect("qemu write failed.");
qemu.write_reg(Regs::Pc, test_one_input_ptr).unwrap();
qemu.write_reg(Regs::Sp, stack_ptr).unwrap();
qemu.write_return_address(ret_addr).unwrap();
@ -219,7 +219,7 @@ pub fn fuzz() -> Result<(), Error> {
};
let modules = tuple_list!(StdEdgeCoverageChildModule::builder()
.map_observer(edges_observer.as_mut())
.const_map_observer(edges_observer.as_mut())
.build()?);
let emulator = Emulator::empty().qemu(qemu).modules(modules).build()?;

View File

@ -1635,7 +1635,7 @@ mod tests {
let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap();
shmem.write_to_env("__AFL_SHM_ID").unwrap();
let shmem_buf = shmem.as_slice_mut();
let shmem_buf: &mut [u8; MAP_SIZE] = shmem.as_slice_mut().try_into().unwrap();
let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new(
"shared_mem",

View File

@ -5,28 +5,26 @@ use core::{
fmt::Debug,
hash::{Hash, Hasher},
ops::{Deref, DerefMut},
ptr::NonNull,
};
use ahash::RandomState;
use libafl_bolts::{ownedref::OwnedMutSlice, AsSlice, AsSliceMut, HasLen, Named};
use libafl_bolts::{ownedref::OwnedMutSizedSlice, HasLen, Named};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::{
observers::{map::MapObserver, Observer, VariableLengthMapObserver},
observers::{map::MapObserver, ConstLenMapObserver, Observer},
Error,
};
// TODO: remove the size field and implement ConstantLengthMapObserver
/// Use a const size to speedup `Feedback::is_interesting` when the user can
/// know the size of the map at compile time.
#[derive(Serialize, Deserialize, Debug)]
#[allow(clippy::unsafe_derive_deserialize)]
pub struct ConstMapObserver<'a, T, const N: usize> {
map: OwnedMutSlice<'a, T>,
map: OwnedMutSizedSlice<'a, T, N>,
initial: T,
name: Cow<'static, str>,
size: usize,
}
impl<I, S, T, const N: usize> Observer<I, S> for ConstMapObserver<'_, T, N>
@ -87,19 +85,19 @@ where
#[inline]
fn get(&self, idx: usize) -> T {
self.as_slice()[idx]
self[idx]
}
#[inline]
fn set(&mut self, idx: usize, val: T) {
self.map.as_slice_mut()[idx] = val;
(*self)[idx] = val;
}
/// Count the set bytes in the map
fn count_bytes(&self) -> u64 {
let initial = self.initial();
let cnt = self.usable_count();
let map = self.as_slice();
let map = self.map.as_slice();
let mut res = 0;
for x in &map[0..cnt] {
if *x != initial {
@ -110,7 +108,7 @@ where
}
fn usable_count(&self) -> usize {
self.as_slice().len()
self.len()
}
#[inline]
@ -124,7 +122,7 @@ where
// Normal memset, see https://rust.godbolt.org/z/Trs5hv
let initial = self.initial();
let cnt = self.usable_count();
let map = self.as_slice_mut();
let map = &mut (*self);
for x in &mut map[0..cnt] {
*x = initial;
}
@ -132,14 +130,14 @@ where
}
fn to_vec(&self) -> Vec<T> {
self.as_slice().to_vec()
self.map.to_vec()
}
/// Get the number of set entries with the specified indexes
fn how_many_set(&self, indexes: &[usize]) -> usize {
let initial = self.initial();
let cnt = self.usable_count();
let map = self.as_slice();
let map = self.map.as_slice();
let mut res = 0;
for i in indexes {
if *i < cnt && map[*i] != initial {
@ -150,37 +148,30 @@ where
}
}
impl<T, const N: usize> VariableLengthMapObserver for ConstMapObserver<'_, T, N>
impl<T, const N: usize> ConstLenMapObserver<N> for ConstMapObserver<'_, T, N>
where
T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug + 'static,
{
fn map_slice(&mut self) -> &[Self::Entry] {
self.map.as_slice()
fn map_slice(&self) -> &[Self::Entry; N] {
&self.map
}
fn map_slice_mut(&mut self) -> &mut [Self::Entry] {
self.map.as_slice_mut()
}
fn size(&mut self) -> &usize {
&N
}
fn size_mut(&mut self) -> &mut usize {
&mut self.size
fn map_slice_mut(&mut self) -> &mut [Self::Entry; N] {
&mut self.map
}
}
impl<T, const N: usize> Deref for ConstMapObserver<'_, T, N> {
type Target = [T];
fn deref(&self) -> &[T] {
&self.map
self.map.as_slice()
}
}
impl<T, const N: usize> DerefMut for ConstMapObserver<'_, T, N> {
fn deref_mut(&mut self) -> &mut [T] {
&mut self.map
self.map.as_mut_slice()
}
}
@ -194,13 +185,12 @@ where
/// Will get a pointer to the map and dereference it at any point in time.
/// The map must not move in memory!
#[must_use]
pub fn new(name: &'static str, map: &'a mut [T]) -> Self {
pub fn new(name: &'static str, map: &'a mut [T; N]) -> Self {
assert!(map.len() >= N);
Self {
map: OwnedMutSlice::from(map),
map: OwnedMutSizedSlice::from(map),
name: Cow::from(name),
initial: T::default(),
size: N,
}
}
@ -208,34 +198,12 @@ where
///
/// # Safety
/// Will dereference the `map_ptr` with up to len elements.
pub unsafe fn from_mut_ptr(name: &'static str, map_ptr: *mut T) -> Self {
#[must_use]
pub unsafe fn from_mut_ptr(name: &'static str, map_ptr: NonNull<T>) -> Self {
ConstMapObserver {
map: OwnedMutSlice::from_raw_parts_mut(map_ptr, N),
map: OwnedMutSizedSlice::from_raw_mut(map_ptr),
name: Cow::from(name),
initial: T::default(),
size: N,
}
}
}
impl<T, const N: usize> ConstMapObserver<'_, T, N>
where
T: Default + Clone,
{
/// Creates a new [`MapObserver`] with an owned map
#[must_use]
pub fn owned(name: &'static str, map: Vec<T>) -> Self {
assert!(map.len() >= N);
let initial = if map.is_empty() {
T::default()
} else {
map[0].clone()
};
Self {
map: OwnedMutSlice::from(map),
name: Cow::from(name),
initial,
size: N,
}
}
}

View File

@ -14,7 +14,9 @@ use serde::{Deserialize, Serialize};
use crate::{
executors::ExitKind,
observers::{map::MapObserver, DifferentialObserver, Observer, VariableLengthMapObserver},
observers::{
map::MapObserver, ConstLenMapObserver, DifferentialObserver, Observer, VarLenMapObserver,
},
Error,
};
@ -230,11 +232,24 @@ where
}
}
impl<M> VariableLengthMapObserver for HitcountsMapObserver<M>
impl<M, const N: usize> ConstLenMapObserver<N> for HitcountsMapObserver<M>
where
M: VariableLengthMapObserver + MapObserver<Entry = u8>,
M: ConstLenMapObserver<N> + MapObserver<Entry = u8>,
{
fn map_slice(&mut self) -> &[Self::Entry] {
fn map_slice(&self) -> &[Self::Entry; N] {
self.base.map_slice()
}
fn map_slice_mut(&mut self) -> &mut [Self::Entry; N] {
self.base.map_slice_mut()
}
}
impl<M> VarLenMapObserver for HitcountsMapObserver<M>
where
M: VarLenMapObserver + MapObserver<Entry = u8>,
{
fn map_slice(&self) -> &[Self::Entry] {
self.base.map_slice()
}
@ -242,7 +257,7 @@ where
self.base.map_slice_mut()
}
fn size(&mut self) -> &usize {
fn size(&self) -> &usize {
self.base.size()
}

View File

@ -387,27 +387,30 @@ pub trait MapObserver:
/// The "real" length of the underlying map could change at any point in time.
/// Thus, the size of the map should be fetched each time it is used.
pub trait VariableLengthMapObserver: MapObserver {
pub trait VarLenMapObserver: MapObserver {
/// A mutable slice reference to the map.
/// The length of the map gives the maximum allocatable size.
fn map_slice(&mut self) -> &[Self::Entry];
fn map_slice(&self) -> &[Self::Entry];
/// A slice reference to the map.
/// The length of the map gives the maximum allocatable size.
fn map_slice_mut(&mut self) -> &mut [Self::Entry];
/// A reference to the size of the map.
fn size(&mut self) -> &usize;
fn size(&self) -> &usize;
/// A mutable reference to the size of the map.
fn size_mut(&mut self) -> &mut usize;
}
/// Implementors guarantee the size of the map is constant at any point in time and equals N.
pub trait ConstantLengthMapObserver<const N: usize>: MapObserver {
pub trait ConstLenMapObserver<const N: usize>: MapObserver {
/// The size of the map
const LENGTH: usize = N;
/// A mutable slice reference to the map
fn map_slice(&self) -> &[Self::Entry; N];
/// A mutable slice reference to the map
fn map_slice_mut(&mut self) -> &mut [Self::Entry; N];
}

View File

@ -15,7 +15,7 @@ use libafl_bolts::{
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::{
observers::{map::MapObserver, Observer, VariableLengthMapObserver},
observers::{map::MapObserver, Observer, VarLenMapObserver},
Error,
};
@ -149,11 +149,11 @@ where
}
}
impl<T> VariableLengthMapObserver for VariableMapObserver<'_, T>
impl<T> VarLenMapObserver for VariableMapObserver<'_, T>
where
T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug,
{
fn map_slice(&mut self) -> &[Self::Entry] {
fn map_slice(&self) -> &[Self::Entry] {
self.map.as_ref()
}
@ -161,7 +161,7 @@ where
self.map.as_mut()
}
fn size(&mut self) -> &usize {
fn size(&self) -> &usize {
self.size.as_ref()
}

View File

@ -680,7 +680,18 @@ pub trait AsSlice<'a> {
fn as_slice(&'a self) -> Self::SliceRef;
}
impl<'a, T, R> AsSlice<'a> for R
/// Can be converted to a slice
pub trait AsSizedSlice<'a, const N: usize> {
/// Type of the entries of this slice
type Entry: 'a;
/// Type of the reference to this slice
type SliceRef: Deref<Target = [Self::Entry; N]>;
/// Convert to a slice
fn as_sized_slice(&'a self) -> Self::SliceRef;
}
impl<'a, T, R: ?Sized> AsSlice<'a> for R
where
T: 'a,
R: Deref<Target = [T]>,
@ -693,6 +704,19 @@ where
}
}
impl<'a, T, const N: usize, R: ?Sized> AsSizedSlice<'a, N> for R
where
T: 'a,
R: Deref<Target = [T; N]>,
{
type Entry = T;
type SliceRef = &'a [T; N];
fn as_sized_slice(&'a self) -> Self::SliceRef {
self
}
}
/// Can be converted to a mutable slice
pub trait AsSliceMut<'a>: AsSlice<'a> {
/// Type of the mutable reference to this slice
@ -702,7 +726,16 @@ pub trait AsSliceMut<'a>: AsSlice<'a> {
fn as_slice_mut(&'a mut self) -> Self::SliceRefMut;
}
impl<'a, T, R> AsSliceMut<'a> for R
/// Can be converted to a mutable slice
pub trait AsSizedSliceMut<'a, const N: usize>: AsSizedSlice<'a, N> {
/// Type of the mutable reference to this slice
type SliceRefMut: DerefMut<Target = [Self::Entry; N]>;
/// Convert to a slice
fn as_sized_slice_mut(&'a mut self) -> Self::SliceRefMut;
}
impl<'a, T, R: ?Sized> AsSliceMut<'a> for R
where
T: 'a,
R: DerefMut<Target = [T]>,
@ -714,6 +747,18 @@ where
}
}
impl<'a, T, const N: usize, R: ?Sized> AsSizedSliceMut<'a, N> for R
where
T: 'a,
R: DerefMut<Target = [T; N]>,
{
type SliceRefMut = &'a mut [T; N];
fn as_sized_slice_mut(&'a mut self) -> Self::SliceRefMut {
&mut *self
}
}
/// Create an `Iterator` from a reference
pub trait AsIter<'it> {
/// The item type

View File

@ -10,13 +10,67 @@ use core::{
clone::Clone,
fmt::Debug,
ops::{Deref, DerefMut, RangeBounds},
ptr::NonNull,
slice,
slice::SliceIndex,
};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::{shmem::ShMem, AsSlice, AsSliceMut, IntoOwned, Truncate};
use crate::{
shmem::ShMem, AsSizedSlice, AsSizedSliceMut, AsSlice, AsSliceMut, IntoOwned, Truncate,
};
/// Constant size array visitor for serde deserialization.
/// Mostly taken from <https://github.com/serde-rs/serde/issues/1937#issuecomment-812137971>
mod arrays {
    use alloc::{boxed::Box, fmt, vec::Vec};
    use core::{convert::TryInto, marker::PhantomData};

    use serde::{
        de::{SeqAccess, Visitor},
        Deserialize, Deserializer,
    };

    /// Visitor that deserializes exactly `N` elements into a boxed `[T; N]`.
    struct ArrayVisitor<T, const N: usize>(PhantomData<T>);

    impl<'de, T, const N: usize> Visitor<'de> for ArrayVisitor<T, N>
    where
        T: Deserialize<'de>,
    {
        type Value = Box<[T; N]>;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str(&format!("an array of length {N}"))
        }

        #[inline]
        fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
        where
            A: SeqAccess<'de>,
        {
            // can be optimized using MaybeUninit
            // Pull exactly N elements; a shorter sequence is a length error.
            let mut data = Vec::with_capacity(N);
            for _ in 0..N {
                match (seq.next_element())? {
                    Some(val) => data.push(val),
                    None => return Err(serde::de::Error::invalid_length(N, &self)),
                }
            }
            // The Vec holds exactly N elements here, so the conversion cannot fail.
            match data.try_into() {
                Ok(arr) => Ok(arr),
                Err(_) => unreachable!(),
            }
        }
    }

    /// Deserialize a boxed fixed-size array `Box<[T; N]>` via [`ArrayVisitor`].
    /// Uses `deserialize_tuple` because serde has no native const-generic array support.
    pub fn deserialize<'de, D, T, const N: usize>(deserializer: D) -> Result<Box<[T; N]>, D::Error>
    where
        D: Deserializer<'de>,
        T: Deserialize<'de>,
    {
        deserializer.deserialize_tuple(N, ArrayVisitor::<T, N>(PhantomData))
    }
}
/// Private part of the unsafe marker, making sure this cannot be initialized directly.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
@ -815,6 +869,213 @@ impl<'a, T> From<&'a mut &'a mut [T]> for OwnedMutSlice<'a, T> {
}
}
/// Wrap a mutable slice and convert to a Box on serialize.
/// We use a hidden inner enum so the public API can be safe,
/// unless the user uses the unsafe [`OwnedMutSizedSlice::from_raw_mut`].
/// The variable length version is [`OwnedMutSlice`].
#[derive(Debug)]
pub enum OwnedMutSizedSliceInner<'a, T: 'a + Sized, const N: usize> {
    /// A raw ptr to a memory location of length N
    /// (the [`UnsafeMarker`] ensures this variant can only be built through unsafe code)
    RefRaw(*mut [T; N], UnsafeMarker),
    /// A ptr to a mutable slice of the type
    Ref(&'a mut [T; N]),
    /// An owned [`Box`] of the type
    Owned(Box<[T; N]>),
}
impl<'a, T: 'a + Sized + Serialize, const N: usize> Serialize
    for OwnedMutSizedSliceInner<'a, T, N>
{
    /// Serialize the wrapped array, dereferencing the raw pointer if needed.
    fn serialize<S>(&self, se: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            // SAFETY: `RefRaw` can only be constructed by unsafe code that
            // guarantees the pointer is valid for `N` elements.
            OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { &**rr }.serialize(se),
            OwnedMutSizedSliceInner::Ref(r) => (*r).serialize(se),
            OwnedMutSizedSliceInner::Owned(b) => (*b).serialize(se),
        }
    }
}

impl<'de, 'a, T: 'a + Sized, const N: usize> Deserialize<'de> for OwnedMutSizedSliceInner<'a, T, N>
where
    T: Deserialize<'de>,
{
    /// Deserialization always yields the `Owned` variant — raw/borrowed
    /// pointers cannot be reconstructed from serialized data.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        arrays::deserialize(deserializer).map(OwnedMutSizedSliceInner::Owned)
    }
}
/// Wrap a mutable slice of constant size N and convert to a Box on serialize
#[allow(clippy::unsafe_derive_deserialize)]
#[derive(Debug, Serialize, Deserialize)]
pub struct OwnedMutSizedSlice<'a, T: 'a + Sized, const N: usize> {
    // Hidden inner enum: keeps the unsafe raw-pointer variant out of the public API.
    inner: OwnedMutSizedSliceInner<'a, T, N>,
}
/// Mutable iteration over the wrapped fixed-size slice.
impl<'it, T, const N: usize> IntoIterator for &'it mut OwnedMutSizedSlice<'_, T, N> {
    type Item = <IterMut<'it, T> as Iterator>::Item;
    type IntoIter = IterMut<'it, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_sized_slice_mut().iter_mut()
    }
}

/// Shared iteration over the wrapped fixed-size slice.
impl<'it, T, const N: usize> IntoIterator for &'it OwnedMutSizedSlice<'_, T, N> {
    type Item = <Iter<'it, T> as Iterator>::Item;
    type IntoIter = Iter<'it, T>;

    fn into_iter(self) -> Self::IntoIter {
        self.as_sized_slice().iter()
    }
}
impl<'a, T: 'a + Sized, const N: usize> OwnedMutSizedSlice<'a, T, N> {
    /// Create a new [`OwnedMutSizedSlice`] from a raw pointer
    ///
    /// # Safety
    ///
    /// The pointer must be valid and point to a map of the size `size_of<T>() * N`
    /// The content will be dereferenced in subsequent operations.
    #[must_use]
    pub unsafe fn from_raw_mut(ptr: NonNull<T>) -> OwnedMutSizedSlice<'a, T, N> {
        Self {
            // Reinterpret the element pointer as a pointer to the whole [T; N] array;
            // valid because the caller guarantees N elements are readable/writable.
            inner: OwnedMutSizedSliceInner::RefRaw(
                ptr.as_ptr() as *mut [T; N],
                UnsafeMarker::new(),
            ),
        }
    }

    /// Returns an iterator over the slice.
    pub fn iter(&self) -> Iter<'_, T> {
        <&Self as IntoIterator>::into_iter(self)
    }

    /// Returns a mutable iterator over the slice.
    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
        <&mut Self as IntoIterator>::into_iter(self)
    }
}
impl<T: Sized, const N: usize> Deref for OwnedMutSizedSlice<'_, T, N> {
    type Target = [T; N];

    fn deref(&self) -> &Self::Target {
        match &self.inner {
            // SAFETY: `RefRaw` pointers are guaranteed valid by the unsafe constructor.
            OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { &**rr },
            OwnedMutSizedSliceInner::Ref(r) => r,
            OwnedMutSizedSliceInner::Owned(v) => v,
        }
    }
}

impl<T: Sized, const N: usize> DerefMut for OwnedMutSizedSlice<'_, T, N> {
    fn deref_mut(&mut self) -> &mut [T; N] {
        match &mut self.inner {
            // SAFETY: `RefRaw` pointers are guaranteed valid by the unsafe constructor.
            OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { &mut **rr },
            OwnedMutSizedSliceInner::Ref(r) => r,
            OwnedMutSizedSliceInner::Owned(v) => v,
        }
    }
}
impl<T, const N: usize> IntoOwned for OwnedMutSizedSlice<'_, T, N>
where
    T: Sized + Clone,
{
    /// Returns `true` iff the data is heap-owned (not a borrow or a raw pointer).
    #[must_use]
    fn is_owned(&self) -> bool {
        match self.inner {
            OwnedMutSizedSliceInner::RefRaw(..) | OwnedMutSizedSliceInner::Ref(_) => false,
            OwnedMutSizedSliceInner::Owned(_) => true,
        }
    }

    /// Convert into an owned boxed array, cloning borrowed/raw data if necessary.
    #[must_use]
    fn into_owned(self) -> Self {
        let slice: Box<[T; N]> = match self.inner {
            // SAFETY: `RefRaw` pointers are guaranteed valid by the unsafe constructor.
            OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { Box::from((*rr).clone()) },
            OwnedMutSizedSliceInner::Ref(r) => Box::from(r.clone()),
            OwnedMutSizedSliceInner::Owned(v) => v,
        };
        Self {
            inner: OwnedMutSizedSliceInner::Owned(slice),
        }
    }
}

impl<'a, T: 'a + Clone, const N: usize> Clone for OwnedMutSizedSlice<'a, T, N> {
    /// Cloning always deep-copies into the `Owned` variant, whatever the source was.
    fn clone(&self) -> Self {
        let slice: Box<[T; N]> = match &self.inner {
            // SAFETY: `RefRaw` pointers are guaranteed valid by the unsafe constructor.
            OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { Box::from((**rr).clone()) },
            OwnedMutSizedSliceInner::Ref(r) => Box::from((*r).clone()),
            OwnedMutSizedSliceInner::Owned(v) => v.clone(),
        };
        Self {
            inner: OwnedMutSizedSliceInner::Owned(slice),
        }
    }
}
/// Create a new [`OwnedMutSizedSlice`] from a sized slice
impl<T, const N: usize> From<Box<[T; N]>> for OwnedMutSizedSlice<'_, T, N> {
    fn from(s: Box<[T; N]>) -> Self {
        Self {
            inner: OwnedMutSizedSliceInner::Owned(s),
        }
    }
}

/// Create a Boxed slice from an [`OwnedMutSizedSlice`], or return the owned boxed sized slice.
impl<'a, T, const N: usize> From<OwnedMutSizedSlice<'a, T, N>> for Box<[T; N]>
where
    T: Clone,
{
    fn from(slice: OwnedMutSizedSlice<'a, T, N>) -> Self {
        let slice = slice.into_owned();
        match slice.inner {
            OwnedMutSizedSliceInner::Owned(b) => b,
            // `into_owned()` always returns the Owned variant, so this arm
            // is unreachable in practice.
            _ => panic!("Could not own slice!"),
        }
    }
}

/// Create a new [`OwnedMutSizedSlice`] from a reference to a boxed sized slice
#[allow(clippy::mut_mut)] // This makes use in some iterators easier
impl<'a, T, const N: usize> From<&'a mut Box<[T; N]>> for OwnedMutSizedSlice<'a, T, N> {
    fn from(r: &'a mut Box<[T; N]>) -> Self {
        Self {
            inner: OwnedMutSizedSliceInner::Ref((*r).as_mut()),
        }
    }
}

/// Create a new [`OwnedMutSizedSlice`] from a reference to ref to a slice
impl<'a, T, const N: usize> From<&'a mut [T; N]> for OwnedMutSizedSlice<'a, T, N> {
    fn from(r: &'a mut [T; N]) -> Self {
        Self {
            inner: OwnedMutSizedSliceInner::Ref(r),
        }
    }
}

/// Create a new [`OwnedMutSizedSlice`] from a reference to ref to a slice
#[allow(clippy::mut_mut)] // This makes use in some iterators easier
impl<'a, T, const N: usize> From<&'a mut &'a mut [T; N]> for OwnedMutSizedSlice<'a, T, N> {
    fn from(r: &'a mut &'a mut [T; N]) -> Self {
        Self {
            inner: OwnedMutSizedSliceInner::Ref(r),
        }
    }
}
/// Wrap a C-style pointer and convert to a Box on serialize
#[derive(Clone, Debug)]
pub enum OwnedPtr<T: Sized> {

View File

@ -1,802 +0,0 @@
use std::{cell::UnsafeCell, cmp::max, fmt::Debug, ptr, ptr::addr_of};
use hashbrown::{hash_map::Entry, HashMap};
use libafl::{inputs::UsesInput, observers::VariableLengthMapObserver, HasMetadata};
use libafl_bolts::Error;
use libafl_qemu_sys::GuestAddr;
#[cfg(feature = "systemmode")]
use libafl_qemu_sys::GuestPhysAddr;
use libafl_targets::EDGES_MAP;
use serde::{Deserialize, Serialize};
use crate::{
emu::EmulatorModules,
modules::{
hash_me, AddressFilter, EmulatorModule, EmulatorModuleTuple, PageFilter, StdAddressFilter,
StdPageFilter,
},
qemu::Hook,
};
// NOTE(review): the `#[no_mangle]` attributes indicate these statics are
// accessed by non-Rust (QEMU-side) code via their symbol names — confirm
// against the C/JIT counterparts before renaming or removing.

/// Raw pointer to the edges coverage map.
#[no_mangle]
static mut LIBAFL_QEMU_EDGES_MAP_PTR: *mut u8 = ptr::null_mut();

/// Pointer to the current size of the edges map.
#[no_mangle]
static mut LIBAFL_QEMU_EDGES_MAP_SIZE_PTR: *mut usize = ptr::null_mut();

/// Allocated capacity of the edges map.
#[no_mangle]
static mut LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE: usize = 0;

/// Maximum mask value for edge ids — presumably map-capacity derived; TODO confirm at write sites.
#[no_mangle]
static mut LIBAFL_QEMU_EDGES_MAP_MASK_MAX: usize = 0;
/// Metadata mapping a `(source, destination)` guest-address pair to its assigned edge id.
#[cfg_attr(
    any(not(feature = "serdeany_autoreg"), miri),
    allow(clippy::unsafe_derive_deserialize)
)] // for SerdeAny
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct QemuEdgesMapMetadata {
    /// Edge `(src, dst)` -> assigned edge id.
    pub map: HashMap<(GuestAddr, GuestAddr), u64>,
    /// Current edge id counter (starts at 0).
    pub current_id: u64,
}

// Register the type with libafl's SerdeAny machinery for dynamic (de)serialization.
libafl_bolts::impl_serdeany!(QemuEdgesMapMetadata);
impl QemuEdgesMapMetadata {
    /// Create empty metadata: no known edges, id counter at 0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            map: HashMap::new(),
            current_id: 0,
        }
    }
}
/// Standard edge coverage module, adapted to most use cases
pub type StdEdgeCoverageModule = StdEdgeCoverageFullModule;

/// Standard edge coverage module builder, adapted to most use cases
pub type StdEdgeCoverageModuleBuilder = StdEdgeCoverageFullModuleBuilder;

/// Edge coverage module using the child variant (hashed edge ids, which may collide).
pub type CollidingEdgeCoverageModule<AF, PF> = EdgeCoverageModule<AF, PF, EdgeCoverageChildVariant>;
/// A tracing strategy for edge coverage.
///
/// Each method installs the hooks for one tracing mode. The default
/// implementations panic, so a variant only overrides the modes it
/// actually supports.
pub trait EdgeCoverageVariant<AF, PF>: 'static + Debug {
    // NOTE(review): exact semantics of DO_SIDE_EFFECTS are not visible in this
    // chunk; the full variant keeps the default `true`, classic/child override
    // it to `false` — confirm meaning at the generator call sites.
    const DO_SIDE_EFFECTS: bool = true;

    /// Install JIT-compiled tracing with hitcounts. Panics unless the variant supports it.
    fn jit_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("JIT hitcount is not supported.")
    }

    /// Install JIT-compiled tracing without hitcounts. Panics unless the variant supports it.
    fn jit_no_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("JIT no hitcount is not supported.")
    }

    /// Install function-call tracing with hitcounts. Panics unless the variant supports it.
    fn fn_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("Func hitcount is not supported.")
    }

    /// Install function-call tracing without hitcounts. Panics unless the variant supports it.
    fn fn_no_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("Func no hitcount is not supported.")
    }
}
/// Variant assigning a unique id to every edge (via `gen_unique_edge_ids`).
#[derive(Debug)]
pub struct EdgeCoverageFullVariant;

/// Full-coverage module preconfigured with the standard address/page filters.
pub type StdEdgeCoverageFullModule =
    EdgeCoverageModule<StdAddressFilter, StdPageFilter, EdgeCoverageFullVariant>;

/// Builder for [`StdEdgeCoverageFullModule`]; `false` = no observer attached yet.
pub type StdEdgeCoverageFullModuleBuilder =
    EdgeCoverageModuleBuilder<StdAddressFilter, StdPageFilter, EdgeCoverageFullVariant, false>;
impl<AF, PF> EdgeCoverageVariant<AF, PF> for EdgeCoverageFullVariant {
    /// JIT tracing with hitcounts: hook edges with unique id generation,
    /// then attach the JIT hitcount trace helper to that hook.
    fn jit_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self>),
            Hook::Empty,
        );
        // SAFETY: FFI call registering the JIT trace helper for the hook we just created.
        unsafe {
            libafl_qemu_sys::libafl_qemu_edge_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_edge_hitcount),
            );
        }
    }

    /// JIT tracing without hitcounts: same hook, single-hit JIT helper.
    fn jit_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self>),
            Hook::Empty,
        );
        // SAFETY: FFI call registering the JIT trace helper for the hook we just created.
        unsafe {
            libafl_qemu_sys::libafl_qemu_edge_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_edge_single),
            );
        }
    }

    /// Function-call tracing with hitcounts (no JIT involved).
    fn fn_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self>),
            Hook::Raw(trace_edge_hitcount),
        );
    }

    /// Function-call tracing without hitcounts (no JIT involved).
    fn fn_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self>),
            Hook::Raw(trace_edge_single),
        );
    }
}
impl Default for StdEdgeCoverageFullModuleBuilder {
    /// Default preset: full (unique-edge-id) variant, standard filters,
    /// hitcounts and JIT both enabled.
    fn default() -> Self {
        Self::new(
            EdgeCoverageFullVariant,
            StdAddressFilter::default(),
            StdPageFilter::default(),
            true,
            true,
        )
    }
}
impl StdEdgeCoverageFullModule {
    /// Convenience entry point: a default builder for the standard full-coverage module.
    #[must_use]
    pub fn builder() -> StdEdgeCoverageFullModuleBuilder {
        EdgeCoverageModuleBuilder::default()
    }
}
/// Variant using hashed *block* ids (AFL-classic style) instead of unique edge ids.
#[derive(Debug)]
pub struct EdgeCoverageClassicVariant;

/// Classic-coverage module preconfigured with the standard address/page filters.
pub type StdEdgeCoverageClassicModule =
    EdgeCoverageModule<StdAddressFilter, StdPageFilter, EdgeCoverageClassicVariant>;

/// Builder for [`StdEdgeCoverageClassicModule`]; `false` = no observer attached yet.
pub type StdEdgeCoverageClassicModuleBuilder =
    EdgeCoverageModuleBuilder<StdAddressFilter, StdPageFilter, EdgeCoverageClassicVariant, false>;
impl<AF, PF> EdgeCoverageVariant<AF, PF> for EdgeCoverageClassicVariant {
    // Unlike the full variant, this one overrides the default to `false`.
    const DO_SIDE_EFFECTS: bool = false;

    /// JIT tracing with hitcounts: hook *blocks* (not edges) with hashed ids,
    /// then attach the JIT block-hitcount helper.
    fn jit_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self>),
            Hook::Empty,
            Hook::Empty,
        );
        // SAFETY: FFI call registering the JIT trace helper for the hook we just created.
        unsafe {
            libafl_qemu_sys::libafl_qemu_block_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_block_hitcount),
            );
        }
    }

    /// JIT tracing without hitcounts: same block hook, single-hit JIT helper.
    fn jit_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self>),
            Hook::Empty,
            Hook::Empty,
        );
        // SAFETY: FFI call registering the JIT trace helper for the hook we just created.
        unsafe {
            libafl_qemu_sys::libafl_qemu_block_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_block_single),
            );
        }
    }

    /// Function-call tracing with hitcounts, on block transitions.
    fn fn_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self>),
            Hook::Empty,
            Hook::Raw(trace_block_transition_hitcount),
        );
    }

    /// Function-call tracing without hitcounts, on block transitions.
    fn fn_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self>),
            Hook::Empty,
            Hook::Raw(trace_block_transition_single),
        );
    }
}
impl Default for StdEdgeCoverageClassicModuleBuilder {
    /// Default preset: classic (hashed-block-id) variant, standard filters,
    /// hitcounts and JIT both enabled.
    fn default() -> Self {
        Self::new(
            EdgeCoverageClassicVariant,
            StdAddressFilter::default(),
            StdPageFilter::default(),
            true,
            true,
        )
    }
}
impl StdEdgeCoverageClassicModule {
    /// Convenience entry point: a default builder for the standard classic-coverage module.
    #[must_use]
    pub fn builder() -> StdEdgeCoverageClassicModuleBuilder {
        EdgeCoverageModuleBuilder::default()
    }
}
/// Variant using hashed edge ids with pointer-based trace functions
/// (only the non-JIT function hooks are implemented).
#[derive(Debug)]
pub struct EdgeCoverageChildVariant;

/// Child-variant module preconfigured with the standard address/page filters.
pub type StdEdgeCoverageChildModule =
    EdgeCoverageModule<StdAddressFilter, StdPageFilter, EdgeCoverageChildVariant>;

/// Builder for [`StdEdgeCoverageChildModule`]; `false` = no observer attached yet.
pub type StdEdgeCoverageChildModuleBuilder =
    EdgeCoverageModuleBuilder<StdAddressFilter, StdPageFilter, EdgeCoverageChildVariant, false>;
impl<AF, PF> EdgeCoverageVariant<AF, PF> for EdgeCoverageChildVariant {
    // Unlike the full variant, this one overrides the default to `false`.
    const DO_SIDE_EFFECTS: bool = false;

    // No jit_* overrides: the trait defaults panic, so this variant must not
    // be used with JIT enabled (see `StdEdgeCoverageChildModule::builder`).

    /// Function-call edge tracing with hitcounts, using the `_ptr` trace function.
    fn fn_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_hashed_edge_ids::<AF, ET, PF, S, Self>),
            Hook::Raw(trace_edge_hitcount_ptr),
        );
    }

    /// Function-call edge tracing without hitcounts, using the `_ptr` trace function.
    fn fn_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_hashed_edge_ids::<AF, ET, PF, S, Self>),
            Hook::Raw(trace_edge_single_ptr),
        );
    }
}
impl Default for StdEdgeCoverageChildModuleBuilder {
fn default() -> Self {
Self {
variant: EdgeCoverageChildVariant,
address_filter: StdAddressFilter::default(),
page_filter: StdPageFilter::default(),
use_hitcounts: true,
use_jit: true,
}
}
}
impl StdEdgeCoverageChildModule {
    /// Entry point for the child variant. JIT tracing is disabled here
    /// (the child variant installs raw function tracers only).
    #[must_use]
    pub fn builder() -> StdEdgeCoverageChildModuleBuilder {
        EdgeCoverageModuleBuilder::default().jit(false)
    }
}
/// Builder for [`EdgeCoverageModule`]. The `IS_INITIALIZED` const parameter is
/// flipped to `true` by `map_observer`; `build` is only implemented for
/// initialized builders, so forgetting the observer is a compile-time error.
#[derive(Debug)]
pub struct EdgeCoverageModuleBuilder<AF, PF, V, const IS_INITIALIZED: bool> {
    variant: V,
    address_filter: AF,
    page_filter: PF,
    use_hitcounts: bool,
    use_jit: bool,
}

/// QEMU emulator module tracking edge coverage, parameterized by address
/// filter `AF`, page filter `PF` and the concrete coverage variant `V`.
#[derive(Debug)]
pub struct EdgeCoverageModule<AF, PF, V> {
    variant: V,
    address_filter: AF,
    // we only use it in system mode at the moment.
    #[cfg_attr(not(feature = "systemmode"), allow(dead_code))]
    page_filter: PF,
    use_hitcounts: bool,
    use_jit: bool,
}

impl<AF, PF, V> EdgeCoverageModuleBuilder<AF, PF, V, true> {
    /// Finalize the builder into a module. Only callable once the coverage
    /// map has been registered (`IS_INITIALIZED = true`).
    pub fn build(self) -> Result<EdgeCoverageModule<AF, PF, V>, Error> {
        Ok(EdgeCoverageModule::new(
            self.address_filter,
            self.page_filter,
            self.variant,
            self.use_hitcounts,
            self.use_jit,
        ))
    }
}
impl<AF, PF, V, const IS_INITIALIZED: bool> EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED> {
    /// Internal constructor; the `IS_INITIALIZED` flag is chosen by the
    /// calling builder method.
    fn new(
        variant: V,
        address_filter: AF,
        page_filter: PF,
        use_hitcounts: bool,
        use_jit: bool,
    ) -> Self {
        Self {
            variant,
            address_filter,
            page_filter,
            use_hitcounts,
            use_jit,
        }
    }

    /// Register the coverage map observer and publish its pointers to the
    /// runtime globals used by the generators/tracers. Returns a builder with
    /// `IS_INITIALIZED = true`, unlocking `build`.
    #[must_use]
    pub fn map_observer<O>(self, map_observer: &mut O) -> EdgeCoverageModuleBuilder<AF, PF, V, true>
    where
        O: VariableLengthMapObserver,
    {
        let map_ptr = map_observer.map_slice_mut().as_mut_ptr() as *mut u8;
        let map_max_size = map_observer.map_slice_mut().len();
        let size_ptr = map_observer.as_mut().size_mut() as *mut usize;
        unsafe {
            LIBAFL_QEMU_EDGES_MAP_PTR = map_ptr;
            LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = size_ptr;
            LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE = map_max_size;
            // NOTE(review): wraps to usize::MAX in release if the map is
            // empty — consider asserting `map_max_size > 0`.
            LIBAFL_QEMU_EDGES_MAP_MASK_MAX = map_max_size - 1;
        }
        EdgeCoverageModuleBuilder::<AF, PF, V, true>::new(
            self.variant,
            self.address_filter,
            self.page_filter,
            self.use_hitcounts,
            self.use_jit,
        )
    }

    /// Replace the coverage variant.
    pub fn variant<V2>(self, variant: V2) -> EdgeCoverageModuleBuilder<AF, PF, V2, IS_INITIALIZED> {
        EdgeCoverageModuleBuilder::new(
            variant,
            self.address_filter,
            self.page_filter,
            self.use_hitcounts,
            self.use_jit,
        )
    }

    /// Replace the address filter.
    pub fn address_filter<AF2>(
        self,
        address_filter: AF2,
    ) -> EdgeCoverageModuleBuilder<AF2, PF, V, IS_INITIALIZED> {
        EdgeCoverageModuleBuilder::new(
            self.variant,
            address_filter,
            self.page_filter,
            self.use_hitcounts,
            self.use_jit,
        )
    }

    /// Replace the page filter (only consulted in system mode).
    pub fn page_filter<PF2>(
        self,
        page_filter: PF2,
    ) -> EdgeCoverageModuleBuilder<AF, PF2, V, IS_INITIALIZED> {
        EdgeCoverageModuleBuilder::new(
            self.variant,
            self.address_filter,
            page_filter,
            self.use_hitcounts,
            self.use_jit,
        )
    }

    /// Enable or disable AFL-style hitcounts.
    #[must_use]
    pub fn hitcounts(
        self,
        use_hitcounts: bool,
    ) -> EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED> {
        EdgeCoverageModuleBuilder::new(
            self.variant,
            self.address_filter,
            self.page_filter,
            use_hitcounts,
            self.use_jit,
        )
    }

    /// Enable or disable the JIT tracer.
    #[must_use]
    pub fn jit(self, use_jit: bool) -> EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED> {
        EdgeCoverageModuleBuilder::new(
            self.variant,
            self.address_filter,
            self.page_filter,
            self.use_hitcounts,
            use_jit,
        )
    }
}
impl<AF, PF, V> EdgeCoverageModule<AF, PF, V> {
    /// Build a module directly from its parts. Prefer the builder, which also
    /// registers the coverage map with the runtime globals.
    #[must_use]
    pub fn new(
        address_filter: AF,
        page_filter: PF,
        variant: V,
        use_hitcounts: bool,
        use_jit: bool,
    ) -> Self {
        Self {
            variant,
            address_filter,
            page_filter,
            use_hitcounts,
            use_jit,
        }
    }
}
impl<AF, PF, V> EdgeCoverageModule<AF, PF, V>
where
    AF: AddressFilter,
    PF: PageFilter,
{
    /// Whether code at `addr` should be instrumented, per the address filter.
    #[cfg(feature = "usermode")]
    #[must_use]
    pub fn must_instrument(&self, addr: GuestAddr) -> bool {
        self.address_filter.allowed(&addr)
    }

    /// Whether code at `addr` should be instrumented. If the current paging id
    /// is known, the page filter must also allow it.
    #[cfg(feature = "systemmode")]
    #[must_use]
    pub fn must_instrument(&self, addr: GuestAddr, page_id: Option<GuestPhysAddr>) -> bool {
        let address_ok = self.address_filter.allowed(&addr);
        match page_id {
            Some(page) => address_ok && self.page_filter.allowed(&page),
            None => address_ok,
        }
    }
}
impl<S, AF, PF, V> EmulatorModule<S> for EdgeCoverageModule<AF, PF, V>
where
    AF: AddressFilter + 'static,
    PF: PageFilter + 'static,
    S: Unpin + UsesInput + HasMetadata,
    V: EdgeCoverageVariant<AF, PF> + 'static,
{
    // Whether hook generation mutates the fuzzer state is decided by the variant.
    const HOOKS_DO_SIDE_EFFECTS: bool = V::DO_SIDE_EFFECTS;

    type ModuleAddressFilter = AF;
    #[cfg(feature = "systemmode")]
    type ModulePageFilter = PF;

    /// On first execution, install the variant's hooks, picking the
    /// JIT/function x hitcount/no-hitcount combination from the configuration.
    fn first_exec<ET>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>, _state: &mut S)
    where
        ET: EmulatorModuleTuple<S>,
    {
        if self.use_hitcounts {
            if self.use_jit {
                self.variant.jit_hitcount(emulator_modules);
            } else {
                self.variant.fn_hitcount(emulator_modules);
            }
        } else if self.use_jit {
            self.variant.jit_no_hitcount(emulator_modules);
        } else {
            self.variant.fn_no_hitcount(emulator_modules);
        }
    }

    fn address_filter(&self) -> &Self::ModuleAddressFilter {
        &self.address_filter
    }

    fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter {
        &mut self.address_filter
    }

    #[cfg(feature = "systemmode")]
    fn page_filter(&self) -> &Self::ModulePageFilter {
        &self.page_filter
    }

    #[cfg(feature = "systemmode")]
    fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter {
        &mut self.page_filter
    }
}
// Per-thread previous-location value used by the block-transition tracers to
// derive an edge index from consecutive block ids (AFL-style).
thread_local!(static PREV_LOC : UnsafeCell<u64> = const { UnsafeCell::new(0) });

/// Generate a state-unique id for the edge `src -> dest`.
///
/// Ids are allocated sequentially and remembered in `QemuEdgesMapMetadata`
/// inside the fuzzer state; hence this hook only works for in-process fuzzing
/// (it panics if no state is available).
pub fn gen_unique_edge_ids<AF, ET, PF, S, V>(
    emulator_modules: &mut EmulatorModules<ET, S>,
    state: Option<&mut S>,
    src: GuestAddr,
    dest: GuestAddr,
) -> Option<u64>
where
    AF: AddressFilter,
    ET: EmulatorModuleTuple<S>,
    PF: PageFilter,
    S: Unpin + UsesInput + HasMetadata,
    V: EdgeCoverageVariant<AF, PF>,
{
    if let Some(module) = emulator_modules.get::<EdgeCoverageModule<AF, PF, V>>() {
        // The builder must have registered a map before hooks run.
        unsafe {
            assert!(LIBAFL_QEMU_EDGES_MAP_MASK_MAX > 0);
            assert_ne!(*addr_of!(LIBAFL_QEMU_EDGES_MAP_SIZE_PTR), ptr::null_mut());
        }
        #[cfg(feature = "usermode")]
        {
            if !module.must_instrument(src) && !module.must_instrument(dest) {
                return None;
            }
        }
        #[cfg(feature = "systemmode")]
        {
            let paging_id = emulator_modules
                .qemu()
                .current_cpu()
                .and_then(|cpu| cpu.current_paging_id());
            if !module.must_instrument(src, paging_id) && !module.must_instrument(dest, paging_id) {
                return None;
            }
        }
    }
    let state = state.expect("The gen_unique_edge_ids hook works only for in-process fuzzing");
    let meta = state.metadata_or_insert_with(QemuEdgesMapMetadata::new);
    match meta.map.entry((src, dest)) {
        Entry::Occupied(e) => {
            // Edge already has an id; only bump the observed map size.
            let id = *e.get();
            unsafe {
                let nxt = (id as usize + 1) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
                *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = max(*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, nxt);
            }
            Some(id)
        }
        Entry::Vacant(e) => {
            // Allocate the next id; wraps around at the map mask.
            let id = meta.current_id;
            e.insert(id);
            unsafe {
                meta.current_id = (id + 1) & (LIBAFL_QEMU_EDGES_MAP_MASK_MAX as u64);
                *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = meta.current_id as usize;
            }
            // GuestAddress is u32 for 32 bit guests
            #[allow(clippy::unnecessary_cast)]
            Some(id as u64)
        }
    }
}
/// Increment the hitcount for edge `id` in the global `EDGES_MAP`.
///
/// # Safety
///
/// Calling this concurrently for the same id is racey and may lose updates.
pub unsafe extern "C" fn trace_edge_hitcount(_: *const (), id: u64) {
    unsafe {
        // Counters deliberately wrap on overflow (AFL-style).
        EDGES_MAP[id as usize] = EDGES_MAP[id as usize].wrapping_add(1);
    }
}

/// Mark edge `id` as reached (no hitcounts) in the global `EDGES_MAP`.
pub extern "C" fn trace_edge_single(_: *const (), id: u64) {
    // # Safety
    // Worst case we set the byte to 1 multiple times..
    unsafe {
        EDGES_MAP[id as usize] = 1;
    }
}
/// Generate a hash-based id for the edge `src -> dest`.
///
/// Unlike `gen_unique_edge_ids`, ids are derived by hashing the two addresses
/// and may collide; no fuzzer state is needed. Returns `None` when the module
/// is absent or the edge is filtered out.
#[allow(clippy::unnecessary_cast)]
pub fn gen_hashed_edge_ids<AF, ET, PF, S, V>(
    emulator_modules: &mut EmulatorModules<ET, S>,
    _state: Option<&mut S>,
    src: GuestAddr,
    dest: GuestAddr,
) -> Option<u64>
where
    AF: AddressFilter,
    ET: EmulatorModuleTuple<S>,
    PF: PageFilter,
    S: Unpin + UsesInput + HasMetadata,
    V: EdgeCoverageVariant<AF, PF>,
{
    if let Some(module) = emulator_modules.get::<EdgeCoverageModule<AF, PF, V>>() {
        #[cfg(feature = "usermode")]
        if !module.must_instrument(src) && !module.must_instrument(dest) {
            return None;
        }
        #[cfg(feature = "systemmode")]
        {
            let paging_id = emulator_modules
                .qemu()
                .current_cpu()
                .and_then(|cpu| cpu.current_paging_id());
            if !module.must_instrument(src, paging_id) && !module.must_instrument(dest, paging_id) {
                return None;
            }
        }
        let id = hash_me(src as u64) ^ hash_me(dest as u64);
        unsafe {
            // Advertise the (masked) highest index written so far.
            let nxt = (id as usize + 1) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
            *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = nxt;
        }
        // GuestAddress is u32 for 32 bit guests
        #[allow(clippy::unnecessary_cast)]
        Some(id)
    } else {
        None
    }
}

/// Increment the hitcount at `LIBAFL_QEMU_EDGES_MAP_PTR + id`.
///
/// # Safety
/// Increases id at `EDGES_MAP_PTR` - potentially racey if called concurrently.
pub unsafe extern "C" fn trace_edge_hitcount_ptr(_: *const (), id: u64) {
    unsafe {
        let ptr = LIBAFL_QEMU_EDGES_MAP_PTR.add(id as usize);
        *ptr = (*ptr).wrapping_add(1);
    }
}

/// Set the byte at `LIBAFL_QEMU_EDGES_MAP_PTR + id` to 1.
///
/// # Safety
/// Fine.
/// Worst case we set the byte to 1 multiple times.
pub unsafe extern "C" fn trace_edge_single_ptr(_: *const (), id: u64) {
    unsafe {
        let ptr = LIBAFL_QEMU_EDGES_MAP_PTR.add(id as usize);
        *ptr = 1;
    }
}
/// Generate a hash-based id for the block at `pc` (classic AFL block scheme).
///
/// NOTE(review): unlike `gen_hashed_edge_ids`, when the module lookup fails
/// this still falls through and returns `Some(id)` — confirm this asymmetry
/// is intentional.
#[allow(clippy::unnecessary_cast)]
pub fn gen_hashed_block_ids<AF, ET, PF, S, V>(
    emulator_modules: &mut EmulatorModules<ET, S>,
    _state: Option<&mut S>,
    pc: GuestAddr,
) -> Option<u64>
where
    AF: AddressFilter,
    ET: EmulatorModuleTuple<S>,
    PF: PageFilter,
    S: Unpin + UsesInput + HasMetadata,
    V: EdgeCoverageVariant<AF, PF>,
{
    // first check if we should filter
    if let Some(module) = emulator_modules.get::<EdgeCoverageModule<AF, PF, V>>() {
        #[cfg(feature = "usermode")]
        {
            if !module.must_instrument(pc) {
                return None;
            }
        }
        #[cfg(feature = "systemmode")]
        {
            let page_id = emulator_modules
                .qemu()
                .current_cpu()
                .and_then(|cpu| cpu.current_paging_id());
            if !module.must_instrument(pc, page_id) {
                return None;
            }
        }
    }
    let id = hash_me(pc as u64);
    unsafe {
        // Advertise the (masked) highest index written so far.
        let nxt = (id as usize + 1) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
        *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = nxt;
    }
    // GuestAddress is u32 for 32 bit guests
    #[allow(clippy::unnecessary_cast)]
    Some(id)
}

/// AFL-style transition tracer: XOR the current block id with the shifted
/// previous one to index the edge map, then bump the hitcount.
///
/// # Safety
/// Dereferences the global `PREV_LOC` variable. May not be called concurrently.
pub unsafe extern "C" fn trace_block_transition_hitcount(_: *const (), id: u64) {
    unsafe {
        PREV_LOC.with(|prev_loc| {
            let x = ((*prev_loc.get() ^ id) as usize) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
            let entry = LIBAFL_QEMU_EDGES_MAP_PTR.add(x);
            *entry = (*entry).wrapping_add(1);
            *prev_loc.get() = id.overflowing_shr(1).0;
        });
    }
}

/// Like `trace_block_transition_hitcount`, but the entry is set to 1.
///
/// # Safety
/// Dereferences the global `PREV_LOC` variable. May not be called concurrently.
pub unsafe extern "C" fn trace_block_transition_single(_: *const (), id: u64) {
    unsafe {
        PREV_LOC.with(|prev_loc| {
            let x = ((*prev_loc.get() ^ id) as usize) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
            let entry = LIBAFL_QEMU_EDGES_MAP_PTR.add(x);
            *entry = 1;
            *prev_loc.get() = id.overflowing_shr(1).0;
        });
    }
}

View File

@ -0,0 +1,77 @@
use libafl::{inputs::UsesInput, HasMetadata};
use super::{
helpers::{gen_hashed_edge_ids, trace_edge_hitcount_ptr, trace_edge_single_ptr},
EdgeCoverageVariant,
};
use crate::{
modules::{
AddressFilter, EdgeCoverageModule, EdgeCoverageModuleBuilder, EmulatorModuleTuple,
PageFilter, StdAddressFilter, StdPageFilter,
},
EmulatorModules, Hook,
};
/// Edge coverage variant intended for child-process (fork) fuzzing: edge ids
/// are hash-based (may collide) and nothing is stored in the fuzzer state —
/// the tracers write through a raw map pointer.
#[derive(Debug)]
pub struct EdgeCoverageChildVariant;

/// Child variant with a variable-length map (`IS_CONST_MAP = false`).
pub type StdEdgeCoverageChildModule =
    EdgeCoverageModule<StdAddressFilter, StdPageFilter, EdgeCoverageChildVariant, false, 0>;
/// Builder for [`StdEdgeCoverageChildModule`]; starts uninitialized.
pub type StdEdgeCoverageChildModuleBuilder = EdgeCoverageModuleBuilder<
    StdAddressFilter,
    StdPageFilter,
    EdgeCoverageChildVariant,
    false,
    false,
    0,
>;

impl<AF, PF, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
    EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE> for EdgeCoverageChildVariant
{
    // Hook generation does not mutate the fuzzer state for this variant.
    const DO_SIDE_EFFECTS: bool = false;

    /// Install the hashed-edge-id generator with the raw hitcount tracer.
    fn fn_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_hashed_edge_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Raw(trace_edge_hitcount_ptr),
        );
    }

    /// Like `fn_hitcount`, but each reached edge entry is set to 1.
    fn fn_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_hashed_edge_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Raw(trace_edge_single_ptr),
        );
    }
}
impl Default for StdEdgeCoverageChildModuleBuilder {
fn default() -> Self {
Self {
variant: EdgeCoverageChildVariant,
address_filter: StdAddressFilter::default(),
page_filter: StdPageFilter::default(),
use_hitcounts: true,
use_jit: true,
}
}
}
impl StdEdgeCoverageChildModule {
    /// Entry point for the child variant. JIT tracing is disabled here
    /// (the child variant installs raw function tracers only).
    #[must_use]
    pub fn builder() -> StdEdgeCoverageChildModuleBuilder {
        EdgeCoverageModuleBuilder::default().jit(false)
    }
}

View File

@ -0,0 +1,124 @@
use libafl::{inputs::UsesInput, HasMetadata};
use super::{
helpers::{
gen_hashed_block_ids, trace_block_transition_hitcount, trace_block_transition_single,
},
EdgeCoverageVariant,
};
use crate::{
modules::{
AddressFilter, EdgeCoverageModule, EdgeCoverageModuleBuilder, EmulatorModuleTuple,
PageFilter, StdAddressFilter, StdPageFilter,
},
EmulatorModules, Hook,
};
/// Classic AFL-style edge coverage variant: block hooks combined with a
/// per-thread previous-location value to index the edge map.
#[derive(Debug)]
pub struct EdgeCoverageClassicVariant;

/// Classic variant with a variable-length map (`IS_CONST_MAP = false`).
pub type StdEdgeCoverageClassicModule =
    EdgeCoverageModule<StdAddressFilter, StdPageFilter, EdgeCoverageClassicVariant, false, 0>;
/// Builder for [`StdEdgeCoverageClassicModule`]; starts uninitialized.
pub type StdEdgeCoverageClassicModuleBuilder = EdgeCoverageModuleBuilder<
    StdAddressFilter,
    StdPageFilter,
    EdgeCoverageClassicVariant,
    false,
    false,
    0,
>;

impl<AF, PF, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
    EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE> for EdgeCoverageClassicVariant
{
    // Block-id generation does not mutate the fuzzer state.
    const DO_SIDE_EFFECTS: bool = false;

    /// Install the block-id generator and let the QEMU JIT emit the
    /// hitcount tracing code.
    fn jit_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Empty,
            Hook::Empty,
        );
        unsafe {
            libafl_qemu_sys::libafl_qemu_block_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_block_hitcount),
            );
        }
    }

    /// Like `jit_hitcount`, but the JIT marks entries with 1 instead of counting.
    fn jit_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Empty,
            Hook::Empty,
        );
        unsafe {
            libafl_qemu_sys::libafl_qemu_block_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_block_single),
            );
        }
    }

    /// Non-JIT path: raw tracer applies hitcounts on block transitions.
    fn fn_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Empty,
            Hook::Raw(trace_block_transition_hitcount),
        );
    }

    /// Non-JIT path: raw tracer marks reached entries with 1.
    fn fn_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.blocks(
            Hook::Function(gen_hashed_block_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Empty,
            Hook::Raw(trace_block_transition_single),
        );
    }
}
impl Default for StdEdgeCoverageClassicModuleBuilder {
fn default() -> Self {
Self {
variant: EdgeCoverageClassicVariant,
address_filter: StdAddressFilter::default(),
page_filter: StdPageFilter::default(),
use_hitcounts: true,
use_jit: true,
}
}
}
impl StdEdgeCoverageClassicModule {
    /// Entry point for configuring a classic edge coverage module with the
    /// standard filters; call `map_observer`/`const_map_observer` before `build`.
    #[must_use]
    pub fn builder() -> StdEdgeCoverageClassicModuleBuilder {
        EdgeCoverageModuleBuilder::default()
    }
}

View File

@ -0,0 +1,114 @@
use libafl::{inputs::UsesInput, HasMetadata};
use super::{
helpers::{gen_unique_edge_ids, trace_edge_hitcount, trace_edge_single},
EdgeCoverageVariant,
};
use crate::{
modules::{
AddressFilter, EdgeCoverageModule, EdgeCoverageModuleBuilder, EmulatorModuleTuple,
PageFilter, StdAddressFilter, StdPageFilter,
},
EmulatorModules, Hook,
};
/// Full-precision edge coverage variant: collision-free edge ids allocated in
/// fuzzer-state metadata (in-process fuzzing only; keeps the trait's default
/// `DO_SIDE_EFFECTS = true`).
#[derive(Debug)]
pub struct EdgeCoverageFullVariant;

/// Full variant with a variable-length map (`IS_CONST_MAP = false`).
pub type StdEdgeCoverageFullModule =
    EdgeCoverageModule<StdAddressFilter, StdPageFilter, EdgeCoverageFullVariant, false, 0>;
/// Builder for [`StdEdgeCoverageFullModule`]; starts uninitialized.
pub type StdEdgeCoverageFullModuleBuilder = EdgeCoverageModuleBuilder<
    StdAddressFilter,
    StdPageFilter,
    EdgeCoverageFullVariant,
    false,
    false,
    0,
>;

impl<AF, PF, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
    EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE> for EdgeCoverageFullVariant
{
    /// Install the unique-edge-id generator and let the QEMU JIT emit the
    /// hitcount tracing code.
    fn jit_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Empty,
        );
        unsafe {
            libafl_qemu_sys::libafl_qemu_edge_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_edge_hitcount),
            );
        }
    }

    /// Like `jit_hitcount`, but the JIT marks entries with 1 instead of counting.
    fn jit_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        let hook_id = emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Empty,
        );
        unsafe {
            libafl_qemu_sys::libafl_qemu_edge_hook_set_jit(
                hook_id.0,
                Some(libafl_qemu_sys::libafl_jit_trace_edge_single),
            );
        }
    }

    /// Non-JIT path: raw tracer bumps the hitcount in the global `EDGES_MAP`.
    fn fn_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Raw(trace_edge_hitcount),
        );
    }

    /// Non-JIT path: raw tracer marks reached entries with 1.
    fn fn_no_hitcount<ET, S>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        emulator_modules.edges(
            Hook::Function(gen_unique_edge_ids::<AF, ET, PF, S, Self, IS_CONST_MAP, MAP_SIZE>),
            Hook::Raw(trace_edge_single),
        );
    }
}
impl Default for StdEdgeCoverageFullModuleBuilder {
fn default() -> Self {
Self {
variant: EdgeCoverageFullVariant,
address_filter: StdAddressFilter::default(),
page_filter: StdPageFilter::default(),
use_hitcounts: true,
use_jit: true,
}
}
}
impl StdEdgeCoverageFullModule {
    /// Entry point for configuring a full edge coverage module with the
    /// standard filters; call `map_observer`/`const_map_observer` before `build`.
    #[must_use]
    pub fn builder() -> StdEdgeCoverageFullModuleBuilder {
        EdgeCoverageModuleBuilder::default()
    }
}

View File

@ -0,0 +1,345 @@
use std::ptr;
/// Generators, responsible for generating block/edge ids
pub use generators::{gen_hashed_block_ids, gen_hashed_edge_ids, gen_unique_edge_ids};
use hashbrown::HashMap;
use libafl_qemu_sys::GuestAddr;
use serde::{Deserialize, Serialize};
/// Tracers, responsible for propagating an ID in a map.
pub use tracers::{
trace_block_transition_hitcount, trace_block_transition_single, trace_edge_hitcount,
trace_edge_hitcount_ptr, trace_edge_single, trace_edge_single_ptr,
};
// Runtime globals describing the edge map for variable-length maps.
// `#[no_mangle]` keeps stable symbol names — presumably so that native/JIT
// code can reference them by name; confirm against the QEMU side.
#[no_mangle]
pub(super) static mut LIBAFL_QEMU_EDGES_MAP_PTR: *mut u8 = ptr::null_mut();
#[no_mangle]
pub(super) static mut LIBAFL_QEMU_EDGES_MAP_SIZE_PTR: *mut usize = ptr::null_mut();
#[no_mangle]
pub(super) static mut LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE: usize = 0;
#[no_mangle]
pub(super) static mut LIBAFL_QEMU_EDGES_MAP_MASK_MAX: usize = 0;

/// Fuzzer-state metadata mapping `(src, dest)` guest-address pairs to the
/// edge ids allocated by `gen_unique_edge_ids`.
#[cfg_attr(
    any(not(feature = "serdeany_autoreg"), miri),
    allow(clippy::unsafe_derive_deserialize)
)] // for SerdeAny
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct QemuEdgesMapMetadata {
    // Edge -> allocated id.
    pub map: HashMap<(GuestAddr, GuestAddr), u64>,
    // Next id to hand out (wraps at the map mask).
    pub current_id: u64,
}

libafl_bolts::impl_serdeany!(QemuEdgesMapMetadata);
impl QemuEdgesMapMetadata {
#[must_use]
pub fn new() -> Self {
Self {
map: HashMap::new(),
current_id: 0,
}
}
}
mod generators {
use std::{cmp::max, ptr, ptr::addr_of};
use hashbrown::hash_map::Entry;
use libafl::{inputs::UsesInput, HasMetadata};
use libafl_qemu_sys::GuestAddr;
use super::{
super::EdgeCoverageVariant, QemuEdgesMapMetadata, LIBAFL_QEMU_EDGES_MAP_MASK_MAX,
LIBAFL_QEMU_EDGES_MAP_SIZE_PTR,
};
use crate::{
modules::{hash_me, AddressFilter, EdgeCoverageModule, EmulatorModuleTuple, PageFilter},
EmulatorModules,
};
    /// Index mask for the edge map: compile-time `MAP_SIZE - 1` for const
    /// maps, otherwise the runtime `LIBAFL_QEMU_EDGES_MAP_MASK_MAX` global.
    ///
    /// NOTE(review): `MAP_SIZE - 1` is only a valid mask for power-of-two
    /// sizes — confirm callers guarantee this.
    fn get_mask<const IS_CONST_MAP: bool, const MAP_SIZE: usize>() -> usize {
        if IS_CONST_MAP {
            const {
                // The `const` block is evaluated for every monomorphization,
                // even when this branch is dead, hence the `!IS_CONST_MAP ||`
                // guard inside the assertion.
                assert!(
                    !IS_CONST_MAP || MAP_SIZE > 0,
                    "The size of a const map should be bigger than 0."
                );
                MAP_SIZE.overflowing_sub(1).0
            }
        } else {
            // Set up by the builder (`map_observer`) before hooks run.
            unsafe { LIBAFL_QEMU_EDGES_MAP_MASK_MAX }
        }
    }
pub fn gen_unique_edge_ids<AF, ET, PF, S, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize>(
emulator_modules: &mut EmulatorModules<ET, S>,
state: Option<&mut S>,
src: GuestAddr,
dest: GuestAddr,
) -> Option<u64>
where
AF: AddressFilter,
ET: EmulatorModuleTuple<S>,
PF: PageFilter,
S: Unpin + UsesInput + HasMetadata,
V: EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE>,
{
if let Some(module) =
emulator_modules.get::<EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>>()
{
unsafe {
assert!(LIBAFL_QEMU_EDGES_MAP_MASK_MAX > 0);
assert_ne!(*addr_of!(LIBAFL_QEMU_EDGES_MAP_SIZE_PTR), ptr::null_mut());
}
#[cfg(feature = "usermode")]
{
if !module.must_instrument(src) && !module.must_instrument(dest) {
return None;
}
}
#[cfg(feature = "systemmode")]
{
let paging_id = emulator_modules
.qemu()
.current_cpu()
.and_then(|cpu| cpu.current_paging_id());
if !module.must_instrument(src, paging_id)
&& !module.must_instrument(dest, paging_id)
{
return None;
}
}
}
let mask: usize = get_mask::<IS_CONST_MAP, MAP_SIZE>();
let state = state.expect("The gen_unique_edge_ids hook works only for in-process fuzzing");
let meta = state.metadata_or_insert_with(QemuEdgesMapMetadata::new);
match meta.map.entry((src, dest)) {
Entry::Occupied(e) => {
let id = *e.get();
unsafe {
let nxt = (id as usize + 1) & mask;
if !IS_CONST_MAP {
*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = max(*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, nxt);
}
}
Some(id)
}
Entry::Vacant(e) => {
let id = meta.current_id;
e.insert(id);
unsafe {
meta.current_id = (id + 1) & (mask as u64);
if !IS_CONST_MAP {
*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = meta.current_id as usize;
}
}
// GuestAddress is u32 for 32 bit guests
#[allow(clippy::unnecessary_cast)]
Some(id as u64)
}
}
}
    /// Generate a hash-based (possibly colliding) id for the edge
    /// `src -> dest`; needs no fuzzer state. Returns `None` when the module
    /// is absent or the edge is filtered out.
    #[allow(clippy::unnecessary_cast)]
    pub fn gen_hashed_edge_ids<AF, ET, PF, S, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize>(
        emulator_modules: &mut EmulatorModules<ET, S>,
        _state: Option<&mut S>,
        src: GuestAddr,
        dest: GuestAddr,
    ) -> Option<u64>
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
        V: EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE>,
    {
        if let Some(module) =
            emulator_modules.get::<EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>>()
        {
            #[cfg(feature = "usermode")]
            if !module.must_instrument(src) && !module.must_instrument(dest) {
                return None;
            }
            #[cfg(feature = "systemmode")]
            {
                let paging_id = emulator_modules
                    .qemu()
                    .current_cpu()
                    .and_then(|cpu| cpu.current_paging_id());
                if !module.must_instrument(src, paging_id)
                    && !module.must_instrument(dest, paging_id)
                {
                    return None;
                }
            }
            let mask: usize = get_mask::<IS_CONST_MAP, MAP_SIZE>();
            let id = hash_me(src as u64) ^ hash_me(dest as u64);
            unsafe {
                // Only variable-length maps track a runtime size.
                let nxt = (id as usize + 1) & mask;
                if !IS_CONST_MAP {
                    *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = nxt;
                }
            }
            // GuestAddress is u32 for 32 bit guests
            #[allow(clippy::unnecessary_cast)]
            Some(id)
        } else {
            None
        }
    }

    /// Generate a hash-based id for the block at `pc` (classic AFL scheme).
    ///
    /// NOTE(review): unlike `gen_hashed_edge_ids`, when the module lookup
    /// fails this still falls through and returns `Some(id)` — confirm this
    /// asymmetry is intentional.
    #[allow(clippy::unnecessary_cast)]
    pub fn gen_hashed_block_ids<AF, ET, PF, S, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize>(
        emulator_modules: &mut EmulatorModules<ET, S>,
        _state: Option<&mut S>,
        pc: GuestAddr,
    ) -> Option<u64>
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
        V: EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE>,
    {
        // first check if we should filter
        if let Some(module) =
            emulator_modules.get::<EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>>()
        {
            #[cfg(feature = "usermode")]
            {
                if !module.must_instrument(pc) {
                    return None;
                }
            }
            #[cfg(feature = "systemmode")]
            {
                let page_id = emulator_modules
                    .qemu()
                    .current_cpu()
                    .and_then(|cpu| cpu.current_paging_id());
                if !module.must_instrument(pc, page_id) {
                    return None;
                }
            }
        }
        let mask: usize = get_mask::<IS_CONST_MAP, MAP_SIZE>();
        let id = hash_me(pc as u64);
        unsafe {
            // Only variable-length maps track a runtime size.
            let nxt = (id as usize + 1) & mask;
            if !IS_CONST_MAP {
                *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = nxt;
            }
        }
        // GuestAddress is u32 for 32 bit guests
        #[allow(clippy::unnecessary_cast)]
        Some(id)
    }
}
// Tracers: write a previously generated id into the coverage map. They are
// installed as raw QEMU hooks, so they must be `extern "C"` and cheap.
mod tracers {
    use std::cell::UnsafeCell;

    use libafl_targets::EDGES_MAP;

    use super::{LIBAFL_QEMU_EDGES_MAP_MASK_MAX, LIBAFL_QEMU_EDGES_MAP_PTR};

    // Per-thread previous-location value for the AFL-style block-transition
    // tracers below.
    thread_local!(static PREV_LOC : UnsafeCell<u64> = const { UnsafeCell::new(0) });

    /// # Safety
    ///
    /// - @id should be the one generated by a gen_* function from this module.
    /// - Calling this concurrently for the same id is racey and may lose updates.
    pub unsafe extern "C" fn trace_edge_hitcount(_: *const (), id: u64) {
        unsafe {
            EDGES_MAP[id as usize] = EDGES_MAP[id as usize].wrapping_add(1);
        }
    }

    /// # Safety
    ///
    /// - @id should be the one generated by a gen_* function from this module.
    pub unsafe extern "C" fn trace_edge_single(_: *const (), id: u64) {
        // # Safety
        // Worst case we set the byte to 1 multiple times..
        unsafe {
            EDGES_MAP[id as usize] = 1;
        }
    }

    /// # Safety
    ///
    /// Increases id at `EDGES_MAP_PTR` - potentially racey if called concurrently.
    pub unsafe extern "C" fn trace_edge_hitcount_ptr(_: *const (), id: u64) {
        unsafe {
            let ptr = LIBAFL_QEMU_EDGES_MAP_PTR.add(id as usize);
            *ptr = (*ptr).wrapping_add(1);
        }
    }

    /// # Safety
    ///
    /// Fine.
    /// Worst case we set the byte to 1 multiple times.
    pub unsafe extern "C" fn trace_edge_single_ptr(_: *const (), id: u64) {
        unsafe {
            let ptr = LIBAFL_QEMU_EDGES_MAP_PTR.add(id as usize);
            *ptr = 1;
        }
    }

    /// AFL-style transition tracer: XOR the current block id with the shifted
    /// previous one to index the edge map, then bump the hitcount.
    /// NOTE(review): reads the runtime mask global, so it must also be
    /// initialized for const-map setups — confirm the builder does this.
    ///
    /// # Safety
    ///
    /// Dereferences the global `PREV_LOC` variable. May not be called concurrently.
    pub unsafe extern "C" fn trace_block_transition_hitcount(_: *const (), id: u64) {
        unsafe {
            PREV_LOC.with(|prev_loc| {
                let x = ((*prev_loc.get() ^ id) as usize) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
                let entry = LIBAFL_QEMU_EDGES_MAP_PTR.add(x);
                *entry = (*entry).wrapping_add(1);
                *prev_loc.get() = id.overflowing_shr(1).0;
            });
        }
    }

    /// Like `trace_block_transition_hitcount`, but the entry is set to 1.
    ///
    /// # Safety
    ///
    /// Dereferences the global `PREV_LOC` variable. May not be called concurrently.
    pub unsafe extern "C" fn trace_block_transition_single(_: *const (), id: u64) {
        unsafe {
            PREV_LOC.with(|prev_loc| {
                let x = ((*prev_loc.get() ^ id) as usize) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX;
                let entry = LIBAFL_QEMU_EDGES_MAP_PTR.add(x);
                *entry = 1;
                *prev_loc.get() = id.overflowing_shr(1).0;
            });
        }
    }
}

View File

@ -0,0 +1,403 @@
use std::fmt::Debug;
use libafl::{inputs::UsesInput, observers::VarLenMapObserver, HasMetadata};
use libafl_bolts::Error;
use libafl_qemu_sys::GuestAddr;
#[cfg(feature = "systemmode")]
use libafl_qemu_sys::GuestPhysAddr;
use crate::{
emu::EmulatorModules,
modules::{AddressFilter, EmulatorModule, EmulatorModuleTuple, PageFilter},
};
mod helpers;
use helpers::{
LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE, LIBAFL_QEMU_EDGES_MAP_MASK_MAX,
LIBAFL_QEMU_EDGES_MAP_PTR, LIBAFL_QEMU_EDGES_MAP_SIZE_PTR,
};
pub mod full;
pub use full::{
EdgeCoverageFullVariant, StdEdgeCoverageFullModule, StdEdgeCoverageFullModuleBuilder,
};
pub mod classic;
pub use classic::{
EdgeCoverageClassicVariant, StdEdgeCoverageClassicModule, StdEdgeCoverageClassicModuleBuilder,
};
pub mod child;
pub use child::{
EdgeCoverageChildVariant, StdEdgeCoverageChildModule, StdEdgeCoverageChildModuleBuilder,
};
use libafl::observers::ConstLenMapObserver;
/// Standard edge coverage module, adapted to most use cases
pub type StdEdgeCoverageModule = StdEdgeCoverageFullModule;

/// Standard edge coverage module builder, adapted to most use cases
pub type StdEdgeCoverageModuleBuilder = StdEdgeCoverageFullModuleBuilder;

/// Child-variant module alias: hash-based ids that may collide.
pub type CollidingEdgeCoverageModule<AF, PF, const IS_CONST_MAP: bool, const MAP_SIZE: usize> =
    EdgeCoverageModule<AF, PF, EdgeCoverageChildVariant, IS_CONST_MAP, MAP_SIZE>;

/// An edge coverage module variant.
///
/// Each variant provides the JIT / function-hook installers for the
/// hitcount / no-hitcount combinations it supports; unsupported combinations
/// keep the panicking defaults below.
trait EdgeCoverageVariant<AF, PF, const IS_CONST_MAP: bool, const MAP_SIZE: usize>:
    'static + Debug
{
    // Whether hook generation mutates the fuzzer state (overridden per variant).
    const DO_SIDE_EFFECTS: bool = true;

    fn jit_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("JIT hitcount is not supported.")
    }

    fn jit_no_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("JIT no hitcount is not supported.")
    }

    fn fn_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("Func hitcount is not supported.")
    }

    fn fn_no_hitcount<ET, S>(&mut self, _emulator_modules: &mut EmulatorModules<ET, S>)
    where
        AF: AddressFilter,
        ET: EmulatorModuleTuple<S>,
        PF: PageFilter,
        S: Unpin + UsesInput + HasMetadata,
    {
        panic!("Func no hitcount is not supported.")
    }
}

/// Builder for [`EdgeCoverageModule`].
///
/// `IS_INITIALIZED` is flipped to `true` by `map_observer` /
/// `const_map_observer`; `IS_CONST_MAP`/`MAP_SIZE` select between a
/// variable-length map and a compile-time-sized one.
#[derive(Debug)]
pub struct EdgeCoverageModuleBuilder<
    AF,
    PF,
    V,
    const IS_INITIALIZED: bool,
    const IS_CONST_MAP: bool,
    const MAP_SIZE: usize,
> {
    variant: V,
    address_filter: AF,
    page_filter: PF,
    use_hitcounts: bool,
    use_jit: bool,
}

/// QEMU emulator module tracking edge coverage, parameterized by address
/// filter `AF`, page filter `PF`, the coverage variant `V` and the map kind.
#[derive(Debug)]
pub struct EdgeCoverageModule<AF, PF, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize> {
    variant: V,
    address_filter: AF,
    // we only use it in system mode at the moment.
    #[cfg_attr(not(feature = "systemmode"), allow(dead_code))]
    page_filter: PF,
    use_hitcounts: bool,
    use_jit: bool,
}

impl<AF, PF, V, const IS_INITIALIZED: bool, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
    EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE>
{
    /// Finalize the builder into a module. Fails to compile unless a map
    /// observer was registered first (compile-time `IS_INITIALIZED` check).
    pub fn build(self) -> Result<EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>, Error> {
        const {
            assert!(
                IS_INITIALIZED,
                "The edge module builder must be first initialized with a call to `map_observer`."
            );
        };
        Ok(EdgeCoverageModule::new(
            self.address_filter,
            self.page_filter,
            self.variant,
            self.use_hitcounts,
            self.use_jit,
        ))
    }
}
impl<AF, PF, V, const IS_INITIALIZED: bool, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE>
{
    /// Internal constructor; `IS_INITIALIZED`/`IS_CONST_MAP`/`MAP_SIZE` are
    /// chosen by the calling builder method.
    fn new(
        variant: V,
        address_filter: AF,
        page_filter: PF,
        use_hitcounts: bool,
        use_jit: bool,
    ) -> Self {
        Self {
            variant,
            address_filter,
            page_filter,
            use_hitcounts,
            use_jit,
        }
    }
#[must_use]
pub fn map_observer<O>(
self,
map_observer: &mut O,
) -> EdgeCoverageModuleBuilder<AF, PF, V, true, false, 0>
where
O: VarLenMapObserver,
{
let map_ptr = map_observer.map_slice_mut().as_mut_ptr() as *mut u8;
let map_max_size = map_observer.map_slice_mut().len();
let size_ptr = map_observer.as_mut().size_mut() as *mut usize;
unsafe {
LIBAFL_QEMU_EDGES_MAP_PTR = map_ptr;
LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = size_ptr;
LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE = map_max_size;
LIBAFL_QEMU_EDGES_MAP_MASK_MAX = map_max_size - 1;
}
EdgeCoverageModuleBuilder::<AF, PF, V, true, false, 0>::new(
self.variant,
self.address_filter,
self.page_filter,
self.use_hitcounts,
self.use_jit,
)
}
#[must_use]
pub fn const_map_observer<O, const NEW_MAP_SIZE: usize>(
self,
_const_map_observer: &mut O,
) -> EdgeCoverageModuleBuilder<AF, PF, V, true, true, NEW_MAP_SIZE>
where
O: ConstLenMapObserver<NEW_MAP_SIZE>,
{
EdgeCoverageModuleBuilder::<AF, PF, V, true, true, NEW_MAP_SIZE>::new(
self.variant,
self.address_filter,
self.page_filter,
self.use_hitcounts,
self.use_jit,
)
}
pub fn variant<V2>(
self,
variant: V2,
) -> EdgeCoverageModuleBuilder<AF, PF, V2, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE> {
EdgeCoverageModuleBuilder::new(
variant,
self.address_filter,
self.page_filter,
self.use_hitcounts,
self.use_jit,
)
}
pub fn address_filter<AF2>(
self,
address_filter: AF2,
) -> EdgeCoverageModuleBuilder<AF2, PF, V, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE> {
EdgeCoverageModuleBuilder::new(
self.variant,
address_filter,
self.page_filter,
self.use_hitcounts,
self.use_jit,
)
}
pub fn page_filter<PF2>(
self,
page_filter: PF2,
) -> EdgeCoverageModuleBuilder<AF, PF2, V, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE> {
EdgeCoverageModuleBuilder::new(
self.variant,
self.address_filter,
page_filter,
self.use_hitcounts,
self.use_jit,
)
}
#[must_use]
pub fn hitcounts(
self,
use_hitcounts: bool,
) -> EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE> {
EdgeCoverageModuleBuilder::new(
self.variant,
self.address_filter,
self.page_filter,
use_hitcounts,
self.use_jit,
)
}
#[must_use]
pub fn jit(
self,
use_jit: bool,
) -> EdgeCoverageModuleBuilder<AF, PF, V, IS_INITIALIZED, IS_CONST_MAP, MAP_SIZE> {
EdgeCoverageModuleBuilder::new(
self.variant,
self.address_filter,
self.page_filter,
self.use_hitcounts,
use_jit,
)
}
}
impl<AF, PF, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
    EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>
{
    /// Assemble an edge coverage module from its parts.
    ///
    /// Normally reached through [`EdgeCoverageModuleBuilder::build`] rather
    /// than called directly.
    #[must_use]
    pub fn new(
        address_filter: AF,
        page_filter: PF,
        variant: V,
        use_hitcounts: bool,
        use_jit: bool,
    ) -> Self {
        Self {
            address_filter,
            page_filter,
            variant,
            use_hitcounts,
            use_jit,
        }
    }
}
impl<AF, PF, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize>
    EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>
where
    AF: AddressFilter,
    PF: PageFilter,
{
    /// Whether the given guest address should be instrumented.
    #[cfg(feature = "usermode")]
    #[must_use]
    pub fn must_instrument(&self, addr: GuestAddr) -> bool {
        self.address_filter.allowed(&addr)
    }

    /// Whether the given guest address — and, when known, its physical page —
    /// should be instrumented.
    #[cfg(feature = "systemmode")]
    #[must_use]
    pub fn must_instrument(&self, addr: GuestAddr, page_id: Option<GuestPhysAddr>) -> bool {
        // With no page id available, fall back to address filtering alone.
        self.address_filter.allowed(&addr)
            && page_id.map_or(true, |page_id| self.page_filter.allowed(&page_id))
    }
}
impl<S, AF, PF, V, const IS_CONST_MAP: bool, const MAP_SIZE: usize> EmulatorModule<S>
    for EdgeCoverageModule<AF, PF, V, IS_CONST_MAP, MAP_SIZE>
where
    AF: AddressFilter + 'static,
    PF: PageFilter + 'static,
    S: Unpin + UsesInput + HasMetadata,
    V: EdgeCoverageVariant<AF, PF, IS_CONST_MAP, MAP_SIZE> + 'static,
{
    type ModuleAddressFilter = AF;
    #[cfg(feature = "systemmode")]
    type ModulePageFilter = PF;

    const HOOKS_DO_SIDE_EFFECTS: bool = V::DO_SIDE_EFFECTS;

    /// Install the variant's edge hooks on first execution, dispatching on
    /// the (hitcounts, jit) configuration chosen at build time.
    fn first_exec<ET>(&mut self, emulator_modules: &mut EmulatorModules<ET, S>, _state: &mut S)
    where
        ET: EmulatorModuleTuple<S>,
    {
        match (self.use_hitcounts, self.use_jit) {
            (true, true) => self.variant.jit_hitcount(emulator_modules),
            (true, false) => self.variant.fn_hitcount(emulator_modules),
            (false, true) => self.variant.jit_no_hitcount(emulator_modules),
            (false, false) => self.variant.fn_no_hitcount(emulator_modules),
        }
    }

    fn address_filter(&self) -> &Self::ModuleAddressFilter {
        &self.address_filter
    }

    fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter {
        &mut self.address_filter
    }

    #[cfg(feature = "systemmode")]
    fn page_filter(&self) -> &Self::ModulePageFilter {
        &self.page_filter
    }

    #[cfg(feature = "systemmode")]
    fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter {
        &mut self.page_filter
    }
}
// Compile-time and runtime checks for the edge coverage module builder.
// `doc` is part of the cfg so that the `compile_fail` doctest below is
// compiled and verified during documentation builds as well.
#[cfg(any(test, doc))]
mod tests {
    use std::ptr::addr_of_mut;

    use libafl::observers::{CanTrack, HitcountsMapObserver, VariableMapObserver};
    use libafl_bolts::ownedref::OwnedMutSlice;
    use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND};

    use crate::modules::StdEdgeCoverageModule;

    /// The test is actually implemented as a doctest, since Rust does not
    /// permit tests that must not compile by default...
    ///
    /// ```compile_fail
    /// use libafl_qemu::modules::StdEdgeCoverageModule;
    ///
    /// StdEdgeCoverageModule::builder().build().unwrap();
    /// ```
    #[allow(unused)]
    pub fn does_not_build() {}

    // Building succeeds once a map observer has been registered.
    #[test]
    pub fn does_build() {
        // The observer wraps the global edges map and the global edge counter,
        // hence the unsafe construction from raw parts.
        let mut edges_observer = unsafe {
            HitcountsMapObserver::new(VariableMapObserver::from_mut_slice(
                "edges",
                OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE),
                addr_of_mut!(MAX_EDGES_FOUND),
            ))
            .track_indices()
        };

        StdEdgeCoverageModule::builder()
            .map_observer(edges_observer.as_mut())
            .build()
            .unwrap();
    }
}

View File

@ -18,7 +18,12 @@ pub mod systemmode;
pub use systemmode::*;
pub mod edges;
pub use edges::*;
pub use edges::{
EdgeCoverageModule, EdgeCoverageModuleBuilder, StdEdgeCoverageChildModule,
StdEdgeCoverageChildModuleBuilder, StdEdgeCoverageClassicModule,
StdEdgeCoverageClassicModuleBuilder, StdEdgeCoverageFullModule,
StdEdgeCoverageFullModuleBuilder, StdEdgeCoverageModule, StdEdgeCoverageModuleBuilder,
};
#[cfg(not(cpu_target = "hexagon"))]
pub mod calls;
@ -33,7 +38,7 @@ pub use cmplog::CmpLogModule;
#[cfg(not(cpu_target = "hexagon"))]
pub mod drcov;
#[cfg(not(cpu_target = "hexagon"))]
pub use drcov::*;
pub use drcov::{DrCovMetadata, DrCovModule, DrCovModuleBuilder};
use crate::{emu::EmulatorModules, Qemu};