Implement backtrace observers for crash dedupe (#379)

* create stacktrace observer

* create stacktrace feedback

* post-merge fixes

* address comments

* update Cargo.toml

* fix CI issue + dynamic naming

* duplicate baby_fuzzer

* update stacktrace baby_fuzzer

* force unwinding tables

* ignore test dumps

* fix stacktrace baby_fuzzer logic

* upgrade Backtrace version

* trigger observers.post_exec in crash_handler

* implement NewHashFeedbackState and update logic

* digest symbols pointers

* cleanup

* minimal output

* fix backdated EventFirer generic param

* add baby_fuzzer example with a fork executor

* duplicate baby_fuzzer_stacktrace with forkexecutor

* backtrace collection implemented

* add c app fuzzer example with a fork executor

* group backtrace baby fuzzers

* added c code baby fuzzer with inprocess executor

* remove need for static COLLECT_BACKTRACE

* moved code to stacktrace.rs + fixed bug

* add comment

* add command executor fuzzer example

* post merge cleanup

* add missing doc

* address comment

* fix nit

* clean duplicate variable in timeout handler

* fix command executor bt collection

* clean code and use StdShMem

* cleanup

* add ObserverWithHashField + rename StacktraceObserver

* rename + refactor some code

* add CommandBacktraceObserver

* update command executor

* update baby fuzzers

* simplify BacktraceSharedMemoryWrapper

* use better names + static methods

* use std feature macro on BacktraceObserver + fix bug

* use Box in HashValueWrapper to minimize variants size diff

* use copy_from_slice

* std conditional backtrace collection

* fix std import

* fix comment

* add exit_kind to observer.post_exec

* added hash trait to Input

* collect backtrace in post_exec

* add crash handlers to InProcessForkExecutor

* fix panic message

* duplicate forkserver fuzzer example

minimal example

update

* proto bt collection working

* rename CommandBacktraceExecutor to ASANBacktraceExecutor

* refactor ASANBacktraceObserver

* support for forkserver working

* update fuzzer example

* less verbosity

* Post merge fixes

* implement hash for GeneralizedInput

* update forkserver example after merge

* clippy fixes

* fix inproc test

* fixes for cargo hack --feature-powerset

* fix baby_no_std

* implement Hash for NautilusInput

* update fork executor baby fuzzer

* fix doc

* implement Hash for PacketData

* fix windows build

* fix windows no_std

* fix backtrace baby fuzzers README

* add comments

* move setup_bt_panic to constructor

* pre/post child exec hooks in Observer

* setup_child_panic_hook

* fix ObserversOwnedMap on nightly

* add backtrace fuzzers to CI checks

* fix typo

* fix relative paths in test_all_fuzzers.sh

Co-authored-by: Andrea Fioraldi <andreafioraldi@gmail.com>
Youssef 2022-01-31 15:58:15 +01:00 committed by GitHub
parent 62e514e61d
commit e307dfb16f
62 changed files with 2636 additions and 73 deletions

View File

@ -0,0 +1,2 @@
libpng-*
rundumps/*

View File

@ -0,0 +1,22 @@
[package]
name = "baby_fuzzer_with_forkexecutor"
version = "0.6.1"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2021"
[features]
default = ["std"]
std = []
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../libafl/" }

View File

@ -0,0 +1,8 @@
# Baby fuzzer
This is a minimalistic example of how to create a libafl-based fuzzer.
It runs on a single core until a crash occurs and then exits.
The tested program is a simple Rust function without any instrumentation.
For real fuzzing, you will want to add some form of coverage or other feedback.

View File

@ -0,0 +1,132 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;
use libafl::{
bolts::{
current_nanos,
rands::StdRand,
shmem::{unix_shmem, ShMemProvider},
tuples::tuple_list,
AsMutSlice, AsSlice,
},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
executors::{ExitKind, InProcessForkExecutor},
feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::StdMapObserver,
stages::mutational::StdMutationalStage,
state::StdState,
};
#[allow(clippy::similar_names)]
pub fn main() {
let mut shmem_provider = unix_shmem::UnixShMemProvider::new().unwrap();
let mut signals = shmem_provider.new_shmem(16).unwrap();
let mut signals_clone = signals.clone();
let mut signals_set = |idx: usize| {
let a = signals.as_mut_slice();
a[idx] = 1;
};
// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
signals_set(0);
if !buf.is_empty() && buf[0] == b'a' {
signals_set(1);
if buf.len() > 1 && buf[1] == b'b' {
signals_set(2);
if buf.len() > 2 && buf[2] == b'c' {
// removed the windows panic for simplicity, will add later
#[cfg(unix)]
panic!("panic 1");
}
if buf.len() > 2 && buf[2] == b'd' {
#[cfg(unix)]
panic!("panic 2");
}
if buf.len() > 2 && buf[2] == b'e' {
#[cfg(unix)]
panic!("panic 3");
}
}
}
ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", signals_clone.as_mut_slice());
// Create a stacktrace observer to add the observers tuple
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
// Feedback to rate the interestingness of an input, obtained by ANDing the interestingness of both feedbacks
let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not
let objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state),
);
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for an in-process function with just one observer
let mut executor = InProcessForkExecutor::new(
&mut harness,
tuple_list!(observer),
&mut fuzzer,
&mut state,
&mut mgr,
shmem_provider,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,12 @@
# Backtrace baby fuzzers
The projects in this directory are simple fuzzers derived from the original baby_fuzzer examples. Their purpose is to show how to use a `BacktraceObserver` or an `ASANBacktraceObserver` to dedupe crashes, together with the other components this feature needs; the objective pattern they all share is sketched after the list below.
The examples cover:
- An `InProcessForkExecutor` fuzzing a C harness
- An `InProcessForkExecutor` fuzzing a Rust harness
- An `InProcessExecutor` fuzzing a C harness
- An `InProcessExecutor` fuzzing a Rust harness
- A `CommandExecutor` fuzzing a simple binary
- A `ForkServerExecutor` fuzzing a simple binary
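
All examples share the same crash-dedupe objective: a testcase is only kept as a solution if it crashes *and* the hash of its backtrace has not been seen before. A rough sketch of that pattern (a fragment, not a complete program; the observer must also be added to the executor's observers tuple and the feedback state to the `State`, as done in each full example below):

use libafl::{
feedback_and,
feedbacks::{CrashFeedback, NewHashFeedback, NewHashFeedbackState},
observers::BacktraceObserver,
};
// Observer that collects and hashes the backtrace of a crash/panic
let bt_observer =
BacktraceObserver::new("BacktraceObserver", libafl::observers::HarnessType::RUST);
// Feedback state that remembers which backtrace hashes were already seen
let bt_feedback_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// A crash is only a new solution if its backtrace hash is new
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::<BacktraceObserver>::new_with_observer("BacktraceObserver", &bt_observer)
);

For targets that run out of process (the command executor and forkserver examples), an `ASANBacktraceObserver` is used instead, fed from the target's ASAN output.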

View File

@ -0,0 +1,2 @@
[build]
rustflags = ["-Cforce-unwind-tables=y"]

View File

@ -0,0 +1,2 @@
libpng-*
rundumps/*

View File

@ -0,0 +1,22 @@
[package]
name = "c_code_with_fork_executor"
version = "0.0.1"
edition = "2021"
[features]
default = ["std"]
std = []
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../../libafl/" }
libc = "0.2"
[build-dependencies]
cc = "1.0"

View File

@ -0,0 +1,5 @@
extern crate cc;
fn main() {
cc::Build::new().file("src/harness.c").compile("harness.a");
}

View File

@ -0,0 +1,84 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/shm.h>
#define MAX_INPUT_SIZE 3
#define SHMEM_COUNT 100
int shmid;
key_t key = 58974;
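// Create a SysV shared memory segment of SHMEM_COUNT bytes, attach it and zero it; it is used as the coverage map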
int create_shmem_array() {
shmid = shmget(key, SHMEM_COUNT * sizeof(uint8_t), IPC_CREAT | 0666);
printf("created a shared memory segment with shmid=%d\n", shmid);
void *res = shmat(shmid, NULL, 0);
uint8_t *array_ptr = (uint8_t *)res;
for (int i = 0; i < SHMEM_COUNT; i++) {
array_ptr[i] = 0;
}
return 0;
}
int set_value(int i) {
void *res = shmat(shmid, NULL, 0);
uint8_t *array_ptr = (uint8_t *)res;
array_ptr[i] = 1;
return 0;
}
uint8_t get_value(int i) {
void *res = shmat(shmid, NULL, 0);
uint8_t *array_ptr = (uint8_t *)res;
return array_ptr[i];
}
int destroy_shmem(int id) {
if (-1 == shmctl(id, IPC_RMID, NULL)) {
return -1;
}
return 0;
}
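// The fuzz target: records progress in the shared map and aborts on the inputs "aba", "abb" and "abc"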
void c_harness(uint8_t *array) {
set_value(0);
if (array[0] == 'a') {
set_value(1);
if (array[1] == 'b') {
set_value(2);
if (array[2] == 'a') {
// abort 1
abort();
}
if (array[2] == 'b') {
// abort 2
abort();
}
if (array[2] == 'c') {
// abort 3
abort();
}
}
}
}
uint8_t *get_ptr() {
void *res = shmat(shmid, NULL, 0);
return (uint8_t *)res;
}
// To remove
// int main() {
// create_shmem_array();
// uint8_t input[MAX_INPUT_SIZE] = {0};
// input[0] = 97;
// input[1] = 98;
// input[2] = 92;
// c_harness(&input);
// printf("%d", get_value(0));
// printf("%d", get_value(1));
// printf("%d", get_value(2));
// printf("%d", get_value(3));
// destroy_shmem(shmid);
// return 0;
// }

View File

@ -0,0 +1,118 @@
use std::path::PathBuf;
use libafl::bolts::shmem::ShMemProvider;
use libafl::bolts::AsSlice;
use libafl::observers::ConstMapObserver;
use libafl::{
bolts::{current_nanos, rands::StdRand, shmem::UnixShMemProvider, tuples::tuple_list},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
executors::InProcessForkExecutor,
feedback_and,
feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NewHashFeedback, NewHashFeedbackState,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::BacktraceObserver,
stages::mutational::StdMutationalStage,
state::StdState,
};
use libc::c_int;
use libc::c_uchar;
extern crate libc;
extern "C" {
fn c_harness(input: *const c_uchar);
fn create_shmem_array() -> c_int;
fn get_ptr() -> *mut u8;
}
#[allow(clippy::similar_names)]
pub fn main() {
let shmem_provider = UnixShMemProvider::new().unwrap();
unsafe { create_shmem_array() };
let map_ptr = unsafe { get_ptr() };
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
unsafe { c_harness(buf.as_ptr()) }
libafl::executors::ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = unsafe { ConstMapObserver::<u8, 3>::new_from_ptr("signals", map_ptr) };
// Create a stacktrace observer
let bt_observer =
BacktraceObserver::new("BacktraceObserver", libafl::observers::HarnessType::FFI);
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
let st_feedback_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// Feedback to rate the interestingness of an input, obtained by ANDing the interestingness of both feedbacks
let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::<BacktraceObserver>::new_with_observer("BacktraceObserver", &bt_observer)
);
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state, st_feedback_state),
);
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the fork executor for an in-process function, passing both observers
let mut executor = InProcessForkExecutor::new(
&mut harness,
tuple_list!(observer, bt_observer),
&mut fuzzer,
&mut state,
&mut mgr,
shmem_provider,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,2 @@
[build]
rustflags = ["-Cforce-unwind-tables=y"]

View File

@ -0,0 +1,2 @@
libpng-*
rundumps/*

View File

@ -0,0 +1,22 @@
[package]
name = "c_code_with_fork_executor"
version = "0.0.1"
edition = "2021"
[features]
default = ["std"]
std = []
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../../libafl/" }
libc = "0.2"
[build-dependencies]
cc = "1.0"

View File

@ -0,0 +1,5 @@
extern crate cc;
fn main() {
cc::Build::new().file("src/harness.c").compile("harness.a");
}

View File

@ -0,0 +1,46 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/shm.h>
#define MAX_INPUT_SIZE 3
#define MAP_SIZE 100
// uint8_t *array;
uint8_t array[MAP_SIZE];
uint8_t *array_ptr = array;
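// Zero the coverage map; this variant keeps the map in a static array instead of SysV shared memory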
int init() {
for (int i = 0; i < MAP_SIZE; i++) {
array[i] = 0;
}
return 0;
}
int set_value(int i) {
array[i] = 1;
return 0;
}
void c_harness(uint8_t *array) {
set_value(0);
if (array[0] == 'a') {
set_value(1);
if (array[1] == 'b') {
set_value(2);
if (array[2] == 'a') {
// abort 1
abort();
}
if (array[2] == 'b') {
// abort 2
abort();
}
if (array[2] == 'c') {
// abort 3
abort();
}
}
}
}

View File

@ -0,0 +1,111 @@
use std::path::PathBuf;
use libafl::bolts::AsSlice;
use libafl::executors::InProcessExecutor;
use libafl::observers::ConstMapObserver;
use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
feedback_and,
feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NewHashFeedback, NewHashFeedbackState,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::BacktraceObserver,
stages::mutational::StdMutationalStage,
state::StdState,
};
use libc::c_uchar;
extern crate libc;
extern "C" {
fn c_harness(input: *const c_uchar);
static array_ptr: *mut u8;
}
#[allow(clippy::similar_names)]
pub fn main() {
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
unsafe { c_harness(buf.as_ptr()) }
libafl::executors::ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = unsafe { ConstMapObserver::<u8, 3>::new_from_ptr("signals", array_ptr) };
// Create a stacktrace observer
let bt_observer =
BacktraceObserver::new("BacktraceObserver", libafl::observers::HarnessType::FFI);
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
let st_feedback_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// Feedback to rate the interestingness of an input, obtained by ANDing the interestingness of both feedbacks
let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::<BacktraceObserver>::new_with_observer("BacktraceObserver", &bt_observer)
);
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state, st_feedback_state),
);
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for an in-process function, passing both observers
let mut executor = InProcessExecutor::new(
&mut harness,
tuple_list!(observer, bt_observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,2 @@
[build]
rustflags = ["-Cforce-unwind-tables=y"]

View File

@ -0,0 +1,2 @@
libpng-*
rundumps/*

View File

@ -0,0 +1,20 @@
[package]
name = "command_executor"
version = "0.0.1"
edition = "2021"
[features]
default = ["std"]
std = []
[profile.release]
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../../libafl/" }
ahash = { version = "0.7"} # another hash
spawn-ptrace = {version= "0.1.2"}

View File

@ -0,0 +1,139 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;
use libafl::{
bolts::{
current_nanos,
rands::StdRand,
shmem::{unix_shmem, ShMem, ShMemId, ShMemProvider},
tuples::tuple_list,
AsMutSlice, AsSlice,
},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
feedback_and,
feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NewHashFeedback, NewHashFeedbackState,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{HasTargetBytes, Input},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::{get_asan_runtime_flags, ASANBacktraceObserver, StdMapObserver},
stages::mutational::StdMutationalStage,
state::StdState,
};
use libafl::{executors::command::CommandConfigurator, Error};
use std::{
io::Write,
process::{Child, Command, Stdio},
};
#[allow(clippy::similar_names)]
pub fn main() {
let mut shmem_provider = unix_shmem::UnixShMemProvider::new().unwrap();
let mut signals = shmem_provider.new_shmem(3).unwrap();
let shmem_id = signals.id();
// Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", signals.as_mut_slice());
// Create a stacktrace observer
let bt_observer = ASANBacktraceObserver::new("ASANBacktraceObserver");
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
let bt_feedback_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// Feedback to rate the interestingness of an input, obtained by ANDing the interestingness of both feedbacks
let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::<ASANBacktraceObserver>::new_with_observer(
"ASANBacktraceObserver",
&bt_observer
)
);
// let objective = CrashFeedback::new();
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state, bt_feedback_state),
);
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Define a CommandConfigurator that spawns the target binary as a child process
#[derive(Debug)]
struct MyExecutor {
shmem_id: ShMemId,
}
impl<EM, I: Input + HasTargetBytes, S, Z> CommandConfigurator<EM, I, S, Z> for MyExecutor {
fn spawn_child(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_mgr: &mut EM,
input: &I,
) -> Result<Child, Error> {
let mut command = Command::new("./test_command");
let command = command
.args(&[self.shmem_id.as_str()])
.env("ASAN_OPTIONS", get_asan_runtime_flags());
command
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
let child = command.spawn().expect("failed to start process");
let mut stdin = child.stdin.as_ref().unwrap();
stdin.write_all(input.target_bytes().as_slice())?;
Ok(child)
}
}
let mut executor = MyExecutor { shmem_id }.into_executor(tuple_list!(observer, bt_observer));
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,88 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/shm.h>
#include <sys/types.h>
#include <unistd.h>
int shmid;
int set_shmid(int id) {
shmid = id;
return 0;
}
int set_value(int i) {
void *res = shmat(shmid, NULL, 0);
if (res == (void *)-1) {
printf("Failed to attach to memory with id=%d\n", shmid);
} else {
printf("pointer is %p\n", res);
}
uint8_t *array_ptr = (uint8_t *)res;
array_ptr[i] = 1;
return 0;
}
uint8_t get_value(int i) {
void *res = shmat(shmid, NULL, 0);
if (res == (void *)-1) {
printf("Failed to attach to memory with id=%d\n", shmid);
}
uint8_t *array_ptr = (uint8_t *)res;
return array_ptr[i];
}
int destroy_shmem() {
if (-1 == shmctl(shmid, IPC_RMID, NULL)) {
return -1;
}
return 0;
}
void c_harness(char *array) {
set_value(0);
if (array[0] == 'a') {
set_value(1);
if (array[1] == 'b') {
set_value(2);
if (array[2] == 'a') {
// abort 1
// fprintf(stderr, "Will abort1\n");
abort();
}
if (array[2] == 'b') {
// abort 2
// fprintf(stderr, "Will abort2\n");
abort();
}
if (array[2] == 'c') {
// abort 3
// fprintf(stderr, "Will abort3\n");
abort();
}
}
}
}
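// Reads the shmem id from argv[1] and the testcase from stdin, runs the harness, then reports the coverage values before removing the segment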
int main(int argc, char *argv[]) {
printf("running test_command\n");
if (argc != 2) {
printf("Need exactly two arguments\n");
exit(-1);
}
int id = atoi(argv[1]);
set_shmid(id);
char buffer[100] = {0};
read(STDIN_FILENO, buffer, 100);
c_harness(buffer);
printf("value[0]=%d\n", get_value(0));
printf("value[1]=%d\n", get_value(1));
printf("value[2]=%d\n", get_value(2));
if (destroy_shmem() == -1) {
printf("Failed to destroy the shared memory\n");
exit(-1);
}
return 0;
}

Binary file not shown.

View File

@ -0,0 +1,3 @@
forkserver_simple
rundumps
asanlog*

View File

@ -0,0 +1,18 @@
[package]
name = "forkserver_executor"
version = "0.0.1"
edition = "2021"
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
[dependencies]
libafl = { path = "../../../libafl/" }
clap = { version = "3.0", features = ["default"] }

View File

@ -0,0 +1,47 @@
use std::env;
use std::path::Path;
use std::process::{exit, Command};
const AFL_URL: &str = "https://github.com/AFLplusplus/AFLplusplus";
fn main() {
if cfg!(windows) {
println!("cargo:warning=No support for windows yet.");
exit(0);
}
let cwd = env::current_dir().unwrap().to_string_lossy().to_string();
let afl = format!("{}/AFLplusplus", &cwd);
let afl_gcc = format!("{}/AFLplusplus/afl-cc", &cwd);
let afl_path = Path::new(&afl);
let afl_gcc_path = Path::new(&afl_gcc);
if !afl_path.is_dir() {
println!("cargo:warning=AFL++ not found, downloading...");
Command::new("git")
.arg("clone")
.arg(AFL_URL)
.status()
.unwrap();
}
if !afl_gcc_path.is_file() {
Command::new("make")
.arg("all")
.current_dir(&afl_path)
.status()
.unwrap();
}
Command::new(afl_gcc_path)
.args(&["src/program.c", "-o"])
.arg(&format!("{}/target/release/program", &cwd))
.arg("-fsanitize=address")
.status()
.unwrap();
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=src/");
}

View File

@ -0,0 +1,113 @@
use libafl::{
bolts::{
current_nanos,
rands::StdRand,
shmem::{ShMem, ShMemProvider, StdShMemProvider},
tuples::tuple_list,
AsMutSlice,
},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
executors::forkserver::ForkserverExecutor,
feedback_and,
feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NewHashFeedback, NewHashFeedbackState,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::BytesInput,
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::{ASANBacktraceObserver, ConstMapObserver, HitcountsMapObserver},
stages::mutational::StdMutationalStage,
state::StdState,
};
use std::path::PathBuf;
#[allow(clippy::similar_names)]
pub fn main() {
const MAP_SIZE: usize = 65536;
//Coverage map shared between observer and executor
let mut shmem_provider = StdShMemProvider::new().unwrap();
let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap();
//let the forkserver know the shmid
shmem.write_to_env("__AFL_SHM_ID").unwrap();
let shmem_map = shmem.as_mut_slice();
// Create an observation channel using the coverage map
let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new(
"shared_mem",
shmem_map,
));
let bt_observer = ASANBacktraceObserver::new("ASANBacktraceObserver");
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&edges_observer);
let bt_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// Feedback to rate the interestingness of an input
// This one is composed by two Feedbacks in OR
let feedback = MaxMapFeedback::new_tracking(&feedback_state, &edges_observer, true, false);
// A feedback to choose if an input is a solution or not
// We want to do the same crash deduplication that AFL does
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::new_with_observer("NewHashFeedback", &bt_observer)
);
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::<BytesInput>::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state, bt_state),
);
// The Monitor trait defines how the fuzzer stats are reported to the user
let monitor = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(monitor);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for the forkserver
let mut executor = ForkserverExecutor::with_shmem_inputs(
"./target/release/program".to_string(),
&[],
tuple_list!(edges_observer, bt_observer),
true,
&mut shmem_provider,
)
.expect("Failed to create the executor.");
// Generator of printable bytearrays of max size 3
let mut generator = RandPrintablesGenerator::new(3);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,44 @@
#include <stdio.h>
#include <stdlib.h>
// The following line is needed for shared memory testcase fuzzing
__AFL_FUZZ_INIT();
int main(int argc, char **argv) {
FILE *file = stdin;
if (argc > 1) {
file = fopen(argv[1], "rb");
}
// The following three lines are for normal fuzzing.
/*
char buf[16];
char* p = fgets(buf, 16, file);
buf[15] = 0;
*/
// The following line is also needed for shared memory testcase fuzzing
unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF;
printf("input: %s\n", buf);
if (buf[0] == 'a') {
if (buf[1] == 'b') {
if (buf[2] == 'a') {
fprintf(stdout, "abort1");
abort();
}
if (buf[2] == 'b') {
fprintf(stdout, "abort2");
abort();
}
if (buf[2] == 'c') {
fprintf(stdout, "abort3");
abort();
}
}
}
return 0;
}

View File

@ -0,0 +1,2 @@
[build]
rustflags = ["-Cforce-unwind-tables=y"]

View File

@ -0,0 +1,2 @@
libpng-*
rundumps/*

View File

@ -0,0 +1,21 @@
[package]
name = "rust_code_with_fork_executor"
version = "0.0.1"
edition = "2021"
[features]
default = ["std"]
std = []
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../../libafl/" }

View File

@ -0,0 +1,140 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;
use libafl::{
bolts::{
current_nanos,
rands::StdRand,
shmem::{unix_shmem, ShMemProvider},
tuples::tuple_list,
AsMutSlice, AsSlice,
},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
executors::{ExitKind, InProcessForkExecutor},
feedback_and,
feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NewHashFeedback, NewHashFeedbackState,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::{BacktraceObserver, StdMapObserver},
stages::mutational::StdMutationalStage,
state::StdState,
};
#[allow(clippy::similar_names)]
pub fn main() {
let mut shmem_provider = unix_shmem::UnixShMemProvider::new().unwrap();
let mut signals = shmem_provider.new_shmem(16).unwrap();
let mut signals_clone = signals.clone();
let mut signals_set = |idx: usize| {
let a = signals.as_mut_slice();
a[idx] = 1;
};
// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
signals_set(0);
if !buf.is_empty() && buf[0] == b'a' {
signals_set(1);
if buf.len() > 1 && buf[1] == b'b' {
signals_set(2);
if buf.len() > 2 && buf[2] == b'c' {
#[cfg(unix)]
panic!("panic 1");
}
if buf.len() > 2 && buf[2] == b'd' {
#[cfg(unix)]
panic!("panic 2");
}
if buf.len() > 2 && buf[2] == b'e' {
#[cfg(unix)]
panic!("panic 3");
}
}
}
ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", signals_clone.as_mut_slice());
// Create a stacktrace observer
let bt_observer =
BacktraceObserver::new("BacktraceObserver", libafl::observers::HarnessType::RUST);
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
let bt_feedback_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// Feedback to rate the interestingness of an input, obtained by ANDing the interestingness of both feedbacks
let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::<BacktraceObserver>::new_with_observer("BacktraceObserver", &bt_observer)
);
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state, bt_feedback_state),
);
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the fork executor for an in-process function, passing both observers
let mut executor = InProcessForkExecutor::new(
&mut harness,
tuple_list!(observer, bt_observer),
&mut fuzzer,
&mut state,
&mut mgr,
shmem_provider,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,2 @@
[build]
rustflags = ["-Cforce-unwind-tables=y"]

View File

@ -0,0 +1,2 @@
libpng-*
rundumps/*

View File

@ -0,0 +1,21 @@
[package]
name = "rust_code_with_inprocess_executor"
version = "0.0.1"
edition = "2021"
[features]
default = ["std"]
std = []
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true
[dependencies]
libafl = { path = "../../../libafl/" }

View File

@ -0,0 +1,133 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;
use libafl::{
bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice},
corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
events::SimpleEventManager,
executors::{inprocess::InProcessExecutor, ExitKind},
feedback_and,
feedbacks::{
CrashFeedback, MapFeedbackState, MaxMapFeedback, NewHashFeedback, NewHashFeedbackState,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator,
inputs::{BytesInput, HasTargetBytes},
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::{BacktraceObserver, StdMapObserver},
stages::mutational::StdMutationalStage,
state::StdState,
};
/// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];
/// Assign a signal to the signals map
fn signals_set(idx: usize) {
unsafe { SIGNALS[idx] = 1 };
}
#[allow(clippy::similar_names)]
pub fn main() {
// The closure that we want to fuzz
let mut harness = |input: &BytesInput| {
let target = input.target_bytes();
let buf = target.as_slice();
signals_set(0);
if !buf.is_empty() && buf[0] == b'a' {
signals_set(1);
if buf.len() > 1 && buf[1] == b'b' {
signals_set(2);
if buf.len() > 2 && buf[2] == b'c' {
// removed the windows panic for simplicity, will add later
#[cfg(unix)]
panic!("panic 1");
}
if buf.len() > 2 && buf[2] == b'd' {
#[cfg(unix)]
panic!("panic 2");
}
if buf.len() > 2 && buf[2] == b'e' {
#[cfg(unix)]
panic!("panic 3");
}
}
}
ExitKind::Ok
};
// Create an observation channel using the signals map
let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });
// Create a stacktrace observer to add to the observers tuple
let bt_observer =
BacktraceObserver::new("BacktraceObserver", libafl::observers::HarnessType::RUST);
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&observer);
let hash_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// Feedback to rate the interestingness of an input, obtained by ANDing the interestingness of both feedbacks
let feedback = MaxMapFeedback::new(&feedback_state, &observer);
// A feedback to choose if an input is a solution or not
let objective = feedback_and!(
CrashFeedback::new(),
NewHashFeedback::<BacktraceObserver>::new_with_observer("NewHashFeedback", &bt_observer)
);
// create a State from scratch
let mut state = StdState::new(
// RNG
StdRand::with_seed(current_nanos()),
// Corpus that will be evolved, we keep it in memory for performance
InMemoryCorpus::new(),
// Corpus in which we store solutions (crashes in this example),
// on disk so the user can get them after stopping the fuzzer
OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
// States of the feedbacks.
// They are the data related to the feedbacks that you want to persist in the State.
tuple_list!(feedback_state, hash_state),
);
// The Monitor trait defines how the fuzzer stats are displayed to the user
let mon = SimpleMonitor::new(|s| println!("{}", s));
// The event manager handles the various events generated during the fuzzing loop,
// such as the notification of the addition of a new item to the corpus
let mut mgr = SimpleEventManager::new(mon);
// A queue policy to get testcases from the corpus
let scheduler = QueueCorpusScheduler::new();
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for an in-process function, passing both observers
let mut executor = InProcessExecutor::new(
&mut harness,
tuple_list!(observer, bt_observer),
&mut fuzzer,
&mut state,
&mut mgr,
)
.expect("Failed to create the Executor");
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(32);
// Generate 8 initial inputs
state
.generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus");
// Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
fuzzer
.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
.expect("Error in the fuzzing loop");
}

Binary file not shown.

View File

@ -6,6 +6,7 @@ use libafl::{
use lain::prelude::*;
use serde::{Deserialize, Serialize};
use std::hash::Hash;
#[derive(
Serialize,
@ -35,7 +36,7 @@ impl Fixup for PacketData {
}
#[derive(
Serialize, Deserialize, Debug, Copy, Clone, FuzzerObject, ToPrimitiveU32, BinarySerialize,
Serialize, Deserialize, Debug, Copy, Clone, FuzzerObject, ToPrimitiveU32, BinarySerialize, Hash,
)]
#[repr(u32)]
pub enum PacketType {
@ -70,3 +71,15 @@ impl HasLen for PacketData {
self.serialized_size()
}
}
impl Hash for PacketData {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self.typ {
UnsafeEnum::Invalid(a) => a.hash(state),
UnsafeEnum::Valid(a) => a.hash(state),
}
self.offset.hash(state);
self.length.hash(state);
self.data.hash(state);
}
}

View File

@ -12,7 +12,7 @@ edition = "2021"
[features]
default = ["std", "derive", "llmp_compression", "rand_trait", "fork"]
std = ["serde_json", "serde_json/std", "hostname", "core_affinity", "nix", "serde/std", "bincode", "wait-timeout", "regex", "build_id", "uuid", "tui_monitor"] # print, env, launcher ... support
std = ["serde_json", "serde_json/std", "hostname", "core_affinity", "nix", "serde/std", "bincode", "wait-timeout", "regex", "build_id", "uuid", "tui_monitor", "backtrace"] # print, env, launcher ... support
derive = ["libafl_derive"] # provide derive(SerdeAny) macro.
fork = [] # uses the fork() syscall to spawn children, instead of launching a new command, if supported by the OS (has no effect on Windows, no_std).
rand_trait = ["rand_core"] # If set, libafl's rand implementations will implement `rand::Rng`
@ -64,6 +64,7 @@ num_enum = { version = "0.5.4", default-features = false }
typed-builder = "0.9.1" # Implement the builder pattern at compiletime
ahash = { version = "0.7", default-features=false, features=["compile-time-rng"] } # The hash function already used in hashbrown
intervaltree = { version = "0.2.7", default-features = false, features = ["serde"] }
backtrace = { version = "0.3.62", optional = true } # Used to get the stacktrace in BacktraceObserver
serde_json = { version = "1.0", optional = true, default-features = false, features = ["alloc"] }
miniz_oxide = { version = "0.5", optional = true}
@ -92,7 +93,6 @@ libc = "0.2" # For (*nix) libc
uds = "0.2.3"
lock_api = "0.4.3"
regex = "1.4.5"
backtrace = "0.3"
[target.'cfg(windows)'.dependencies]
windows = { version = "0.29.0", features = ["std", "Win32_Foundation", "Win32_System_Threading", "Win32_System_Diagnostics_Debug", "Win32_System_Kernel", "Win32_System_Memory", "Win32_Security"] }

View File

@ -2197,7 +2197,7 @@ where
.to_string_lossy()
.into();
let broker_hello = TcpResponse::BrokerConnectHello {
broker_shmem_description: broker_shmem_description,
broker_shmem_description,
hostname,
};

View File

@ -7,6 +7,7 @@ use core::{
#[cfg(feature = "std")]
use std::process::Child;
use crate::observers::ASANBacktraceObserver;
#[cfg(feature = "std")]
use crate::{executors::HasObservers, inputs::Input, observers::ObserversTuple, Error};
@ -15,7 +16,6 @@ use crate::executors::{Executor, ExitKind};
#[cfg(all(feature = "std", unix))]
use std::time::Duration;
/// A `CommandExecutor` is a wrapper around [`std::process::Command`] to execute a target as a child process.
/// Construct a `CommandExecutor` by implementing [`CommandConfigurator`] for a type of your choice and calling [`CommandConfigurator::into_executor`] on it.
pub struct CommandExecutor<EM, I, OT: Debug, S, T: Debug, Z> {
@ -61,7 +61,7 @@ where
let mut child = self.inner.spawn_child(_fuzzer, _state, _mgr, input)?;
match child
let res = match child
.wait_timeout(Duration::from_secs(5))
.expect("waiting on child failed")
.map(|status| status.signal())
@ -78,7 +78,17 @@ where
drop(child.wait());
Ok(ExitKind::Timeout)
}
}
};
let stderr = child.stderr.as_mut().unwrap();
if let Some(obs) = self
.observers
.match_name_mut::<ASANBacktraceObserver>("ASANBacktraceObserver")
{
obs.parse_asan_output_from_childstderr(stderr);
};
res
}
}

View File

@ -23,7 +23,7 @@ use crate::{
},
executors::{Executor, ExitKind, HasObservers},
inputs::{HasTargetBytes, Input},
observers::ObserversTuple,
observers::{get_asan_runtime_flags_with_log_path, ASANBacktraceObserver, ObserversTuple},
Error,
};
@ -232,6 +232,7 @@ impl Forkserver {
.stdout(stdout)
.stderr(stderr)
.env("LD_BIND_LAZY", "1")
.env("ASAN_OPTIONS", get_asan_runtime_flags_with_log_path())
.setlimit(memlimit)
.setsid()
.setstdin(out_filefd, use_stdin)
@ -243,12 +244,12 @@ impl Forkserver {
)
.spawn()
{
Ok(_) => {}
Ok(_) => (),
Err(err) => {
return Err(Error::Forkserver(format!(
"Could not spawn the forkserver: {:#?}",
err
)));
)))
}
};
@ -304,7 +305,6 @@ impl Forkserver {
let rlen = self.st_pipe.read(&mut buf)?;
let val: i32 = i32::from_ne_bytes(buf);
Ok((rlen, val))
}
@ -745,6 +745,12 @@ where
if libc::WIFSIGNALED(self.forkserver.status()) {
exit_kind = ExitKind::Crash;
if let Some(obs) = self
.observers_mut()
.match_name_mut::<ASANBacktraceObserver>("ASANBacktraceObserver")
{
obs.parse_asan_output_from_asan_log_file(pid);
}
}
self.forkserver.set_child_pid(Pid::from_raw(0));

View File

@ -16,23 +16,36 @@ use core::{
sync::atomic::{compiler_fence, Ordering},
};
#[cfg(feature = "std")]
use std::intrinsics::transmute;
#[cfg(all(feature = "std", unix))]
use libc::{siginfo_t, ucontext_t};
#[cfg(all(feature = "std", unix))]
use nix::{
sys::wait::{waitpid, WaitStatus},
unistd::{fork, ForkResult},
};
#[cfg(all(feature = "std", unix))]
use crate::bolts::shmem::ShMemProvider;
#[cfg(all(feature = "std", windows))]
use windows::Win32::System::Diagnostics::Debug::EXCEPTION_POINTERS;
#[cfg(unix)]
use crate::bolts::os::unix_signals::setup_signal_handler;
#[cfg(all(windows, feature = "std"))]
use crate::bolts::os::windows_exceptions::setup_exception_handler;
#[cfg(all(feature = "std", unix))]
use crate::bolts::shmem::ShMemProvider;
#[cfg(feature = "std")]
use crate::observers::{BacktraceObserver, HarnessType};
#[cfg(windows)]
use windows::Win32::System::Threading::SetThreadStackGuarantee;
#[cfg(all(feature = "std", windows))]
use crate::bolts::os::windows_exceptions::{ExceptionCode, Handler, CRASH_EXCEPTIONS};
use crate::{
events::{EventFirer, EventRestarter},
executors::{Executor, ExitKind, HasObservers},
@ -44,6 +57,13 @@ use crate::{
Error,
};
#[cfg(all(feature = "std", unix))]
use crate::bolts::os::unix_signals::{Handler, Signal};
#[cfg(feature = "std")]
use crate::executors::inprocess::bt_signal_handlers::{
setup_bt_panic_hook, setup_child_panic_hook,
};
/// The inmem executor simply calls a target function, then returns afterwards.
#[allow(dead_code)]
pub struct InProcessExecutor<'a, H, I, OT, S>
@ -81,7 +101,6 @@ where
I: Input,
OT: ObserversTuple<I, S>,
{
#[inline]
fn run_target(
&mut self,
fuzzer: &mut Z,
@ -91,7 +110,9 @@ where
) -> Result<ExitKind, Error> {
self.handlers
.pre_run_target(self, fuzzer, state, mgr, input);
let ret = (self.harness_fn)(input);
self.handlers.post_run_target();
Ok(ret)
}
@ -139,7 +160,22 @@ where
S: HasSolutions<I> + HasClientPerfMonitor,
Z: HasObjective<I, OF, S>,
{
let handlers = InProcessHandlers::new::<Self, EM, I, OF, OT, S, Z>()?;
#[cfg(feature = "std")]
BacktraceObserver::setup_static_variable();
#[cfg(feature = "std")]
if let Some(obs) = observers.match_name::<BacktraceObserver>("BacktraceObserver") {
if obs.harness_type() == &HarnessType::RUST {
setup_bt_panic_hook::<
InProcessExecutor<H, I, OT, S>,
I,
OT,
S,
InProcessExecutorHandlerData,
>(unsafe { &GLOBAL_STATE });
}
}
let handlers = InProcessHandlers::new::<Self, EM, I, OF, OT, S, Z, H>()?;
#[cfg(windows)]
unsafe {
/*
@ -265,7 +301,7 @@ impl InProcessHandlers {
}
/// Create new [`InProcessHandlers`].
pub fn new<E, EM, I, OF, OT, S, Z>() -> Result<Self, Error>
pub fn new<E, EM, I, OF, OT, S, Z, H>() -> Result<Self, Error>
where
I: Input,
E: HasObservers<I, OT, S>,
@ -274,16 +310,16 @@ impl InProcessHandlers {
OF: Feedback<I, S>,
S: HasSolutions<I> + HasClientPerfMonitor,
Z: HasObjective<I, OF, S>,
H: FnMut(&I) -> ExitKind,
{
#[cfg(unix)]
unsafe {
let data = &mut GLOBAL_STATE;
setup_signal_handler(data)?;
compiler_fence(Ordering::SeqCst);
Ok(Self {
crash_handler: unix_signal_handler::inproc_crash_handler::<E, EM, I, OF, OT, S, Z>
as *const _,
as *const c_void,
timeout_handler: unix_signal_handler::inproc_timeout_handler::<E, EM, I, OF, OT, S, Z>
as *const _,
})
@ -331,7 +367,23 @@ impl InProcessHandlers {
}
}
}
/// Trait implemented by the static variables that hold the global handler data
pub trait HasHandlerData: 'static + Sync {
/// get executor
fn executor<E>(&self) -> &E;
/// get mutable executor
#[allow(clippy::mut_from_ref)]
fn executor_mut<E>(&self) -> &mut E;
/// get state
fn state<S>(&self) -> &S;
/// get mutable state
#[allow(clippy::mut_from_ref)]
fn state_mut<S>(&self) -> &mut S;
/// get current input
fn current_input<I>(&self) -> &I;
/// Check if the pointers in the handler are valid
fn is_valid(&self) -> bool;
}
/// The global state of the in-process harness.
#[derive(Debug)]
#[allow(missing_docs)]
@ -356,6 +408,37 @@ pub struct InProcessExecutorHandlerData {
unsafe impl Send for InProcessExecutorHandlerData {}
unsafe impl Sync for InProcessExecutorHandlerData {}
impl HasHandlerData for InProcessExecutorHandlerData {
fn executor<E>(&self) -> &E {
unsafe { (self.executor_ptr as *const E).as_ref().unwrap() }
}
fn executor_mut<E>(&self) -> &mut E {
unsafe { (self.executor_ptr as *mut E).as_mut().unwrap() }
}
fn state<S>(&self) -> &S {
unsafe { (self.state_ptr as *const S).as_ref().unwrap() }
}
fn state_mut<S>(&self) -> &mut S {
unsafe { (self.state_ptr as *mut S).as_mut().unwrap() }
}
fn current_input<I>(&self) -> &I {
unsafe { (self.current_input_ptr as *const I).as_ref().unwrap() }
}
#[cfg(windows)]
fn is_valid(&self) -> bool {
self.in_target == 1
}
#[cfg(not(windows))]
fn is_valid(&self) -> bool {
!self.current_input_ptr.is_null()
}
}
/// Exception handling needs some nasty unsafe.
pub static mut GLOBAL_STATE: InProcessExecutorHandlerData = InProcessExecutorHandlerData {
/// The state ptr for signal handling
@ -406,6 +489,130 @@ pub fn inprocess_get_executor<'a, E>() -> Option<&'a mut E> {
unsafe { (GLOBAL_STATE.executor_ptr as *mut E).as_mut() }
}
/// Signal handlers and `panic_hooks` used for BT collection
#[cfg(feature = "std")]
pub mod bt_signal_handlers {
use std::panic;
#[cfg(all(unix))]
use libc::{siginfo_t, ucontext_t};
#[cfg(all(unix))]
use crate::bolts::os::unix_signals::Signal;
#[cfg(all(windows))]
use windows::Win32::System::Diagnostics::Debug::EXCEPTION_POINTERS;
#[cfg(all(windows))]
use super::InProcessExecutorHandlerData;
use crate::{
executors::{ExitKind, HasObservers},
inputs::Input,
observers::ObserversTuple,
state::{HasClientPerfMonitor, HasSolutions},
};
use super::HasHandlerData;
/// Invokes the `post_exec` hook on all observers in case of a panic
pub fn setup_bt_panic_hook<E, I, OT, S, D>(data: &'static D)
where
E: HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
I: Input,
D: HasHandlerData,
{
let old_hook = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
if data.is_valid() {
let executor = data.executor_mut::<E>();
let observers = executor.observers_mut();
let state = data.state_mut::<S>();
let input = data.current_input::<I>();
observers
.post_exec_all(state, input, &ExitKind::Crash)
.expect("Failed to run post_exec on observers");
}
old_hook(panic_info);
}));
}
/// Invokes the `post_exec_child` hook on all observers in case the child process panics
pub fn setup_child_panic_hook<E, I, OT, S, D>(data: &'static D)
where
E: HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
I: Input,
D: HasHandlerData,
{
let old_hook = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
if data.is_valid() {
let executor = data.executor_mut::<E>();
let observers = executor.observers_mut();
let state = data.state_mut::<S>();
let input = data.current_input::<I>();
observers
.post_exec_child_all(state, input, &ExitKind::Crash)
.expect("Failed to run post_exec on observers");
}
old_hook(panic_info);
}));
}
/// Invokes the `post_exec_child` hook on all observers in case the child process crashes
#[cfg(unix)]
pub fn child_crash_handler<E, I, OT, S, D>(
_signal: Signal,
_info: siginfo_t,
_context: &mut ucontext_t,
data: &D,
) where
E: HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
S: HasSolutions<I> + HasClientPerfMonitor,
I: Input,
D: HasHandlerData,
{
if data.is_valid() {
// redundant check, but better to be safe
let executor = data.executor_mut::<E>();
let observers = executor.observers_mut();
let state = data.state_mut::<S>();
let input = data.current_input::<I>();
observers
.post_exec_child_all(state, input, &ExitKind::Crash)
.expect("Failed to run post_exec on observers");
}
}
/// Invokes the `post_exec_child` hook on all observers in case the child process crashes
#[cfg(windows)]
pub fn child_crash_handler<E, I, OT, S, D>(
_exception_pointers: *mut EXCEPTION_POINTERS,
data: &mut InProcessExecutorHandlerData,
) where
E: HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
S: HasSolutions<I> + HasClientPerfMonitor,
I: Input,
D: HasHandlerData,
{
if data.is_valid() {
// redundant check, but better to be safe
let executor = data.executor_mut::<E>();
let observers = executor.observers_mut();
let state = data.state_mut::<S>();
let input = data.current_input::<I>();
observers
.post_exec_child_all(state, input, &ExitKind::Crash)
.expect("Failed to run post_exec on observers");
}
}
}
#[cfg(unix)]
mod unix_signal_handler {
use alloc::vec::Vec;
@ -497,8 +704,8 @@ mod unix_signal_handler {
let state = (data.state_ptr as *mut S).as_mut().unwrap();
let event_mgr = (data.event_mgr_ptr as *mut EM).as_mut().unwrap();
let fuzzer = (data.fuzzer_ptr as *mut Z).as_mut().unwrap();
let executor = (data.executor_ptr as *const E).as_ref().unwrap();
let observers = executor.observers();
let executor = (data.executor_ptr as *mut E).as_mut().unwrap();
let observers = executor.observers_mut();
if data.current_input_ptr.is_null() {
#[cfg(feature = "std")]
@ -514,6 +721,10 @@ mod unix_signal_handler {
let input = (data.current_input_ptr as *const I).as_ref().unwrap();
data.current_input_ptr = ptr::null();
observers
.post_exec_all(state, input, &ExitKind::Timeout)
.expect("Observers post_exec_all failed");
let interesting = fuzzer
.objective_mut()
.is_interesting(state, event_mgr, input, observers, &ExitKind::Timeout)
@ -616,12 +827,18 @@ mod unix_signal_handler {
let state = (data.state_ptr as *mut S).as_mut().unwrap();
let event_mgr = (data.event_mgr_ptr as *mut EM).as_mut().unwrap();
let fuzzer = (data.fuzzer_ptr as *mut Z).as_mut().unwrap();
let executor = (data.executor_ptr as *const E).as_ref().unwrap();
let observers = executor.observers();
let executor = (data.executor_ptr as *mut E).as_mut().unwrap();
let observers = executor.observers_mut();
let input = (data.current_input_ptr as *const I).as_ref().unwrap();
data.current_input_ptr = ptr::null();
#[cfg(feature = "std")]
eprintln!("Triggering post_exec_all from crash_handler");
observers
.post_exec_all(state, input, &ExitKind::Crash)
.expect("Observers post_exec_all failed");
#[cfg(feature = "std")]
eprintln!("Child crashed!");
@ -981,6 +1198,193 @@ where
}
}
/// The signature of the crash handler function
#[cfg(all(feature = "std", unix))]
pub type ForkHandlerFuncPtr =
unsafe fn(Signal, siginfo_t, &mut ucontext_t, data: &mut InProcessForkExecutorGlobalData);
/// The signature of the crash handler function
#[cfg(all(feature = "std", windows))]
pub type ForkHandlerFuncPtr =
unsafe fn(*mut EXCEPTION_POINTERS, &mut InProcessForkExecutorGlobalData);
/// The inmem executor's handlers.
#[derive(Debug)]
pub struct InChildProcessHandlers {
/// On crash C function pointer
pub crash_handler: *const c_void,
}
#[cfg(feature = "std")]
impl InChildProcessHandlers {
/// Call before running a target.
pub fn pre_run_target<E, EM, I, S, Z>(
&self,
_executor: &E,
_fuzzer: &mut Z,
_state: &mut S,
_mgr: &mut EM,
_input: &I,
) {
#[cfg(any(unix, windows))]
unsafe {
let data = &mut FORK_EXECUTOR_GLOBAL_DATA;
write_volatile(
&mut data.executor_ptr,
_executor as *const _ as *const c_void,
);
write_volatile(
&mut data.current_input_ptr,
_input as *const _ as *const c_void,
);
write_volatile(&mut data.state_ptr, _state as *mut _ as *mut c_void);
data.crash_handler = self.crash_handler;
compiler_fence(Ordering::SeqCst);
}
}
/// Create new [`InChildProcessHandlers`].
pub fn new<E, EM, I, OF, OT, S, Z>() -> Result<Self, Error>
where
I: Input,
E: HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
EM: EventFirer<I> + EventRestarter<S>,
OF: Feedback<I, S>,
S: HasSolutions<I> + HasClientPerfMonitor,
Z: HasObjective<I, OF, S>,
{
#[cfg(any(unix, windows))]
// unsafe
{
// let data = &mut FORK_EXECUTOR_GLOBAL_DATA;
compiler_fence(Ordering::SeqCst);
Ok(Self {
crash_handler: bt_signal_handlers::child_crash_handler::<
E,
I,
OT,
S,
InProcessForkExecutorGlobalData,
> as *const c_void,
})
}
#[cfg(not(any(unix, all(windows, feature = "std"))))]
Ok(Self {
crash_handler: ptr::null(),
})
}
/// Replace the handlers with `nop` handlers, deactivating the handlers
#[must_use]
pub fn nop() -> Self {
Self {
crash_handler: ptr::null(),
}
}
}
/// The global state of the in-process-fork harness.
#[derive(Debug)]
pub struct InProcessForkExecutorGlobalData {
/// Stores a pointer to the fork executor struct
pub executor_ptr: *const c_void,
/// Stores a pointer to the state
pub state_ptr: *const c_void,
/// Stores a pointer to the current input
pub current_input_ptr: *const c_void,
/// Stores a pointer to the crash_handler function
pub crash_handler: *const c_void,
}
unsafe impl Sync for InProcessForkExecutorGlobalData {}
unsafe impl Send for InProcessForkExecutorGlobalData {}
impl HasHandlerData for InProcessForkExecutorGlobalData {
fn executor<E>(&self) -> &E {
unsafe { (self.executor_ptr as *const E).as_ref().unwrap() }
}
fn executor_mut<E>(&self) -> &mut E {
unsafe { (self.executor_ptr as *mut E).as_mut().unwrap() }
}
fn state<S>(&self) -> &S {
unsafe { (self.state_ptr as *const S).as_ref().unwrap() }
}
fn state_mut<S>(&self) -> &mut S {
unsafe { (self.state_ptr as *mut S).as_mut().unwrap() }
}
fn current_input<I>(&self) -> &I {
unsafe { (self.current_input_ptr as *const I).as_ref().unwrap() }
}
fn is_valid(&self) -> bool {
!self.current_input_ptr.is_null()
}
}
/// A static variable storing the global state
pub static mut FORK_EXECUTOR_GLOBAL_DATA: InProcessForkExecutorGlobalData =
InProcessForkExecutorGlobalData {
executor_ptr: ptr::null(),
crash_handler: ptr::null(),
state_ptr: ptr::null(),
current_input_ptr: ptr::null(),
};
#[cfg(feature = "std")]
impl Handler for InProcessForkExecutorGlobalData {
#[cfg(unix)]
fn handle(&mut self, signal: Signal, info: siginfo_t, context: &mut ucontext_t) {
match signal {
Signal::SigUser2 | Signal::SigAlarm => (),
_ => unsafe {
if !FORK_EXECUTOR_GLOBAL_DATA.crash_handler.is_null() {
let func: ForkHandlerFuncPtr =
transmute(FORK_EXECUTOR_GLOBAL_DATA.crash_handler);
(func)(signal, info, context, &mut FORK_EXECUTOR_GLOBAL_DATA);
}
},
}
}
#[cfg(unix)]
fn signals(&self) -> Vec<Signal> {
vec![
Signal::SigAlarm,
Signal::SigUser2,
Signal::SigAbort,
Signal::SigBus,
Signal::SigPipe,
Signal::SigFloatingPointException,
Signal::SigIllegalInstruction,
Signal::SigSegmentationFault,
Signal::SigTrap,
]
}
#[cfg(windows)]
#[allow(clippy::not_unsafe_ptr_arg_deref)]
fn handle(&mut self, _code: ExceptionCode, exception_pointers: *mut EXCEPTION_POINTERS) {
unsafe {
let data = &mut FORK_EXECUTOR_GLOBAL_DATA;
if !data.crash_handler.is_null() {
let func: ForkHandlerFuncPtr = transmute(data.crash_handler);
(func)(exception_pointers, data);
}
}
}
#[cfg(windows)]
fn exceptions(&self) -> Vec<ExceptionCode> {
CRASH_EXCEPTIONS.to_vec()
}
}
/// [`InProcessForkExecutor`] is an executor that forks the current process before each execution.
#[cfg(all(feature = "std", unix))]
pub struct InProcessForkExecutor<'a, H, I, OT, S, SP>
@ -993,6 +1397,7 @@ where
harness_fn: &'a mut H,
shmem_provider: SP,
observers: OT,
handlers: InChildProcessHandlers,
phantom: PhantomData<(I, S)>,
}
@ -1025,9 +1430,9 @@ where
#[inline]
fn run_target(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_mgr: &mut EM,
fuzzer: &mut Z,
state: &mut S,
mgr: &mut EM,
input: &I,
) -> Result<ExitKind, Error> {
unsafe {
@ -1037,6 +1442,29 @@ where
// Child
self.shmem_provider.post_fork(true)?;
self.handlers
.pre_run_target(self, fuzzer, state, mgr, input);
match self
.observers()
.match_name::<BacktraceObserver>("BacktraceObserver")
.unwrap()
.harness_type()
{
crate::observers::HarnessType::FFI => {
setup_signal_handler(&mut FORK_EXECUTOR_GLOBAL_DATA)?;
}
crate::observers::HarnessType::RUST => {
setup_child_panic_hook::<
InProcessForkExecutor<H, I, OT, S, SP>,
I,
OT,
S,
InProcessForkExecutorGlobalData,
>(&FORK_EXECUTOR_GLOBAL_DATA);
}
}
(self.harness_fn)(input);
std::process::exit(0);
@ -1045,9 +1473,13 @@ where
}
Ok(ForkResult::Parent { child }) => {
// Parent
println!("from parent {} child is {}", std::process::id(), child);
self.shmem_provider.post_fork(false)?;
self.handlers
.pre_run_target(self, fuzzer, state, mgr, input);
let res = waitpid(child, None)?;
match res {
WaitStatus::Signaled(_, _, _) => Ok(ExitKind::Crash),
_ => Ok(ExitKind::Ok),
@ -1082,10 +1514,20 @@ where
S: HasSolutions<I> + HasClientPerfMonitor,
Z: HasObjective<I, OF, S>,
{
// should match on type when it's available
let handlers = match observers.match_name::<BacktraceObserver>("BacktraceObserver") {
Some(_) => {
BacktraceObserver::setup_shmem();
InChildProcessHandlers::new::<Self, EM, I, OF, OT, S, Z>()?
}
None => InChildProcessHandlers::nop(),
};
Ok(Self {
harness_fn,
shmem_provider,
observers,
handlers,
phantom: PhantomData,
})
}
@ -1156,6 +1598,8 @@ mod tests {
#[test]
#[cfg(all(feature = "std", feature = "fork", unix))]
fn test_inprocessfork_exec() {
use crate::executors::inprocess::InChildProcessHandlers;
let provider = StdShMemProvider::new().unwrap();
let mut harness = |_buf: &NopInput| ExitKind::Ok;
@ -1163,6 +1607,7 @@ mod tests {
harness_fn: &mut harness,
shmem_provider: provider,
observers: tuple_list!(),
handlers: InChildProcessHandlers::nop(),
phantom: PhantomData,
};
let input = NopInput {};

View File

@ -9,6 +9,13 @@ pub mod concolic;
#[cfg(feature = "std")]
pub use concolic::ConcolicFeedback;
#[cfg(feature = "std")]
pub mod new_hash_feedback;
#[cfg(feature = "std")]
pub use new_hash_feedback::NewHashFeedback;
#[cfg(feature = "std")]
pub use new_hash_feedback::NewHashFeedbackState;
#[cfg(feature = "nautilus")]
pub mod nautilus;
#[cfg(feature = "nautilus")]

View File

@ -0,0 +1,183 @@
//! The ``NewHashFeedback`` uses the backtrace hash and a hashset to only keep novel cases
use std::{fmt::Debug, hash::Hash, marker::PhantomData};
use hashbrown::HashSet;
use num_traits::PrimInt;
use serde::{Deserialize, Serialize};
use crate::{
bolts::tuples::{MatchName, Named},
events::EventFirer,
executors::ExitKind,
feedbacks::{Feedback, FeedbackState},
inputs::Input,
observers::{ObserverWithHashField, ObserversTuple},
state::{HasClientPerfMonitor, HasFeedbackStates},
Error,
};
/// A state that implements this trait has a hash set
pub trait HashSetState<T> {
/// creates a new instance with a specific hashset
fn with_hash_set(name: &'static str, hash_set: HashSet<T>) -> Self;
/// updates the `hash_set` with the given value
fn update_hash_set(&mut self, value: T) -> Result<bool, Error>;
}
/// The state of [`NewHashFeedback`]
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(bound = "T: serde::de::DeserializeOwned")]
pub struct NewHashFeedbackState<T>
where
T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Hash + Debug,
{
/// Contains information about untouched entries
pub hash_set: HashSet<T>,
/// Name identifier of this instance
pub name: String,
}
impl<T> FeedbackState for NewHashFeedbackState<T>
where
T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Hash + Debug,
{
fn reset(&mut self) -> Result<(), Error> {
self.hash_set.clear();
Ok(())
}
}
impl<T> Named for NewHashFeedbackState<T>
where
T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Hash + Debug,
{
#[inline]
fn name(&self) -> &str {
self.name.as_str()
}
}
impl<T> NewHashFeedbackState<T>
where
T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Hash + Debug,
{
/// Create a new [`NewHashFeedbackState`]
#[must_use]
pub fn new(name: &'static str) -> Self {
Self {
hash_set: HashSet::<T>::new(),
name: name.to_string(),
}
}
/// Create a new [`NewHashFeedbackState`] for an observer that implements [`ObserverWithHashField`]
pub fn with_observer(backtrace_observer: &(impl ObserverWithHashField + Named)) -> Self {
Self {
hash_set: HashSet::<T>::new(),
name: backtrace_observer.name().to_string(),
}
}
}
impl<T> HashSetState<T> for NewHashFeedbackState<T>
where
T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Hash + Debug,
{
/// Create new [`NewHashFeedbackState`] using a name and a hash set.
#[must_use]
fn with_hash_set(name: &'static str, hash_set: HashSet<T>) -> Self {
Self {
hash_set,
name: name.to_string(),
}
}
fn update_hash_set(&mut self, value: T) -> Result<bool, Error> {
let r = self.hash_set.insert(value);
println!("Got r={}, the hashset is {:?}", r, &self.hash_set);
Ok(r)
}
}
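// Illustrative sketch, not from this diff: `update_hash_set` reports whether a hash was seen
// before, which is exactly the novelty signal `NewHashFeedback::is_interesting` returns.
// Variable names below are illustrative only.
//
// let mut bt_state = NewHashFeedbackState::<u64>::new("BacktraceObserver");
// assert!(bt_state.update_hash_set(0xdead_beef).unwrap()); // unseen backtrace hash -> interesting
// assert!(!bt_state.update_hash_set(0xdead_beef).unwrap()); // already known -> not interesting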
/// A [`NewHashFeedback`] maintains a hash set of already-seen stacktraces and considers unseen ones interesting
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NewHashFeedback<O> {
feedback_name: String,
observer_name: String,
o_type: PhantomData<O>,
}
impl<I, S, O> Feedback<I, S> for NewHashFeedback<O>
where
I: Input,
S: HasClientPerfMonitor + HasFeedbackStates,
O: ObserverWithHashField + Named + Debug,
{
fn is_interesting<EM, OT>(
&mut self,
_state: &mut S,
_manager: &mut EM,
_input: &I,
observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let observer = observers
.match_name::<O>(&self.observer_name)
.expect("A NewHashFeedback needs a BacktraceObserver");
let backtrace_state = _state
.feedback_states_mut()
.match_name_mut::<NewHashFeedbackState<u64>>(&self.observer_name.to_string())
.unwrap();
match observer.hash() {
Some(hash) => {
let res = backtrace_state
.update_hash_set(*hash)
.expect("Failed to update the hash state");
Ok(res)
}
None => {
// We get here if the hash was not updated, e.g. on the first run or when no crash happened
Ok(false)
}
}
}
}
impl<O> Named for NewHashFeedback<O> {
#[inline]
fn name(&self) -> &str {
&self.feedback_name
}
}
impl<O> NewHashFeedback<O>
where
O: ObserverWithHashField + Named + Debug,
{
/// Returns a new [`NewHashFeedback`]. Careful: it's recommended to use `new_with_observer`,
/// since setting an observer name that doesn't exist will eventually trigger a panic.
#[must_use]
pub fn new(feedback_name: &str, observer_name: &str) -> Self {
Self {
feedback_name: feedback_name.to_string(),
observer_name: observer_name.to_string(),
o_type: PhantomData,
}
}
/// Returns a new [`NewHashFeedback`].
#[must_use]
pub fn new_with_observer(feedback_name: &str, observer: &O) -> Self {
Self {
feedback_name: feedback_name.to_string(),
observer_name: observer.name().to_string(),
o_type: PhantomData,
}
}
}
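// Illustrative sketch, not from this diff: how the pieces in this file are meant to be wired
// together with the `BacktraceObserver` from `observers/stacktrace.rs`. Passing the feedback
// state and the objective on to the state/fuzzer follows the usual LibAFL setup and is omitted.
//
// let bt_observer = BacktraceObserver::new("BacktraceObserver", HarnessType::RUST);
// let bt_state = NewHashFeedbackState::<u64>::with_observer(&bt_observer);
// let objective = NewHashFeedback::<BacktraceObserver>::new_with_observer("NewHashFeedback", &bt_observer);
// // `bt_state` goes into the state's feedback-states tuple; `objective` is used as (part of)
// // the objective feedback, so only crashes with an unseen backtrace hash become solutions.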

View File

@ -598,7 +598,9 @@ where
*state.executions_mut() += 1;
start_timer!(state);
executor.observers_mut().post_exec_all(state, input)?;
executor
.observers_mut()
.post_exec_all(state, input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(exit_kind)
@ -656,7 +658,9 @@ where
*state.executions_mut() += 1;
start_timer!(state);
executor.observers_mut().post_exec_all(state, input)?;
executor
.observers_mut()
.post_exec_all(state, input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(exit_kind)

View File

@ -17,7 +17,7 @@ use crate::{
};
/// A bytes input is the basic input
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct BytesInput {
/// The raw input bytes
bytes: Vec<u8>,

View File

@ -186,7 +186,7 @@ impl Tokenizer for NaiveTokenizer {
}
/// A codes input is the basic input
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct EncodedInput {
/// The input representation as list of codes
codes: Vec<u32>,

View File

@ -17,7 +17,7 @@ use crate::{
};
/// An item of the generalized input
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)]
pub enum GeneralizedItem {
/// Real bytes
Bytes(Vec<u8>),
@ -26,7 +26,7 @@ pub enum GeneralizedItem {
}
/// A bytes input with a generalized version mainly used for Grimoire
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct GeneralizedInput {
/// The raw input bytes
bytes: Vec<u8>,
@ -206,7 +206,6 @@ impl GeneralizedInput {
}
/// Load from a plain file of bytes
#[must_use]
#[cfg(feature = "std")]
pub fn from_bytes_file<P>(path: P) -> Result<Self, Error>
where

View File

@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize};
use crate::{bolts::HasLen, inputs::Input, Error};
/// A terminal for gramatron grammar fuzzing
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct Terminal {
/// The state
pub state: usize,
@ -32,7 +32,7 @@ impl Terminal {
}
/// An input for gramatron grammar fuzzing
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct GramatronInput {
/// The input representation as list of terminals
terms: Vec<Terminal>,

View File

@ -24,15 +24,35 @@ use alloc::{
use core::{clone::Clone, fmt::Debug};
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use std::{fs::File, io::Read, path::Path};
use std::{fs::File, hash::Hash, io::Read, path::Path};
#[cfg(feature = "std")]
use crate::bolts::fs::write_file_atomic;
use crate::{bolts::ownedref::OwnedSlice, Error};
/// An input for the target
#[cfg(not(feature = "std"))]
pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug {
#[cfg(feature = "std")]
/// Write this input to the file
fn to_file<P>(&self, _path: P) -> Result<(), Error> {
Err(Error::NotImplemented("Not supported in no_std".into()))
}
/// Load the contents of this input from a file
fn from_file<P>(_path: P) -> Result<Self, Error> {
Err(Error::NotImplemented("Not supported in no_std".into()))
}
/// Generate a name for this input
fn generate_name(&self, idx: usize) -> String;
/// A hook executed if the input is stored as `Testcase`
fn wrapped_as_testcase(&mut self) {}
}
/// An input for the target
#[cfg(feature = "std")]
pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug + Hash {
/// Write this input to the file
fn to_file<P>(&self, path: P) -> Result<(), Error>
where
@ -41,12 +61,6 @@ pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug {
write_file_atomic(path, &postcard::to_allocvec(self)?)
}
#[cfg(not(feature = "std"))]
/// Write this input to the file
fn to_file<P>(&self, _path: P) -> Result<(), Error> {
Err(Error::NotImplemented("Not supported in no_std".into()))
}
/// Load the contents of this input from a file
#[cfg(feature = "std")]
fn from_file<P>(path: P) -> Result<Self, Error>
@ -59,12 +73,6 @@ pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug {
Ok(postcard::from_bytes(&bytes)?)
}
/// Write this input to the file
#[cfg(not(feature = "std"))]
fn from_file<P>(_path: P) -> Result<Self, Error> {
Err(Error::NotImplemented("Not supprted in no_std".into()))
}
/// Generate a name for this input
fn generate_name(&self, idx: usize) -> String;
@ -73,7 +81,7 @@ pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug {
}
/// An input for tests, mainly. There is not much real use for it otherwise.
#[derive(Copy, Clone, Serialize, Deserialize, Debug)]
#[derive(Copy, Clone, Serialize, Deserialize, Debug, Hash)]
pub struct NopInput {}
impl Input for NopInput {
fn generate_name(&self, _idx: usize) -> String {

View File

@ -12,9 +12,12 @@ use crate::{bolts::HasLen, generators::nautilus::NautilusContext, inputs::Input}
use grammartec::{
newtypes::NodeID,
rule::RuleIDOrCustom,
tree::{Tree, TreeLike},
};
use std::hash::{Hash, Hasher};
/// An [`Input`] implementation for `Nautilus` grammar.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NautilusInput {
@ -86,3 +89,19 @@ impl NautilusInput {
&mut self.tree
}
}
impl Hash for NautilusInput {
fn hash<H: Hasher>(&self, state: &mut H) {
self.tree().paren.hash(state);
for r in &self.tree().rules {
match r {
RuleIDOrCustom::Custom(a, b) => {
a.hash(state);
b.hash(state);
}
RuleIDOrCustom::Rule(a) => a.hash(state),
}
}
self.tree().sizes.hash(state);
}
}
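// Editorial sketch, not from this diff: the manual impl above is needed presumably because the
// grammartec `Tree` type does not implement `Hash` itself. Input types whose fields all
// implement `Hash` can instead just extend their derive list, as the other inputs in this
// commit do. The type name below is hypothetical:
//
// #[derive(Clone, Serialize, Deserialize, Debug, Hash)]
// pub struct MyCustomInput {
//     bytes: Vec<u8>,
// }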

View File

@ -21,6 +21,7 @@ use crate::{
tuples::Named,
AsMutSlice, AsSlice, HasLen,
},
executors::ExitKind,
observers::Observer,
Error,
};
@ -680,12 +681,12 @@ where
}
#[inline]
fn post_exec(&mut self, state: &mut S, input: &I) -> Result<(), Error> {
fn post_exec(&mut self, state: &mut S, input: &I, exit_kind: &ExitKind) -> Result<(), Error> {
let cnt = self.usable_count();
for i in 0..cnt {
*self.get_mut(i) = COUNT_CLASS_LOOKUP[*self.get(i) as usize];
}
self.base.post_exec(state, input)
self.base.post_exec(state, input, exit_kind)
}
}

View File

@ -6,6 +6,13 @@ pub use map::*;
pub mod cmp;
pub use cmp::*;
#[cfg(feature = "std")]
pub mod stacktrace;
#[cfg(feature = "std")]
pub use stacktrace::ASANBacktraceObserver;
#[cfg(feature = "std")]
pub use stacktrace::*;
pub mod concolic;
#[cfg(unstable_feature)]
@ -22,6 +29,7 @@ use crate::{
current_time,
tuples::{MatchName, Named},
},
executors::ExitKind,
Error,
};
@ -43,7 +51,29 @@ pub trait Observer<I, S>: Named + Debug {
/// Called right after execution finish.
#[inline]
fn post_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
fn post_exec(
&mut self,
_state: &mut S,
_input: &I,
_exit_kind: &ExitKind,
) -> Result<(), Error> {
Ok(())
}
/// Called right before execution starts in the child process, if any.
#[inline]
fn pre_exec_child(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
Ok(())
}
/// Called right after execution finish in the child process, if any.
#[inline]
fn post_exec_child(
&mut self,
_state: &mut S,
_input: &I,
_exit_kind: &ExitKind,
) -> Result<(), Error> {
Ok(())
}
}
@ -54,7 +84,23 @@ pub trait ObserversTuple<I, S>: MatchName + Debug {
fn pre_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error>;
/// This is called right after the last execution
fn post_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error>;
fn post_exec_all(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error>;
/// This is called right before the next execution in the child process, if any.
fn pre_exec_child_all(&mut self, state: &mut S, input: &I) -> Result<(), Error>;
/// This is called right after the last execution in the child process, if any.
fn post_exec_child_all(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error>;
}
impl<I, S> ObserversTuple<I, S> for () {
@ -62,7 +108,25 @@ impl<I, S> ObserversTuple<I, S> for () {
Ok(())
}
fn post_exec_all(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
fn post_exec_all(
&mut self,
_state: &mut S,
_input: &I,
_exit_kind: &ExitKind,
) -> Result<(), Error> {
Ok(())
}
fn pre_exec_child_all(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
Ok(())
}
fn post_exec_child_all(
&mut self,
_state: &mut S,
_input: &I,
_exit_kind: &ExitKind,
) -> Result<(), Error> {
Ok(())
}
}
@ -77,12 +141,41 @@ where
self.1.pre_exec_all(state, input)
}
fn post_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> {
self.0.post_exec(state, input)?;
self.1.post_exec_all(state, input)
fn post_exec_all(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error> {
self.0.post_exec(state, input, exit_kind)?;
self.1.post_exec_all(state, input, exit_kind)
}
fn pre_exec_child_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> {
self.0.pre_exec_child(state, input)?;
self.1.pre_exec_child_all(state, input)
}
fn post_exec_child_all(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error> {
self.0.post_exec_child(state, input, exit_kind)?;
self.1.post_exec_child_all(state, input, exit_kind)
}
}
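// Illustrative sketch, not from this diff: with the recursive tuple impl above, an executor can
// drive a heterogeneous observer list with a single call, now passing the run's exit kind along:
//
// let mut observers = tuple_list!(TimeObserver::new("time"), BacktraceObserver::default());
// observers.post_exec_all(&mut state, &input, &ExitKind::Crash)?;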
/// A trait for observers with a hash field
pub trait ObserverWithHashField {
/// get the value of the hash field
fn hash(&self) -> &Option<u64>;
/// update the hash field with the given value
fn update_hash(&mut self, hash: u64);
/// clears the current value of the hash and sets it to None
fn clear_hash(&mut self);
}
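// Illustrative sketch, not from this diff: observers that expose a crash-deduplication hash
// (such as `BacktraceObserver` and `ASANBacktraceObserver` in `observers/stacktrace.rs`)
// typically back this trait with a plain `Option<u64>` field. The observer type is hypothetical:
//
// impl ObserverWithHashField for MyHashObserver {
//     fn hash(&self) -> &Option<u64> { &self.hash }
//     fn update_hash(&mut self, hash: u64) { self.hash = Some(hash); }
//     fn clear_hash(&mut self) { self.hash = None; }
// }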
/// A simple observer, just overlooking the runtime of the target.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TimeObserver {
@ -116,7 +209,12 @@ impl<I, S> Observer<I, S> for TimeObserver {
Ok(())
}
fn post_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
fn post_exec(
&mut self,
_state: &mut S,
_input: &I,
_exit_kind: &ExitKind,
) -> Result<(), Error> {
self.last_runtime = current_time().checked_sub(self.start_time);
Ok(())
}

View File

@ -8,6 +8,7 @@ use crate::{
anymap::{pack_type_id, AsAny},
tuples::MatchName,
},
executors::ExitKind,
observers::{Observer, ObserversTuple},
Error,
};
@ -71,9 +72,29 @@ impl<I: 'static + Debug, S: 'static + Debug> ObserversTuple<I, S> for ObserversO
.for_each_mut(&mut |_, ob| ob.pre_exec(state, input))
}
fn post_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> {
fn post_exec_all(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error> {
self.map
.for_each_mut(&mut |_, ob| ob.post_exec(state, input))
.for_each_mut(&mut |_, ob| ob.post_exec(state, input, exit_kind))
}
fn pre_exec_child_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> {
self.map
.for_each_mut(&mut |_, ob| ob.pre_exec_child(state, input))
}
fn post_exec_child_all(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error> {
self.map
.for_each_mut(&mut |_, ob| ob.post_exec_child(state, input, exit_kind))
}
}

View File

@ -0,0 +1,347 @@
//! The ``BacktraceObserver`` looks up the stacktrace on the execution thread and computes a hash of it for dedupe
use crate::{
bolts::{
shmem::{ShMemProvider, StdShMemProvider},
tuples::Named,
AsMutSlice, AsSlice,
},
executors::ExitKind,
inputs::Input,
observers::Observer,
Error,
};
use ahash::AHasher;
use backtrace::Backtrace;
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::{
collections::hash_map::DefaultHasher,
fmt::Debug,
fs::{self, File},
hash::Hasher,
io::Read,
path::Path,
process::ChildStderr,
};
use super::ObserverWithHashField;
type StdShMem = <StdShMemProvider as ShMemProvider>::ShMem;
/// An enum that stores the information needed to persist the backtrace hash across processes/runs
#[derive(Debug)]
pub enum BacktraceHashValueWrapper {
/// shared memory instance
Shmem(Box<StdShMem>),
/// static variable
StaticVariable((u64, u64)),
/// Neither is set
None,
}
impl BacktraceHashValueWrapper {
/// store a hash value in the [`BacktraceHashValueWrapper`]
fn store_stacktrace_hash(&mut self, bt_hash: u64, input_hash: u64) {
match self {
Self::Shmem(shmem) => {
let map = shmem.as_mut_slice();
let bt_hash_bytes = bt_hash.to_be_bytes();
let input_hash_bytes = input_hash.to_be_bytes();
map.copy_from_slice(&[bt_hash_bytes, input_hash_bytes].concat());
}
Self::StaticVariable(_) => {
*self = Self::StaticVariable((bt_hash, input_hash));
}
Self::None => panic!("BacktraceSharedMemoryWrapper is not set yet22!"),
}
}
/// get the hash value from the [`BacktraceHashValueWrapper`]
fn get_stacktrace_hash(&self) -> (u64, u64) {
match &self {
Self::Shmem(shmem) => {
let map = shmem.as_slice();
(
u64::from_be_bytes(map[0..8].try_into().expect("Incorrectly sized")),
u64::from_be_bytes(map[8..16].try_into().expect("Incorrectly sized")),
)
}
Self::StaticVariable(hash_tuple) => *hash_tuple,
Self::None => {
panic!("BacktraceSharedMemoryWrapper is not set yet!")
}
}
}
}
// Used for fuzzers not running in the same process
/// Static variable storing the backtrace hash and associated input hash (in shared memory or a static tuple)
pub static mut BACKTRACE_HASH_VALUE: BacktraceHashValueWrapper = BacktraceHashValueWrapper::None;
/// Collects the backtrace via [`Backtrace`] and [`Debug`]
/// ([`Debug`] is currently used for dev purposes; a hash of the symbols will be used eventually)
#[must_use]
pub fn collect_backtrace() -> u64 {
let b = Backtrace::new();
// will use symbols later
let trace = format!("{:?}", b);
eprintln!("{}", trace);
let mut hasher = AHasher::new_with_keys(0, 0);
hasher.write(trace.as_bytes());
let hash = hasher.finish();
println!(
"backtrace collected with hash={} at pid={}",
hash,
std::process::id()
);
hash
}
/// An enum encoding the types of harnesses
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum HarnessType {
/// Harness type when the harness is rust code
RUST,
/// Harness type when the harness is linked via FFI (e.g. C code)
FFI,
}
/// An observer looking at the backtrace after the harness crashes
#[allow(clippy::unsafe_derive_deserialize)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct BacktraceObserver {
observer_name: String,
harness_type: HarnessType,
hash: Option<u64>,
}
impl BacktraceObserver {
/// Creates a new [`BacktraceObserver`] with the given name.
#[must_use]
pub fn new(observer_name: &str, harness_type: HarnessType) -> Self {
Self {
observer_name: observer_name.to_string(),
harness_type,
hash: None,
}
}
/// Set up the shared memory and store it in [`BACKTRACE_HASH_VALUE`]
pub fn setup_shmem() {
let shmem_provider = StdShMemProvider::new();
let mut shmem = shmem_provider.unwrap().new_shmem(16).unwrap();
shmem.as_mut_slice().fill(0);
let boxed_shmem = Box::<StdShMem>::new(shmem);
unsafe {
BACKTRACE_HASH_VALUE = BacktraceHashValueWrapper::Shmem(boxed_shmem);
}
}
/// Init the [`BACKTRACE_HASH_VALUE`] to [`BacktraceHashValueWrapper::StaticVariable`] with `(0, 0)`
pub fn setup_static_variable() {
unsafe {
BACKTRACE_HASH_VALUE = BacktraceHashValueWrapper::StaticVariable((0, 0));
}
}
/// returns `harness_type` for this [`BacktraceObserver`] instance
#[must_use]
pub fn harness_type(&self) -> &HarnessType {
&self.harness_type
}
}
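// Illustrative sketch, not from this diff: which storage backend to initialize depends on the
// executor. In-process executors can use the static tuple, since the crash handler runs in the
// same address space; forking executors need shared memory so the parent can read the hash the
// crashing child stored (the `InProcessForkExecutor` constructor in this commit calls
// `setup_shmem` itself when it finds a `BacktraceObserver`).
//
// let observer = BacktraceObserver::new("BacktraceObserver", HarnessType::RUST);
// BacktraceObserver::setup_static_variable(); // in-process executor
// // BacktraceObserver::setup_shmem();        // forking / multi-process executors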
impl ObserverWithHashField for BacktraceObserver {
/// Gets the hash value of this observer.
#[must_use]
fn hash(&self) -> &Option<u64> {
&self.hash
}
/// Updates the hash value of this observer.
fn update_hash(&mut self, hash: u64) {
self.hash = Some(hash);
}
/// Clears the current hash value
fn clear_hash(&mut self) {
self.hash = None;
}
}
impl Default for BacktraceObserver {
fn default() -> Self {
Self::new("BacktraceObserver", HarnessType::RUST)
}
}
impl<I, S> Observer<I, S> for BacktraceObserver
where
I: Input + Debug,
{
fn post_exec(&mut self, _state: &mut S, input: &I, exit_kind: &ExitKind) -> Result<(), Error> {
// run if this call resulted after a crash
if exit_kind == &ExitKind::Crash {
// hash input
let mut hasher = DefaultHasher::new();
input.hash(&mut hasher);
let input_hash = hasher.finish();
// get last backtrace hash and associated input hash
let (mut bt_hash, current_input_hash) =
unsafe { BACKTRACE_HASH_VALUE.get_stacktrace_hash() };
// replace if this is a new input
if current_input_hash != input_hash {
bt_hash = collect_backtrace();
unsafe { BACKTRACE_HASH_VALUE.store_stacktrace_hash(bt_hash, input_hash) };
}
// update hash field in this observer
self.update_hash(bt_hash);
}
Ok(())
}
fn post_exec_child(
&mut self,
state: &mut S,
input: &I,
exit_kind: &ExitKind,
) -> Result<(), Error> {
self.post_exec(state, input, exit_kind)
}
}
impl Named for BacktraceObserver {
fn name(&self) -> &str {
&self.observer_name
}
}
/// Static variable holding the ASAN log path
pub static ASAN_LOG_PATH: &str = "./asanlog";
/// returns the recommended ASAN runtime flags to capture the backtrace correctly with `log_path` set
#[must_use]
pub fn get_asan_runtime_flags_with_log_path() -> String {
let mut flags = get_asan_runtime_flags();
flags.push_str(":log_path=");
flags.push_str(ASAN_LOG_PATH);
flags
}
/// returns the recommended ASAN runtime flags to capture the backtrace correctly
#[must_use]
pub fn get_asan_runtime_flags() -> String {
let flags = vec![
"exitcode=0",
"abort_on_error=1",
"handle_abort=1",
"handle_segv=1",
"handle_sigbus=1",
"handle_sigill=1",
"handle_sigfpe=1",
];
flags.join(":")
}
/// An observer looking at the backtrace of the target command, using ASAN output
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ASANBacktraceObserver {
observer_name: String,
hash: Option<u64>,
}
impl ASANBacktraceObserver {
/// Creates a new [`ASANBacktraceObserver`] with the given name.
#[must_use]
pub fn new(observer_name: &str) -> Self {
Self {
observer_name: observer_name.to_string(),
hash: None,
}
}
/// read ASAN output from the child stderr and parse it.
pub fn parse_asan_output_from_childstderr(&mut self, stderr: &mut ChildStderr) {
let mut buf = String::new();
stderr
.read_to_string(&mut buf)
.expect("Failed to read the child process stderr");
self.parse_asan_output(&buf);
}
/// read ASAN output from the log file and parse it.
pub fn parse_asan_output_from_asan_log_file(&mut self, pid: i32) {
let log_path = format!("{}.{}", ASAN_LOG_PATH, pid);
let mut asan_output = File::open(Path::new(&log_path))
.unwrap_or_else(|_| panic!("Can't find asan log at {}", &log_path));
let mut buf = String::new();
asan_output
.read_to_string(&mut buf)
.expect("Failed to read asan log");
fs::remove_file(&log_path).unwrap_or_else(|_| panic!("Failed to delete {}", &log_path));
self.parse_asan_output(&buf);
}
/// parse ASAN error output emitted by the target command and compute the hash
pub fn parse_asan_output(&mut self, output: &str) {
let mut hasher = AHasher::new_with_keys(0, 0);
let matcher = Regex::new("\\s*#[0-9]*\\s0x[0-9a-f]*\\sin\\s(.*)").unwrap();
matcher.captures_iter(output).for_each(|m| {
let g = m.get(1).unwrap();
hasher.write(g.as_str().as_bytes());
});
let hash = hasher.finish();
self.update_hash(hash);
}
}
impl ObserverWithHashField for ASANBacktraceObserver {
/// Gets the hash value of this observer.
#[must_use]
fn hash(&self) -> &Option<u64> {
&self.hash
}
/// Updates the hash value of this observer.
fn update_hash(&mut self, hash: u64) {
self.hash = Some(hash);
}
/// Clears the current hash value
fn clear_hash(&mut self) {
self.hash = None;
}
}
impl Default for ASANBacktraceObserver {
fn default() -> Self {
Self::new("ASANBacktraceObserver")
}
}
impl<I, S> Observer<I, S> for ASANBacktraceObserver
where
I: Debug,
{
fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
Ok(())
}
fn post_exec(
&mut self,
_state: &mut S,
_input: &I,
_exit_kind: &ExitKind,
) -> Result<(), Error> {
Ok(())
}
}
impl Named for ASANBacktraceObserver {
fn name(&self) -> &str {
&self.observer_name
}
}
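// Illustrative sketch, not from this diff: one plausible way to drive this observer for an
// out-of-process, ASAN-instrumented target. The binary path is hypothetical; the helper
// functions and observer methods are the ones defined above.
//
// let mut observer = ASANBacktraceObserver::new("ASANBacktraceObserver");
// let child = std::process::Command::new("./target_built_with_asan")
//     .env("ASAN_OPTIONS", get_asan_runtime_flags_with_log_path())
//     .spawn()
//     .expect("failed to spawn the target");
// let pid = child.id() as i32;
// let _ = child.wait_with_output();
// // With `log_path` set, ASAN writes its report to "./asanlog.<pid>" on a crash;
// // parsing it fills the observer's hash field, which `NewHashFeedback` then consumes.
// observer.parse_asan_output_from_asan_log_file(pid);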

View File

@ -388,13 +388,15 @@ where
mark_feature_time!(state, PerfFeature::PreExecObservers);
start_timer!(state);
let _ = executor.run_target(fuzzer, state, manager, input)?;
let exit_kind = executor.run_target(fuzzer, state, manager, input)?;
mark_feature_time!(state, PerfFeature::TargetExecution);
*state.executions_mut() += 1;
start_timer!(state);
executor.observers_mut().post_exec_all(state, input)?;
executor
.observers_mut()
.post_exec_all(state, input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
let cnt = executor

View File

@ -63,7 +63,7 @@ where
mark_feature_time!(state, PerfFeature::PreExecObservers);
start_timer!(state);
let _ = self
let exit_kind = self
.tracer_executor
.run_target(fuzzer, state, manager, &input)?;
mark_feature_time!(state, PerfFeature::TargetExecution);
@ -73,7 +73,7 @@ where
start_timer!(state);
self.tracer_executor
.observers_mut()
.post_exec_all(state, &input)?;
.post_exec_all(state, &input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(())
@ -143,7 +143,7 @@ where
mark_feature_time!(state, PerfFeature::PreExecObservers);
start_timer!(state);
let _ = executor.run_target(fuzzer, state, manager, &input)?;
let exit_kind = executor.run_target(fuzzer, state, manager, &input)?;
mark_feature_time!(state, PerfFeature::TargetExecution);
*state.executions_mut() += 1;
@ -151,8 +151,10 @@ where
start_timer!(state);
executor
.shadow_observers_mut()
.post_exec_all(state, &input)?;
executor.observers_mut().post_exec_all(state, &input)?;
.post_exec_all(state, &input, &exit_kind)?;
executor
.observers_mut()
.post_exec_all(state, &input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(())

View File

@ -32,7 +32,7 @@ impl CoverageRuntime {
#[must_use]
pub fn new() -> Self {
Self {
map: [0u8; MAP_SIZE],
map: [0_u8; MAP_SIZE],
previous_pc: 0,
current_log_impl: 0,
blob_maybe_log: None,

View File

@ -6,6 +6,7 @@ use core::fmt::{self, Debug, Formatter};
use libafl::{
bolts::{ownedref::OwnedRefMut, tuples::Named},
executors::ExitKind,
observers::{CmpMap, CmpObserver, CmpValues, Observer},
state::HasMetadata,
Error,
@ -209,7 +210,7 @@ where
Ok(())
}
fn post_exec(&mut self, state: &mut S, _input: &I) -> Result<(), Error> {
fn post_exec(&mut self, state: &mut S, _input: &I, _exit_kind: &ExitKind) -> Result<(), Error> {
unsafe {
CMPLOG_ENABLED = 0;
}

View File

@ -5,9 +5,10 @@ cd "$SCRIPT_DIR/.."
# TODO: This should be rewritten in rust, a Makefile, or some platform-independent language
cd fuzzers
fuzzers=$(find ./fuzzers -maxdepth 1 -type d)
backtrace_fuzzers=$(find ./fuzzers/backtrace_baby_fuzzers -maxdepth 1 -type d)
for fuzzer in *;
for fuzzer in $(echo $fuzzers $backtrace_fuzzers);
do
cd $fuzzer
# Clippy checks