libafl-fuzz: separate frida build + cmplog debug (#2591)

* libafl-fuzz: separate frida build

* cmplog debug

* update

* merge AflStatsStage
move time_tracker stage to LibAFL

* mandate track_hit_feedbacks feature for AflStatsStage

* afl_stats do not hardcode TimeoutFeedback and CrashFeedback names

* typo

* typo

* fix generics order

* add verify timeouts stage

* libafl: introduce set_timeout func to dynamically set timeouts for executor
libafl-fuzz: add verify_timeout stage

* add missing set_timeout implementations

* libafl-fuzz: move set_timeout and timeout from Executor to HasTimeout

* libafl-fuzz: add removed gitignore

* remove timeout from libafl_nyx::Executor and move it to NyxHelper

* clippy

* fix HasTimeout for QemuExecutor

* libafl-fuzz: remove observer handle usage in verify_timeouts
misc: remove prelude imports

* libafl-fuzz: fix foreign_sync_dirs option

* fmt && clippy

* clippy && fmt

* missing doc

* clippy

* bruh

* damned doc build

* trait fix

* impl HasTimeout for InProcessExecutor only if std

* clippy

* fix typo

* fix nostd build

* clippy

* remove most HasTimeout implementations for now

* typo

* remove redundant import

* misc

* fmt

* simplify trait bounds

* add old AflStatsStage back and rename it to StatsStage

* fix ci

* make set_timeout and timeout of HasTimeout inline

* fmt

* add gitignore

* serde_any fix

* tmate

* misc

* remove tmate

* test

* coordinate between capture_timeout and verify_timeout

* makefile

* fix

* fix

* fmt

* increase cmplog timeout

* semantic

* debug

* debug

* remove debug

* only test libafl-fuzz on CI for now

* better seed for cmplog?

* remove preflight check for now

* set Input type in forkserver

* debug

* tmate

* fix capture_timeout

* revert workflow

* run only libafl-fuzz

* remove pre-flight

* re-enable fuzzers on CI

* move capture_timeouts and verify_timeouts to main lib

* run fmt

* add note for verify timeouts

* add note in verify timeouts stage

* typo

---------

Co-authored-by: Dominik Maier <domenukk@gmail.com>
Aarnav 2024-10-28 11:40:04 +01:00 committed by GitHub
parent 42b306a39f
commit 58fad2befd
26 changed files with 753 additions and 217 deletions

View File

@ -84,6 +84,7 @@ pub fn main() {
#[derive(Debug)]
struct MyExecutor {
shmem_id: ShMemId,
timeout: Duration,
}
impl CommandConfigurator<BytesInput> for MyExecutor {
@ -106,11 +107,16 @@ pub fn main() {
}
fn exec_timeout(&self) -> Duration {
Duration::from_secs(5)
self.timeout
}
fn exec_timeout_mut(&mut self) -> &mut Duration {
&mut self.timeout
}
}
let mut executor = MyExecutor { shmem_id }.into_executor(tuple_list!(observer, bt_observer));
let timeout = Duration::from_secs(5);
let mut executor =
MyExecutor { shmem_id, timeout }.into_executor(tuple_list!(observer, bt_observer));
// Generator of printable bytearrays of max size 32
let mut generator = RandPrintablesGenerator::new(nonzero!(32));
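
The hunk above replaces the hard-coded five-second timeout with a struct field plus a mutable accessor, which is what later lets `HasTimeout` adjust a `CommandExecutor`'s timeout at runtime. A minimal standalone sketch of that pattern, assuming nothing from LibAFL (the `MyConfigurator` name is illustrative only):

```rust
use std::time::Duration;

// The timeout lives in a field instead of a constant; a mutable accessor lets
// callers (e.g. a `HasTimeout` implementation) change it after construction.
struct MyConfigurator {
    timeout: Duration,
}

impl MyConfigurator {
    fn exec_timeout(&self) -> Duration {
        self.timeout
    }
    fn exec_timeout_mut(&mut self) -> &mut Duration {
        &mut self.timeout
    }
}

fn main() {
    let mut cfg = MyConfigurator {
        timeout: Duration::from_secs(5),
    };
    // Double the timeout, much like VerifyTimeoutsStage does for suspected hangs.
    let doubled = cfg.exec_timeout() * 2;
    *cfg.exec_timeout_mut() = doubled;
    assert_eq!(cfg.exec_timeout(), Duration::from_secs(10));
}
```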

View File

@ -4,7 +4,7 @@
#include <string.h>
extern "C" __declspec(dllexport) size_t
LLVMFuzzerTestOneInput(const char *data, unsigned int len) {
LLVMFuzzerTestOneInput(const char *data, unsigned int len) {
if (data[0] == 'b') {
if (data[1] == 'a') {
if (data[2] == 'd') {

View File

@ -23,8 +23,8 @@ use libafl::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler,
},
stages::{
calibrate::CalibrationStage, power::StdPowerMutationalStage, AflStatsStage, IfStage,
ShadowTracingStage, StagesTuple, StdMutationalStage,
calibrate::CalibrationStage, power::StdPowerMutationalStage, IfStage, ShadowTracingStage,
StagesTuple, StatsStage, StdMutationalStage,
},
state::{HasCorpus, StdState, UsesState},
Error, HasMetadata, NopFuzzer,
@ -138,7 +138,7 @@ impl<M: Monitor> Instance<'_, M> {
let stats_stage = IfStage::new(
|_, _, _, _| Ok(self.options.tui),
tuple_list!(AflStatsStage::new(Duration::from_secs(5))),
tuple_list!(StatsStage::new(Duration::from_secs(5))),
);
// Feedback to rate the interestingness of an input

View File

@ -12,7 +12,7 @@ FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}'
LLVM_CONFIG = { value = "llvm-config-18", condition = { env_not_set = [
"LLVM_CONFIG",
] } }
AFL_VERSION = "8b35dd49be5f846e945f6d6a9414623d195a99cb"
AFL_VERSION = "78b7e14c73baacf1d88b3c03955e78f5080d17ba"
AFL_DIR = { value = "${PROJECT_DIR}/AFLplusplus" }
AFL_CC_PATH = { value = "${AFL_DIR}/afl-clang-fast" }
CC = { value = "clang" }
@ -25,12 +25,16 @@ if [ ! -d "$AFL_DIR" ]; then
cd ${AFL_DIR}
git checkout ${AFL_VERSION}
LLVM_CONFIG=${LLVM_CONFIG} make
fi
'''
[tasks.build_frida_mode]
script_runner = '@shell'
script = '''
cd ${AFL_DIR}
cd frida_mode
LLVM_CONFIG=${LLVM_CONFIG} make
cd ../..
fi
'''
[tasks.build_qemuafl]
script_runner = "@shell"
script = '''
@ -77,7 +81,7 @@ script = '''
AFL_PATH=${AFL_DIR} ${AFL_CC_PATH} ./test/test-instr.c -o ./test/out-instr
export LIBAFL_DEBUG_OUTPUT=1
export AFL_CORES=1
export AFL_CORES=0
export AFL_STATS_INTERVAL=1
timeout 5 ${FUZZER} -i ./test/seeds -o ./test/output ./test/out-instr || true
@ -109,7 +113,7 @@ script_runner = "@shell"
script = '''
# cmplog TODO: AFL_BENCH_UNTIL_CRASH=1 instead of timeout 15s
AFL_LLVM_CMPLOG=1 AFL_PATH=${AFL_DIR} ${AFL_CC_PATH} ./test/test-cmplog.c -o ./test/out-cmplog
AFL_CORES=1 timeout 5 ${FUZZER} -Z -l 3 -m 0 -V30 -i ./test/seeds_cmplog -o ./test/output-cmplog -c 0 ./test/out-cmplog || true
LIBAFL_DEBUG_OUTPUT=1 AFL_CORES=0 timeout 10 ${FUZZER} -Z -l 3 -m 0 -V30 -i ./test/seeds_cmplog -o ./test/output-cmplog -c 0 ./test/out-cmplog || true
test -n "$( ls ${PROJECT_DIR}/test/output-cmplog/fuzzer_main/hangs/id:0000* ${PROJECT_DIR}/test/output-cmplog/fuzzer_main/crashes/id:0000*)" || {
echo "No crashes found"
exit 1
@ -123,7 +127,7 @@ script = '''
${CC} -no-pie ./test/test-instr.c -o ./test/out-frida
export AFL_PATH=${AFL_DIR}
export AFL_CORES=1
export AFL_CORES=0
export AFL_STATS_INTERVAL=1
timeout 5 ${FUZZER} -m 0 -O -i ./test/seeds_frida -o ./test/output-frida -- ./test/out-frida || true
@ -162,7 +166,7 @@ test -n "$RUNTIME" -a -n "$RUNTIME_PERSISTENT" && {
unset AFL_FRIDA_PERSISTENT_ADDR
'''
dependencies = ["build_afl", "build_libafl_fuzz"]
dependencies = ["build_afl", "build_frida_mode", "build_libafl_fuzz"]
[tasks.test_qemu]
script_runner = "@shell"
@ -171,7 +175,7 @@ ${CC} -pie -fPIE ./test/test-instr.c -o ./test/out-qemu
${CC} -o ./test/out-qemu-cmpcov ./test/test-cmpcov.c
export AFL_PATH=${AFL_DIR}
export AFL_CORES=1
export AFL_CORES=0
export AFL_STATS_INTERVAL=1
timeout 5 ${FUZZER} -m 0 -Q -i ./test/seeds_qemu -o ./test/output-qemu -- ./test/out-qemu || true
@ -202,7 +206,7 @@ dependencies = ["build_afl", "build_qemuafl", "build_libafl_fuzz"]
script_runner = "@shell"
script = '''
export AFL_PATH=${AFL_DIR}
export AFL_CORES=1
export AFL_CORES=0
export AFL_STATS_INTERVAL=1
# TODO: test unicorn persistent mode once it's fixed on AFL++

View File

@ -128,7 +128,7 @@ pub fn check_autoresume(fuzzer_dir: &Path, auto_resume: bool) -> Result<Flock<Fi
}
}
if !auto_resume && last_update.saturating_sub(start_time) > OUTPUT_GRACE * 60 {
return Err(Error::illegal_state("The job output directory already exists and contains results! use AFL_AUTORESUME=true or provide \"-\" for -i "));
return Err(Error::illegal_state("The job output directory already exists and contains results! use AFL_AUTORESUME=1 or provide \"-\" for -i "));
}
}
if !auto_resume {

View File

@ -1,6 +1,6 @@
use std::{collections::HashMap, path::PathBuf, time::Duration};
use libafl::Error;
use libafl::{stages::afl_stats::AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS, Error};
use libafl_bolts::core_affinity::Cores;
use crate::Opt;
@ -73,6 +73,8 @@ pub fn parse_envs(opt: &mut Opt) -> Result<(), Error> {
}
if let Ok(res) = std::env::var("AFL_FUZZER_STATS_UPDATE_INTERVAL") {
opt.stats_interval = res.parse()?;
} else {
opt.stats_interval = AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS;
}
if let Ok(res) = std::env::var("AFL_BROKER_PORT") {
opt.broker_port = Some(res.parse()?);

View File

@ -65,7 +65,7 @@ where
if !self.ignore_timeouts {
if !self.ignore_seed_issues || self.exit_on_seed_issues {
return Err(Error::invalid_corpus(
"input led to a timeout; use AFL_IGNORE_SEED_ISSUES=true",
"input led to a timeout; use AFL_IGNORE_SEED_ISSUES=1",
));
}
return Ok(false);

View File

@ -1,7 +1,9 @@
use std::{
borrow::Cow,
cell::RefCell,
marker::PhantomData,
path::{Path, PathBuf},
rc::Rc,
time::Duration,
};
@ -13,7 +15,9 @@ use libafl::{
},
executors::forkserver::{ForkserverExecutor, ForkserverExecutorBuilder},
feedback_and, feedback_or, feedback_or_fast,
feedbacks::{ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback},
feedbacks::{
CaptureTimeoutFeedback, ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback,
},
fuzzer::StdFuzzer,
inputs::{BytesInput, NopTargetBytesConverter},
mutators::{havoc_mutations, tokens_mutations, AFLppRedQueen, StdScheduledMutator, Tokens},
@ -23,8 +27,11 @@ use libafl::{
IndexesLenTimeMinimizerScheduler, QueueScheduler, StdWeightedScheduler,
},
stages::{
mutational::MultiMutationalStage, CalibrationStage, ColorizationStage, IfStage,
StagesTuple, StdMutationalStage, StdPowerMutationalStage, SyncFromDiskStage,
afl_stats::{AflStatsStage, CalibrationTime, FuzzTime, SyncTime},
mutational::MultiMutationalStage,
time_tracker::TimeTrackingStageWrapper,
CalibrationStage, ColorizationStage, IfStage, StagesTuple, StdMutationalStage,
StdPowerMutationalStage, SyncFromDiskStage, VerifyTimeoutsStage,
},
state::{
HasCorpus, HasCurrentTestcase, HasExecutions, HasLastReportTime, HasStartTime, StdState,
@ -46,7 +53,6 @@ use libafl_targets::{cmps::AFLppCmpLogMap, AFLppCmpLogObserver, AFLppCmplogTraci
use serde::{Deserialize, Serialize};
use crate::{
afl_stats::{AflStatsStage, CalibrationTime, FuzzTime, SyncTime},
corpus::{set_corpus_filepath, set_solution_filepath},
env_parser::AFL_DEFAULT_MAP_SIZE,
executor::find_afl_binary,
@ -55,7 +61,7 @@ use crate::{
seed::SeedFeedback,
},
scheduler::SupportedSchedulers,
stages::{mutational_stage::SupportedMutationalStages, time_tracker::TimeTrackingStageWrapper},
stages::mutational_stage::SupportedMutationalStages,
Opt, AFL_DEFAULT_INPUT_LEN_MAX, AFL_DEFAULT_INPUT_LEN_MIN, AFL_HARNESS_FILE_INPUT,
SHMEM_ENV_VAR,
};
@ -109,17 +115,21 @@ where
let mut tokens = Tokens::new();
tokens = tokens.add_from_files(&opt.dicts)?;
let user_token_count = tokens.len();
// Create a AFLStatsStage;
let afl_stats_stage = AflStatsStage::new(
opt,
fuzzer_dir.to_path_buf(),
&edges_observer,
user_token_count,
!opt.no_autodict,
core_id,
);
let afl_stats_stage = AflStatsStage::builder()
.stats_file(fuzzer_dir.join("fuzzer_stats"))
.plot_file(fuzzer_dir.join("plot_data"))
.core_id(core_id)
.report_interval(Duration::from_secs(opt.stats_interval))
.map_observer(&edges_observer)
.uses_autotokens(!opt.no_autodict)
.tokens(&tokens)
.banner(opt.executable.display().to_string())
.version("0.13.2".to_string())
.exec_timeout(opt.hang_timeout)
.target_mode(fuzzer_target_mode(opt).to_string())
.build()
.expect("invariant; should never occur");
// Create an observation channel to keep track of the execution time.
let time_observer = TimeObserver::new("time");
@ -140,6 +150,20 @@ where
opt,
);
// We need to share this reference as [`VerifyTimeoutsStage`] will toggle this
// value before re-running the alleged timeouts so we don't keep capturing timeouts infinitely.
let enable_capture_timeouts = Rc::new(RefCell::new(false));
let capture_timeout_feedback = CaptureTimeoutFeedback::new(Rc::clone(&enable_capture_timeouts));
// Like AFL++ we re-run all timeouts with double the timeout to assert that they are not false positives
let timeout_verify_stage = IfStage::new(
|_, _, _, _| Ok(!opt.ignore_timeouts),
tuple_list!(VerifyTimeoutsStage::new(
enable_capture_timeouts,
Duration::from_millis(opt.hang_timeout),
)),
);
/*
* Feedback to decide if the Input is "solution worthy".
* We check if it's a crash or a timeout (if we are configured to consider timeouts)
@ -153,7 +177,7 @@ where
CrashFeedback::new(),
feedback_and!(
ConstFeedback::new(!opt.ignore_timeouts),
TimeoutFeedback::new()
capture_timeout_feedback,
)
),
MaxMapFeedback::with_name("edges_objective", &edges_observer)
@ -396,6 +420,7 @@ where
calibration,
cmplog,
mutational_stage,
timeout_verify_stage,
afl_stats_stage,
sync_stage
);
@ -411,7 +436,13 @@ where
)?;
} else {
// The order of the stages matter!
let mut stages = tuple_list!(calibration, mutational_stage, afl_stats_stage, sync_stage);
let mut stages = tuple_list!(
calibration,
mutational_stage,
timeout_verify_stage,
afl_stats_stage,
sync_stage
);
// Run our fuzzer; NO CmpLog
run_fuzzer_with_stages(

View File

@ -66,7 +66,6 @@
)]
use std::{collections::HashMap, path::PathBuf, time::Duration};
mod afl_stats;
mod env_parser;
mod feedback;
mod scheduler;
@ -188,7 +187,7 @@ struct Opt {
#[arg(short = 'c')]
cmplog: Option<String>,
/// sync to a foreign fuzzer queue directory (requires -M, can be specified up to 32 times)
#[arg(short = 'F', num_args = 32)]
#[arg(short = 'F')]
foreign_sync_dirs: Vec<PathBuf>,
/// fuzzer dictionary (see README.md)
#[arg(short = 'x')]

View File

@ -1,2 +1 @@
pub mod mutational_stage;
pub mod time_tracker;

View File

@ -1 +1 @@
00000000000000000000000000000000
鲻鰑糃技嬥

View File

@ -256,4 +256,8 @@ impl CommandConfigurator<BytesInput> for MyCommandConfigurator {
fn exec_timeout(&self) -> Duration {
Duration::from_secs(5)
}
fn exec_timeout_mut(&mut self) -> &mut Duration {
todo!()
}
}

View File

@ -1,10 +1,11 @@
//! A `CombinedExecutor` wraps a primary executor and a secondary one
//! In comparison to the [`crate::executors::DiffExecutor`] it does not run the secondary executor in `run_target`.
use core::fmt::Debug;
use core::{fmt::Debug, time::Duration};
use libafl_bolts::tuples::RefIndexable;
use super::HasTimeout;
use crate::{
executors::{Executor, ExitKind, HasObservers},
state::{HasExecutions, UsesState},
@ -60,6 +61,27 @@ where
}
}
impl<A, B> HasTimeout for CombinedExecutor<A, B>
where
A: HasTimeout,
B: HasTimeout,
{
#[inline]
fn set_timeout(&mut self, timeout: Duration) {
self.primary.set_timeout(timeout);
self.secondary.set_timeout(timeout);
}
#[inline]
fn timeout(&self) -> Duration {
assert!(
self.primary.timeout() == self.secondary.timeout(),
"Primary and Secondary Executors have different timeouts!"
);
self.primary.timeout()
}
}
impl<A, B> UsesState for CombinedExecutor<A, B>
where
A: UsesState,

View File

@ -23,13 +23,15 @@ use libafl_bolts::{
AsSlice,
};
use super::HasTimeout;
#[cfg(all(feature = "std", unix))]
use crate::executors::{Executor, ExitKind};
use crate::{
corpus::Corpus,
executors::HasObservers,
inputs::{HasTargetBytes, UsesInput},
observers::{ObserversTuple, StdErrObserver, StdOutObserver},
state::{HasExecutions, State, UsesState},
state::{HasCorpus, HasExecutions, State, UsesState},
std::borrow::ToOwned,
};
#[cfg(feature = "std")]
@ -151,6 +153,9 @@ where
fn exec_timeout(&self) -> Duration {
self.timeout
}
fn exec_timeout_mut(&mut self) -> &mut Duration {
&mut self.timeout
}
}
/// A `CommandExecutor` is a wrapper around [`std::process::Command`] to execute a target as a child process.
@ -283,6 +288,22 @@ where
}
}
impl<OT, S, T> HasTimeout for CommandExecutor<OT, S, T>
where
S: HasCorpus,
T: CommandConfigurator<<S::Corpus as Corpus>::Input>,
{
#[inline]
fn set_timeout(&mut self, timeout: Duration) {
*self.configurer.exec_timeout_mut() = timeout;
}
#[inline]
fn timeout(&self) -> Duration {
self.configurer.exec_timeout()
}
}
impl<OT, S, T> UsesState for CommandExecutor<OT, S, T>
where
S: State,
@ -565,6 +586,9 @@ impl CommandExecutorBuilder {
/// fn exec_timeout(&self) -> Duration {
/// Duration::from_secs(5)
/// }
/// fn exec_timeout_mut(&mut self) -> &mut Duration {
/// todo!()
/// }
/// }
///
/// fn make_executor<EM, Z>() -> impl Executor<EM, Z>
@ -592,6 +616,8 @@ pub trait CommandConfigurator<I>: Sized {
/// Provides timeout duration for execution of the child process.
fn exec_timeout(&self) -> Duration;
/// Set the timeout duration for execution of the child process.
fn exec_timeout_mut(&mut self) -> &mut Duration;
/// Create an `Executor` from this `CommandConfigurator`.
fn into_executor<OT, S>(self, observers: OT) -> CommandExecutor<OT, S, Self>

View File

@ -16,6 +16,7 @@ use libafl_bolts::{
};
use serde::{Deserialize, Serialize};
use super::HasTimeout;
use crate::{
corpus::Corpus,
executors::{Executor, ExitKind, HasObservers},
@ -120,6 +121,27 @@ where
}
}
impl<A, B, DOT, OTA, OTB> HasTimeout for DiffExecutor<A, B, DOT, OTA, OTB>
where
A: HasTimeout,
B: HasTimeout,
{
#[inline]
fn set_timeout(&mut self, timeout: core::time::Duration) {
self.primary.set_timeout(timeout);
self.secondary.set_timeout(timeout);
}
#[inline]
fn timeout(&self) -> core::time::Duration {
assert!(
self.primary.timeout() == self.secondary.timeout(),
"Primary and Secondary Executors have different timeouts!"
);
self.primary.timeout()
}
}
/// Proxy the observers of the inner executors
#[derive(Serialize, Deserialize, Debug)]
#[serde(

View File

@ -36,6 +36,7 @@ use nix::{
unistd::Pid,
};
use super::HasTimeout;
#[cfg(feature = "regex")]
use crate::observers::{
get_asan_runtime_flags, get_asan_runtime_flags_with_log_path, AsanBacktraceObserver,
@ -1547,6 +1548,21 @@ where
}
}
impl<TC, OT, S, SP> HasTimeout for ForkserverExecutor<TC, OT, S, SP>
where
SP: ShMemProvider,
{
#[inline]
fn set_timeout(&mut self, timeout: Duration) {
self.timeout = TimeSpec::from_duration(timeout);
}
#[inline]
fn timeout(&self) -> Duration {
self.timeout.into()
}
}
impl<TC, OT, S, SP> UsesState for ForkserverExecutor<TC, OT, S, SP>
where
S: State,

View File

@ -80,6 +80,40 @@ where
type State = S;
}
#[cfg(target_os = "linux")]
fn parse_itimerspec(timeout: Duration) -> libc::itimerspec {
let milli_sec = timeout.as_millis();
let it_value = libc::timespec {
tv_sec: (milli_sec / 1000) as _,
tv_nsec: ((milli_sec % 1000) * 1000 * 1000) as _,
};
let it_interval = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
libc::itimerspec {
it_interval,
it_value,
}
}
#[cfg(not(target_os = "linux"))]
fn parse_itimerval(timeout: Duration) -> Itimerval {
let milli_sec = timeout.as_millis();
let it_value = Timeval {
tv_sec: (milli_sec / 1000) as i64,
tv_usec: (milli_sec % 1000) as i64,
};
let it_interval = Timeval {
tv_sec: 0,
tv_usec: 0,
};
Itimerval {
it_interval,
it_value,
}
}
impl<EM, HT, OT, S, SP, Z> GenericInProcessForkExecutorInner<HT, OT, S, SP, EM, Z>
where
OT: ObserversTuple<S::Input, S> + Debug,
@ -234,21 +268,7 @@ where
let default_hooks = InChildProcessHooks::new::<Self>()?;
let mut hooks = tuple_list!(default_hooks).merge(userhooks);
hooks.init_all::<Self>(state);
let milli_sec = timeout.as_millis();
let it_value = libc::timespec {
tv_sec: (milli_sec / 1000) as _,
tv_nsec: ((milli_sec % 1000) * 1000 * 1000) as _,
};
let it_interval = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
let itimerspec = libc::itimerspec {
it_interval,
it_value,
};
let itimerspec = parse_itimerspec(timeout);
Ok(Self {
shmem_provider,
observers,
@ -274,19 +294,7 @@ where
let mut hooks = tuple_list!(default_hooks).merge(userhooks);
hooks.init_all::<Self>(state);
let milli_sec = timeout.as_millis();
let it_value = Timeval {
tv_sec: (milli_sec / 1000) as i64,
tv_usec: (milli_sec % 1000) as i64,
};
let it_interval = Timeval {
tv_sec: 0,
tv_usec: 0,
};
let itimerval = Itimerval {
it_interval,
it_value,
};
let itimerval = parse_itimerval(timeout);
Ok(Self {
shmem_provider,
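
The change above only hoists the existing millisecond-splitting code into the `parse_itimerspec` / `parse_itimerval` helpers. A tiny standalone check of the `itimerspec` arithmetic (the `split_millis` helper below is made up for illustration):

```rust
// Seconds come from ms / 1000; the remainder becomes nanoseconds,
// as in parse_itimerspec above.
fn split_millis(ms: u128) -> (i64, i64) {
    ((ms / 1000) as i64, ((ms % 1000) * 1_000_000) as i64)
}

fn main() {
    assert_eq!(split_millis(1500), (1, 500_000_000));
    assert_eq!(split_millis(250), (0, 250_000_000));
}
```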

View File

@ -2,7 +2,7 @@
#[cfg(unix)]
use alloc::vec::Vec;
use core::fmt::Debug;
use core::{fmt::Debug, time::Duration};
pub use combined::CombinedExecutor;
#[cfg(all(feature = "std", any(unix, doc)))]
@ -144,6 +144,15 @@ where
}
}
/// A trait that allows getting/setting an `Executor`'s timeout threshold
pub trait HasTimeout {
/// Get the timeout
fn timeout(&self) -> Duration;
/// Set the timeout
fn set_timeout(&mut self, timeout: Duration);
}
/// The common signals we want to handle
#[cfg(unix)]
#[inline]
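
The new `HasTimeout` trait is what `VerifyTimeoutsStage` uses to temporarily double an executor's timeout; wrapper executors such as `ShadowExecutor`, `CombinedExecutor`, and `DiffExecutor` implement it by delegating to whatever they wrap (see the other hunks in this diff). A standalone sketch of that delegation pattern; the trait is re-declared here only so the snippet compiles on its own, and `Inner` / `Wrapper` are illustrative stand-ins:

```rust
use std::time::Duration;

// Mirrors libafl::executors::HasTimeout from the hunk above.
pub trait HasTimeout {
    fn timeout(&self) -> Duration;
    fn set_timeout(&mut self, timeout: Duration);
}

// A leaf executor stores its own timeout...
struct Inner {
    timeout: Duration,
}

impl HasTimeout for Inner {
    fn timeout(&self) -> Duration {
        self.timeout
    }
    fn set_timeout(&mut self, timeout: Duration) {
        self.timeout = timeout;
    }
}

// ...while a wrapper simply forwards both calls to the executor it wraps.
struct Wrapper<E> {
    inner: E,
}

impl<E: HasTimeout> HasTimeout for Wrapper<E> {
    fn timeout(&self) -> Duration {
        self.inner.timeout()
    }
    fn set_timeout(&mut self, timeout: Duration) {
        self.inner.set_timeout(timeout);
    }
}

fn main() {
    let mut exec = Wrapper {
        inner: Inner {
            timeout: Duration::from_secs(1),
        },
    };
    exec.set_timeout(Duration::from_secs(2));
    assert_eq!(exec.timeout(), Duration::from_secs(2));
}
```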

View File

@ -1,9 +1,13 @@
//! A `ShadowExecutor` wraps an executor to have shadow observer that will not be considered by the feedbacks and the manager
use core::fmt::{self, Debug, Formatter};
use core::{
fmt::{self, Debug, Formatter},
time::Duration,
};
use libafl_bolts::tuples::RefIndexable;
use super::HasTimeout;
use crate::{
executors::{Executor, ExitKind, HasObservers},
inputs::UsesInput,
@ -77,6 +81,20 @@ where
}
}
impl<E, SOT> HasTimeout for ShadowExecutor<E, SOT>
where
E: HasTimeout,
{
#[inline]
fn set_timeout(&mut self, timeout: Duration) {
self.executor.set_timeout(timeout);
}
#[inline]
fn timeout(&self) -> Duration {
self.executor.timeout()
}
}
impl<E, SOT> UsesState for ShadowExecutor<E, SOT>
where
E: UsesState,

View File

@ -0,0 +1,77 @@
//! Feedback that captures Timeouts for re-running
use std::{borrow::Cow, cell::RefCell, fmt::Debug, rc::Rc};
use libafl_bolts::{Error, Named};
use serde::{de::DeserializeOwned, Serialize};
use crate::{
corpus::Testcase,
executors::ExitKind,
feedbacks::{Feedback, StateInitializer},
stages::verify_timeouts::TimeoutsToVerify,
state::HasCorpus,
HasMetadata,
};
/// A Feedback that captures all timeouts and stores them in State for re-evaluation later.
/// Use in conjunction with `VerifyTimeoutsStage`
#[derive(Debug)]
pub struct CaptureTimeoutFeedback {
enabled: Rc<RefCell<bool>>,
}
impl CaptureTimeoutFeedback {
/// Create a new [`CaptureTimeoutFeedback`].
pub fn new(enabled: Rc<RefCell<bool>>) -> Self {
Self { enabled }
}
}
impl Named for CaptureTimeoutFeedback {
fn name(&self) -> &Cow<'static, str> {
static NAME: Cow<'static, str> = Cow::Borrowed("CaptureTimeoutFeedback");
&NAME
}
}
impl<S> StateInitializer<S> for CaptureTimeoutFeedback {}
impl<EM, I, OT, S> Feedback<EM, I, OT, S> for CaptureTimeoutFeedback
where
S: HasCorpus + HasMetadata,
I: Debug + Serialize + DeserializeOwned + Default + 'static + Clone,
{
#[allow(clippy::wrong_self_convention)]
#[inline]
fn is_interesting(
&mut self,
state: &mut S,
_manager: &mut EM,
input: &I,
_observers: &OT,
exit_kind: &ExitKind,
) -> Result<bool, Error> {
if *self.enabled.borrow() && matches!(exit_kind, ExitKind::Timeout) {
let timeouts = state.metadata_or_insert_with(|| TimeoutsToVerify::<I>::new());
timeouts.push(input.clone());
return Ok(false);
}
Ok(matches!(exit_kind, ExitKind::Timeout))
}
fn append_metadata(
&mut self,
_state: &mut S,
_manager: &mut EM,
_observers: &OT,
_testcase: &mut Testcase<I>,
) -> Result<(), Error> {
Ok(())
}
#[cfg(feature = "track_hit_feedbacks")]
#[inline]
fn last_result(&self) -> Result<bool, Error> {
Ok(false)
}
}
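
`CaptureTimeoutFeedback` coordinates with `VerifyTimeoutsStage` through a shared `Rc<RefCell<bool>>`: while the flag is set, timed-out inputs are stashed in the `TimeoutsToVerify` metadata instead of being reported, and the verify stage clears the flag before replaying them so its own re-runs cannot be captured again. A toy, self-contained model of that handshake (the types below are illustrative stand-ins, not LibAFL API):

```rust
use std::{cell::RefCell, collections::VecDeque, rc::Rc};

struct CaptureTimeouts {
    enabled: Rc<RefCell<bool>>,
    queued: VecDeque<Vec<u8>>,
}

impl CaptureTimeouts {
    // Returns true when a timeout should be reported as a real hang.
    fn on_timeout(&mut self, input: Vec<u8>) -> bool {
        if *self.enabled.borrow() {
            self.queued.push_back(input);
            return false; // swallow it for now; it will be replayed later
        }
        true
    }
}

fn main() {
    let enabled = Rc::new(RefCell::new(true));
    let mut capture = CaptureTimeouts {
        enabled: Rc::clone(&enabled),
        queued: VecDeque::new(),
    };

    // A suspected hang is captured, not reported.
    assert!(!capture.on_timeout(b"maybe-a-hang".to_vec()));

    // The verify stage disables capturing so its replays cannot loop forever...
    *enabled.borrow_mut() = false;
    while let Some(input) = capture.queued.pop_front() {
        // ...and an input that times out again now counts as a real hang.
        assert!(capture.on_timeout(input));
    }
    *enabled.borrow_mut() = true;
}
```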

View File

@ -27,6 +27,10 @@ pub use new_hash_feedback::NewHashFeedbackMetadata;
use serde::{Deserialize, Serialize};
use crate::{corpus::Testcase, executors::ExitKind, observers::TimeObserver, Error};
#[cfg(feature = "std")]
pub mod capture_feedback;
#[cfg(feature = "std")]
pub mod concolic;
#[cfg(feature = "std")]
@ -44,6 +48,9 @@ pub mod new_hash_feedback;
pub mod stdio;
pub mod transferred;
#[cfg(feature = "std")]
pub use capture_feedback::CaptureTimeoutFeedback;
#[cfg(feature = "introspection")]
use crate::state::HasClientPerfMonitor;
@ -776,25 +783,28 @@ pub trait ExitKindLogic {
/// Check whether the provided [`ExitKind`] is actually interesting
fn check_exit_kind(kind: &ExitKind) -> Result<bool, Error>;
}
/// Name used by `CrashFeedback`
pub const CRASH_FEEDBACK_NAME: &str = "CrashFeedback";
/// Logic which finds all [`ExitKind::Crash`] exits interesting
#[derive(Debug, Copy, Clone)]
pub struct CrashLogic;
impl ExitKindLogic for CrashLogic {
const NAME: Cow<'static, str> = Cow::Borrowed("CrashFeedback");
const NAME: Cow<'static, str> = Cow::Borrowed(CRASH_FEEDBACK_NAME);
fn check_exit_kind(kind: &ExitKind) -> Result<bool, Error> {
Ok(matches!(kind, ExitKind::Crash))
}
}
/// Name used by `TimeoutFeedback`
pub const TIMEOUT_FEEDBACK_NAME: &str = "TimeoutFeedback";
/// Logic which finds all [`ExitKind::Timeout`] exits interesting
#[derive(Debug, Copy, Clone)]
pub struct TimeoutLogic;
impl ExitKindLogic for TimeoutLogic {
const NAME: Cow<'static, str> = Cow::Borrowed("TimeoutFeedback");
const NAME: Cow<'static, str> = Cow::Borrowed(TIMEOUT_FEEDBACK_NAME);
fn check_exit_kind(kind: &ExitKind) -> Result<bool, Error> {
Ok(matches!(kind, ExitKind::Timeout))

View File

@ -1,3 +1,5 @@
//! Stage to compute and report AFL++ stats
use alloc::{string::String, vec::Vec};
use core::{marker::PhantomData, time::Duration};
use std::{
borrow::Cow,
@ -8,37 +10,46 @@ use std::{
process,
};
use libafl::{
corpus::{Corpus, HasCurrentCorpusId, SchedulerTestcaseMetadata, Testcase},
events::EventFirer,
executors::HasObservers,
inputs::UsesInput,
mutators::Tokens,
observers::MapObserver,
schedulers::{minimizer::IsFavoredMetadata, HasQueueCycles},
stages::{calibrate::UnstableEntriesMetadata, Stage},
state::{HasCorpus, HasExecutions, HasImported, HasStartTime, Stoppable, UsesState},
Error, HasMetadata, HasNamedMetadata, HasScheduler, SerdeAny,
};
#[cfg(unix)]
use libafl_bolts::os::peak_rss_mb_child_processes;
use libafl_bolts::{
core_affinity::CoreId,
current_time,
os::peak_rss_mb_child_processes,
tuples::{Handle, Handled, MatchNameRef},
Named,
};
use serde::{Deserialize, Serialize};
use crate::{fuzzer::fuzzer_target_mode, Opt};
#[cfg(feature = "track_hit_feedbacks")]
use crate::feedbacks::{CRASH_FEEDBACK_NAME, TIMEOUT_FEEDBACK_NAME};
use crate::{
corpus::{Corpus, HasCurrentCorpusId, SchedulerTestcaseMetadata, Testcase},
events::EventFirer,
executors::HasObservers,
mutators::Tokens,
observers::MapObserver,
schedulers::{minimizer::IsFavoredMetadata, HasQueueCycles},
stages::{calibrate::UnstableEntriesMetadata, Stage},
state::{HasCorpus, HasExecutions, HasImported, HasStartTime, Stoppable, UsesState},
std::string::ToString,
Error, HasMetadata, HasNamedMetadata, HasScheduler,
};
/// AFL++'s default stats update interval
pub const AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS: u64 = 60;
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
/// `CalibrationTime` - Use in conjunction with `TimeTrackingFeedback`
#[derive(Debug, Serialize, Deserialize)]
pub struct CalibrationTime(pub Duration);
impl From<Duration> for CalibrationTime {
fn from(value: Duration) -> Self {
Self(value)
}
}
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
libafl_bolts::impl_serdeany!(CalibrationTime);
/// `SyncTime` - Use in conjunction with `TimeTrackingFeedback`
#[derive(Debug, Serialize, Deserialize)]
pub struct SyncTime(pub Duration);
impl From<Duration> for SyncTime {
fn from(value: Duration) -> Self {
@ -46,7 +57,10 @@ impl From<Duration> for SyncTime {
}
}
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
libafl_bolts::impl_serdeany!(SyncTime);
/// `FuzzTime` - Use in conjunction with `TimeTrackingFeedback`
#[derive(Debug, Serialize, Deserialize)]
pub struct FuzzTime(pub Duration);
impl From<Duration> for FuzzTime {
fn from(value: Duration) -> Self {
@ -54,12 +68,15 @@ impl From<Duration> for FuzzTime {
}
}
libafl_bolts::impl_serdeany!(FuzzTime);
/// The [`AflStatsStage`] is a Stage that calculates and writes
/// AFL++'s `fuzzer_stats` and `plot_data` information.
#[derive(Debug, Clone)]
pub struct AflStatsStage<C, O, E, EM, Z> {
pub struct AflStatsStage<C, E, EM, O, Z> {
map_observer_handle: Handle<C>,
fuzzer_dir: PathBuf,
stats_file_path: PathBuf,
plot_file_path: Option<PathBuf>,
start_time: u64,
// the number of testcases that have been fuzzed
has_fuzzed_size: usize,
@ -90,14 +107,15 @@ pub struct AflStatsStage<C, O, E, EM, Z> {
/// full command line used for the fuzzing session
command_line: Cow<'static, str>,
/// Amount of tokens provided by the user. Used to determine autotokens count.
provided_tokens: usize,
dict_count: usize,
/// autotokens are enabled
autotokens_enabled: bool,
/// The core we are bound to
core_id: CoreId,
phantom: PhantomData<(C, O, E, EM, Z)>,
phantom_data: PhantomData<(O, E, EM, Z)>,
}
/// AFL++'s `fuzzer_stats`
#[derive(Debug, Clone)]
pub struct AFLFuzzerStats<'a> {
/// unix time indicating the start time of afl-fuzz
@ -196,7 +214,7 @@ pub struct AFLFuzzerStats<'a> {
/// full command line used for the fuzzing session
command_line: &'a str,
}
/// AFL++'s `plot_data`
#[derive(Debug, Clone)]
pub struct AFLPlotData<'a> {
relative_time: &'a u64,
@ -216,7 +234,7 @@ pub struct AFLPlotData<'a> {
edges_found: &'a u64,
}
impl<C, O, E, EM, Z> UsesState for AflStatsStage<C, O, E, EM, Z>
impl<C, E, EM, O, Z> UsesState for AflStatsStage<C, E, EM, O, Z>
where
E: UsesState,
EM: EventFirer<State = E::State>,
@ -225,7 +243,7 @@ where
type State = E::State;
}
impl<C, O, E, EM, Z> Stage<E, EM, Z> for AflStatsStage<C, O, E, EM, Z>
impl<C, E, EM, O, Z> Stage<E, EM, Z> for AflStatsStage<C, E, EM, O, Z>
where
E: UsesState + HasObservers,
EM: EventFirer<State = E::State>,
@ -256,7 +274,7 @@ where
));
};
let testcase = state.corpus().get(corpus_idx)?.borrow();
// NOTE: scheduled_count represents the amount of fuzzing iterations a
// NOTE: scheduled_count represents the amount of fuzz runs a
// testcase has had. Since this stage is kept at the very end of stage list,
// the entry would have been fuzzed already (and should contain IsFavoredMetadata) but would have a scheduled count of zero
// since the scheduled count is incremented after all stages have been run.
@ -264,13 +282,16 @@ where
// New testcase!
self.cycles_wo_finds = 0;
self.update_last_find();
self.maybe_update_last_crash(&testcase, state);
self.maybe_update_last_hang(&testcase, state);
#[cfg(feature = "track_hit_feedbacks")]
{
self.maybe_update_last_crash(&testcase, state);
self.maybe_update_last_hang(&testcase, state);
}
self.update_has_fuzzed_size();
self.maybe_update_is_favored_size(&testcase);
}
self.maybe_update_slowest_exec(&testcase);
self.maybe_update_max_depth(&testcase)?;
self.maybe_update_max_depth(&testcase);
// See if we actually need to run the stage, if not, avoid dynamic value computation.
if !self.check_interval() {
@ -302,7 +323,7 @@ where
state
.metadata::<Tokens>()?
.len()
.saturating_sub(self.provided_tokens)
.saturating_sub(self.dict_count)
} else {
0
};
@ -350,7 +371,10 @@ where
execs_since_crash: total_executions - self.execs_at_last_objective,
exec_timeout: self.exec_timeout,
slowest_exec_ms: self.slowest_exec.as_millis(),
#[cfg(unix)]
peak_rss_mb: peak_rss_mb_child_processes()?,
#[cfg(not(unix))]
peak_rss_mb: 0, // TODO for Windows
cpu_affinity: self.core_id.0,
total_edges: map_size as u64,
edges_found: filled_entries_in_map,
@ -381,18 +405,22 @@ where
execs_done: &stats.execs_done,
};
self.write_fuzzer_stats(&stats)?;
self.write_plot_data(&plot_data)?;
if self.plot_file_path.is_some() {
self.write_plot_data(&plot_data)?;
}
Ok(())
}
fn should_restart(&mut self, _state: &mut Self::State) -> Result<bool, Error> {
Ok(true)
}
fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> {
Ok(())
}
}
impl<C, O, E, EM, Z> AflStatsStage<C, O, E, EM, Z>
impl<C, E, EM, O, Z> AflStatsStage<C, E, EM, O, Z>
where
E: UsesState + HasObservers,
EM: EventFirer<State = E::State>,
@ -401,101 +429,41 @@ where
C: AsRef<O> + Named,
O: MapObserver,
{
/// create a new instance of the [`AflStatsStage`]
#[allow(clippy::too_many_arguments)]
/// Builder for `AflStatsStage`
#[must_use]
pub fn new(
opt: &Opt,
fuzzer_dir: PathBuf,
map_observer: &C,
provided_tokens: usize,
autotokens_enabled: bool,
core_id: CoreId,
) -> Self {
Self::create_plot_data_file(&fuzzer_dir).unwrap();
Self::create_fuzzer_stats_file(&fuzzer_dir).unwrap();
Self {
map_observer_handle: map_observer.handle(),
start_time: current_time().as_secs(),
stats_report_interval: Duration::from_secs(opt.stats_interval),
has_fuzzed_size: 0,
is_favored_size: 0,
cycles_done: 0,
cycles_wo_finds: 0,
execs_at_last_objective: 0,
last_crash: current_time(),
last_find: current_time(),
last_hang: current_time(),
max_depth: 0,
saved_hangs: 0,
saved_crashes: 0,
slowest_exec: Duration::from_secs(0),
last_report_time: current_time(),
pid: process::id(),
exec_timeout: opt.hang_timeout,
target_mode: fuzzer_target_mode(opt),
afl_banner: Cow::Owned(opt.executable.display().to_string()),
afl_version: Cow::Borrowed("libafl-fuzz-0.0.1"),
command_line: get_run_cmdline(),
fuzzer_dir,
provided_tokens,
core_id,
autotokens_enabled,
phantom: PhantomData,
}
}
fn create_plot_data_file(fuzzer_dir: &Path) -> Result<(), Error> {
let path = fuzzer_dir.join("plot_data");
if path.exists() {
// check if it contains any data
let file = File::open(path)?;
if BufReader::new(file).lines().next().is_none() {
std::fs::write(fuzzer_dir.join("plot_data"), AFLPlotData::get_header())?;
}
} else {
std::fs::write(fuzzer_dir.join("plot_data"), AFLPlotData::get_header())?;
}
Ok(())
}
fn create_fuzzer_stats_file(fuzzer_dir: &Path) -> Result<(), Error> {
let path = fuzzer_dir.join("fuzzer_stats");
if !path.exists() {
_ = OpenOptions::new().append(true).create(true).open(path)?;
}
Ok(())
pub fn builder() -> AflStatsStageBuilder<C, E, EM, O, Z> {
AflStatsStageBuilder::new()
}
fn write_fuzzer_stats(&self, stats: &AFLFuzzerStats) -> Result<(), Error> {
let tmp_file = self.fuzzer_dir.join(".fuzzer_stats_tmp");
let stats_file = self.fuzzer_dir.join("fuzzer_stats");
let tmp_file = self
.stats_file_path
.parent()
.expect("fuzzer_stats file must have a parent!")
.join(".fuzzer_stats_tmp");
std::fs::write(&tmp_file, stats.to_string())?;
_ = std::fs::copy(&tmp_file, &stats_file)?;
_ = std::fs::copy(&tmp_file, &self.stats_file_path)?;
std::fs::remove_file(tmp_file)?;
Ok(())
}
fn write_plot_data(&self, plot_data: &AFLPlotData) -> Result<(), Error> {
let plot_file = self.fuzzer_dir.join("plot_data");
let mut file = OpenOptions::new().append(true).open(&plot_file)?;
let mut file = OpenOptions::new().append(true).open(
self.plot_file_path
.as_ref()
.expect("invariant; should never occur"),
)?;
writeln!(file, "{plot_data}")?;
Ok(())
}
fn maybe_update_is_favored_size(
&mut self,
testcase: &Testcase<<<E as UsesState>::State as UsesInput>::Input>,
) {
fn maybe_update_is_favored_size(&mut self, testcase: &Testcase<E::Input>) {
if testcase.has_metadata::<IsFavoredMetadata>() {
self.is_favored_size += 1;
}
}
fn maybe_update_slowest_exec(
&mut self,
testcase: &Testcase<<<E as UsesState>::State as UsesInput>::Input>,
) {
fn maybe_update_slowest_exec(&mut self, testcase: &Testcase<E::Input>) {
if let Some(exec_time) = testcase.exec_time() {
if exec_time > &self.slowest_exec {
self.slowest_exec = *exec_time;
@ -507,48 +475,35 @@ where
self.has_fuzzed_size += 1;
}
fn maybe_update_max_depth(
&mut self,
testcase: &Testcase<<<E as UsesState>::State as UsesInput>::Input>,
) -> Result<(), Error> {
fn maybe_update_max_depth(&mut self, testcase: &Testcase<E::Input>) {
if let Ok(metadata) = testcase.metadata::<SchedulerTestcaseMetadata>() {
if metadata.depth() > self.max_depth {
self.max_depth = metadata.depth();
}
} else {
return Err(Error::illegal_state(
"testcase must have scheduler metdata?",
));
}
Ok(())
}
fn update_last_find(&mut self) {
self.last_find = current_time();
}
fn maybe_update_last_crash(
&mut self,
testcase: &Testcase<<<E as UsesState>::State as UsesInput>::Input>,
state: &E::State,
) {
#[cfg(feature = "track_hit_feedbacks")]
fn maybe_update_last_crash(&mut self, testcase: &Testcase<E::Input>, state: &E::State) {
#[cfg(feature = "track_hit_feedbacks")]
if testcase
.hit_objectives()
.contains(&Cow::Borrowed("CrashFeedback"))
.contains(&Cow::Borrowed(CRASH_FEEDBACK_NAME))
{
self.last_crash = current_time();
self.execs_at_last_objective = *state.executions();
}
}
fn maybe_update_last_hang(
&mut self,
testcase: &Testcase<<<E as UsesState>::State as UsesInput>::Input>,
state: &E::State,
) {
#[cfg(feature = "track_hit_feedbacks")]
fn maybe_update_last_hang(&mut self, testcase: &Testcase<E::Input>, state: &E::State) {
if testcase
.hit_objectives()
.contains(&Cow::Borrowed("TimeoutFeedback"))
.contains(&Cow::Borrowed(TIMEOUT_FEEDBACK_NAME))
{
self.last_hang = current_time();
self.execs_at_last_objective = *state.executions();
@ -658,8 +613,190 @@ impl Display for AFLFuzzerStats<'_> {
Ok(())
}
}
/// Get the command used to invoke libafl-fuzz
/// Get the command used to invoke the fuzzer
#[must_use]
pub fn get_run_cmdline() -> Cow<'static, str> {
let args: Vec<String> = std::env::args().collect();
Cow::Owned(args.join(" "))
}
/// The Builder for `AflStatsStage`
#[derive(Debug)]
pub struct AflStatsStageBuilder<C, E, EM, O, Z> {
stats_file_path: Option<PathBuf>,
plot_file_path: Option<PathBuf>,
core_id: Option<CoreId>,
map_observer_handle: Option<Handle<C>>,
uses_autotokens: bool,
report_interval: Duration,
dict_count: usize,
exec_timeout: u64,
banner: String,
version: String,
target_mode: String,
phantom_data: PhantomData<(O, E, EM, Z)>,
}
impl<C, E, EM, O, Z> AflStatsStageBuilder<C, E, EM, O, Z>
where
E: UsesState + HasObservers,
EM: EventFirer<State = E::State>,
Z: UsesState<State = E::State>,
E::State: HasImported + HasCorpus + HasMetadata + HasExecutions,
C: AsRef<O> + Named,
O: MapObserver,
{
fn new() -> Self {
Self {
report_interval: Duration::from_secs(AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS),
stats_file_path: None,
plot_file_path: None,
core_id: None,
map_observer_handle: None,
uses_autotokens: false,
dict_count: 0,
exec_timeout: 0,
banner: String::default(),
version: String::default(),
target_mode: String::default(),
phantom_data: PhantomData,
}
}
/// The file path to which we will write the fuzzer stats
#[must_use]
pub fn stats_file(mut self, path: PathBuf) -> Self {
self.stats_file_path = Some(path);
self
}
/// The file path to which we will write the plot data
#[must_use]
pub fn plot_file(mut self, path: PathBuf) -> Self {
self.plot_file_path = Some(path);
self
}
/// The core we are bound to
#[must_use]
pub fn core_id(mut self, core_id: CoreId) -> Self {
self.core_id = Some(core_id);
self
}
/// The interval with which we report stats
#[must_use]
pub fn report_interval(mut self, interval: Duration) -> Self {
self.report_interval = interval;
self
}
/// Our `MapObserver`
#[must_use]
pub fn map_observer(mut self, map_observer: &C) -> Self {
self.map_observer_handle = Some(map_observer.handle());
self
}
/// If we use autotokens provided by the target
#[must_use]
pub fn uses_autotokens(mut self, uses: bool) -> Self {
self.uses_autotokens = uses;
self
}
/// The tokens utilized by the fuzzer
#[must_use]
pub fn tokens(mut self, tokens: &Tokens) -> Self {
self.dict_count = tokens.len();
self
}
/// AFL++ Banner (typically the target)
#[must_use]
pub fn banner(mut self, banner: String) -> Self {
self.banner = banner;
self
}
/// Version of the fuzzer
#[must_use]
pub fn version(mut self, version: String) -> Self {
self.version = version;
self
}
/// The "timeout" value used in `TimeoutFeedback`
#[must_use]
pub fn exec_timeout(mut self, timeout: u64) -> Self {
self.exec_timeout = timeout;
self
}
/// Used in the UI (optional)
/// default, persistent, qemu, unicorn, non-instrumented etc
#[must_use]
pub fn target_mode(mut self, target_mode: String) -> Self {
self.target_mode = target_mode;
self
}
fn create_plot_data_file(path: &Path) -> Result<(), Error> {
if path.exists() {
// check if it contains any data
let file = File::open(path)?;
if BufReader::new(file).lines().next().is_none() {
std::fs::write(path, AFLPlotData::get_header())?;
}
} else {
std::fs::write(path, AFLPlotData::get_header())?;
}
Ok(())
}
fn create_fuzzer_stats_file(path: &Path) -> Result<(), Error> {
if !path.exists() {
_ = OpenOptions::new().append(true).create(true).open(path)?;
}
Ok(())
}
/// Build [`AflStatsStage`]
/// Will error if:
/// Cannot create the stats file
/// Cannot create the plot file (if provided)
/// No `MapObserver` supplied to the builder
/// No `stats_file_path` provided
pub fn build(self) -> Result<AflStatsStage<C, E, EM, O, Z>, Error> {
if self.stats_file_path.is_none() {
return Err(Error::illegal_argument("Must set `stats_file_path`"));
}
let stats_file_path = self.stats_file_path.unwrap();
if self.map_observer_handle.is_none() {
return Err(Error::illegal_argument("Must set `map_observer`"));
}
if let Some(ref plot_file) = self.plot_file_path {
Self::create_plot_data_file(plot_file)?;
}
Self::create_fuzzer_stats_file(&stats_file_path)?;
Ok(AflStatsStage {
stats_file_path,
plot_file_path: self.plot_file_path,
map_observer_handle: self.map_observer_handle.unwrap(),
start_time: current_time().as_secs(),
stats_report_interval: self.report_interval,
has_fuzzed_size: 0,
is_favored_size: 0,
cycles_done: 0,
cycles_wo_finds: 0,
execs_at_last_objective: 0,
last_crash: current_time(),
last_find: current_time(),
last_hang: current_time(),
max_depth: 0,
saved_hangs: 0,
saved_crashes: 0,
slowest_exec: Duration::from_secs(0),
last_report_time: current_time(),
pid: process::id(),
exec_timeout: self.exec_timeout,
target_mode: Cow::Owned(self.target_mode),
afl_banner: Cow::Owned(self.banner),
afl_version: Cow::Owned(self.version),
command_line: get_run_cmdline(),
dict_count: self.dict_count,
core_id: self.core_id.unwrap_or(CoreId(0)),
autotokens_enabled: self.uses_autotokens,
phantom_data: PhantomData,
})
}
}
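
For reference, the call site in libafl-fuzz's `fuzzer.rs` (shown earlier in this diff) exercises the new builder like this; `opt`, `fuzzer_dir`, `core_id`, `edges_observer`, and `tokens` all come from that surrounding setup:

```rust
let afl_stats_stage = AflStatsStage::builder()
    .stats_file(fuzzer_dir.join("fuzzer_stats"))
    .plot_file(fuzzer_dir.join("plot_data"))
    .core_id(core_id)
    .report_interval(Duration::from_secs(opt.stats_interval))
    .map_observer(&edges_observer)
    .uses_autotokens(!opt.no_autodict)
    .tokens(&tokens)
    .banner(opt.executable.display().to_string())
    .version("0.13.2".to_string())
    .exec_timeout(opt.hang_timeout)
    .target_mode(fuzzer_target_mode(opt).to_string())
    .build()
    .expect("invariant; should never occur");
```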

View File

@ -12,6 +12,8 @@ use alloc::{
};
use core::{fmt, marker::PhantomData};
#[cfg(feature = "std")]
pub use afl_stats::{AflStatsStage, CalibrationTime, FuzzTime, SyncTime};
pub use calibrate::CalibrationStage;
pub use colorization::*;
#[cfg(all(feature = "std", unix))]
@ -31,9 +33,11 @@ pub use logics::*;
pub use mutational::{MutationalStage, StdMutationalStage};
pub use power::{PowerMutationalStage, StdPowerMutationalStage};
use serde::{Deserialize, Serialize};
pub use stats::AflStatsStage;
pub use stats::StatsStage;
#[cfg(feature = "std")]
pub use sync::*;
#[cfg(feature = "std")]
pub use time_tracker::TimeTrackingStageWrapper;
pub use tmin::{
MapEqualityFactory, MapEqualityFeedback, StdTMinMutationalStage, TMinMutationalStage,
};
@ -42,6 +46,8 @@ pub use tuneable::*;
use tuple_list::NonEmptyTuple;
#[cfg(feature = "unicode")]
pub use unicode::*;
#[cfg(feature = "std")]
pub use verify_timeouts::{TimeoutsToVerify, VerifyTimeoutsStage};
use crate::{
corpus::{CorpusId, HasCurrentCorpusId},
@ -61,6 +67,8 @@ pub mod mutational;
pub mod push;
pub mod tmin;
#[cfg(feature = "std")]
pub mod afl_stats;
pub mod calibrate;
pub mod colorization;
#[cfg(all(feature = "std", unix))]
@ -74,10 +82,14 @@ pub mod power;
pub mod stats;
#[cfg(feature = "std")]
pub mod sync;
#[cfg(feature = "std")]
pub mod time_tracker;
pub mod tracing;
pub mod tuneable;
#[cfg(feature = "unicode")]
pub mod unicode;
#[cfg(feature = "std")]
pub mod verify_timeouts;
/// A stage is one step in the fuzzing process.
/// Multiple stages will be scheduled one by one for each input.

View File

@ -1,4 +1,4 @@
//! Stage to compute/report AFL stats
//! Stage to compute/report minimal AFL-like stats
#[cfg(feature = "std")]
use alloc::{borrow::Cow, string::ToString};
@ -22,9 +22,9 @@ use crate::{
monitors::{AggregatorOps, UserStats, UserStatsValue},
};
/// The [`AflStatsStage`] is a simple stage that computes and reports some stats.
/// The [`StatsStage`] is a simple stage that computes and reports some stats.
#[derive(Debug, Clone)]
pub struct AflStatsStage<E, EM, Z> {
pub struct StatsStage<E, EM, Z> {
// the number of testcases that have been fuzzed
has_fuzzed_size: usize,
// the number of "favored" testcases
@ -41,14 +41,14 @@ pub struct AflStatsStage<E, EM, Z> {
phantom: PhantomData<(E, EM, Z)>,
}
impl<E, EM, Z> UsesState for AflStatsStage<E, EM, Z>
impl<E, EM, Z> UsesState for StatsStage<E, EM, Z>
where
E: UsesState,
{
type State = E::State;
}
impl<E, EM, Z> Stage<E, EM, Z> for AflStatsStage<E, EM, Z>
impl<E, EM, Z> Stage<E, EM, Z> for StatsStage<E, EM, Z>
where
E: UsesState,
EM: EventFirer<State = Self::State>,
@ -102,7 +102,7 @@ where
_manager.fire(
state,
Event::UpdateUserStats {
name: Cow::from("AflStats"),
name: Cow::from("Stats"),
value: UserStats::new(
UserStatsValue::String(Cow::from(json.to_string())),
AggregatorOps::None,
@ -138,8 +138,8 @@ where
}
}
impl<E, EM, Z> AflStatsStage<E, EM, Z> {
/// create a new instance of the [`AflStatsStage`]
impl<E, EM, Z> StatsStage<E, EM, Z> {
/// create a new instance of the [`StatsStage`]
#[must_use]
pub fn new(interval: Duration) -> Self {
Self {
@ -149,8 +149,8 @@ impl<E, EM, Z> AflStatsStage<E, EM, Z> {
}
}
impl<E, EM, Z> Default for AflStatsStage<E, EM, Z> {
/// the default instance of the [`AflStatsStage`]
impl<E, EM, Z> Default for StatsStage<E, EM, Z> {
/// the default instance of the [`StatsStage`]
#[must_use]
fn default() -> Self {
Self {

View File

@ -1,13 +1,16 @@
//! Stage that wraps another stage and tracks its execution time in `State`
use std::{marker::PhantomData, time::Duration};
use libafl::{
use libafl_bolts::{current_time, Error};
use crate::{
inputs::UsesInput,
stages::Stage,
state::{State, UsesState},
HasMetadata,
};
use libafl_bolts::{current_time, Error};
/// Track an inner Stage's execution time
#[derive(Debug)]
pub struct TimeTrackingStageWrapper<T, S, ST> {
inner: ST,
count: Duration,
@ -15,6 +18,7 @@ pub struct TimeTrackingStageWrapper<T, S, ST> {
}
impl<T, S, ST> TimeTrackingStageWrapper<T, S, ST> {
/// Create a `TimeTrackingStageWrapper`
pub fn new(inner: ST) -> Self {
Self {
inner,
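
`TimeTrackingStageWrapper` runs the stage it wraps and accumulates the elapsed time, so the total can later surface as metadata such as `CalibrationTime`, `FuzzTime`, or `SyncTime`. A standalone toy version of that wrapping pattern (the `Stage` trait and types here are simplified stand-ins):

```rust
use std::time::{Duration, Instant};

trait Stage {
    fn perform(&mut self);
}

// An inner stage that takes a noticeable amount of time.
struct Sleepy;

impl Stage for Sleepy {
    fn perform(&mut self) {
        std::thread::sleep(Duration::from_millis(10));
    }
}

// Wrapper that measures each run of the inner stage and keeps a running total.
struct TimeTracking<ST> {
    inner: ST,
    count: Duration,
}

impl<ST: Stage> Stage for TimeTracking<ST> {
    fn perform(&mut self) {
        let before = Instant::now();
        self.inner.perform();
        self.count += before.elapsed();
    }
}

fn main() {
    let mut stage = TimeTracking {
        inner: Sleepy,
        count: Duration::ZERO,
    };
    stage.perform();
    println!("inner stage ran for {:?} in total", stage.count);
}
```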

View File

@ -0,0 +1,130 @@
//! Stage that re-runs captured timeouts with double the timeout to verify that they are real
//! Note: To capture the timeouts, use in conjunction with `CaptureTimeoutFeedback`
//! Note: Will NOT work with in-process executors due to the potential for restarts/crashes when
//! running inputs.
use core::time::Duration;
use std::{cell::RefCell, collections::VecDeque, fmt::Debug, marker::PhantomData, rc::Rc};
use libafl_bolts::Error;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::{
corpus::Corpus,
executors::{Executor, HasObservers, HasTimeout},
inputs::{BytesInput, UsesInput},
observers::ObserversTuple,
stages::Stage,
state::{HasCorpus, State, UsesState},
Evaluator, HasMetadata,
};
/// Stage that re-runs inputs deemed as timeouts with double the timeout to assert that they are
/// not false positives. AFL++ style.
/// Note: Will NOT work with in-process executors due to the potential for restarts/crashes when
/// running inputs.
#[derive(Debug)]
pub struct VerifyTimeoutsStage<E, S> {
doubled_timeout: Duration,
original_timeout: Duration,
capture_timeouts: Rc<RefCell<bool>>,
phantom: PhantomData<(E, S)>,
}
impl<E, S> VerifyTimeoutsStage<E, S> {
/// Create a `VerifyTimeoutsStage`
pub fn new(capture_timeouts: Rc<RefCell<bool>>, configured_timeout: Duration) -> Self {
Self {
capture_timeouts,
doubled_timeout: configured_timeout * 2,
original_timeout: configured_timeout,
phantom: PhantomData,
}
}
}
impl<E, S> UsesState for VerifyTimeoutsStage<E, S>
where
S: State,
{
type State = S;
}
/// Timeouts that `VerifyTimeoutsStage` will read from
#[derive(Default, Serialize, Deserialize, Clone, Debug)]
#[serde(bound = "I: for<'a> Deserialize<'a> + Serialize")]
pub struct TimeoutsToVerify<I> {
inputs: VecDeque<I>,
}
libafl_bolts::impl_serdeany!(
TimeoutsToVerify<I: Debug + 'static + Serialize + DeserializeOwned + Clone>,
<BytesInput>
);
impl<I> TimeoutsToVerify<I> {
/// Create a new `TimeoutsToVerify`
#[must_use]
pub fn new() -> Self {
Self {
inputs: VecDeque::new(),
}
}
/// Add a `TimeoutsToVerify` to queue
pub fn push(&mut self, input: I) {
self.inputs.push_back(input);
}
/// Pop a `TimeoutsToVerify` to queue
pub fn pop(&mut self) -> Option<I> {
self.inputs.pop_front()
}
/// Count `TimeoutsToVerify` in queue
#[must_use]
pub fn count(&self) -> usize {
self.inputs.len()
}
}
impl<E, EM, Z, S> Stage<E, EM, Z> for VerifyTimeoutsStage<E, S>
where
E::Observers: ObserversTuple<<Self as UsesInput>::Input, <Self as UsesState>::State>,
E: Executor<EM, Z, State = S> + HasObservers + HasTimeout,
EM: UsesState<State = S>,
Z: UsesState<State = S> + Evaluator<E, EM>,
S: HasCorpus + State + HasMetadata,
Self::Input: Debug + Serialize + DeserializeOwned + Default + 'static + Clone,
<<E as UsesState>::State as HasCorpus>::Corpus: Corpus<Input = Self::Input>, //delete me
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM,
) -> Result<(), Error> {
let mut timeouts =
state.metadata_or_insert_with(TimeoutsToVerify::<<S::Corpus as Corpus>::Input>::new).clone();
if timeouts.count() == 0 {
return Ok(());
}
executor.set_timeout(self.doubled_timeout);
*self.capture_timeouts.borrow_mut() = false;
while let Some(input) = timeouts.pop() {
fuzzer.evaluate_input(state, executor, manager, input)?;
}
executor.set_timeout(self.original_timeout);
*self.capture_timeouts.borrow_mut() = true;
let res = state.metadata_mut::<TimeoutsToVerify<E::Input>>().unwrap();
*res = TimeoutsToVerify::<E::Input>::new();
Ok(())
}
fn should_restart(&mut self, _state: &mut Self::State) -> Result<bool, Error> {
Ok(true)
}
fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> {
Ok(())
}
}
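
For reference, this is how libafl-fuzz wires the stage up in the `fuzzer.rs` hunk earlier in this diff: the capture flag is shared with `CaptureTimeoutFeedback` (which replaces `TimeoutFeedback` in the objective feedback), and the stage is gated behind `IfStage` so it only runs when timeouts are not ignored (`opt` comes from that surrounding setup):

```rust
let enable_capture_timeouts = Rc::new(RefCell::new(false));
let capture_timeout_feedback = CaptureTimeoutFeedback::new(Rc::clone(&enable_capture_timeouts));
let timeout_verify_stage = IfStage::new(
    |_, _, _, _| Ok(!opt.ignore_timeouts),
    tuple_list!(VerifyTimeoutsStage::new(
        enable_capture_timeouts,
        Duration::from_millis(opt.hang_timeout),
    )),
);
```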