Feature/libafl fuzz misc (#2430)

* libafl-fuzz: add cmplog to CI
libafl-fuzz: add option to specify a custom RNG seed
libafl-fuzz: add help messages to CLI, add file extension support
libafl-fuzz: adhere to AFL++ cmplog bin path format
libafl-fuzz: avoid races when writing to fuzzer_stats
libafl-fuzz: add time tracking for CalibrationStage, MutationalStage and SyncFromDiskStage

* libafl-fuzz: fix libafl paths

* libafl-fuzz: remove redundant cmplog check

* libafl-fuzz: ignore UnstableMapEntries when using the queue scheduler in afl_stats.rs
libafl-fuzz: track max_depth for QueueScheduler
libafl-fuzz: fix custom input file

* libafl-fuzz: fix Makefile

* clippy
Aarnav 2024-07-22 15:43:47 +02:00 committed by GitHub
parent 695184169e
commit e6b94f3715
11 changed files with 295 additions and 72 deletions

View File

@@ -37,17 +37,34 @@ script_runner="@shell"
script='''
cargo build --profile ${PROFILE}
AFL_PATH=${AFL_DIR_NAME} ${AFL_CC_PATH} ./test/test-instr.c -o ./test/out-instr
AFL_CORES=1 AFL_STATS_INTERVAL=1 timeout 5 ${FUZZER} -i ./test/seeds -o ./test/output ./test/out-instr || true
test -n "$( ls ./test/output/fuzzer_main/queue/id:000002* 2>/dev/null )" || exit 1
test -n "$( ls ./test/output/fuzzer_main/fuzzer_stats 2>/dev/null )" || exit 1
test -n "$( ls ./test/output/fuzzer_main/plot_data 2>/dev/null )" || exit 1
test -d "./test/output/fuzzer_main/hangs" || exit 1
test -d "./test/output/fuzzer_main/crashes" || exit 1
LIBAFL_DEBUG_OUTPUT=1 AFL_CORES=1 AFL_STATS_INTERVAL=1 timeout 5 ${FUZZER} -i ./test/seeds -o ./test/output ./test/out-instr || true
test -n "$( ls ./test/output/fuzzer_main/queue/id:000002* 2>/dev/null )" || {
echo "No new corpus entries found"
exit 1
}
test -n "$( ls ./test/output/fuzzer_main/fuzzer_stats 2>/dev/null )" || {
echo "No fuzzer_stats file found"
exit 1
}
test -n "$( ls ./test/output/fuzzer_main/plot_data 2>/dev/null )" || {
echo "No plot_data found"
exit 1
}
test -d "./test/output/fuzzer_main/hangs" || {
echo "No hangs directory found"
exit 1
}
test -d "./test/output/fuzzer_main/crashes" || {
echo "No crashes directory found"
exit 1
}
# cmplog TODO: AFL_BENCH_UNTIL_CRASH=1 instead of timeout 15s
#AFL_LLVM_CMPLOG=1 AFL_PATH=${AFL_DIR_NAME} ${AFL_CC_PATH} ./test/test-cmplog.c -o ./test/out-cmplog
#AFL_CORES=1 timeout 15 ${FUZZER} -Z -l 3 -m 0 -V30 -i ./test/seeds_cmplog -o ./test/cmplog-output -c ./test/out-cmplog ./test/out-cmplog >>errors 2>&1
#test -n "$( ls ./test/cmplog-output/fuzzer_main/crashes/id:000000* ./test/cmplog-output/hangs/id:000000* 2>/dev/null )" || exit 1
AFL_LLVM_CMPLOG=1 AFL_PATH=${AFL_DIR_NAME} ${AFL_CC_PATH} ./test/test-cmplog.c -o ./test/out-cmplog
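# -c 0 tells the fuzzer that the target binary itself is cmplog-instrumented (it was built with AFL_LLVM_CMPLOG=1 above)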
AFL_CORES=1 timeout 15 ${FUZZER} -Z -l 3 -m 0 -V30 -i ./test/seeds_cmplog -o ./test/cmplog-output -c 0 ./test/out-cmplog >>errors 2>&1
test -n "$( ls ./test/cmplog-output/fuzzer_main/crashes/id:000000* ./test/cmplog-output/hangs/id:000000* 2>/dev/null )" || {
echo "no crashes found when running cmplog"
exit 1
}
'''
dependencies = ["build_afl"]

View File

@@ -26,9 +26,11 @@ Rewrite of afl-fuzz in Rust.
- [x] AFL_FUZZER_STATS_UPDATE_INTERVAL
- [x] AFL_DEFER_FORKSRV
- [x] AFL_NO_WARN_INSTABILITY (we don't warn anyway; maybe we should?)
- [x] AFL_IMPORT_FIRST (implicit)
- [x] AFL_SYNC_TIME
- [ ] AFL_FINAL_SYNC
- [x] AFL_AUTORESUME
- [x] AFL_PERSISTENT_RECORD
- [ ] AFL_FINAL_SYNC
- [ ] AFL_CRASHING_SEEDS_AS_NEW_CRASH
- [ ] AFL_IGNORE_UNKNOWN_ENVS
- [ ] AFL_NO_UI
@@ -44,7 +46,6 @@ Rewrite of afl-fuzz in Rust.
- [ ] AFL_FAST_CAL
- [ ] AFL_NO_CRASH_README
- [ ] AFL_KEEP_TIMEOUTS
- [x] AFL_PERSISTENT_RECORD
- [ ] AFL_TESTCACHE_SIZE
- [ ] AFL_NO_ARITH
- [ ] AFL_DISABLE_TRIM
@@ -56,7 +57,6 @@ Rewrite of afl-fuzz in Rust.
- [ ] AFL_STATSD_PORT
- [ ] AFL_STATSD_HOST
- [ ] AFL_IMPORT
- [x] AFL_IMPORT_FIRST (implicit)
- [ ] AFL_SHUFFLE_QUEUE
- [ ] AFL_CUSTOM_QEMU_BIN
- [ ] AFL_PATH

View File

@@ -13,21 +13,47 @@ use libafl::{
events::EventFirer,
executors::HasObservers,
inputs::UsesInput,
mutators::Tokens,
observers::MapObserver,
schedulers::{minimizer::IsFavoredMetadata, HasQueueCycles, Scheduler},
stages::{calibrate::UnstableEntriesMetadata, Stage},
state::{HasCorpus, HasExecutions, HasImported, HasStartTime, Stoppable, UsesState},
Error, HasMetadata, HasNamedMetadata, HasScheduler,
Error, HasMetadata, HasNamedMetadata, HasScheduler, SerdeAny,
};
use libafl_bolts::{
core_affinity::CoreId,
current_time,
os::peak_rss_mb_child_processes,
tuples::{Handle, Handled, MatchNameRef},
Named,
};
use serde::{Deserialize, Serialize};
use crate::{fuzzer::fuzzer_target_mode, Opt};
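// Per-stage wall-clock totals recorded by `TimeTrackingStageWrapper` as state
// metadata and read back below when writing `fuzzer_stats`.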
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct CalibrationTime(pub Duration);
impl From<Duration> for CalibrationTime {
fn from(value: Duration) -> Self {
Self(value)
}
}
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct SyncTime(pub Duration);
impl From<Duration> for SyncTime {
fn from(value: Duration) -> Self {
Self(value)
}
}
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct FuzzTime(pub Duration);
impl From<Duration> for FuzzTime {
fn from(value: Duration) -> Self {
Self(value)
}
}
/// The [`AflStatsStage`] is a Stage that calculates and writes
/// AFL++'s `fuzzer_stats` and `plot_data` information.
#[derive(Debug, Clone)]
@@ -63,6 +89,12 @@ pub struct AflStatsStage<C, O, E, EM, Z> {
target_mode: Cow<'static, str>,
/// full command line used for the fuzzing session
command_line: Cow<'static, str>,
/// Amount of tokens provided by the user. Used to determine autotokens count.
provided_tokens: usize,
/// autotokens are enabled
autotokens_enabled: bool,
/// The core we are bound to
core_id: CoreId,
phantom: PhantomData<(C, O, E, EM, Z)>,
}
@@ -82,11 +114,12 @@ pub struct AFLFuzzerStats<'a> {
cycles_wo_find: u64,
/// longest time in seconds no new path was found
time_wo_finds: u64,
/// TODO
/// Time spent fuzzing
fuzz_time: u64,
/// TODO
/// Time spent calibrating inputs
calibration_time: u64,
/// TODO
/// Time spent syncing with foreign fuzzers
/// NOTE: Syncing between our own instances is not counted.
sync_time: u64,
/// TODO
trim_time: u64,
@@ -137,16 +170,16 @@ pub struct AFLFuzzerStats<'a> {
/// max rss usage reached during fuzzing in MB
peak_rss_mb: i64,
/// TODO
cpu_affinity: i64,
cpu_affinity: usize,
/// how many edges have been found
edges_found: u64,
/// TODO:
/// Size of our edges map
total_edges: u64,
/// how many edges are non-deterministic
var_byte_count: usize,
/// TODO:
havoc_expansion: usize,
/// TODO:
/// Amount of automatic dict entries found
auto_dict_entries: usize,
/// TODO:
testcache_size: usize,
@@ -258,13 +291,20 @@ where
.as_ref();
let filled_entries_in_map = map_observer.count_bytes();
let map_size = map_observer.usable_count();
let unstable_entries_metadata = state
// Since we do not calibrate when using `QueueScheduler`, we cannot calculate unstable entries.
let unstable_entries_in_map = state
.metadata_map()
.get::<UnstableEntriesMetadata>()
.unwrap();
let unstable_entries_in_map = unstable_entries_metadata.unstable_entries().len();
.map_or(0, |m| m.unstable_entries().len());
let auto_dict_entries = if self.autotokens_enabled {
state
.metadata::<Tokens>()?
.len()
.saturating_sub(self.provided_tokens)
} else {
0
};
let stats = AFLFuzzerStats {
start_time: self.start_time,
last_update: self.last_report_time.as_secs(),
@@ -272,9 +312,18 @@ where
fuzzer_pid: self.pid,
cycles_done: queue_cycles,
cycles_wo_find: self.cycles_wo_finds,
fuzz_time: 0, // TODO
calibration_time: 0, // TODO
sync_time: 0, // TODO
fuzz_time: state
.metadata::<FuzzTime>()
.map_or(Duration::from_secs(0), |d| d.0)
.as_secs(),
calibration_time: state
.metadata::<CalibrationTime>()
.map_or(Duration::from_secs(0), |d| d.0)
.as_secs(),
sync_time: state
.metadata::<SyncTime>()
.map_or(Duration::from_secs(0), |d| d.0)
.as_secs(),
trim_time: 0, // TODO
execs_done: total_executions,
execs_per_sec: *state.executions(), // TODO
@@ -298,15 +347,15 @@ where
last_hang: self.last_hang,
last_crash: self.last_crash,
execs_since_crash: total_executions - self.execs_at_last_objective,
exec_timeout: self.exec_timeout, // TODO
exec_timeout: self.exec_timeout,
slowest_exec_ms: self.slowest_exec.as_millis(),
peak_rss_mb: peak_rss_mb_child_processes()?,
cpu_affinity: 0, // TODO
cpu_affinity: self.core_id.0,
total_edges: map_size as u64,
edges_found: filled_entries_in_map,
var_byte_count: unstable_entries_metadata.unstable_entries().len(),
var_byte_count: unstable_entries_in_map,
havoc_expansion: 0, // TODO
auto_dict_entries: 0, // TODO
auto_dict_entries,
testcache_size: 0,
testcache_count: 0,
testcache_evict: 0,
@@ -354,7 +403,14 @@ where
/// create a new instance of the [`AflStatsStage`]
#[allow(clippy::too_many_arguments)]
#[must_use]
pub fn new(opt: &Opt, fuzzer_dir: PathBuf, map_observer: &C) -> Self {
pub fn new(
opt: &Opt,
fuzzer_dir: PathBuf,
map_observer: &C,
provided_tokens: usize,
autotokens_enabled: bool,
core_id: CoreId,
) -> Self {
Self::create_plot_data_file(&fuzzer_dir).unwrap();
Self::create_fuzzer_stats_file(&fuzzer_dir).unwrap();
Self {
@@ -381,6 +437,9 @@ where
afl_version: Cow::Borrowed("libafl-fuzz-0.0.1"),
command_line: get_run_cmdline(),
fuzzer_dir,
provided_tokens,
core_id,
autotokens_enabled,
phantom: PhantomData,
}
}
@@ -408,14 +467,17 @@ where
}
fn write_fuzzer_stats(&self, stats: &AFLFuzzerStats) -> Result<(), Error> {
std::fs::write(self.fuzzer_dir.join("fuzzer_stats"), stats.to_string())?;
let tmp_file = self.fuzzer_dir.join(".fuzzer_stats_tmp");
let stats_file = self.fuzzer_dir.join("fuzzer_stats");
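// Write the stats to a temp file first, then copy it over `fuzzer_stats`,
// so an external reader never observes a partially written file.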
std::fs::write(&tmp_file, stats.to_string())?;
std::fs::copy(&tmp_file, &stats_file)?;
std::fs::remove_file(tmp_file)?;
Ok(())
}
fn write_plot_data(&self, plot_data: &AFLPlotData) -> Result<(), Error> {
let mut file = OpenOptions::new()
.append(true)
.open(self.fuzzer_dir.join("plot_data"))?;
let plot_file = self.fuzzer_dir.join("plot_data");
let mut file = OpenOptions::new().append(true).open(&plot_file)?;
writeln!(file, "{plot_data}")?;
Ok(())
}

View File

@@ -58,7 +58,7 @@ pub fn check_binary(opt: &mut Opt, shmem_env_var: &str) -> Result<(), Error> {
)));
}
if opt.skip_bin_check
|| opt.use_wine
|| opt.wine_mode
|| opt.unicorn_mode
|| (opt.qemu_mode && opt.qemu_custom_bin)
|| (opt.forkserver_cs && opt.cs_custom_bin)

View File

@@ -52,7 +52,7 @@ where
{
fn create_feedback(&self, _ctx: &T) -> PersitentRecordFeedback<I, S> {
Self {
record_size: self.record_size.clone(),
record_size: self.record_size,
record: self.record.clone(),
phantomm: self.phantomm,
}
@@ -156,6 +156,6 @@ where
S: State<Input = I>,
{
fn should_run(&self) -> bool {
return self.record_size > 0;
self.record_size > 0
}
}

View File

@@ -43,15 +43,15 @@ use libafl_targets::{cmps::AFLppCmpLogMap, AFLppCmpLogObserver, AFLppCmplogTraci
use serde::{Deserialize, Serialize};
use crate::{
afl_stats::AflStatsStage,
afl_stats::{AflStatsStage, CalibrationTime, FuzzTime, SyncTime},
corpus::{set_corpus_filepath, set_solution_filepath},
env_parser::AFL_DEFAULT_MAP_SIZE,
feedback::{
filepath::CustomFilepathToTestcaseFeedback, persistent_record::PersitentRecordFeedback,
seed::SeedFeedback,
},
mutational_stage::SupportedMutationalStages,
scheduler::SupportedSchedulers,
stages::{mutational_stage::SupportedMutationalStages, time_tracker::TimeTrackingStageWrapper},
Opt, AFL_DEFAULT_INPUT_LEN_MAX, AFL_DEFAULT_INPUT_LEN_MIN, SHMEM_ENV_VAR,
};
@@ -95,11 +95,26 @@ where
// We run the stage only if we are NOT doing sequential scheduling.
let calibration = IfStage::new(
|_, _, _, _| Ok(!opt.sequential_queue),
tuple_list!(CalibrationStage::new(&map_feedback)),
tuple_list!(TimeTrackingStageWrapper::<CalibrationTime, _, _>::new(
CalibrationStage::new(&map_feedback)
)),
);
// Add user supplied dictionaries
let mut tokens = Tokens::new();
tokens = tokens.add_from_files(&opt.dicts)?;
let user_token_count = tokens.len();
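// Remember how many tokens came from user-supplied dictionaries (-x) so that
// autotokens discovered at runtime can be counted separately in fuzzer_stats.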
// Create an `AflStatsStage`
let afl_stats_stage = AflStatsStage::new(opt, fuzzer_dir.clone(), &edges_observer);
let afl_stats_stage = AflStatsStage::new(
opt,
fuzzer_dir.clone(),
&edges_observer,
user_token_count,
!opt.no_autodict,
core_id,
);
// Create an observation channel to keep track of the execution time.
let time_observer = TimeObserver::new("time");
@@ -145,7 +160,7 @@ where
// Initialize our State if necessary
let mut state = state.unwrap_or_else(|| {
StdState::new(
StdRand::with_seed(current_nanos()),
StdRand::with_seed(opt.rng_seed.unwrap_or(current_nanos())),
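// Use the fixed RNG seed from -s if one was given, otherwise seed from the current time.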
// TODO: configure testcache size
CachedOnDiskCorpus::<BytesInput>::new(fuzzer_dir.join("queue"), 1000).unwrap(),
OnDiskCorpus::<BytesInput>::new(fuzzer_dir.clone()).unwrap(),
@@ -159,7 +174,7 @@ where
// We can either have a simple MutationalStage (for Queue scheduling)
// Or one that utilizes scheduling metadata (Weighted Random scheduling)
let mutation = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations()));
let mutational_stage = if opt.sequential_queue {
let inner_mutational_stage = if opt.sequential_queue {
SupportedMutationalStages::StdMutational(StdMutationalStage::new(mutation), PhantomData)
} else {
SupportedMutationalStages::PowerMutational(
@@ -167,7 +182,7 @@ where
PhantomData,
)
};
let mutational_stage = TimeTrackingStageWrapper::<FuzzTime, _, _>::new(inner_mutational_stage);
let strategy = opt.power_schedule.unwrap_or(PowerSchedule::EXPLORE);
// Create our ColorizationStage
@@ -185,7 +200,6 @@ where
if opt.cycle_schedules {
weighted_scheduler = weighted_scheduler.cycling_scheduler();
}
// TODO: Go back to IndexesLenTimeMinimizerScheduler once AflScheduler is implemented for it.
scheduler = SupportedSchedulers::Weighted(
IndexesLenTimeMinimizerScheduler::new(&edges_observer, weighted_scheduler),
PhantomData,
@@ -203,21 +217,27 @@ where
}
// Enable autodict if configured
let mut tokens = Tokens::new();
if !opt.no_autodict {
executor = executor.autotokens(&mut tokens);
};
// Set a custom directory for the current Input if configured;
// May be used to provide a ram-disk etc..
// Set a custom directory for the current_input file if configured;
// Relevant only if harness input type is @@
if opt.harness_input_type.is_some() {
let mut file = get_unique_std_input_file();
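// If a file extension was configured (input_ext), append it so targets that
// dispatch on the input file's suffix still see the expected name.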
if let Some(ext) = &opt.input_ext {
file = format!("{file}.{ext}");
}
if let Some(cur_input_dir) = &opt.cur_input_dir {
if opt.harness_input_type.is_none() {
executor = executor.arg_input_file(cur_input_dir.join(file));
} else {
executor = executor.arg_input_file(fuzzer_dir.join(file));
}
} else if opt.cur_input_dir.is_some() {
return Err(Error::illegal_argument(
"cannot use AFL_TMPDIR with stdin input type.",
));
}
executor = executor.arg_input_file(cur_input_dir.join(get_unique_std_input_file()));
}
// Finalize and build our Executor
let mut executor = executor
@@ -268,14 +288,26 @@ where
// Create a Sync stage to sync from foreign fuzzers
let sync_stage = IfStage::new(
|_, _, _, _| Ok(is_main_node && !opt.foreign_sync_dirs.is_empty()),
tuple_list!(SyncFromDiskStage::with_from_file(
tuple_list!(TimeTrackingStageWrapper::<SyncTime, _, _>::new(
SyncFromDiskStage::with_from_file(
opt.foreign_sync_dirs.clone(),
opt.foreign_sync_interval
)
)),
);
// Create a CmpLog executor if configured.
if let Some(ref cmplog_binary) = opt.cmplog_binary {
// We only run cmplog on the main node
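// Mirror AFL++'s -c semantics: no -c disables cmplog ("-"), "0" means the
// target binary itself is cmplog-instrumented, anything else is a path to a
// separate cmplog binary.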
let cmplog_executable_path = match &opt.cmplog {
None => "-",
Some(ref p) => match p.as_str() {
"0" => opt.executable.to_str().unwrap(),
_ => p,
},
};
let run_cmplog = cmplog_executable_path != "-" && is_main_node;
if run_cmplog {
// The CmpLog map shared between the CmpLog observer and CmpLog executor
let mut cmplog_shmem = shmem_provider.uninit_on_shmem::<AFLppCmpLogMap>().unwrap();
@@ -291,7 +323,7 @@ where
// Cmplog has 25% execution overhead so we give it double the timeout
let cmplog_executor = base_executor(opt, &mut shmem_provider)
.timeout(Duration::from_millis(opt.hang_timeout * 2))
.program(cmplog_binary)
.program(cmplog_executable_path)
.build(tuple_list!(cmplog_observer))
.unwrap();

View File

@@ -11,8 +11,8 @@ use std::{collections::HashMap, path::PathBuf, time::Duration};
mod afl_stats;
mod env_parser;
mod feedback;
mod mutational_stage;
mod scheduler;
mod stages;
use clap::Parser;
use corpus::{check_autoresume, create_dir_if_not_exists, remove_main_node_file};
mod corpus;
@@ -119,17 +119,30 @@ struct Opt {
input_dir: PathBuf,
#[arg(short = 'o')]
output_dir: PathBuf,
/// file extension for the fuzz test input file (if needed)
#[arg(short = 'e')]
input_ext: Option<String>,
/// use a fixed seed for the RNG
#[arg(short = 's')]
rng_seed: Option<u64>,
/// power schedules compute a seed's performance score: explore(default), fast, exploit, seek, rare, mmopt, coe, lin
#[arg(short = 'p')]
power_schedule: Option<PowerSchedule>,
/// enable `CmpLog` by specifying a binary compiled for it.
#[arg(short = 'c')]
cmplog_binary: Option<PathBuf>,
cmplog: Option<String>,
/// sync to a foreign fuzzer queue directory (requires -M, can be specified up to 32 times)
#[arg(short = 'F', num_args = 32)]
foreign_sync_dirs: Vec<PathBuf>,
/// fuzzer dictionary (see README.md, specify up to 4 times)
#[arg(short = 'x', num_args = 4)]
dicts: Vec<PathBuf>,
// Environment + CLI variables
#[arg(short = 'G')]
max_input_len: Option<usize>,
#[arg(short = 'g')]
min_input_len: Option<usize>,
/// sequential queue selection instead of weighted random
#[arg(short = 'Z')]
sequential_queue: bool,
// TODO: enforce
@@ -138,6 +151,7 @@ struct Opt {
// TODO: enforce
#[arg(short = 'V')]
fuzz_for_seconds: Option<usize>,
// Environment Variables
#[clap(skip)]
bench_just_one: bool,
@@ -211,18 +225,22 @@ struct Opt {
qemu_custom_bin: bool,
#[clap(skip)]
cs_custom_bin: bool,
#[clap(skip)]
use_wine: bool,
/// use qemu-based instrumentation with Wine (Wine mode)
#[arg(short = 'W')]
wine_mode: bool,
#[clap(skip)]
uses_asan: bool,
#[clap(skip)]
/// use binary-only instrumentation (FRIDA mode)
#[arg(short = 'O')]
frida_mode: bool,
#[clap(skip)]
/// use binary-only instrumentation (QEMU mode)
#[arg(short = 'Q')]
qemu_mode: bool,
#[cfg(target_os = "linux")]
#[clap(skip)]
nyx_mode: bool,
#[clap(skip)]
/// use unicorn-based instrumentation (Unicorn mode)
#[arg(short = 'U')]
unicorn_mode: bool,
#[clap(skip)]
forkserver_cs: bool,

View File

@@ -1,7 +1,7 @@
use std::marker::PhantomData;
use libafl::{
corpus::{CorpusId, HasTestcase, Testcase},
corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata, Testcase},
inputs::UsesInput,
observers::{CanTrack, ObserversTuple},
schedulers::{
@@ -72,7 +72,23 @@ where
{
fn on_add(&mut self, state: &mut Self::State, id: CorpusId) -> Result<(), Error> {
match self {
Self::Queue(queue, _) => queue.on_add(state, id),
// We need to manually set the depth
// since we want to avoid implementing `AflScheduler` for `QueueScheduler`
Self::Queue(queue, _) => {
queue.on_add(state, id)?;
let current_id = *state.corpus().current();
let mut depth = match current_id {
Some(parent_idx) => state
.testcase(parent_idx)?
.metadata::<SchedulerTestcaseMetadata>()?
.depth(),
None => 0,
};
depth += 1;
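// Initial seeds (no parent) end up at depth 1; descendants sit one level below
// their parent, which is what the max_depth tracking relies on.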
let mut testcase = state.corpus().get(id)?.borrow_mut();
testcase.add_metadata(SchedulerTestcaseMetadata::new(depth));
Ok(())
}
Self::Weighted(weighted, _) => weighted.on_add(state, id),
}
}

View File

@@ -0,0 +1,2 @@
pub mod mutational_stage;
pub mod time_tracker;

View File

@@ -0,0 +1,76 @@
use std::{marker::PhantomData, time::Duration};
use libafl::{
inputs::UsesInput,
stages::Stage,
state::{State, UsesState},
HasMetadata,
};
use libafl_bolts::current_time;
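/// Wraps an inner [`Stage`] and accumulates the wall-clock time spent in its
/// `perform` calls, storing the running total in the `State` as metadata of
/// type `T` (e.g. `CalibrationTime`, `FuzzTime`, `SyncTime`).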
pub struct TimeTrackingStageWrapper<T, S, ST> {
inner: ST,
count: Duration,
phantom: PhantomData<(T, S)>,
}
impl<T, S, ST> TimeTrackingStageWrapper<T, S, ST> {
pub fn new(inner: ST) -> Self {
Self {
inner,
count: Duration::from_secs(0),
phantom: PhantomData,
}
}
}
impl<T, S, ST> UsesState for TimeTrackingStageWrapper<T, S, ST>
where
S: State + HasMetadata,
{
type State = S;
}
impl<T, E, M, Z, S, ST> Stage<E, M, Z> for TimeTrackingStageWrapper<T, S, ST>
where
S: UsesInput + State + HasMetadata,
ST: Stage<E, M, Z, State = S>,
M: UsesState<State = S>,
Z: UsesState<State = S>,
E: UsesState<State = S>,
T: libafl_bolts::serdeany::SerdeAny + From<Duration>,
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut M,
) -> Result<(), libafl::prelude::Error> {
let before_run = current_time();
self.inner.perform(fuzzer, executor, state, manager)?;
let after_run = current_time();
self.count += after_run - before_run;
*state.metadata_mut::<T>()? = T::from(self.count);
Ok(())
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, libafl::prelude::Error> {
self.inner.should_restart(state)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), libafl::prelude::Error> {
self.inner.clear_progress(state)
}
fn perform_restartable(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut M,
) -> Result<(), libafl::prelude::Error> {
self.inner
.perform_restartable(fuzzer, executor, state, manager)
}
}