Push stage trait (#380)

* push mutational trait

* tiny changes

* started PushStageAdapter

* fmt

* refactoring

* fix docs

* no_std

* formatted more
Dominik Maier 2021-11-17 12:51:14 +01:00 committed by GitHub
parent 8b9f298674
commit ba969108e3
20 changed files with 782 additions and 252 deletions

View File

@ -20,7 +20,7 @@ use libafl::{
monitors::SimpleMonitor,
mutators::scheduled::{havoc_mutations, StdScheduledMutator},
observers::StdMapObserver,
stages::push::StdMutationalPushStage,
stages::push::{PushStageSharedState, StdMutationalPushStage},
state::{HasCorpus, StdState},
};
@ -95,13 +95,12 @@ pub fn main() {
let observers = tuple_list!(observer);
let shared_state = PushStageSharedState::new(fuzzer, state, observers, mgr);
// All fuzzer elements are hidden behind `Rc<RefCell<_>>`, so we can reuse them for multiple stages.
let push_stage = StdMutationalPushStage::new(
mutator,
Rc::new(RefCell::new(fuzzer)),
Rc::new(RefCell::new(state)),
Rc::new(RefCell::new(mgr)),
Rc::new(RefCell::new(observers)),
Rc::new(RefCell::new(Some(shared_state))),
exit_kind.clone(),
stage_idx,
);
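The construction above now bundles fuzzer, state, observers, and manager into a single `PushStageSharedState` behind an `Rc<RefCell<Option<_>>>`: the stage takes the bundle out for one iteration and puts it back afterwards. Below is a minimal, self-contained sketch of that take-and-replace pattern; the `Shared` type is illustrative, not the LibAFL struct.

use std::cell::RefCell;
use std::rc::Rc;

// Stand-in for PushStageSharedState (hypothetical `Shared` type): the stage
// takes the state out of the slot for one iteration, then puts it back so the
// next stage (or the caller) can use it.
struct Shared {
    iterations: u32,
}

fn one_iteration(slot: &Rc<RefCell<Option<Shared>>>) {
    // take() leaves None in the slot while we own the state by value
    let mut shared = slot.borrow_mut().take().expect("shared state was set");
    shared.iterations += 1;
    // hand the state back at the end of the iteration
    slot.borrow_mut().replace(shared);
}

fn main() {
    let slot = Rc::new(RefCell::new(Some(Shared { iterations: 0 })));
    one_iteration(&slot);
    assert_eq!(slot.borrow().as_ref().unwrap().iterations, 1);
}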

View File

@ -50,7 +50,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<PacketData, S>,
EM: EventFirer<PacketData>,
OT: ObserversTuple<PacketData, S>,
{
self.len = input.length;

View File

@ -10,6 +10,7 @@ use crate::bolts::os::unix_signals::{ucontext_t, Signal};
/// Write the contents of all important registers
#[cfg(all(target_os = "linux", target_arch = "x86_64"))]
#[allow(clippy::similar_names)]
pub fn dump_registers<W: Write>(
writer: &mut BufWriter<W>,
ucontext: &ucontext_t,

View File

@ -51,6 +51,8 @@ use crate::bolts::os::{fork, ForkResult};
#[cfg(feature = "std")]
use typed_builder::TypedBuilder;
use super::ProgressReporter;
/// Forward this to the client
const _LLMP_TAG_EVENT_TO_CLIENT: llmp::Tag = 0x2C11E471;
/// Only handle this in the broker
@ -400,7 +402,7 @@ where
}
}
impl<I, OT, S, SP> EventFirer<I, S> for LlmpEventManager<I, OT, S, SP>
impl<I, OT, S, SP> EventFirer<I> for LlmpEventManager<I, OT, S, SP>
where
I: Input,
OT: ObserversTuple<I, S>,
@ -408,7 +410,7 @@ where
//CE: CustomEvent<I>,
{
#[cfg(feature = "llmp_compression")]
fn fire(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
fn fire<S2>(&mut self, _state: &mut S2, event: Event<I>) -> Result<(), Error> {
let serialized = postcard::to_allocvec(&event)?;
let flags: Flags = LLMP_FLAG_INITIALIZED;
@ -428,7 +430,7 @@ where
}
#[cfg(not(feature = "llmp_compression"))]
fn fire(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
fn fire<S2>(&mut self, _state: &mut S2, event: Event<I>) -> Result<(), Error> {
let serialized = postcard::to_allocvec(&event)?;
self.llmp.send_buf(LLMP_TAG_EVENT_TO_BOTH, &serialized)?;
Ok(())
@ -507,6 +509,14 @@ where
{
}
impl<I, OT, S, SP> ProgressReporter<I> for LlmpEventManager<I, OT, S, SP>
where
I: Input,
OT: ObserversTuple<I, S> + serde::de::DeserializeOwned,
SP: ShMemProvider,
{
}
impl<I, OT, S, SP> HasEventManagerId for LlmpEventManager<I, OT, S, SP>
where
I: Input,
@ -538,15 +548,24 @@ where
}
#[cfg(feature = "std")]
impl<I, OT, S, SP> EventFirer<I, S> for LlmpRestartingEventManager<I, OT, S, SP>
impl<I, OT, S, SP> ProgressReporter<I> for LlmpRestartingEventManager<I, OT, S, SP>
where
I: Input,
OT: ObserversTuple<I, S>,
S: Serialize,
SP: ShMemProvider,
{
}
#[cfg(feature = "std")]
impl<I, OT, S, SP> EventFirer<I> for LlmpRestartingEventManager<I, OT, S, SP>
where
I: Input,
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
//CE: CustomEvent<I>,
{
fn fire(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error> {
fn fire<S2>(&mut self, state: &mut S2, event: Event<I>) -> Result<(), Error> {
// Check if we are going to crash in the event, in which case we store our current state for the next runner
self.llmp_mgr.fire(state, event)
}

View File

@ -14,7 +14,13 @@ use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::{
executors::ExitKind, inputs::Input, monitors::UserStats, observers::ObserversTuple, Error,
bolts::current_time,
executors::ExitKind,
inputs::Input,
monitors::UserStats,
observers::ObserversTuple,
state::{HasClientPerfMonitor, HasExecutions},
Error,
};
/// A per-fuzzer unique `ID`, usually starting with `0` and increasing
@ -268,7 +274,7 @@ where
}
/// An [`EventFirer`] fires events.
pub trait EventFirer<I, S>
pub trait EventFirer<I>
where
I: Input,
{
@ -280,10 +286,10 @@ where
/// (for example for each [`Input`], on multiple cores)
/// the [`llmp`] shared map may fill up and the client will eventually OOM or [`panic`].
/// This should not happen for normal use cases.
fn fire(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>;
fn fire<S>(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>;
/// Serialize all observers for this type and manager
fn serialize_observers<OT>(&mut self, observers: &OT) -> Result<Vec<u8>, Error>
fn serialize_observers<OT, S>(&mut self, observers: &OT) -> Result<Vec<u8>, Error>
where
OT: ObserversTuple<I, S> + serde::Serialize,
{
@ -296,6 +302,66 @@ where
}
}
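The change above drops the state type parameter from `EventFirer` itself and makes `fire` generic over the state instead, so a single manager can fire events regardless of the concrete state type. A compile-checked sketch of that pattern with made-up types (not the LibAFL signatures):

// Sketch (hypothetical types, not the LibAFL API) of moving the state type
// parameter from the trait onto the method: the impl no longer has to fix S.
trait Firer {
    fn fire<S>(&mut self, state: &mut S, event: &str) -> Result<(), String>;
}

struct LogFirer {
    log: Vec<String>,
}

impl Firer for LogFirer {
    fn fire<S>(&mut self, _state: &mut S, event: &str) -> Result<(), String> {
        self.log.push(event.to_string());
        Ok(())
    }
}

fn main() {
    let mut firer = LogFirer { log: Vec::new() };
    // The same firer works with completely different state types.
    firer.fire(&mut 0u64, "new corpus entry").unwrap();
    firer.fire(&mut String::from("other state"), "heartbeat").unwrap();
    assert_eq!(firer.log.len(), 2);
}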
/// A [`ProgressReporter`] periodically reports fuzzer progress (executions and monitor data) to the broker.
pub trait ProgressReporter<I>: EventFirer<I>
where
I: Input,
{
/// Given the last time, if `monitor_timeout` seconds passed, send off an info/monitor/heartbeat message to the broker.
/// Returns the new `last` time (so the old one, unless `monitor_timeout` time has passed and the monitor has been sent)
/// Will return an [`crate::Error`] if the stats could not be sent.
fn maybe_report_progress<S>(
&mut self,
state: &mut S,
last_report_time: Duration,
monitor_timeout: Duration,
) -> Result<Duration, Error>
where
S: HasExecutions + HasClientPerfMonitor,
{
let executions = *state.executions();
let cur = current_time();
// default to 0 here to avoid crashes on clock skew
if cur.checked_sub(last_report_time).unwrap_or_default() > monitor_timeout {
// Default no-introspection implementation
#[cfg(not(feature = "introspection"))]
self.fire(
state,
Event::UpdateExecutions {
executions,
time: cur,
phantom: PhantomData,
},
)?;
// If performance monitoring is requested, fire the `UpdatePerfMonitor` event
#[cfg(feature = "introspection")]
{
state
.introspection_monitor_mut()
.set_current_time(crate::bolts::cpu::read_time_counter());
// Send the current monitor over to the manager. This `.clone` shouldn't be
// costly as `ClientPerfMonitor` impls `Copy` since it only contains `u64`s
self.fire(
state,
Event::UpdatePerfMonitor {
executions,
time: cur,
introspection_monitor: Box::new(state.introspection_monitor().clone()),
phantom: PhantomData,
},
)?;
}
Ok(cur)
} else {
if cur.as_millis() % 1000 == 0 {}
Ok(last_report_time)
}
}
}
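`maybe_report_progress` above is essentially a rate limiter: it only fires an update event once `monitor_timeout` has elapsed since the last report, and otherwise returns the old timestamp unchanged. A standalone sketch of that throttle, using plain `Duration`s instead of the fuzzer state (not the LibAFL API):

use std::time::Duration;

// Minimal sketch of the throttle in maybe_report_progress: report only if
// `monitor_timeout` has elapsed since the last report, and return the time
// of the last report either way.
fn maybe_report(now: Duration, last_report_time: Duration, monitor_timeout: Duration) -> Duration {
    // default to zero on clock skew, like checked_sub(..).unwrap_or_default() above
    if now.checked_sub(last_report_time).unwrap_or_default() > monitor_timeout {
        println!("heartbeat at {now:?}");
        now
    } else {
        last_report_time
    }
}

fn main() {
    let timeout = Duration::from_secs(15);
    let mut last = Duration::from_secs(0);
    last = maybe_report(Duration::from_secs(20), last, timeout); // 20s elapsed: fires
    last = maybe_report(Duration::from_secs(25), last, timeout); // only 5s since last: skipped
    assert_eq!(last, Duration::from_secs(20));
}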
pub trait EventRestarter<S> {
/// For restarting event managers, implement a way to forward state to their next peers.
#[inline]
@ -332,7 +398,11 @@ pub trait HasEventManagerId {
/// [`EventManager`] is the main communications hub.
/// For the "normal" multi-processed mode, you may want to look into [`LlmpRestartingEventManager`]
pub trait EventManager<E, I, S, Z>:
EventFirer<I, S> + EventProcessor<E, I, S, Z> + EventRestarter<S> + HasEventManagerId
EventFirer<I>
+ EventProcessor<E, I, S, Z>
+ EventRestarter<S>
+ HasEventManagerId
+ ProgressReporter<I>
where
I: Input,
{
@ -342,11 +412,11 @@ where
#[derive(Copy, Clone, Debug)]
pub struct NopEventManager {}
impl<I, S> EventFirer<I, S> for NopEventManager
impl<I> EventFirer<I> for NopEventManager
where
I: Input,
{
fn fire(&mut self, _state: &mut S, _event: Event<I>) -> Result<(), Error> {
fn fire<S>(&mut self, _state: &mut S, _event: Event<I>) -> Result<(), Error> {
Ok(())
}
}
@ -366,6 +436,8 @@ impl<E, I, S, Z> EventProcessor<E, I, S, Z> for NopEventManager {
impl<E, I, S, Z> EventManager<E, I, S, Z> for NopEventManager where I: Input {}
impl<I> ProgressReporter<I> for NopEventManager where I: Input {}
impl HasEventManagerId for NopEventManager {
fn mgr_id(&self) -> EventManagerId {
EventManagerId { id: 0 }
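`ProgressReporter` carries its whole behavior in the default method, so concrete managers such as `NopEventManager` above opt in with an empty `impl` block. A tiny illustration of that default-method pattern (hypothetical trait, not the LibAFL one):

// Sketch of the pattern used for ProgressReporter above: the trait provides
// the behavior as a default method, so types opt in with an empty impl.
trait Heartbeat {
    fn beat(&self) -> &'static str {
        "still fuzzing"
    }
}

struct NopManager;
struct SimpleManager;

// Opting in is a one-liner; both types share the default behavior.
impl Heartbeat for NopManager {}
impl Heartbeat for SimpleManager {}

fn main() {
    assert_eq!(NopManager.beat(), SimpleManager.beat());
}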

View File

@ -29,6 +29,8 @@ use crate::{
state::{HasCorpus, HasSolutions},
};
use super::ProgressReporter;
/// The llmp connection from the actual fuzzer to the process supervising it
const _ENV_FUZZER_SENDER: &str = "_AFL_ENV_FUZZER_SENDER";
const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER";
@ -48,12 +50,12 @@ where
events: Vec<Event<I>>,
}
impl<I, MT, S> EventFirer<I, S> for SimpleEventManager<I, MT>
impl<I, MT> EventFirer<I> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
fn fire(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
fn fire<S>(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
match Self::handle_in_broker(&mut self.monitor, &event)? {
BrokerEventResult::Forward => self.events.push(event),
BrokerEventResult::Handled => (),
@ -96,6 +98,13 @@ where
{
}
impl<I, MT> ProgressReporter<I> for SimpleEventManager<I, MT>
where
I: Input,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
impl<I, MT> HasEventManagerId for SimpleEventManager<I, MT>
where
I: Input,
@ -230,7 +239,7 @@ where
}
#[cfg(feature = "std")]
impl<'a, C, I, MT, S, SC, SP> EventFirer<I, S>
impl<'a, C, I, MT, S, SC, SP> EventFirer<I>
for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP>
where
C: Corpus<I>,
@ -239,7 +248,7 @@ where
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
fn fire(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
fn fire<S2>(&mut self, _state: &mut S2, event: Event<I>) -> Result<(), Error> {
self.simple_event_mgr.fire(_state, event)
}
}
@ -289,6 +298,18 @@ where
{
}
#[cfg(feature = "std")]
impl<'a, C, I, MT, S, SC, SP> ProgressReporter<I>
for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP>
where
I: Input,
C: Corpus<I>,
S: Serialize,
SP: ShMemProvider,
MT: Monitor, //CE: CustomEvent<I, OT>,
{
}
#[cfg(feature = "std")]
impl<'a, C, I, MT, S, SC, SP> HasEventManagerId
for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP>

View File

@ -163,7 +163,7 @@ where
_event_mgr: &mut EM,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
S: HasSolutions<OC, I> + HasClientPerfMonitor,
@ -393,7 +393,7 @@ mod unix_signal_handler {
data: &mut InProcessExecutorHandlerData,
) where
E: HasObservers<I, OT, S>,
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OT: ObserversTuple<I, S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
@ -471,7 +471,7 @@ mod unix_signal_handler {
data: &mut InProcessExecutorHandlerData,
) where
E: HasObservers<I, OT, S>,
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OT: ObserversTuple<I, S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
@ -643,7 +643,7 @@ mod windows_exception_handler {
_p1: u8,
) where
E: HasObservers<I, OT, S>,
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OT: ObserversTuple<I, S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
@ -719,7 +719,7 @@ mod windows_exception_handler {
data: &mut InProcessExecutorHandlerData,
) where
E: HasObservers<I, OT, S>,
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OT: ObserversTuple<I, S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
@ -917,7 +917,7 @@ where
shmem_provider: SP,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
S: HasSolutions<OC, I> + HasClientPerfMonitor,

View File

@ -53,7 +53,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
self.metadata = observers

View File

@ -336,7 +336,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let mut interesting = false;
@ -563,7 +563,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// TODO Replace with match_name_type when stable

View File

@ -48,7 +48,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>;
#[cfg(feature = "introspection")]
@ -62,7 +62,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// Start a timer for this feedback
@ -193,7 +193,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
FL::is_pair_interesting(
@ -217,7 +217,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
FL::is_pair_interesting_introspection(
@ -263,7 +263,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>;
#[cfg(feature = "introspection")]
@ -278,7 +278,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>;
}
@ -308,7 +308,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let a = first.is_interesting(state, manager, input, observers, exit_kind)?;
@ -327,7 +327,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// Execute this feedback
@ -359,7 +359,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let a = first.is_interesting(state, manager, input, observers, exit_kind)?;
@ -381,7 +381,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// Execute this feedback
@ -416,7 +416,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let a = first.is_interesting(state, manager, input, observers, exit_kind)?;
@ -435,7 +435,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// Execute this feedback
@ -467,7 +467,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let a = first.is_interesting(state, manager, input, observers, exit_kind)?;
@ -489,7 +489,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// Execute this feedback
@ -550,7 +550,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
Ok(!self
@ -664,7 +664,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
Ok(false)
@ -696,7 +696,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
if let ExitKind::Crash = exit_kind {
@ -746,7 +746,7 @@ where
exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
if let ExitKind::Timeout = exit_kind {
@ -801,7 +801,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
// TODO Replace with match_name_type when stable

View File

@ -63,7 +63,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<NautilusInput, S>,
EM: EventFirer<NautilusInput>,
OT: ObserversTuple<NautilusInput, S>,
{
Ok(false)

View File

@ -3,7 +3,7 @@
use crate::{
bolts::current_time,
corpus::{Corpus, CorpusScheduler, Testcase},
events::{Event, EventConfig, EventFirer, EventManager},
events::{Event, EventConfig, EventFirer, EventManager, ProgressReporter},
executors::{Executor, ExitKind, HasObservers},
feedbacks::Feedback,
inputs::Input,
@ -17,8 +17,6 @@ use crate::{
#[cfg(feature = "introspection")]
use crate::monitors::PerfFeature;
#[cfg(feature = "introspection")]
use alloc::boxed::Box;
use alloc::string::ToString;
use core::{marker::PhantomData, time::Duration};
@ -84,7 +82,7 @@ where
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>
where
EM: EventFirer<I, S>;
EM: EventFirer<I>;
}
/// Evaluate an input, modifying the state of the fuzzer
@ -148,7 +146,12 @@ pub trait Evaluator<E, EM, I, S> {
}
/// The main fuzzer trait.
pub trait Fuzzer<E, EM, I, S, ST> {
pub trait Fuzzer<E, EM, I, S, ST>
where
I: Input,
EM: ProgressReporter<I>,
S: HasExecutions + HasClientPerfMonitor,
{
/// Fuzz for a single iteration
/// Returns the index of the last fuzzed corpus item
///
@ -175,7 +178,7 @@ pub trait Fuzzer<E, EM, I, S, ST> {
let monitor_timeout = STATS_TIMEOUT_DEFAULT;
loop {
self.fuzz_one(stages, executor, state, manager)?;
last = Self::maybe_report_monitor(state, manager, last, monitor_timeout)?;
last = manager.maybe_report_progress(state, last, monitor_timeout)?;
}
}
@ -205,7 +208,7 @@ pub trait Fuzzer<E, EM, I, S, ST> {
for _ in 0..iters {
ret = self.fuzz_one(stages, executor, state, manager)?;
last = Self::maybe_report_monitor(state, manager, last, monitor_timeout)?;
last = manager.maybe_report_progress(state, last, monitor_timeout)?;
}
// If we would assume the fuzzer loop will always exit after this, we could do this here:
@ -215,16 +218,6 @@ pub trait Fuzzer<E, EM, I, S, ST> {
Ok(ret)
}
/// Given the last time, if `monitor_timeout` seconds passed, send off an info/monitor/heartbeat message to the broker.
/// Returns the new `last` time (so the old one, unless `monitor_timeout` time has passed and the monitor has been sent)
/// Will return an [`crate::Error`], if the monitor could not be sent.
fn maybe_report_monitor(
state: &mut S,
manager: &mut EM,
last: Duration,
monitor_timeout: Duration,
) -> Result<Duration, Error>;
}
#[derive(Debug, PartialEq)]
@ -325,7 +318,7 @@ where
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
{
let mut res = ExecuteInputResult::None;
@ -428,7 +421,7 @@ where
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasExecutions + HasCorpus<C, I> + HasSolutions<SC, I> + HasClientPerfMonitor,
S: HasCorpus<C, I> + HasSolutions<SC, I> + HasClientPerfMonitor + HasExecutions,
SC: Corpus<I>,
{
/// Process one input, adding to the respective corpuses if needed and firing the right events
@ -462,7 +455,7 @@ where
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasExecutions + HasCorpus<C, I> + HasSolutions<SC, I> + HasClientPerfMonitor,
S: HasCorpus<C, I> + HasSolutions<SC, I> + HasClientPerfMonitor + HasExecutions,
SC: Corpus<I>,
{
/// Process one input, adding to the respective corpuses if needed and firing the right events
@ -527,58 +520,10 @@ where
EM: EventManager<E, I, S, Self>,
F: Feedback<I, S>,
I: Input,
S: HasExecutions + HasClientPerfMonitor,
S: HasClientPerfMonitor + HasExecutions,
OF: Feedback<I, S>,
ST: StagesTuple<E, EM, S, Self>,
{
#[inline]
fn maybe_report_monitor(
state: &mut S,
manager: &mut EM,
last: Duration,
monitor_timeout: Duration,
) -> Result<Duration, Error> {
let cur = current_time();
// default to 0 here to avoid crashes on clock skew
if cur.checked_sub(last).unwrap_or_default() > monitor_timeout {
// Default no-introspection implementation
#[cfg(not(feature = "introspection"))]
manager.fire(
state,
Event::UpdateExecutions {
executions: *state.executions(),
time: cur,
phantom: PhantomData,
},
)?;
// If performance monitoring is requested, fire the `UpdatePerfMonitor` event
#[cfg(feature = "introspection")]
{
state
.introspection_monitor_mut()
.set_current_time(crate::bolts::cpu::read_time_counter());
// Send the current monitor over to the manager. This `.clone` shouldn't be
// costly as `ClientPerfMonitor` impls `Copy` since it only contains `u64`s
manager.fire(
state,
Event::UpdatePerfMonitor {
executions: *state.executions(),
time: cur,
introspection_monitor: Box::new(state.introspection_monitor().clone()),
phantom: PhantomData,
},
)?;
}
Ok(cur)
} else {
if cur.as_millis() % 1000 == 0 {}
Ok(last)
}
}
fn fuzz_one(
&mut self,
stages: &mut ST,
@ -666,3 +611,61 @@ where
Ok(exit_kind)
}
}
pub trait ExecutesInput<I, OT, S, Z>
where
I: Input,
OT: ObserversTuple<I, S>,
{
/// Runs the input and triggers observers and feedback
fn execute_input<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
event_mgr: &mut EM,
input: &I,
) -> Result<ExitKind, Error>
where
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>;
}
impl<C, CS, F, I, OF, OT, S, SC> ExecutesInput<I, OT, S, Self>
for StdFuzzer<C, CS, F, I, OF, OT, S, SC>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OT: ObserversTuple<I, S>,
OF: Feedback<I, S>,
S: HasExecutions + HasClientPerfMonitor,
{
/// Runs the input and triggers observers and feedback
fn execute_input<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
event_mgr: &mut EM,
input: &I,
) -> Result<ExitKind, Error>
where
E: Executor<EM, I, S, Self> + HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
{
start_timer!(state);
executor.observers_mut().pre_exec_all(state, input)?;
mark_feature_time!(state, PerfFeature::PreExecObservers);
start_timer!(state);
let exit_kind = executor.run_target(self, state, event_mgr, input)?;
mark_feature_time!(state, PerfFeature::TargetExecution);
*state.executions_mut() += 1;
start_timer!(state);
executor.observers_mut().post_exec_all(state, input)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(exit_kind)
}
}
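The new `ExecutesInput::execute_input` above fixes the per-run sequence: observers get a pre-exec hook, the target runs, the execution counter is bumped, then observers get a post-exec hook. A simplified, self-contained sketch of that sequence with an illustrative observer trait (not LibAFL's):

// Sketch of the execute_input sequence above with illustrative types:
// pre-exec hooks, run the target, bump the counter, post-exec hooks.
trait Observer {
    fn pre_exec(&mut self, input: &[u8]);
    fn post_exec(&mut self, input: &[u8]);
}

struct HitCounter {
    runs: usize,
}

impl Observer for HitCounter {
    fn pre_exec(&mut self, _input: &[u8]) { /* e.g. reset a coverage map */ }
    fn post_exec(&mut self, _input: &[u8]) {
        self.runs += 1;
    }
}

fn execute_input(observers: &mut [Box<dyn Observer>], executions: &mut u64, input: &[u8]) -> bool {
    for o in observers.iter_mut() {
        o.pre_exec(input);
    }
    let exit_ok = !input.is_empty(); // stand-in for actually running the target
    *executions += 1;
    for o in observers.iter_mut() {
        o.post_exec(input);
    }
    exit_ok
}

fn main() {
    let mut observers: Vec<Box<dyn Observer>> = vec![Box::new(HitCounter { runs: 0 })];
    let mut executions = 0u64;
    let ok = execute_input(&mut observers, &mut executions, b"fuzz me");
    assert!(ok);
    assert_eq!(executions, 1);
}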

View File

@ -31,9 +31,27 @@ pub mod sync;
#[cfg(feature = "std")]
pub use sync::*;
use crate::Error;
use crate::events::EventFirer;
use crate::events::EventRestarter;
use crate::events::HasEventManagerId;
use crate::events::ProgressReporter;
use crate::state::HasExecutions;
use crate::{
bolts::rands::Rand,
corpus::Corpus,
corpus::CorpusScheduler,
executors::Executor,
executors::HasObservers,
inputs::Input,
observers::ObserversTuple,
state::HasRand,
state::{HasClientPerfMonitor, HasCorpus},
Error, EvaluatorObservers, ExecutesInput, ExecutionProcessor, HasCorpusScheduler,
};
use core::{convert::From, marker::PhantomData};
use self::push::PushStage;
/// A stage is one step in the fuzzing process.
/// Multiple stages will be scheduled one by one for each input.
pub trait Stage<E, EM, S, Z> {
@ -143,3 +161,100 @@ where
Self::new(closure)
}
}
/// Allows us to use a [`push::PushStage`] as a normal [`Stage`]
#[allow(clippy::type_complexity)]
pub struct PushStageAdapter<C, CS, EM, I, OT, PS, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId + ProgressReporter<I>,
I: Input,
OT: ObserversTuple<I, S>,
PS: PushStage<C, CS, EM, I, OT, R, S, Z>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R> + HasExecutions,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
push_stage: PS,
phantom: PhantomData<(C, CS, EM, I, OT, R, S, Z)>,
}
impl<C, CS, EM, I, OT, PS, R, S, Z> PushStageAdapter<C, CS, EM, I, OT, PS, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId + ProgressReporter<I>,
I: Input,
OT: ObserversTuple<I, S>,
PS: PushStage<C, CS, EM, I, OT, R, S, Z>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R> + HasExecutions,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Create a new [`PushStageAdapter`], wrapping the given [`PushStage`]
/// to be used as a normal [`Stage`]
#[must_use]
pub fn new(push_stage: PS) -> Self {
Self {
push_stage,
phantom: PhantomData,
}
}
}
impl<C, CS, E, EM, I, OT, PS, R, S, Z> Stage<E, EM, S, Z>
for PushStageAdapter<C, CS, EM, I, OT, PS, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId + ProgressReporter<I>,
I: Input,
OT: ObserversTuple<I, S>,
PS: PushStage<C, CS, EM, I, OT, R, S, Z>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R> + HasExecutions,
Z: ExecutesInput<I, OT, S, Z>
+ ExecutionProcessor<I, OT, S>
+ EvaluatorObservers<I, OT, S>
+ HasCorpusScheduler<CS, I, S>,
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut S,
event_mgr: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let push_stage = &mut self.push_stage;
push_stage.set_current_corpus_idx(corpus_idx);
push_stage.init(fuzzer, state, event_mgr, executor.observers_mut())?;
loop {
let input =
match push_stage.pre_exec(fuzzer, state, event_mgr, executor.observers_mut()) {
Some(Ok(next_input)) => next_input,
Some(Err(err)) => return Err(err),
None => break,
};
let exit_kind = fuzzer.execute_input(state, executor, event_mgr, &input)?;
push_stage.post_exec(
fuzzer,
state,
event_mgr,
executor.observers_mut(),
input,
exit_kind,
)?;
}
self.push_stage
.deinit(fuzzer, state, event_mgr, executor.observers_mut())
}
}
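`PushStageAdapter::perform` above drives a pull-style push stage from the conventional stage interface: ask for the next input, execute it, feed the result back, repeat until the stage yields `None`. A self-contained sketch of that adapter loop with toy types (not the LibAFL traits):

// Reduced sketch of the adapter loop in PushStageAdapter::perform.
trait PushLike {
    fn pre_exec(&mut self) -> Option<String>; // next input, or None when done
    fn post_exec(&mut self, input: String, crashed: bool);
}

struct Counting {
    left: u32,
    crashes: u32,
}

impl PushLike for Counting {
    fn pre_exec(&mut self) -> Option<String> {
        if self.left == 0 {
            return None;
        }
        self.left -= 1;
        Some(format!("input-{}", self.left))
    }
    fn post_exec(&mut self, _input: String, crashed: bool) {
        if crashed {
            self.crashes += 1;
        }
    }
}

struct Adapter<P: PushLike> {
    inner: P,
}

impl<P: PushLike> Adapter<P> {
    // Loop until the push stage reports it is done.
    fn perform(&mut self, run_target: impl Fn(&str) -> bool) {
        while let Some(input) = self.inner.pre_exec() {
            let crashed = run_target(&input);
            self.inner.post_exec(input, crashed);
        }
    }
}

fn main() {
    let mut adapter = Adapter { inner: Counting { left: 3, crashes: 0 } };
    adapter.perform(|input| input.ends_with("-0")); // pretend the last input crashes
    assert_eq!(adapter.inner.crashes, 1);
}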

View File

@ -8,5 +8,337 @@
pub mod mutational;
pub use mutational::StdMutationalPushStage;
use alloc::rc::Rc;
use core::{
cell::{Cell, RefCell},
marker::PhantomData,
time::Duration,
};
use crate::{
bolts::{current_time, rands::Rand},
corpus::{Corpus, CorpusScheduler},
events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter},
executors::ExitKind,
inputs::Input,
observers::ObserversTuple,
state::{HasClientPerfMonitor, HasCorpus, HasExecutions, HasRand},
Error, EvaluatorObservers, ExecutionProcessor, HasCorpusScheduler,
};
/// Send a monitor update every 15 (or more) seconds
const STATS_TIMEOUT_DEFAULT: Duration = Duration::from_secs(15);
/// The shared state for all [`PushStage`]s
/// Should be stored inside an [`Rc<RefCell<_>>`]
#[derive(Clone, Debug)]
pub struct PushStageSharedState<C, CS, EM, I, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// The [`crate::state::State`]
pub state: S,
/// The [`crate::fuzzer::Fuzzer`] instance
pub fuzzer: Z,
/// The [`crate::events::EventManager`]
pub event_mgr: EM,
/// The [`crate::observers::ObserversTuple`]
pub observers: OT,
phantom: PhantomData<(C, CS, I, OT, R, S, Z)>,
}
impl<C, CS, EM, I, OT, R, S, Z> PushStageSharedState<C, CS, EM, I, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Create a new `PushStageSharedState` that can be used by all [`PushStage`]s
#[must_use]
pub fn new(fuzzer: Z, state: S, observers: OT, event_mgr: EM) -> Self {
Self {
state,
fuzzer,
event_mgr,
observers,
phantom: PhantomData,
}
}
}
/// Helper class for the [`PushStage`] trait, taking care of borrowing the shared state
#[derive(Clone, Debug)]
pub struct PushStageHelper<C, CS, EM, I, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// If this stage has already been initialized.
/// This gets reset to `false` after one iteration of the stage is done.
pub initialized: bool,
/// The last time the monitor was updated
pub last_monitor_time: Duration,
/// The shared state, keeping track of the corpus and the fuzzer
#[allow(clippy::type_complexity)]
pub shared_state: Rc<RefCell<Option<PushStageSharedState<C, CS, EM, I, OT, R, S, Z>>>>,
/// If the last iteration failed
pub errored: bool,
/// The corpus index we're currently working on
pub current_corpus_idx: Option<usize>,
/// The input we just ran
pub current_input: Option<I>, // Todo: Get rid of copy
#[allow(clippy::type_complexity)]
phantom: PhantomData<(C, CS, (), EM, I, R, OT, S, Z)>,
exit_kind: Rc<Cell<Option<ExitKind>>>,
}
impl<C, CS, EM, I, OT, R, S, Z> PushStageHelper<C, CS, EM, I, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Create a new [`PushStageHelper`]
#[must_use]
#[allow(clippy::type_complexity)]
pub fn new(
shared_state: Rc<RefCell<Option<PushStageSharedState<C, CS, EM, I, OT, R, S, Z>>>>,
exit_kind_ref: Rc<Cell<Option<ExitKind>>>,
) -> Self {
Self {
shared_state,
initialized: false,
phantom: PhantomData,
last_monitor_time: current_time(),
exit_kind: exit_kind_ref,
errored: false,
current_input: None,
current_corpus_idx: None,
}
}
/// Sets the shared state for this helper (and all other helpers owning the same [`RefCell`])
#[inline]
pub fn set_shared_state(
&mut self,
shared_state: PushStageSharedState<C, CS, EM, I, OT, R, S, Z>,
) {
(&mut *self.shared_state.borrow_mut()).replace(shared_state);
}
/// Takes the shared state from this helper, replacing it with `None`
#[inline]
#[allow(clippy::type_complexity)]
pub fn take_shared_state(&mut self) -> Option<PushStageSharedState<C, CS, EM, I, OT, R, S, Z>> {
let shared_state_ref = &mut (*self.shared_state).borrow_mut();
shared_state_ref.take()
}
/// Returns the exit kind of the last run
#[inline]
#[must_use]
pub fn exit_kind(&self) -> Option<ExitKind> {
self.exit_kind.get()
}
/// Resets the exit kind
#[inline]
pub fn reset_exit_kind(&mut self) {
self.exit_kind.set(None);
}
/// Resets this state after a full stage iter.
fn end_of_iter(
&mut self,
shared_state: PushStageSharedState<C, CS, EM, I, OT, R, S, Z>,
errored: bool,
) {
self.set_shared_state(shared_state);
self.errored = errored;
self.current_corpus_idx = None;
if errored {
self.initialized = false;
}
}
}
/// A push stage is a generator that returns a single testcase for each call.
pub trait PushStage<E, EM, S, Z>: Iterator {}
/// It's an iterator so we can chain it.
/// After it has finished once, we will call it again for the next fuzzer round.
pub trait PushStage<C, CS, EM, I, OT, R, S, Z>: Iterator
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId + ProgressReporter<I>,
I: Input,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R> + HasExecutions,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Gets the [`PushStageHelper`]
fn push_stage_helper(&self) -> &PushStageHelper<C, CS, EM, I, OT, R, S, Z>;
/// Gets the [`PushStageHelper`], mut
fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper<C, CS, EM, I, OT, R, S, Z>;
/// Set the current corpus index this stage works on
fn set_current_corpus_idx(&mut self, corpus_idx: usize) {
self.push_stage_helper_mut().current_corpus_idx = Some(corpus_idx);
}
/// Called by `next_std` when this stage is being initialized.
/// This is called before the first iteration of the stage.
/// After the stage has finished once (after `deinit`), this will be called again.
#[inline]
fn init(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
) -> Result<(), Error> {
Ok(())
}
/// Called before a test case is executed.
/// Should return the test case to be executed.
/// After this stage has finished, or if the stage does not process any inputs, this should return `None`.
fn pre_exec(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
) -> Option<Result<I, Error>>;
/// Called after the execution of a testcase finished.
#[inline]
fn post_exec(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
_input: I,
_exit_kind: ExitKind,
) -> Result<(), Error> {
Ok(())
}
/// Called after the stage finished (`pre_exec` returned `None`)
#[inline]
fn deinit(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
) -> Result<(), Error> {
Ok(())
}
/// This is the default implementation for `next` for this stage
fn next_std(&mut self) -> Option<Result<I, Error>> {
let mut shared_state = {
let shared_state_ref = &mut (*self.push_stage_helper_mut().shared_state).borrow_mut();
shared_state_ref.take().unwrap()
};
let step_success = if self.push_stage_helper().initialized {
// We already ran once
let last_input = self.push_stage_helper_mut().current_input.take().unwrap();
self.post_exec(
&mut shared_state.fuzzer,
&mut shared_state.state,
&mut shared_state.event_mgr,
&mut shared_state.observers,
last_input,
self.push_stage_helper().exit_kind().unwrap(),
)
} else {
self.init(
&mut shared_state.fuzzer,
&mut shared_state.state,
&mut shared_state.event_mgr,
&mut shared_state.observers,
)
};
if let Err(err) = step_success {
self.push_stage_helper_mut().end_of_iter(shared_state, true);
return Some(Err(err));
}
//for i in 0..num {
let ret = self.pre_exec(
&mut shared_state.fuzzer,
&mut shared_state.state,
&mut shared_state.event_mgr,
&mut shared_state.observers,
);
if ret.is_none() {
// We're done.
drop(self.push_stage_helper_mut().current_input.take());
self.push_stage_helper_mut().initialized = false;
if let Err(err) = self.deinit(
&mut shared_state.fuzzer,
&mut shared_state.state,
&mut shared_state.event_mgr,
&mut shared_state.observers,
) {
self.push_stage_helper_mut().end_of_iter(shared_state, true);
return Some(Err(err));
};
let last_monitor_time = self.push_stage_helper().last_monitor_time;
let new_monitor_time = match shared_state.event_mgr.maybe_report_progress(
&mut shared_state.state,
last_monitor_time,
STATS_TIMEOUT_DEFAULT,
) {
Ok(new_time) => new_time,
Err(err) => {
self.push_stage_helper_mut().end_of_iter(shared_state, true);
return Some(Err(err));
}
};
self.push_stage_helper_mut().last_monitor_time = new_monitor_time;
//self.fuzzer.maybe_report_monitor();
} else {
self.push_stage_helper_mut().reset_exit_kind();
}
self.push_stage_helper_mut()
.end_of_iter(shared_state, false);
ret
}
}
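The `PushStage` trait above is driven as an `Iterator`: each `next_std` call first consumes the result of the previous execution (via the shared `exit_kind` `Cell`), then produces the next input, and returns `None` once the stage is done. A reduced, runnable sketch of that handshake with toy types (not the LibAFL trait):

use std::cell::Cell;
use std::rc::Rc;

// The stage yields inputs as an Iterator; the caller executes each input and
// stores the result in a shared Cell before asking for the next one.
#[derive(Clone, Copy, Debug, PartialEq)]
enum ExitKind {
    Ok,
    Crash,
}

struct MiniPushStage {
    remaining: u32,
    exit_kind: Rc<Cell<Option<ExitKind>>>,
    crashes: u32,
}

impl Iterator for MiniPushStage {
    type Item = String;

    fn next(&mut self) -> Option<String> {
        // "post_exec": consume the result of the previous execution, if any
        if let Some(ExitKind::Crash) = self.exit_kind.take() {
            self.crashes += 1;
        }
        // "pre_exec": produce the next input, or finish the stage
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        Some(format!("input-{}", self.remaining))
    }
}

fn main() {
    let exit_kind = Rc::new(Cell::new(None));
    let mut stage = MiniPushStage {
        remaining: 3,
        exit_kind: exit_kind.clone(),
        crashes: 0,
    };
    while let Some(input) = stage.next() {
        // "run the target", then report the exit kind back to the stage
        let crashed = input.ends_with("-0");
        exit_kind.set(Some(if crashed { ExitKind::Crash } else { ExitKind::Ok }));
    }
    // the crash of the last input is consumed by the final next() call
    assert_eq!(stage.crashes, 1);
}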

View File

@ -2,35 +2,28 @@
//! For the current input, it will perform a range of random mutations, and then run them in the executor.
use alloc::rc::Rc;
use core::{
borrow::BorrowMut,
cell::{Cell, RefCell},
marker::PhantomData,
time::Duration,
};
use core::cell::{Cell, RefCell};
use crate::{
bolts::{current_time, rands::Rand},
bolts::rands::Rand,
corpus::{Corpus, CorpusScheduler},
events::EventManager,
events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter},
executors::ExitKind,
inputs::Input,
mark_feature_time,
mutators::Mutator,
observers::ObserversTuple,
start_timer,
state::{HasClientPerfMonitor, HasCorpus, HasRand},
Error, EvaluatorObservers, ExecutionProcessor, Fuzzer, HasCorpusScheduler,
state::{HasClientPerfMonitor, HasCorpus, HasExecutions, HasRand},
Error, EvaluatorObservers, ExecutionProcessor, HasCorpusScheduler,
};
#[cfg(feature = "introspection")]
use crate::monitors::PerfFeature;
/// Send a monitor update every 15 (or more) seconds
const STATS_TIMEOUT_DEFAULT: Duration = Duration::from_secs(15);
use super::{PushStage, PushStageHelper, PushStageSharedState};
pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: u64 = 128;
/// A Mutational push stage is the stage in a fuzzing run that mutates inputs.
/// Mutational push stages will usually have a range of mutations that are
/// being applied to the input one by one, between executions.
@ -45,52 +38,36 @@ pub struct StdMutationalPushStage<C, CS, EM, I, M, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventManager<(), I, S, Z>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
M: Mutator<I, S>,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S>
+ EvaluatorObservers<I, OT, S>
+ Fuzzer<(), EM, I, S, ()>
+ HasCorpusScheduler<CS, I, S>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
initialized: bool,
state: Rc<RefCell<S>>,
current_corpus_idx: Option<usize>,
testcases_to_do: usize,
testcases_done: usize,
fuzzer: Rc<RefCell<Z>>,
event_mgr: Rc<RefCell<EM>>,
current_input: Option<I>, // Todo: Get rid of copy
stage_idx: i32,
mutator: M,
#[allow(clippy::type_complexity)]
phantom: PhantomData<(C, CS, (), EM, I, R, OT, S, Z)>,
last_monitor_time: Duration,
observers: Rc<RefCell<OT>>,
exit_kind: Rc<Cell<Option<ExitKind>>>,
psh: PushStageHelper<C, CS, EM, I, OT, R, S, Z>,
}
impl<C, CS, EM, I, M, OT, R, S, Z> StdMutationalPushStage<C, CS, EM, I, M, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventManager<(), I, S, Z>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
M: Mutator<I, S>,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S>
+ EvaluatorObservers<I, OT, S>
+ Fuzzer<(), EM, I, S, ()>
+ HasCorpusScheduler<CS, I, S>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Gets the number of iterations as a random number
#[allow(clippy::unused_self, clippy::unnecessary_wraps)] // TODO: we should put this function into a trait later
@ -101,16 +78,33 @@ where
pub fn set_current_corpus_idx(&mut self, current_corpus_idx: usize) {
self.current_corpus_idx = Some(current_corpus_idx);
}
}
impl<C, CS, EM, I, M, OT, R, S, Z> PushStage<C, CS, EM, I, OT, R, S, Z>
for StdMutationalPushStage<C, CS, EM, I, M, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId + ProgressReporter<I>,
I: Input,
M: Mutator<I, S>,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R> + HasExecutions,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Creates a new default mutational stage
fn init(&mut self) -> Result<(), Error> {
let state: &mut S = &mut (*self.state).borrow_mut();
fn init(
&mut self,
fuzzer: &mut Z,
state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
) -> Result<(), Error> {
// Find a testcase to work on, unless someone already set it
self.current_corpus_idx = Some(if let Some(corpus_idx) = self.current_corpus_idx {
corpus_idx
} else {
let fuzzer: &mut Z = &mut (*self.fuzzer).borrow_mut();
fuzzer.scheduler().next(state)?
});
@ -119,9 +113,25 @@ where
Ok(())
}
fn pre_exec(&mut self) -> Option<Result<I, Error>> {
let state: &mut S = &mut (*self.state).borrow_mut();
#[inline]
fn deinit(
&mut self,
_fuzzer: &mut Z,
_state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
) -> Result<(), Error> {
self.current_corpus_idx = None;
Ok(())
}
fn pre_exec(
&mut self,
_fuzzer: &mut Z,
state: &mut S,
_event_mgr: &mut EM,
_observers: &mut OT,
) -> Option<Result<I, Error>> {
if self.testcases_done >= self.testcases_to_do {
// finished with this cycle.
return None;
@ -144,29 +154,25 @@ where
.unwrap();
mark_feature_time!(state, PerfFeature::Mutate);
self.current_input = Some(input.clone()); // TODO: Get rid of this
self.push_stage_helper_mut()
.current_input
.replace(input.clone()); // TODO: Get rid of this
Some(Ok(input))
}
fn post_exec(&mut self) -> Result<(), Error> {
fn post_exec(
&mut self,
fuzzer: &mut Z,
state: &mut S,
event_mgr: &mut EM,
observers: &mut OT,
last_input: I,
exit_kind: ExitKind,
) -> Result<(), Error> {
// todo: is_interesting, etc.
let state: &mut S = &mut (*self.state).borrow_mut();
let fuzzer: &mut Z = &mut (*self.fuzzer).borrow_mut();
let event_mgr: &mut EM = &mut (*self.event_mgr).borrow_mut();
let observers_refcell: &RefCell<OT> = self.observers.borrow_mut();
let observers: &mut OT = &mut observers_refcell.borrow_mut();
fuzzer.process_execution(
state,
event_mgr,
self.current_input.take().unwrap(),
observers,
&self.exit_kind.get().unwrap(),
true,
)?;
fuzzer.process_execution(state, event_mgr, last_input, observers, &exit_kind, true)?;
start_timer!(state);
self.mutator
@ -176,60 +182,34 @@ where
Ok(())
}
#[inline]
fn push_stage_helper(&self) -> &PushStageHelper<C, CS, EM, I, OT, R, S, Z> {
&self.psh
}
#[inline]
fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper<C, CS, EM, I, OT, R, S, Z> {
&mut self.psh
}
}
impl<C, CS, EM, I, M, OT, R, S, Z> Iterator for StdMutationalPushStage<C, CS, EM, I, M, OT, R, S, Z>
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventManager<(), I, S, Z>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId + ProgressReporter<I>,
I: Input,
M: Mutator<I, S>,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S>
+ EvaluatorObservers<I, OT, S>
+ Fuzzer<(), EM, I, S, ()>
+ HasCorpusScheduler<CS, I, S>,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R> + HasExecutions,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
type Item = Result<I, Error>;
fn next(&mut self) -> Option<Result<I, Error>> {
let step_success = if self.initialized {
// We already ran once
self.post_exec()
} else {
self.init()
};
if let Err(err) = step_success {
//let errored = true;
return Some(Err(err));
}
//for i in 0..num {
let ret = self.pre_exec();
if ret.is_none() {
// We're done.
self.initialized = false;
self.current_corpus_idx = None;
let state: &mut S = &mut (*self.state).borrow_mut();
//let fuzzer: &mut Z = &mut (*self.fuzzer).borrow_mut();
let event_mgr: &mut EM = &mut (*self.event_mgr).borrow_mut();
self.last_monitor_time = Z::maybe_report_monitor(
state,
event_mgr,
self.last_monitor_time,
STATS_TIMEOUT_DEFAULT,
)
.unwrap();
//self.fuzzer.maybe_report_monitor();
} else {
self.exit_kind.replace(None);
}
ret
self.next_std()
}
}
@ -237,42 +217,30 @@ impl<C, CS, EM, I, M, OT, R, S, Z> StdMutationalPushStage<C, CS, EM, I, M, OT, R
where
C: Corpus<I>,
CS: CorpusScheduler<I, S>,
EM: EventManager<(), I, S, Z>,
EM: EventFirer<I> + EventRestarter<S> + HasEventManagerId,
I: Input,
M: Mutator<I, S>,
OT: ObserversTuple<I, S>,
R: Rand,
S: HasClientPerfMonitor + HasCorpus<C, I> + HasRand<R>,
Z: ExecutionProcessor<I, OT, S>
+ EvaluatorObservers<I, OT, S>
+ Fuzzer<(), EM, I, S, ()>
+ HasCorpusScheduler<CS, I, S>,
Z: ExecutionProcessor<I, OT, S> + EvaluatorObservers<I, OT, S> + HasCorpusScheduler<CS, I, S>,
{
/// Creates a new default mutational stage
#[must_use]
#[allow(clippy::type_complexity)]
pub fn new(
mutator: M,
fuzzer: Rc<RefCell<Z>>,
state: Rc<RefCell<S>>,
event_mgr: Rc<RefCell<EM>>,
observers: Rc<RefCell<OT>>,
shared_state: Rc<RefCell<Option<PushStageSharedState<C, CS, EM, I, OT, R, S, Z>>>>,
exit_kind: Rc<Cell<Option<ExitKind>>>,
stage_idx: i32,
) -> Self {
Self {
mutator,
phantom: PhantomData,
initialized: false,
state,
psh: PushStageHelper::new(shared_state, exit_kind),
current_corpus_idx: None, // todo
testcases_to_do: 0,
testcases_done: 0,
current_input: None,
stage_idx,
fuzzer,
event_mgr,
observers,
exit_kind,
last_monitor_time: current_time(),
}
}
}
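Concretely, each iteration of `StdMutationalPushStage` now clones the scheduled testcase in `pre_exec`, mutates it, and lets the caller execute it before `post_exec` runs `process_execution` on the result. A toy version of that mutate-then-execute loop, standalone and without LibAFL types:

// Clone the scheduled testcase, mutate it, and hand each mutant to the
// executor; the real stage does this via pre_exec/post_exec.
fn mutate(input: &mut Vec<u8>, seed: &mut u64) {
    // xorshift-style toy RNG standing in for the state's HasRand
    *seed ^= *seed << 13;
    *seed ^= *seed >> 7;
    *seed ^= *seed << 17;
    if input.is_empty() {
        input.push(0);
    }
    let idx = (*seed as usize) % input.len();
    input[idx] = input[idx].wrapping_add(1);
}

fn main() {
    let corpus_entry = b"seed".to_vec();
    let testcases_to_do = 4; // the real stage picks this count at random (see the "number of iterations" helper above)
    let mut seed = 0x1234_5678_9abc_def0_u64;
    for done in 0..testcases_to_do {
        // pre_exec: clone the scheduled testcase and mutate it
        let mut input = corpus_entry.clone();
        mutate(&mut input, &mut seed);
        // the caller would execute `input` here; post_exec then decides
        // whether to keep the mutant based on the execution result
        println!("testcase {done}: {input:?}");
    }
}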

View File

@ -411,7 +411,7 @@ where
) -> Result<(), Error>
where
Z: Evaluator<E, EM, I, Self>,
EM: EventFirer<I, Self>,
EM: EventFirer<I>,
{
for in_dir in in_dirs {
self.load_from_directory(
@ -446,7 +446,7 @@ where
) -> Result<(), Error>
where
Z: Evaluator<E, EM, I, Self>,
EM: EventFirer<I, Self>,
EM: EventFirer<I>,
{
self.load_initial_inputs_internal(fuzzer, executor, manager, in_dirs, true)
}
@ -461,7 +461,7 @@ where
) -> Result<(), Error>
where
Z: Evaluator<E, EM, I, Self>,
EM: EventFirer<I, Self>,
EM: EventFirer<I>,
{
self.load_initial_inputs_internal(fuzzer, executor, manager, in_dirs, false)
}
@ -487,7 +487,7 @@ where
where
G: Generator<I, Self>,
Z: Evaluator<E, EM, I, Self>,
EM: EventFirer<I, Self>,
EM: EventFirer<I>,
{
let mut added = 0;
for _ in 0..num {
@ -525,7 +525,7 @@ where
where
G: Generator<I, Self>,
Z: Evaluator<E, EM, I, Self>,
EM: EventFirer<I, Self>,
EM: EventFirer<I>,
{
self.generate_initial_internal(fuzzer, executor, generator, manager, num, true)
}
@ -542,7 +542,7 @@ where
where
G: Generator<I, Self>,
Z: Evaluator<E, EM, I, Self>,
EM: EventFirer<I, Self>,
EM: EventFirer<I>,
{
self.generate_initial_internal(fuzzer, executor, generator, manager, num, false)
}

View File

@ -126,7 +126,7 @@ impl AsanErrors {
self.errors.is_empty()
}
/// Get a mutable reference to the global [`AsanErrors`] object
/// Get a mutable reference to the global [`struct@AsanErrors`] object
#[must_use]
pub fn get_mut<'a>() -> &'a mut Self {
unsafe { ASAN_ERRORS.as_mut().unwrap() }
@ -595,7 +595,7 @@ impl AsanErrorsObserver {
}
}
/// gets the [`AsanErrors`] from the previous run
/// gets the [`struct@AsanErrors`] from the previous run
#[must_use]
pub fn errors(&self) -> Option<&AsanErrors> {
match &self.errors {
@ -605,7 +605,7 @@ impl AsanErrorsObserver {
}
}
/// A feedback reporting potential [`AsanErrors`] from an `AsanErrorsObserver`
/// A feedback reporting potential [`struct@AsanErrors`] from an `AsanErrorsObserver`
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AsanErrorsFeedback {
errors: Option<AsanErrors>,
@ -625,7 +625,7 @@ where
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where
EM: EventFirer<I, S>,
EM: EventFirer<I>,
OT: ObserversTuple<I, S>,
{
let observer = observers

View File

@ -75,7 +75,7 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(any(target_vendor = "apple", target_os = "windows")))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;
/// An helper that feeds [`FridaInProcessExecutor`] with user-supplied instrumentation
/// A helper that feeds `FridaInProcessExecutor` with user-supplied instrumentation
pub trait FridaHelper<'a> {
/// Access to the stalker `Transformer`
fn transformer(&self) -> &Transformer<'a>;
@ -99,7 +99,7 @@ pub trait FridaHelper<'a> {
fn ranges(&self) -> &RangeMap<usize, (u16, String)>;
}
/// An helper that feeds [`FridaInProcessExecutor`] with edge-coverage instrumentation
/// A helper that feeds `FridaInProcessExecutor` with edge-coverage instrumentation
pub struct FridaInstrumentationHelper<'a> {
coverage_rt: CoverageRuntime,
#[cfg(unix)]

View File

@ -44,7 +44,7 @@ pub struct FridaOptions {
}
impl FridaOptions {
/// Parse the frida options from the [`LIBAFL_FRIDA_OPTIONS`] environment variable.
/// Parse the frida options from the "`LIBAFL_FRIDA_OPTIONS`" environment variable.
///
/// Options are `:` separated, and each options is a `name=value` string.
///

View File

@ -383,7 +383,7 @@ where
event_mgr: &mut EM,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
EM: EventFirer<I> + EventRestarter<S>,
OC: Corpus<I>,
OF: Feedback<I, S>,
S: HasSolutions<OC, I> + HasClientPerfMonitor,