* initial single-threaded restartable mgr
* starting rework
* created SimpleRestartingManager
* fixed win build
* moved cpu to bolts
* bringing back cpu
* no_std fixes
* no_std fixes
* removed unused variable
commit 0c353daee6 (parent bea557a48a)
@@ -1,6 +1,7 @@
 //! Bolts are no conceptual fuzzing elements, but they keep libafl-based fuzzers together.
 
 pub mod bindings;
+pub mod cpu;
 pub mod launcher;
 pub mod llmp;
 pub mod os;
@@ -707,8 +707,7 @@ where
 }
 
 #[cfg(feature = "std")]
-#[allow(clippy::type_complexity)]
-#[allow(clippy::too_many_lines)]
+#[allow(clippy::type_complexity, clippy::too_many_lines)]
 impl<I, OT, S, SP, ST> RestartingMgr<I, OT, S, SP, ST>
 where
     I: Input,
@@ -1,13 +1,42 @@
 //! A very simple event manager, that just supports log outputs, but no multiprocessing
-use alloc::{string::ToString, vec::Vec};
 
+use alloc::{string::ToString, vec::Vec};
+#[cfg(feature = "std")]
+use core::{
+    marker::PhantomData,
+    ptr::{addr_of, read_volatile},
+};
+#[cfg(feature = "std")]
+use serde::{de::DeserializeOwned, Serialize};
+#[cfg(feature = "std")]
+use typed_builder::TypedBuilder;
+
+#[cfg(all(feature = "std", windows))]
+use crate::bolts::os::startable_self;
+#[cfg(all(feature = "std", unix))]
+use crate::bolts::os::{fork, ForkResult};
+#[cfg(feature = "std")]
+use crate::bolts::{
+    llmp::{LlmpReceiver, LlmpSender},
+    shmem::ShMemProvider,
+};
 use crate::{
+    bolts::llmp,
     events::{BrokerEventResult, Event, EventFirer, EventManager, EventProcessor, EventRestarter},
     inputs::Input,
     stats::Stats,
     Error,
 };
+
+/// The llmp connection from the actual fuzzer to the process supervising it
+const _ENV_FUZZER_SENDER: &str = "_AFL_ENV_FUZZER_SENDER";
+const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER";
+/// The llmp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages)
+const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT";
+
+/// We're restarting right now.
+const _LLMP_TAG_RESTART: llmp::Tag = 0x8357A87;
 
 /// A simple, single-threaded event manager that just logs
 #[derive(Clone, Debug)]
 pub struct SimpleEventManager<I, ST>
@@ -168,3 +197,236 @@ where
         )))
     }
 }
+
+/// Provides a `builder` which can be used to build a [`RestartingMgr`], which is a combination of a
+/// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The
+/// `restarter` will start a new process each time the child crashes or times out.
+#[cfg(feature = "std")]
+#[allow(clippy::default_trait_access)]
+#[derive(TypedBuilder, Debug)]
+pub struct SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: Serialize,
+    SP: ShMemProvider,
+    ST: Stats, //CE: CustomEvent<I, OT>,
+{
+    /// The actual simple event mgr
+    simple_event_mgr: SimpleEventManager<I, ST>,
+    /// The shared memory provider to use for the broker or client spawned by the restarting
+    /// manager.
+    shmem_provider: SP,
+    /// The stats to use
+    #[builder(setter(strip_option))]
+    stats: Option<ST>,
+    /// [`LlmpSender`] for restarts
+    sender: LlmpSender<SP>,
+    /// Phantom data
+    #[builder(setter(skip), default = PhantomData {})]
+    _phantom: PhantomData<(I, S)>,
+}
+
+#[cfg(feature = "std")]
+impl<I, S, SP, ST> EventFirer<I, S> for SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: Serialize,
+    SP: ShMemProvider,
+    ST: Stats, //CE: CustomEvent<I, OT>,
+{
+    fn fire(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
+        self.simple_event_mgr.fire(_state, event)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<I, S, SP, ST> EventRestarter<S> for SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: Serialize,
+    SP: ShMemProvider,
+    ST: Stats, //CE: CustomEvent<I, OT>,
+{
+    /// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner.
+    fn on_restart(&mut self, state: &mut S) -> Result<(), Error> {
+        // First, reset the page to 0 so the next iteration can read from the beginning of this page
+        unsafe {
+            self.sender.reset();
+        }
+        self.sender
+            .send_buf(_LLMP_TAG_RESTART, &postcard::to_allocvec(state)?)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<E, I, S, SP, ST, Z> EventProcessor<E, S, Z> for SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: Serialize,
+    SP: ShMemProvider,
+    ST: Stats, //CE: CustomEvent<I, OT>,
+{
+    fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result<usize, Error> {
+        self.simple_event_mgr.process(fuzzer, state, executor)
+    }
+}
+
+#[cfg(feature = "std")]
+impl<E, I, S, SP, ST, Z> EventManager<E, I, S, Z> for SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: Serialize,
+    SP: ShMemProvider,
+    ST: Stats, //CE: CustomEvent<I, OT>,
+{
+}
+
+#[cfg(feature = "std")]
+impl<I, S, SP, ST> SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: Serialize,
+    SP: ShMemProvider,
+    ST: Stats, //TODO CE: CustomEvent,
+{
+    /// Creates a new [`SimpleEventManager`].
+    pub fn new(stats: ST, sender: LlmpSender<SP>, shmem_provider: SP) -> Self {
+        Self {
+            stats: None,
+            sender,
+            simple_event_mgr: SimpleEventManager::new(stats),
+            shmem_provider,
+            _phantom: PhantomData {},
+        }
+    }
+}
+
+#[cfg(feature = "std")]
+#[allow(clippy::type_complexity, clippy::too_many_lines)]
+impl<I, S, SP, ST> SimpleRestartingEventManager<I, S, SP, ST>
+where
+    I: Input,
+    S: DeserializeOwned + Serialize,
+    SP: ShMemProvider,
+    ST: Stats + Clone,
+{
+    /// Launch the restarting manager
+    pub fn launch(
+        &mut self,
+    ) -> Result<(Option<S>, SimpleRestartingEventManager<I, S, SP, ST>), Error> {
+        // We start ourself as child process to actually fuzz
+        let (mut sender, mut receiver, new_shmem_provider) = if std::env::var(_ENV_FUZZER_SENDER)
+            .is_err()
+        {
+            // First, create a channel from the fuzzer (sender) to us (receiver) to report its state for restarts.
+            let sender = { LlmpSender::new(self.shmem_provider.clone(), 0, false)? };
+
+            let map = {
+                self.shmem_provider
+                    .clone_ref(&sender.out_maps.last().unwrap().shmem)?
+            };
+            let receiver = LlmpReceiver::on_existing_map(self.shmem_provider.clone(), map, None)?;
+            // Store the information to a map.
+            sender.to_env(_ENV_FUZZER_SENDER)?;
+            receiver.to_env(_ENV_FUZZER_RECEIVER)?;
+
+            let mut ctr: u64 = 0;
+            // Client->parent loop
+            loop {
+                dbg!("Spawning next client (id {})", ctr);
+
+                // On Unix, we fork
+                #[cfg(unix)]
+                let child_status = {
+                    self.shmem_provider.pre_fork()?;
+                    match unsafe { fork() }? {
+                        ForkResult::Parent(handle) => {
+                            self.shmem_provider.post_fork(false)?;
+                            handle.status()
+                        }
+                        ForkResult::Child => {
+                            self.shmem_provider.post_fork(true)?;
+                            break (sender, receiver, self.shmem_provider.clone());
+                        }
+                    }
+                };
+
+                // On windows, we spawn ourself again
+                #[cfg(windows)]
+                let child_status = startable_self()?.status()?;
+
+                if unsafe { read_volatile(addr_of!((*receiver.current_recv_map.page()).size_used)) }
+                    == 0
+                {
+                    #[cfg(unix)]
+                    if child_status == 137 {
+                        // Out of Memory, see https://tldp.org/LDP/abs/html/exitcodes.html
+                        // and https://github.com/AFLplusplus/LibAFL/issues/32 for discussion.
+                        panic!("Fuzzer-respawner: The fuzzed target crashed with an out of memory error! Fix your harness, or switch to another executor (for example, a forkserver).");
+                    }
+
+                    // Storing state in the last round did not work
+                    panic!("Fuzzer-respawner: Storing state in crashed fuzzer instance did not work, no point to spawn the next client! (Child exited with: {})", child_status);
+                }
+
+                ctr = ctr.wrapping_add(1);
+            }
+        } else {
+            // We are the newly started fuzzing instance (i.e. on Windows), first, connect to our own restore map.
+            // We get here *only on Windows*, if we were started by a restarting fuzzer.
+            // A sender and a receiver for single communication
+            (
+                LlmpSender::on_existing_from_env(self.shmem_provider.clone(), _ENV_FUZZER_SENDER)?,
+                LlmpReceiver::on_existing_from_env(
+                    self.shmem_provider.clone(),
+                    _ENV_FUZZER_RECEIVER,
+                )?,
+                self.shmem_provider.clone(),
+            )
+        };
+
+        println!("We're a client, let's fuzz :)");
+
+        // If we're restarting, deserialize the old state.
+        let (state, mgr) = match receiver.recv_buf()? {
+            None => {
+                println!("First run. Let's set it all up");
+                // Mgr to send and receive msgs from/to all other fuzzer instances
+                (
+                    None,
+                    SimpleRestartingEventManager::new(
+                        self.stats.take().unwrap(),
+                        sender,
+                        new_shmem_provider,
+                    ),
+                )
+            }
+            // Restoring from a previous run, deserialize state and corpus.
+            Some((_sender, _tag, msg)) => {
+                println!("Subsequent run. Let's load all data from shmem (received {} bytes from previous instance)", msg.len());
+                let state: S = postcard::from_bytes(msg)?;
+                // We reset the sender, the next sender and receiver (after crash) will reuse the page from the initial message.
+                unsafe {
+                    sender.reset();
+                }
+
+                (
+                    Some(state),
+                    SimpleRestartingEventManager::new(
+                        self.stats.take().unwrap(),
+                        sender,
+                        new_shmem_provider,
+                    ),
+                )
+            }
+        };
+
+        /* TODO: Not sure if this is needed
+        // We commit an empty NO_RESTART message to this buf, against infinite loops,
+        // in case something crashes in the fuzzer.
+        sender.send_buf(_LLMP_TAG_NO_RESTART, []);
+        */
+
+        Ok((state, mgr))
+    }
+}
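The hunk above both documents and implements the new restarting flow: the builder assembles a shell manager, and launch() either forks and respawns children (parent role) or reconnects to the restore channel and deserializes the previous state (child role). A rough usage sketch follows. The crate paths (libafl::events::SimpleRestartingEventManager, libafl::stats::SimpleStats, libafl::bolts::shmem::StdShMemProvider), the SimpleStats constructor, and the MyState placeholder are assumptions for illustration and are not part of this diff; the builder surface is mid-rework per the commit message, so treat this as a sketch rather than the canonical API.

// Sketch only: paths and helper types below are assumed, not taken from the diff.
use libafl::{
    bolts::{
        llmp::LlmpSender,
        shmem::{ShMemProvider, StdShMemProvider},
    },
    events::{SimpleEventManager, SimpleRestartingEventManager},
    inputs::BytesInput,
    stats::SimpleStats,
    Error,
};
use serde::{Deserialize, Serialize};

/// Hypothetical stand-in for a real fuzzer state (`S: Serialize + DeserializeOwned`).
#[derive(Serialize, Deserialize)]
struct MyState {
    iterations: u64,
}

fn setup() -> Result<(), Error> {
    // A non-capturing closure, so both `SimpleStats` instances share one type.
    let print_fn = |s: String| println!("{}", s);
    let shmem_provider = StdShMemProvider::new()?;

    // Build the launcher shell; `launch()` then either forks/respawns children
    // (parent role) or connects to the restore channel (child role), returning
    // the previous state, if any, plus the manager to keep fuzzing with.
    let mut restarter = SimpleRestartingEventManager::<BytesInput, MyState, _, _>::builder()
        .simple_event_mgr(SimpleEventManager::new(SimpleStats::new(print_fn)))
        .stats(SimpleStats::new(print_fn))
        .sender(LlmpSender::new(shmem_provider.clone(), 0, false)?)
        .shmem_provider(shmem_provider)
        .build();
    let (_restored_state, _mgr) = restarter.launch()?;
    // ... continue with the fuzzing loop, seeding from `_restored_state` if present ...
    Ok(())
}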
@@ -59,13 +59,13 @@ where
     OT: ObserversTuple,
 {
     // Start a timer for this feedback
-    let start_time = crate::cpu::read_time_counter();
+    let start_time = crate::bolts::cpu::read_time_counter();
 
     // Execute this feedback
     let ret = self.is_interesting(state, manager, input, observers, exit_kind);
 
     // Get the elapsed time for checking this feedback
-    let elapsed = crate::cpu::read_time_counter() - start_time;
+    let elapsed = crate::bolts::cpu::read_time_counter() - start_time;
 
     // TODO: A more meaningful way to get perf for each feedback
 
@@ -477,7 +477,7 @@ where
 {
     state
         .introspection_stats_mut()
-        .set_current_time(crate::cpu::read_time_counter());
+        .set_current_time(crate::bolts::cpu::read_time_counter());
 
     // Send the current stats over to the manager. This `.clone` shouldn't be
     // costly as `ClientPerfStats` impls `Copy` since it only contains `u64`s
@@ -25,7 +25,6 @@ pub use libafl_derive::*;
 
 pub mod bolts;
 pub mod corpus;
-pub mod cpu;
 pub mod events;
 pub mod executors;
 pub mod feedbacks;
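With the module relocated from the crate root into bolts, downstream code reaches the timestamp helper through the bolts path as well. A minimal sketch, assuming the crate is consumed as libafl and that bolts::cpu exposes read_time_counter exactly as the call sites in the surrounding hunks use it:

// Minimal timing sketch using the relocated path; only the function name and
// module path are taken from the diff, the measured loop is illustrative.
use libafl::bolts::cpu::read_time_counter;

fn main() {
    // Raw CPU timestamp counter before and after the measured work.
    let start = read_time_counter();
    let mut acc = 0u64;
    for i in 0..1_000u64 {
        acc = acc.wrapping_add(i);
    }
    let elapsed = read_time_counter() - start;
    println!("acc = {}, took {} ticks (not wall-clock time)", acc, elapsed);
}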
@@ -166,7 +165,7 @@ impl From<ParseIntError> for Error {
 #[cfg(test)]
 mod tests {
     use crate::{
-        bolts::{rands::StdRand, tuples::tuple_list},
+        bolts::{rands::StdRand, shmem::StdShMemProvider, tuples::tuple_list},
         corpus::{Corpus, InMemoryCorpus, RandCorpusScheduler, Testcase},
         executors::{ExitKind, InProcessExecutor},
         inputs::BytesInput,
@@ -442,7 +442,7 @@ impl ClientPerfStats {
     /// the current clock counter
     #[must_use]
     pub fn new() -> Self {
-        let start_time = crate::cpu::read_time_counter();
+        let start_time = crate::bolts::cpu::read_time_counter();
 
         Self {
             start_time,
@@ -467,7 +467,7 @@ impl ClientPerfStats {
     /// Start a timer with the current time counter
     #[inline]
     pub fn start_timer(&mut self) {
-        self.timer_start = Some(crate::cpu::read_time_counter());
+        self.timer_start = Some(crate::bolts::cpu::read_time_counter());
     }
 
     /// Update the current [`ClientPerfStats`] with the given [`ClientPerfStats`]
@@ -494,7 +494,7 @@ impl ClientPerfStats {
             }
             Some(timer_start) => {
                 // Calculate the elapsed time
-                let elapsed = crate::cpu::read_time_counter() - timer_start;
+                let elapsed = crate::bolts::cpu::read_time_counter() - timer_start;
 
                 // Reset the timer
                 self.timer_start = None;