Make every Builder ::builder(), so BobTheBuilder::new() becomes BobThe::builder() (#2242)

* Make every builder ::builder()

* Fix no_std

* More

* Fix clippy, stuff

* More fun

* Make NopShMem do something

* Alloc

* more fmt

* Remove UB in tinyinst executor builder

* Make builder order not matter for tinyinst

* More better

* fix

* docs

* fmt

* more fmt

* clippy

* fix fixes

* tiny thing

* more better

* more more

* more builder

* more builder

* more nyx

* undo breaking clippy

* clip
Dominik Maier 2024-05-23 18:56:39 +02:00 committed by GitHub
parent b97a9a1398
commit 1fafaf6454
24 changed files with 253 additions and 109 deletions
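
The pattern applied throughout the diff below, as a minimal, self-contained sketch with a hypothetical `Thing`/`ThingBuilder` pair (the names are illustrative, not from the codebase): each builder keeps its `new()`, but the type being built gains a `builder()` shortcut, so call sites no longer have to name the builder type.

```rust
/// Hypothetical type being built; stands in for e.g. `NyxExecutor` or `LlmpEventManager`.
pub struct Thing {
    value: u32,
}

/// Its builder keeps working exactly as before.
#[derive(Default)]
pub struct ThingBuilder {
    value: u32,
}

impl ThingBuilder {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn value(mut self, value: u32) -> Self {
        self.value = value;
        self
    }

    pub fn build(self) -> Thing {
        Thing { value: self.value }
    }
}

impl Thing {
    /// The new convenience entry point this commit adds everywhere.
    #[must_use]
    pub fn builder() -> ThingBuilder {
        ThingBuilder::new()
    }
}

fn main() {
    // Before: let thing = ThingBuilder::new().value(42).build();
    // After:
    let thing = Thing::builder().value(42).build();
    println!("built with value {}", thing.value);
}
```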

View File

@ -9,7 +9,7 @@ use std::{env, net::SocketAddr, path::PathBuf, time::Duration};
use clap::Parser;
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
events::{launcher::Launcher, EventConfig, LlmpEventConverterBuilder},
events::{launcher::Launcher, llmp::LlmpEventConverter, EventConfig},
executors::{inprocess::InProcessExecutor, ExitKind},
feedback_or,
feedbacks::{CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback},
@ -120,7 +120,7 @@ pub extern "C" fn libafl_main() {
let context = NautilusContext::from_file(15, "grammar.json");
let mut event_converter = opt.bytes_broker_port.map(|port| {
LlmpEventConverterBuilder::new()
LlmpEventConverter::builder()
.build_on_port(
shmem_provider.clone(),
port,

View File

@ -19,7 +19,7 @@ use libafl_bolts::{
shmem::{ShMemProvider, StdShMemProvider},
tuples::tuple_list,
};
use libafl_nyx::{executor::NyxExecutorBuilder, helper::NyxHelper, settings::NyxSettings};
use libafl_nyx::{executor::NyxExecutor, helper::NyxHelper, settings::NyxSettings};
fn main() {
let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory");
@ -54,7 +54,7 @@ fn main() {
let mut feedback = MaxMapFeedback::new(&observer);
let mut objective = CrashFeedback::new();
let scheduler = RandScheduler::new();
let mut executor = NyxExecutorBuilder::new().build(helper, tuple_list!(observer));
let mut executor = NyxExecutor::builder().build(helper, tuple_list!(observer));
// If not restarting, create a State from scratch
let mut state = state.unwrap_or_else(|| {

View File

@ -14,7 +14,7 @@ use libafl::{
Fuzzer, StdFuzzer,
};
use libafl_bolts::{rands::StdRand, tuples::tuple_list};
use libafl_nyx::{executor::NyxExecutorBuilder, helper::NyxHelper, settings::NyxSettings};
use libafl_nyx::{executor::NyxExecutor, helper::NyxHelper, settings::NyxSettings};
fn main() {
// nyx stuff
@ -44,7 +44,7 @@ fn main() {
let monitor = TuiMonitor::new(ui);
let mut mgr = SimpleEventManager::new(monitor);
let mut executor = NyxExecutorBuilder::new().build(helper, tuple_list!(observer));
let mut executor = NyxExecutor::builder().build(helper, tuple_list!(observer));
let mutator = StdScheduledMutator::new(havoc_mutations());
let mut stages = tuple_list!(StdMutationalStage::new(mutator));

View File

@ -1,4 +1,4 @@
use std::path::PathBuf;
use std::{path::PathBuf, ptr::addr_of_mut, time::Duration};
use libafl::{
corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus, Testcase},
@ -20,7 +20,7 @@ use libafl_bolts::shmem::Win32ShMemProvider;
use libafl_bolts::{
ownedref::OwnedMutPtr, rands::StdRand, shmem::ShMemProvider, tuples::tuple_list,
};
use libafl_tinyinst::executor::TinyInstExecutorBuilder;
use libafl_tinyinst::executor::TinyInstExecutor;
static mut COVERAGE: Vec<u64> = vec![];
#[cfg(not(any(target_vendor = "apple", windows, target_os = "linux")))]
@ -37,7 +37,7 @@ fn main() {
// use file to pass testcases
// let args = vec!["test.exe".to_string(), "-f".to_string(), "@@".to_string()];
let coverage = unsafe { OwnedMutPtr::Ptr(core::ptr::addr_of_mut!(COVERAGE)) };
let coverage = unsafe { OwnedMutPtr::Ptr(addr_of_mut!(COVERAGE)) };
let observer = ListObserver::new("cov", coverage);
let mut feedback = ListFeedback::new(&observer);
#[cfg(windows)]
@ -59,18 +59,19 @@ fn main() {
let scheduler = RandScheduler::new();
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
let monitor = SimpleMonitor::new(|x| println!("{}", x));
let monitor = SimpleMonitor::new(|x| println!("{x}"));
let mut mgr = SimpleEventManager::new(monitor);
let mut executor = unsafe {
TinyInstExecutorBuilder::new()
TinyInstExecutor::builder()
.tinyinst_args(tinyinst_args)
.program_args(args)
.use_shmem()
.persistent("test.exe".to_string(), "fuzz".to_string(), 1, 10000)
.timeout(std::time::Duration::new(5, 0))
.timeout(Duration::new(5, 0))
.shmem_provider(&mut shmem_provider)
.build(&mut COVERAGE, tuple_list!(observer))
.coverage_ptr(addr_of_mut!(COVERAGE))
.build(tuple_list!(observer))
.unwrap()
};
let mutator = StdScheduledMutator::new(havoc_mutations());
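
The hunk above also changes how the coverage buffer reaches the executor: instead of `build(&mut COVERAGE, ...)`, which kept an exclusive borrow of the `static mut` alive for the executor's whole lifetime (the UB the commit message mentions removing), the builder now takes a raw pointer via `.coverage_ptr(addr_of_mut!(COVERAGE))` and `build()` rejects a null pointer. A minimal, self-contained sketch of that handoff, with a hypothetical `CoverageSink` standing in for the executor:

```rust
use std::ptr::addr_of_mut;

/// Hypothetical stand-in for the executor side of the new API: it keeps a raw
/// `*mut Vec<u64>` (as `TinyInstExecutor` now does) rather than a `&'a mut Vec<u64>`.
struct CoverageSink {
    coverage_ptr: *mut Vec<u64>,
}

impl CoverageSink {
    /// Borrow the coverage vec only for the duration of a single run.
    fn record(&mut self, value: u64) {
        let coverage = unsafe { self.coverage_ptr.as_mut().expect("coverage pointer is null") };
        coverage.push(value);
    }
}

fn main() {
    let mut coverage: Vec<u64> = Vec::new();
    let mut sink = CoverageSink {
        coverage_ptr: addr_of_mut!(coverage),
    };
    sink.record(0x41);
    println!("coverage entries: {}", coverage.len());
}
```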

View File

@ -19,11 +19,12 @@ use libafl_bolts::{
};
use libafl_bolts::{
llmp::{self, LlmpBroker, LlmpClient, LlmpClientDescription, Tag},
shmem::ShMemProvider,
shmem::{NopShMemProvider, ShMemProvider},
ClientId,
};
use serde::{Deserialize, Serialize};
use super::NopEventManager;
#[cfg(feature = "llmp_compression")]
use crate::events::llmp::COMPRESS_THRESHOLD;
#[cfg(feature = "adaptive_serialization")]
@ -38,9 +39,9 @@ use crate::{
},
executors::{Executor, HasObservers},
fuzzer::{EvaluatorObservers, ExecutionProcessor},
inputs::{Input, UsesInput},
inputs::{Input, NopInput, UsesInput},
observers::ObserversTuple,
state::{HasExecutions, HasLastReportTime, UsesState},
state::{HasExecutions, HasLastReportTime, NopState, UsesState},
Error, HasMetadata,
};
@ -230,6 +231,14 @@ where
is_main: bool,
}
impl CentralizedEventManager<NopEventManager<NopState<NopInput>>, NopShMemProvider> {
/// Creates a builder for [`CentralizedEventManager`]
#[must_use]
pub fn builder() -> CentralizedEventManagerBuilder {
CentralizedEventManagerBuilder::new()
}
}
/// The builder for `CentralizedEventManager`
#[derive(Debug)]
pub struct CentralizedEventManagerBuilder {
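
The commit attaches these `builder()` entry points to impl blocks whose generic parameters are filled in with placeholder types (`NopEventManager<NopState<NopInput>>`, `NopShMemProvider`, and friends), so callers can write `CentralizedEventManager::builder()` without naming any type arguments; the returned builder is not tied to the placeholders. A minimal sketch of the same trick, using hypothetical `Manager`, `ManagerBuilder`, and `NopBackend` types that are not part of the codebase:

```rust
use std::marker::PhantomData;

/// Hypothetical placeholder, playing the role of `NopState<NopInput>`/`NopShMemProvider`.
struct NopBackend;

/// Hypothetical generic type that would otherwise need its parameter spelled out.
struct Manager<B> {
    is_main: bool,
    _backend: PhantomData<B>,
}

/// The builder carries no generics, so it can build a `Manager<B>` for any `B`.
#[derive(Default)]
struct ManagerBuilder {
    is_main: bool,
}

impl ManagerBuilder {
    fn new() -> Self {
        Self::default()
    }

    fn is_main(mut self, is_main: bool) -> Self {
        self.is_main = is_main;
        self
    }

    fn build<B>(self) -> Manager<B> {
        Manager {
            is_main: self.is_main,
            _backend: PhantomData,
        }
    }
}

// Pinning the impl to the placeholder type gives callers a generics-free entry
// point: `Manager::builder()` instead of `ManagerBuilder::new()`.
impl Manager<NopBackend> {
    fn builder() -> ManagerBuilder {
        ManagerBuilder::new()
    }
}

fn main() {
    // No type parameters at the call site; `build` infers the real backend.
    let mgr: Manager<u32> = Manager::builder().is_main(true).build();
    println!("is_main: {}", mgr.is_main);
}
```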

View File

@ -48,10 +48,8 @@ use libafl_bolts::{
use typed_builder::TypedBuilder;
use super::hooks::EventManagerHooksTuple;
#[cfg(all(unix, feature = "std"))]
use crate::events::centralized::CentralizedEventManagerBuilder;
#[cfg(all(unix, feature = "std", feature = "fork"))]
use crate::events::{CentralizedEventManager, CentralizedLlmpEventBroker};
use crate::events::centralized::{CentralizedEventManager, CentralizedLlmpEventBroker};
#[cfg(feature = "adaptive_serialization")]
use crate::observers::TimeObserver;
#[cfg(feature = "std")]
@ -698,7 +696,7 @@ where
let builder = builder.time_ref(self.time_obs.handle());
let (state, mgr) = builder.build().launch()?;
let mut centralized_builder = CentralizedEventManagerBuilder::new();
let mut centralized_builder = CentralizedEventManager::builder();
if index == 1 {
centralized_builder = centralized_builder.is_main(true);

View File

@ -18,7 +18,7 @@ use libafl_bolts::{
use libafl_bolts::{
current_time,
llmp::{LlmpClient, LlmpClientDescription},
shmem::ShMemProvider,
shmem::{NopShMemProvider, ShMemProvider},
ClientId,
};
#[cfg(feature = "std")]
@ -44,9 +44,9 @@ use crate::{
},
executors::{Executor, HasObservers},
fuzzer::{EvaluatorObservers, ExecutionProcessor},
inputs::UsesInput,
inputs::{NopInput, UsesInput},
observers::ObserversTuple,
state::{HasExecutions, HasLastReportTime, State, UsesState},
state::{HasExecutions, HasLastReportTime, NopState, State, UsesState},
Error, HasMetadata,
};
@ -85,6 +85,14 @@ where
phantom: PhantomData<S>,
}
impl LlmpEventManager<(), NopState<NopInput>, NopShMemProvider> {
/// Creates a builder for [`LlmpEventManager`]
#[must_use]
pub fn builder() -> LlmpEventManagerBuilder<()> {
LlmpEventManagerBuilder::new()
}
}
/// Builder for `LlmpEventManager`
#[derive(Debug, Copy, Clone)]
pub struct LlmpEventManagerBuilder<EMH> {

View File

@ -10,7 +10,7 @@ use libafl_bolts::{
};
use libafl_bolts::{
llmp::{LlmpClient, LlmpClientDescription, Tag},
shmem::ShMemProvider,
shmem::{NopShMemProvider, ShMemProvider},
ClientId,
};
use serde::Deserialize;
@ -19,8 +19,8 @@ use crate::{
events::{CustomBufEventResult, CustomBufHandlerFn, Event, EventFirer},
executors::{Executor, HasObservers},
fuzzer::{EvaluatorObservers, ExecutionProcessor},
inputs::{Input, InputConverter, UsesInput},
state::{HasExecutions, State, UsesState},
inputs::{Input, InputConverter, NopInput, NopInputConverter, UsesInput},
state::{HasExecutions, NopState, State, UsesState},
Error, HasMetadata,
};
@ -108,6 +108,22 @@ where
phantom: PhantomData<S>,
}
impl
LlmpEventConverter<
NopInput,
NopInputConverter<NopInput>,
NopInputConverter<NopInput>,
NopState<NopInput>,
NopShMemProvider,
>
{
/// Create a builder for [`LlmpEventConverter`]
#[must_use]
pub fn builder() -> LlmpEventConverterBuilder {
LlmpEventConverterBuilder::new()
}
}
/// Build `LlmpEventConverter`
#[derive(Debug, Clone, Default)]
pub struct LlmpEventConverterBuilder {

View File

@ -1,4 +1,7 @@
//! Llmp restarting manager
//! The `LLMP` restarting manager will
//! forward messages over lockless shared maps.
//! When the target crashes, a watch process (the parent) will
//! restart/refork it.
use alloc::vec::Vec;
#[cfg(all(unix, not(miri), feature = "std"))]
@ -42,7 +45,7 @@ use crate::{
events::{
hooks::EventManagerHooksTuple, Event, EventConfig, EventFirer, EventManager,
EventManagerId, EventProcessor, EventRestarter, HasEventManagerId, LlmpEventBroker,
LlmpEventManager, LlmpEventManagerBuilder, LlmpShouldSaveState, ProgressReporter,
LlmpEventManager, LlmpShouldSaveState, ProgressReporter,
},
executors::{Executor, HasObservers},
fuzzer::{EvaluatorObservers, ExecutionProcessor},
@ -483,11 +486,11 @@ where
}
LlmpConnection::IsClient { client } => {
#[cfg(not(feature = "adaptive_serialization"))]
let mgr: LlmpEventManager<EMH, S, SP> = LlmpEventManagerBuilder::new()
let mgr: LlmpEventManager<EMH, S, SP> = LlmpEventManager::builder()
.hooks(self.hooks)
.build_from_client(client, self.configuration)?;
#[cfg(feature = "adaptive_serialization")]
let mgr: LlmpEventManager<EMH, S, SP> = LlmpEventManagerBuilder::new()
let mgr: LlmpEventManager<EMH, S, SP> = LlmpEventManager::builder()
.hooks(self.hooks)
.build_from_client(
client,
@ -511,7 +514,7 @@ where
ManagerKind::Client { cpu_core } => {
// We are a client
#[cfg(not(feature = "adaptive_serialization"))]
let mgr = LlmpEventManagerBuilder::new()
let mgr = LlmpEventManager::builder()
.hooks(self.hooks)
.build_on_port(
self.shmem_provider.clone(),
@ -519,7 +522,7 @@ where
self.configuration,
)?;
#[cfg(feature = "adaptive_serialization")]
let mgr = LlmpEventManagerBuilder::new()
let mgr = LlmpEventManager::builder()
.hooks(self.hooks)
.build_on_port(
self.shmem_provider.clone(),
@ -648,7 +651,7 @@ where
let (state, mut mgr) =
if let Some((state_opt, mgr_description)) = staterestorer.restore()? {
#[cfg(not(feature = "adaptive_serialization"))]
let llmp_mgr = LlmpEventManagerBuilder::new()
let llmp_mgr = LlmpEventManager::builder()
.hooks(self.hooks)
.build_existing_client_from_description(
new_shmem_provider,
@ -656,7 +659,7 @@ where
self.configuration,
)?;
#[cfg(feature = "adaptive_serialization")]
let llmp_mgr = LlmpEventManagerBuilder::new()
let llmp_mgr = LlmpEventManager::builder()
.hooks(self.hooks)
.build_existing_client_from_description(
new_shmem_provider,
@ -676,7 +679,7 @@ where
log::info!("First run. Let's set it all up");
// Mgr to send and receive msgs from/to all other fuzzer instances
#[cfg(not(feature = "adaptive_serialization"))]
let mgr = LlmpEventManagerBuilder::new()
let mgr = LlmpEventManager::builder()
.hooks(self.hooks)
.build_existing_client_from_env(
new_shmem_provider,
@ -684,7 +687,7 @@ where
self.configuration,
)?;
#[cfg(feature = "adaptive_serialization")]
let mgr = LlmpEventManagerBuilder::new()
let mgr = LlmpEventManager::builder()
.hooks(self.hooks)
.build_existing_client_from_env(
new_shmem_provider,
@ -738,7 +741,7 @@ mod tests {
use crate::{
corpus::{Corpus, InMemoryCorpus, Testcase},
events::llmp::{restarting::_ENV_FUZZER_SENDER, LlmpEventManagerBuilder},
events::llmp::{restarting::_ENV_FUZZER_SENDER, LlmpEventManager},
executors::{ExitKind, InProcessExecutor},
feedbacks::ConstFeedback,
fuzzer::Fuzzer,
@ -788,11 +791,11 @@ mod tests {
}
#[cfg(not(feature = "adaptive_serialization"))]
let mut llmp_mgr = LlmpEventManagerBuilder::new()
let mut llmp_mgr = LlmpEventManager::builder()
.build_from_client(llmp_client, "fuzzer".into())
.unwrap();
#[cfg(feature = "adaptive_serialization")]
let mut llmp_mgr = LlmpEventManagerBuilder::new()
let mut llmp_mgr = LlmpEventManager::builder()
.build_from_client(llmp_client, "fuzzer".into(), time_ref.clone())
.unwrap();
@ -837,7 +840,7 @@ mod tests {
let (mut state_clone, mgr_description) = staterestorer.restore().unwrap().unwrap();
#[cfg(not(feature = "adaptive_serialization"))]
let mut llmp_clone = LlmpEventManagerBuilder::new()
let mut llmp_clone = LlmpEventManager::builder()
.build_existing_client_from_description(
shmem_provider,
&mgr_description,
@ -845,7 +848,7 @@ mod tests {
)
.unwrap();
#[cfg(feature = "adaptive_serialization")]
let mut llmp_clone = LlmpEventManagerBuilder::new()
let mut llmp_clone = LlmpEventManager::builder()
.build_existing_client_from_description(
shmem_provider,
&mgr_description,

View File

@ -440,6 +440,17 @@ where
phantom: PhantomData<S>,
}
impl<S> TcpEventManager<(), S>
where
S: State,
{
/// Create a builder for [`TcpEventManager`]
#[must_use]
pub fn builder() -> TcpEventManagerBuilder<(), S> {
TcpEventManagerBuilder::new()
}
}
/// Builder for `TcpEventManager`
#[derive(Debug, Copy, Clone)]
pub struct TcpEventManagerBuilder<EMH, S> {

View File

@ -182,8 +182,7 @@ impl CommandExecutor<(), (), ()> {
/// By default, input is read from stdin, unless you specify a different location using
/// * `arg_input_arg` for input delivered _as_ a command line argument
/// * `arg_input_file` for input via a file of a specific name
/// * `arg_input_file_std` for a file with default name
/// (at the right location in the arguments)
/// * `arg_input_file_std` for a file with default name (at the right location in the arguments)
#[must_use]
pub fn builder() -> CommandExecutorBuilder {
CommandExecutorBuilder::new()
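
For context on what those three delivery modes amount to, here is a std-only sketch (not the LibAFL implementation; it assumes a unix-like system with `echo` and `cat` available): `arg_input_arg` passes the raw input as an argument, while the `arg_input_file*` variants write it to a file and put that path into the argv.

```rust
use std::{env, fs, process::Command};

fn main() -> std::io::Result<()> {
    let input = b"fuzz input";

    // `arg_input_arg`: the input itself becomes a command line argument.
    let out = Command::new("echo")
        .arg(String::from_utf8_lossy(input).into_owned())
        .output()?;
    println!("as arg:  {}", String::from_utf8_lossy(&out.stdout).trim());

    // `arg_input_file` / `arg_input_file_std`: the input is written to a file
    // and the file's path is placed at the right spot among the arguments.
    let path = env::temp_dir().join("cur_input");
    fs::write(&path, input)?;
    let out = Command::new("cat").arg(&path).output()?;
    println!("as file: {}", String::from_utf8_lossy(&out.stdout).trim());
    Ok(())
}
```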

View File

@ -1324,7 +1324,7 @@ mod tests {
use serial_test::serial;
use crate::{
executors::forkserver::ForkserverExecutorBuilder,
executors::forkserver::ForkserverExecutor,
observers::{ConstMapObserver, HitcountsMapObserver},
Error,
};
@ -1348,7 +1348,7 @@ mod tests {
shmem_buf,
));
let executor = ForkserverExecutorBuilder::new()
let executor = ForkserverExecutor::builder()
.program(bin)
.args(args)
.debug_child(false)

View File

@ -219,6 +219,7 @@ where
/// * `user_hooks` - the hooks run before and after the harness's execution
/// * `harness_fn` - the harness, executing the function
/// * `observers` - the observers observing the target during execution
///
/// This may return an error on unix, if signal handler setup fails
pub fn with_timeout_generic<E, EM, OF, Z>(
user_hooks: HT,

View File

@ -232,6 +232,7 @@ where
/// * `user_hooks` - the hooks run before and after the harness's execution
/// * `harness_fn` - the harness, executing the function
/// * `observers` - the observers observing the target during execution
///
/// This may return an error on unix, if signal handler setup fails
pub fn with_timeout<EM, OF, Z>(
harness_fn: &'a mut H,
@ -329,12 +330,13 @@ where
})
}
/// Create a new in mem executor.
/// Create a new [`InProcessExecutor`].
/// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
/// depending on different corpus or state.
/// * `user_hooks` - the hooks run before and after the harness's execution
/// * `harness_fn` - the harness, executing the function
/// * `observers` - the observers observing the target during execution
///
/// This may return an error on unix, if signal handler setup fails
pub fn with_timeout_generic<EM, OF, Z>(
user_hooks: HT,

View File

@ -227,6 +227,7 @@ where
/// * `user_hooks` - the hooks run before and after the harness's execution
/// * `harness_fn` - the harness, executing the function
/// * `observers` - the observers observing the target during execution
///
/// This may return an error on unix, if signal handler setup fails
pub fn with_timeout<EM, OF, Z>(
harness_fn: &'a mut H,
@ -354,6 +355,7 @@ where
/// * `user_hooks` - the hooks run before and after the harness's execution
/// * `harness_fn` - the harness, executing the function
/// * `observers` - the observers observing the target during execution
///
/// This may return an error on unix, if signal handler setup fails
#[allow(clippy::too_many_arguments)]
pub fn with_timeout_generic<EM, OF, Z>(

View File

@ -1,4 +1,5 @@
//! # Concolic Tracing Serialization Format
//!
//! ## Design Goals
//! * The serialization format for concolic tracing was developed with the goal of being space and time efficient.
//! * Additionally, it should be easy to maintain and extend.

View File

@ -22,8 +22,7 @@ static BUILD_ID: OnceLock<Uuid> = OnceLock::new();
/// invocations of identically laid out binaries.
///
/// As such:
/// * It is guaranteed to be identical within multiple invocations of the same
/// binary.
/// * It is guaranteed to be identical within multiple invocations of the same binary.
/// * It is guaranteed to be different across binaries with different code or
/// data segments or layout.
/// * Equality is unspecified if the binaries have identical code and data

View File

@ -2,11 +2,9 @@
//! too.)
#[cfg(feature = "alloc")]
use alloc::{rc::Rc, string::ToString};
use alloc::{rc::Rc, string::ToString, vec::Vec};
#[cfg(feature = "alloc")]
use core::fmt::Display;
#[cfg(feature = "alloc")]
use core::{cell::RefCell, fmt, mem::ManuallyDrop};
use core::{cell::RefCell, fmt, fmt::Display, mem::ManuallyDrop};
use core::{
fmt::Debug,
mem,
@ -318,6 +316,72 @@ pub trait ShMemProvider: Clone + Default + Debug {
}
}
/// An [`ShMemProvider`] that does not provide any [`ShMem`].
/// This is mainly for testing and type magic.
/// The resulting [`NopShMem`] is backed by a plain byte buffer, so it can still be used for simple, non-shared operations.
/// Calling [`NopShMemProvider::shmem_from_id_and_size`] will return new maps for the same id every time.
///
/// # Note
/// If you just want a simple shared memory implementation, use [`StdShMemProvider`] instead.
#[cfg(feature = "alloc")]
#[derive(Debug, Clone, Default)]
pub struct NopShMemProvider;
#[cfg(feature = "alloc")]
impl ShMemProvider for NopShMemProvider {
type ShMem = NopShMem;
fn new() -> Result<Self, Error> {
Ok(Self)
}
fn new_shmem(&mut self, map_size: usize) -> Result<Self::ShMem, Error> {
self.shmem_from_id_and_size(ShMemId::default(), map_size)
}
fn shmem_from_id_and_size(
&mut self,
id: ShMemId,
map_size: usize,
) -> Result<Self::ShMem, Error> {
Ok(NopShMem {
id,
buf: vec![0; map_size],
})
}
}
/// An [`ShMem`] that is not backed by real shared memory and shares nothing.
#[cfg(feature = "alloc")]
#[derive(Debug, Clone, Default)]
pub struct NopShMem {
id: ShMemId,
buf: Vec<u8>,
}
#[cfg(feature = "alloc")]
impl ShMem for NopShMem {
fn id(&self) -> ShMemId {
self.id
}
}
#[cfg(feature = "alloc")]
impl DerefMut for NopShMem {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.buf
}
}
#[cfg(feature = "alloc")]
impl Deref for NopShMem {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.buf
}
}
/// A Handle Counted shared map,
/// that can use internal mutability.
/// Useful if the `ShMemProvider` needs to keep local state.
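
A minimal sketch of how the newly added `NopShMemProvider` behaves, based only on the implementation above (and assuming `libafl_bolts` with its default features): `new_shmem` hands back a heap-backed `NopShMem` that derefs to a byte slice but shares nothing, and asking for the same id again yields a fresh, independent buffer.

```rust
use libafl_bolts::{
    shmem::{NopShMemProvider, ShMem, ShMemProvider},
    Error,
};

fn main() -> Result<(), Error> {
    let mut provider = NopShMemProvider::new()?;

    // Backed by a plain `vec![0; 16]`, not by any OS shared-memory object.
    let mut map_a = provider.new_shmem(16)?;
    map_a[0] = 0x41;

    // "The same" id does not resolve to the same memory: a new map is returned.
    let map_b = provider.shmem_from_id_and_size(map_a.id(), 16)?;
    assert_eq!(map_a[0], 0x41);
    assert_eq!(map_b[0], 0x00);
    Ok(())
}
```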

View File

@ -189,7 +189,7 @@ impl FridaInstrumentationHelperBuilder {
/// Instrument all modules in `/usr/lib` as well as `libfoo.so`:
/// ```
///# use libafl_frida::helper::FridaInstrumentationHelperBuilder;
/// let builder = FridaInstrumentationHelperBuilder::new()
/// let builder = FridaInstrumentationHelper::builder()
/// .instrument_module_if(|module| module.name() == "libfoo.so")
/// .instrument_module_if(|module| module.path().starts_with("/usr/lib"));
/// ```
@ -218,7 +218,7 @@ impl FridaInstrumentationHelperBuilder {
///
/// ```
///# use libafl_frida::helper::FridaInstrumentationHelperBuilder;
/// let builder = FridaInstrumentationHelperBuilder::new()
/// let builder = FridaInstrumentationHelper::builder()
/// .instrument_module_if(|module| module.path().starts_with("/usr/lib"))
/// .skip_module_if(|module| module.name() == "libfoo.so");
/// ```

View File

@ -236,6 +236,7 @@ pub fn merge(
.scheduler_mut()
.on_remove(&mut state, idx, &Some(testcase))?;
} else {
#[allow(clippy::needless_borrows_for_generic_args)] // False-positive: file_path is used just below
rename(&file_path, &new_file_path)?;
*file_path = new_file_path;
}

View File

@ -30,6 +30,13 @@ pub struct NyxExecutor<S, OT> {
phantom: PhantomData<S>,
}
impl NyxExecutor<(), ()> {
/// Create a builder for [`NyxExecutor`]
pub fn builder() -> NyxExecutorBuilder {
NyxExecutorBuilder::new()
}
}
impl<S, OT> UsesState for NyxExecutor<S, OT>
where
S: State,

View File

@ -5,7 +5,7 @@ use std::{fs, net::SocketAddr, path::PathBuf, time::Duration};
use libafl::{
corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus},
events::{launcher::Launcher, EventConfig, EventRestarter, LlmpRestartingEventManager},
executors::forkserver::ForkserverExecutorBuilder,
executors::forkserver::ForkserverExecutor,
feedback_or, feedback_or_fast,
feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback},
fuzzer::{Fuzzer, StdFuzzer},
@ -175,7 +175,7 @@ impl<'a> ForkserverBytesCoverageSugar<'a> {
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
let forkserver = if self.shmem_testcase {
ForkserverExecutorBuilder::new()
ForkserverExecutor::builder()
.program(self.program.clone())
.parse_afl_cmdline(self.arguments)
.is_persistent(true)
@ -186,7 +186,7 @@ impl<'a> ForkserverBytesCoverageSugar<'a> {
.shmem_provider(&mut shmem_provider_client)
.build_dynamic_map(edges_observer, tuple_list!(time_observer))
} else {
ForkserverExecutorBuilder::new()
ForkserverExecutor::builder()
.program(self.program.clone())
.parse_afl_cmdline(self.arguments)
.is_persistent(true)

View File

@ -200,6 +200,7 @@ fn main() {
}
}
#[cfg(any(feature = "forkserver", feature = "windows_asan"))]
let target_family = std::env::var("CARGO_CFG_TARGET_FAMILY").unwrap();
#[cfg(feature = "forkserver")]

View File

@ -1,5 +1,4 @@
use core::marker::PhantomData;
use std::time::Duration;
use core::{marker::PhantomData, ptr, time::Duration};
use libafl::{
executors::{Executor, ExitKind, HasObservers},
@ -10,19 +9,19 @@ use libafl::{
};
use libafl_bolts::{
fs::{InputFile, INPUTFILE_STD},
shmem::{ShMem, ShMemProvider, StdShMemProvider},
shmem::{NopShMemProvider, ShMem, ShMemProvider},
tuples::RefIndexable,
AsSlice, AsSliceMut,
};
use tinyinst::tinyinst::{litecov::RunResult, TinyInst};
/// Tinyinst executor
pub struct TinyInstExecutor<'a, S, SP, OT>
/// [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor
pub struct TinyInstExecutor<S, SP, OT>
where
SP: ShMemProvider,
{
tinyinst: TinyInst,
coverage: &'a mut Vec<u64>,
coverage_ptr: *mut Vec<u64>,
timeout: Duration,
observers: OT,
phantom: PhantomData<S>,
@ -30,7 +29,15 @@ where
map: Option<<SP as ShMemProvider>::ShMem>,
}
impl<'a, S, SP, OT> std::fmt::Debug for TinyInstExecutor<'a, S, SP, OT>
impl<'a> TinyInstExecutor<(), NopShMemProvider, ()> {
/// Create a builder for [`TinyInstExecutor`]
#[must_use]
pub fn builder() -> TinyInstExecutorBuilder<'a, NopShMemProvider> {
TinyInstExecutorBuilder::new()
}
}
impl<S, SP, OT> std::fmt::Debug for TinyInstExecutor<S, SP, OT>
where
SP: ShMemProvider,
{
@ -41,7 +48,7 @@ where
}
}
impl<'a, EM, S, SP, OT, Z> Executor<EM, Z> for TinyInstExecutor<'a, S, SP, OT>
impl<EM, S, SP, OT, Z> Executor<EM, Z> for TinyInstExecutor<S, SP, OT>
where
EM: UsesState<State = S>,
S: State + HasExecutions,
@ -80,7 +87,8 @@ where
let mut status = RunResult::OK;
unsafe {
status = self.tinyinst.run();
self.tinyinst.vec_coverage(self.coverage, false);
self.tinyinst
.vec_coverage(self.coverage_ptr.as_mut().unwrap(), false);
}
match status {
@ -100,30 +108,52 @@ pub struct TinyInstExecutorBuilder<'a, SP> {
tinyinst_args: Vec<String>,
program_args: Vec<String>,
timeout: Duration,
coverage_ptr: *mut Vec<u64>,
shmem_provider: Option<&'a mut SP>,
}
const MAX_FILE: usize = 1024 * 1024;
const SHMEM_FUZZ_HDR_SIZE: usize = 4;
impl<'a> Default for TinyInstExecutorBuilder<'a, StdShMemProvider> {
impl<'a> Default for TinyInstExecutorBuilder<'a, NopShMemProvider> {
fn default() -> Self {
Self::new()
}
}
impl<'a> TinyInstExecutorBuilder<'a, StdShMemProvider> {
impl<'a> TinyInstExecutorBuilder<'a, NopShMemProvider> {
/// Constructor
#[must_use]
pub fn new() -> TinyInstExecutorBuilder<'a, StdShMemProvider> {
pub fn new() -> TinyInstExecutorBuilder<'a, NopShMemProvider> {
Self {
tinyinst_args: vec![],
program_args: vec![],
timeout: Duration::new(3, 0),
shmem_provider: None,
coverage_ptr: ptr::null_mut(),
}
}
/// Use this to enable shmem testcase passing.
#[must_use]
pub fn shmem_provider<SP: ShMemProvider>(
self,
shmem_provider: &'a mut SP,
) -> TinyInstExecutorBuilder<'a, SP> {
TinyInstExecutorBuilder {
tinyinst_args: self.tinyinst_args,
program_args: self.program_args,
timeout: self.timeout,
shmem_provider: Some(shmem_provider),
coverage_ptr: ptr::null_mut(),
}
}
}
impl<'a, SP> TinyInstExecutorBuilder<'a, SP>
where
SP: ShMemProvider,
{
/// Argument for tinyinst instrumentation
#[must_use]
pub fn tinyinst_arg(mut self, arg: String) -> Self {
@ -207,31 +237,22 @@ impl<'a> TinyInstExecutorBuilder<'a, StdShMemProvider> {
self
}
/// Use this to enable shmem testcase passing.
/// Set the pointer to the coverage vec used to observe the execution.
///
/// # Safety
/// The coverage vec pointer must point to a valid vec and outlive the [`TinyInstExecutor`].
/// The vec will be dereferenced and borrowed mutably during execution, so it must not be accessed concurrently.
#[must_use]
pub fn shmem_provider<SP: ShMemProvider>(
self,
shmem_provider: &'a mut SP,
) -> TinyInstExecutorBuilder<'a, SP> {
TinyInstExecutorBuilder {
tinyinst_args: self.tinyinst_args,
program_args: self.program_args,
timeout: self.timeout,
shmem_provider: Some(shmem_provider),
pub fn coverage_ptr(mut self, coverage_ptr: *mut Vec<u64>) -> Self {
self.coverage_ptr = coverage_ptr;
self
}
}
}
impl<'a, SP> TinyInstExecutorBuilder<'a, SP>
where
SP: ShMemProvider,
{
/// Build tinyinst executor
pub fn build<OT, S>(
&mut self,
coverage: &'a mut Vec<u64>,
observers: OT,
) -> Result<TinyInstExecutor<'a, S, SP, OT>, Error> {
/// Build [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor
pub fn build<OT, S>(&mut self, observers: OT) -> Result<TinyInstExecutor<S, SP, OT>, Error> {
if self.coverage_ptr.is_null() {
return Err(Error::illegal_argument("Coverage pointer may not be null."));
}
let (map, shmem_id) = match &mut self.shmem_provider {
Some(provider) => {
// setup shared memory
@ -285,7 +306,7 @@ where
Ok(TinyInstExecutor {
tinyinst,
coverage,
coverage_ptr: self.coverage_ptr,
timeout: self.timeout,
observers,
phantom: PhantomData,
@ -295,7 +316,7 @@ where
}
}
impl<'a, S, SP, OT> HasObservers for TinyInstExecutor<'a, S, SP, OT>
impl<S, SP, OT> HasObservers for TinyInstExecutor<S, SP, OT>
where
S: State,
SP: ShMemProvider,
@ -309,14 +330,14 @@ where
RefIndexable::from(&mut self.observers)
}
}
impl<'a, S, SP, OT> UsesState for TinyInstExecutor<'a, S, SP, OT>
impl<S, SP, OT> UsesState for TinyInstExecutor<S, SP, OT>
where
S: State,
SP: ShMemProvider,
{
type State = S;
}
impl<'a, S, SP, OT> UsesObservers for TinyInstExecutor<'a, S, SP, OT>
impl<S, SP, OT> UsesObservers for TinyInstExecutor<S, SP, OT>
where
OT: ObserversTuple<S>,
S: State,