Remove ShMemProvider bound from struct definitions (#2861)

* No more ShMemProvider bound constraint in struct definition whenever possible

* Introduce StdShMem

* Update CONTRIBUTING.md
This commit is contained in:
Romain Malmain 2025-01-20 17:25:55 +01:00 committed by GitHub
parent 348bfdc7d7
commit 8089b18d34
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
48 changed files with 754 additions and 664 deletions

View File

@ -75,7 +75,7 @@ pub trait X
}
```
- __Ideally__ the types used in the the arguments of methods in traits should have the same as the types defined on the traits.
- __Ideally__ the types used in the arguments of methods in traits should be the same as the types defined on the traits.
```rust
pub trait X<A, B, C> // <- this trait has 3 generics, A, B, and C
{
@ -84,6 +84,22 @@ pub trait X<A, B, C> // <- this trait have 3 generics, A, B, and C
fn do_other_stuff(&self, a: A, b: B); // <- this is not ideal because it does not have C.
}
```
- Generic naming should be consistent. Do NOT use multiple names for the same generic, it just makes things more confusing. Do:
```rust
pub struct X<A> {
    phantom: PhantomData<A>,
}
impl<A> X<A> {}
```
But not:
```rust
pub struct X<A> {
    phantom: PhantomData<A>,
}
impl<B> X<B> {} // <- Do NOT do that, use A instead of B
```
- Always alphabetically order the type generics. Therefore,
```rust
pub struct X<E, EM, OT, S, Z> {}; // <- Generics are alphabetically ordered
@ -91,4 +107,31 @@ pub struct X<E, EM, OT, S, Z> {}; // <- Generics are alphabetically ordered
But not,
```rust
pub struct X<S, OT, Z, EM, E> {}; // <- Generics are not ordered
```
```
- Similarly, generic bounds in `where` clauses should be alphabetically sorted. Prefer:
```rust
pub trait FooA {}
pub trait FooB {}
pub struct X<A, B>;
impl<A, B> X<A, B>
where
A: FooA,
B: FooB,
{}
```
Over:
```rust
pub trait FooA {}
pub trait FooB {}
pub struct X<A, B>;
impl<A, B> X<A, B>
where
B: FooB, // <-|
// | Generic bounds are not alphabetically ordered.
A: FooA, // <-|
{}
```

View File

@ -96,7 +96,7 @@ unsafe fn fuzz(
let shmem_provider = StdShMemProvider::new()?;
let mut run_client = |state: Option<_>,
mgr: LlmpRestartingEventManager<_, _, _, _>,
mgr: LlmpRestartingEventManager<_, _, _, _, _>,
client_description: ClientDescription| {
// The restarting state will spawn the same process again as child, then restarted it each time it crashes.
@ -104,7 +104,7 @@ unsafe fn fuzz(
if options.asan && options.asan_cores.contains(client_description.core_id()) {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();
@ -231,7 +231,7 @@ unsafe fn fuzz(
})(state, mgr, client_description)
} else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();
@ -367,7 +367,7 @@ unsafe fn fuzz(
})(state, mgr, client_description)
} else {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();

View File

@ -81,7 +81,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
};
let mut run_client = |state: Option<_>,
mgr: LlmpRestartingEventManager<_, _, _, _>,
mgr: LlmpRestartingEventManager<_, _, _, _, _>,
client_description: ClientDescription| {
// The restarting state will spawn the same process again as child, then restarted it each time it crashes.
@ -101,7 +101,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
// if options.asan && options.asan_cores.contains(client_description.core_id()) {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();

View File

@ -78,7 +78,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
let shmem_provider = StdShMemProvider::new()?;
let mut run_client = |state: Option<_>,
mgr: LlmpRestartingEventManager<_, _, _, _>,
mgr: LlmpRestartingEventManager<_, _, _, _, _>,
client_description: ClientDescription| {
// The restarting state will spawn the same process again as child, then restarted it each time it crashes.
@ -98,7 +98,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
if options.asan && options.asan_cores.contains(client_description.core_id()) {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();
@ -214,7 +214,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
})(state, mgr, client_description)
} else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();
@ -344,7 +344,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
})(state, mgr, client_description)
} else {
(|state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_client_description| {
let gum = Gum::obtain();

View File

@ -124,7 +124,7 @@ pub fn fuzz() {
env::remove_var("LD_LIBRARY_PATH");
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
client_description: ClientDescription| {
let mut cov_path = options.coverage_path.clone();

View File

@ -7,7 +7,7 @@ use libafl::events::SimpleEventManager;
use libafl::events::{LlmpRestartingEventManager, MonitorTypedEventManager};
use libafl::{
corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus},
events::{ClientDescription, EventRestarter, NopEventManager},
events::{ClientDescription, EventRestarter},
executors::{Executor, ShadowExecutor},
feedback_or, feedback_or_fast,
feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback},
@ -30,7 +30,7 @@ use libafl::{
Error, HasMetadata, NopFuzzer,
};
#[cfg(not(feature = "simplemgr"))]
use libafl_bolts::shmem::StdShMemProvider;
use libafl_bolts::shmem::{StdShMem, StdShMemProvider};
use libafl_bolts::{
ownedref::OwnedMutSlice,
rands::StdRand,
@ -58,7 +58,7 @@ pub type ClientState =
pub type ClientMgr<M> = SimpleEventManager<BytesInput, M, ClientState>;
#[cfg(not(feature = "simplemgr"))]
pub type ClientMgr<M> = MonitorTypedEventManager<
LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMemProvider>,
LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMem, StdShMemProvider>,
M,
>;

View File

@ -68,6 +68,12 @@ where
Ok(false)
}
#[cfg(feature = "track_hit_feedbacks")]
#[inline]
fn last_result(&self) -> Result<bool, Error> {
Ok(false)
}
fn append_metadata(
&mut self,
state: &mut S,
@ -78,10 +84,4 @@ where
(self.func)(state, testcase, &self.out_dir)?;
Ok(())
}
#[cfg(feature = "track_hit_feedbacks")]
#[inline]
fn last_result(&self) -> Result<bool, Error> {
Ok(false)
}
}

View File

@ -42,7 +42,7 @@ use libafl::{
Error, Fuzzer, HasFeedback, HasMetadata, SerdeAny,
};
#[cfg(not(feature = "fuzzbench"))]
use libafl_bolts::shmem::StdShMemProvider;
use libafl_bolts::shmem::{StdShMem, StdShMemProvider};
use libafl_bolts::{
core_affinity::CoreId,
current_nanos, current_time,
@ -77,10 +77,11 @@ pub type LibaflFuzzState =
#[cfg(not(feature = "fuzzbench"))]
type LibaflFuzzManager = CentralizedEventManager<
LlmpRestartingEventManager<(), BytesInput, LibaflFuzzState, StdShMemProvider>,
LlmpRestartingEventManager<(), BytesInput, LibaflFuzzState, StdShMem, StdShMemProvider>,
(),
BytesInput,
LibaflFuzzState,
StdShMem,
StdShMemProvider,
>;
#[cfg(feature = "fuzzbench")]

View File

@ -30,7 +30,7 @@ use libafl::{
use libafl_bolts::{
current_nanos,
rands::StdRand,
shmem::StdShMemProvider,
shmem::{StdShMem, StdShMemProvider},
tuples::{tuple_list, Merge},
};
use libafl_nyx::{
@ -44,7 +44,7 @@ pub type ClientState =
StdState<InMemoryOnDiskCorpus<BytesInput>, BytesInput, StdRand, OnDiskCorpus<BytesInput>>;
pub type ClientMgr<M> = MonitorTypedEventManager<
LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMemProvider>,
LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMem, StdShMemProvider>,
M,
>;

View File

@ -141,7 +141,7 @@ pub extern "C" fn libafl_main() {
let mut secondary_run_client =
|state: Option<_>,
mut mgr: CentralizedEventManager<_, _, _, _, _>,
mut mgr: CentralizedEventManager<_, _, _, _, _, _>,
_client_description: ClientDescription| {
// Create an observation channel using the coverage map
let edges_observer =

View File

@ -219,7 +219,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
let orig_size = state.corpus().count();
let msg = "Started distillation...".to_string();
<LlmpRestartingEventManager<_, _, _, _> as EventFirer<BytesInput, _>>::log(
<LlmpRestartingEventManager<_, _, _, _, _> as EventFirer<BytesInput, _>>::log(
&mut restarting_mgr,
&mut state,
LogSeverity::Info,
@ -227,7 +227,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
)?;
minimizer.minimize(&mut fuzzer, &mut executor, &mut restarting_mgr, &mut state)?;
let msg = format!("Distilled out {} cases", orig_size - state.corpus().count());
<LlmpRestartingEventManager<_, _, _, _> as EventFirer<BytesInput, _>>::log(
<LlmpRestartingEventManager<_, _, _, _, _> as EventFirer<BytesInput, _>>::log(
&mut restarting_mgr,
&mut state,
LogSeverity::Info,

View File

@ -162,7 +162,7 @@ pub extern "C" fn libafl_main() {
);
let mut run_client = |state: Option<_>,
mut restarting_mgr: LlmpRestartingEventManager<_, _, _, _>,
mut restarting_mgr: LlmpRestartingEventManager<_, _, _, _, _>,
client_description: ClientDescription| {
// Create an observation channel using the coverage map
let edges_observer =

View File

@ -157,7 +157,7 @@ pub extern "C" fn libafl_main() {
let mut secondary_run_client =
|state: Option<_>,
mut mgr: CentralizedEventManager<_, _, _, _>,
mut mgr: CentralizedEventManager<_, _, _, _, _, _>,
_client_description: ClientDescription| {
// Create an observation channel using the coverage map
let edges_observer =

View File

@ -5,7 +5,6 @@ use core::{fmt::Debug, marker::PhantomData};
use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED};
use libafl_bolts::{
llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag},
shmem::ShMemProvider,
ClientId, Error,
};
use serde::de::DeserializeOwned;
@ -21,14 +20,13 @@ pub struct CentralizedLlmpHook<I> {
phantom: PhantomData<I>,
}
impl<I, SP> LlmpHook<SP> for CentralizedLlmpHook<I>
impl<I, SHM, SP> LlmpHook<SHM, SP> for CentralizedLlmpHook<I>
where
I: DeserializeOwned,
SP: ShMemProvider,
{
fn on_new_message(
&mut self,
_broker_inner: &mut LlmpBrokerInner<SP>,
_broker_inner: &mut LlmpBrokerInner<SHM, SP>,
client_id: ClientId,
msg_tag: &mut Tag,
_msg_flags: &mut Flags,

View File

@ -11,7 +11,6 @@ use libafl_bolts::llmp::LLMP_FLAG_COMPRESSED;
use libafl_bolts::{
llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag, LLMP_FLAG_FROM_MM},
ownedref::OwnedRef,
shmem::ShMemProvider,
ClientId, Error,
};
use serde::Serialize;
@ -149,16 +148,15 @@ where
}
}
impl<A, I, SP> LlmpHook<SP> for TcpMultiMachineLlmpSenderHook<A, I>
impl<A, I, SHM, SP> LlmpHook<SHM, SP> for TcpMultiMachineLlmpSenderHook<A, I>
where
I: Input,
A: Clone + Display + ToSocketAddrs + Send + Sync + 'static,
SP: ShMemProvider,
{
/// check for received messages, and forward them alongside the incoming message to inner.
fn on_new_message(
&mut self,
_broker_inner: &mut LlmpBrokerInner<SP>,
_broker_inner: &mut LlmpBrokerInner<SHM, SP>,
_client_id: ClientId,
_msg_tag: &mut Tag,
_msg_flags: &mut Flags,
@ -211,16 +209,15 @@ where
}
}
impl<A, I, SP> LlmpHook<SP> for TcpMultiMachineLlmpReceiverHook<A, I>
impl<A, I, SHM, SP> LlmpHook<SHM, SP> for TcpMultiMachineLlmpReceiverHook<A, I>
where
I: Input,
A: Clone + Display + ToSocketAddrs + Send + Sync + 'static,
SP: ShMemProvider,
{
/// check for received messages, and forward them alongside the incoming message to inner.
fn on_new_message(
&mut self,
_broker_inner: &mut LlmpBrokerInner<SP>,
_broker_inner: &mut LlmpBrokerInner<SHM, SP>,
_client_id: ClientId,
_msg_tag: &mut Tag,
_msg_flags: &mut Flags,

View File

@ -6,7 +6,6 @@ use core::marker::PhantomData;
use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED};
use libafl_bolts::{
llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag},
shmem::ShMemProvider,
ClientId,
};
use serde::de::DeserializeOwned;
@ -40,15 +39,14 @@ pub struct StdLlmpEventHook<I, MT> {
phantom: PhantomData<I>,
}
impl<I, MT, SP> LlmpHook<SP> for StdLlmpEventHook<I, MT>
impl<I, MT, SHM, SP> LlmpHook<SHM, SP> for StdLlmpEventHook<I, MT>
where
I: DeserializeOwned,
SP: ShMemProvider,
MT: Monitor,
{
fn on_new_message(
&mut self,
_broker_inner: &mut LlmpBrokerInner<SP>,
_broker_inner: &mut LlmpBrokerInner<SHM, SP>,
client_id: ClientId,
msg_tag: &mut Tag,
#[cfg(feature = "llmp_compression")] msg_flags: &mut Flags,

View File

@ -18,7 +18,7 @@ use libafl_bolts::{
};
use libafl_bolts::{
llmp::{LlmpClient, LlmpClientDescription, Tag},
shmem::{NopShMemProvider, ShMemProvider},
shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider},
tuples::{Handle, MatchNameRef},
ClientId,
};
@ -46,13 +46,10 @@ pub(crate) const _LLMP_TAG_TO_MAIN: Tag = Tag(0x3453453);
/// A wrapper manager to implement a main-secondary architecture with another broker
#[derive(Debug)]
pub struct CentralizedEventManager<EM, EMH, I, S, SP>
where
SP: ShMemProvider,
{
pub struct CentralizedEventManager<EM, EMH, I, S, SHM, SP> {
inner: EM,
/// The centralized LLMP client for inter process communication
client: LlmpClient<SP>,
client: LlmpClient<SHM, SP>,
#[cfg(feature = "llmp_compression")]
compressor: GzipCompressor,
time_ref: Option<Handle<TimeObserver>>,
@ -61,7 +58,16 @@ where
phantom: PhantomData<(I, S)>,
}
impl CentralizedEventManager<NopEventManager, (), NopInput, NopState<NopInput>, NopShMemProvider> {
impl
CentralizedEventManager<
NopEventManager,
(),
NopInput,
NopState<NopInput>,
NopShMem,
NopShMemProvider,
>
{
/// Creates a builder for [`CentralizedEventManager`]
#[must_use]
pub fn builder() -> CentralizedEventManagerBuilder {
@ -95,13 +101,14 @@ impl CentralizedEventManagerBuilder {
}
/// Creates a new [`CentralizedEventManager`].
#[expect(clippy::type_complexity)]
pub fn build_from_client<EM, EMH, I, S, SP>(
self,
inner: EM,
hooks: EMH,
client: LlmpClient<SP>,
client: LlmpClient<SP::ShMem, SP>,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, EMH, I, S, SP>, Error>
) -> Result<CentralizedEventManager<EM, EMH, I, S, SP::ShMem, SP>, Error>
where
SP: ShMemProvider,
{
@ -121,16 +128,17 @@ impl CentralizedEventManagerBuilder {
///
/// If the port is not yet bound, it will act as a broker; otherwise, it
/// will act as a client.
pub fn build_on_port<EM, EMH, I, S, SP>(
pub fn build_on_port<EM, EMH, I, S, SHM, SP>(
self,
inner: EM,
hooks: EMH,
shmem_provider: SP,
port: u16,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, EMH, I, S, SP>, Error>
) -> Result<CentralizedEventManager<EM, EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let client = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
Self::build_from_client(self, inner, hooks, client, time_obs)
@ -138,42 +146,43 @@ impl CentralizedEventManagerBuilder {
/// If a client respawns, it may reuse the existing connection, previously
/// stored by [`LlmpClient::to_env()`].
pub fn build_existing_client_from_env<EM, EMH, I, S, SP>(
pub fn build_existing_client_from_env<EM, EMH, I, S, SHM, SP>(
self,
inner: EM,
hooks: EMH,
shmem_provider: SP,
env_name: &str,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, EMH, I, S, SP>, Error>
) -> Result<CentralizedEventManager<EM, EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let client = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
Self::build_from_client(self, inner, hooks, client, time_obs)
}
/// Create an existing client from description
pub fn existing_client_from_description<EM, EMH, I, S, SP>(
pub fn existing_client_from_description<EM, EMH, I, S, SHM, SP>(
self,
inner: EM,
hooks: EMH,
shmem_provider: SP,
description: &LlmpClientDescription,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, EMH, I, S, SP>, Error>
) -> Result<CentralizedEventManager<EM, EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let client = LlmpClient::existing_client_from_description(shmem_provider, description)?;
Self::build_from_client(self, inner, hooks, client, time_obs)
}
}
impl<EM, EMH, I, S, SP> AdaptiveSerializer for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> AdaptiveSerializer for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: AdaptiveSerializer,
SP: ShMemProvider,
{
fn serialization_time(&self) -> Duration {
self.inner.serialization_time()
@ -206,13 +215,14 @@ where
}
}
impl<EM, EMH, I, S, SP> EventFirer<I, S> for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> EventFirer<I, S> for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: HasEventManagerId + EventFirer<I, S>,
EMH: EventManagerHooksTuple<I, S>,
SP: ShMemProvider,
S: Stoppable,
I: Input,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn should_send(&self) -> bool {
self.inner.should_send()
@ -263,10 +273,11 @@ where
}
}
impl<EM, EMH, I, S, SP> EventRestarter<S> for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> EventRestarter<S> for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
EM: EventRestarter<S>,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
#[inline]
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> {
@ -276,10 +287,10 @@ where
}
}
impl<EM, EMH, I, OT, S, SP> CanSerializeObserver<OT> for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, OT, S, SHM, SP> CanSerializeObserver<OT>
for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: AdaptiveSerializer,
SP: ShMemProvider,
OT: Serialize + MatchNameRef,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
@ -292,10 +303,11 @@ where
}
}
impl<EM, EMH, I, S, SP> ManagerExit for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> ManagerExit for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: ManagerExit,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn send_exiting(&mut self) -> Result<(), Error> {
self.client.sender_mut().send_exiting()?;
@ -309,15 +321,17 @@ where
}
}
impl<E, EM, EMH, I, S, SP, Z> EventProcessor<E, S, Z> for CentralizedEventManager<EM, EMH, I, S, SP>
impl<E, EM, EMH, I, S, SHM, SP, Z> EventProcessor<E, S, Z>
for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
E: HasObservers,
E::Observers: DeserializeOwned,
EM: EventProcessor<E, S, Z> + HasEventManagerId + EventFirer<I, S>,
EMH: EventManagerHooksTuple<I, S>,
S: Stoppable,
I: Input,
SP: ShMemProvider,
S: Stoppable,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
Z: ExecutionProcessor<Self, I, E::Observers, S> + EvaluatorObservers<E, Self, I, S>,
{
fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result<usize, Error> {
@ -337,13 +351,14 @@ where
}
}
impl<EM, EMH, I, S, SP> ProgressReporter<S> for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> ProgressReporter<S> for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: EventFirer<I, S> + HasEventManagerId,
EMH: EventManagerHooksTuple<I, S>,
S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor,
I: Input,
SP: ShMemProvider,
S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn maybe_report_progress(
&mut self,
@ -358,19 +373,19 @@ where
}
}
impl<EM, EMH, I, S, SP> HasEventManagerId for CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> HasEventManagerId for CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: HasEventManagerId,
SP: ShMemProvider,
{
fn mgr_id(&self) -> EventManagerId {
self.inner.mgr_id()
}
}
impl<EM, EMH, I, S, SP> CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Describe the client event manager's LLMP parts in a restorable fashion
pub fn describe(&self) -> Result<LlmpClientDescription, Error> {
@ -389,13 +404,14 @@ where
}
}
impl<EM, EMH, I, S, SP> CentralizedEventManager<EM, EMH, I, S, SP>
impl<EM, EMH, I, S, SHM, SP> CentralizedEventManager<EM, EMH, I, S, SHM, SP>
where
EM: HasEventManagerId + EventFirer<I, S>,
EMH: EventManagerHooksTuple<I, S>,
S: Stoppable,
I: Input,
SP: ShMemProvider,
S: Stoppable,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
#[cfg(feature = "llmp_compression")]
fn forward_to_main(&mut self, event: &Event<I>) -> Result<(), Error> {

View File

@ -208,7 +208,6 @@ impl<CF, MT, SP> Debug for Launcher<'_, CF, MT, SP> {
impl<CF, MT, SP> Launcher<'_, CF, MT, SP>
where
MT: Monitor + Clone,
SP: ShMemProvider,
{
/// Launch the broker and the clients and fuzz
#[cfg(any(windows, not(feature = "fork"), all(unix, feature = "fork")))]
@ -216,11 +215,12 @@ where
where
CF: FnOnce(
Option<S>,
LlmpRestartingEventManager<(), I, S, SP>,
LlmpRestartingEventManager<(), I, S, SP::ShMem, SP>,
ClientDescription,
) -> Result<(), Error>,
I: DeserializeOwned,
S: DeserializeOwned + Serialize,
SP: ShMemProvider,
{
Self::launch_with_hooks(self, tuple_list!())
}
@ -240,7 +240,7 @@ where
EMH: EventManagerHooksTuple<I, S> + Clone + Copy,
CF: FnOnce(
Option<S>,
LlmpRestartingEventManager<EMH, I, S, SP>,
LlmpRestartingEventManager<EMH, I, S, SP::ShMem, SP>,
ClientDescription,
) -> Result<(), Error>,
{
@ -385,7 +385,7 @@ where
where
CF: FnOnce(
Option<S>,
LlmpRestartingEventManager<EMH, I, S, SP>,
LlmpRestartingEventManager<EMH, I, S, SP::ShMem, SP>,
ClientDescription,
) -> Result<(), Error>,
EMH: EventManagerHooksTuple<I, S> + Clone + Copy,
@ -620,7 +620,7 @@ impl<CF, MF, MT, SP> Debug for CentralizedLauncher<'_, CF, MF, MT, SP> {
}
/// The standard inner manager of centralized
pub type StdCentralizedInnerMgr<I, S, SP> = LlmpRestartingEventManager<(), I, S, SP>;
pub type StdCentralizedInnerMgr<I, S, SHM, SP> = LlmpRestartingEventManager<(), I, S, SHM, SP>;
#[cfg(all(unix, feature = "fork"))]
impl<CF, MF, MT, SP> CentralizedLauncher<'_, CF, MF, MT, SP>
@ -635,12 +635,26 @@ where
I: DeserializeOwned + Input + Send + Sync + 'static,
CF: FnOnce(
Option<S>,
CentralizedEventManager<StdCentralizedInnerMgr<I, S, SP>, (), I, S, SP>,
CentralizedEventManager<
StdCentralizedInnerMgr<I, S, SP::ShMem, SP>,
(),
I,
S,
SP::ShMem,
SP,
>,
ClientDescription,
) -> Result<(), Error>,
MF: FnOnce(
Option<S>,
CentralizedEventManager<StdCentralizedInnerMgr<I, S, SP>, (), I, S, SP>,
CentralizedEventManager<
StdCentralizedInnerMgr<I, S, SP::ShMem, SP>,
(),
I,
S,
SP::ShMem,
SP,
>,
ClientDescription,
) -> Result<(), Error>,
{
@ -682,13 +696,13 @@ where
I: Input + Send + Sync + 'static,
CF: FnOnce(
Option<S>,
CentralizedEventManager<EM, (), I, S, SP>,
CentralizedEventManager<EM, (), I, S, SP::ShMem, SP>,
ClientDescription,
) -> Result<(), Error>,
EMB: FnOnce(&Self, ClientDescription) -> Result<(Option<S>, EM), Error>,
MF: FnOnce(
Option<S>,
CentralizedEventManager<EM, (), I, S, SP>, // No broker_hooks for centralized EM
CentralizedEventManager<EM, (), I, S, SP::ShMem, SP>, // No broker_hooks for centralized EM
ClientDescription,
) -> Result<(), Error>,
{

View File

@ -4,7 +4,7 @@
#[cfg(feature = "std")]
use alloc::string::ToString;
use alloc::vec::Vec;
use core::{marker::PhantomData, time::Duration};
use core::{fmt::Debug, marker::PhantomData, time::Duration};
#[cfg(feature = "std")]
use std::net::TcpStream;
@ -18,7 +18,7 @@ use libafl_bolts::{
use libafl_bolts::{
current_time,
llmp::{LlmpClient, LlmpClientDescription, LLMP_FLAG_FROM_MM},
shmem::{NopShMemProvider, ShMemProvider},
shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider},
tuples::Handle,
ClientId,
};
@ -59,9 +59,10 @@ const INITIAL_EVENT_BUFFER_SIZE: usize = 1024 * 4;
/// An `EventManager` that forwards all events to other attached fuzzers on shared maps or via tcp,
/// using low-level message passing, `llmp`.
pub struct LlmpEventManager<EMH, I, S, SP>
pub struct LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// We only send 1 testcase for every `throttle` second
pub(crate) throttle: Option<Duration>,
@ -69,7 +70,7 @@ where
last_sent: Duration,
hooks: EMH,
/// The LLMP client for inter process communication
llmp: LlmpClient<SP>,
llmp: LlmpClient<SHM, SP>,
#[cfg(feature = "llmp_compression")]
compressor: GzipCompressor,
/// The configuration defines this specific fuzzer.
@ -85,7 +86,7 @@ where
event_buffer: Vec<u8>,
}
impl LlmpEventManager<(), NopState<NopInput>, NopInput, NopShMemProvider> {
impl LlmpEventManager<(), NopState<NopInput>, NopInput, NopShMem, NopShMemProvider> {
/// Creates a builder for [`LlmpEventManager`]
#[must_use]
pub fn builder() -> LlmpEventManagerBuilder<()> {
@ -134,14 +135,15 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
}
/// Create a manager from a raw LLMP client
pub fn build_from_client<I, S, SP>(
pub fn build_from_client<I, S, SHM, SP>(
self,
llmp: LlmpClient<SP>,
llmp: LlmpClient<SHM, SP>,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
) -> Result<LlmpEventManager<EMH, I, S, SP>, Error>
) -> Result<LlmpEventManager<EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
Ok(LlmpEventManager {
throttle: self.throttle,
@ -164,15 +166,16 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
/// Create an LLMP event manager on a port.
/// It expects a broker to exist on this port.
#[cfg(feature = "std")]
pub fn build_on_port<I, S, SP>(
pub fn build_on_port<I, S, SHM, SP>(
self,
shmem_provider: SP,
port: u16,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
) -> Result<LlmpEventManager<EMH, I, S, SP>, Error>
) -> Result<LlmpEventManager<EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
Self::build_from_client(self, llmp, configuration, time_ref)
@ -181,30 +184,32 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
/// If a client respawns, it may reuse the existing connection, previously
/// stored by [`LlmpClient::to_env()`].
#[cfg(feature = "std")]
pub fn build_existing_client_from_env<I, S, SP>(
pub fn build_existing_client_from_env<I, S, SHM, SP>(
self,
shmem_provider: SP,
env_name: &str,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
) -> Result<LlmpEventManager<EMH, I, S, SP>, Error>
) -> Result<LlmpEventManager<EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
Self::build_from_client(self, llmp, configuration, time_ref)
}
/// Create an existing client from description
pub fn build_existing_client_from_description<I, S, SP>(
pub fn build_existing_client_from_description<I, S, SHM, SP>(
self,
shmem_provider: SP,
description: &LlmpClientDescription,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
) -> Result<LlmpEventManager<EMH, I, S, SP>, Error>
) -> Result<LlmpEventManager<EMH, I, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::existing_client_from_description(shmem_provider, description)?;
Self::build_from_client(self, llmp, configuration, time_ref)
@ -212,19 +217,21 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
}
#[cfg(feature = "std")]
impl<EMH, I, OT, S, SP> CanSerializeObserver<OT> for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, OT, S, SHM, SP> CanSerializeObserver<OT> for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
OT: Serialize + MatchNameRef,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
serialize_observers_adaptive::<Self, OT>(self, observers, 2, 80)
}
}
impl<EMH, I, S, SP> AdaptiveSerializer for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> AdaptiveSerializer for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn serialization_time(&self) -> Duration {
self.serialization_time
@ -257,9 +264,10 @@ where
}
}
impl<EMH, I, S, SP> core::fmt::Debug for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> Debug for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut debug_struct = f.debug_struct("LlmpEventManager");
@ -274,9 +282,10 @@ where
}
}
impl<EMH, I, S, SP> Drop for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> Drop for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// LLMP clients will have to wait until their pages are mapped by somebody.
fn drop(&mut self) {
@ -284,9 +293,10 @@ where
}
}
impl<EMH, I, S, SP> LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Calling this function will tell the llmp broker that this client is exiting
/// This should be called from the restarter not from the actual fuzzer client
@ -334,9 +344,10 @@ where
}
}
impl<EMH, I, S, SP> LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
// Handle arriving events in the client
fn handle_in_client<E, Z>(
@ -423,7 +434,11 @@ where
}
}
impl<EMH, I, S, SP: ShMemProvider> LlmpEventManager<EMH, I, S, SP> {
impl<EMH, I, S, SHM, SP> LlmpEventManager<EMH, I, S, SHM, SP>
where
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Send information that this client is exiting.
/// The other side may free up all allocated memory.
/// We are no longer allowed to send anything afterwards.
@ -432,18 +447,12 @@ impl<EMH, I, S, SP: ShMemProvider> LlmpEventManager<EMH, I, S, SP> {
}
}
impl<EMH, I, S, SP> EventFirer<I, S> for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> EventFirer<I, S> for LlmpEventManager<EMH, I, S, SHM, SP>
where
I: Serialize,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn should_send(&self) -> bool {
if let Some(throttle) = self.throttle {
current_time() - self.last_sent > throttle
} else {
true
}
}
fn fire(&mut self, _state: &mut S, event: Event<I>) -> Result<(), Error> {
#[cfg(feature = "llmp_compression")]
let flags = LLMP_FLAG_INITIALIZED;
@ -490,46 +499,57 @@ where
self.last_sent = current_time();
Ok(())
}
fn configuration(&self) -> EventConfig {
self.configuration
}
fn should_send(&self) -> bool {
if let Some(throttle) = self.throttle {
current_time() - self.last_sent > throttle
} else {
true
}
}
}
impl<EMH, I, S, SP> EventRestarter<S> for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> EventRestarter<S> for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
S: HasCurrentStageId,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> {
std_on_restart(self, state)
}
}
impl<EMH, I, S, SP> ManagerExit for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> ManagerExit for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn send_exiting(&mut self) -> Result<(), Error> {
self.llmp.sender_mut().send_exiting()
}
/// The LLMP client needs to wait until a broker has mapped all pages before shutting down.
/// Otherwise, the OS may already have removed the shared maps.
fn await_restart_safe(&mut self) {
// wait until we can drop the message safely.
self.llmp.await_safe_to_unmap_blocking();
}
fn send_exiting(&mut self) -> Result<(), Error> {
self.llmp.sender_mut().send_exiting()
}
}
impl<E, EMH, I, S, SP, Z> EventProcessor<E, S, Z> for LlmpEventManager<EMH, I, S, SP>
impl<E, EMH, I, S, SHM, SP, Z> EventProcessor<E, S, Z> for LlmpEventManager<EMH, I, S, SHM, SP>
where
E: HasObservers,
E::Observers: DeserializeOwned,
S: HasImported + HasSolutions<I> + HasCurrentTestcase<I> + Stoppable,
EMH: EventManagerHooksTuple<I, S>,
I: DeserializeOwned + Input,
SP: ShMemProvider,
S: HasImported + HasSolutions<I> + HasCurrentTestcase<I> + Stoppable,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
Z: ExecutionProcessor<Self, I, E::Observers, S> + EvaluatorObservers<E, Self, I, S>,
{
fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result<usize, Error> {
@ -537,14 +557,15 @@ where
let self_id = self.llmp.sender().id();
let mut count = 0;
while let Some((client_id, tag, flags, msg)) = self.llmp.recv_buf_with_flags()? {
assert!(
tag != _LLMP_TAG_EVENT_TO_BROKER,
assert_ne!(
tag, _LLMP_TAG_EVENT_TO_BROKER,
"EVENT_TO_BROKER parcel should not have arrived in the client!"
);
if client_id == self_id {
continue;
}
#[cfg(not(feature = "llmp_compression"))]
let event_bytes = msg;
#[cfg(feature = "llmp_compression")]
@ -556,6 +577,7 @@ where
} else {
msg
};
let event: Event<I> = postcard::from_bytes(event_bytes)?;
log::debug!("Received event in normal llmp {}", event.name_detailed());
@ -576,11 +598,12 @@ where
}
}
impl<EMH, I, S, SP> ProgressReporter<S> for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> ProgressReporter<S> for LlmpEventManager<EMH, I, S, SHM, SP>
where
S: HasExecutions + HasLastReportTime + HasMetadata + MaybeHasClientPerfMonitor,
SP: ShMemProvider,
I: Serialize,
S: HasExecutions + HasLastReportTime + HasMetadata + MaybeHasClientPerfMonitor,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn maybe_report_progress(
&mut self,
@ -595,9 +618,10 @@ where
}
}
impl<EMH, I, S, SP> HasEventManagerId for LlmpEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> HasEventManagerId for LlmpEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Gets the id assigned to this staterestorer.
fn mgr_id(&self) -> EventManagerId {

View File

@ -9,7 +9,7 @@ use libafl_bolts::{
};
use libafl_bolts::{
llmp::{LlmpClient, LlmpClientDescription, Tag},
shmem::{NopShMemProvider, ShMemProvider},
shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider},
ClientId,
};
use serde::{de::DeserializeOwned, Serialize};
@ -84,12 +84,9 @@ impl LlmpShouldSaveState {
}
/// A manager-like llmp client that converts between input types
pub struct LlmpEventConverter<I, IC, ICB, S, SP>
where
SP: ShMemProvider,
{
pub struct LlmpEventConverter<I, IC, ICB, S, SHM, SP> {
throttle: Option<Duration>,
llmp: LlmpClient<SP>,
llmp: LlmpClient<SHM, SP>,
last_sent: Duration,
#[cfg(feature = "llmp_compression")]
compressor: GzipCompressor,
@ -104,6 +101,7 @@ impl
NopInputConverter<NopInput>,
NopInputConverter<NopInput>,
NopState<NopInput>,
NopShMem,
NopShMemProvider,
>
{
@ -136,15 +134,12 @@ impl LlmpEventConverterBuilder {
}
/// Create a event converter from a raw llmp client
pub fn build_from_client<I, IC, ICB, S, SP>(
pub fn build_from_client<I, IC, ICB, S, SHM, SP>(
self,
llmp: LlmpClient<SP>,
llmp: LlmpClient<SHM, SP>,
converter: Option<IC>,
converter_back: Option<ICB>,
) -> Result<LlmpEventConverter<I, IC, ICB, S, SP>, Error>
where
SP: ShMemProvider,
{
) -> Result<LlmpEventConverter<I, IC, ICB, S, SHM, SP>, Error> {
Ok(LlmpEventConverter {
throttle: self.throttle,
last_sent: Duration::from_secs(0),
@ -159,15 +154,16 @@ impl LlmpEventConverterBuilder {
/// Create a client from port and the input converters
#[cfg(feature = "std")]
pub fn build_on_port<I, IC, ICB, S, SP>(
pub fn build_on_port<I, IC, ICB, S, SHM, SP>(
self,
shmem_provider: SP,
port: u16,
converter: Option<IC>,
converter_back: Option<ICB>,
) -> Result<LlmpEventConverter<I, IC, ICB, S, SP>, Error>
) -> Result<LlmpEventConverter<I, IC, ICB, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
Ok(LlmpEventConverter {
@ -184,15 +180,16 @@ impl LlmpEventConverterBuilder {
/// If a client respawns, it may reuse the existing connection, previously stored by [`LlmpClient::to_env()`].
#[cfg(feature = "std")]
pub fn build_existing_client_from_env<I, IC, ICB, S, SP>(
pub fn build_existing_client_from_env<I, IC, ICB, S, SHM, SP>(
self,
shmem_provider: SP,
env_name: &str,
converter: Option<IC>,
converter_back: Option<ICB>,
) -> Result<LlmpEventConverter<I, IC, ICB, S, SP>, Error>
) -> Result<LlmpEventConverter<I, IC, ICB, S, SHM, SP>, Error>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
Ok(LlmpEventConverter {
@ -208,11 +205,12 @@ impl LlmpEventConverterBuilder {
}
}
impl<I, IC, ICB, S, SP> Debug for LlmpEventConverter<I, IC, ICB, S, SP>
impl<I, IC, ICB, S, SHM, SP> Debug for LlmpEventConverter<I, IC, ICB, S, SHM, SP>
where
SP: ShMemProvider,
IC: Debug,
ICB: Debug,
SHM: Debug,
SP: Debug,
{
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let mut debug_struct = f.debug_struct("LlmpEventConverter");
@ -228,9 +226,10 @@ where
}
}
impl<I, IC, ICB, S, SP> LlmpEventConverter<I, IC, ICB, S, SP>
impl<I, IC, ICB, S, SHM, SP> LlmpEventConverter<I, IC, ICB, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
// TODO other new_* routines
@ -341,8 +340,8 @@ where
let self_id = self.llmp.sender().id();
let mut count = 0;
while let Some((client_id, tag, _flags, msg)) = self.llmp.recv_buf_with_flags()? {
assert!(
tag != _LLMP_TAG_EVENT_TO_BROKER,
assert_ne!(
tag, _LLMP_TAG_EVENT_TO_BROKER,
"EVENT_TO_BROKER parcel should not have arrived in the client!"
);
@ -370,11 +369,12 @@ where
}
}
impl<I, IC, ICB, S, SP> EventFirer<I, S> for LlmpEventConverter<I, IC, ICB, S, SP>
impl<I, IC, ICB, S, SHM, SP> EventFirer<I, S> for LlmpEventConverter<I, IC, ICB, S, SHM, SP>
where
IC: InputConverter<From = I>,
SP: ShMemProvider,
IC::To: Serialize,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn should_send(&self) -> bool {
if let Some(throttle) = self.throttle {

View File

@ -22,7 +22,7 @@ use libafl_bolts::{
core_affinity::CoreId,
llmp::{Broker, LlmpBroker, LlmpConnection},
os::CTRL_C_EXIT,
shmem::{ShMemProvider, StdShMemProvider},
shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider},
staterestore::StateRestorer,
tuples::{tuple_list, Handle, MatchNameRef},
};
@ -55,21 +55,23 @@ use crate::{
/// A manager that can restart on the fly, storing states in-between (in `on_restart`)
#[derive(Debug)]
pub struct LlmpRestartingEventManager<EMH, I, S, SP>
pub struct LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// The embedded LLMP event manager
llmp_mgr: LlmpEventManager<EMH, I, S, SP>,
llmp_mgr: LlmpEventManager<EMH, I, S, SHM, SP>,
/// The staterestorer to serialize the state for the next runner
staterestorer: StateRestorer<SP>,
staterestorer: StateRestorer<SHM, SP>,
/// Decide if the state restorer must save the serialized state
save_state: LlmpShouldSaveState,
}
impl<EMH, I, S, SP> AdaptiveSerializer for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> AdaptiveSerializer for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn serialization_time(&self) -> Duration {
self.llmp_mgr.serialization_time()
@ -102,11 +104,12 @@ where
}
}
impl<EMH, I, S, SP> ProgressReporter<S> for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> ProgressReporter<S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
S: HasExecutions + HasLastReportTime + HasMetadata + Serialize + MaybeHasClientPerfMonitor,
SP: ShMemProvider,
I: Serialize,
S: HasExecutions + HasLastReportTime + HasMetadata + Serialize + MaybeHasClientPerfMonitor,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn maybe_report_progress(
&mut self,
@ -121,16 +124,13 @@ where
}
}
impl<EMH, I, S, SP> EventFirer<I, S> for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> EventFirer<I, S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
I: Serialize,
S: Serialize,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn should_send(&self) -> bool {
<LlmpEventManager<EMH, I, S, SP> as EventFirer<I, S>>::should_send(&self.llmp_mgr)
}
fn fire(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error> {
// Check if we are going to crash in the event, in which case we store our current state for the next runner
self.llmp_mgr.fire(state, event)?;
@ -139,25 +139,32 @@ where
}
fn configuration(&self) -> EventConfig {
<LlmpEventManager<EMH, I, S, SP> as EventFirer<I, S>>::configuration(&self.llmp_mgr)
<LlmpEventManager<EMH, I, S, SHM, SP> as EventFirer<I, S>>::configuration(&self.llmp_mgr)
}
fn should_send(&self) -> bool {
<LlmpEventManager<EMH, I, S, SHM, SP> as EventFirer<I, S>>::should_send(&self.llmp_mgr)
}
}
#[cfg(feature = "std")]
impl<EMH, I, OT, S, SP> CanSerializeObserver<OT> for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, OT, S, SHM, SP> CanSerializeObserver<OT>
for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
OT: Serialize + MatchNameRef,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
serialize_observers_adaptive::<Self, OT>(self, observers, 2, 80)
}
}
impl<EMH, I, S, SP> EventRestarter<S> for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> EventRestarter<S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
S: Serialize + HasCurrentStageId,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner.
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> {
@ -180,9 +187,10 @@ where
}
}
impl<EMH, I, S, SP> ManagerExit for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> ManagerExit for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn send_exiting(&mut self) -> Result<(), Error> {
self.staterestorer.send_exiting();
@ -199,16 +207,18 @@ where
}
}
impl<E, EMH, I, S, SP, Z> EventProcessor<E, S, Z> for LlmpRestartingEventManager<EMH, I, S, SP>
impl<E, EMH, I, S, SHM, SP, Z> EventProcessor<E, S, Z>
for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
EMH: EventManagerHooksTuple<I, S>,
E: HasObservers,
E::Observers: DeserializeOwned,
S: HasImported + HasCurrentTestcase<I> + HasSolutions<I> + Stoppable + Serialize,
EMH: EventManagerHooksTuple<I, S>,
I: DeserializeOwned + Input,
SP: ShMemProvider,
Z: ExecutionProcessor<LlmpEventManager<EMH, I, S, SP>, I, E::Observers, S>
+ EvaluatorObservers<E, LlmpEventManager<EMH, I, S, SP>, I, S>,
S: HasImported + HasCurrentTestcase<I> + HasSolutions<I> + Stoppable + Serialize,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
Z: ExecutionProcessor<LlmpEventManager<EMH, I, S, SHM, SP>, I, E::Observers, S>
+ EvaluatorObservers<E, LlmpEventManager<EMH, I, S, SHM, SP>, I, S>,
{
fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result<usize, Error> {
let res = self.llmp_mgr.process(fuzzer, state, executor)?;
@ -221,9 +231,10 @@ where
}
}
impl<EMH, I, S, SP> HasEventManagerId for LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> HasEventManagerId for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn mgr_id(&self) -> EventManagerId {
self.llmp_mgr.mgr_id()
@ -236,15 +247,16 @@ const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER";
/// The llmp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages)
const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT";
impl<EMH, I, S, SP> LlmpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
S: Serialize,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Create a new runner, the executed child doing the actual fuzzing.
pub fn new(
llmp_mgr: LlmpEventManager<EMH, I, S, SP>,
staterestorer: StateRestorer<SP>,
llmp_mgr: LlmpEventManager<EMH, I, S, SHM, SP>,
staterestorer: StateRestorer<SHM, SP>,
) -> Self {
Self {
llmp_mgr,
@ -255,8 +267,8 @@ where
/// Create a new runner specifying if it must save the serialized state on restart.
pub fn with_save_state(
llmp_mgr: LlmpEventManager<EMH, I, S, SP>,
staterestorer: StateRestorer<SP>,
llmp_mgr: LlmpEventManager<EMH, I, S, SHM, SP>,
staterestorer: StateRestorer<SHM, SP>,
save_state: LlmpShouldSaveState,
) -> Self {
Self {
@ -267,12 +279,12 @@ where
}
/// Get the staterestorer
pub fn staterestorer(&self) -> &StateRestorer<SP> {
pub fn staterestorer(&self) -> &StateRestorer<SHM, SP> {
&self.staterestorer
}
/// Get the staterestorer (mutable)
pub fn staterestorer_mut(&mut self) -> &mut StateRestorer<SP> {
pub fn staterestorer_mut(&mut self) -> &mut StateRestorer<SHM, SP> {
&mut self.staterestorer
}
@ -314,7 +326,7 @@ pub fn setup_restarting_mgr_std<I, MT, S>(
) -> Result<
(
Option<S>,
LlmpRestartingEventManager<(), I, S, StdShMemProvider>,
LlmpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>,
),
Error,
>
@ -347,7 +359,7 @@ pub fn setup_restarting_mgr_std_adaptive<I, MT, S>(
) -> Result<
(
Option<S>,
LlmpRestartingEventManager<(), I, S, StdShMemProvider>,
LlmpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>,
),
Error,
>
@ -414,20 +426,26 @@ pub struct RestartingMgr<EMH, I, MT, S, SP> {
impl<EMH, I, MT, S, SP> RestartingMgr<EMH, I, MT, S, SP>
where
EMH: EventManagerHooksTuple<I, S> + Copy + Clone,
SP: ShMemProvider,
S: Serialize + DeserializeOwned,
I: DeserializeOwned,
MT: Monitor + Clone,
S: Serialize + DeserializeOwned,
SP: ShMemProvider,
{
/// Launch the broker and the clients and fuzz
pub fn launch(
&mut self,
) -> Result<(Option<S>, LlmpRestartingEventManager<EMH, I, S, SP>), Error> {
) -> Result<
(
Option<S>,
LlmpRestartingEventManager<EMH, I, S, SP::ShMem, SP>,
),
Error,
> {
// We start ourselves as child process to actually fuzz
let (staterestorer, new_shmem_provider, core_id) = if std::env::var(_ENV_FUZZER_SENDER)
.is_err()
{
let broker_things = |mut broker: LlmpBroker<_, SP>, remote_broker_addr| {
let broker_things = |mut broker: LlmpBroker<_, SP::ShMem, SP>, remote_broker_addr| {
if let Some(remote_broker_addr) = remote_broker_addr {
log::info!("B2b: Connecting to {:?}", &remote_broker_addr);
broker.inner_mut().connect_b2b(remote_broker_addr)?;
@ -467,13 +485,14 @@ where
return Err(Error::shutting_down());
}
LlmpConnection::IsClient { client } => {
let mgr: LlmpEventManager<EMH, I, S, SP> = LlmpEventManager::builder()
.hooks(self.hooks)
.build_from_client(
client,
self.configuration,
self.time_ref.clone(),
)?;
let mgr: LlmpEventManager<EMH, I, S, SP::ShMem, SP> =
LlmpEventManager::builder()
.hooks(self.hooks)
.build_from_client(
client,
self.configuration,
self.time_ref.clone(),
)?;
(mgr, None)
}
}
@ -516,11 +535,11 @@ where
// First, create a channel from the current fuzzer to the next to store state between restarts.
#[cfg(unix)]
let staterestorer: StateRestorer<SP> =
let staterestorer: StateRestorer<SP::ShMem, SP> =
StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?);
#[cfg(not(unix))]
let staterestorer: StateRestorer<SP> =
let staterestorer: StateRestorer<SP::ShMem, SP> =
StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?);
// Store the information to a map.
staterestorer.write_to_env(_ENV_FUZZER_SENDER)?;
@ -683,7 +702,7 @@ mod tests {
use libafl_bolts::{
llmp::{LlmpClient, LlmpSharedMap},
rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider},
shmem::{ShMemProvider, StdShMem, StdShMemProvider},
staterestore::StateRestorer,
tuples::{tuple_list, Handled},
ClientId,
@ -772,7 +791,7 @@ mod tests {
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
// First, create a channel from the current fuzzer to the next to store state between restarts.
let mut staterestorer = StateRestorer::<StdShMemProvider>::new(
let mut staterestorer = StateRestorer::<StdShMem, StdShMemProvider>::new(
shmem_provider.new_shmem(256 * 1024 * 1024).unwrap(),
);

View File

@ -13,7 +13,11 @@ use libafl_bolts::os::unix_signals::setup_signal_handler;
use libafl_bolts::os::{fork, ForkResult};
use libafl_bolts::ClientId;
#[cfg(feature = "std")]
use libafl_bolts::{os::CTRL_C_EXIT, shmem::ShMemProvider, staterestore::StateRestorer};
use libafl_bolts::{
os::CTRL_C_EXIT,
shmem::{ShMem, ShMemProvider},
staterestore::StateRestorer,
};
#[cfg(feature = "std")]
use serde::de::DeserializeOwned;
use serde::Serialize;
@ -280,23 +284,19 @@ where
/// `restarter` will start a new process each time the child crashes or times out.
#[cfg(feature = "std")]
#[derive(Debug)]
pub struct SimpleRestartingEventManager<I, MT, S, SP>
where
SP: ShMemProvider,
{
pub struct SimpleRestartingEventManager<I, MT, S, SHM, SP> {
/// The actual simple event mgr
inner: SimpleEventManager<I, MT, S>,
/// [`StateRestorer`] for restarts
staterestorer: StateRestorer<SP>,
staterestorer: StateRestorer<SHM, SP>,
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> EventFirer<I, S> for SimpleRestartingEventManager<I, MT, S, SP>
impl<I, MT, S, SHM, SP> EventFirer<I, S> for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
I: Debug,
MT: Monitor,
S: Stoppable,
SP: ShMemProvider,
{
fn should_send(&self) -> bool {
true
@ -308,9 +308,10 @@ where
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> EventRestarter<S> for SimpleRestartingEventManager<I, MT, S, SP>
impl<I, MT, S, SHM, SP> EventRestarter<S> for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
S: HasCurrentStageId + Serialize,
MT: Monitor,
{
@ -329,9 +330,9 @@ where
}
#[cfg(feature = "std")]
impl<I, MT, OT, S, SP> CanSerializeObserver<OT> for SimpleRestartingEventManager<I, MT, S, SP>
impl<I, MT, OT, S, SHM, SP> CanSerializeObserver<OT>
for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
SP: ShMemProvider,
OT: Serialize,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
@ -340,9 +341,10 @@ where
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> ManagerExit for SimpleRestartingEventManager<I, MT, S, SP>
impl<I, MT, S, SHM, SP> ManagerExit for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn send_exiting(&mut self) -> Result<(), Error> {
self.staterestorer.send_exiting();
@ -354,12 +356,14 @@ where
}
#[cfg(feature = "std")]
impl<E, I, MT, S, SP, Z> EventProcessor<E, S, Z> for SimpleRestartingEventManager<I, MT, S, SP>
impl<E, I, MT, S, SHM, SP, Z> EventProcessor<E, S, Z>
for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
I: Debug,
MT: Monitor,
SP: ShMemProvider,
S: Stoppable,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result<usize, Error> {
self.inner.process(fuzzer, state, executor)
@ -371,11 +375,10 @@ where
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> ProgressReporter<S> for SimpleRestartingEventManager<I, MT, S, SP>
impl<I, MT, S, SHM, SP> ProgressReporter<S> for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
I: Debug,
MT: Monitor,
SP: ShMemProvider,
S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor,
{
fn maybe_report_progress(
@ -392,25 +395,23 @@ where
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> HasEventManagerId for SimpleRestartingEventManager<I, MT, S, SP>
where
SP: ShMemProvider,
{
impl<I, MT, S, SHM, SP> HasEventManagerId for SimpleRestartingEventManager<I, MT, S, SHM, SP> {
fn mgr_id(&self) -> EventManagerId {
self.inner.mgr_id()
}
}
#[cfg(feature = "std")]
impl<I, MT, S, SP> SimpleRestartingEventManager<I, MT, S, SP>
impl<I, MT, S, SHM, SP> SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
I: Debug,
MT: Monitor,
S: Stoppable,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Creates a new [`SimpleEventManager`].
fn launched(monitor: MT, staterestorer: StateRestorer<SP>) -> Self {
fn launched(monitor: MT, staterestorer: StateRestorer<SHM, SP>) -> Self {
Self {
staterestorer,
inner: SimpleEventManager::new(monitor),
@ -429,10 +430,10 @@ where
let mut staterestorer = if std::env::var(_ENV_FUZZER_SENDER).is_err() {
// First, create a place to store state in, for restarts.
#[cfg(unix)]
let staterestorer: StateRestorer<SP> =
let staterestorer: StateRestorer<SHM, SP> =
StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?);
#[cfg(not(unix))]
let staterestorer: StateRestorer<SP> =
let staterestorer: StateRestorer<SHM, SP> =
StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?);
//let staterestorer = { LlmpSender::new(shmem_provider.clone(), 0, false)? };

View File

@ -25,7 +25,7 @@ use libafl_bolts::os::{fork, ForkResult};
use libafl_bolts::{
core_affinity::CoreId,
os::CTRL_C_EXIT,
shmem::{ShMemProvider, StdShMemProvider},
shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider},
staterestore::StateRestorer,
tuples::tuple_list,
ClientId,
@ -819,24 +819,20 @@ impl<EMH, I, S> HasEventManagerId for TcpEventManager<EMH, I, S> {
/// A manager that can restart on the fly, storing states in-between (in `on_restart`)
#[derive(Debug)]
pub struct TcpRestartingEventManager<EMH, I, S, SP>
where
SP: ShMemProvider,
{
pub struct TcpRestartingEventManager<EMH, I, S, SHM, SP> {
/// The embedded TCP event manager
tcp_mgr: TcpEventManager<EMH, I, S>,
/// The staterestorer to serialize the state for the next runner
staterestorer: StateRestorer<SP>,
staterestorer: StateRestorer<SHM, SP>,
/// Decide if the state restorer must save the serialized state
save_state: bool,
}
impl<EMH, I, S, SP> ProgressReporter<S> for TcpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> ProgressReporter<S> for TcpRestartingEventManager<EMH, I, S, SHM, SP>
where
EMH: EventManagerHooksTuple<I, S>,
S: HasMetadata + HasExecutions + HasLastReportTime + MaybeHasClientPerfMonitor,
I: Serialize,
SP: ShMemProvider,
{
fn maybe_report_progress(
&mut self,
@ -851,11 +847,10 @@ where
}
}
impl<EMH, I, S, SP> EventFirer<I, S> for TcpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> EventFirer<I, S> for TcpRestartingEventManager<EMH, I, S, SHM, SP>
where
EMH: EventManagerHooksTuple<I, S>,
I: Serialize,
SP: ShMemProvider,
{
fn should_send(&self) -> bool {
self.tcp_mgr.should_send()
@ -871,30 +866,32 @@ where
}
}
impl<EMH, I, S, SP> ManagerExit for TcpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> ManagerExit for TcpRestartingEventManager<EMH, I, S, SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// The tcp client needs to wait until a broker mapped all pages, before shutting down.
/// Otherwise, the OS may already have removed the shared maps,
#[inline]
fn await_restart_safe(&mut self) {
self.tcp_mgr.await_restart_safe();
}
fn send_exiting(&mut self) -> Result<(), Error> {
self.staterestorer.send_exiting();
// Also inform the broker that we are about to exit.
// This way, the broker can clean up the pages, and eventually exit.
self.tcp_mgr.send_exiting()
}
/// The tcp client needs to wait until a broker mapped all pages, before shutting down.
/// Otherwise, the OS may already have removed the shared maps,
#[inline]
fn await_restart_safe(&mut self) {
self.tcp_mgr.await_restart_safe();
}
}
impl<EMH, I, S, SP> EventRestarter<S> for TcpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> EventRestarter<S> for TcpRestartingEventManager<EMH, I, S, SHM, SP>
where
EMH: EventManagerHooksTuple<I, S>,
S: HasExecutions + HasCurrentStageId + Serialize,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner.
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> {
@ -913,7 +910,8 @@ where
}
}
impl<E, EMH, I, S, SP, Z> EventProcessor<E, S, Z> for TcpRestartingEventManager<EMH, I, S, SP>
impl<E, EMH, I, S, SHM, SP, Z> EventProcessor<E, S, Z>
for TcpRestartingEventManager<EMH, I, S, SHM, SP>
where
E: HasObservers + Executor<TcpEventManager<EMH, I, S>, I, S, Z>,
for<'a> E::Observers: Deserialize<'a>,
@ -926,7 +924,8 @@ where
+ HasSolutions<I>
+ HasCurrentTestcase<I>
+ Stoppable,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
Z: ExecutionProcessor<TcpEventManager<EMH, I, S>, I, E::Observers, S>
+ EvaluatorObservers<E, TcpEventManager<EMH, I, S>, I, S>,
{
@ -939,10 +938,7 @@ where
}
}
impl<EMH, I, S, SP> HasEventManagerId for TcpRestartingEventManager<EMH, I, S, SP>
where
SP: ShMemProvider,
{
impl<EMH, I, S, SHM, SP> HasEventManagerId for TcpRestartingEventManager<EMH, I, S, SHM, SP> {
fn mgr_id(&self) -> EventManagerId {
self.tcp_mgr.mgr_id()
}
@ -954,13 +950,12 @@ const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER";
/// The tcp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages)
const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT";
impl<EMH, I, S, SP> TcpRestartingEventManager<EMH, I, S, SP>
impl<EMH, I, S, SHM, SP> TcpRestartingEventManager<EMH, I, S, SHM, SP>
where
EMH: EventManagerHooksTuple<I, S>,
SP: ShMemProvider,
{
/// Create a new runner, the executed child doing the actual fuzzing.
pub fn new(tcp_mgr: TcpEventManager<EMH, I, S>, staterestorer: StateRestorer<SP>) -> Self {
pub fn new(tcp_mgr: TcpEventManager<EMH, I, S>, staterestorer: StateRestorer<SHM, SP>) -> Self {
Self {
tcp_mgr,
staterestorer,
@ -971,7 +966,7 @@ where
/// Create a new runner specifying if it must save the serialized state on restart.
pub fn with_save_state(
tcp_mgr: TcpEventManager<EMH, I, S>,
staterestorer: StateRestorer<SP>,
staterestorer: StateRestorer<SHM, SP>,
save_state: bool,
) -> Self {
Self {
@ -982,12 +977,12 @@ where
}
/// Get the staterestorer
pub fn staterestorer(&self) -> &StateRestorer<SP> {
pub fn staterestorer(&self) -> &StateRestorer<SHM, SP> {
&self.staterestorer
}
/// Get the staterestorer (mutable)
pub fn staterestorer_mut(&mut self) -> &mut StateRestorer<SP> {
pub fn staterestorer_mut(&mut self) -> &mut StateRestorer<SHM, SP> {
&mut self.staterestorer
}
}
@ -1018,7 +1013,7 @@ pub fn setup_restarting_mgr_tcp<I, MT, S>(
) -> Result<
(
Option<S>,
TcpRestartingEventManager<(), I, S, StdShMemProvider>,
TcpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>,
),
Error,
>
@ -1049,12 +1044,7 @@ where
/// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The
/// `restarter` will start a new process each time the child crashes or times out.
#[derive(TypedBuilder, Debug)]
pub struct TcpRestartingMgr<EMH, I, MT, S, SP>
where
MT: Monitor,
S: DeserializeOwned,
SP: ShMemProvider + 'static,
{
pub struct TcpRestartingMgr<EMH, I, MT, S, SP> {
/// The shared memory provider to use for the broker or client spawned by the restarting
/// manager.
shmem_provider: SP,
@ -1095,7 +1085,6 @@ where
EMH: EventManagerHooksTuple<I, S> + Copy + Clone,
I: Input,
MT: Monitor + Clone,
SP: ShMemProvider,
S: HasExecutions
+ HasMetadata
+ HasImported
@ -1103,11 +1092,18 @@ where
+ HasCurrentTestcase<I>
+ DeserializeOwned
+ Stoppable,
SP: ShMemProvider,
{
/// Launch the restarting manager
pub fn launch(
&mut self,
) -> Result<(Option<S>, TcpRestartingEventManager<EMH, I, S, SP>), Error> {
) -> Result<
(
Option<S>,
TcpRestartingEventManager<EMH, I, S, SP::ShMem, SP>,
),
Error,
> {
// We start ourself as child process to actually fuzz
let (staterestorer, _new_shmem_provider, core_id) = if env::var(_ENV_FUZZER_SENDER).is_err()
{
@ -1185,11 +1181,11 @@ where
// First, create a channel from the current fuzzer to the next to store state between restarts.
#[cfg(unix)]
let staterestorer: StateRestorer<SP> =
let staterestorer: StateRestorer<SP::ShMem, SP> =
StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?);
#[cfg(not(unix))]
let staterestorer: StateRestorer<SP> =
let staterestorer: StateRestorer<SP::ShMem, SP> =
StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?);
// Store the information to a map.
staterestorer.write_to_env(_ENV_FUZZER_SENDER)?;

View File

@ -22,7 +22,7 @@ use libafl_bolts::{
fs::{get_unique_std_input_file, InputFile},
os::{dup2, pipes::Pipe},
ownedref::OwnedSlice,
shmem::{ShMem, ShMemProvider, UnixShMemProvider},
shmem::{ShMem, ShMemProvider, UnixShMem, UnixShMemProvider},
tuples::{Handle, Handled, MatchNameRef, Prepend, RefIndexable},
AsSlice, AsSliceMut, Truncate,
};
@ -606,10 +606,7 @@ impl Forkserver {
///
/// Shared memory feature is also available, but you have to set things up in your code.
/// Please refer to AFL++'s docs. <https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md>
pub struct ForkserverExecutor<I, OT, S, SP, TC>
where
SP: ShMemProvider,
{
pub struct ForkserverExecutor<I, OT, S, SHM, TC> {
target: OsString,
args: Vec<OsString>,
input_file: InputFile,
@ -617,7 +614,7 @@ where
uses_shmem_testcase: bool,
forkserver: Forkserver,
observers: OT,
map: Option<SP::ShMem>,
map: Option<SHM>,
phantom: PhantomData<(I, S)>,
map_size: Option<usize>,
min_input_size: usize,
@ -628,11 +625,11 @@ where
crash_exitcode: Option<i8>,
}
impl<I, OT, S, SP, TC> Debug for ForkserverExecutor<I, OT, S, SP, TC>
impl<I, OT, S, SHM, TC> Debug for ForkserverExecutor<I, OT, S, SHM, TC>
where
TC: Debug,
OT: Debug,
SP: ShMemProvider,
SHM: Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("ForkserverExecutor")
@ -648,7 +645,7 @@ where
}
}
impl ForkserverExecutor<(), (), (), UnixShMemProvider, ()> {
impl ForkserverExecutor<(), (), (), UnixShMem, ()> {
/// Builder for `ForkserverExecutor`
#[must_use]
pub fn builder(
@ -658,11 +655,11 @@ impl ForkserverExecutor<(), (), (), UnixShMemProvider, ()> {
}
}
impl<I, OT, S, SP, TC> ForkserverExecutor<I, OT, S, SP, TC>
impl<I, OT, S, SHM, TC> ForkserverExecutor<I, OT, S, SHM, TC>
where
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
TC: TargetBytesConverter<I>,
SHM: ShMem,
{
/// The `target` binary that's going to run.
pub fn target(&self) -> &OsString {
@ -827,9 +824,10 @@ pub struct ForkserverExecutorBuilder<'a, TC, SP> {
target_bytes_converter: TC,
}
impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP>
impl<'a, TC, SHM, SP> ForkserverExecutorBuilder<'a, TC, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Builds `ForkserverExecutor`.
/// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given.
@ -840,10 +838,9 @@ where
pub fn build<I, OT, S>(
mut self,
observers: OT,
) -> Result<ForkserverExecutor<I, OT, S, SP, TC>, Error>
) -> Result<ForkserverExecutor<I, OT, S, SHM, TC>, Error>
where
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
TC: TargetBytesConverter<I>,
{
let (forkserver, input_file, map) = self.build_helper()?;
@ -905,13 +902,12 @@ where
mut self,
mut map_observer: A,
other_observers: OT,
) -> Result<ForkserverExecutor<I, (A, OT), S, SP, TC>, Error>
) -> Result<ForkserverExecutor<I, (A, OT), S, SHM, TC>, Error>
where
A: Observer<I, S> + AsMut<MO>,
I: Input + HasTargetBytes,
MO: MapObserver + Truncate, // TODO maybe enforce Entry = u8 for the cov map
OT: ObserversTuple<I, S> + Prepend<MO>,
SP: ShMemProvider,
{
let (forkserver, input_file, map) = self.build_helper()?;
@ -965,10 +961,7 @@ where
}
#[expect(clippy::pedantic)]
fn build_helper(&mut self) -> Result<(Forkserver, InputFile, Option<SP::ShMem>), Error>
where
SP: ShMemProvider,
{
fn build_helper(&mut self) -> Result<(Forkserver, InputFile, Option<SHM>), Error> {
let input_filename = match &self.input_filename {
Some(name) => name.clone(),
None => {
@ -1042,7 +1035,7 @@ where
fn initialize_forkserver(
&mut self,
status: i32,
map: Option<&SP::ShMem>,
map: Option<&SHM>,
forkserver: &mut Forkserver,
) -> Result<(), Error> {
let keep = status;
@ -1140,7 +1133,7 @@ where
fn initialize_old_forkserver(
&mut self,
status: i32,
map: Option<&SP::ShMem>,
map: Option<&SHM>,
forkserver: &mut Forkserver,
) -> Result<(), Error> {
if status & FS_OPT_ENABLED == FS_OPT_ENABLED && status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE {
@ -1505,7 +1498,7 @@ impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, Unix
impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> {
/// Shmem provider for forkserver's shared memory testcase feature.
pub fn shmem_provider<SP: ShMemProvider>(
pub fn shmem_provider<SP>(
self,
shmem_provider: &'a mut SP,
) -> ForkserverExecutorBuilder<'a, TC, SP> {
@ -1577,12 +1570,12 @@ impl Default
}
}
impl<EM, I, OT, S, SP, TC, Z> Executor<EM, I, S, Z> for ForkserverExecutor<I, OT, S, SP, TC>
impl<EM, I, OT, S, SHM, TC, Z> Executor<EM, I, S, Z> for ForkserverExecutor<I, OT, S, SHM, TC>
where
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
S: HasExecutions,
TC: TargetBytesConverter<I>,
SHM: ShMem,
{
#[inline]
fn run_target(
@ -1596,25 +1589,21 @@ where
}
}
impl<I, OT, S, SP, TC> HasTimeout for ForkserverExecutor<I, OT, S, SP, TC>
where
SP: ShMemProvider,
{
#[inline]
fn set_timeout(&mut self, timeout: Duration) {
self.timeout = TimeSpec::from_duration(timeout);
}
impl<I, OT, S, SHM, TC> HasTimeout for ForkserverExecutor<I, OT, S, SHM, TC> {
#[inline]
fn timeout(&self) -> Duration {
self.timeout.into()
}
#[inline]
fn set_timeout(&mut self, timeout: Duration) {
self.timeout = TimeSpec::from_duration(timeout);
}
}
impl<I, OT, S, SP, TC> HasObservers for ForkserverExecutor<I, OT, S, SP, TC>
impl<I, OT, S, SHM, TC> HasObservers for ForkserverExecutor<I, OT, S, SHM, TC>
where
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
{
type Observers = OT;

View File

@ -74,6 +74,7 @@ where
/// # Safety
/// This function sets a bunch of raw pointers in global variables, reused in other parts of
/// the code.
// TODO: Remove EM and Z from function bound and add it to struct instead to avoid possible type confusion
#[inline]
pub unsafe fn enter_target<EM, Z>(
&mut self,

View File

@ -27,8 +27,8 @@ use crate::{
/// The process executor simply calls a target function, as mutable reference to a closure
/// The internal state of the executor is made available to the harness.
pub type StatefulInProcessExecutor<'a, H, I, OT, S, ES> =
StatefulGenericInProcessExecutor<H, &'a mut H, (), I, OT, S, ES>;
pub type StatefulInProcessExecutor<'a, ES, H, I, OT, S> =
StatefulGenericInProcessExecutor<ES, H, &'a mut H, (), I, OT, S>;
/// The process executor simply calls a target function, as boxed `FnMut` trait object
/// The internal state of the executor is made available to the harness.
@ -44,7 +44,7 @@ pub type OwnedInProcessExecutor<I, OT, S, ES> = StatefulGenericInProcessExecutor
/// The inmem executor simply calls a target function, then returns afterwards.
/// The harness can access the internal state of the executor.
pub struct StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES> {
pub struct StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S> {
/// The harness function, being executed for each fuzzing loop execution
harness_fn: HB,
/// The state used as argument of the harness
@ -54,7 +54,7 @@ pub struct StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES> {
phantom: PhantomData<(ES, *const H)>,
}
impl<H, HB, HT, I, OT, S, ES> Debug for StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES>
impl<H, HB, HT, I, OT, S, ES> Debug for StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S>
where
OT: Debug,
{
@ -67,7 +67,7 @@ where
}
impl<EM, H, HB, HT, I, OT, S, Z, ES> Executor<EM, I, S, Z>
for StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES>
for StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S>
where
H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized,
HB: BorrowMut<H>,
@ -99,7 +99,7 @@ where
}
impl<H, HB, HT, I, OT, S, ES> HasObservers
for StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES>
for StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S>
where
H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized,
HB: BorrowMut<H>,
@ -118,7 +118,7 @@ where
}
}
impl<'a, H, I, OT, S, ES> StatefulInProcessExecutor<'a, H, I, OT, S, ES>
impl<'a, H, I, OT, S, ES> StatefulInProcessExecutor<'a, ES, H, I, OT, S>
where
H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized,
OT: ObserversTuple<I, S>,
@ -224,7 +224,7 @@ where
}
}
impl<H, HB, HT, I, OT, S, ES> StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES> {
impl<H, HB, HT, I, OT, S, ES> StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S> {
/// The executor state given to the harness
pub fn exposed_executor_state(&self) -> &ES {
&self.exposed_executor_state
@ -236,7 +236,7 @@ impl<H, HB, HT, I, OT, S, ES> StatefulGenericInProcessExecutor<H, HB, HT, I, OT,
}
}
impl<H, HB, HT, I, OT, S, ES> StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES>
impl<H, HB, HT, I, OT, S, ES> StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S>
where
H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized,
HB: BorrowMut<H>,
@ -364,7 +364,7 @@ where
}
impl<H, HB, HT, I, OT, S, ES> HasInProcessHooks<I, S>
for StatefulGenericInProcessExecutor<H, HB, HT, I, OT, S, ES>
for StatefulGenericInProcessExecutor<ES, H, HB, HT, I, OT, S>
{
/// the timeout handler
#[inline]

View File

@ -32,7 +32,7 @@ use crate::{
};
/// Inner state of GenericInProcessExecutor-like structures.
pub struct GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z> {
pub struct GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z> {
pub(super) hooks: (InChildProcessHooks<I, S>, HT),
pub(super) shmem_provider: SP,
pub(super) observers: OT,
@ -40,10 +40,10 @@ pub struct GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z> {
pub(super) itimerspec: libc::itimerspec,
#[cfg(all(unix, not(target_os = "linux")))]
pub(super) itimerval: Itimerval,
pub(super) phantom: PhantomData<(I, S, EM, Z)>,
pub(super) phantom: PhantomData<(EM, I, S, Z)>,
}
impl<HT, I, OT, S, SP, EM, Z> Debug for GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z>
impl<EM, HT, I, OT, S, SP, Z> Debug for GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z>
where
HT: Debug,
OT: Debug,
@ -104,11 +104,11 @@ fn parse_itimerval(timeout: Duration) -> Itimerval {
}
}
impl<EM, HT, I, OT, S, SP, Z> GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z>
impl<EM, HT, I, OT, S, SP, Z> GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z>
where
HT: ExecutorHooksTuple<I, S>,
SP: ShMemProvider,
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
{
pub(super) unsafe fn pre_run_target_child(
&mut self,
@ -195,7 +195,7 @@ where
}
}
impl<HT, I, OT, S, SP, EM, Z> GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z>
impl<EM, HT, I, OT, S, SP, Z> GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z>
where
HT: ExecutorHooksTuple<I, S>,
OT: ObserversTuple<I, S>,
@ -284,8 +284,8 @@ where
}
}
impl<HT, I, OT, S, SP, EM, Z> HasObservers
for GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z>
impl<EM, HT, I, OT, S, SP, Z> HasObservers
for GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z>
{
type Observers = OT;

View File

@ -39,10 +39,10 @@ pub mod stateful;
///
/// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)).
/// Else panics can not be caught by `LibAFL`.
pub type InProcessForkExecutor<'a, H, I, OT, S, SP, EM, Z> =
GenericInProcessForkExecutor<'a, H, (), I, OT, S, SP, EM, Z>;
pub type InProcessForkExecutor<'a, EM, H, I, OT, S, SP, Z> =
GenericInProcessForkExecutor<'a, EM, H, (), I, OT, S, SP, Z>;
impl<'a, H, I, OT, S, SP, EM, Z> InProcessForkExecutor<'a, H, I, OT, S, SP, EM, Z>
impl<'a, H, I, OT, S, SP, EM, Z> InProcessForkExecutor<'a, EM, H, I, OT, S, SP, Z>
where
OT: ObserversTuple<I, S>,
{
@ -73,13 +73,13 @@ where
///
/// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)).
/// Else panics can not be caught by `LibAFL`.
pub struct GenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, EM, Z> {
pub struct GenericInProcessForkExecutor<'a, EM, H, HT, I, OT, S, SP, Z> {
harness_fn: &'a mut H,
inner: GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z>,
inner: GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z>,
}
impl<H, HT, I, OT, S, SP, EM, Z> Debug
for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z>
for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SP, Z>
where
HT: Debug,
OT: Debug,
@ -103,13 +103,13 @@ where
}
impl<EM, H, HT, I, OT, S, SP, Z> Executor<EM, I, S, Z>
for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z>
for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SP, Z>
where
H: FnMut(&I) -> ExitKind + Sized,
S: HasExecutions,
SP: ShMemProvider,
HT: ExecutorHooksTuple<I, S>,
OT: ObserversTuple<I, S>,
S: HasExecutions,
SP: ShMemProvider,
{
#[inline]
fn run_target(
@ -141,7 +141,7 @@ where
}
}
impl<'a, H, HT, I, OT, S, SP, EM, Z> GenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, EM, Z>
impl<'a, H, HT, I, OT, S, SP, EM, Z> GenericInProcessForkExecutor<'a, EM, H, HT, I, OT, S, SP, Z>
where
HT: ExecutorHooksTuple<I, S>,
OT: ObserversTuple<I, S>,
@ -187,7 +187,7 @@ where {
}
impl<H, HT, I, OT, S, SP, EM, Z> HasObservers
for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z>
for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SP, Z>
{
type Observers = OT;
#[inline]

View File

@ -4,7 +4,6 @@
//! The harness can access internal state.
use core::{
fmt::{self, Debug, Formatter},
marker::PhantomData,
time::Duration,
};
@ -25,12 +24,13 @@ use crate::{
};
/// The `StatefulInProcessForkExecutor` with no user hooks
pub type StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, ES, EM, Z> =
StatefulGenericInProcessForkExecutor<'a, H, (), I, OT, S, SP, ES, EM, Z>;
pub type StatefulInProcessForkExecutor<'a, EM, ES, H, I, OT, S, SP, Z> =
StatefulGenericInProcessForkExecutor<'a, EM, ES, H, (), I, OT, S, SP, Z>;
impl<'a, H, I, OT, S, SP, ES, EM, Z> StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, ES, EM, Z>
impl<'a, H, I, OT, S, SP, ES, EM, Z> StatefulInProcessForkExecutor<'a, EM, ES, H, I, OT, S, SP, Z>
where
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
{
#[expect(clippy::too_many_arguments)]
/// The constructor for `InProcessForkExecutor`
@ -59,18 +59,17 @@ where
}
/// [`StatefulGenericInProcessForkExecutor`] is an executor that forks the current process before each execution. Harness can access some internal state.
pub struct StatefulGenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, ES, EM, Z> {
pub struct StatefulGenericInProcessForkExecutor<'a, EM, ES, H, HT, I, OT, S, SP, Z> {
/// The harness function, being executed for each fuzzing loop execution
harness_fn: &'a mut H,
/// The state used as argument of the harness
pub exposed_executor_state: ES,
/// Inner state of the executor
pub inner: GenericInProcessForkExecutorInner<HT, I, OT, S, SP, EM, Z>,
phantom: PhantomData<ES>,
pub inner: GenericInProcessForkExecutorInner<EM, HT, I, OT, S, SP, Z>,
}
impl<H, HT, I, OT, S, SP, ES, EM, Z> Debug
for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z>
for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SP, Z>
where
HT: Debug,
OT: Debug,
@ -94,7 +93,7 @@ where
}
impl<EM, H, HT, I, OT, S, SP, Z, ES> Executor<EM, I, S, Z>
for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z>
for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SP, Z>
where
H: FnMut(&mut ES, &I) -> ExitKind + Sized,
HT: ExecutorHooksTuple<I, S>,
@ -133,7 +132,7 @@ where
}
impl<'a, H, HT, I, OT, S, SP, ES, EM, Z>
StatefulGenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, ES, EM, Z>
StatefulGenericInProcessForkExecutor<'a, EM, ES, H, HT, I, OT, S, SP, Z>
where
HT: ExecutorHooksTuple<I, S>,
OT: ObserversTuple<I, S>,
@ -163,7 +162,6 @@ where
timeout,
shmem_provider,
)?,
phantom: PhantomData,
})
}
@ -181,7 +179,7 @@ where
}
impl<H, HT, I, OT, S, SP, ES, EM, Z> HasObservers
for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z>
for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SP, Z>
{
type Observers = OT;

View File

@ -219,7 +219,7 @@ impl<R: Read> MessageFileReader<R> {
/// A `MessageFileWriter` writes a stream of [`SymExpr`] to any [`Write`]. For each written expression, it returns
/// a [`SymExprRef`] which should be used to refer back to it.
pub struct MessageFileWriter<W: Write> {
pub struct MessageFileWriter<W> {
id_counter: usize,
writer: W,
writer_start_position: u64,
@ -396,7 +396,7 @@ impl<W: Write + Seek> MessageFileWriter<W> {
}
}
use libafl_bolts::shmem::{ShMem, ShMemCursor, ShMemProvider, StdShMemProvider};
use libafl_bolts::shmem::{ShMem, ShMemCursor, ShMemProvider, StdShMem, StdShMemProvider};
/// The default environment variable name to use for the shared memory used by the concolic tracing
pub const DEFAULT_ENV_NAME: &str = "SHARED_MEMORY_MESSAGES";
@ -439,14 +439,17 @@ impl<'buffer> MessageFileReader<Cursor<&'buffer [u8]>> {
}
}
impl<T: ShMem> MessageFileWriter<ShMemCursor<T>> {
impl<SHM> MessageFileWriter<ShMemCursor<SHM>>
where
SHM: ShMem,
{
/// Creates a new `MessageFileWriter` from the given [`ShMemCursor`].
pub fn from_shmem(shmem: T) -> io::Result<Self> {
pub fn from_shmem(shmem: SHM) -> io::Result<Self> {
Self::from_writer(ShMemCursor::new(shmem))
}
}
impl MessageFileWriter<ShMemCursor<<StdShMemProvider as ShMemProvider>::ShMem>> {
impl MessageFileWriter<ShMemCursor<StdShMem>> {
/// Creates a new `MessageFileWriter` by reading a [`ShMem`] from the given environment variable.
pub fn from_stdshmem_env_with_name(env_name: impl AsRef<str>) -> io::Result<Self> {
Self::from_shmem(
@ -464,8 +467,7 @@ impl MessageFileWriter<ShMemCursor<<StdShMemProvider as ShMemProvider>::ShMem>>
}
/// A writer that will write messages to a shared memory buffer.
pub type StdShMemMessageFileWriter =
MessageFileWriter<ShMemCursor<<StdShMemProvider as ShMemProvider>::ShMem>>;
pub type StdShMemMessageFileWriter<SHM> = MessageFileWriter<ShMemCursor<SHM>>;
#[cfg(test)]
mod serialization_tests {

View File

@ -7,7 +7,12 @@ use alloc::{
use core::{marker::PhantomData, time::Duration};
use std::path::{Path, PathBuf};
use libafl_bolts::{current_time, fs::find_new_files_rec, shmem::ShMemProvider, Named};
use libafl_bolts::{
current_time,
fs::find_new_files_rec,
shmem::{ShMem, ShMemProvider},
Named,
};
use serde::{Deserialize, Serialize};
use crate::{
@ -219,14 +224,12 @@ impl SyncFromBrokerMetadata {
/// A stage that loads testcases from disk to sync with other fuzzers such as AFL++
#[derive(Debug)]
pub struct SyncFromBrokerStage<I, IC, ICB, S, SP>
where
SP: ShMemProvider,
{
client: LlmpEventConverter<I, IC, ICB, S, SP>,
pub struct SyncFromBrokerStage<I, IC, ICB, S, SHM, SP> {
client: LlmpEventConverter<I, IC, ICB, S, SHM, SP>,
}
impl<E, EM, I, IC, ICB, DI, S, SP, Z> Stage<E, EM, S, Z> for SyncFromBrokerStage<I, IC, ICB, S, SP>
impl<E, EM, I, IC, ICB, DI, S, SHM, SP, Z> Stage<E, EM, S, Z>
for SyncFromBrokerStage<I, IC, ICB, S, SHM, SP>
where
DI: Input,
EM: EventFirer<I, S>,
@ -242,7 +245,8 @@ where
+ HasCurrentTestcase<I>
+ Stoppable
+ MaybeHasClientPerfMonitor,
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
Z: EvaluatorObservers<E, EM, I, S> + ExecutionProcessor<EM, I, E::Observers, S>,
{
#[inline]
@ -316,13 +320,10 @@ where
}
}
impl<I, IC, ICB, S, SP> SyncFromBrokerStage<I, IC, ICB, S, SP>
where
SP: ShMemProvider,
{
impl<I, IC, ICB, S, SHM, SP> SyncFromBrokerStage<I, IC, ICB, S, SHM, SP> {
/// Creates a new [`SyncFromBrokerStage`]
#[must_use]
pub fn new(client: LlmpEventConverter<I, IC, ICB, S, SP>) -> Self {
pub fn new(client: LlmpEventConverter<I, IC, ICB, S, SHM, SP>) -> Self {
Self { client }
}
}

View File

@ -113,13 +113,13 @@ impl<SP> Default for LlmpExampleHook<SP> {
}
#[cfg(all(feature = "std", not(target_os = "haiku")))]
impl<SP> LlmpHook<SP> for LlmpExampleHook<SP>
impl<SHM, SP> LlmpHook<SHM, SP> for LlmpExampleHook<SP>
where
SP: ShMemProvider + 'static,
SP: ShMemProvider<ShMem = SHM> + 'static,
{
fn on_new_message(
&mut self,
_broker_inner: &mut LlmpBrokerInner<SP>,
_broker_inner: &mut LlmpBrokerInner<SHM, SP>,
client_id: ClientId,
msg_tag: &mut Tag,
_msg_flags: &mut Flags,

View File

@ -251,6 +251,7 @@ mod linux {
use super::CoreId;
use crate::Error;
#[allow(trivial_numeric_casts)]
pub fn get_core_ids() -> Result<Vec<CoreId>, Error> {
let full_set = get_affinity_mask()?;
let mut core_ids: Vec<CoreId> = Vec::new();

View File

@ -707,25 +707,23 @@ impl LlmpMsg {
/// An Llmp instance
#[derive(Debug)]
pub enum LlmpConnection<HT, SP>
where
SP: ShMemProvider,
{
pub enum LlmpConnection<HT, SHM, SP> {
/// A broker and a thread using this tcp background thread
IsBroker {
/// The [`LlmpBroker`] of this [`LlmpConnection`].
broker: LlmpBroker<HT, SP>,
broker: LlmpBroker<HT, SHM, SP>,
},
/// A client, connected to the port
IsClient {
/// The [`LlmpClient`] of this [`LlmpConnection`].
client: LlmpClient<SP>,
client: LlmpClient<SHM, SP>,
},
}
impl<SP> LlmpConnection<(), SP>
impl<SHM, SP> LlmpConnection<(), SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
#[cfg(feature = "std")]
/// Creates either a broker, if the tcp port is not bound, or a client, connected to this port.
@ -776,10 +774,11 @@ where
}
}
impl<MT, SP> LlmpConnection<MT, SP>
impl<MT, SHM, SP> LlmpConnection<MT, SHM, SP>
where
MT: LlmpHookTuple<SP>,
SP: ShMemProvider,
MT: LlmpHookTuple<SHM, SP>,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Describe this in a reproducible fashion, if it's a client
pub fn describe(&self) -> Result<LlmpClientDescription, Error> {
@ -793,7 +792,7 @@ where
pub fn existing_client_from_description(
shmem_provider: SP,
description: &LlmpClientDescription,
) -> Result<LlmpConnection<MT, SP>, Error> {
) -> Result<LlmpConnection<MT, SHM, SP>, Error> {
Ok(LlmpConnection::IsClient {
client: LlmpClient::existing_client_from_description(shmem_provider, description)?,
})
@ -891,23 +890,20 @@ struct LlmpClientExitInfo {
/// Sending end on a (unidirectional) sharedmap channel
#[derive(Debug)]
pub struct LlmpSender<SP>
where
SP: ShMemProvider,
{
pub struct LlmpSender<SHM, SP> {
/// ID of this sender.
id: ClientId,
/// Ref to the last message this sender sent on the last page.
/// If null, a new page (just) started.
last_msg_sent: *const LlmpMsg,
/// A vec of page wrappers, each containing an initialized [`ShMem`]
out_shmems: Vec<LlmpSharedMap<SP::ShMem>>,
out_shmems: Vec<LlmpSharedMap<SHM>>,
/// A vec of pages that we previously used, but that have served its purpose
/// (no potential receivers are left).
/// Instead of freeing them, we keep them around to potentially reuse them later,
/// if they are still large enough.
/// This way, the OS doesn't have to spend time zeroing pages, and getting rid of our old pages
unused_shmem_cache: Vec<LlmpSharedMap<SP::ShMem>>,
unused_shmem_cache: Vec<LlmpSharedMap<SHM>>,
/// If true, pages will never be pruned.
/// The broker uses this feature.
/// By keeping the message history around,
@ -920,9 +916,10 @@ where
}
/// An actor on the sending part of the shared map
impl<SP> LlmpSender<SP>
impl<SHM, SP> LlmpSender<SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Create a new [`LlmpSender`] using a given [`ShMemProvider`], and `id`.
/// If `keep_pages_forever` is `true`, `ShMem` will never be freed.
@ -1068,7 +1065,7 @@ where
/// else reattach will get a new, empty page, from the OS, or fail.
pub fn on_existing_shmem(
shmem_provider: SP,
current_out_shmem: SP::ShMem,
current_out_shmem: SHM,
last_msg_sent_offset: Option<u64>,
) -> Result<Self, Error> {
let mut out_shmem = LlmpSharedMap::existing(current_out_shmem);
@ -1307,7 +1304,7 @@ where
&mut self,
sender_id: ClientId,
next_min_shmem_size: usize,
) -> Result<LlmpSharedMap<<SP>::ShMem>, Error> {
) -> Result<LlmpSharedMap<SHM>, Error> {
// Find a shared map that has been released to reuse, from which all receivers left / finished reading.
let cached_shmem = self
.unused_shmem_cache
@ -1586,10 +1583,7 @@ where
/// Receiving end on a (unidirectional) sharedmap channel
#[derive(Debug)]
pub struct LlmpReceiver<SP>
where
SP: ShMemProvider,
{
pub struct LlmpReceiver<SHM, SP> {
/// Client Id of this receiver
id: ClientId,
/// Pointer to the last message received
@ -1600,15 +1594,16 @@ where
/// The shmem provider
shmem_provider: SP,
/// current page. After EOP, this gets replaced with the new one
current_recv_shmem: LlmpSharedMap<SP::ShMem>,
current_recv_shmem: LlmpSharedMap<SHM>,
/// Caches the highest msg id we've seen so far
highest_msg_id: MessageId,
}
/// Receiving end of an llmp channel
impl<SP> LlmpReceiver<SP>
impl<SHM, SP> LlmpReceiver<SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Reattach to a vacant `recv_shmem`, to with a previous sender stored the information in an env before.
#[cfg(feature = "std")]
@ -1634,7 +1629,7 @@ where
/// else reattach will get a new, empty page, from the OS, or fail.
pub fn on_existing_shmem(
shmem_provider: SP,
current_sender_shmem: SP::ShMem,
current_sender_shmem: SHM,
last_msg_recvd_offset: Option<u64>,
) -> Result<Self, Error> {
let mut current_recv_shmem = LlmpSharedMap::existing(current_sender_shmem);
@ -1897,10 +1892,7 @@ where
/// A page wrapper
#[derive(Clone, Debug)]
pub struct LlmpSharedMap<SHM>
where
SHM: ShMem,
{
pub struct LlmpSharedMap<SHM> {
/// Shmem containg the actual (unsafe) page,
/// shared between one `LlmpSender` and one `LlmpReceiver`
shmem: SHM,
@ -2050,18 +2042,15 @@ where
/// The inner state of [`LlmpBroker`]
#[derive(Debug)]
pub struct LlmpBrokerInner<SP>
where
SP: ShMemProvider,
{
pub struct LlmpBrokerInner<SHM, SP> {
/// Broadcast map from broker to all clients
llmp_out: LlmpSender<SP>,
llmp_out: LlmpSender<SHM, SP>,
/// Users of Llmp can add message handlers in the broker.
/// This allows us to intercept messages right in the broker.
/// This keeps the out map clean.
/// The backing values of `llmp_clients` [`ClientId`]s will always be sorted (but not gapless)
/// Make sure to always increase `num_clients_seen` when pushing a new [`LlmpReceiver`] to `llmp_clients`!
llmp_clients: Vec<LlmpReceiver<SP>>,
llmp_clients: Vec<LlmpReceiver<SHM, SP>>,
/// The own listeners we spawned via `launch_listener` or `crate_attach_to_tcp`.
/// Listeners will be ignored for `exit_cleanly_after` and they are never considered to have timed out.
listeners: Vec<ClientId>,
@ -2078,12 +2067,9 @@ where
/// The broker (node 0)
#[derive(Debug)]
pub struct LlmpBroker<HT, SP>
where
SP: ShMemProvider,
{
pub struct LlmpBroker<HT, SHM, SP> {
/// The broker
inner: LlmpBrokerInner<SP>,
inner: LlmpBrokerInner<SHM, SP>,
/// Llmp hooks
hooks: HT,
}
@ -2118,10 +2104,11 @@ pub trait Broker {
fn nb_listeners(&self) -> usize;
}
impl<HT, SP> Broker for LlmpBroker<HT, SP>
impl<HT, SHM, SP> Broker for LlmpBroker<HT, SHM, SP>
where
HT: LlmpHookTuple<SP>,
SP: ShMemProvider,
HT: LlmpHookTuple<SHM, SP>,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn is_shutting_down(&self) -> bool {
self.inner.is_shutting_down()
@ -2215,15 +2202,12 @@ impl CtrlHandler for LlmpShutdownSignalHandler {
}
/// Llmp hooks
pub trait LlmpHook<SP>
where
SP: ShMemProvider,
{
pub trait LlmpHook<SHM, SP> {
/// Hook called whenever a new message is received. It receives an llmp message as input, does
/// something with it (read, transform, forward, etc...) and decides to discard it or not.
fn on_new_message(
&mut self,
broker_inner: &mut LlmpBrokerInner<SP>,
broker_inner: &mut LlmpBrokerInner<SHM, SP>,
client_id: ClientId,
msg_tag: &mut Tag,
msg_flags: &mut Flags,
@ -2238,14 +2222,11 @@ where
}
/// A tuple of Llmp hooks. They are evaluated sequentially, and returns if one decides to filter out the evaluated message.
pub trait LlmpHookTuple<SP>
where
SP: ShMemProvider,
{
pub trait LlmpHookTuple<SHM, SP> {
/// Call all hook callbacks on new message.
fn on_new_message_all(
&mut self,
inner: &mut LlmpBrokerInner<SP>,
inner: &mut LlmpBrokerInner<SHM, SP>,
client_id: ClientId,
msg_tag: &mut Tag,
msg_flags: &mut Flags,
@ -2257,13 +2238,10 @@ where
fn on_timeout_all(&mut self) -> Result<(), Error>;
}
impl<SP> LlmpHookTuple<SP> for ()
where
SP: ShMemProvider,
{
impl<SHM, SP> LlmpHookTuple<SHM, SP> for () {
fn on_new_message_all(
&mut self,
_inner: &mut LlmpBrokerInner<SP>,
_inner: &mut LlmpBrokerInner<SHM, SP>,
_client_id: ClientId,
_msg_tag: &mut Tag,
_msg_flags: &mut Flags,
@ -2278,15 +2256,14 @@ where
}
}
impl<Head, Tail, SP> LlmpHookTuple<SP> for (Head, Tail)
impl<Head, Tail, SHM, SP> LlmpHookTuple<SHM, SP> for (Head, Tail)
where
Head: LlmpHook<SP>,
Tail: LlmpHookTuple<SP>,
SP: ShMemProvider,
Head: LlmpHook<SHM, SP>,
Tail: LlmpHookTuple<SHM, SP>,
{
fn on_new_message_all(
&mut self,
inner: &mut LlmpBrokerInner<SP>,
inner: &mut LlmpBrokerInner<SHM, SP>,
client_id: ClientId,
msg_tag: &mut Tag,
msg_flags: &mut Flags,
@ -2315,15 +2292,12 @@ where
}
}
impl<SP> LlmpBroker<(), SP>
where
SP: ShMemProvider,
{
impl<SHM, SP> LlmpBroker<(), SHM, SP> {
/// Add hooks to a hookless [`LlmpBroker`].
/// We do not support replacing hooks for now.
pub fn add_hooks<HT>(self, hooks: HT) -> LlmpBroker<HT, SP>
pub fn add_hooks<HT>(self, hooks: HT) -> LlmpBroker<HT, SHM, SP>
where
HT: LlmpHookTuple<SP>,
HT: LlmpHookTuple<SHM, SP>,
{
LlmpBroker {
inner: self.inner,
@ -2446,10 +2420,11 @@ impl Brokers {
}
}
impl<HT, SP> LlmpBroker<HT, SP>
impl<HT, SHM, SP> LlmpBroker<HT, SHM, SP>
where
HT: LlmpHookTuple<SP>,
SP: ShMemProvider,
HT: LlmpHookTuple<SHM, SP>,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Create and initialize a new [`LlmpBroker`], associated with some hooks.
pub fn new(shmem_provider: SP, hooks: HT) -> Result<Self, Error> {
@ -2496,12 +2471,12 @@ where
}
/// Get the inner state of the broker
pub fn inner(&self) -> &LlmpBrokerInner<SP> {
pub fn inner(&self) -> &LlmpBrokerInner<SHM, SP> {
&self.inner
}
/// Get the inner mutable state of the broker
pub fn inner_mut(&mut self) -> &mut LlmpBrokerInner<SP> {
pub fn inner_mut(&mut self) -> &mut LlmpBrokerInner<SHM, SP> {
&mut self.inner
}
@ -2829,9 +2804,10 @@ where
/// The broker forwards all messages to its own bus-like broadcast map.
/// It may intercept messages passing through.
impl<SP> LlmpBrokerInner<SP>
impl<SHM, SP> LlmpBrokerInner<SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Create and initialize a new [`LlmpBrokerInner`], associated with some hooks.
pub fn new(shmem_provider: SP) -> Result<Self, Error> {
@ -2917,7 +2893,7 @@ where
/// Will increase `num_clients_seen`.
/// The backing values of `llmp_clients` [`ClientId`]s will always be sorted (but not gapless)
/// returns the [`ClientId`] of the new client.
pub fn add_client(&mut self, mut client_receiver: LlmpReceiver<SP>) -> ClientId {
pub fn add_client(&mut self, mut client_receiver: LlmpReceiver<SHM, SP>) -> ClientId {
let id = self.peek_next_client_id();
client_receiver.id = id;
self.llmp_clients.push(client_receiver);
@ -2932,7 +2908,7 @@ where
/// Registers a new client for the given sharedmap str and size.
/// Returns the id of the new client in [`broker.client_shmem`]
pub fn register_client(&mut self, mut client_page: LlmpSharedMap<SP::ShMem>) -> ClientId {
pub fn register_client(&mut self, mut client_page: LlmpSharedMap<SHM>) -> ClientId {
// Tell the client it may unmap its initial allocated shmem page now.
// Since we now have a handle to it, it won't be umapped too early (only after we also unmap it)
client_page.mark_safe_to_unmap();
@ -3090,7 +3066,7 @@ where
/// Upon receiving this message, the broker should map the announced page and start tracking it for new messages.
#[cfg(feature = "std")]
fn announce_new_client(
sender: &mut LlmpSender<SP>,
sender: &mut LlmpSender<SHM, SP>,
shmem_description: &ShMemDescription,
) -> Result<(), Error> {
unsafe {
@ -3108,7 +3084,7 @@ where
/// Tell the broker to disconnect this client from it.
#[cfg(feature = "std")]
fn announce_client_exit(sender: &mut LlmpSender<SP>, client_id: u32) -> Result<(), Error> {
fn announce_client_exit(sender: &mut LlmpSender<SHM, SP>, client_id: u32) -> Result<(), Error> {
// # Safety
// No user-provided potentially unsafe parameters.
unsafe {
@ -3280,7 +3256,7 @@ where
mut stream: TcpStream,
request: &TcpRequest,
current_client_id: &mut ClientId,
sender: &mut LlmpSender<SP>,
sender: &mut LlmpSender<SHM, SP>,
broker_shmem_description: &ShMemDescription,
) {
match request {
@ -3451,21 +3427,19 @@ pub struct LlmpClientDescription {
/// Client side of LLMP
#[derive(Debug)]
pub struct LlmpClient<SP>
where
SP: ShMemProvider,
{
pub struct LlmpClient<SHM, SP> {
/// Outgoing channel to the broker
sender: LlmpSender<SP>,
sender: LlmpSender<SHM, SP>,
/// Incoming (broker) broadcast map
receiver: LlmpReceiver<SP>,
receiver: LlmpReceiver<SHM, SP>,
}
/// `n` clients connect to a broker. They share an outgoing map with the broker,
/// and get incoming messages from the shared broker bus
impl<SP> LlmpClient<SP>
impl<SHM, SP> LlmpClient<SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Reattach to a vacant client map.
/// It is essential, that the broker (or someone else) kept a pointer to the `out_shmem`
@ -3473,9 +3447,9 @@ where
#[allow(clippy::needless_pass_by_value)] // no longer necessary on nightly
pub fn on_existing_shmem(
shmem_provider: SP,
_current_out_shmem: SP::ShMem,
_current_out_shmem: SHM,
_last_msg_sent_offset: Option<u64>,
current_broker_shmem: SP::ShMem,
current_broker_shmem: SHM,
last_msg_recvd_offset: Option<u64>,
) -> Result<Self, Error> {
Ok(Self {
@ -3542,25 +3516,25 @@ where
/// Outgoing channel to the broker
#[must_use]
pub fn sender(&self) -> &LlmpSender<SP> {
pub fn sender(&self) -> &LlmpSender<SHM, SP> {
&self.sender
}
/// Outgoing channel to the broker (mut)
#[must_use]
pub fn sender_mut(&mut self) -> &mut LlmpSender<SP> {
pub fn sender_mut(&mut self) -> &mut LlmpSender<SHM, SP> {
&mut self.sender
}
/// Incoming (broker) broadcast map
#[must_use]
pub fn receiver(&self) -> &LlmpReceiver<SP> {
pub fn receiver(&self) -> &LlmpReceiver<SHM, SP> {
&self.receiver
}
/// Incoming (broker) broadcast map (mut)
#[must_use]
pub fn receiver_mut(&mut self) -> &mut LlmpReceiver<SP> {
pub fn receiver_mut(&mut self) -> &mut LlmpReceiver<SHM, SP> {
&mut self.receiver
}
@ -3588,7 +3562,7 @@ where
/// Creates a new [`LlmpClient`]
pub fn new(
mut shmem_provider: SP,
initial_broker_shmem: LlmpSharedMap<SP::ShMem>,
initial_broker_shmem: LlmpSharedMap<SHM>,
sender_id: ClientId,
) -> Result<Self, Error> {
Ok(Self {

View File

@ -11,6 +11,7 @@ use alloc::{
vec::Vec,
};
use core::{
fmt::Debug,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
};
@ -60,10 +61,7 @@ const AFL_SHMEM_SERVICE_STARTED: &str = "AFL_SHMEM_SERVICE_STARTED";
/// s out served shared maps, as used on Android.
#[derive(Debug)]
pub struct ServedShMemProvider<SP>
where
SP: ShMemProvider,
{
pub struct ServedShMemProvider<SP> {
stream: UnixStream,
inner: SP,
id: i32,
@ -76,17 +74,14 @@ where
/// [`ShMem`] that got served from a [`ShMemService`] via domain sockets and can now be used in this program.
/// It works around Android's lack of "proper" shared maps.
#[derive(Clone, Debug)]
pub struct ServedShMem<SH>
where
SH: ShMem,
{
inner: ManuallyDrop<SH>,
pub struct ServedShMem<SHM> {
inner: ManuallyDrop<SHM>,
server_fd: i32,
}
impl<SH> Deref for ServedShMem<SH>
impl<SHM> Deref for ServedShMem<SHM>
where
SH: ShMem,
SHM: Deref<Target = [u8]>,
{
type Target = [u8];
@ -95,18 +90,18 @@ where
}
}
impl<SH> DerefMut for ServedShMem<SH>
impl<SHM> DerefMut for ServedShMem<SHM>
where
SH: ShMem,
SHM: DerefMut<Target = [u8]>,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl<SH> ShMem for ServedShMem<SH>
impl<SHM> ShMem for ServedShMem<SHM>
where
SH: ShMem,
SHM: ShMem,
{
fn id(&self) -> ShMemId {
let client_id = self.inner.id();
@ -114,10 +109,7 @@ where
}
}
impl<SP> ServedShMemProvider<SP>
where
SP: ShMemProvider,
{
impl<SP> ServedShMemProvider<SP> {
/// Send a request to the server, and wait for a response
#[expect(clippy::similar_names)] // id and fd
fn send_receive(&mut self, request: ServedShMemRequest) -> Result<(i32, i32), Error> {
@ -290,18 +282,12 @@ pub enum ServedShMemRequest {
/// Client side communicating with the [`ShMemServer`]
#[derive(Debug)]
struct SharedShMemClient<SH>
where
SH: ShMem,
{
struct SharedShMemClient<SHM> {
stream: UnixStream,
maps: HashMap<i32, Vec<Rc<RefCell<SH>>>>,
maps: HashMap<i32, Vec<Rc<RefCell<SHM>>>>,
}
impl<SH> SharedShMemClient<SH>
where
SH: ShMem,
{
impl<SHM> SharedShMemClient<SHM> {
fn new(stream: UnixStream) -> Self {
Self {
stream,
@ -312,11 +298,8 @@ where
/// Response from Server to Client
#[derive(Debug)]
enum ServedShMemResponse<SP>
where
SP: ShMemProvider,
{
Mapping(Rc<RefCell<SP::ShMem>>),
enum ServedShMemResponse<SHM> {
Mapping(Rc<RefCell<SHM>>),
Id(i32),
RefCount(u32),
}
@ -332,22 +315,19 @@ enum ShMemServiceStatus {
/// The [`ShMemService`] is a service handing out [`ShMem`] pages via unix domain sockets.
/// It is mainly used and needed on Android.
#[derive(Debug, Clone)]
pub enum ShMemService<SP>
where
SP: ShMemProvider,
{
pub enum ShMemService<SP> {
/// A started service
Started {
/// The background thread
bg_thread: Arc<Mutex<ShMemServiceThread>>,
/// The pantom data
/// The phantom data
phantom: PhantomData<SP>,
},
/// A failed service
Failed {
/// The error message
err_msg: String,
/// The pantom data
/// The phantom data
phantom: PhantomData<SP>,
},
}
@ -412,7 +392,7 @@ where
let syncpair = Arc::new((Mutex::new(ShMemServiceStatus::Starting), Condvar::new()));
let childsyncpair = Arc::clone(&syncpair);
let join_handle = thread::spawn(move || {
let mut worker = match ServedShMemServiceWorker::<SP>::new() {
let mut worker = match ServedShMemServiceWorker::<SP::ShMem, SP>::new() {
Ok(worker) => worker,
Err(e) => {
// Make sure the parent processes can continue
@ -472,20 +452,18 @@ where
/// The struct for the worker, handling incoming requests for [`ShMem`].
#[expect(clippy::type_complexity)]
struct ServedShMemServiceWorker<SP>
where
SP: ShMemProvider,
{
struct ServedShMemServiceWorker<SHM, SP> {
provider: SP,
clients: HashMap<RawFd, SharedShMemClient<SP::ShMem>>,
clients: HashMap<RawFd, SharedShMemClient<SHM>>,
/// Maps from a pre-fork (parent) client id to its cloned maps.
forking_clients: HashMap<RawFd, HashMap<i32, Vec<Rc<RefCell<SP::ShMem>>>>>,
all_shmems: HashMap<i32, Weak<RefCell<SP::ShMem>>>,
forking_clients: HashMap<RawFd, HashMap<i32, Vec<Rc<RefCell<SHM>>>>>,
all_shmems: HashMap<i32, Weak<RefCell<SHM>>>,
}
impl<SP> ServedShMemServiceWorker<SP>
impl<SHM, SP> ServedShMemServiceWorker<SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Create a new [`ShMemService`]
fn new() -> Result<Self, Error> {
@ -497,7 +475,7 @@ where
})
}
fn upgrade_shmem_with_id(&mut self, description_id: i32) -> Rc<RefCell<SP::ShMem>> {
fn upgrade_shmem_with_id(&mut self, description_id: i32) -> Rc<RefCell<SHM>> {
self.all_shmems
.get_mut(&description_id)
.unwrap()
@ -507,7 +485,7 @@ where
}
/// Read and handle the client request, send the answer over unix fd.
fn handle_request(&mut self, client_id: RawFd) -> Result<ServedShMemResponse<SP>, Error> {
fn handle_request(&mut self, client_id: RawFd) -> Result<ServedShMemResponse<SHM>, Error> {
let request = self.read_request(client_id)?;
// log::trace!("got ashmem client: {}, request:{:?}", client_id, request);

View File

@ -190,7 +190,7 @@ where
/// # Safety
/// The shared memory needs to start with a valid object of type `T`.
/// Any use of this [`OwnedRef`] will dereference a pointer to the shared memory accordingly.
pub unsafe fn from_shmem<S: ShMem>(shmem: &mut S) -> Self {
pub unsafe fn from_shmem<SHM: ShMem>(shmem: &mut SHM) -> Self {
Self::from_ptr(shmem.as_mut_ptr_of().unwrap())
}
@ -325,7 +325,7 @@ where
/// # Safety
/// The shared memory needs to start with a valid object of type `T`.
/// Any use of this [`OwnedRefMut`] will dereference a pointer to the shared memory accordingly.
pub unsafe fn from_shmem<S: ShMem>(shmem: &mut S) -> Self {
pub unsafe fn from_shmem<SHM: ShMem>(shmem: &mut SHM) -> Self {
Self::from_mut_ptr(shmem.as_mut_ptr_of().unwrap())
}

View File

@ -32,25 +32,50 @@ pub use win32_shmem::{Win32ShMem, Win32ShMemProvider};
#[cfg(all(unix, feature = "std", not(target_os = "haiku")))]
use crate::os::pipes::Pipe;
#[cfg(all(feature = "std", unix, not(target_os = "haiku")))]
pub use crate::os::unix_shmem_server::{ServedShMemProvider, ShMemService};
pub use crate::os::unix_shmem_server::{ServedShMem, ServedShMemProvider, ShMemService};
use crate::Error;
/// The standard sharedmem provider
#[cfg(all(windows, feature = "std"))]
pub type StdShMemProvider = Win32ShMemProvider;
/// The standard sharedmem
#[cfg(all(windows, feature = "std"))]
pub type StdShMem = Win32ShMem;
/// The standard sharedmem
#[cfg(all(target_os = "android", feature = "std"))]
pub type StdShMem = RcShMem<
ServedShMem<unix_shmem::ashmem::AshmemShMem>,
ServedShMemProvider<unix_shmem::ashmem::AshmemShMemProvider>,
>;
/// The standard sharedmem provider
#[cfg(all(target_os = "android", feature = "std"))]
pub type StdShMemProvider =
RcShMemProvider<ServedShMemProvider<unix_shmem::ashmem::AshmemShMemProvider>>;
/// The standard sharedmem service
#[cfg(all(target_os = "android", feature = "std"))]
pub type StdShMemService = ShMemService<unix_shmem::ashmem::AshmemShMemProvider>;
/// The standard sharedmem
#[cfg(all(feature = "std", target_vendor = "apple"))]
pub type StdShMem = RcShMem<ServedShMem<MmapShMem>, ServedShMemProvider<MmapShMemProvider>>;
/// The standard sharedmem provider
#[cfg(all(feature = "std", target_vendor = "apple"))]
pub type StdShMemProvider = RcShMemProvider<ServedShMemProvider<MmapShMemProvider>>;
#[cfg(all(feature = "std", target_vendor = "apple"))]
/// The standard sharedmem service
pub type StdShMemService = ShMemService<MmapShMemProvider>;
/// The default [`ShMem`].
#[cfg(all(
feature = "std",
unix,
not(any(target_os = "android", target_vendor = "apple", target_os = "haiku"))
))]
pub type StdShMem = UnixShMem;
/// The default [`ShMemProvider`] for this os.
#[cfg(all(
feature = "std",
@ -392,15 +417,19 @@ impl Deref for NopShMem {
/// Useful if the `ShMemProvider` needs to keep local state.
#[cfg(feature = "alloc")]
#[derive(Debug, Clone, Default)]
pub struct RcShMem<T: ShMemProvider> {
internal: ManuallyDrop<T::ShMem>,
provider: Rc<RefCell<T>>,
pub struct RcShMem<SHM, SP>
where
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
internal: ManuallyDrop<SHM>,
provider: Rc<RefCell<SP>>,
}
#[cfg(feature = "alloc")]
impl<T> ShMem for RcShMem<T>
impl<SP> ShMem for RcShMem<SP::ShMem, SP>
where
T: ShMemProvider + Debug,
SP: ShMemProvider,
{
fn id(&self) -> ShMemId {
self.internal.id()
@ -408,9 +437,10 @@ where
}
#[cfg(feature = "alloc")]
impl<T> Deref for RcShMem<T>
impl<SHM, SP> Deref for RcShMem<SHM, SP>
where
T: ShMemProvider + Debug,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
type Target = [u8];
@ -420,9 +450,10 @@ where
}
#[cfg(feature = "alloc")]
impl<T> DerefMut for RcShMem<T>
impl<SHM, SP> DerefMut for RcShMem<SHM, SP>
where
T: ShMemProvider + Debug,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn deref_mut(&mut self) -> &mut [u8] {
&mut self.internal
@ -430,7 +461,11 @@ where
}
#[cfg(feature = "alloc")]
impl<T: ShMemProvider> Drop for RcShMem<T> {
impl<SHM, SP> Drop for RcShMem<SHM, SP>
where
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
fn drop(&mut self) {
self.provider.borrow_mut().release_shmem(&mut self.internal);
}
@ -441,10 +476,7 @@ impl<T: ShMemProvider> Drop for RcShMem<T> {
/// Useful if the `ShMemProvider` needs to keep local state.
#[derive(Debug, Clone)]
#[cfg(all(unix, feature = "std", not(target_os = "haiku")))]
pub struct RcShMemProvider<SP>
where
SP: ShMemProvider,
{
pub struct RcShMemProvider<SP> {
/// The wrapped [`ShMemProvider`].
internal: Rc<RefCell<SP>>,
/// A pipe the child uses to communicate progress to the parent after fork.
@ -457,15 +489,12 @@ where
parent_child_pipe: Option<Pipe>,
}
//#[cfg(all(unix, feature = "std"))]
//unsafe impl<SP: ShMemProvider> Send for RcShMemProvider<SP> {}
#[cfg(all(unix, feature = "std", not(target_os = "haiku")))]
impl<SP> ShMemProvider for RcShMemProvider<SP>
where
SP: ShMemProvider + Debug,
{
type ShMem = RcShMem<SP>;
type ShMem = RcShMem<SP::ShMem, SP>;
fn new() -> Result<Self, Error> {
Ok(Self {
@ -535,10 +564,7 @@ where
}
#[cfg(all(unix, feature = "std", not(target_os = "haiku")))]
impl<SP> RcShMemProvider<SP>
where
SP: ShMemProvider,
{
impl<SP> RcShMemProvider<SP> {
/// "set" the "latch"
/// (we abuse `pipes` as `semaphores`, as they don't need an additional shared mem region.)
fn pipe_set(pipe: &mut Option<Pipe>) -> Result<(), Error> {
@ -599,7 +625,7 @@ where
#[cfg(all(unix, feature = "std", not(target_os = "haiku")))]
impl<SP> Default for RcShMemProvider<SP>
where
SP: ShMemProvider + Debug,
SP: ShMemProvider,
{
fn default() -> Self {
Self::new().unwrap()
@ -607,10 +633,7 @@ where
}
#[cfg(all(unix, feature = "std", not(target_os = "haiku")))]
impl<SP> RcShMemProvider<ServedShMemProvider<SP>>
where
SP: ShMemProvider + Debug,
{
impl<SP> RcShMemProvider<ServedShMemProvider<SP>> {
/// Forward to `ServedShMemProvider::on_restart`
pub fn on_restart(&mut self) {
self.internal.borrow_mut().on_restart();
@ -1010,16 +1033,15 @@ pub mod unix_shmem {
impl CommonUnixShMem {
/// Create a new shared memory mapping, using shmget/shmat
#[expect(unused_qualifications)]
pub fn new(map_size: usize) -> Result<Self, Error> {
#[cfg(any(target_os = "solaris", target_os = "illumos"))]
const SHM_R: libc::c_int = 0o400;
const SHM_R: c_int = 0o400;
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
const SHM_R: libc::c_int = libc::SHM_R;
const SHM_R: c_int = libc::SHM_R;
#[cfg(any(target_os = "solaris", target_os = "illumos"))]
const SHM_W: libc::c_int = 0o200;
const SHM_W: c_int = 0o200;
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
const SHM_W: libc::c_int = libc::SHM_W;
const SHM_W: c_int = libc::SHM_W;
unsafe {
let os_id = shmget(
@ -1206,7 +1228,7 @@ pub mod unix_shmem {
//return Err(Error::unknown("Failed to set the ashmem mapping's name".to_string()));
//};
#[expect(trivial_numeric_casts)]
#[allow(trivial_numeric_casts)]
if ioctl(fd, ASHMEM_SET_SIZE as _, map_size) != 0 {
close(fd);
return Err(Error::unknown(
@ -1241,7 +1263,8 @@ pub mod unix_shmem {
pub fn shmem_from_id_and_size(id: ShMemId, map_size: usize) -> Result<Self, Error> {
unsafe {
let fd: i32 = id.to_string().parse().unwrap();
#[expect(trivial_numeric_casts, clippy::cast_sign_loss)]
#[allow(trivial_numeric_casts)]
#[expect(clippy::cast_sign_loss)]
if ioctl(fd, ASHMEM_GET_SIZE as _) as u32 as usize != map_size {
return Err(Error::unknown(
"The mapping's size differs from the requested size".to_string(),
@ -1294,12 +1317,12 @@ pub mod unix_shmem {
/// [`Drop`] implementation for [`AshmemShMem`], which cleans up the mapping.
impl Drop for AshmemShMem {
#[expect(trivial_numeric_casts)]
#[allow(trivial_numeric_casts)]
fn drop(&mut self) {
unsafe {
let fd: i32 = self.id.to_string().parse().unwrap();
#[expect(trivial_numeric_casts)]
#[allow(trivial_numeric_casts)]
#[expect(clippy::cast_sign_loss)]
let length = ioctl(fd, ASHMEM_GET_SIZE as _) as u32;
@ -1729,15 +1752,15 @@ impl DummyShMemService {
/// A cursor around [`ShMem`] that immitates [`std::io::Cursor`]. Notably, this implements [`Write`] for [`ShMem`] in std environments.
#[cfg(feature = "std")]
#[derive(Debug)]
pub struct ShMemCursor<T: ShMem> {
inner: T,
pub struct ShMemCursor<SHM> {
inner: SHM,
pos: usize,
}
#[cfg(all(feature = "std", not(target_os = "haiku")))]
impl<T: ShMem> ShMemCursor<T> {
impl<SHM> ShMemCursor<SHM> {
/// Create a new [`ShMemCursor`] around [`ShMem`]
pub fn new(shmem: T) -> Self {
pub fn new(shmem: SHM) -> Self {
Self {
inner: shmem,
pos: 0,
@ -1745,14 +1768,20 @@ impl<T: ShMem> ShMemCursor<T> {
}
/// Slice from the current location on this map to the end, mutable
fn empty_slice_mut(&mut self) -> &mut [u8] {
fn empty_slice_mut(&mut self) -> &mut [u8]
where
SHM: DerefMut<Target = [u8]>,
{
use crate::AsSliceMut;
&mut (self.inner.as_slice_mut()[self.pos..])
}
}
#[cfg(all(feature = "std", not(target_os = "haiku")))]
impl<T: ShMem> Write for ShMemCursor<T> {
impl<SHM> Write for ShMemCursor<SHM>
where
SHM: DerefMut<Target = [u8]>,
{
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
match self.empty_slice_mut().write(buf) {
Ok(w) => {
@ -1763,10 +1792,6 @@ impl<T: ShMem> Write for ShMemCursor<T> {
}
}
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
match self.empty_slice_mut().write_vectored(bufs) {
Ok(w) => {
@ -1777,6 +1802,10 @@ impl<T: ShMem> Write for ShMemCursor<T> {
}
}
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
match self.empty_slice_mut().write_all(buf) {
Ok(w) => {
@ -1789,7 +1818,10 @@ impl<T: ShMem> Write for ShMemCursor<T> {
}
#[cfg(feature = "std")]
impl<T: ShMem> std::io::Seek for ShMemCursor<T> {
impl<SHM> std::io::Seek for ShMemCursor<SHM>
where
SHM: DerefMut<Target = [u8]>,
{
fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
let effective_new_pos = match pos {
std::io::SeekFrom::Start(s) => s,

View File

@ -65,17 +65,15 @@ impl StateShMemContent {
/// it will instead write to disk, and store the file name into the map.
/// Writing to [`StateRestorer`] multiple times is not allowed.
#[derive(Debug, Clone)]
pub struct StateRestorer<SP>
where
SP: ShMemProvider,
{
shmem: SP::ShMem,
pub struct StateRestorer<SHM, SP> {
shmem: SHM,
phantom: PhantomData<*const SP>,
}
impl<SP> StateRestorer<SP>
impl<SHM, SP> StateRestorer<SHM, SP>
where
SP: ShMemProvider,
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
/// Get the map size backing this [`StateRestorer`].
pub fn mapsize(&self) -> usize {
@ -96,7 +94,7 @@ where
}
/// Create a new [`StateRestorer`].
pub fn new(shmem: SP::ShMem) -> Self {
pub fn new(shmem: SHM) -> Self {
let mut ret = Self {
shmem,
phantom: PhantomData,
@ -268,7 +266,7 @@ where
File::open(tmpfile)?.read_to_end(&mut file_content)?;
if file_content.is_empty() {
return Err(Error::illegal_state(format!(
"Colud not restore state from file {}",
"Could not restore state from file {}",
&filename
)));
}
@ -296,7 +294,7 @@ mod tests {
};
use crate::{
shmem::{ShMemProvider, StdShMemProvider},
shmem::{ShMemProvider, StdShMem, StdShMemProvider},
staterestore::StateRestorer,
};
@ -304,7 +302,7 @@ mod tests {
let mut shmem_provider = StdShMemProvider::new().unwrap();
let shmem = shmem_provider.new_shmem(TESTMAP_SIZE).unwrap();
let mut state_restorer = StateRestorer::<StdShMemProvider>::new(shmem);
let mut state_restorer = StateRestorer::<StdShMem, StdShMemProvider>::new(shmem);
let state = "hello world".to_string();

View File

@ -46,6 +46,7 @@ pub mod cpp_runtime {
#[doc(hidden)]
pub use ctor::ctor;
use libafl::observers::concolic;
pub use libafl_bolts::shmem::StdShMem;
#[doc(hidden)]
pub use libc::atexit;
#[doc(hidden)]

View File

@ -2,23 +2,30 @@
pub use libafl::observers::concolic::serialization_format::StdShMemMessageFileWriter;
use libafl::observers::concolic::SymExpr;
use libafl_bolts::shmem::ShMem;
use crate::{RSymExpr, Runtime};
/// Traces the expressions according to the format described in [`libafl::observers::concolic::serialization_format`].
///
/// The format can be read from elsewhere to perform processing of the expressions outside of the runtime.
pub struct TracingRuntime {
writer: StdShMemMessageFileWriter,
pub struct TracingRuntime<SHM>
where
SHM: ShMem,
{
writer: StdShMemMessageFileWriter<SHM>,
trace_locations: bool,
}
impl TracingRuntime {
impl<SHM> TracingRuntime<SHM>
where
SHM: ShMem,
{
/// Creates the runtime, tracing using the given writer.
/// When `trace_locations` is true, location information for calls, returns and basic blocks will also be part of the trace.
/// Tracing location information can drastically increase trace size. It is therefore recommended to not active this if not needed.
#[must_use]
pub fn new(writer: StdShMemMessageFileWriter, trace_locations: bool) -> Self {
pub fn new(writer: StdShMemMessageFileWriter<SHM>, trace_locations: bool) -> Self {
Self {
writer,
trace_locations,
@ -62,7 +69,10 @@ macro_rules! binary_expression_builder {
};
}
impl Runtime for TracingRuntime {
impl<SHM> Runtime for TracingRuntime<SHM>
where
SHM: ShMem,
{
#[no_mangle]
fn build_integer_from_buffer(
&mut self,
@ -201,7 +211,10 @@ impl Runtime for TracingRuntime {
}
}
impl Drop for TracingRuntime {
impl<SHM> Drop for TracingRuntime<SHM>
where
SHM: ShMem,
{
fn drop(&mut self) {
// manually end the writer to update the length prefix
self.writer

View File

@ -7,9 +7,11 @@ use symcc_runtime::{
export_runtime,
filter::NoFloat,
tracing::{self, StdShMemMessageFileWriter},
Runtime,
Runtime, StdShMem,
};
// use libafl_bolts::StdShmem;
export_runtime!(
NoFloat => NoFloat;
tracing::TracingRuntime::new(
@ -17,5 +19,5 @@ export_runtime!(
.expect("unable to construct tracing runtime writer. (missing env?)"),
false
)
=> tracing::TracingRuntime
=> tracing::TracingRuntime<StdShMem>
);

View File

@ -111,7 +111,7 @@ where
fuzz_with!(options, harness, do_fuzz, |fuzz_single| {
let (state, mgr): (
Option<StdState<_, _, _, _>>,
SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _>,
SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _, _>,
) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider) {
// The restarting state will spawn the same process again as child, then restarted it each time it crashes.
Ok(res) => res,

View File

@ -69,7 +69,7 @@ pub fn merge(
let (state, mut mgr): (
Option<StdState<_, _, _, _>>,
SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _>,
SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _, _>,
) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider) {
// The restarting state will spawn the same process again as child, then restarted it each time it crashes.
Ok(res) => res,

View File

@ -43,7 +43,7 @@ use crate::Qemu;
use crate::{command::CommandManager, modules::EmulatorModuleTuple, Emulator, EmulatorDriver};
type EmulatorInProcessExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM> =
StatefulInProcessExecutor<'a, H, I, OT, S, Emulator<C, CM, ED, ET, I, S, SM>>;
StatefulInProcessExecutor<'a, Emulator<C, CM, ED, ET, I, S, SM>, H, I, OT, S>;
pub struct QemuExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM> {
inner: EmulatorInProcessExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM>,
@ -207,7 +207,7 @@ where
}
inner.inprocess_hooks_mut().timeout_handler = inproc_qemu_timeout_handler::<
StatefulInProcessExecutor<'a, H, I, OT, S, Emulator<C, CM, ED, ET, I, S, SM>>,
StatefulInProcessExecutor<'a, Emulator<C, CM, ED, ET, I, S, SM>, H, I, OT, S>,
EM,
ET,
I,
@ -299,9 +299,10 @@ where
}
pub type QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> =
StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, Emulator<C, CM, ED, ET, I, S, SM>, EM, Z>;
StatefulInProcessForkExecutor<'a, EM, Emulator<C, CM, ED, ET, I, S, SM>, H, I, OT, S, SP, Z>;
#[cfg(feature = "fork")]
#[expect(clippy::type_complexity)]
pub struct QemuForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> {
inner: QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z>,
}
@ -313,12 +314,13 @@ where
C: Debug,
CM: Debug,
ED: Debug,
EM: Debug,
ET: EmulatorModuleTuple<I, S> + Debug,
OT: ObserversTuple<I, S> + Debug,
I: Debug,
S: Debug,
SM: Debug,
SP: ShMemProvider,
SP: Debug,
{
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("QemuForkExecutor")
@ -437,7 +439,6 @@ impl<C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> HasObservers
where
ET: EmulatorModuleTuple<I, S>,
OT: ObserversTuple<I, S>,
SP: ShMemProvider,
{
type Observers = OT;
#[inline]

View File

@ -119,7 +119,7 @@ impl ForkserverBytesCoverageSugar<'_> {
let time_ref = time_observer.handle();
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_core_id| {
let time_observer = time_observer.clone();

View File

@ -147,7 +147,7 @@ where
let time_ref = time_observer.handle();
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_core_id| {
let time_observer = time_observer.clone();

View File

@ -150,7 +150,7 @@ where
let time_ref = time_observer.handle();
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
_core_id| {
let time_observer = time_observer.clone();

View File

@ -1,4 +1,5 @@
use core::{marker::PhantomData, ptr, time::Duration};
use std::fmt::{Debug, Formatter};
use libafl::{
executors::{Executor, ExitKind, HasObservers},
@ -8,27 +9,24 @@ use libafl::{
};
use libafl_bolts::{
fs::{InputFile, INPUTFILE_STD},
shmem::{NopShMemProvider, ShMem, ShMemProvider},
shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider},
tuples::RefIndexable,
AsSlice, AsSliceMut,
};
use tinyinst::tinyinst::{litecov::RunResult, TinyInst};
/// [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor
pub struct TinyInstExecutor<S, SP, OT>
where
SP: ShMemProvider,
{
pub struct TinyInstExecutor<S, SHM, OT> {
tinyinst: TinyInst,
coverage_ptr: *mut Vec<u64>,
timeout: Duration,
observers: OT,
phantom: PhantomData<S>,
cur_input: InputFile,
map: Option<<SP as ShMemProvider>::ShMem>,
map: Option<SHM>,
}
impl TinyInstExecutor<(), NopShMemProvider, ()> {
impl TinyInstExecutor<(), NopShMem, ()> {
/// Create a builder for [`TinyInstExecutor`]
#[must_use]
pub fn builder<'a>() -> TinyInstExecutorBuilder<'a, NopShMemProvider> {
@ -36,22 +34,19 @@ impl TinyInstExecutor<(), NopShMemProvider, ()> {
}
}
impl<S, SP, OT> std::fmt::Debug for TinyInstExecutor<S, SP, OT>
where
SP: ShMemProvider,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl<S, SHM, OT> Debug for TinyInstExecutor<S, SHM, OT> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
f.debug_struct("TinyInstExecutor")
.field("timeout", &self.timeout)
.finish_non_exhaustive()
}
}
impl<EM, I, OT, S, SP, Z> Executor<EM, I, S, Z> for TinyInstExecutor<S, SP, OT>
impl<EM, I, OT, S, SHM, Z> Executor<EM, I, S, Z> for TinyInstExecutor<S, SHM, OT>
where
S: HasExecutions,
I: HasTargetBytes,
SP: ShMemProvider,
SHM: ShMem,
{
#[inline]
fn run_target(
@ -133,10 +128,7 @@ impl<'a> TinyInstExecutorBuilder<'a, NopShMemProvider> {
/// Use this to enable shmem testcase passing.
#[must_use]
pub fn shmem_provider<SP: ShMemProvider>(
self,
shmem_provider: &'a mut SP,
) -> TinyInstExecutorBuilder<'a, SP> {
pub fn shmem_provider<SP>(self, shmem_provider: &'a mut SP) -> TinyInstExecutorBuilder<'a, SP> {
TinyInstExecutorBuilder {
tinyinst_args: self.tinyinst_args,
program_args: self.program_args,
@ -246,7 +238,10 @@ where
}
/// Build [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor
pub fn build<OT, S>(&mut self, observers: OT) -> Result<TinyInstExecutor<S, SP, OT>, Error> {
pub fn build<OT, S>(
&mut self,
observers: OT,
) -> Result<TinyInstExecutor<S, SP::ShMem, OT>, Error> {
if self.coverage_ptr.is_null() {
return Err(Error::illegal_argument("Coverage pointer may not be null."));
}
@ -313,10 +308,7 @@ where
}
}
impl<S, SP, OT> HasObservers for TinyInstExecutor<S, SP, OT>
where
SP: ShMemProvider,
{
impl<S, SHM, OT> HasObservers for TinyInstExecutor<S, SHM, OT> {
type Observers = OT;
fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> {