Remove AdaptiveSerializer, Send off interesting testcase from crash handler, Add clearer error message (#3053)

* clean

* test thread 1

* add some error messages

* mm

* std

* mm

* aa

* pp

* fix

* change fuzzers

* fix

---------

Co-authored-by: Your Name <you@example.com>
Dongjia "toka" Zhang 2025-03-08 20:12:16 +01:00 committed by GitHub
parent 758fa7f231
commit 620500e295
25 changed files with 134 additions and 609 deletions
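In short: this commit deletes the `AdaptiveSerializer` and `RecordSerializationTime` traits together with all `time_ref`/`time_obs` plumbing (observers are now serialized unconditionally via `postcard`), makes the in-process crash handler add interesting crashing inputs to the corpus and broadcast them as `NewTestcase` events, and replaces two observer-related panics with descriptive errors. For downstream code, the most visible change is that every event-manager builder drops its `Option<Handle<TimeObserver>>` argument; a hedged before/after sketch based on the fuzzer hunks below (the surrounding variables are assumed):

```rust
// Before: a TimeObserver handle drove adaptive observer serialization.
let mgr = LlmpEventManagerBuilder::builder().build_on_port(
    shmem_provider.clone(),
    broker_port,
    EventConfig::AlwaysUnique,
    None, // Option<Handle<TimeObserver>>, removed by this commit
    Some(StateRestorer::new(shmem_provider.new_shmem(0x1000).unwrap())),
)?;

// After: the same call without the handle.
let mgr = LlmpEventManagerBuilder::builder().build_on_port(
    shmem_provider.clone(),
    broker_port,
    EventConfig::AlwaysUnique,
    Some(StateRestorer::new(shmem_provider.new_shmem(0x1000).unwrap())),
)?;
```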

View File

@ -66,7 +66,7 @@ jobs:
run: cargo test -- --test-threads 1
- name: Run tests (Linux)
if: runner.os != 'Windows'
run: cargo test
run: cargo test -- --test-threads 1
- name: Test libafl no_std
run: cd libafl && cargo test --no-default-features
- name: Test libafl_bolts no_std no_alloc
@ -556,8 +556,6 @@ jobs:
run: cargo build --verbose
- name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh
- name: Run Tests
run: cargo test
- name: Clippy
run: cargo +nightly clippy --tests --all --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test

View File

@ -119,7 +119,6 @@ impl Fuzzer {
shmem_provider.clone(),
broker_port,
EventConfig::AlwaysUnique,
None,
Some(StateRestorer::new(
shmem_provider.new_shmem(0x1000).unwrap(),
)),

View File

@ -115,7 +115,6 @@ impl Fuzzer {
shmem_provider.clone(),
broker_port,
EventConfig::AlwaysUnique,
None,
Some(StateRestorer::new(
shmem_provider.new_shmem(0x1000).unwrap(),
)),

View File

@ -260,8 +260,7 @@ fn fuzz(
let mut feedback = feedback_or!(
// New maximization map feedback linked to the edges observer and the feedback state
map_feedback,
// Time feedback, this one does not need a feedback state
TimeFeedback::new(&time_observer)
CrashFeedback::new(),
);
// A feedback to choose if an input is a solution or not
@ -391,9 +390,9 @@ fn fuzz(
#[cfg(unix)]
{
let null_fd = file_null.as_raw_fd();
dup2(null_fd, io::stdout().as_raw_fd())?;
// dup2(null_fd, io::stdout().as_raw_fd())?;
if std::env::var("LIBAFL_FUZZBENCH_DEBUG").is_err() {
dup2(null_fd, io::stderr().as_raw_fd())?;
// dup2(null_fd, io::stderr().as_raw_fd())?;
}
}
// reopen file to make sure we're at the end

View File

@ -216,18 +216,6 @@ pub extern "C" fn libafl_main() {
ExitKind::Ok
};
// Create the executor for an in-process function with one observer for edge coverage and one for the execution time
#[cfg(target_os = "linux")]
let mut executor = InProcessExecutor::batched_timeout(
&mut harness,
tuple_list!(edges_observer, time_observer),
&mut fuzzer,
&mut state,
&mut mgr,
opt.timeout,
)?;
#[cfg(not(target_os = "linux"))]
let mut executor = InProcessExecutor::with_timeout(
&mut harness,
tuple_list!(edges_observer, time_observer),

View File

@ -218,18 +218,6 @@ pub extern "C" fn libafl_main() {
ExitKind::Ok
};
// Create the executor for an in-process function with one observer for edge coverage and one for the execution time
#[cfg(target_os = "linux")]
let mut executor = InProcessExecutor::batched_timeout(
&mut harness,
tuple_list!(edges_observer, time_observer),
&mut fuzzer,
&mut state,
&mut restarting_mgr,
opt.timeout,
)?;
#[cfg(not(target_os = "linux"))]
let mut executor = InProcessExecutor::with_timeout(
&mut harness,
tuple_list!(edges_observer, time_observer),

View File

@ -232,18 +232,6 @@ pub extern "C" fn libafl_main() {
ExitKind::Ok
};
// Create the executor for an in-process function with one observer for edge coverage and one for the execution time
#[cfg(target_os = "linux")]
let mut executor = InProcessExecutor::batched_timeout(
&mut harness,
tuple_list!(edges_observer, time_observer),
&mut fuzzer,
&mut state,
&mut mgr,
opt.timeout,
)?;
#[cfg(not(target_os = "linux"))]
let mut executor = InProcessExecutor::with_timeout(
&mut harness,
tuple_list!(edges_observer, time_observer),
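The three fuzzer hunks above drop the Linux-only `batched_timeout` constructor (whose definition is deleted later in this commit) and fall back to `with_timeout` on every platform. A minimal sketch of the resulting executor construction, reusing the argument list of the deleted call:

```rust
let mut executor = InProcessExecutor::with_timeout(
    &mut harness,
    tuple_list!(edges_observer, time_observer),
    &mut fuzzer,
    &mut state,
    &mut mgr,
    opt.timeout,
)?;
```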

View File

@ -7,7 +7,7 @@
// 3. The "main evaluator", the evaluator node that will evaluate all the testcases pass by the centralized event manager to see if the testcases are worth propagating
// 4. The "main broker", the gathers the stats from the fuzzer clients and broadcast the newly found testcases from the main evaluator.
use alloc::{string::String, vec::Vec};
use alloc::string::String;
use core::{fmt::Debug, marker::PhantomData, time::Duration};
use std::process;
@ -15,28 +15,25 @@ use libafl_bolts::{
ClientId,
llmp::{LlmpClient, LlmpClientDescription, Tag},
shmem::{ShMem, ShMemProvider},
tuples::{Handle, MatchNameRef},
};
#[cfg(feature = "llmp_compression")]
use libafl_bolts::{
compress::GzipCompressor,
llmp::{LLMP_FLAG_COMPRESSED, LLMP_FLAG_INITIALIZED},
};
use serde::Serialize;
use super::{AwaitRestartSafe, RecordSerializationTime};
use super::AwaitRestartSafe;
#[cfg(feature = "llmp_compression")]
use crate::events::llmp::COMPRESS_THRESHOLD;
use crate::{
Error,
common::HasMetadata,
events::{
AdaptiveSerializer, CanSerializeObserver, Event, EventConfig, EventFirer, EventManagerId,
EventReceiver, EventRestarter, HasEventManagerId, LogSeverity, ProgressReporter,
SendExiting, serialize_observers_adaptive, std_maybe_report_progress, std_report_progress,
Event, EventConfig, EventFirer, EventManagerId, EventReceiver, EventRestarter,
HasEventManagerId, LogSeverity, ProgressReporter, SendExiting, std_maybe_report_progress,
std_report_progress,
},
inputs::Input,
observers::TimeObserver,
state::{HasExecutions, HasLastReportTime, MaybeHasClientPerfMonitor, Stoppable},
};
@ -50,7 +47,6 @@ pub struct CentralizedEventManager<EM, I, S, SHM, SP> {
client: LlmpClient<SHM, SP>,
#[cfg(feature = "llmp_compression")]
compressor: GzipCompressor,
time_ref: Option<Handle<TimeObserver>>,
is_main: bool,
phantom: PhantomData<(I, S)>,
}
@ -93,7 +89,6 @@ impl CentralizedEventManagerBuilder {
self,
inner: EM,
client: LlmpClient<SP::ShMem, SP>,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, I, S, SP::ShMem, SP>, Error>
where
SP: ShMemProvider,
@ -103,7 +98,6 @@ impl CentralizedEventManagerBuilder {
client,
#[cfg(feature = "llmp_compression")]
compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD),
time_ref: time_obs,
is_main: self.is_main,
phantom: PhantomData,
})
@ -118,14 +112,13 @@ impl CentralizedEventManagerBuilder {
inner: EM,
shmem_provider: SP,
port: u16,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, I, S, SHM, SP>, Error>
where
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let client = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
Self::build_from_client(self, inner, client, time_obs)
Self::build_from_client(self, inner, client)
}
/// If a client respawns, it may reuse the existing connection, previously
@ -135,14 +128,13 @@ impl CentralizedEventManagerBuilder {
inner: EM,
shmem_provider: SP,
env_name: &str,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, I, S, SHM, SP>, Error>
where
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let client = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
Self::build_from_client(self, inner, client, time_obs)
Self::build_from_client(self, inner, client)
}
/// Create an existing client from description
@ -151,59 +143,13 @@ impl CentralizedEventManagerBuilder {
inner: EM,
shmem_provider: SP,
description: &LlmpClientDescription,
time_obs: Option<Handle<TimeObserver>>,
) -> Result<CentralizedEventManager<EM, I, S, SHM, SP>, Error>
where
SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>,
{
let client = LlmpClient::existing_client_from_description(shmem_provider, description)?;
Self::build_from_client(self, inner, client, time_obs)
}
}
impl<EM, I, S, SHM, SP> RecordSerializationTime for CentralizedEventManager<EM, I, S, SHM, SP>
where
EM: RecordSerializationTime,
{
/// Set the deserialization time (mut)
fn set_deserialization_time(&mut self, dur: Duration) {
self.inner.set_deserialization_time(dur);
}
}
impl<EM, I, S, SHM, SP> AdaptiveSerializer for CentralizedEventManager<EM, I, S, SHM, SP>
where
EM: AdaptiveSerializer,
{
fn serialization_time(&self) -> Duration {
self.inner.serialization_time()
}
fn deserialization_time(&self) -> Duration {
self.inner.deserialization_time()
}
fn serializations_cnt(&self) -> usize {
self.inner.serializations_cnt()
}
fn should_serialize_cnt(&self) -> usize {
self.inner.should_serialize_cnt()
}
fn serialization_time_mut(&mut self) -> &mut Duration {
self.inner.serialization_time_mut()
}
fn deserialization_time_mut(&mut self) -> &mut Duration {
self.inner.deserialization_time_mut()
}
fn serializations_cnt_mut(&mut self) -> &mut usize {
self.inner.serializations_cnt_mut()
}
fn should_serialize_cnt_mut(&mut self) -> &mut usize {
self.inner.should_serialize_cnt_mut()
}
fn time_ref(&self) -> &Option<Handle<TimeObserver>> {
&self.time_ref
Self::build_from_client(self, inner, client)
}
}
@ -278,21 +224,6 @@ where
}
}
impl<EM, I, OT, S, SHM, SP> CanSerializeObserver<OT> for CentralizedEventManager<EM, I, S, SHM, SP>
where
EM: AdaptiveSerializer,
OT: MatchNameRef + Serialize,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
serialize_observers_adaptive::<EM, OT>(
&mut self.inner,
observers,
4, // twice the normal LLMP event manager's value, because it does this job twice.
80,
)
}
}
impl<EM, I, S, SHM, SP> SendExiting for CentralizedEventManager<EM, I, S, SHM, SP>
where
EM: SendExiting,

View File

@ -23,7 +23,7 @@ use core::{
use libafl_bolts::{
core_affinity::{CoreId, Cores},
shmem::ShMemProvider,
tuples::{Handle, tuple_list},
tuples::tuple_list,
};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use typed_builder::TypedBuilder;
@ -58,7 +58,6 @@ use crate::{
llmp::{LlmpRestartingEventManager, LlmpShouldSaveState, ManagerKind, RestartingMgr},
},
monitors::Monitor,
observers::TimeObserver,
};
/// The (internal) `env` that indicates we're running as client.
@ -172,9 +171,6 @@ pub struct Launcher<'a, CF, MT, SP> {
/// clusters.
#[builder(default = None)]
remote_broker_addr: Option<SocketAddr>,
/// The time observer for adaptive serialization
#[builder(default = None)]
time_ref: Option<Handle<TimeObserver>>,
/// If this launcher should spawn a new `broker` on [`Self::broker_port`] (default).
/// The reason you may not want this is, if you already have a [`Launcher`]
/// with a different configuration (for the same target) running on this machine.
@ -325,7 +321,6 @@ where
.configuration(self.configuration)
.serialize_state(self.serialize_state)
.hooks(hooks);
let builder = builder.time_ref(self.time_ref.clone());
let (state, mgr) = builder.build().launch()?;
return (self.run_client.take().unwrap())(
@ -354,8 +349,6 @@ where
.serialize_state(self.serialize_state)
.hooks(hooks);
let builder = builder.time_ref(self.time_ref.clone());
builder.build().launch()?;
// Broker exited. kill all clients.
@ -417,8 +410,6 @@ where
.serialize_state(self.serialize_state)
.hooks(hooks);
let builder = builder.time_ref(self.time_ref.clone());
let (state, mgr) = builder.build().launch()?;
return (self.run_client.take().unwrap())(state, mgr, client_description);
@ -523,8 +514,6 @@ where
.serialize_state(self.serialize_state)
.hooks(hooks);
let builder = builder.time_ref(self.time_ref.clone());
builder.build().launch()?;
//broker exited. kill all clients.
@ -573,8 +562,6 @@ pub struct CentralizedLauncher<'a, CF, MF, MT, SP> {
#[builder(default = 1338_u16)]
centralized_broker_port: u16,
/// The time observer by which to adaptively serialize
#[builder(default = None)]
time_obs: Option<Handle<TimeObserver>>,
/// The list of cores to run on
cores: &'a Cores,
/// The number of clients to spawn on each core
@ -679,8 +666,6 @@ where
.serialize_state(centralized_launcher.serialize_state)
.hooks(tuple_list!());
let builder = builder.time_ref(centralized_launcher.time_obs.clone());
builder.build().launch()
};
@ -803,7 +788,6 @@ where
// tuple_list!(multi_machine_event_manager_hook.take().unwrap()),
self.shmem_provider.clone(),
self.centralized_broker_port,
self.time_obs.clone(),
)?;
self.main_run_client.take().unwrap()(
@ -829,7 +813,6 @@ where
mgr,
self.shmem_provider.clone(),
self.centralized_broker_port,
self.time_obs.clone(),
)?;
self.secondary_run_client.take().unwrap()(
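With adaptive serialization gone, `Launcher` and `CentralizedLauncher` no longer expose the `time_ref`/`time_obs` builder fields. A hedged sketch of a post-change launcher setup (the monitor, cores, and client closure body are assumptions, not part of this diff):

```rust
let mut run_client = |state: Option<_>, mgr, _client_description| {
    // ... build the fuzzer, executor, and stages, then run the fuzzing loop ...
    Ok(())
};

Launcher::builder()
    .shmem_provider(StdShMemProvider::new()?)
    .configuration(EventConfig::from_name("default"))
    .monitor(monitor)
    .run_client(&mut run_client)
    .cores(&cores)
    .broker_port(1337)
    // .time_ref(...) no longer exists on this builder
    .build()
    .launch()?;
```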

View File

@ -41,7 +41,7 @@ use libafl_bolts::{
os::CTRL_C_EXIT,
shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider},
staterestore::StateRestorer,
tuples::{Handle, MatchNameRef, tuple_list},
tuples::tuple_list,
};
use serde::{Serialize, de::DeserializeOwned};
use typed_builder::TypedBuilder;
@ -54,16 +54,14 @@ use crate::{
Error,
common::HasMetadata,
events::{
_LLMP_TAG_EVENT_TO_BROKER, AdaptiveSerializer, AwaitRestartSafe, CanSerializeObserver,
Event, EventConfig, EventFirer, EventManagerHooksTuple, EventManagerId, EventReceiver,
EventRestarter, HasEventManagerId, LLMP_TAG_EVENT_TO_BOTH, LlmpShouldSaveState,
ProgressReporter, RecordSerializationTime, SendExiting, StdLlmpEventHook,
launcher::ClientDescription, serialize_observers_adaptive, std_maybe_report_progress,
_LLMP_TAG_EVENT_TO_BROKER, AwaitRestartSafe, Event, EventConfig, EventFirer,
EventManagerHooksTuple, EventManagerId, EventReceiver, EventRestarter, HasEventManagerId,
LLMP_TAG_EVENT_TO_BOTH, LlmpShouldSaveState, ProgressReporter, SendExiting,
StdLlmpEventHook, launcher::ClientDescription, std_maybe_report_progress,
std_report_progress,
},
inputs::Input,
monitors::Monitor,
observers::TimeObserver,
state::{
HasCurrentStageId, HasCurrentTestcase, HasExecutions, HasImported, HasLastReportTime,
HasSolutions, MaybeHasClientPerfMonitor, Stoppable,
@ -87,11 +85,6 @@ pub struct LlmpRestartingEventManager<EMH, I, S, SHM, SP> {
/// A node will not re-use the observer values sent over LLMP
/// from nodes with other configurations.
configuration: EventConfig,
serialization_time: Duration,
deserialization_time: Duration,
serializations_cnt: usize,
should_serialize_cnt: usize,
pub(crate) time_ref: Option<Handle<TimeObserver>>,
event_buffer: Vec<u8>,
/// The staterestorer to serialize the state for the next runner
/// If this is Some, this event manager can restart. Else it does not.
@ -101,50 +94,6 @@ pub struct LlmpRestartingEventManager<EMH, I, S, SHM, SP> {
phantom: PhantomData<(I, S)>,
}
impl<EMH, I, S, SHM, SP> RecordSerializationTime for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SHM: ShMem,
{
fn set_deserialization_time(&mut self, dur: Duration) {
self.deserialization_time = dur;
}
}
impl<EMH, I, S, SHM, SP> AdaptiveSerializer for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
SHM: ShMem,
{
fn serialization_time(&self) -> Duration {
self.serialization_time
}
fn deserialization_time(&self) -> Duration {
self.deserialization_time
}
fn serializations_cnt(&self) -> usize {
self.serializations_cnt
}
fn should_serialize_cnt(&self) -> usize {
self.should_serialize_cnt
}
fn serialization_time_mut(&mut self) -> &mut Duration {
&mut self.serialization_time
}
fn deserialization_time_mut(&mut self) -> &mut Duration {
&mut self.deserialization_time
}
fn serializations_cnt_mut(&mut self) -> &mut usize {
&mut self.serializations_cnt
}
fn should_serialize_cnt_mut(&mut self) -> &mut usize {
&mut self.should_serialize_cnt
}
fn time_ref(&self) -> &Option<Handle<TimeObserver>> {
&self.time_ref
}
}
impl<EMH, I, S, SHM, SP> ProgressReporter<S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
I: Serialize,
@ -237,18 +186,6 @@ where
}
}
#[cfg(feature = "std")]
impl<EMH, I, OT, S, SHM, SP> CanSerializeObserver<OT>
for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
OT: MatchNameRef + Serialize,
SHM: ShMem,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
serialize_observers_adaptive::<Self, OT>(self, observers, 2, 80)
}
}
impl<EMH, I, S, SHM, SP> EventRestarter<S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
where
S: Serialize + HasCurrentStageId,
@ -477,7 +414,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
self,
llmp: LlmpClient<SHM, SP>,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
staterestorer: Option<StateRestorer<SHM, SP>>,
) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error> {
Ok(LlmpRestartingEventManager {
@ -488,11 +424,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
#[cfg(feature = "llmp_compression")]
compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD),
configuration,
serialization_time: Duration::ZERO,
deserialization_time: Duration::ZERO,
serializations_cnt: 0,
should_serialize_cnt: 0,
time_ref,
event_buffer: Vec::with_capacity(INITIAL_EVENT_BUFFER_SIZE),
staterestorer,
save_state: LlmpShouldSaveState::OnRestart,
@ -508,7 +439,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
shmem_provider: SP,
port: u16,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
staterestorer: Option<StateRestorer<SHM, SP>>,
) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error>
where
@ -516,7 +446,7 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
Self::build_from_client(self, llmp, configuration, time_ref, staterestorer)
Self::build_from_client(self, llmp, configuration, staterestorer)
}
/// If a client respawns, it may reuse the existing connection, previously
@ -527,7 +457,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
shmem_provider: SP,
env_name: &str,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
staterestorer: Option<StateRestorer<SHM, SP>>,
) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error>
where
@ -535,7 +464,7 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
Self::build_from_client(self, llmp, configuration, time_ref, staterestorer)
Self::build_from_client(self, llmp, configuration, staterestorer)
}
/// Create an existing client from description
@ -544,7 +473,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
shmem_provider: SP,
description: &LlmpClientDescription,
configuration: EventConfig,
time_ref: Option<Handle<TimeObserver>>,
staterestorer: Option<StateRestorer<SHM, SP>>,
) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error>
where
@ -552,7 +480,7 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
SP: ShMemProvider<ShMem = SHM>,
{
let llmp = LlmpClient::existing_client_from_description(shmem_provider, description)?;
Self::build_from_client(self, llmp, configuration, time_ref, staterestorer)
Self::build_from_client(self, llmp, configuration, staterestorer)
}
}
@ -695,7 +623,6 @@ pub fn setup_restarting_mgr_std_adaptive<I, MT, S>(
monitor: MT,
broker_port: u16,
configuration: EventConfig,
time_obs: Handle<TimeObserver>,
) -> Result<
(
Option<S>,
@ -714,7 +641,6 @@ where
.broker_port(broker_port)
.configuration(configuration)
.hooks(tuple_list!())
.time_ref(Some(time_obs))
.build()
.launch()
}
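Note that `setup_restarting_mgr_std_adaptive` keeps its name but loses the `time_obs` parameter; a hedged sketch of the new call shape (monitor and port are assumptions, and type annotations may still be needed for inference):

```rust
let (state, mut mgr) = setup_restarting_mgr_std_adaptive(
    monitor,
    1337,
    EventConfig::AlwaysUnique,
)?;
```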
@ -756,8 +682,6 @@ pub struct RestartingMgr<EMH, I, MT, S, SP> {
serialize_state: LlmpShouldSaveState,
/// The hooks passed to event manager:
hooks: EMH,
#[builder(default = None)]
time_ref: Option<Handle<TimeObserver>>,
#[builder(setter(skip), default = PhantomData)]
phantom_data: PhantomData<(EMH, I, S)>,
}
@ -828,12 +752,7 @@ where
let mgr: LlmpRestartingEventManager<EMH, I, S, SP::ShMem, SP> =
LlmpEventManagerBuilder::builder()
.hooks(self.hooks)
.build_from_client(
client,
self.configuration,
self.time_ref.clone(),
None,
)?;
.build_from_client(client, self.configuration, None)?;
(mgr, None)
}
}
@ -860,7 +779,6 @@ where
self.shmem_provider.clone(),
self.broker_port,
self.configuration,
self.time_ref.clone(),
None,
)?;
@ -1008,7 +926,6 @@ where
new_shmem_provider,
&mgr_description,
self.configuration,
self.time_ref.clone(),
Some(staterestorer),
)?,
)
@ -1024,7 +941,6 @@ where
new_shmem_provider,
_ENV_FUZZER_BROKER_CLIENT_INITIAL,
self.configuration,
self.time_ref.clone(),
Some(staterestorer),
)?,
)
@ -1056,7 +972,7 @@ mod tests {
rands::StdRand,
shmem::{ShMemProvider, StdShMem, StdShMemProvider},
staterestore::StateRestorer,
tuples::{Handled, tuple_list},
tuples::tuple_list,
};
use serial_test::serial;
@ -1089,7 +1005,6 @@ mod tests {
let rand = StdRand::with_seed(0);
let time = TimeObserver::new("time");
let time_ref = time.handle();
let mut corpus = InMemoryCorpus::<BytesInput>::new();
let testcase = Testcase::new(vec![0; 4].into());
@ -1118,7 +1033,7 @@ mod tests {
}
let mut llmp_mgr = LlmpEventManagerBuilder::builder()
.build_from_client(llmp_client, "fuzzer".into(), Some(time_ref.clone()), None)
.build_from_client(llmp_client, "fuzzer".into(), None)
.unwrap();
let scheduler = RandScheduler::new();
@ -1170,7 +1085,6 @@ mod tests {
shmem_provider,
&mgr_description,
"fuzzer".into(),
Some(time_ref),
None,
)
.unwrap();

View File

@ -33,13 +33,11 @@ use ahash::RandomState;
pub use broker_hooks::*;
#[cfg(feature = "std")]
pub use launcher::*;
use libafl_bolts::current_time;
#[cfg(all(unix, feature = "std"))]
use libafl_bolts::os::CTRL_C_EXIT;
#[cfg(all(unix, feature = "std"))]
use libafl_bolts::os::unix_signals::{Signal, SignalHandler, siginfo_t, ucontext_t};
#[cfg(feature = "std")]
use libafl_bolts::tuples::MatchNameRef;
use libafl_bolts::{current_time, tuples::Handle};
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use uuid::Uuid;
@ -108,7 +106,7 @@ pub struct EventManagerId(
use crate::events::multi_machine::NodeId;
#[cfg(feature = "introspection")]
use crate::monitors::stats::ClientPerfStats;
use crate::{observers::TimeObserver, state::HasCurrentStageId};
use crate::state::HasCurrentStageId;
/// The log event severity
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
@ -409,58 +407,6 @@ pub trait EventFirer<I, S> {
fn should_send(&self) -> bool;
}
/// Serialize all observers for this type and manager
/// Serialize the observer using the `time_factor` and `percentage_threshold`.
/// These parameters are unique to each of the different types of `EventManager`
#[cfg(feature = "std")]
pub(crate) fn serialize_observers_adaptive<EM, OT>(
manager: &mut EM,
observers: &OT,
time_factor: u32,
percentage_threshold: usize,
) -> Result<Option<Vec<u8>>, Error>
where
EM: AdaptiveSerializer,
OT: MatchNameRef + Serialize,
{
match manager.time_ref() {
Some(t) => {
let exec_time = observers
.get(t)
.map(|o| o.last_runtime().unwrap_or(Duration::ZERO))
.unwrap();
let mut must_ser = (manager.serialization_time() + manager.deserialization_time())
* time_factor
< exec_time;
if must_ser {
*manager.should_serialize_cnt_mut() += 1;
}
if manager.serializations_cnt() > 32 {
must_ser = (manager.should_serialize_cnt() * 100 / manager.serializations_cnt())
> percentage_threshold;
}
if manager.serialization_time() == Duration::ZERO
|| must_ser
|| manager.serializations_cnt().trailing_zeros() >= 8
{
let start = current_time();
let ser = postcard::to_allocvec(observers)?;
*manager.serialization_time_mut() = current_time() - start;
*manager.serializations_cnt_mut() += 1;
Ok(Some(ser))
} else {
*manager.serializations_cnt_mut() += 1;
Ok(None)
}
}
None => Ok(None),
}
}
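The deleted helper above was the heart of adaptive serialization: serialize observers only when their (de)serialization cost, scaled by `time_factor`, stayed below the target's execution time, with a `percentage_threshold` fallback after 32 samples. Nothing replaces the heuristic; managers now serialize unconditionally, as in this sketch of the new `StdFuzzer` behavior (taken from the fuzzer hunk further down):

```rust
let observers_buf = if exec_res.is_corpus()
    && manager.should_send()
    && manager.configuration() != EventConfig::AlwaysUnique
{
    // Always pay the serialization cost; the adaptive timer is gone.
    Some(postcard::to_allocvec(observers)?)
} else {
    None
};
```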
/// Default implementation of [`ProgressReporter::maybe_report_progress`] for implementors with the
/// given constraints
pub fn std_maybe_report_progress<PR, S>(
@ -571,12 +517,6 @@ where
Ok(())
}
/// The class that implements this must be able to serialize an observer.
pub trait CanSerializeObserver<OT> {
/// Do serialize the observer
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error>;
}
/// Send that we're about to exit
pub trait SendExiting {
/// Send information that this client is exiting.
@ -625,8 +565,6 @@ impl NopEventManager {
}
}
impl RecordSerializationTime for NopEventManager {}
impl<I, S> EventFirer<I, S> for NopEventManager {
fn should_send(&self) -> bool {
true
@ -673,15 +611,6 @@ impl<I, S> EventReceiver<I, S> for NopEventManager {
}
}
impl<OT> CanSerializeObserver<OT> for NopEventManager
where
OT: Serialize,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
Ok(Some(postcard::to_allocvec(observers)?))
}
}
impl<S> ProgressReporter<S> for NopEventManager {
fn maybe_report_progress(
&mut self,
@ -710,8 +639,6 @@ pub struct MonitorTypedEventManager<EM, M> {
phantom: PhantomData<M>,
}
impl<EM, M> RecordSerializationTime for MonitorTypedEventManager<EM, M> {}
impl<EM, M> MonitorTypedEventManager<EM, M> {
/// Creates a new `EventManager` that wraps another manager, but captures a `monitor` type as well.
#[must_use]
@ -723,15 +650,6 @@ impl<EM, M> MonitorTypedEventManager<EM, M> {
}
}
impl<EM, M, OT> CanSerializeObserver<OT> for MonitorTypedEventManager<EM, M>
where
OT: Serialize,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
Ok(Some(postcard::to_allocvec(observers)?))
}
}
impl<EM, I, M, S> EventFirer<I, S> for MonitorTypedEventManager<EM, M>
where
EM: EventFirer<I, S>,
@ -837,36 +755,6 @@ where
}
}
/// Record the deserialization time for this event manager
pub trait RecordSerializationTime {
/// Set the deserialization time (mut)
fn set_deserialization_time(&mut self, _dur: Duration) {}
}
/// Collected stats to decide if observers must be serialized or not
pub trait AdaptiveSerializer {
/// Expose the collected observers serialization time
fn serialization_time(&self) -> Duration;
/// Expose the collected observers deserialization time
fn deserialization_time(&self) -> Duration;
/// How many times observers were serialized
fn serializations_cnt(&self) -> usize;
/// How many times an observer should have been serialized
fn should_serialize_cnt(&self) -> usize;
/// Expose the collected observers serialization time (mut)
fn serialization_time_mut(&mut self) -> &mut Duration;
/// Expose the collected observers deserialization time (mut)
fn deserialization_time_mut(&mut self) -> &mut Duration;
/// How many times observers were serialized (mut)
fn serializations_cnt_mut(&mut self) -> &mut usize;
/// How many times an observer should have been serialized (mut)
fn should_serialize_cnt_mut(&mut self) -> &mut usize;
/// A [`Handle`] to the time observer to determine the `time_factor`
fn time_ref(&self) -> &Option<Handle<TimeObserver>>;
}
#[cfg(test)]
mod tests {

View File

@ -18,19 +18,19 @@ use libafl_bolts::{
shmem::{ShMem, ShMemProvider},
staterestore::StateRestorer,
};
#[cfg(feature = "std")]
use serde::Serialize;
#[cfg(feature = "std")]
use serde::de::DeserializeOwned;
use super::{AwaitRestartSafe, ProgressReporter, RecordSerializationTime, std_on_restart};
use super::{AwaitRestartSafe, ProgressReporter, std_on_restart};
#[cfg(all(unix, feature = "std", not(miri)))]
use crate::events::EVENTMGR_SIGHANDLER_STATE;
use crate::{
Error, HasMetadata,
events::{
BrokerEventResult, CanSerializeObserver, Event, EventFirer, EventManagerId, EventReceiver,
EventRestarter, HasEventManagerId, SendExiting, std_maybe_report_progress,
std_report_progress,
BrokerEventResult, Event, EventFirer, EventManagerId, EventReceiver, EventRestarter,
HasEventManagerId, SendExiting, std_maybe_report_progress, std_report_progress,
},
monitors::{Monitor, stats::ClientStatsManager},
state::{
@ -73,8 +73,6 @@ where
}
}
impl<I, MT, S> RecordSerializationTime for SimpleEventManager<I, MT, S> {}
impl<I, MT, S> EventFirer<I, S> for SimpleEventManager<I, MT, S>
where
I: Debug,
@ -143,15 +141,6 @@ where
}
}
impl<I, MT, OT, S> CanSerializeObserver<OT> for SimpleEventManager<I, MT, S>
where
OT: Serialize,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
Ok(Some(postcard::to_allocvec(observers)?))
}
}
impl<I, MT, S> ProgressReporter<S> for SimpleEventManager<I, MT, S>
where
I: Debug,
@ -295,12 +284,6 @@ pub struct SimpleRestartingEventManager<I, MT, S, SHM, SP> {
staterestorer: StateRestorer<SHM, SP>,
}
#[cfg(feature = "std")]
impl<I, MT, S, SHM, SP> RecordSerializationTime
for SimpleRestartingEventManager<I, MT, S, SHM, SP>
{
}
#[cfg(feature = "std")]
impl<I, MT, S, SHM, SP> EventFirer<I, S> for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
@ -339,17 +322,6 @@ where
}
}
#[cfg(feature = "std")]
impl<I, MT, OT, S, SHM, SP> CanSerializeObserver<OT>
for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where
OT: Serialize,
{
fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
Ok(Some(postcard::to_allocvec(observers)?))
}
}
#[cfg(feature = "std")]
impl<I, MT, S, SHM, SP> SendExiting for SimpleRestartingEventManager<I, MT, S, SHM, SP>
where

View File

@ -24,7 +24,7 @@ use windows::Win32::System::Threading::{CRITICAL_SECTION, PTP_TIMER};
#[cfg(feature = "std")]
use crate::executors::hooks::timer::TimerStruct;
use crate::{
Error, HasFeedback, HasObjective,
Error, HasFeedback, HasObjective, HasScheduler,
events::{EventFirer, EventRestarter},
executors::{Executor, HasObservers, hooks::ExecutorHook, inprocess::HasInProcessHooks},
feedbacks::Feedback,
@ -240,7 +240,7 @@ impl<I, S> InProcessHooks<I, S> {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
// # Safety
@ -286,7 +286,7 @@ impl<I, S> InProcessHooks<I, S> {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let ret;
#[cfg(feature = "std")]
@ -351,7 +351,7 @@ impl<I, S> InProcessHooks<I, S> {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
#[cfg_attr(miri, allow(unused_variables))]
let ret = Self {
@ -489,7 +489,7 @@ impl InProcessExecutorHandlerData {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCorpus<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
unsafe {

View File

@ -12,7 +12,7 @@ pub mod unix_signal_handler {
use libc::siginfo_t;
use crate::{
HasFeedback,
HasFeedback, HasScheduler,
events::{EventFirer, EventRestarter},
executors::{
Executor, ExitKind, HasObservers, common_signals,
@ -95,7 +95,7 @@ pub mod unix_signal_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
let old_hook = panic::take_hook();
@ -154,7 +154,7 @@ pub mod unix_signal_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
unsafe {
@ -213,7 +213,7 @@ pub mod unix_signal_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
unsafe {

View File

@ -16,7 +16,7 @@ pub mod windows_asan_handler {
inprocess::run_observers_and_save_state,
},
feedbacks::Feedback,
fuzzer::HasObjective,
fuzzer::{HasObjective, HasScheduler},
inputs::Input,
observers::ObserversTuple,
state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@ -33,7 +33,7 @@ pub mod windows_asan_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
unsafe {
let data = &raw mut GLOBAL_STATE;
@ -147,7 +147,7 @@ pub mod windows_exception_handler {
inprocess::{HasInProcessHooks, run_observers_and_save_state},
},
feedbacks::Feedback,
fuzzer::HasObjective,
fuzzer::{HasObjective, HasScheduler},
inputs::Input,
observers::ObserversTuple,
state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@ -209,7 +209,7 @@ pub mod windows_exception_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let old_hook = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| unsafe {
@ -278,7 +278,7 @@ pub mod windows_exception_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let data: &mut InProcessExecutorHandlerData =
unsafe { &mut *(global_state as *mut InProcessExecutorHandlerData) };
@ -357,7 +357,7 @@ pub mod windows_exception_handler {
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
// Have we set a timer_before?
if data.ptp_timer.is_some() {

View File

@ -11,8 +11,6 @@ use libafl_bolts::tuples::{Merge, RefIndexable, tuple_list};
#[cfg(windows)]
use windows::Win32::System::Threading::SetThreadStackGuarantee;
#[cfg(all(feature = "std", target_os = "linux"))]
use crate::executors::hooks::inprocess::HasTimeout;
#[cfg(all(windows, feature = "std"))]
use crate::executors::hooks::inprocess::HasTimeout;
use crate::{
@ -27,7 +25,7 @@ use crate::{
inprocess::HasInProcessHooks,
},
feedbacks::Feedback,
fuzzer::HasObjective,
fuzzer::{HasObjective, HasScheduler},
inputs::Input,
observers::ObserversTuple,
state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@ -147,7 +145,7 @@ where
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasCurrentTestcase<I> + HasSolutions<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
Self::with_timeout_generic::<E, F, OF>(
user_hooks,
@ -159,33 +157,6 @@ where
)
}
/// Create a new in mem executor with the default timeout and use batch mode (5 sec)
#[cfg(all(feature = "std", target_os = "linux"))]
pub fn batched_timeout_generic<E, F, OF>(
user_hooks: HT,
observers: OT,
fuzzer: &mut Z,
state: &mut S,
event_mgr: &mut EM,
exec_tmout: Duration,
) -> Result<Self, Error>
where
E: Executor<EM, I, S, Z> + HasObservers + HasInProcessHooks<I, S>,
E::Observers: ObserversTuple<I, S>,
EM: EventFirer<I, S> + EventRestarter<S>,
I: Input + Clone,
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasCurrentTestcase<I> + HasSolutions<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
{
let mut me = Self::with_timeout_generic::<E, F, OF>(
user_hooks, observers, fuzzer, state, event_mgr, exec_tmout,
)?;
me.hooks_mut().0.timer_mut().batch_mode = true;
Ok(me)
}
/// Create a new in mem executor.
/// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
/// depending on different corpus or state.
@ -209,7 +180,7 @@ where
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasCurrentTestcase<I> + HasSolutions<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
let default = InProcessHooks::new::<E, EM, F, OF, Z>(timeout)?;

View File

@ -15,7 +15,7 @@ use core::{
use libafl_bolts::tuples::{RefIndexable, tuple_list};
use crate::{
Error, HasFeedback, HasMetadata,
Error, HasFeedback, HasScheduler,
corpus::{Corpus, Testcase},
events::{Event, EventFirer, EventRestarter},
executors::{
@ -27,6 +27,7 @@ use crate::{
fuzzer::HasObjective,
inputs::Input,
observers::ObserversTuple,
schedulers::Scheduler,
state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasSolutions},
};
@ -143,7 +144,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
Self::with_timeout_generic::<F, OF>(
tuple_list!(),
@ -156,38 +157,6 @@ where
)
}
/// Create a new in mem executor with the default timeout and use batch mode (5 sec)
#[cfg(all(feature = "std", target_os = "linux"))]
pub fn batched_timeout<F, OF>(
harness_fn: &'a mut H,
observers: OT,
fuzzer: &mut Z,
state: &mut S,
event_mgr: &mut EM,
exec_tmout: Duration,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
{
let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
tuple_list!(),
observers,
fuzzer,
state,
event_mgr,
exec_tmout,
)?;
Ok(Self {
harness_fn,
inner,
phantom: PhantomData,
})
}
/// Create a new in mem executor.
/// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
/// depending on different corpus or state.
@ -208,7 +177,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
tuple_list!(),
@ -249,7 +218,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
Self::with_timeout_generic::<F, OF>(
user_hooks,
@ -262,34 +231,6 @@ where
)
}
/// Create a new in mem executor with the default timeout and use batch mode (5 sec)
#[cfg(all(feature = "std", target_os = "linux"))]
pub fn batched_timeout_generic<F, OF>(
user_hooks: HT,
harness_fn: HB,
observers: OT,
fuzzer: &mut Z,
state: &mut S,
event_mgr: &mut EM,
exec_tmout: Duration,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
{
let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
user_hooks, observers, fuzzer, state, event_mgr, exec_tmout,
)?;
Ok(Self {
harness_fn,
inner,
phantom: PhantomData,
})
}
/// Create a new [`InProcessExecutor`].
/// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
/// depending on different corpus or state.
@ -311,7 +252,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
user_hooks, observers, fuzzer, state, event_mgr, timeout,
@ -390,20 +331,62 @@ pub fn run_observers_and_save_state<E, EM, F, I, OF, S, Z>(
OF: Feedback<EM, I, E::Observers, S>,
F: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCorpus<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
log::info!("in crash handler!");
let mut observers = executor.observers_mut();
observers
.post_exec_all(state, input, &exitkind)
.expect("Observers post_exec_all failed");
let _is_corpus = fuzzer
let is_corpus = fuzzer
.feedback_mut()
.is_interesting(state, event_mgr, input, &*observers, &exitkind)
.expect("In run_observers_and_save_state feedback failure");
if is_corpus {
// Add the input to the main corpus
let mut testcase = Testcase::from(input.clone());
#[cfg(feature = "track_hit_feedbacks")]
fuzzer
.feedback_mut()
.append_hit_feedbacks(testcase.hit_feedbacks_mut())
.expect("Failed to append hit feedbacks");
testcase.set_parent_id_optional(*state.corpus().current());
fuzzer
.feedback_mut()
.append_metadata(state, event_mgr, &observers, &mut testcase)
.expect("Failed to append metadata");
let id = state
.corpus_mut()
.add(testcase)
.expect("In run_observers_and_save_state failed to add to corpus.");
fuzzer
.scheduler_mut()
.on_add(state, id)
.expect("In run_observers_and_save_state failed to add to scheduler.");
event_mgr
.fire(
state,
Event::NewTestcase {
input: input.clone(),
observers_buf: None, // Serializing observers here would have no effect; skip it
exit_kind: ExitKind::Ok,
corpus_size: state.corpus().count(),
client_config: event_mgr.configuration(),
time: libafl_bolts::current_time(),
forward_id: None,
#[cfg(all(unix, feature = "std", feature = "multi_machine"))]
node_id: None,
},
)
.expect("Could not send off events in run_observers_and_save_state");
}
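// Note: the crash handler now mirrors the main fuzzing loop for interesting
// crashing inputs: run the feedback, append metadata, add the testcase to the
// corpus, notify the scheduler via `on_add`, and fire a `NewTestcase` event.
// The `fuzzer.scheduler_mut()` call is why `Z: HasScheduler<I, S>` was added
// to the bounds of every crash/timeout handler touched by this commit.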
let is_solution = fuzzer
.objective_mut()
.is_interesting(state, event_mgr, input, &*observers, &exitkind)
@ -411,7 +394,6 @@ pub fn run_observers_and_save_state<E, EM, F, I, OF, S, Z>(
if is_solution {
let mut new_testcase = Testcase::from(input.clone());
new_testcase.add_metadata(exitkind);
new_testcase.set_parent_id_optional(*state.corpus().current());
if let Ok(mut tc) = state.current_testcase_mut() {
@ -435,7 +417,7 @@ pub fn run_observers_and_save_state<E, EM, F, I, OF, S, Z>(
time: libafl_bolts::current_time(),
},
)
.expect("Could not save state in run_observers_and_save_state");
.expect("Could not send off events in run_observers_and_save_state");
}
// Serialize the state and wait safely for the broker to read pending messages
@ -456,7 +438,7 @@ mod tests {
feedbacks::CrashFeedback,
inputs::NopInput,
schedulers::RandScheduler,
state::{NopState, StdState},
state::StdState,
};
#[test]
@ -467,7 +449,7 @@ mod tests {
let solutions = InMemoryCorpus::new();
let mut objective = CrashFeedback::new();
let mut feedback = tuple_list!();
let sche: RandScheduler<NopState<NopInput>> = RandScheduler::new();
let sche = RandScheduler::new();
let mut mgr = NopEventManager::new();
let mut state =
StdState::new(rand, corpus, solutions, &mut feedback, &mut objective).unwrap();

View File

@ -19,7 +19,7 @@ use crate::{
inprocess::{GenericInProcessExecutorInner, HasInProcessHooks},
},
feedbacks::Feedback,
fuzzer::HasObjective,
fuzzer::{HasObjective, HasScheduler},
inputs::Input,
observers::ObserversTuple,
state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@ -143,7 +143,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
Self::with_timeout_generic(
tuple_list!(),
@ -157,40 +157,6 @@ where
)
}
/// Create a new in mem executor with the default timeout and use batch mode (5 sec)
#[cfg(all(feature = "std", target_os = "linux"))]
pub fn batched_timeout<F, OF>(
harness_fn: &'a mut H,
exposed_executor_state: ES,
observers: OT,
fuzzer: &mut Z,
state: &mut S,
event_mgr: &mut EM,
exec_tmout: Duration,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
{
let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
tuple_list!(),
observers,
fuzzer,
state,
event_mgr,
exec_tmout,
)?;
Ok(Self {
harness_fn,
exposed_executor_state,
inner,
phantom: PhantomData,
})
}
/// Create a new in mem executor.
/// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
/// depending on different corpus or state.
@ -212,7 +178,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
tuple_list!(),
@ -270,7 +236,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
Self::with_timeout_generic(
user_hooks,
@ -284,37 +250,6 @@ where
)
}
/// Create a new in mem executor with the default timeout and use batch mode (5 sec)
#[cfg(all(feature = "std", target_os = "linux"))]
#[expect(clippy::too_many_arguments)]
pub fn batched_timeout_generic<F, OF>(
user_hooks: HT,
harness_fn: HB,
exposed_executor_state: ES,
observers: OT,
fuzzer: &mut Z,
state: &mut S,
event_mgr: &mut EM,
exec_tmout: Duration,
) -> Result<Self, Error>
where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
{
let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
user_hooks, observers, fuzzer, state, event_mgr, exec_tmout,
)?;
Ok(Self {
harness_fn,
exposed_executor_state,
inner,
phantom: PhantomData,
})
}
/// Create a new in mem executor.
/// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
/// depending on different corpus or state.
@ -338,7 +273,7 @@ where
EM: EventFirer<I, S> + EventRestarter<S>,
F: Feedback<EM, I, OT, S>,
OF: Feedback<EM, I, OT, S>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
user_hooks, observers, fuzzer, state, event_mgr, timeout,

View File

@ -443,7 +443,7 @@ where
let meta = MapNoveltiesMetadata::new(novelties);
testcase.add_metadata(meta);
}
let observer = observers.get(&self.map_ref).unwrap().as_ref();
let observer = observers.get(&self.map_ref).expect("MapObserver not found. This is likely because you entered the crash handler with the wrong executor/observer").as_ref();
let initial = observer.initial();
let map_state = state
.named_metadata_map_mut()
@ -472,7 +472,11 @@ where
indices.push(i);
}
let meta = MapIndexesMetadata::new(indices);
testcase.try_add_metadata(meta)?;
if testcase.try_add_metadata(meta).is_err() {
return Err(Error::key_exists(
"MapIndexesMetadata is already attached to this testcase. You should not have more than one observer with tracking.",
));
}
} else {
for (i, value) in observer
.as_iter()

View File

@ -15,10 +15,7 @@ use crate::monitors::stats::PerfFeature;
use crate::{
Error, HasMetadata,
corpus::{Corpus, CorpusId, HasCurrentCorpusId, HasTestcase, Testcase},
events::{
CanSerializeObserver, Event, EventConfig, EventFirer, EventReceiver, ProgressReporter,
RecordSerializationTime, SendExiting,
},
events::{Event, EventConfig, EventFirer, EventReceiver, ProgressReporter, SendExiting},
executors::{Executor, ExitKind, HasObservers},
feedbacks::Feedback,
inputs::Input,
@ -352,7 +349,7 @@ impl<CS, F, IF, OF> HasObjective for StdFuzzer<CS, F, IF, OF> {
impl<CS, EM, F, I, IF, OF, OT, S> ExecutionProcessor<EM, I, OT, S> for StdFuzzer<CS, F, IF, OF>
where
CS: Scheduler<I, S>,
EM: EventFirer<I, S> + CanSerializeObserver<OT>,
EM: EventFirer<I, S>,
F: Feedback<EM, I, OT, S>,
I: Input,
OF: Feedback<EM, I, OT, S>,
@ -455,12 +452,12 @@ where
exit_kind: &ExitKind,
) -> Result<(), Error> {
// Now send off the event
let observers_buf = if exec_res.is_solution()
let observers_buf = if exec_res.is_corpus()
&& manager.should_send()
&& manager.configuration() != EventConfig::AlwaysUnique
{
// TODO set None for fast targets
manager.serialize_observers(observers)?
Some(postcard::to_allocvec(observers)?)
} else {
None
};
@ -537,7 +534,7 @@ where
CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>,
E::Observers: MatchName + ObserversTuple<I, S> + Serialize,
EM: EventFirer<I, S> + CanSerializeObserver<E::Observers>,
EM: EventFirer<I, S>,
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasCorpus<I>
@ -610,7 +607,7 @@ where
CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>,
E::Observers: MatchName + ObserversTuple<I, S> + Serialize,
EM: EventFirer<I, S> + CanSerializeObserver<E::Observers>,
EM: EventFirer<I, S>,
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasCorpus<I>
@ -725,7 +722,7 @@ where
let observers_buf = if manager.configuration() == EventConfig::AlwaysUnique {
None
} else {
manager.serialize_observers(&*observers)?
Some(postcard::to_allocvec(&*observers)?)
};
manager.fire(
state,
@ -758,10 +755,7 @@ where
CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>,
E::Observers: DeserializeOwned + Serialize + ObserversTuple<I, S>,
EM: EventReceiver<I, S>
+ RecordSerializationTime
+ CanSerializeObserver<E::Observers>
+ EventFirer<I, S>,
EM: EventReceiver<I, S> + EventFirer<I, S>,
F: Feedback<EM, I, E::Observers, S>,
I: Input,
OF: Feedback<EM, I, E::Observers, S>,
@ -791,13 +785,8 @@ where
exit_kind,
..
} => {
let start = current_time();
let observers: E::Observers =
postcard::from_bytes(observers_buf.as_ref().unwrap())?;
{
let dur = current_time() - start;
manager.set_deserialization_time(dur);
}
let res = self.evaluate_execution(
state, manager, input, &observers, &exit_kind, false,
)?;
@ -848,7 +837,7 @@ where
CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>,
E::Observers: DeserializeOwned + Serialize + ObserversTuple<I, S>,
EM: CanSerializeObserver<E::Observers> + EventFirer<I, S> + RecordSerializationTime,
EM: EventFirer<I, S>,
I: Input,
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,

View File

@ -73,7 +73,7 @@ pub unsafe fn inproc_qemu_crash_handler<E, EM, ET, F, I, OF, S, Z>(
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCorpus<I> + HasCurrentTestcase<I> + Unpin,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone + Unpin,
{
log::debug!("QEMU signal handler has been triggered (signal {signal})");
@ -179,7 +179,7 @@ pub unsafe fn inproc_qemu_timeout_handler<E, EM, ET, F, I, OF, S, Z>(
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + Unpin + HasCurrentTestcase<I>,
I: Input,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
{
#[cfg(feature = "systemmode")]
unsafe {

View File

@ -29,7 +29,7 @@ use libafl_bolts::{
nonzero,
rands::StdRand,
shmem::{ShMem, ShMemProvider, UnixShMemProvider},
tuples::{Handled, Merge, tuple_list},
tuples::{Merge, tuple_list},
};
use typed_builder::TypedBuilder;
@ -117,7 +117,6 @@ impl ForkserverBytesCoverageSugar<'_> {
// Create an observation channel to keep track of the execution time
let time_observer = TimeObserver::new("time");
let time_ref = time_observer.handle();
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
@ -300,8 +299,8 @@ impl ForkserverBytesCoverageSugar<'_> {
.run_client(&mut run_client)
.cores(self.cores)
.broker_port(self.broker_port)
.remote_broker_addr(self.remote_broker_addr)
.time_ref(Some(time_ref));
.remote_broker_addr(self.remote_broker_addr);
#[cfg(unix)]
let launcher = launcher.stdout_file(Some("/dev/null"));
match launcher.build().launch() {

View File

@ -36,7 +36,7 @@ use libafl_bolts::{
ownedref::OwnedMutSlice,
rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider},
tuples::{Handled, Merge, tuple_list},
tuples::{Merge, tuple_list},
};
use libafl_targets::{CmpLogObserver, edges_map_mut_ptr};
use typed_builder::TypedBuilder;
@ -148,7 +148,6 @@ where
// Create an observation channel to keep track of the execution time
let time_observer = TimeObserver::new("time");
let time_ref = time_observer.handle();
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
@ -355,8 +354,8 @@ where
.run_client(&mut run_client)
.cores(self.cores)
.broker_port(self.broker_port)
.remote_broker_addr(self.remote_broker_addr)
.time_ref(Some(time_ref));
.remote_broker_addr(self.remote_broker_addr);
#[cfg(unix)]
let launcher = launcher.stdout_file(Some("/dev/null"));
match launcher.build().launch() {

View File

@ -35,7 +35,7 @@ use libafl_bolts::{
ownedref::OwnedMutSlice,
rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider},
tuples::{Handled, Merge, tuple_list},
tuples::{Merge, tuple_list},
};
#[cfg(not(any(feature = "mips", feature = "hexagon")))]
use libafl_qemu::modules::CmpLogModule;
@ -150,7 +150,6 @@ where
// Create an observation channel to keep track of the execution time
let time_observer = TimeObserver::new("time");
let time_ref = time_observer.handle();
let mut run_client = |state: Option<_>,
mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
@ -476,8 +475,8 @@ where
.run_client(&mut run_client)
.cores(self.cores)
.broker_port(self.broker_port)
.remote_broker_addr(self.remote_broker_addr)
.time_ref(Some(time_ref));
.remote_broker_addr(self.remote_broker_addr);
#[cfg(unix)]
let launcher = launcher.stdout_file(Some("/dev/null"));

View File

@ -1,7 +1,7 @@
//! Setup asan death callback
use libafl::{
HasFeedback, HasObjective,
HasFeedback, HasObjective, HasScheduler,
events::{EventFirer, EventRestarter},
executors::{Executor, HasObservers, hooks::windows::windows_asan_handler::asan_death_handler},
feedbacks::Feedback,
@ -40,7 +40,7 @@ pub unsafe fn setup_asan_callback<E, EM, F, I, OF, S, Z>(
F: Feedback<EM, I, E::Observers, S>,
OF: Feedback<EM, I, E::Observers, S>,
S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
I: Input + Clone,
{
unsafe {