Remove AdaptiveSerializer, Send off interesting testcase from crash handler, Add clearer error message (#3053)
* clean
* test thread 1
* add some error messages
* mm
* std
* mm
* aa
* pp
* fix
* change fuzzers
* fix

---------

Co-authored-by: Your Name <you@example.com>
parent 758fa7f231
commit 620500e295

.github/workflows/build_and_test.yml (vendored): 4 lines changed
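For context on the first change in the title: the `AdaptiveSerializer` machinery removed by this commit decided, per run, whether the observers were worth re-serializing by weighing accumulated (de)serialization time against execution time (the deleted `serialize_observers_adaptive` appears further down in this diff). The following standalone sketch restates that decision rule in simplified form; it is an illustration only, not a LibAFL API, and the parameter names are simply borrowed from the removed function.

// Simplified sketch of the removed adaptive-serialization decision
// (illustration only; mirrors the logic of the deleted
// `serialize_observers_adaptive`, not a LibAFL API).
use std::time::Duration;

fn should_serialize(
    serialization_time: Duration,
    deserialization_time: Duration,
    exec_time: Duration,
    time_factor: u32,
    should_serialize_cnt: usize,
    serializations_cnt: usize,
    percentage_threshold: usize,
) -> bool {
    // Serializing is "cheap enough" if it costs much less than one execution.
    let mut must_ser =
        (serialization_time + deserialization_time) * time_factor < exec_time;
    // After enough samples, fall back to the observed hit rate.
    if serializations_cnt > 32 {
        must_ser =
            should_serialize_cnt * 100 / serializations_cnt > percentage_threshold;
    }
    // Always serialize the first time, and periodically re-probe.
    serialization_time == Duration::ZERO
        || must_ser
        || serializations_cnt.trailing_zeros() >= 8
}

fn main() {
    let yes = should_serialize(
        Duration::from_micros(5),
        Duration::from_micros(5),
        Duration::from_millis(2),
        2,
        10,
        16,
        80,
    );
    println!("serialize this round: {yes}");
}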
@@ -66,7 +66,7 @@ jobs:
 run: cargo test -- --test-threads 1
 - name: Run tests (Linux)
 if: runner.os != 'Windows'
-run: cargo test
+run: cargo test -- --test-threads 1
 - name: Test libafl no_std
 run: cd libafl && cargo test --no-default-features
 - name: Test libafl_bolts no_std no_alloc
@@ -556,8 +556,6 @@ jobs:
 run: cargo build --verbose
 - name: Increase map sizes
 run: ./scripts/shmem_limits_macos.sh
-- name: Run Tests
-run: cargo test
 - name: Clippy
 run: cargo +nightly clippy --tests --all --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test
 
@@ -119,7 +119,6 @@ impl Fuzzer {
 shmem_provider.clone(),
 broker_port,
 EventConfig::AlwaysUnique,
-None,
 Some(StateRestorer::new(
 shmem_provider.new_shmem(0x1000).unwrap(),
 )),
@@ -115,7 +115,6 @@ impl Fuzzer {
 shmem_provider.clone(),
 broker_port,
 EventConfig::AlwaysUnique,
-None,
 Some(StateRestorer::new(
 shmem_provider.new_shmem(0x1000).unwrap(),
 )),
@@ -260,8 +260,7 @@ fn fuzz(
 let mut feedback = feedback_or!(
 // New maximization map feedback linked to the edges observer and the feedback state
 map_feedback,
-// Time feedback, this one does not need a feedback state
-TimeFeedback::new(&time_observer)
+CrashFeedback::new(),
 );
 
 // A feedback to choose if an input is a solution or not
@@ -391,9 +390,9 @@ fn fuzz(
 #[cfg(unix)]
 {
 let null_fd = file_null.as_raw_fd();
-dup2(null_fd, io::stdout().as_raw_fd())?;
+// dup2(null_fd, io::stdout().as_raw_fd())?;
 if std::env::var("LIBAFL_FUZZBENCH_DEBUG").is_err() {
-dup2(null_fd, io::stderr().as_raw_fd())?;
+// dup2(null_fd, io::stderr().as_raw_fd())?;
 }
 }
 // reopen file to make sure we're at the end
@@ -216,18 +216,6 @@ pub extern "C" fn libafl_main() {
 ExitKind::Ok
 };
 
-// Create the executor for an in-process function with one observer for edge coverage and one for the execution time
-#[cfg(target_os = "linux")]
-let mut executor = InProcessExecutor::batched_timeout(
-&mut harness,
-tuple_list!(edges_observer, time_observer),
-&mut fuzzer,
-&mut state,
-&mut mgr,
-opt.timeout,
-)?;
-
-#[cfg(not(target_os = "linux"))]
 let mut executor = InProcessExecutor::with_timeout(
 &mut harness,
 tuple_list!(edges_observer, time_observer),
@@ -218,18 +218,6 @@ pub extern "C" fn libafl_main() {
 ExitKind::Ok
 };
 
-// Create the executor for an in-process function with one observer for edge coverage and one for the execution time
-#[cfg(target_os = "linux")]
-let mut executor = InProcessExecutor::batched_timeout(
-&mut harness,
-tuple_list!(edges_observer, time_observer),
-&mut fuzzer,
-&mut state,
-&mut restarting_mgr,
-opt.timeout,
-)?;
-
-#[cfg(not(target_os = "linux"))]
 let mut executor = InProcessExecutor::with_timeout(
 &mut harness,
 tuple_list!(edges_observer, time_observer),
@@ -232,18 +232,6 @@ pub extern "C" fn libafl_main() {
 ExitKind::Ok
 };
 
-// Create the executor for an in-process function with one observer for edge coverage and one for the execution time
-#[cfg(target_os = "linux")]
-let mut executor = InProcessExecutor::batched_timeout(
-&mut harness,
-tuple_list!(edges_observer, time_observer),
-&mut fuzzer,
-&mut state,
-&mut mgr,
-opt.timeout,
-)?;
-
-#[cfg(not(target_os = "linux"))]
 let mut executor = InProcessExecutor::with_timeout(
 &mut harness,
 tuple_list!(edges_observer, time_observer),
@@ -7,7 +7,7 @@
 // 3. The "main evaluator", the evaluator node that will evaluate all the testcases pass by the centralized event manager to see if the testcases are worth propagating
 // 4. The "main broker", the gathers the stats from the fuzzer clients and broadcast the newly found testcases from the main evaluator.
 
-use alloc::{string::String, vec::Vec};
+use alloc::string::String;
 use core::{fmt::Debug, marker::PhantomData, time::Duration};
 use std::process;
 
@@ -15,28 +15,25 @@ use libafl_bolts::{
 ClientId,
 llmp::{LlmpClient, LlmpClientDescription, Tag},
 shmem::{ShMem, ShMemProvider},
-tuples::{Handle, MatchNameRef},
 };
 #[cfg(feature = "llmp_compression")]
 use libafl_bolts::{
 compress::GzipCompressor,
 llmp::{LLMP_FLAG_COMPRESSED, LLMP_FLAG_INITIALIZED},
 };
-use serde::Serialize;
 
-use super::{AwaitRestartSafe, RecordSerializationTime};
+use super::AwaitRestartSafe;
 #[cfg(feature = "llmp_compression")]
 use crate::events::llmp::COMPRESS_THRESHOLD;
 use crate::{
 Error,
 common::HasMetadata,
 events::{
-AdaptiveSerializer, CanSerializeObserver, Event, EventConfig, EventFirer, EventManagerId,
-EventReceiver, EventRestarter, HasEventManagerId, LogSeverity, ProgressReporter,
-SendExiting, serialize_observers_adaptive, std_maybe_report_progress, std_report_progress,
+Event, EventConfig, EventFirer, EventManagerId, EventReceiver, EventRestarter,
+HasEventManagerId, LogSeverity, ProgressReporter, SendExiting, std_maybe_report_progress,
+std_report_progress,
 },
 inputs::Input,
-observers::TimeObserver,
 state::{HasExecutions, HasLastReportTime, MaybeHasClientPerfMonitor, Stoppable},
 };
 
@@ -50,7 +47,6 @@ pub struct CentralizedEventManager<EM, I, S, SHM, SP> {
 client: LlmpClient<SHM, SP>,
 #[cfg(feature = "llmp_compression")]
 compressor: GzipCompressor,
-time_ref: Option<Handle<TimeObserver>>,
 is_main: bool,
 phantom: PhantomData<(I, S)>,
 }
@@ -93,7 +89,6 @@ impl CentralizedEventManagerBuilder {
 self,
 inner: EM,
 client: LlmpClient<SP::ShMem, SP>,
-time_obs: Option<Handle<TimeObserver>>,
 ) -> Result<CentralizedEventManager<EM, I, S, SP::ShMem, SP>, Error>
 where
 SP: ShMemProvider,
@@ -103,7 +98,6 @@ impl CentralizedEventManagerBuilder {
 client,
 #[cfg(feature = "llmp_compression")]
 compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD),
-time_ref: time_obs,
 is_main: self.is_main,
 phantom: PhantomData,
 })
@@ -118,14 +112,13 @@ impl CentralizedEventManagerBuilder {
 inner: EM,
 shmem_provider: SP,
 port: u16,
-time_obs: Option<Handle<TimeObserver>>,
 ) -> Result<CentralizedEventManager<EM, I, S, SHM, SP>, Error>
 where
 SHM: ShMem,
 SP: ShMemProvider<ShMem = SHM>,
 {
 let client = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
-Self::build_from_client(self, inner, client, time_obs)
+Self::build_from_client(self, inner, client)
 }
 
 /// If a client respawns, it may reuse the existing connection, previously
@@ -135,14 +128,13 @@ impl CentralizedEventManagerBuilder {
 inner: EM,
 shmem_provider: SP,
 env_name: &str,
-time_obs: Option<Handle<TimeObserver>>,
 ) -> Result<CentralizedEventManager<EM, I, S, SHM, SP>, Error>
 where
 SHM: ShMem,
 SP: ShMemProvider<ShMem = SHM>,
 {
 let client = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
-Self::build_from_client(self, inner, client, time_obs)
+Self::build_from_client(self, inner, client)
 }
 
 /// Create an existing client from description
@@ -151,59 +143,13 @@ impl CentralizedEventManagerBuilder {
 inner: EM,
 shmem_provider: SP,
 description: &LlmpClientDescription,
-time_obs: Option<Handle<TimeObserver>>,
 ) -> Result<CentralizedEventManager<EM, I, S, SHM, SP>, Error>
 where
 SHM: ShMem,
 SP: ShMemProvider<ShMem = SHM>,
 {
 let client = LlmpClient::existing_client_from_description(shmem_provider, description)?;
-Self::build_from_client(self, inner, client, time_obs)
-}
-}
-
-impl<EM, I, S, SHM, SP> RecordSerializationTime for CentralizedEventManager<EM, I, S, SHM, SP>
-where
-EM: RecordSerializationTime,
-{
-/// Set the deserialization time (mut)
-fn set_deserialization_time(&mut self, dur: Duration) {
-self.inner.set_deserialization_time(dur);
-}
-}
-
-impl<EM, I, S, SHM, SP> AdaptiveSerializer for CentralizedEventManager<EM, I, S, SHM, SP>
-where
-EM: AdaptiveSerializer,
-{
-fn serialization_time(&self) -> Duration {
-self.inner.serialization_time()
-}
-fn deserialization_time(&self) -> Duration {
-self.inner.deserialization_time()
-}
-fn serializations_cnt(&self) -> usize {
-self.inner.serializations_cnt()
-}
-fn should_serialize_cnt(&self) -> usize {
-self.inner.should_serialize_cnt()
-}
-
-fn serialization_time_mut(&mut self) -> &mut Duration {
-self.inner.serialization_time_mut()
-}
-fn deserialization_time_mut(&mut self) -> &mut Duration {
-self.inner.deserialization_time_mut()
-}
-fn serializations_cnt_mut(&mut self) -> &mut usize {
-self.inner.serializations_cnt_mut()
-}
-fn should_serialize_cnt_mut(&mut self) -> &mut usize {
-self.inner.should_serialize_cnt_mut()
-}
-
-fn time_ref(&self) -> &Option<Handle<TimeObserver>> {
-&self.time_ref
+Self::build_from_client(self, inner, client)
 }
 }
 
@@ -278,21 +224,6 @@ where
 }
 }
 
-impl<EM, I, OT, S, SHM, SP> CanSerializeObserver<OT> for CentralizedEventManager<EM, I, S, SHM, SP>
-where
-EM: AdaptiveSerializer,
-OT: MatchNameRef + Serialize,
-{
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
-serialize_observers_adaptive::<EM, OT>(
-&mut self.inner,
-observers,
-4, // twice as much as the normal llmp em's value cuz it does this job twice.
-80,
-)
-}
-}
-
 impl<EM, I, S, SHM, SP> SendExiting for CentralizedEventManager<EM, I, S, SHM, SP>
 where
 EM: SendExiting,
@@ -23,7 +23,7 @@ use core::{
 use libafl_bolts::{
 core_affinity::{CoreId, Cores},
 shmem::ShMemProvider,
-tuples::{Handle, tuple_list},
+tuples::tuple_list,
 };
 use serde::{Deserialize, Serialize, de::DeserializeOwned};
 use typed_builder::TypedBuilder;
@@ -58,7 +58,6 @@ use crate::{
 llmp::{LlmpRestartingEventManager, LlmpShouldSaveState, ManagerKind, RestartingMgr},
 },
 monitors::Monitor,
-observers::TimeObserver,
 };
 
 /// The (internal) `env` that indicates we're running as client.
@@ -172,9 +171,6 @@ pub struct Launcher<'a, CF, MT, SP> {
 /// clusters.
 #[builder(default = None)]
 remote_broker_addr: Option<SocketAddr>,
-/// The time observer for addaptive serialization
-#[builder(default = None)]
-time_ref: Option<Handle<TimeObserver>>,
 /// If this launcher should spawn a new `broker` on `[Self::broker_port]` (default).
 /// The reason you may not want this is, if you already have a [`Launcher`]
 /// with a different configuration (for the same target) running on this machine.
@@ -325,7 +321,6 @@ where
 .configuration(self.configuration)
 .serialize_state(self.serialize_state)
 .hooks(hooks);
-let builder = builder.time_ref(self.time_ref.clone());
 let (state, mgr) = builder.build().launch()?;
 
 return (self.run_client.take().unwrap())(
@@ -354,8 +349,6 @@ where
 .serialize_state(self.serialize_state)
 .hooks(hooks);
 
-let builder = builder.time_ref(self.time_ref.clone());
-
 builder.build().launch()?;
 
 // Broker exited. kill all clients.
@@ -417,8 +410,6 @@ where
 .serialize_state(self.serialize_state)
 .hooks(hooks);
 
-let builder = builder.time_ref(self.time_ref.clone());
-
 let (state, mgr) = builder.build().launch()?;
 
 return (self.run_client.take().unwrap())(state, mgr, client_description);
@@ -523,8 +514,6 @@ where
 .serialize_state(self.serialize_state)
 .hooks(hooks);
 
-let builder = builder.time_ref(self.time_ref.clone());
-
 builder.build().launch()?;
 
 //broker exited. kill all clients.
@@ -573,8 +562,6 @@ pub struct CentralizedLauncher<'a, CF, MF, MT, SP> {
 #[builder(default = 1338_u16)]
 centralized_broker_port: u16,
 /// The time observer by which to adaptively serialize
-#[builder(default = None)]
-time_obs: Option<Handle<TimeObserver>>,
 /// The list of cores to run on
 cores: &'a Cores,
 /// The number of clients to spawn on each core
@@ -679,8 +666,6 @@ where
 .serialize_state(centralized_launcher.serialize_state)
 .hooks(tuple_list!());
 
-let builder = builder.time_ref(centralized_launcher.time_obs.clone());
-
 builder.build().launch()
 };
 
@@ -803,7 +788,6 @@ where
 // tuple_list!(multi_machine_event_manager_hook.take().unwrap()),
 self.shmem_provider.clone(),
 self.centralized_broker_port,
-self.time_obs.clone(),
 )?;
 
 self.main_run_client.take().unwrap()(
@@ -829,7 +813,6 @@ where
 mgr,
 self.shmem_provider.clone(),
 self.centralized_broker_port,
-self.time_obs.clone(),
 )?;
 
 self.secondary_run_client.take().unwrap()(
@@ -41,7 +41,7 @@ use libafl_bolts::{
 os::CTRL_C_EXIT,
 shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider},
 staterestore::StateRestorer,
-tuples::{Handle, MatchNameRef, tuple_list},
+tuples::tuple_list,
 };
 use serde::{Serialize, de::DeserializeOwned};
 use typed_builder::TypedBuilder;
@@ -54,16 +54,14 @@ use crate::{
 Error,
 common::HasMetadata,
 events::{
-_LLMP_TAG_EVENT_TO_BROKER, AdaptiveSerializer, AwaitRestartSafe, CanSerializeObserver,
-Event, EventConfig, EventFirer, EventManagerHooksTuple, EventManagerId, EventReceiver,
-EventRestarter, HasEventManagerId, LLMP_TAG_EVENT_TO_BOTH, LlmpShouldSaveState,
-ProgressReporter, RecordSerializationTime, SendExiting, StdLlmpEventHook,
-launcher::ClientDescription, serialize_observers_adaptive, std_maybe_report_progress,
+_LLMP_TAG_EVENT_TO_BROKER, AwaitRestartSafe, Event, EventConfig, EventFirer,
+EventManagerHooksTuple, EventManagerId, EventReceiver, EventRestarter, HasEventManagerId,
+LLMP_TAG_EVENT_TO_BOTH, LlmpShouldSaveState, ProgressReporter, SendExiting,
+StdLlmpEventHook, launcher::ClientDescription, std_maybe_report_progress,
 std_report_progress,
 },
 inputs::Input,
 monitors::Monitor,
-observers::TimeObserver,
 state::{
 HasCurrentStageId, HasCurrentTestcase, HasExecutions, HasImported, HasLastReportTime,
 HasSolutions, MaybeHasClientPerfMonitor, Stoppable,
@@ -87,11 +85,6 @@ pub struct LlmpRestartingEventManager<EMH, I, S, SHM, SP> {
 /// A node will not re-use the observer values sent over LLMP
 /// from nodes with other configurations.
 configuration: EventConfig,
-serialization_time: Duration,
-deserialization_time: Duration,
-serializations_cnt: usize,
-should_serialize_cnt: usize,
-pub(crate) time_ref: Option<Handle<TimeObserver>>,
 event_buffer: Vec<u8>,
 /// The staterestorer to serialize the state for the next runner
 /// If this is Some, this event manager can restart. Else it does not.
@@ -101,50 +94,6 @@ pub struct LlmpRestartingEventManager<EMH, I, S, SHM, SP> {
 phantom: PhantomData<(I, S)>,
 }
 
-impl<EMH, I, S, SHM, SP> RecordSerializationTime for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
-where
-SHM: ShMem,
-{
-fn set_deserialization_time(&mut self, dur: Duration) {
-self.deserialization_time = dur;
-}
-}
-
-impl<EMH, I, S, SHM, SP> AdaptiveSerializer for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
-where
-SHM: ShMem,
-{
-fn serialization_time(&self) -> Duration {
-self.serialization_time
-}
-fn deserialization_time(&self) -> Duration {
-self.deserialization_time
-}
-fn serializations_cnt(&self) -> usize {
-self.serializations_cnt
-}
-fn should_serialize_cnt(&self) -> usize {
-self.should_serialize_cnt
-}
-
-fn serialization_time_mut(&mut self) -> &mut Duration {
-&mut self.serialization_time
-}
-fn deserialization_time_mut(&mut self) -> &mut Duration {
-&mut self.deserialization_time
-}
-fn serializations_cnt_mut(&mut self) -> &mut usize {
-&mut self.serializations_cnt
-}
-fn should_serialize_cnt_mut(&mut self) -> &mut usize {
-&mut self.should_serialize_cnt
-}
-
-fn time_ref(&self) -> &Option<Handle<TimeObserver>> {
-&self.time_ref
-}
-}
-
 impl<EMH, I, S, SHM, SP> ProgressReporter<S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
 where
 I: Serialize,
@@ -237,18 +186,6 @@ where
 }
 }
 
-#[cfg(feature = "std")]
-impl<EMH, I, OT, S, SHM, SP> CanSerializeObserver<OT>
-for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
-where
-OT: MatchNameRef + Serialize,
-SHM: ShMem,
-{
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
-serialize_observers_adaptive::<Self, OT>(self, observers, 2, 80)
-}
-}
-
 impl<EMH, I, S, SHM, SP> EventRestarter<S> for LlmpRestartingEventManager<EMH, I, S, SHM, SP>
 where
 S: Serialize + HasCurrentStageId,
@@ -477,7 +414,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 self,
 llmp: LlmpClient<SHM, SP>,
 configuration: EventConfig,
-time_ref: Option<Handle<TimeObserver>>,
 staterestorer: Option<StateRestorer<SHM, SP>>,
 ) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error> {
 Ok(LlmpRestartingEventManager {
@@ -488,11 +424,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 #[cfg(feature = "llmp_compression")]
 compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD),
 configuration,
-serialization_time: Duration::ZERO,
-deserialization_time: Duration::ZERO,
-serializations_cnt: 0,
-should_serialize_cnt: 0,
-time_ref,
 event_buffer: Vec::with_capacity(INITIAL_EVENT_BUFFER_SIZE),
 staterestorer,
 save_state: LlmpShouldSaveState::OnRestart,
@@ -508,7 +439,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 shmem_provider: SP,
 port: u16,
 configuration: EventConfig,
-time_ref: Option<Handle<TimeObserver>>,
 staterestorer: Option<StateRestorer<SHM, SP>>,
 ) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error>
 where
@@ -516,7 +446,7 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 SP: ShMemProvider<ShMem = SHM>,
 {
 let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?;
-Self::build_from_client(self, llmp, configuration, time_ref, staterestorer)
+Self::build_from_client(self, llmp, configuration, staterestorer)
 }
 
 /// If a client respawns, it may reuse the existing connection, previously
@@ -527,7 +457,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 shmem_provider: SP,
 env_name: &str,
 configuration: EventConfig,
-time_ref: Option<Handle<TimeObserver>>,
 staterestorer: Option<StateRestorer<SHM, SP>>,
 ) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error>
 where
@@ -535,7 +464,7 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 SP: ShMemProvider<ShMem = SHM>,
 {
 let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?;
-Self::build_from_client(self, llmp, configuration, time_ref, staterestorer)
+Self::build_from_client(self, llmp, configuration, staterestorer)
 }
 
 /// Create an existing client from description
@@ -544,7 +473,6 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 shmem_provider: SP,
 description: &LlmpClientDescription,
 configuration: EventConfig,
-time_ref: Option<Handle<TimeObserver>>,
 staterestorer: Option<StateRestorer<SHM, SP>>,
 ) -> Result<LlmpRestartingEventManager<EMH, I, S, SHM, SP>, Error>
 where
@@ -552,7 +480,7 @@ impl<EMH> LlmpEventManagerBuilder<EMH> {
 SP: ShMemProvider<ShMem = SHM>,
 {
 let llmp = LlmpClient::existing_client_from_description(shmem_provider, description)?;
-Self::build_from_client(self, llmp, configuration, time_ref, staterestorer)
+Self::build_from_client(self, llmp, configuration, staterestorer)
 }
 }
 
@@ -695,7 +623,6 @@ pub fn setup_restarting_mgr_std_adaptive<I, MT, S>(
 monitor: MT,
 broker_port: u16,
 configuration: EventConfig,
-time_obs: Handle<TimeObserver>,
 ) -> Result<
 (
 Option<S>,
@@ -714,7 +641,6 @@ where
 .broker_port(broker_port)
 .configuration(configuration)
 .hooks(tuple_list!())
-.time_ref(Some(time_obs))
 .build()
 .launch()
 }
@@ -756,8 +682,6 @@ pub struct RestartingMgr<EMH, I, MT, S, SP> {
 serialize_state: LlmpShouldSaveState,
 /// The hooks passed to event manager:
 hooks: EMH,
-#[builder(default = None)]
-time_ref: Option<Handle<TimeObserver>>,
 #[builder(setter(skip), default = PhantomData)]
 phantom_data: PhantomData<(EMH, I, S)>,
 }
@@ -828,12 +752,7 @@ where
 let mgr: LlmpRestartingEventManager<EMH, I, S, SP::ShMem, SP> =
 LlmpEventManagerBuilder::builder()
 .hooks(self.hooks)
-.build_from_client(
-client,
-self.configuration,
-self.time_ref.clone(),
-None,
-)?;
+.build_from_client(client, self.configuration, None)?;
 (mgr, None)
 }
 }
@@ -860,7 +779,6 @@ where
 self.shmem_provider.clone(),
 self.broker_port,
 self.configuration,
-self.time_ref.clone(),
 None,
 )?;
 
@@ -1008,7 +926,6 @@ where
 new_shmem_provider,
 &mgr_description,
 self.configuration,
-self.time_ref.clone(),
 Some(staterestorer),
 )?,
 )
@@ -1024,7 +941,6 @@ where
 new_shmem_provider,
 _ENV_FUZZER_BROKER_CLIENT_INITIAL,
 self.configuration,
-self.time_ref.clone(),
 Some(staterestorer),
 )?,
 )
@@ -1056,7 +972,7 @@ mod tests {
 rands::StdRand,
 shmem::{ShMemProvider, StdShMem, StdShMemProvider},
 staterestore::StateRestorer,
-tuples::{Handled, tuple_list},
+tuples::tuple_list,
 };
 use serial_test::serial;
 
@@ -1089,7 +1005,6 @@ mod tests {
 let rand = StdRand::with_seed(0);
 
 let time = TimeObserver::new("time");
-let time_ref = time.handle();
 
 let mut corpus = InMemoryCorpus::<BytesInput>::new();
 let testcase = Testcase::new(vec![0; 4].into());
@@ -1118,7 +1033,7 @@ mod tests {
 }
 
 let mut llmp_mgr = LlmpEventManagerBuilder::builder()
-.build_from_client(llmp_client, "fuzzer".into(), Some(time_ref.clone()), None)
+.build_from_client(llmp_client, "fuzzer".into(), None)
 .unwrap();
 
 let scheduler = RandScheduler::new();
@@ -1170,7 +1085,6 @@ mod tests {
 shmem_provider,
 &mgr_description,
 "fuzzer".into(),
-Some(time_ref),
 None,
 )
 .unwrap();
@@ -33,13 +33,11 @@ use ahash::RandomState;
 pub use broker_hooks::*;
 #[cfg(feature = "std")]
 pub use launcher::*;
+use libafl_bolts::current_time;
 #[cfg(all(unix, feature = "std"))]
 use libafl_bolts::os::CTRL_C_EXIT;
 #[cfg(all(unix, feature = "std"))]
 use libafl_bolts::os::unix_signals::{Signal, SignalHandler, siginfo_t, ucontext_t};
-#[cfg(feature = "std")]
-use libafl_bolts::tuples::MatchNameRef;
-use libafl_bolts::{current_time, tuples::Handle};
 use serde::{Deserialize, Serialize};
 #[cfg(feature = "std")]
 use uuid::Uuid;
@@ -108,7 +106,7 @@ pub struct EventManagerId(
 use crate::events::multi_machine::NodeId;
 #[cfg(feature = "introspection")]
 use crate::monitors::stats::ClientPerfStats;
-use crate::{observers::TimeObserver, state::HasCurrentStageId};
+use crate::state::HasCurrentStageId;
 
 /// The log event severity
 #[derive(Serialize, Deserialize, Debug, Clone, Copy)]
@@ -409,58 +407,6 @@ pub trait EventFirer<I, S> {
 fn should_send(&self) -> bool;
 }
 
-/// Serialize all observers for this type and manager
-/// Serialize the observer using the `time_factor` and `percentage_threshold`.
-/// These parameters are unique to each of the different types of `EventManager`
-#[cfg(feature = "std")]
-pub(crate) fn serialize_observers_adaptive<EM, OT>(
-manager: &mut EM,
-observers: &OT,
-time_factor: u32,
-percentage_threshold: usize,
-) -> Result<Option<Vec<u8>>, Error>
-where
-EM: AdaptiveSerializer,
-OT: MatchNameRef + Serialize,
-{
-match manager.time_ref() {
-Some(t) => {
-let exec_time = observers
-.get(t)
-.map(|o| o.last_runtime().unwrap_or(Duration::ZERO))
-.unwrap();
-
-let mut must_ser = (manager.serialization_time() + manager.deserialization_time())
-* time_factor
-< exec_time;
-if must_ser {
-*manager.should_serialize_cnt_mut() += 1;
-}
-
-if manager.serializations_cnt() > 32 {
-must_ser = (manager.should_serialize_cnt() * 100 / manager.serializations_cnt())
-> percentage_threshold;
-}
-
-if manager.serialization_time() == Duration::ZERO
-|| must_ser
-|| manager.serializations_cnt().trailing_zeros() >= 8
-{
-let start = current_time();
-let ser = postcard::to_allocvec(observers)?;
-*manager.serialization_time_mut() = current_time() - start;
-
-*manager.serializations_cnt_mut() += 1;
-Ok(Some(ser))
-} else {
-*manager.serializations_cnt_mut() += 1;
-Ok(None)
-}
-}
-None => Ok(None),
-}
-}
-
 /// Default implementation of [`ProgressReporter::maybe_report_progress`] for implementors with the
 /// given constraints
 pub fn std_maybe_report_progress<PR, S>(
@@ -571,12 +517,6 @@ where
 Ok(())
 }
 
-/// The class that implements this must be able to serialize an observer.
-pub trait CanSerializeObserver<OT> {
-/// Do serialize the observer
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error>;
-}
-
 /// Send that we're about to exit
 pub trait SendExiting {
 /// Send information that this client is exiting.
@@ -625,8 +565,6 @@ impl NopEventManager {
 }
 }
 
-impl RecordSerializationTime for NopEventManager {}
-
 impl<I, S> EventFirer<I, S> for NopEventManager {
 fn should_send(&self) -> bool {
 true
@@ -673,15 +611,6 @@ impl<I, S> EventReceiver<I, S> for NopEventManager {
 }
 }
 
-impl<OT> CanSerializeObserver<OT> for NopEventManager
-where
-OT: Serialize,
-{
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
-Ok(Some(postcard::to_allocvec(observers)?))
-}
-}
-
 impl<S> ProgressReporter<S> for NopEventManager {
 fn maybe_report_progress(
 &mut self,
@@ -710,8 +639,6 @@ pub struct MonitorTypedEventManager<EM, M> {
 phantom: PhantomData<M>,
 }
 
-impl<EM, M> RecordSerializationTime for MonitorTypedEventManager<EM, M> {}
-
 impl<EM, M> MonitorTypedEventManager<EM, M> {
 /// Creates a new `EventManager` that wraps another manager, but captures a `monitor` type as well.
 #[must_use]
@@ -723,15 +650,6 @@ impl<EM, M> MonitorTypedEventManager<EM, M> {
 }
 }
 
-impl<EM, M, OT> CanSerializeObserver<OT> for MonitorTypedEventManager<EM, M>
-where
-OT: Serialize,
-{
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
-Ok(Some(postcard::to_allocvec(observers)?))
-}
-}
-
 impl<EM, I, M, S> EventFirer<I, S> for MonitorTypedEventManager<EM, M>
 where
 EM: EventFirer<I, S>,
@@ -837,36 +755,6 @@ where
 }
 }
 
-/// Record the deserialization time for this event manager
-pub trait RecordSerializationTime {
-/// Set the deserialization time (mut)
-fn set_deserialization_time(&mut self, _dur: Duration) {}
-}
-
-/// Collected stats to decide if observers must be serialized or not
-pub trait AdaptiveSerializer {
-/// Expose the collected observers serialization time
-fn serialization_time(&self) -> Duration;
-/// Expose the collected observers deserialization time
-fn deserialization_time(&self) -> Duration;
-/// How many times observers were serialized
-fn serializations_cnt(&self) -> usize;
-/// How many times shoukd have been serialized an observer
-fn should_serialize_cnt(&self) -> usize;
-
-/// Expose the collected observers serialization time (mut)
-fn serialization_time_mut(&mut self) -> &mut Duration;
-/// Expose the collected observers deserialization time (mut)
-fn deserialization_time_mut(&mut self) -> &mut Duration;
-/// How many times observers were serialized (mut)
-fn serializations_cnt_mut(&mut self) -> &mut usize;
-/// How many times shoukd have been serialized an observer (mut)
-fn should_serialize_cnt_mut(&mut self) -> &mut usize;
-
-/// A [`Handle`] to the time observer to determine the `time_factor`
-fn time_ref(&self) -> &Option<Handle<TimeObserver>>;
-}
-
 #[cfg(test)]
 mod tests {
 
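The non-adaptive `CanSerializeObserver` implementations removed in this commit simply postcard-encoded the observers (`Ok(Some(postcard::to_allocvec(observers)?))`). A minimal standalone sketch of that encoding follows, with a toy observer type standing in for an observers tuple; it assumes `serde` (with derive) and `postcard` (with alloc support) as dependencies and is not the LibAFL API.

use serde::Serialize;

// Toy stand-in for an observers tuple; not a LibAFL type.
#[derive(Serialize)]
struct ToyObservers {
    edges_hit: u64,
}

// Mirrors the body of the removed non-adaptive impls:
// Ok(Some(postcard::to_allocvec(observers)?))
fn serialize_observers(observers: &ToyObservers) -> Result<Vec<u8>, postcard::Error> {
    postcard::to_allocvec(observers)
}

fn main() {
    let obs = ToyObservers { edges_hit: 42 };
    let bytes = serialize_observers(&obs).expect("postcard serialization failed");
    println!("serialized {} bytes", bytes.len());
}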
@@ -18,19 +18,19 @@ use libafl_bolts::{
 shmem::{ShMem, ShMemProvider},
 staterestore::StateRestorer,
 };
+#[cfg(feature = "std")]
 use serde::Serialize;
 #[cfg(feature = "std")]
 use serde::de::DeserializeOwned;
 
-use super::{AwaitRestartSafe, ProgressReporter, RecordSerializationTime, std_on_restart};
+use super::{AwaitRestartSafe, ProgressReporter, std_on_restart};
 #[cfg(all(unix, feature = "std", not(miri)))]
 use crate::events::EVENTMGR_SIGHANDLER_STATE;
 use crate::{
 Error, HasMetadata,
 events::{
-BrokerEventResult, CanSerializeObserver, Event, EventFirer, EventManagerId, EventReceiver,
-EventRestarter, HasEventManagerId, SendExiting, std_maybe_report_progress,
-std_report_progress,
+BrokerEventResult, Event, EventFirer, EventManagerId, EventReceiver, EventRestarter,
+HasEventManagerId, SendExiting, std_maybe_report_progress, std_report_progress,
 },
 monitors::{Monitor, stats::ClientStatsManager},
 state::{
@@ -73,8 +73,6 @@ where
 }
 }
 
-impl<I, MT, S> RecordSerializationTime for SimpleEventManager<I, MT, S> {}
-
 impl<I, MT, S> EventFirer<I, S> for SimpleEventManager<I, MT, S>
 where
 I: Debug,
@@ -143,15 +141,6 @@ where
 }
 }
 
-impl<I, MT, OT, S> CanSerializeObserver<OT> for SimpleEventManager<I, MT, S>
-where
-OT: Serialize,
-{
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
-Ok(Some(postcard::to_allocvec(observers)?))
-}
-}
-
 impl<I, MT, S> ProgressReporter<S> for SimpleEventManager<I, MT, S>
 where
 I: Debug,
@@ -295,12 +284,6 @@ pub struct SimpleRestartingEventManager<I, MT, S, SHM, SP> {
 staterestorer: StateRestorer<SHM, SP>,
 }
 
-#[cfg(feature = "std")]
-impl<I, MT, S, SHM, SP> RecordSerializationTime
-for SimpleRestartingEventManager<I, MT, S, SHM, SP>
-{
-}
-
 #[cfg(feature = "std")]
 impl<I, MT, S, SHM, SP> EventFirer<I, S> for SimpleRestartingEventManager<I, MT, S, SHM, SP>
 where
@@ -339,17 +322,6 @@ where
 }
 }
 
-#[cfg(feature = "std")]
-impl<I, MT, OT, S, SHM, SP> CanSerializeObserver<OT>
-for SimpleRestartingEventManager<I, MT, S, SHM, SP>
-where
-OT: Serialize,
-{
-fn serialize_observers(&mut self, observers: &OT) -> Result<Option<Vec<u8>>, Error> {
-Ok(Some(postcard::to_allocvec(observers)?))
-}
-}
-
 #[cfg(feature = "std")]
 impl<I, MT, S, SHM, SP> SendExiting for SimpleRestartingEventManager<I, MT, S, SHM, SP>
 where
@@ -24,7 +24,7 @@ use windows::Win32::System::Threading::{CRITICAL_SECTION, PTP_TIMER};
 #[cfg(feature = "std")]
 use crate::executors::hooks::timer::TimerStruct;
 use crate::{
-Error, HasFeedback, HasObjective,
+Error, HasFeedback, HasObjective, HasScheduler,
 events::{EventFirer, EventRestarter},
 executors::{Executor, HasObservers, hooks::ExecutorHook, inprocess::HasInProcessHooks},
 feedbacks::Feedback,
@@ -240,7 +240,7 @@ impl<I, S> InProcessHooks<I, S> {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 // # Safety
@@ -286,7 +286,7 @@ impl<I, S> InProcessHooks<I, S> {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let ret;
 #[cfg(feature = "std")]
@@ -351,7 +351,7 @@ impl<I, S> InProcessHooks<I, S> {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 #[cfg_attr(miri, allow(unused_variables))]
 let ret = Self {
@@ -489,7 +489,7 @@ impl InProcessExecutorHandlerData {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCorpus<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 unsafe {
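The repeated `+ HasScheduler<I, S>` bounds above and below are what the "send off interesting testcase from crash handler" part of the title relies on: the crash and timeout handlers can now reach the fuzzer's scheduler, not only its feedback and objective. The toy sketch below illustrates that pattern with hypothetical traits; it is not the real LibAFL code.

// Toy traits standing in for the real bounds; illustration only.
trait HasObjective {
    fn is_solution(&self, input: &[u8]) -> bool;
}
trait HasScheduler {
    fn send_off(&mut self, input: &[u8]);
}

// With the extra scheduler bound, the handler can forward the crashing
// input instead of only recording it as a solution.
fn on_crash<Z: HasObjective + HasScheduler>(fuzzer: &mut Z, input: &[u8]) {
    if fuzzer.is_solution(input) {
        // ... persist the solution, as before ...
    }
    fuzzer.send_off(input);
}

struct ToyFuzzer;
impl HasObjective for ToyFuzzer {
    fn is_solution(&self, _input: &[u8]) -> bool {
        true
    }
}
impl HasScheduler for ToyFuzzer {
    fn send_off(&mut self, input: &[u8]) {
        println!("forwarding {} bytes to the scheduler", input.len());
    }
}

fn main() {
    on_crash(&mut ToyFuzzer, b"crashing input");
}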
@@ -12,7 +12,7 @@ pub mod unix_signal_handler {
 use libc::siginfo_t;
 
 use crate::{
-HasFeedback,
+HasFeedback, HasScheduler,
 events::{EventFirer, EventRestarter},
 executors::{
 Executor, ExitKind, HasObservers, common_signals,
@@ -95,7 +95,7 @@ pub mod unix_signal_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 let old_hook = panic::take_hook();
@@ -154,7 +154,7 @@ pub mod unix_signal_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 unsafe {
@@ -213,7 +213,7 @@ pub mod unix_signal_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 unsafe {
@@ -16,7 +16,7 @@ pub mod windows_asan_handler {
 inprocess::run_observers_and_save_state,
 },
 feedbacks::Feedback,
-fuzzer::HasObjective,
+fuzzer::{HasObjective, HasScheduler},
 inputs::Input,
 observers::ObserversTuple,
 state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@@ -33,7 +33,7 @@ pub mod windows_asan_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 unsafe {
 let data = &raw mut GLOBAL_STATE;
@@ -147,7 +147,7 @@ pub mod windows_exception_handler {
 inprocess::{HasInProcessHooks, run_observers_and_save_state},
 },
 feedbacks::Feedback,
-fuzzer::HasObjective,
+fuzzer::{HasObjective, HasScheduler},
 inputs::Input,
 observers::ObserversTuple,
 state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@@ -209,7 +209,7 @@ pub mod windows_exception_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let old_hook = panic::take_hook();
 panic::set_hook(Box::new(move |panic_info| unsafe {
@@ -278,7 +278,7 @@ pub mod windows_exception_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let data: &mut InProcessExecutorHandlerData =
 unsafe { &mut *(global_state as *mut InProcessExecutorHandlerData) };
@@ -357,7 +357,7 @@ pub mod windows_exception_handler {
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 // Have we set a timer_before?
 if data.ptp_timer.is_some() {
@@ -11,8 +11,6 @@ use libafl_bolts::tuples::{Merge, RefIndexable, tuple_list};
 #[cfg(windows)]
 use windows::Win32::System::Threading::SetThreadStackGuarantee;
 
-#[cfg(all(feature = "std", target_os = "linux"))]
-use crate::executors::hooks::inprocess::HasTimeout;
 #[cfg(all(windows, feature = "std"))]
 use crate::executors::hooks::inprocess::HasTimeout;
 use crate::{
@@ -27,7 +25,7 @@ use crate::{
 inprocess::HasInProcessHooks,
 },
 feedbacks::Feedback,
-fuzzer::HasObjective,
+fuzzer::{HasObjective, HasScheduler},
 inputs::Input,
 observers::ObserversTuple,
 state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@@ -147,7 +145,7 @@ where
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasCurrentTestcase<I> + HasSolutions<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 Self::with_timeout_generic::<E, F, OF>(
 user_hooks,
@@ -159,33 +157,6 @@ where
 )
 }
 
-/// Create a new in mem executor with the default timeout and use batch mode(5 sec)
-#[cfg(all(feature = "std", target_os = "linux"))]
-pub fn batched_timeout_generic<E, F, OF>(
-user_hooks: HT,
-observers: OT,
-fuzzer: &mut Z,
-state: &mut S,
-event_mgr: &mut EM,
-exec_tmout: Duration,
-) -> Result<Self, Error>
-where
-E: Executor<EM, I, S, Z> + HasObservers + HasInProcessHooks<I, S>,
-E::Observers: ObserversTuple<I, S>,
-EM: EventFirer<I, S> + EventRestarter<S>,
-I: Input + Clone,
-F: Feedback<EM, I, E::Observers, S>,
-OF: Feedback<EM, I, E::Observers, S>,
-S: HasCurrentTestcase<I> + HasSolutions<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
-{
-let mut me = Self::with_timeout_generic::<E, F, OF>(
-user_hooks, observers, fuzzer, state, event_mgr, exec_tmout,
-)?;
-me.hooks_mut().0.timer_mut().batch_mode = true;
-Ok(me)
-}
-
 /// Create a new in mem executor.
 /// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
 /// depending on different corpus or state.
@@ -209,7 +180,7 @@ where
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasCurrentTestcase<I> + HasSolutions<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 let default = InProcessHooks::new::<E, EM, F, OF, Z>(timeout)?;
@@ -15,7 +15,7 @@ use core::{
 use libafl_bolts::tuples::{RefIndexable, tuple_list};
 
 use crate::{
-Error, HasFeedback, HasMetadata,
+Error, HasFeedback, HasScheduler,
 corpus::{Corpus, Testcase},
 events::{Event, EventFirer, EventRestarter},
 executors::{
@@ -27,6 +27,7 @@ use crate::{
 fuzzer::HasObjective,
 inputs::Input,
 observers::ObserversTuple,
+schedulers::Scheduler,
 state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasSolutions},
 };
 
@@ -143,7 +144,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 Self::with_timeout_generic::<F, OF>(
 tuple_list!(),
@@ -156,38 +157,6 @@ where
 )
 }
 
-/// Create a new in mem executor with the default timeout and use batch mode(5 sec)
-#[cfg(all(feature = "std", target_os = "linux"))]
-pub fn batched_timeout<F, OF>(
-harness_fn: &'a mut H,
-observers: OT,
-fuzzer: &mut Z,
-state: &mut S,
-event_mgr: &mut EM,
-exec_tmout: Duration,
-) -> Result<Self, Error>
-where
-EM: EventFirer<I, S> + EventRestarter<S>,
-F: Feedback<EM, I, OT, S>,
-OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
-{
-let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
-tuple_list!(),
-observers,
-fuzzer,
-state,
-event_mgr,
-exec_tmout,
-)?;
-
-Ok(Self {
-harness_fn,
-inner,
-phantom: PhantomData,
-})
-}
-
 /// Create a new in mem executor.
 /// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
 /// depending on different corpus or state.
@@ -208,7 +177,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
 tuple_list!(),
@@ -249,7 +218,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 Self::with_timeout_generic::<F, OF>(
 user_hooks,
@@ -262,34 +231,6 @@ where
 )
 }
 
-/// Create a new in mem executor with the default timeout and use batch mode(5 sec)
-#[cfg(all(feature = "std", target_os = "linux"))]
-pub fn batched_timeout_generic<F, OF>(
-user_hooks: HT,
-harness_fn: HB,
-observers: OT,
-fuzzer: &mut Z,
-state: &mut S,
-event_mgr: &mut EM,
-exec_tmout: Duration,
-) -> Result<Self, Error>
-where
-EM: EventFirer<I, S> + EventRestarter<S>,
-F: Feedback<EM, I, OT, S>,
-OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
-{
-let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
-user_hooks, observers, fuzzer, state, event_mgr, exec_tmout,
-)?;
-
-Ok(Self {
-harness_fn,
-inner,
-phantom: PhantomData,
-})
-}
-
 /// Create a new [`InProcessExecutor`].
 /// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
 /// depending on different corpus or state.
@@ -311,7 +252,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
 user_hooks, observers, fuzzer, state, event_mgr, timeout,
@@ -390,20 +331,62 @@ pub fn run_observers_and_save_state<E, EM, F, I, OF, S, Z>(
 OF: Feedback<EM, I, E::Observers, S>,
 F: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCorpus<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
+log::info!("in crash handler!");
 let mut observers = executor.observers_mut();
 
 observers
 .post_exec_all(state, input, &exitkind)
 .expect("Observers post_exec_all failed");
 
-let _is_corpus = fuzzer
+let is_corpus = fuzzer
 .feedback_mut()
 .is_interesting(state, event_mgr, input, &*observers, &exitkind)
 .expect("In run_observers_and_save_state feedback failure");
 
+if is_corpus {
+// Add the input to the main corpus
+let mut testcase = Testcase::from(input.clone());
+#[cfg(feature = "track_hit_feedbacks")]
+fuzzer
+.feedback_mut()
+.append_hit_feedbacks(testcase.hit_feedbacks_mut())
+.expect("Failed to append hit feedbacks");
+testcase.set_parent_id_optional(*state.corpus().current());
+fuzzer
+.feedback_mut()
+.append_metadata(state, event_mgr, &observers, &mut testcase)
+.expect("Failed to append metadata");
+
+let id = state
+.corpus_mut()
+.add(testcase)
+.expect("In run_observers_and_save_state failed to add to corpus.");
+fuzzer
+.scheduler_mut()
+.on_add(state, id)
+.expect("In run_observers_and_save_state failed to add to scheduler.");
+
+event_mgr
+.fire(
+state,
+Event::NewTestcase {
+input: input.clone(),
+observers_buf: None, // idk it's not effective anyway just leave it like this
+exit_kind: ExitKind::Ok,
+corpus_size: state.corpus().count(),
+client_config: event_mgr.configuration(),
+time: libafl_bolts::current_time(),
+forward_id: None,
+#[cfg(all(unix, feature = "std", feature = "multi_machine"))]
+node_id: None,
+},
+)
+.expect("Could not send off events in run_observers_and_save_state");
+}
+
 let is_solution = fuzzer
 .objective_mut()
 .is_interesting(state, event_mgr, input, &*observers, &exitkind)
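The hunk above is the behavioral core of the commit: on a crash or timeout, run_observers_and_save_state now also asks the regular feedback whether the input is corpus-worthy, adds it to the corpus, notifies the scheduler via on_add, and fires a NewTestcase event. That is why the handlers throughout this diff gain the extra Z: HasScheduler<I, S> bound. The following standalone sketch mirrors only that control flow with hypothetical stand-in types; it is not LibAFL API.

// Hypothetical stand-ins; only the control flow mirrors the crash handler above.
struct Corpus(Vec<Vec<u8>>);
struct Scheduler(Vec<usize>);

impl Corpus {
    fn add(&mut self, input: Vec<u8>) -> usize {
        self.0.push(input);
        self.0.len() - 1
    }
}

impl Scheduler {
    // Analogue of `fuzzer.scheduler_mut().on_add(state, id)`.
    fn on_add(&mut self, id: usize) {
        self.0.push(id);
    }
}

fn on_crash(
    input: &[u8],
    is_corpus: bool,
    is_solution: bool,
    corpus: &mut Corpus,
    scheduler: &mut Scheduler,
    solutions: &mut Corpus,
) {
    if is_corpus {
        // New in this commit: keep interesting crash-time inputs and tell the scheduler.
        let id = corpus.add(input.to_vec());
        scheduler.on_add(id);
        // The real handler also fires an `Event::NewTestcase` here.
    }
    if is_solution {
        // As before: objective-triggering inputs are stored as solutions.
        solutions.add(input.to_vec());
    }
}

fn main() {
    let (mut corpus, mut scheduler, mut solutions) =
        (Corpus(vec![]), Scheduler(vec![]), Corpus(vec![]));
    on_crash(b"crashing input", true, true, &mut corpus, &mut scheduler, &mut solutions);
    assert_eq!(corpus.0.len(), 1);
    assert_eq!(scheduler.0, vec![0]);
    assert_eq!(solutions.0.len(), 1);
}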
@@ -411,7 +394,6 @@ pub fn run_observers_and_save_state<E, EM, F, I, OF, S, Z>(
 
 if is_solution {
 let mut new_testcase = Testcase::from(input.clone());
-new_testcase.add_metadata(exitkind);
 new_testcase.set_parent_id_optional(*state.corpus().current());
 
 if let Ok(mut tc) = state.current_testcase_mut() {
@@ -435,7 +417,7 @@ pub fn run_observers_and_save_state<E, EM, F, I, OF, S, Z>(
 time: libafl_bolts::current_time(),
 },
 )
-.expect("Could not save state in run_observers_and_save_state");
+.expect("Could not send off events in run_observers_and_save_state");
 }
 
 // Serialize the state and wait safely for the broker to read pending messages
@@ -456,7 +438,7 @@ mod tests {
 feedbacks::CrashFeedback,
 inputs::NopInput,
 schedulers::RandScheduler,
-state::{NopState, StdState},
+state::StdState,
 };
 
 #[test]
@@ -467,7 +449,7 @@ mod tests {
 let solutions = InMemoryCorpus::new();
 let mut objective = CrashFeedback::new();
 let mut feedback = tuple_list!();
-let sche: RandScheduler<NopState<NopInput>> = RandScheduler::new();
+let sche = RandScheduler::new();
 let mut mgr = NopEventManager::new();
 let mut state =
 StdState::new(rand, corpus, solutions, &mut feedback, &mut objective).unwrap();
@@ -19,7 +19,7 @@ use crate::{
 inprocess::{GenericInProcessExecutorInner, HasInProcessHooks},
 },
 feedbacks::Feedback,
-fuzzer::HasObjective,
+fuzzer::{HasObjective, HasScheduler},
 inputs::Input,
 observers::ObserversTuple,
 state::{HasCurrentTestcase, HasExecutions, HasSolutions},
@@ -143,7 +143,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 Self::with_timeout_generic(
 tuple_list!(),
@@ -157,40 +157,6 @@ where
 )
 }
 
-/// Create a new in mem executor with the default timeout and use batch mode(5 sec)
-#[cfg(all(feature = "std", target_os = "linux"))]
-pub fn batched_timeout<F, OF>(
-harness_fn: &'a mut H,
-exposed_executor_state: ES,
-observers: OT,
-fuzzer: &mut Z,
-state: &mut S,
-event_mgr: &mut EM,
-exec_tmout: Duration,
-) -> Result<Self, Error>
-where
-EM: EventFirer<I, S> + EventRestarter<S>,
-F: Feedback<EM, I, OT, S>,
-OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
-{
-let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
-tuple_list!(),
-observers,
-fuzzer,
-state,
-event_mgr,
-exec_tmout,
-)?;
-
-Ok(Self {
-harness_fn,
-exposed_executor_state,
-inner,
-phantom: PhantomData,
-})
-}
-
 /// Create a new in mem executor.
 /// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
 /// depending on different corpus or state.
@@ -212,7 +178,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
 tuple_list!(),
@@ -270,7 +236,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 Self::with_timeout_generic(
 user_hooks,
@@ -284,37 +250,6 @@ where
 )
 }
 
-/// Create a new in mem executor with the default timeout and use batch mode(5 sec)
-#[cfg(all(feature = "std", target_os = "linux"))]
-#[expect(clippy::too_many_arguments)]
-pub fn batched_timeout_generic<F, OF>(
-user_hooks: HT,
-harness_fn: HB,
-exposed_executor_state: ES,
-observers: OT,
-fuzzer: &mut Z,
-state: &mut S,
-event_mgr: &mut EM,
-exec_tmout: Duration,
-) -> Result<Self, Error>
-where
-EM: EventFirer<I, S> + EventRestarter<S>,
-F: Feedback<EM, I, OT, S>,
-OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
-{
-let inner = GenericInProcessExecutorInner::batched_timeout_generic::<Self, F, OF>(
-user_hooks, observers, fuzzer, state, event_mgr, exec_tmout,
-)?;
-
-Ok(Self {
-harness_fn,
-exposed_executor_state,
-inner,
-phantom: PhantomData,
-})
-}
-
 /// Create a new in mem executor.
 /// Caution: crash and restart in one of them will lead to odd behavior if multiple are used,
 /// depending on different corpus or state.
@@ -338,7 +273,7 @@ where
 EM: EventFirer<I, S> + EventRestarter<S>,
 F: Feedback<EM, I, OT, S>,
 OF: Feedback<EM, I, OT, S>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 let inner = GenericInProcessExecutorInner::with_timeout_generic::<Self, F, OF>(
 user_hooks, observers, fuzzer, state, event_mgr, timeout,
@@ -443,7 +443,7 @@ where
 let meta = MapNoveltiesMetadata::new(novelties);
 testcase.add_metadata(meta);
 }
-let observer = observers.get(&self.map_ref).unwrap().as_ref();
+let observer = observers.get(&self.map_ref).expect("MapObserver not found. This is likely because you entered the crash handler with the wrong executor/observer").as_ref();
 let initial = observer.initial();
 let map_state = state
 .named_metadata_map_mut()
@@ -472,7 +472,11 @@ where
 indices.push(i);
 }
 let meta = MapIndexesMetadata::new(indices);
-testcase.try_add_metadata(meta)?;
+if testcase.try_add_metadata(meta).is_err() {
+return Err(Error::key_exists(
+"MapIndexesMetadata is already attached to this testcase. You should not have more than one observer with tracking.",
+));
+}
 } else {
 for (i, value) in observer
 .as_iter()
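The two replacements above are the "clearer error message" part of the commit: a bare unwrap() becomes an expect that names the likely misconfiguration, and a silently propagated ? becomes an explicit, descriptive error. A standalone sketch of the same pattern, using hypothetical types rather than the LibAFL API:

use std::collections::HashMap;

#[derive(Debug)]
enum Error {
    KeyExists(&'static str),
}

// Attaching the same metadata key twice is reported with a descriptive error
// instead of bubbling up an opaque failure.
fn try_add_metadata(
    slots: &mut HashMap<&'static str, u64>,
    key: &'static str,
    value: u64,
) -> Result<(), Error> {
    if slots.contains_key(key) {
        return Err(Error::KeyExists(
            "metadata of this type is already attached to the testcase",
        ));
    }
    slots.insert(key, value);
    Ok(())
}

fn main() {
    let observers: HashMap<&str, u64> = HashMap::from([("edges", 1)]);
    // A missing observer panics with a hint about the misconfiguration,
    // mirroring the `expect("MapObserver not found. ...")` above.
    let _edges = observers
        .get("edges")
        .expect("MapObserver not found: wrong executor/observer combination?");

    let mut metadata = HashMap::new();
    try_add_metadata(&mut metadata, "MapIndexesMetadata", 42).unwrap();
    assert!(try_add_metadata(&mut metadata, "MapIndexesMetadata", 43).is_err());
}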
@@ -15,10 +15,7 @@ use crate::monitors::stats::PerfFeature;
 use crate::{
 Error, HasMetadata,
 corpus::{Corpus, CorpusId, HasCurrentCorpusId, HasTestcase, Testcase},
-events::{
-CanSerializeObserver, Event, EventConfig, EventFirer, EventReceiver, ProgressReporter,
-RecordSerializationTime, SendExiting,
-},
+events::{Event, EventConfig, EventFirer, EventReceiver, ProgressReporter, SendExiting},
 executors::{Executor, ExitKind, HasObservers},
 feedbacks::Feedback,
 inputs::Input,
@@ -352,7 +349,7 @@ impl<CS, F, IF, OF> HasObjective for StdFuzzer<CS, F, IF, OF> {
 impl<CS, EM, F, I, IF, OF, OT, S> ExecutionProcessor<EM, I, OT, S> for StdFuzzer<CS, F, IF, OF>
 where
 CS: Scheduler<I, S>,
-EM: EventFirer<I, S> + CanSerializeObserver<OT>,
+EM: EventFirer<I, S>,
 F: Feedback<EM, I, OT, S>,
 I: Input,
 OF: Feedback<EM, I, OT, S>,
@@ -455,12 +452,12 @@ where
 exit_kind: &ExitKind,
 ) -> Result<(), Error> {
 // Now send off the event
-let observers_buf = if exec_res.is_solution()
+let observers_buf = if exec_res.is_corpus()
 && manager.should_send()
 && manager.configuration() != EventConfig::AlwaysUnique
 {
 // TODO set None for fast targets
-manager.serialize_observers(observers)?
+Some(postcard::to_allocvec(observers)?)
 } else {
 None
 };
@@ -537,7 +534,7 @@ where
 CS: Scheduler<I, S>,
 E: HasObservers + Executor<EM, I, S, Self>,
 E::Observers: MatchName + ObserversTuple<I, S> + Serialize,
-EM: EventFirer<I, S> + CanSerializeObserver<E::Observers>,
+EM: EventFirer<I, S>,
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasCorpus<I>
@@ -610,7 +607,7 @@ where
 CS: Scheduler<I, S>,
 E: HasObservers + Executor<EM, I, S, Self>,
 E::Observers: MatchName + ObserversTuple<I, S> + Serialize,
-EM: EventFirer<I, S> + CanSerializeObserver<E::Observers>,
+EM: EventFirer<I, S>,
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasCorpus<I>
@@ -725,7 +722,7 @@ where
 let observers_buf = if manager.configuration() == EventConfig::AlwaysUnique {
 None
 } else {
-manager.serialize_observers(&*observers)?
+Some(postcard::to_allocvec(&*observers)?)
 };
 manager.fire(
 state,
@@ -758,10 +755,7 @@ where
 CS: Scheduler<I, S>,
 E: HasObservers + Executor<EM, I, S, Self>,
 E::Observers: DeserializeOwned + Serialize + ObserversTuple<I, S>,
-EM: EventReceiver<I, S>
-+ RecordSerializationTime
-+ CanSerializeObserver<E::Observers>
-+ EventFirer<I, S>,
+EM: EventReceiver<I, S> + EventFirer<I, S>,
 F: Feedback<EM, I, E::Observers, S>,
 I: Input,
 OF: Feedback<EM, I, E::Observers, S>,
@@ -791,13 +785,8 @@ where
 exit_kind,
 ..
 } => {
-let start = current_time();
 let observers: E::Observers =
 postcard::from_bytes(observers_buf.as_ref().unwrap())?;
-{
-let dur = current_time() - start;
-manager.set_deserialization_time(dur);
-}
 let res = self.evaluate_execution(
 state, manager, input, &observers, &exit_kind, false,
 )?;
@@ -848,7 +837,7 @@ where
 CS: Scheduler<I, S>,
 E: HasObservers + Executor<EM, I, S, Self>,
 E::Observers: DeserializeOwned + Serialize + ObserversTuple<I, S>,
-EM: CanSerializeObserver<E::Observers> + EventFirer<I, S> + RecordSerializationTime,
+EM: EventFirer<I, S>,
 I: Input,
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
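With AdaptiveSerializer, CanSerializeObserver, and the serialization-time bookkeeping removed, StdFuzzer now serializes and deserializes the observer tuple directly with postcard, as the hunks above show. A minimal standalone round trip of that mechanism follows; it assumes the postcard and serde crates (with the derive feature) as dependencies, and the DummyObserver type is hypothetical, not part of LibAFL.

// Standalone sketch of the postcard round-trip now used for `observers_buf`.
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct DummyObserver {
    name: String,
    hits: u64,
}

fn main() -> Result<(), postcard::Error> {
    let observers = vec![DummyObserver { name: "edges".into(), hits: 42 }];

    // Serialize to a heap-allocated buffer, as the fuzzer now does directly.
    let observers_buf: Vec<u8> = postcard::to_allocvec(&observers)?;

    // On the receiving side, deserialize straight from the bytes.
    let restored: Vec<DummyObserver> = postcard::from_bytes(&observers_buf)?;
    assert_eq!(observers, restored);
    Ok(())
}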
@@ -73,7 +73,7 @@ pub unsafe fn inproc_qemu_crash_handler<E, EM, ET, F, I, OF, S, Z>(
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCorpus<I> + HasCurrentTestcase<I> + Unpin,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone + Unpin,
 {
 log::debug!("QEMU signal handler has been triggered (signal {signal})");
@@ -179,7 +179,7 @@ pub unsafe fn inproc_qemu_timeout_handler<E, EM, ET, F, I, OF, S, Z>(
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + Unpin + HasCurrentTestcase<I>,
 I: Input,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 {
 #[cfg(feature = "systemmode")]
 unsafe {
@@ -29,7 +29,7 @@ use libafl_bolts::{
 nonzero,
 rands::StdRand,
 shmem::{ShMem, ShMemProvider, UnixShMemProvider},
-tuples::{Handled, Merge, tuple_list},
+tuples::{Merge, tuple_list},
 };
 use typed_builder::TypedBuilder;
 
@@ -117,7 +117,6 @@ impl ForkserverBytesCoverageSugar<'_> {
 
 // Create an observation channel to keep track of the execution time
 let time_observer = TimeObserver::new("time");
-let time_ref = time_observer.handle();
 
 let mut run_client = |state: Option<_>,
 mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
@@ -300,8 +299,8 @@ impl ForkserverBytesCoverageSugar<'_> {
 .run_client(&mut run_client)
 .cores(self.cores)
 .broker_port(self.broker_port)
-.remote_broker_addr(self.remote_broker_addr)
-.time_ref(Some(time_ref));
+.remote_broker_addr(self.remote_broker_addr);
+
 #[cfg(unix)]
 let launcher = launcher.stdout_file(Some("/dev/null"));
 match launcher.build().launch() {
@@ -36,7 +36,7 @@ use libafl_bolts::{
 ownedref::OwnedMutSlice,
 rands::StdRand,
 shmem::{ShMemProvider, StdShMemProvider},
-tuples::{Handled, Merge, tuple_list},
+tuples::{Merge, tuple_list},
 };
 use libafl_targets::{CmpLogObserver, edges_map_mut_ptr};
 use typed_builder::TypedBuilder;
@@ -148,7 +148,6 @@ where
 
 // Create an observation channel to keep track of the execution time
 let time_observer = TimeObserver::new("time");
-let time_ref = time_observer.handle();
 
 let mut run_client = |state: Option<_>,
 mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
@@ -355,8 +354,8 @@ where
 .run_client(&mut run_client)
 .cores(self.cores)
 .broker_port(self.broker_port)
-.remote_broker_addr(self.remote_broker_addr)
-.time_ref(Some(time_ref));
+.remote_broker_addr(self.remote_broker_addr);
+
 #[cfg(unix)]
 let launcher = launcher.stdout_file(Some("/dev/null"));
 match launcher.build().launch() {
@@ -35,7 +35,7 @@ use libafl_bolts::{
 ownedref::OwnedMutSlice,
 rands::StdRand,
 shmem::{ShMemProvider, StdShMemProvider},
-tuples::{Handled, Merge, tuple_list},
+tuples::{Merge, tuple_list},
 };
 #[cfg(not(any(feature = "mips", feature = "hexagon")))]
 use libafl_qemu::modules::CmpLogModule;
@@ -150,7 +150,6 @@ where
 
 // Create an observation channel to keep track of the execution time
 let time_observer = TimeObserver::new("time");
-let time_ref = time_observer.handle();
 
 let mut run_client = |state: Option<_>,
 mut mgr: LlmpRestartingEventManager<_, _, _, _, _>,
@@ -476,8 +475,8 @@ where
 .run_client(&mut run_client)
 .cores(self.cores)
 .broker_port(self.broker_port)
-.remote_broker_addr(self.remote_broker_addr)
-.time_ref(Some(time_ref));
+.remote_broker_addr(self.remote_broker_addr);
+
 #[cfg(unix)]
 let launcher = launcher.stdout_file(Some("/dev/null"));
 
@@ -1,7 +1,7 @@
 //! Setup asan death callbback
 
 use libafl::{
-HasFeedback, HasObjective,
+HasFeedback, HasObjective, HasScheduler,
 events::{EventFirer, EventRestarter},
 executors::{Executor, HasObservers, hooks::windows::windows_asan_handler::asan_death_handler},
 feedbacks::Feedback,
@@ -40,7 +40,7 @@ pub unsafe fn setup_asan_callback<E, EM, F, I, OF, S, Z>(
 F: Feedback<EM, I, E::Observers, S>,
 OF: Feedback<EM, I, E::Observers, S>,
 S: HasExecutions + HasSolutions<I> + HasCurrentTestcase<I>,
-Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F>,
+Z: HasObjective<Objective = OF> + HasFeedback<Feedback = F> + HasScheduler<I, S>,
 I: Input + Clone,
 {
 unsafe {