Implement unstable edge detection+ignore in calibration stage (#398)

* step 1 for unstable calibration

* fmt

* fixed build

* done unstable implementation

* clippy

* finishing touches for unstable

* no_std

* fmt

* event mgr stability

* fixed stability value

* displaying

* no_std

* fixed critical whitespace

* send msg only after calibration

* clippy

* Added log to mgr

* moved stability to state

* fix introspection

* space

* fixed docs

Co-authored-by: Dominik Maier <domenukk@gmail.com>
Authored by van Hauser on 2021-12-08 09:54:47 +01:00, committed by GitHub
parent 83583a867f
commit 4a23489acb
10 changed files with 271 additions and 66 deletions
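
In short: the calibration stage now re-executes each new corpus entry several times, diffs the coverage map of every run against the first one, marks edges that differ as unstable so the map feedback ignores them from then on, and stores the share of stable map entries as the node's stability, which the event managers forward to the monitors. A minimal standalone sketch of that core comparison (illustrative names and a plain u8 map; the real implementation in the diff below is generic over MapObserver/MapFeedbackState):

// Sketch only: saturate history-map slots for edges that differ between two
// runs of the same input, and return the resulting stability ratio.
fn mark_unstable(map_first: &[u8], map: &[u8], history_map: &mut [u8]) -> f32 {
    let map_len = map_first.len();
    let mut unstable_entries = 0usize;
    for j in 0..map_len {
        if map_first[j] != map[j] && history_map[j] != u8::MAX {
            // A saturated entry is never rated as novel again by the map feedback.
            history_map[j] = u8::MAX;
            unstable_entries += 1;
        }
    }
    (map_len - unstable_entries) as f32 / map_len as f32
}

fn main() {
    let first = [1u8, 0, 3, 0];
    let second = [1u8, 0, 7, 0]; // edge 2 flaps between the two runs
    let mut history = [0u8; 4];
    let stability = mark_unstable(&first, &second, &mut history);
    assert_eq!(history[2], u8::MAX);
    println!("stability: {:.2}", stability); // prints 0.75
}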


@@ -22,7 +22,9 @@ use libafl::{
shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge},
},
corpus::{Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler},
corpus::{
Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, PowerQueueCorpusScheduler,
},
events::SimpleRestartingEventManager,
executors::{inprocess::InProcessExecutor, ExitKind, TimeoutExecutor},
feedback_or,
@@ -36,7 +38,11 @@ use libafl::{
tokens_mutations, Tokens,
},
observers::{StdMapObserver, TimeObserver},
stages::{StdMutationalStage, TracingStage},
stages::{
calibrate::CalibrationStage,
power::{PowerMutationalStage, PowerSchedule},
StdMutationalStage, TracingStage,
},
state::{HasCorpus, HasMetadata, StdState},
Error,
};
@@ -248,8 +254,14 @@ fn fuzz(
println!("Warning: LLVMFuzzerInitialize failed with -1")
}
let calibration = CalibrationStage::new(&mut state, &edges_observer);
let mutator = StdScheduledMutator::new(havoc_mutations());
let power = PowerMutationalStage::new(mutator, PowerSchedule::FAST, &edges_observer);
let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(PowerQueueCorpusScheduler::new());
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new());
//let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new());
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
@@ -297,7 +309,7 @@ fn fuzz(
let mutational = StdMutationalStage::new(mutator);
// The order of the stages matters!
let mut stages = tuple_list!(tracing, i2s, mutational);
let mut stages = tuple_list!(calibration, tracing, i2s, mutational, power);
// Read tokens
if let Some(tokenfile) = tokenfile {
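
Note on ordering: the calibration stage fills in the PowerScheduleMetadata (execution time, bitmap size, handicap) that the power schedule later consults, so it is placed first in the tuple, ahead of the power mutational stage.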
@@ -324,6 +336,7 @@ fn fuzz(
dup2(null_fd, io::stdout().as_raw_fd())?;
dup2(null_fd, io::stderr().as_raw_fd())?;
}
// reopen file to make sure we're at the end
log.replace(
OpenOptions::new()


@@ -177,14 +177,18 @@ where
monitor.display(event.name().to_string(), client_id);
Ok(BrokerEventResult::Forward)
}
Event::UpdateExecutions {
Event::UpdateExecStats {
time,
executions,
stability,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
let client = monitor.client_stats_mut_for(client_id);
client.update_executions(*executions as u64, *time);
if let Some(stability) = stability {
client.update_stability(*stability);
}
monitor.display(event.name().to_string(), client_id);
Ok(BrokerEventResult::Handled)
}
@@ -202,6 +206,7 @@ where
Event::UpdatePerfMonitor {
time,
executions,
stability,
introspection_monitor,
phantom: _,
} => {
@@ -213,6 +218,10 @@ where
// Update the normal monitor for this client
client.update_executions(*executions as u64, *time);
if let Some(stability) = stability {
client.update_stability(*stability);
}
// Update the performance monitor for this client
client.update_introspection_monitor((**introspection_monitor).clone());


@@ -176,9 +176,11 @@ where
executions: usize,
},
/// New stats event to monitor.
UpdateExecutions {
UpdateExecStats {
/// The time of generation of the [`Event`]
time: Duration,
/// The stability of this fuzzer node, if known
stability: Option<f32>,
/// The executions of this client
executions: usize,
/// [`PhantomData`]
@@ -198,10 +200,10 @@ where
UpdatePerfMonitor {
/// The time of generation of the event
time: Duration,
/// The executions of this client
executions: usize,
/// The stability of this fuzzer node, if known
stability: Option<f32>,
/// Current performance statistics
introspection_monitor: Box<ClientPerfMonitor>,
@@ -243,8 +245,9 @@ where
time: _,
executions: _,
} => "Testcase",
Event::UpdateExecutions {
Event::UpdateExecStats {
time: _,
stability: _,
executions: _,
phantom: _,
}
@@ -257,6 +260,7 @@ where
Event::UpdatePerfMonitor {
time: _,
executions: _,
stability: _,
introspection_monitor: _,
phantom: _,
} => "PerfMonitor",
@@ -288,6 +292,24 @@ where
/// This should not happen for normal use-cases.
fn fire<S>(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>;
/// Send off an [`Event::Log`] event to the broker
/// This is a shortcut for [`EventFirer::fire`] with [`Event::Log`] as argument.
fn log<S>(
&mut self,
state: &mut S,
severity_level: LogSeverity,
message: String,
) -> Result<(), Error> {
self.fire(
state,
Event::Log {
severity_level,
message,
phantom: PhantomData,
},
)
}
/// Serialize all observers for this type and manager
fn serialize_observers<OT, S>(&mut self, observers: &OT) -> Result<Vec<u8>, Error>
where
@@ -320,6 +342,7 @@ where
S: HasExecutions + HasClientPerfMonitor,
{
let executions = *state.executions();
let stability = *state.stability();
let cur = current_time();
// default to 0 here to avoid crashes on clock skew
if cur.checked_sub(last_report_time).unwrap_or_default() > monitor_timeout {
@@ -327,8 +350,9 @@ where
#[cfg(not(feature = "introspection"))]
self.fire(
state,
Event::UpdateExecutions {
Event::UpdateExecStats {
executions,
stability,
time: cur,
phantom: PhantomData,
},
@@ -348,6 +372,7 @@ where
Event::UpdatePerfMonitor {
executions,
time: cur,
stability,
introspection_monitor: Box::new(state.introspection_monitor().clone()),
phantom: PhantomData,
},
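
The new EventFirer::log default method is just sugar for fire() with an Event::Log payload; the calibration stage further down uses it to warn when a corpus entry fails to execute, e.g. mgr.log(state, LogSeverity::Warn, "Corpus entry errored on execution!".into())?.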


@@ -150,15 +150,20 @@ where
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
Event::UpdateExecutions {
Event::UpdateExecStats {
time,
executions,
stability,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
monitor
.client_stats_mut_for(0)
.update_executions(*executions as u64, *time);
let client = monitor.client_stats_mut_for(0);
client.update_executions(*executions as u64, *time);
if let Some(stability) = stability {
client.update_stability(*stability);
}
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
@@ -177,13 +182,17 @@ where
Event::UpdatePerfMonitor {
time,
executions,
stability,
introspection_monitor,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
monitor.client_stats_mut()[0].update_executions(*executions as u64, *time);
monitor.client_stats_mut()[0]
.update_introspection_monitor((**introspection_monitor).clone());
let client = &mut monitor.client_stats_mut()[0];
client.update_executions(*executions as u64, *time);
client.update_introspection_monitor((**introspection_monitor).clone());
if let Some(stability) = stability {
client.update_stability(*stability);
}
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}


@@ -40,7 +40,7 @@ use crate::{
use serde::{Deserialize, Serialize};
/// How an execution finished.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ExitKind {
/// The run exited normally.
Ok,
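
ExitKind now derives PartialEq and Eq so that the calibration stage can compare run_target(..)? == ExitKind::Ok and skip the map comparison for runs that did not exit normally.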


@@ -243,7 +243,9 @@ where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{
fn reset(&mut self) -> Result<(), Error> {
self.history_map.iter_mut().for_each(|x| *x = T::default());
self.history_map
.iter_mut()
.for_each(|x| *x = T::min_value());
Ok(())
}
}
@@ -266,7 +268,7 @@ where
#[must_use]
pub fn new(name: &'static str, map_size: usize) -> Self {
Self {
history_map: vec![T::default(); map_size],
history_map: vec![T::min_value(); map_size],
name: name.to_string(),
}
}
@@ -278,7 +280,7 @@ where
T: Debug,
{
Self {
history_map: vec![T::default(); map_observer.len()],
history_map: vec![T::min_value(); map_observer.len()],
name: map_observer.name().to_string(),
}
}


@@ -3,15 +3,13 @@
pub mod multi;
pub use multi::MultiMonitor;
use serde::{Deserialize, Serialize};
use alloc::{string::String, vec::Vec};
use alloc::{
string::{String, ToString},
vec::Vec,
};
use core::{fmt, time, time::Duration};
use hashbrown::HashMap;
#[cfg(feature = "introspection")]
use alloc::string::ToString;
use serde::{Deserialize, Serialize};
use crate::bolts::{current_time, format_duration_hms};
@@ -59,7 +57,8 @@ pub struct ClientStats {
pub last_execs_per_sec: f32,
/// User-defined monitor
pub user_monitor: HashMap<String, UserStats>,
/// Stability of this client; `None` if we never received a stability value
pub stability: Option<f32>,
/// Client performance statistics
#[cfg(feature = "introspection")]
pub introspection_monitor: ClientPerfMonitor,
@@ -89,6 +88,11 @@ impl ClientStats {
self.objective_size = objective_size;
}
/// We got new information about stability for this client; insert it.
pub fn update_stability(&mut self, stability: f32) {
self.stability = Some(stability);
}
/// Get the calculated executions per second for this client
#[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)]
pub fn execs_per_sec(&mut self, cur_time: time::Duration) -> u64 {
@@ -150,6 +154,24 @@ pub trait Monitor {
/// show the monitor to the user
fn display(&mut self, event_msg: String, sender_id: u32);
/// Show the stability
fn stability(&self) -> Option<f32> {
let mut stability_total = 0_f32;
let mut num = 0_usize;
for stat in self.client_stats() {
if let Some(stability) = stat.stability {
stability_total += stability;
num += 1;
}
}
if num == 0 {
None
} else {
#[allow(clippy::cast_precision_loss)]
Some(stability_total / num as f32)
}
}
/// Amount of elements in the corpus (combined for all children)
fn corpus_size(&self) -> u64 {
self.client_stats()
@@ -269,13 +291,18 @@ where
fn display(&mut self, event_msg: String, sender_id: u32) {
let fmt = format!(
"[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
"[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, {} exec/sec: {}",
event_msg,
sender_id,
format_duration_hms(&(current_time() - self.start_time)),
self.client_stats().len(),
self.corpus_size(),
self.objective_size(),
if let Some(stability) = self.stability() {
format!(", stability: {:.2}", stability)
} else {
"".to_string()
},
self.total_execs(),
self.execs_per_sec()
);
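
Worked example for the status line above: with two clients reporting stabilities of 0.98 and 0.90 and a third that has not reported any, stability() averages only the reporting clients and the line gains "stability: 0.94"; if no client ever reported a value, the field is omitted.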


@@ -3,12 +3,14 @@
use crate::{
bolts::current_time,
corpus::{Corpus, PowerScheduleTestcaseMetaData},
executors::{Executor, HasObservers},
events::{EventFirer, LogSeverity},
executors::{Executor, ExitKind, HasObservers},
feedbacks::{FeedbackStatesTuple, MapFeedbackState},
fuzzer::Evaluator,
inputs::Input,
observers::{MapObserver, ObserversTuple},
stages::Stage,
state::{HasCorpus, HasMetadata},
state::{HasClientPerfMonitor, HasCorpus, HasFeedbackStates, HasMetadata},
Error,
};
use alloc::{
@@ -20,11 +22,13 @@ use num_traits::PrimInt;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
pub struct CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
pub struct CalibrationStage<C, E, EM, FT, I, O, OT, S, T, Z>
where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I>,
FT: FeedbackStatesTuple,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
@@ -34,53 +38,145 @@ where
map_observer_name: String,
stage_max: usize,
#[allow(clippy::type_complexity)]
phantom: PhantomData<(C, E, EM, I, O, OT, S, T, Z)>,
phantom: PhantomData<(C, E, EM, FT, I, O, OT, S, T, Z)>,
}
const CAL_STAGE_MAX: usize = 8;
const CAL_STAGE_START: usize = 4;
const CAL_STAGE_MAX: usize = 16;
impl<C, E, EM, I, O, OT, S, T, Z> Stage<E, EM, S, Z>
for CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
impl<C, E, EM, FT, I, O, OT, S, T, Z> Stage<E, EM, S, Z>
for CalibrationStage<C, E, EM, FT, I, O, OT, S, T, Z>
where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I>,
FT: FeedbackStatesTuple,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
S: HasCorpus<C, I> + HasMetadata,
S: HasCorpus<C, I> + HasMetadata + HasFeedbackStates<FT> + HasClientPerfMonitor,
Z: Evaluator<E, EM, I, S>,
{
#[inline]
#[allow(clippy::let_and_return)]
#[allow(clippy::let_and_return, clippy::too_many_lines)]
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut S,
manager: &mut EM,
mgr: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let iter = self.stage_max;
let mut iter = self.stage_max;
let handicap = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?
.queue_cycles;
let start = current_time();
for _i in 0..iter {
let input = state
.corpus()
.get(corpus_idx)?
.borrow_mut()
.load_input()?
.clone();
let _ = executor.run_target(fuzzer, state, manager, &input)?;
// Run once to get the initial calibration map
executor.observers_mut().pre_exec_all(state, &input)?;
let mut start = current_time();
let mut total_time = if executor.run_target(fuzzer, state, mgr, &input)? == ExitKind::Ok {
current_time() - start
} else {
mgr.log(
state,
LogSeverity::Warn,
"Corpus entry errored on execution!".into(),
)?;
// assume one second as default time
Duration::from_secs(1)
};
let map_first = &executor
.observers()
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::KeyNotFound("MapObserver not found".to_string()))?
.map()
.unwrap()
.to_vec();
// Run CAL_STAGE_START - 1 more times, increasing the budget by 2 every time
// a run is found to be unstable, up to CAL_STAGE_MAX total runs.
let mut i = 1;
let mut has_errors = false;
let mut unstable_entries: usize = 0;
let map_len: usize = map_first.len() as usize;
while i < iter {
let input = state
.corpus()
.get(corpus_idx)?
.borrow_mut()
.load_input()?
.clone();
executor.observers_mut().pre_exec_all(state, &input)?;
start = current_time();
if executor.run_target(fuzzer, state, mgr, &input)? != ExitKind::Ok {
if !has_errors {
mgr.log(
state,
LogSeverity::Warn,
"Corpus entry errored on execution!".into(),
)?;
has_errors = true;
if iter < CAL_STAGE_MAX {
iter += 2;
};
}
continue;
};
total_time += current_time() - start;
let map = &executor
.observers()
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::KeyNotFound("MapObserver not found".to_string()))?
.map()
.unwrap()
.to_vec();
let history_map = &mut state
.feedback_states_mut()
.match_name_mut::<MapFeedbackState<T>>(&self.map_observer_name)
.unwrap()
.history_map;
for j in 0..map_len {
if map_first[j] != map[j] && history_map[j] != T::max_value() {
history_map[j] = T::max_value();
unstable_entries += 1;
};
}
let end = current_time();
i += 1;
}
#[allow(clippy::cast_precision_loss)]
if unstable_entries != 0 {
*state.stability_mut() = Some((map_len - unstable_entries) as f32 / (map_len as f32));
if iter < CAL_STAGE_MAX {
iter += 2;
}
};
let psmeta = state
.metadata_mut()
.get_mut::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let map = executor
.observers()
@@ -89,20 +185,14 @@ where
let bitmap_size = map.count_bytes();
let psmeta = state
.metadata_mut()
.get_mut::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
psmeta.set_exec_time(psmeta.exec_time() + (end - start));
psmeta.set_exec_time(psmeta.exec_time() + total_time);
psmeta.set_cycles(psmeta.cycles() + (iter as u64));
psmeta.set_bitmap_size(psmeta.bitmap_size() + bitmap_size);
psmeta.set_bitmap_entries(psmeta.bitmap_entries() + 1);
// println!("psmeta: {:#?}", psmeta);
let mut testcase = state.corpus().get(corpus_idx)?.borrow_mut();
testcase.set_exec_time((end - start) / (iter as u32));
testcase.set_exec_time(total_time / (iter as u32));
// println!("time: {:#?}", testcase.exec_time());
let data = testcase
.metadata_mut()
@@ -208,11 +298,13 @@ impl PowerScheduleMetadata {
crate::impl_serdeany!(PowerScheduleMetadata);
impl<C, E, I, EM, O, OT, S, T, Z> CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
impl<C, E, EM, FT, I, O, OT, S, T, Z> CalibrationStage<C, E, EM, FT, I, O, OT, S, T, Z>
where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I>,
FT: FeedbackStatesTuple,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
@@ -223,7 +315,7 @@ where
state.add_metadata::<PowerScheduleMetadata>(PowerScheduleMetadata::new());
Self {
map_observer_name: map_observer_name.name().to_string(),
stage_max: CAL_STAGE_MAX,
stage_max: CAL_STAGE_START,
phantom: PhantomData,
}
}
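
Net effect on the calibration budget: instead of always running CAL_STAGE_MAX (previously 8) executions per entry, calibration now starts at CAL_STAGE_START = 4 and bumps the budget by 2 (capped at the new CAL_STAGE_MAX = 16) when an entry errors or turns out to be unstable, and the summed total_time of the successful runs replaces the old single end - start window when updating PowerScheduleMetadata and the testcase's exec_time.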


@@ -31,21 +31,16 @@ pub mod sync;
#[cfg(feature = "std")]
pub use sync::*;
use crate::events::EventFirer;
use crate::events::EventRestarter;
use crate::events::HasEventManagerId;
use crate::events::ProgressReporter;
use crate::state::HasExecutions;
use crate::{
bolts::rands::Rand,
corpus::Corpus,
corpus::CorpusScheduler,
executors::Executor,
executors::HasObservers,
corpus::{Corpus, CorpusScheduler},
events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter},
executors::{Executor, HasObservers},
inputs::Input,
observers::ObserversTuple,
state::HasRand,
state::{HasClientPerfMonitor, HasCorpus},
state::{
HasExecutions, HasRand, {HasClientPerfMonitor, HasCorpus},
},
Error, EvaluatorObservers, ExecutesInput, ExecutionProcessor, HasCorpusScheduler,
};
use core::{convert::From, marker::PhantomData};


@@ -78,6 +78,12 @@ pub trait HasClientPerfMonitor {
/// Mutatable ref to [`ClientPerfMonitor`]
fn introspection_monitor_mut(&mut self) -> &mut ClientPerfMonitor;
/// This node's stability
fn stability(&self) -> &Option<f32>;
/// This node's stability (mut)
fn stability_mut(&mut self) -> &mut Option<f32>;
}
/// Trait for elements offering metadata
@@ -163,6 +169,8 @@ where
metadata: SerdeAnyMap,
/// MaxSize testcase size for mutators that appreciate it
max_size: usize,
/// The stability of the current fuzzing process
stability: Option<f32>,
/// Performance statistics for this fuzzer
#[cfg(feature = "introspection")]
@@ -552,6 +560,7 @@ where
Self {
rand,
executions: 0,
stability: None,
start_time: Duration::from_millis(0),
metadata: SerdeAnyMap::default(),
corpus,
@@ -581,6 +590,18 @@ where
fn introspection_monitor_mut(&mut self) -> &mut ClientPerfMonitor {
&mut self.introspection_monitor
}
/// This node's stability
#[inline]
fn stability(&self) -> &Option<f32> {
&self.stability
}
/// This node's stability (mut)
#[inline]
fn stability_mut(&mut self) -> &mut Option<f32> {
&mut self.stability
}
}
#[cfg(not(feature = "introspection"))]
@@ -599,4 +620,16 @@ where
fn introspection_monitor_mut(&mut self) -> &mut ClientPerfMonitor {
unimplemented!()
}
/// This node's stability
#[inline]
fn stability(&self) -> &Option<f32> {
&self.stability
}
/// This node's stability (mut)
#[inline]
fn stability_mut(&mut self) -> &mut Option<f32> {
&mut self.stability
}
}