* Defined PowerScheduleTestcase

* calibrate stage

* powerschedule

* PowerQueueCorpusScheduler

* Calstat

* Integer for observer

* update power.rs

* calculate_score

* no_std

* no_std

* calibrate_case

* calculate_score done

* update fuzz_level

* update depth

* rename, PowerScheduleQueueScheduler::next

* increment hashmap

* n_fuzz

* no_std

* fmt

* clippy

* check bitmap_size

* hash

* compile

* rename, vec

* fuzzer runs

* fixes

* rename, no_std log2

* fmt

* clippy

* fmt

* unused imports

* use exec_time

* getter/setter

* No unwrap

* ToString

* fixes

* ahash

* fmt

* use favored from power.rs side

* rename

* IsFavoredMetadata

* run_target

* clippy fixes

* doc & fix

* doc

* rename

* forgot log2

* fix

* fix

* added comments explaining why the COE and FAST schedules are different from the thesis

* saturated increment

* saturating_add

* put n_fuzz in PowerScheduleMetadata

Co-authored-by: Andrea Fioraldi <andreafioraldi@gmail.com>
This commit is contained in:
Toka 2021-07-29 04:19:50 +09:00 committed by GitHub
parent 77541da9fd
commit bfe0a97077
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 849 additions and 102 deletions

View File

@ -9,7 +9,7 @@ use libafl::{
bolts::{current_nanos, rands::StdRand}, bolts::{current_nanos, rands::StdRand},
corpus::{ corpus::{
Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus,
QueueCorpusScheduler, PowerQueueCorpusScheduler,
}, },
events::{setup_restarting_mgr_std, EventRestarter}, events::{setup_restarting_mgr_std, EventRestarter},
executors::{inprocess::InProcessExecutor, ExitKind, TimeoutExecutor}, executors::{inprocess::InProcessExecutor, ExitKind, TimeoutExecutor},
@ -17,11 +17,13 @@ use libafl::{
feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback, TimeFeedback, TimeoutFeedback},
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
inputs::{BytesInput, HasTargetBytes}, inputs::{BytesInput, HasTargetBytes},
mutators::mopt_mutator::StdMOptMutator, mutators::scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator},
mutators::scheduled::{havoc_mutations, tokens_mutations},
mutators::token_mutations::Tokens, mutators::token_mutations::Tokens,
observers::{HitcountsMapObserver, StdMapObserver, TimeObserver}, observers::{HitcountsMapObserver, StdMapObserver, TimeObserver},
stages::mutational::StdMutationalStage, stages::{
calibrate::CalibrationStage,
power::{PowerMutationalStage, PowerSchedule},
},
state::{HasCorpus, HasMetadata, StdState}, state::{HasCorpus, HasMetadata, StdState},
stats::MultiStats, stats::MultiStats,
Error, Error,
@ -121,16 +123,15 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
// Setup a basic mutator with a mutational stage // Setup a basic mutator with a mutational stage
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
/*
let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations()));
let mut stages = tuple_list!(StdMutationalStage::new(mutator));
*/ let calibration = CalibrationStage::new(&mut state, &edges_observer);
let power = PowerMutationalStage::new(mutator, PowerSchedule::FAST, &edges_observer);
let mut stages = tuple_list!(calibration, power);
// A minimization+queue policy to get testcasess from the corpus // A minimization+queue policy to get testcasess from the corpus
let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new()); let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(PowerQueueCorpusScheduler::new());
// A fuzzer with feedbacks and a corpus scheduler // A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);

View File

@ -74,6 +74,7 @@ hostname = { version = "^0.3", optional = true } # Is there really no gethostnam
rand = { version = "0.8.1", optional = true } # rand = { version = "0.8.1", optional = true } #
rand_core = { version = "0.6.2", optional = true } # This dependency allows us to export our RomuRand as rand::Rng. rand_core = { version = "0.6.2", optional = true } # This dependency allows us to export our RomuRand as rand::Rng.
nix = { version = "0.20.0", optional = true } nix = { version = "0.20.0", optional = true }
libm = "0.2.1"
[target.'cfg(target_os = "android")'.dependencies] [target.'cfg(target_os = "android")'.dependencies]
backtrace = { version = "0.3", optional = true, default-features = false, features = ["std", "libbacktrace"] } # for llmp_debug backtrace = { version = "0.3", optional = true, default-features = false, features = ["std", "libbacktrace"] } # for llmp_debug

View File

@ -1,7 +1,7 @@
//! Corpuses contain the testcases, either in mem, on disk, or somewhere else. //! Corpuses contain the testcases, either in mem, on disk, or somewhere else.
pub mod testcase; pub mod testcase;
pub use testcase::Testcase; pub use testcase::{PowerScheduleTestcaseMetaData, Testcase};
pub mod inmemory; pub mod inmemory;
pub use inmemory::InMemoryCorpus; pub use inmemory::InMemoryCorpus;
@ -26,6 +26,9 @@ pub use minimizer::{
TopRatedsMetadata, TopRatedsMetadata,
}; };
pub mod powersched;
pub use powersched::PowerQueueCorpusScheduler;
use alloc::borrow::ToOwned; use alloc::borrow::ToOwned;
use core::{cell::RefCell, marker::PhantomData}; use core::{cell::RefCell, marker::PhantomData};

View File

@ -0,0 +1,105 @@
//! The queue corpus scheduler for power schedules.
use alloc::string::{String, ToString};
use core::marker::PhantomData;
use crate::{
corpus::{Corpus, CorpusScheduler, PowerScheduleTestcaseMetaData},
inputs::Input,
stages::PowerScheduleMetadata,
state::{HasCorpus, HasMetadata},
Error,
};
/// A queue-order corpus scheduler for power-schedule fuzzing.
///
/// Walks the corpus in insertion order, wrapping around at the end, and
/// maintains the metadata the power schedules need: per-testcase depth
/// (attached in `on_add`) and the global queue-cycle counter (bumped in
/// `next` on wrap-around).
pub struct PowerQueueCorpusScheduler<C, I, S>
where
    S: HasCorpus<C, I> + HasMetadata,
    C: Corpus<I>,
    I: Input,
{
    /// Marker for the generic parameters; the scheduler itself holds no state.
    phantom: PhantomData<(C, I, S)>,
}
impl<C, I, S> Default for PowerQueueCorpusScheduler<C, I, S>
where
S: HasCorpus<C, I> + HasMetadata,
C: Corpus<I>,
I: Input,
{
fn default() -> Self {
Self::new()
}
}
impl<C, I, S> CorpusScheduler<I, S> for PowerQueueCorpusScheduler<C, I, S>
where
S: HasCorpus<C, I> + HasMetadata,
C: Corpus<I>,
I: Input,
{
/// Add an entry to the corpus and return its index
fn on_add(&self, state: &mut S, idx: usize) -> Result<(), Error> {
let current_idx = *state.corpus().current();
let mut depth = match current_idx {
Some(idx) => state
.corpus()
.get(idx)?
.borrow_mut()
.metadata_mut()
.get_mut::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?
.depth(),
None => 0,
};
// Attach a `PowerScheduleTestData` to the queue entry.
depth += 1;
state
.corpus()
.get(idx)?
.borrow_mut()
.add_metadata(PowerScheduleTestcaseMetaData::new(depth));
Ok(())
}
fn next(&self, state: &mut S) -> Result<usize, Error> {
if state.corpus().count() == 0 {
Err(Error::Empty(String::from("No entries in corpus")))
} else {
let id = match state.corpus().current() {
Some(cur) => {
if *cur + 1 >= state.corpus().count() {
let psmeta = state
.metadata_mut()
.get_mut::<PowerScheduleMetadata>()
.ok_or_else(|| {
Error::KeyNotFound("PowerScheduleMetadata not found".to_string())
})?;
psmeta.set_queue_cycles(psmeta.queue_cycles() + 1);
0
} else {
*cur + 1
}
}
None => 0,
};
*state.corpus_mut().current_mut() = Some(id);
Ok(id)
}
}
}
impl<C, I, S> PowerQueueCorpusScheduler<C, I, S>
where
S: HasCorpus<C, I> + HasMetadata,
C: Corpus<I>,
I: Input,
{
#[must_use]
pub fn new() -> Self {
Self {
phantom: PhantomData,
}
}
}

View File

@ -120,15 +120,22 @@ where
} }
/// Get the execution time of the testcase /// Get the execution time of the testcase
#[inline]
pub fn exec_time(&self) -> &Option<Duration> { pub fn exec_time(&self) -> &Option<Duration> {
&self.exec_time &self.exec_time
} }
/// Get the execution time of the testcase (mut) /// Get the execution time of the testcase (mut)
#[inline]
pub fn exec_time_mut(&mut self) -> &mut Option<Duration> { pub fn exec_time_mut(&mut self) -> &mut Option<Duration> {
&mut self.exec_time &mut self.exec_time
} }
#[inline]
pub fn set_exec_time(&mut self, time: Duration) {
self.exec_time = Some(time);
}
/// Create a new Testcase instace given an input /// Create a new Testcase instace given an input
#[inline] #[inline]
pub fn new<T>(input: T) -> Self pub fn new<T>(input: T) -> Self
@ -206,3 +213,78 @@ where
Testcase::new(input) Testcase::new(input)
} }
} }
/// The Metadata for each testcase used in power schedules.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct PowerScheduleTestcaseMetaData {
    /// Number of bits set in bitmap, updated in calibrate_case
    bitmap_size: u64,
    /// Number of fuzzing iterations, updated in perform_mutational
    fuzz_level: u64,
    /// Number of queue cycles behind
    handicap: u64,
    /// Path depth, initialized in on_add
    depth: u64,
    /// Offset in n_fuzz
    n_fuzz_entry: usize,
}

impl PowerScheduleTestcaseMetaData {
    /// Create metadata for an entry at queue `depth`; all other statistics
    /// start at zero and are filled in during calibration/mutation.
    #[must_use]
    pub fn new(depth: u64) -> Self {
        Self {
            bitmap_size: 0,
            fuzz_level: 0,
            handicap: 0,
            depth,
            n_fuzz_entry: 0,
        }
    }

    /// Number of filled coverage-map entries observed for this testcase.
    #[must_use]
    pub fn bitmap_size(&self) -> u64 {
        self.bitmap_size
    }

    /// Set the observed coverage-map fill count.
    pub fn set_bitmap_size(&mut self, val: u64) {
        self.bitmap_size = val;
    }

    /// How many times this testcase has been fuzzed/calibrated.
    #[must_use]
    pub fn fuzz_level(&self) -> u64 {
        self.fuzz_level
    }

    /// Set the fuzz counter.
    pub fn set_fuzz_level(&mut self, val: u64) {
        self.fuzz_level = val;
    }

    /// Queue-cycle count recorded when this entry was calibrated.
    #[must_use]
    pub fn handicap(&self) -> u64 {
        self.handicap
    }

    /// Set the handicap.
    pub fn set_handicap(&mut self, val: u64) {
        self.handicap = val;
    }

    /// Depth of this entry in the queue (parent depth + 1).
    #[must_use]
    pub fn depth(&self) -> u64 {
        self.depth
    }

    /// Set the queue depth.
    pub fn set_depth(&mut self, val: u64) {
        self.depth = val;
    }

    /// Index of this testcase's path in the global `n_fuzz` frequency table.
    #[must_use]
    pub fn n_fuzz_entry(&self) -> usize {
        self.n_fuzz_entry
    }

    /// Set the `n_fuzz` table index.
    pub fn set_n_fuzz_entry(&mut self, val: usize) {
        self.n_fuzz_entry = val;
    }
}

crate::impl_serdeany!(PowerScheduleTestcaseMetaData);

View File

@ -1,10 +1,15 @@
//! The `MapObserver` provides access a map, usually injected into the target //! The `MapObserver` provides access a map, usually injected into the target
use ahash::AHasher;
use alloc::{ use alloc::{
string::{String, ToString}, string::{String, ToString},
vec::Vec, vec::Vec,
}; };
use core::slice::from_raw_parts_mut; use core::{
hash::Hasher,
slice::{from_raw_parts, from_raw_parts_mut},
};
use num::Integer;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
@ -19,7 +24,7 @@ use crate::{
/// A [`MapObserver`] observes the static map, as oftentimes used for afl-like coverage information /// A [`MapObserver`] observes the static map, as oftentimes used for afl-like coverage information
pub trait MapObserver<T>: Named + serde::Serialize + serde::de::DeserializeOwned pub trait MapObserver<T>: Named + serde::Serialize + serde::de::DeserializeOwned
where where
T: Default + Copy, T: Integer + Default + Copy,
{ {
/// Get the map /// Get the map
fn map(&self) -> &[T]; fn map(&self) -> &[T];
@ -32,6 +37,31 @@ where
self.map().len() self.map().len()
} }
/// Count the set bytes in the map
fn count_bytes(&self) -> u64 {
    let initial = self.initial();
    let cnt = self.usable_count();
    // Tally every usable entry that differs from the initial (unset) value.
    self.map()[..cnt].iter().filter(|&&e| e != initial).count() as u64
}
/// Compute a 64-bit hash of the map's current contents.
///
/// The map is reinterpreted as its raw byte representation and fed to
/// `AHasher`, so two maps with identical bytes hash identically.
fn hash(&self) -> u64 {
    let mut hasher = AHasher::new_with_keys(0, 0);
    let ptr = self.map().as_ptr() as *const u8;
    // Total size in BYTES is element count TIMES element size.
    // (The previous `len() / size_of::<T>()` under-hashed any map whose
    // element type is wider than one byte.)
    let byte_len = self.map().len() * core::mem::size_of::<T>();
    // SAFETY: `ptr` points at `self.map().len()` contiguous, initialized
    // `T`s, which is exactly `byte_len` readable bytes.
    unsafe {
        hasher.write(from_raw_parts(ptr, byte_len));
    }
    hasher.finish()
}
/// Get the initial value for reset() /// Get the initial value for reset()
fn initial(&self) -> T; fn initial(&self) -> T;
@ -62,7 +92,7 @@ where
#[allow(clippy::unsafe_derive_deserialize)] #[allow(clippy::unsafe_derive_deserialize)]
pub struct StdMapObserver<'a, T> pub struct StdMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
map: OwnedSliceMut<'a, T>, map: OwnedSliceMut<'a, T>,
initial: T, initial: T,
@ -71,7 +101,7 @@ where
impl<'a, I, S, T> Observer<I, S> for StdMapObserver<'a, T> impl<'a, I, S, T> Observer<I, S> for StdMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
Self: MapObserver<T>, Self: MapObserver<T>,
{ {
#[inline] #[inline]
@ -82,7 +112,7 @@ where
impl<'a, T> Named for StdMapObserver<'a, T> impl<'a, T> Named for StdMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
#[inline] #[inline]
fn name(&self) -> &str { fn name(&self) -> &str {
@ -92,7 +122,7 @@ where
impl<'a, T> MapObserver<T> for StdMapObserver<'a, T> impl<'a, T> MapObserver<T> for StdMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
#[inline] #[inline]
fn map(&self) -> &[T] { fn map(&self) -> &[T] {
@ -122,7 +152,7 @@ where
impl<'a, T> StdMapObserver<'a, T> impl<'a, T> StdMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
/// Creates a new [`MapObserver`] /// Creates a new [`MapObserver`]
#[must_use] #[must_use]
@ -167,7 +197,7 @@ where
#[allow(clippy::unsafe_derive_deserialize)] #[allow(clippy::unsafe_derive_deserialize)]
pub struct ConstMapObserver<'a, T, const N: usize> pub struct ConstMapObserver<'a, T, const N: usize>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
map: OwnedSliceMut<'a, T>, map: OwnedSliceMut<'a, T>,
initial: T, initial: T,
@ -176,7 +206,7 @@ where
impl<'a, I, S, T, const N: usize> Observer<I, S> for ConstMapObserver<'a, T, N> impl<'a, I, S, T, const N: usize> Observer<I, S> for ConstMapObserver<'a, T, N>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
Self: MapObserver<T>, Self: MapObserver<T>,
{ {
#[inline] #[inline]
@ -187,7 +217,7 @@ where
impl<'a, T, const N: usize> Named for ConstMapObserver<'a, T, N> impl<'a, T, const N: usize> Named for ConstMapObserver<'a, T, N>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
#[inline] #[inline]
fn name(&self) -> &str { fn name(&self) -> &str {
@ -197,7 +227,7 @@ where
impl<'a, T, const N: usize> MapObserver<T> for ConstMapObserver<'a, T, N> impl<'a, T, const N: usize> MapObserver<T> for ConstMapObserver<'a, T, N>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
#[inline] #[inline]
fn usable_count(&self) -> usize { fn usable_count(&self) -> usize {
@ -232,7 +262,7 @@ where
impl<'a, T, const N: usize> ConstMapObserver<'a, T, N> impl<'a, T, const N: usize> ConstMapObserver<'a, T, N>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
/// Creates a new [`MapObserver`] /// Creates a new [`MapObserver`]
#[must_use] #[must_use]
@ -278,7 +308,7 @@ where
#[allow(clippy::unsafe_derive_deserialize)] #[allow(clippy::unsafe_derive_deserialize)]
pub struct VariableMapObserver<'a, T> pub struct VariableMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
map: OwnedSliceMut<'a, T>, map: OwnedSliceMut<'a, T>,
size: OwnedRefMut<'a, usize>, size: OwnedRefMut<'a, usize>,
@ -288,7 +318,7 @@ where
impl<'a, I, S, T> Observer<I, S> for VariableMapObserver<'a, T> impl<'a, I, S, T> Observer<I, S> for VariableMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
Self: MapObserver<T>, Self: MapObserver<T>,
{ {
#[inline] #[inline]
@ -299,7 +329,7 @@ where
impl<'a, T> Named for VariableMapObserver<'a, T> impl<'a, T> Named for VariableMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
#[inline] #[inline]
fn name(&self) -> &str { fn name(&self) -> &str {
@ -309,7 +339,7 @@ where
impl<'a, T> MapObserver<T> for VariableMapObserver<'a, T> impl<'a, T> MapObserver<T> for VariableMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
#[inline] #[inline]
fn map(&self) -> &[T] { fn map(&self) -> &[T] {
@ -344,7 +374,7 @@ where
impl<'a, T> VariableMapObserver<'a, T> impl<'a, T> VariableMapObserver<'a, T>
where where
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{ {
/// Creates a new [`MapObserver`] /// Creates a new [`MapObserver`]
pub fn new(name: &'static str, map: &'a mut [T], size: &'a mut usize) -> Self { pub fn new(name: &'static str, map: &'a mut [T], size: &'a mut usize) -> Self {

View File

@ -0,0 +1,236 @@
//! The calibration stage. The fuzzer measures the average exec time and the bitmap size.
use crate::{
bolts::current_time,
corpus::{Corpus, PowerScheduleTestcaseMetaData},
executors::{Executor, HasObservers},
fuzzer::Evaluator,
inputs::Input,
observers::{MapObserver, ObserversTuple},
stages::Stage,
state::{HasCorpus, HasMetadata},
Error,
};
use alloc::{
string::{String, ToString},
vec::Vec,
};
use core::{marker::PhantomData, time::Duration};
use num::Integer;
use serde::{Deserialize, Serialize};
/// The calibration stage: executes a corpus entry several times to measure
/// its average execution time and its coverage-bitmap size, storing the
/// results into the global `PowerScheduleMetadata` and the entry's own
/// `PowerScheduleTestcaseMetaData`.
#[derive(Clone, Debug)]
pub struct CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
where
    T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
    C: Corpus<I>,
    E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
    I: Input,
    O: MapObserver<T>,
    OT: ObserversTuple<I, S>,
    S: HasCorpus<C, I> + HasMetadata,
    Z: Evaluator<E, EM, I, S>,
{
    /// Name of the map observer to read coverage from.
    map_observer_name: String,
    /// Number of target executions per calibration run (`CAL_STAGE_MAX`).
    stage_max: usize,
    // Ties the otherwise-unused generic parameters to the struct.
    #[allow(clippy::type_complexity)]
    phantom: PhantomData<(C, E, EM, I, O, OT, S, T, Z)>,
}
const CAL_STAGE_MAX: usize = 8;
impl<C, E, EM, I, O, OT, S, T, Z> Stage<E, EM, S, Z>
    for CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
where
    T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
    C: Corpus<I>,
    E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
    I: Input,
    O: MapObserver<T>,
    OT: ObserversTuple<I, S>,
    S: HasCorpus<C, I> + HasMetadata,
    Z: Evaluator<E, EM, I, S>,
{
    /// Calibrate the testcase at `corpus_idx`: run it `stage_max` times,
    /// then fold timing and bitmap statistics into the global
    /// `PowerScheduleMetadata` and the testcase's own metadata.
    #[inline]
    #[allow(clippy::let_and_return)]
    fn perform(
        &mut self,
        fuzzer: &mut Z,
        executor: &mut E,
        state: &mut S,
        manager: &mut EM,
        corpus_idx: usize,
    ) -> Result<(), Error> {
        let iter = self.stage_max;

        // Snapshot the current queue-cycle count; it is stored on the
        // testcase below as its "handicap".
        let handicap = state
            .metadata()
            .get::<PowerScheduleMetadata>()
            .ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?
            .queue_cycles;

        let start = current_time();

        // Execute the same input `iter` times to measure total execution time.
        for _i in 0..iter {
            let input = state
                .corpus()
                .get(corpus_idx)?
                .borrow_mut()
                .load_input()?
                .clone();

            let _ = executor.run_target(fuzzer, state, manager, &input)?;
        }

        let end = current_time();

        // Read the coverage map left by the last run and count its set entries.
        let map = executor
            .observers()
            .match_name::<O>(&self.map_observer_name)
            .ok_or_else(|| Error::KeyNotFound("MapObserver not found".to_string()))?;

        let bitmap_size = map.count_bytes();

        // Accumulate global calibration statistics.
        let psmeta = state
            .metadata_mut()
            .get_mut::<PowerScheduleMetadata>()
            .ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;

        psmeta.set_exec_time(psmeta.exec_time() + (end - start));
        psmeta.set_cycles(psmeta.cycles() + (iter as u64));
        psmeta.set_bitmap_size(psmeta.bitmap_size() + bitmap_size);
        psmeta.set_bitmap_entries(psmeta.bitmap_entries() + 1);

        // Store the average execution time on the testcase itself.
        let mut testcase = state.corpus().get(corpus_idx)?.borrow_mut();
        testcase.set_exec_time((end - start) / (iter as u32));

        // Update the per-testcase power-schedule metadata.
        let data = testcase
            .metadata_mut()
            .get_mut::<PowerScheduleTestcaseMetaData>()
            .ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?;

        data.set_bitmap_size(bitmap_size);
        data.set_handicap(handicap);
        data.set_fuzz_level(data.fuzz_level() + 1);

        Ok(())
    }
}
/// Number of entries in the global path-frequency table `n_fuzz` (2^21).
pub const N_FUZZ_SIZE: usize = 1 << 21;

/// The metadata for runs in the calibration stage, shared fuzzer-wide.
///
/// (This doc line previously sat on the `impl` block, where rustdoc
/// attributed it to the wrong item.)
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct PowerScheduleMetadata {
    /// Measured exec time during calibration
    exec_time: Duration,
    /// Calibration cycles
    cycles: u64,
    /// Size of the observer map
    bitmap_size: u64,
    /// Number of filled map entries
    bitmap_entries: u64,
    /// Queue cycles
    queue_cycles: u64,
    /// The vector to contain the frequency of each execution path.
    n_fuzz: Vec<u32>,
}

impl PowerScheduleMetadata {
    /// Creates a new [`PowerScheduleMetadata`] with zeroed statistics and an
    /// `n_fuzz` table of `N_FUZZ_SIZE` entries.
    #[must_use]
    pub fn new() -> Self {
        Self {
            exec_time: Duration::from_millis(0),
            cycles: 0,
            bitmap_size: 0,
            bitmap_entries: 0,
            queue_cycles: 0,
            n_fuzz: vec![0; N_FUZZ_SIZE],
        }
    }

    /// Total execution time accumulated during calibration.
    #[must_use]
    pub fn exec_time(&self) -> Duration {
        self.exec_time
    }

    /// Set the accumulated calibration execution time.
    pub fn set_exec_time(&mut self, time: Duration) {
        self.exec_time = time;
    }

    /// Total number of calibration executions.
    #[must_use]
    pub fn cycles(&self) -> u64 {
        self.cycles
    }

    /// Set the calibration execution count.
    pub fn set_cycles(&mut self, val: u64) {
        self.cycles = val;
    }

    /// Accumulated observed bitmap size over all calibrated entries.
    #[must_use]
    pub fn bitmap_size(&self) -> u64 {
        self.bitmap_size
    }

    /// Set the accumulated bitmap size.
    pub fn set_bitmap_size(&mut self, val: u64) {
        self.bitmap_size = val;
    }

    /// Number of calibration runs folded into `bitmap_size`.
    #[must_use]
    pub fn bitmap_entries(&self) -> u64 {
        self.bitmap_entries
    }

    /// Set the bitmap-entries counter.
    pub fn set_bitmap_entries(&mut self, val: u64) {
        self.bitmap_entries = val;
    }

    /// Number of completed passes over the whole queue.
    #[must_use]
    pub fn queue_cycles(&self) -> u64 {
        self.queue_cycles
    }

    /// Set the queue-cycle counter.
    pub fn set_queue_cycles(&mut self, val: u64) {
        self.queue_cycles = val;
    }

    /// The path-frequency table, indexed by map hash.
    #[must_use]
    pub fn n_fuzz(&self) -> &[u32] {
        &self.n_fuzz
    }

    /// The path-frequency table (mutable).
    #[must_use]
    pub fn n_fuzz_mut(&mut self) -> &mut [u32] {
        &mut self.n_fuzz
    }
}
crate::impl_serdeany!(PowerScheduleMetadata);
impl<C, E, I, EM, O, OT, S, T, Z> CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
where
T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
S: HasCorpus<C, I> + HasMetadata,
Z: Evaluator<E, EM, I, S>,
{
pub fn new(state: &mut S, map_observer_name: &O) -> Self {
state.add_metadata::<PowerScheduleMetadata>(PowerScheduleMetadata::new());
Self {
map_observer_name: map_observer_name.name().to_string(),
stage_max: CAL_STAGE_MAX,
phantom: PhantomData,
}
}
}
impl Default for PowerScheduleMetadata {
fn default() -> Self {
Self::new()
}
}

View File

@ -11,9 +11,12 @@ pub use mutational::{MutationalStage, StdMutationalStage};
pub mod tracing; pub mod tracing;
pub use tracing::{ShadowTracingStage, TracingStage}; pub use tracing::{ShadowTracingStage, TracingStage};
//pub mod power; pub mod calibrate;
//pub use power::PowerMutationalStage; pub use calibrate::{CalibrationStage, PowerScheduleMetadata};
pub mod power;
use crate::Error; use crate::Error;
pub use power::PowerMutationalStage;
/// A stage is one step in the fuzzing process. /// A stage is one step in the fuzzing process.
/// Multiple stages will be scheduled one by one for each input. /// Multiple stages will be scheduled one by one for each input.

View File

@ -36,7 +36,7 @@ where
fn mutator_mut(&mut self) -> &mut M; fn mutator_mut(&mut self) -> &mut M;
/// Gets the number of iterations this mutator should run for. /// Gets the number of iterations this mutator should run for.
fn iterations(&self, state: &mut S) -> usize; fn iterations(&self, state: &mut S, corpus_idx: usize) -> Result<usize, Error>;
/// Runs this (mutational) stage for the given testcase /// Runs this (mutational) stage for the given testcase
#[allow(clippy::cast_possible_wrap)] // more than i32 stages on 32 bit system - highly unlikely... #[allow(clippy::cast_possible_wrap)] // more than i32 stages on 32 bit system - highly unlikely...
@ -48,7 +48,7 @@ where
manager: &mut EM, manager: &mut EM,
corpus_idx: usize, corpus_idx: usize,
) -> Result<(), Error> { ) -> Result<(), Error> {
let num = self.iterations(state); let num = self.iterations(state, corpus_idx)?;
for i in 0..num { for i in 0..num {
start_timer!(state); start_timer!(state);
@ -118,8 +118,8 @@ where
} }
/// Gets the number of iterations as a random number /// Gets the number of iterations as a random number
fn iterations(&self, state: &mut S) -> usize { fn iterations(&self, state: &mut S, _corpus_idx: usize) -> Result<usize, Error> {
1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS) as usize Ok(1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS) as usize)
} }
} }

View File

@ -1,48 +1,69 @@
//! The power schedules. This stage should be invoked after the calibration stage.
use alloc::string::{String, ToString};
use core::marker::PhantomData; use core::marker::PhantomData;
use num::Integer;
use crate::{ use crate::{
bolts::rands::Rand, corpus::{Corpus, IsFavoredMetadata, PowerScheduleTestcaseMetaData, Testcase},
corpus::{Corpus, CorpusScheduler},
events::EventManager,
executors::{Executor, HasObservers}, executors::{Executor, HasObservers},
fuzzer::Evaluator,
inputs::Input, inputs::Input,
mutators::Mutator, mutators::Mutator,
observers::ObserversTuple, observers::{MapObserver, ObserversTuple},
stages::{Stage, MutationalStage}, stages::{MutationalStage, PowerScheduleMetadata, Stage},
state::{Evaluator, HasCorpus, HasRand}, state::{HasClientPerfStats, HasCorpus, HasMetadata},
Error, Error,
}; };
/// The mutational stage using power schedules #[derive(Clone, Debug, PartialEq)]
#[derive(Clone, Debug)] pub enum PowerSchedule {
pub struct PowerMutationalStage<C, CS, E, EM, I, M, OT, R, S> EXPLORE,
where FAST,
M: Mutator<I, S>, COE,
I: Input, LIN,
S: HasCorpus<C, I> + Evaluator<I> + HasRand<R>, QUAD,
C: Corpus<I>, EXPLOIT,
EM: EventManager<I, S>,
E: Executor<I> + HasObservers<OT>,
OT: ObserversTuple,
CS: CorpusScheduler<I, S>,
R: Rand,
{
mutator: M,
phantom: PhantomData<(C, CS, E, EM, I, OT, R, S)>,
} }
impl<C, CS, E, EM, I, M, OT, R, S> MutationalStage<C, CS, E, EM, I, M, OT, S> const POWER_BETA: f64 = 1.0;
for PowerMutationalStage<C, CS, E, EM, I, M, OT, R, S> const MAX_FACTOR: f64 = POWER_BETA * 32.0;
const HAVOC_MAX_MULT: f64 = 64.0;
/// The mutational stage using power schedules
#[derive(Clone, Debug)]
pub struct PowerMutationalStage<C, E, EM, I, M, O, OT, S, T, Z>
where where
M: Mutator<I, S>, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
I: Input,
S: HasCorpus<C, I> + Evaluator<I> + HasRand<R>,
C: Corpus<I>, C: Corpus<I>,
EM: EventManager<I, S>, E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
E: Executor<I> + HasObservers<OT>, I: Input,
OT: ObserversTuple, M: Mutator<I, S>,
CS: CorpusScheduler<I, S>, O: MapObserver<T>,
R: Rand, OT: ObserversTuple<I, S>,
S: HasClientPerfStats + HasCorpus<C, I> + HasMetadata,
Z: Evaluator<E, EM, I, S>,
{
map_observer_name: String,
mutator: M,
/// The employed power schedule strategy
strat: PowerSchedule,
#[allow(clippy::type_complexity)]
phantom: PhantomData<(C, E, EM, I, O, OT, S, T, Z)>,
}
impl<C, E, EM, I, M, O, OT, S, T, Z> MutationalStage<C, E, EM, I, M, S, Z>
for PowerMutationalStage<C, E, EM, I, M, O, OT, S, T, Z>
where
T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
I: Input,
M: Mutator<I, S>,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
S: HasClientPerfStats + HasCorpus<C, I> + HasMetadata,
Z: Evaluator<E, EM, I, S>,
{ {
/// The mutator, added to this stage /// The mutator, added to this stage
#[inline] #[inline]
@ -57,54 +78,319 @@ where
} }
/// Gets the number of iterations as a random number /// Gets the number of iterations as a random number
fn iterations(&self, state: &mut S) -> usize { fn iterations(&self, state: &mut S, corpus_idx: usize) -> Result<usize, Error> {
1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS) as usize let mut testcase = state.corpus().get(corpus_idx)?.borrow_mut();
} let psmeta = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let mut fuzz_mu = 0.0;
if self.strat == PowerSchedule::COE {
fuzz_mu = self.fuzz_mu(state, psmeta)?;
} }
impl<C, CS, E, EM, I, M, OT, R, S> Stage<CS, E, EM, I, S> // 1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS) as usize
for PowerMutationalStage<C, CS, E, EM, I, M, OT, R, S> self.calculate_score(&mut testcase, psmeta, fuzz_mu)
where }
M: Mutator<I, S>,
I: Input, #[allow(clippy::cast_possible_wrap)]
S: HasCorpus<C, I> + Evaluator<I> + HasRand<R>, fn perform_mutational(
C: Corpus<I>, &mut self,
EM: EventManager<I, S>, fuzzer: &mut Z,
E: Executor<I> + HasObservers<OT>,
OT: ObserversTuple,
CS: CorpusScheduler<I, S>,
R: Rand,
{
#[inline]
fn perform(
&self,
state: &mut S,
executor: &mut E, executor: &mut E,
state: &mut S,
manager: &mut EM, manager: &mut EM,
scheduler: &CS,
corpus_idx: usize, corpus_idx: usize,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.perform_mutational(state, executor, manager, scheduler, corpus_idx) let num = self.iterations(state, corpus_idx)?;
for i in 0..num {
let mut input = state
.corpus()
.get(corpus_idx)?
.borrow_mut()
.load_input()?
.clone();
self.mutator_mut().mutate(state, &mut input, i as i32)?;
let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, input)?;
let observer = executor
.observers()
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::KeyNotFound("MapObserver not found".to_string()))?;
let mut hash = observer.hash() as usize;
let psmeta = state
.metadata_mut()
.get_mut::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
hash %= psmeta.n_fuzz().len();
// Update the path frequency
psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1);
if let Some(idx) = corpus_idx {
state
.corpus()
.get(idx)?
.borrow_mut()
.metadata_mut()
.get_mut::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| {
Error::KeyNotFound("PowerScheduleTestData not found".to_string())
})?
.set_n_fuzz_entry(hash);
}
self.mutator_mut().post_exec(state, i as i32, corpus_idx)?;
}
Ok(())
} }
} }
impl<C, CS, E, EM, I, M, OT, R, S> PowerMutationalStage<C, CS, E, EM, I, M, OT, R, S> impl<C, E, EM, I, M, O, OT, S, T, Z> Stage<E, EM, S, Z>
for PowerMutationalStage<C, E, EM, I, M, O, OT, S, T, Z>
where where
M: Mutator<I, S>, T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
I: Input,
S: HasCorpus<C, I> + Evaluator<I> + HasRand<R>,
C: Corpus<I>, C: Corpus<I>,
EM: EventManager<I, S>, E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
E: Executor<I> + HasObservers<OT>, I: Input,
OT: ObserversTuple, M: Mutator<I, S>,
CS: CorpusScheduler<I, S>, O: MapObserver<T>,
R: Rand, OT: ObserversTuple<I, S>,
S: HasClientPerfStats + HasCorpus<C, I> + HasMetadata,
Z: Evaluator<E, EM, I, S>,
{ {
/// Creates a new default mutational stage #[inline]
pub fn new(mutator: M) -> Self { #[allow(clippy::let_and_return)]
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut S,
manager: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let ret = self.perform_mutational(fuzzer, executor, state, manager, corpus_idx);
ret
}
}
impl<C, E, EM, I, M, O, OT, S, T, Z> PowerMutationalStage<C, E, EM, I, M, O, OT, S, T, Z>
where
T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
I: Input,
M: Mutator<I, S>,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
S: HasClientPerfStats + HasCorpus<C, I> + HasMetadata,
Z: Evaluator<E, EM, I, S>,
{
pub fn new(mutator: M, strat: PowerSchedule, map_observer_name: &O) -> Self {
Self { Self {
mutator: mutator, map_observer_name: map_observer_name.name().to_string(),
mutator,
strat,
phantom: PhantomData, phantom: PhantomData,
} }
} }
/// Compute the parameter `μ` used in the COE schedule.
#[inline]
pub fn fuzz_mu(&self, state: &S, psmeta: &PowerScheduleMetadata) -> Result<f64, Error> {
let corpus = state.corpus();
let mut n_paths = 0;
let mut fuzz_mu = 0.0;
for idx in 0..corpus.count() {
let n_fuzz_entry = corpus
.get(idx)?
.borrow()
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?
.n_fuzz_entry();
fuzz_mu += libm::log2(f64::from(psmeta.n_fuzz()[n_fuzz_entry]));
n_paths += 1;
}
if n_paths == 0 {
return Err(Error::Unknown(String::from("Queue state corrput")));
}
fuzz_mu /= f64::from(n_paths);
Ok(fuzz_mu)
}
/// Compute the `power` we assign to each corpus entry
#[inline]
#[allow(
clippy::cast_precision_loss,
clippy::too_many_lines,
clippy::cast_sign_loss
)]
fn calculate_score(
&self,
testcase: &mut Testcase<I>,
psmeta: &PowerScheduleMetadata,
fuzz_mu: f64,
) -> Result<usize, Error> {
let mut perf_score = 100.0;
let q_exec_us = testcase
.exec_time()
.ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))?
.as_nanos() as f64;
let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64;
let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries();
let favored = testcase.has_metadata::<IsFavoredMetadata>();
let tcmeta = testcase
.metadata_mut()
.get_mut::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?;
if q_exec_us * 0.1 > avg_exec_us {
perf_score = 10.0;
} else if q_exec_us * 0.2 > avg_exec_us {
perf_score = 25.0;
} else if q_exec_us * 0.5 > avg_exec_us {
perf_score = 50.0;
} else if q_exec_us * 0.75 > avg_exec_us {
perf_score = 75.0;
} else if q_exec_us * 4.0 < avg_exec_us {
perf_score = 300.0;
} else if q_exec_us * 3.0 < avg_exec_us {
perf_score = 200.0;
} else if q_exec_us * 2.0 < avg_exec_us {
perf_score = 150.0;
}
let q_bitmap_size = tcmeta.bitmap_size() as f64;
if q_bitmap_size * 0.3 > avg_bitmap_size as f64 {
perf_score *= 3.0;
} else if q_bitmap_size * 0.5 > avg_bitmap_size as f64 {
perf_score *= 2.0;
} else if q_bitmap_size * 0.75 > avg_bitmap_size as f64 {
perf_score *= 1.5;
} else if q_bitmap_size * 3.0 < avg_bitmap_size as f64 {
perf_score *= 0.25;
} else if q_bitmap_size * 2.0 < avg_bitmap_size as f64 {
perf_score *= 0.5;
} else if q_bitmap_size * 1.5 < avg_bitmap_size as f64 {
perf_score *= 0.75;
}
if tcmeta.handicap() >= 4 {
perf_score *= 4.0;
tcmeta.set_handicap(tcmeta.handicap() - 4);
} else if tcmeta.handicap() > 0 {
perf_score *= 2.0;
tcmeta.set_handicap(tcmeta.handicap() - 1);
}
if tcmeta.depth() >= 4 && tcmeta.depth() < 8 {
perf_score *= 2.0;
} else if tcmeta.depth() >= 8 && tcmeta.depth() < 14 {
perf_score *= 3.0;
} else if tcmeta.depth() >= 14 && tcmeta.depth() < 25 {
perf_score *= 4.0;
} else if tcmeta.depth() >= 25 {
perf_score *= 5.0;
}
let mut factor: f64 = 1.0;
// COE and Fast schedule are fairly different from what are described in the original thesis,
// This implementation follows the changes made in this pull request https://github.com/AFLplusplus/AFLplusplus/pull/568
match &self.strat {
PowerSchedule::EXPLORE => {
// Nothing happens in EXPLORE
}
PowerSchedule::EXPLOIT => {
factor = MAX_FACTOR;
}
PowerSchedule::COE => {
if libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])) > fuzz_mu
&& !favored
{
// Never skip favorites.
factor = 0.0;
}
}
PowerSchedule::FAST => {
if tcmeta.fuzz_level() != 0 {
let lg = libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]));
match lg {
f if f < 2.0 => {
factor = 4.0;
}
f if (2.0..4.0).contains(&f) => {
factor = 3.0;
}
f if (4.0..5.0).contains(&f) => {
factor = 2.0;
}
f if (6.0..7.0).contains(&f) => {
if !favored {
factor = 0.8;
}
}
f if (7.0..8.0).contains(&f) => {
if !favored {
factor = 0.6;
}
}
f if f >= 8.0 => {
if !favored {
factor = 0.4
}
}
_ => {
factor = 1.0;
}
}
if favored {
factor *= 1.15;
}
}
}
PowerSchedule::LIN => {
factor = (tcmeta.fuzz_level() as f64)
/ f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1);
}
PowerSchedule::QUAD => {
factor = ((tcmeta.fuzz_level() * tcmeta.fuzz_level()) as f64)
/ f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1);
}
}
if self.strat != PowerSchedule::EXPLORE {
if factor > MAX_FACTOR {
factor = MAX_FACTOR;
}
perf_score *= factor / POWER_BETA;
}
// Lower bound if the strat is not COE.
if self.strat == PowerSchedule::COE && perf_score < 1.0 {
perf_score = 1.0;
}
// Upper bound
if perf_score > HAVOC_MAX_MULT * 100.0 {
perf_score = HAVOC_MAX_MULT * 100.0;
}
Ok(perf_score as usize)
}
} }