Rename FavFactor to TestcaseScore; More TestcaseScores (#574)

* rework aflfast

* more

* move fuzz_Mu

* weighted

* fix

* borrow checker fix

* compute_weight

* alias_table

* fmt

* fix & rename

* fix & less mut

* no_std

* no_std

* clippy

* 32bit clippy fix

* top_rated for compute_weight

* fix

* clippy & metadata Init

* fix

* fix

* fix

* clippy & fmt

* change fuzzers

* fuzzbench_selected

* fmt

* compute() has state

* use favfactor for powerschedules also

* fix merge

* rename

* fmt & clippy

* no_std

* fmt

* clippy

* rename

* fmt

* rename

* fmt

* fix

* fix

* fmt

* fix

* fix
Toka 2022-03-27 04:04:46 +09:00 committed by GitHub
parent e20d345d99
commit abf1a66028
18 changed files with 471 additions and 407 deletions
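In short, the old `FavFactor::compute(entry)` returning `u64` becomes `TestcaseScore::compute(entry, state)` returning `f64`, so a score can now consult state-wide metadata (e.g. `TopRatedsMetadata` or `PowerScheduleMetadata`) in addition to the testcase itself. A minimal sketch of a custom implementor against the renamed trait, using only the signature visible in the new `testcase_score.rs` below; the `ExecTimeScore` name is illustrative and not part of this change:

use core::marker::PhantomData;

use libafl::{
    corpus::Testcase,
    inputs::Input,
    schedulers::TestcaseScore,
    state::{HasCorpus, HasMetadata},
    Error,
};

/// Illustrative score: rate a testcase by its recorded execution time in milliseconds.
#[derive(Debug, Clone)]
pub struct ExecTimeScore<I> {
    phantom: PhantomData<I>,
}

impl<I, S> TestcaseScore<I, S> for ExecTimeScore<I>
where
    I: Input,
    S: HasCorpus<I> + HasMetadata,
{
    /// Scores are now `f64` and receive the whole state, not just the corpus entry.
    #[allow(clippy::cast_precision_loss)]
    fn compute(entry: &mut Testcase<I>, _state: &S) -> Result<f64, Error> {
        // Fall back to 1 ms for entries that have not been executed yet.
        Ok(entry.exec_time().map_or(1, |d| d.as_millis()) as f64)
    }
}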


@ -43,7 +43,8 @@ use libafl::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler,
},
stages::{
calibrate::CalibrationStage, power::PowerMutationalStage, StdMutationalStage, TracingStage,
calibrate::CalibrationStage, power::StdPowerMutationalStage, StdMutationalStage,
TracingStage,
},
state::{HasCorpus, HasMetadata, StdState},
Error,
@ -308,7 +309,7 @@ fn fuzz(
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new());


@ -40,7 +40,7 @@ use libafl::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler,
},
stages::{
calibrate::CalibrationStage, power::PowerMutationalStage, ShadowTracingStage,
calibrate::CalibrationStage, power::StdPowerMutationalStage, ShadowTracingStage,
StdMutationalStage,
},
state::{HasCorpus, HasMetadata, StdState},
@ -280,7 +280,7 @@ fn fuzz(
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new());


@ -40,7 +40,7 @@ use libafl::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler,
},
stages::{
calibrate::CalibrationStage, power::PowerMutationalStage, ShadowTracingStage,
calibrate::CalibrationStage, power::StdPowerMutationalStage, ShadowTracingStage,
StdMutationalStage,
},
state::{HasCorpus, HasMetadata, StdState},
@ -293,7 +293,7 @@ fn fuzz(
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new());


@ -39,9 +39,12 @@ use libafl::{
StdMOptMutator, StdScheduledMutator, Tokens,
},
observers::{HitcountsMapObserver, StdMapObserver, TimeObserver},
schedulers::{powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, WeightedScheduler},
schedulers::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler,
},
stages::{
calibrate::CalibrationStage, power::PowerMutationalStage, StdMutationalStage, TracingStage,
calibrate::CalibrationStage, power::StdPowerMutationalStage, StdMutationalStage,
TracingStage,
},
state::{HasCorpus, HasMetadata, StdState},
Error,
@ -306,10 +309,10 @@ fn fuzz(
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerScheduler::new(WeightedScheduler::new());
let scheduler = IndexesLenTimeMinimizerScheduler::new(StdWeightedScheduler::new());
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);


@ -49,7 +49,7 @@ use libafl::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler,
},
stages::{
calibrate::CalibrationStage, power::PowerMutationalStage, GeneralizationStage,
calibrate::CalibrationStage, power::StdPowerMutationalStage, GeneralizationStage,
StdMutationalStage, TracingStage,
},
state::{HasCorpus, HasMetadata, StdState},
@ -370,7 +370,7 @@ fn fuzz_binary(
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new());
@ -575,7 +575,7 @@ fn fuzz_text(
let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?;
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
let grimoire_mutator = StdScheduledMutator::with_max_iterations(
tuple_list!(


@ -25,8 +25,10 @@ use libafl::{
mutators::scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator},
mutators::token_mutations::Tokens,
observers::{HitcountsMapObserver, StdMapObserver, TimeObserver},
schedulers::{powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, WeightedScheduler},
stages::{calibrate::CalibrationStage, power::PowerMutationalStage},
schedulers::{
powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler,
},
stages::{calibrate::CalibrationStage, power::StdPowerMutationalStage},
state::{HasCorpus, HasMetadata, StdState},
Error,
};
@ -129,12 +131,12 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
let calibration = CalibrationStage::new(&edges_observer);
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
let mut stages = tuple_list!(calibration, power);
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerScheduler::new(WeightedScheduler::new());
let scheduler = IndexesLenTimeMinimizerScheduler::new(StdWeightedScheduler::new());
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);


@ -18,7 +18,7 @@ use libafl::{
monitors::MultiMonitor,
observers::{HitcountsMapObserver, StdMapObserver, TimeObserver},
schedulers::{powersched::PowerSchedule, PowerQueueScheduler},
stages::{calibrate::CalibrationStage, power::PowerMutationalStage},
stages::{calibrate::CalibrationStage, power::StdPowerMutationalStage},
state::{HasCorpus, StdState},
Error,
};
@ -127,7 +127,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
let calibration = CalibrationStage::new(&edges_observer);
let power =
PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST);
let mut stages = tuple_list!(calibration, power);


@ -5,8 +5,8 @@ use libafl::{
executors::ExitKind,
feedbacks::{Feedback, MapIndexesMetadata},
observers::ObserversTuple,
schedulers::{FavFactor, MinimizerScheduler},
state::{HasClientPerfMonitor, HasMetadata},
schedulers::{MinimizerScheduler, TestcaseScore},
state::{HasClientPerfMonitor, HasCorpus, HasMetadata},
Error, SerdeAny,
};
@ -19,19 +19,22 @@ pub struct PacketLenMetadata {
pub length: u64,
}
pub struct PacketLenFavFactor {}
pub struct PacketLenTestcaseScore {}
impl FavFactor<PacketData> for PacketLenFavFactor {
fn compute(entry: &mut Testcase<PacketData>) -> Result<u64, Error> {
impl<S> TestcaseScore<PacketData, S> for PacketLenTestcaseScore
where
S: HasCorpus<PacketData> + HasMetadata,
{
fn compute(entry: &mut Testcase<PacketData>, _state: &S) -> Result<f64, Error> {
Ok(entry
.metadata()
.get::<PacketLenMetadata>()
.map_or(1, |m| m.length))
.map_or(1, |m| m.length) as f64)
}
}
pub type PacketLenMinimizerScheduler<CS, S> =
MinimizerScheduler<CS, PacketLenFavFactor, PacketData, MapIndexesMetadata, S>;
MinimizerScheduler<CS, PacketLenTestcaseScore, PacketData, MapIndexesMetadata, S>;
#[derive(Serialize, Deserialize, Default, Clone, Debug)]
pub struct PacketLenFeedback {


@ -1,20 +1,14 @@
//! The testcase is a struct embedded in each corpus.
//! It will contain a respective input, and metadata.
use alloc::string::{String, ToString};
use alloc::string::String;
use core::{convert::Into, default::Default, option::Option, time::Duration};
use serde::{Deserialize, Serialize};
use crate::{
bolts::{serdeany::SerdeAnyMap, HasLen, HasRefCnt},
corpus::Corpus,
feedbacks::MapIndexesMetadata,
bolts::{serdeany::SerdeAnyMap, HasLen},
inputs::Input,
schedulers::{
minimizer::{IsFavoredMetadata, TopRatedsMetadata},
powersched::{PowerSchedule, PowerScheduleMetadata},
},
state::{HasCorpus, HasMetadata},
state::HasMetadata,
Error,
};
@ -58,11 +52,6 @@ where
}
}
/// Constants for powerschedules
const POWER_BETA: f64 = 1.0;
const MAX_FACTOR: f64 = POWER_BETA * 32.0;
const HAVOC_MAX_MULT: f64 = 64.0;
/// Impl of a testcase
impl<I> Testcase<I>
where
@ -212,280 +201,6 @@ where
..Testcase::default()
}
}
/// Compute the `weight` used in weighted corpus entry selection algo
#[allow(clippy::cast_precision_loss, clippy::cast_lossless)]
pub fn compute_weight<S>(&self, state: &S) -> Result<f64, Error>
where
S: HasCorpus<I> + HasMetadata,
{
let mut weight = 1.0;
let psmeta = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let tcmeta = self
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?;
// This means that this testcase has never gone through the calibration stage before;
// in this case we'll just return the default weight
if tcmeta.fuzz_level() == 0 || psmeta.cycles() == 0 {
return Ok(weight);
}
let q_exec_us = self
.exec_time()
.ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))?
.as_nanos() as f64;
let favored = self.has_metadata::<IsFavoredMetadata>();
let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64;
let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries();
let q_bitmap_size = tcmeta.bitmap_size() as f64;
match psmeta.strat() {
PowerSchedule::FAST | PowerSchedule::COE | PowerSchedule::LIN | PowerSchedule::QUAD => {
let hits = psmeta.n_fuzz()[tcmeta.n_fuzz_entry()];
if hits > 0 {
weight *= libm::log10(f64::from(hits)) + 1.0;
}
}
// EXPLORE and EXPLOIT fall into this
_ => {}
}
weight *= avg_exec_us / q_exec_us;
weight *= libm::log2(q_bitmap_size) / (avg_bitmap_size as f64);
let tc_ref = match self.metadata().get::<MapIndexesMetadata>() {
Some(meta) => meta.refcnt() as f64,
None => 0.0,
};
let avg_top_size = state
.metadata()
.get::<TopRatedsMetadata>()
.ok_or_else(|| Error::KeyNotFound("TopRatedsMetadata not found".to_string()))?
.map()
.len() as f64;
weight *= 1.0 + (tc_ref / avg_top_size);
if favored {
weight *= 5.0;
}
// was it fuzzed before?
if tcmeta.fuzz_level() == 0 {
weight *= 2.0;
}
assert!(weight.is_normal());
Ok(weight)
}
/// Compute the `power` we assign to each corpus entry
#[inline]
#[allow(
clippy::cast_precision_loss,
clippy::too_many_lines,
clippy::cast_sign_loss
)]
pub fn calculate_score<S>(&self, state: &S) -> Result<usize, Error>
where
S: HasCorpus<I> + HasMetadata,
{
let psmeta = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let fuzz_mu = if psmeta.strat() == PowerSchedule::COE {
let corpus = state.corpus();
let mut n_paths = 0;
let mut v = 0.0;
for idx in 0..corpus.count() {
let n_fuzz_entry = corpus
.get(idx)?
.borrow()
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| {
Error::KeyNotFound("PowerScheduleTestData not found".to_string())
})?
.n_fuzz_entry();
v += libm::log2(f64::from(psmeta.n_fuzz()[n_fuzz_entry]));
n_paths += 1;
}
if n_paths == 0 {
return Err(Error::Unknown(String::from("Queue state corrupt")));
}
v /= f64::from(n_paths);
v
} else {
0.0
};
let mut perf_score = 100.0;
let q_exec_us = self
.exec_time()
.ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))?
.as_nanos() as f64;
let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64;
let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries();
let favored = self.has_metadata::<IsFavoredMetadata>();
let tcmeta = self
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| {
Error::KeyNotFound("PowerScheduleTestcaseMetaData not found".to_string())
})?;
if q_exec_us * 0.1 > avg_exec_us {
perf_score = 10.0;
} else if q_exec_us * 0.2 > avg_exec_us {
perf_score = 25.0;
} else if q_exec_us * 0.5 > avg_exec_us {
perf_score = 50.0;
} else if q_exec_us * 0.75 > avg_exec_us {
perf_score = 75.0;
} else if q_exec_us * 4.0 < avg_exec_us {
perf_score = 300.0;
} else if q_exec_us * 3.0 < avg_exec_us {
perf_score = 200.0;
} else if q_exec_us * 2.0 < avg_exec_us {
perf_score = 150.0;
}
let q_bitmap_size = tcmeta.bitmap_size() as f64;
if q_bitmap_size * 0.3 > avg_bitmap_size as f64 {
perf_score *= 3.0;
} else if q_bitmap_size * 0.5 > avg_bitmap_size as f64 {
perf_score *= 2.0;
} else if q_bitmap_size * 0.75 > avg_bitmap_size as f64 {
perf_score *= 1.5;
} else if q_bitmap_size * 3.0 < avg_bitmap_size as f64 {
perf_score *= 0.25;
} else if q_bitmap_size * 2.0 < avg_bitmap_size as f64 {
perf_score *= 0.5;
} else if q_bitmap_size * 1.5 < avg_bitmap_size as f64 {
perf_score *= 0.75;
}
if tcmeta.handicap() >= 4 {
perf_score *= 4.0;
// tcmeta.set_handicap(tcmeta.handicap() - 4);
} else if tcmeta.handicap() > 0 {
perf_score *= 2.0;
// tcmeta.set_handicap(tcmeta.handicap() - 1);
}
if tcmeta.depth() >= 4 && tcmeta.depth() < 8 {
perf_score *= 2.0;
} else if tcmeta.depth() >= 8 && tcmeta.depth() < 14 {
perf_score *= 3.0;
} else if tcmeta.depth() >= 14 && tcmeta.depth() < 25 {
perf_score *= 4.0;
} else if tcmeta.depth() >= 25 {
perf_score *= 5.0;
}
let mut factor: f64 = 1.0;
// COE and FAST schedules are fairly different from what is described in the original thesis;
// this implementation follows the changes made in this pull request: https://github.com/AFLplusplus/AFLplusplus/pull/568
match psmeta.strat() {
PowerSchedule::EXPLORE => {
// Nothing happens in EXPLORE
}
PowerSchedule::EXPLOIT => {
factor = MAX_FACTOR;
}
PowerSchedule::COE => {
if libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])) > fuzz_mu
&& !favored
{
// Never skip favorites.
factor = 0.0;
}
}
PowerSchedule::FAST => {
if tcmeta.fuzz_level() != 0 {
let lg = libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]));
match lg {
f if f < 2.0 => {
factor = 4.0;
}
f if (2.0..4.0).contains(&f) => {
factor = 3.0;
}
f if (4.0..5.0).contains(&f) => {
factor = 2.0;
}
f if (6.0..7.0).contains(&f) => {
if !favored {
factor = 0.8;
}
}
f if (7.0..8.0).contains(&f) => {
if !favored {
factor = 0.6;
}
}
f if f >= 8.0 => {
if !favored {
factor = 0.4;
}
}
_ => {
factor = 1.0;
}
}
if favored {
factor *= 1.15;
}
}
}
PowerSchedule::LIN => {
factor = (tcmeta.fuzz_level() as f64)
/ f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1);
}
PowerSchedule::QUAD => {
factor = ((tcmeta.fuzz_level() * tcmeta.fuzz_level()) as f64)
/ f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1);
}
}
if psmeta.strat() != PowerSchedule::EXPLORE {
if factor > MAX_FACTOR {
factor = MAX_FACTOR;
}
perf_score *= factor / POWER_BETA;
}
// Lower bound when the strat is COE.
if psmeta.strat() == PowerSchedule::COE && perf_score < 1.0 {
perf_score = 1.0;
}
// Upper bound
if perf_score > HAVOC_MAX_MULT * 100.0 {
perf_score = HAVOC_MAX_MULT * 100.0;
}
Ok(perf_score as usize)
}
}
impl<I> Default for Testcase<I>


@ -7,7 +7,7 @@ use crate::{
inputs::Input,
schedulers::{
minimizer::{IsFavoredMetadata, MinimizerScheduler, DEFAULT_SKIP_NON_FAVORED_PROB},
LenTimeMulFavFactor, Scheduler,
LenTimeMulTestcaseScore, Scheduler,
},
state::{HasCorpus, HasMetadata, HasRand},
Error,
@ -100,7 +100,7 @@ where
{
accounting_map: &'a [u32],
skip_non_favored_prob: u64,
inner: MinimizerScheduler<CS, LenTimeMulFavFactor<I>, I, MapIndexesMetadata, S>,
inner: MinimizerScheduler<CS, LenTimeMulTestcaseScore<I, S>, I, MapIndexesMetadata, S>,
}
impl<'a, CS, I, S> Scheduler<I, S> for CoverageAccountingScheduler<'a, CS, I, S>


@ -1,34 +0,0 @@
//! The `FavFactor` is an evaluator providing scores of corpus items.
use crate::{bolts::HasLen, corpus::Testcase, inputs::Input, Error};
use core::marker::PhantomData;
/// Compute the favor factor of a [`Testcase`]. Lower is better.
pub trait FavFactor<I>
where
I: Input,
{
/// Computes the favor factor of a [`Testcase`]. Lower is better.
fn compute(entry: &mut Testcase<I>) -> Result<u64, Error>;
}
/// Multiply the testcase size with the execution time.
/// This favors small and quick testcases.
#[derive(Debug, Clone)]
pub struct LenTimeMulFavFactor<I>
where
I: Input + HasLen,
{
phantom: PhantomData<I>,
}
impl<I> FavFactor<I> for LenTimeMulFavFactor<I>
where
I: Input + HasLen,
{
fn compute(entry: &mut Testcase<I>) -> Result<u64, Error> {
// TODO maybe enforce entry.exec_time().is_some()
Ok(entry.exec_time().map_or(1, |d| d.as_millis()) as u64 * entry.cached_len()? as u64)
}
}

View File

@ -6,7 +6,7 @@ use crate::{
corpus::{Corpus, Testcase},
feedbacks::MapIndexesMetadata,
inputs::Input,
schedulers::{FavFactor, LenTimeMulFavFactor, Scheduler},
schedulers::{LenTimeMulTestcaseScore, Scheduler, TestcaseScore},
state::{HasCorpus, HasMetadata, HasRand},
Error,
};
@ -57,12 +57,12 @@ impl Default for TopRatedsMetadata {
/// The [`MinimizerScheduler`] employs a genetic algorithm to compute a subset of the
/// corpus that exercise all the requested features (e.g. all the coverage seen so far)
/// prioritizing [`Testcase`]`s` using [`FavFactor`]
/// prioritizing [`Testcase`]`s` using [`TestcaseScore`]
#[derive(Debug, Clone)]
pub struct MinimizerScheduler<CS, F, I, M, S>
where
CS: Scheduler<I, S>,
F: FavFactor<I>,
F: TestcaseScore<I, S>,
I: Input,
M: AsSlice<usize> + SerdeAny + HasRefCnt,
S: HasCorpus<I> + HasMetadata,
@ -75,7 +75,7 @@ where
impl<CS, F, I, M, S> Scheduler<I, S> for MinimizerScheduler<CS, F, I, M, S>
where
CS: Scheduler<I, S>,
F: FavFactor<I>,
F: TestcaseScore<I, S>,
I: Input,
M: AsSlice<usize> + SerdeAny + HasRefCnt,
S: HasCorpus<I> + HasMetadata + HasRand,
@ -123,7 +123,7 @@ where
impl<CS, F, I, M, S> MinimizerScheduler<CS, F, I, M, S>
where
CS: Scheduler<I, S>,
F: FavFactor<I>,
F: TestcaseScore<I, S>,
I: Input,
M: AsSlice<usize> + SerdeAny + HasRefCnt,
S: HasCorpus<I> + HasMetadata + HasRand,
@ -140,7 +140,7 @@ where
let mut new_favoreds = vec![];
{
let mut entry = state.corpus().get(idx)?.borrow_mut();
let factor = F::compute(&mut *entry)?;
let factor = F::compute(&mut *entry, state)?;
let meta = entry.metadata_mut().get_mut::<M>().ok_or_else(|| {
Error::KeyNotFound(format!(
"Metadata needed for MinimizerScheduler not found in testcase #{}",
@ -156,7 +156,7 @@ where
.get(elem)
{
let mut old = state.corpus().get(*old_idx)?.borrow_mut();
if factor > F::compute(&mut *old)? {
if factor > F::compute(&mut *old, state)? {
continue;
}
@ -261,11 +261,11 @@ where
}
}
/// A [`MinimizerScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s`.
/// A [`MinimizerScheduler`] with [`LenTimeMulTestcaseScore`] to prioritize quick and small [`Testcase`]`s`.
pub type LenTimeMinimizerScheduler<CS, I, M, S> =
MinimizerScheduler<CS, LenTimeMulFavFactor<I>, I, M, S>;
MinimizerScheduler<CS, LenTimeMulTestcaseScore<I, S>, I, M, S>;
/// A [`MinimizerScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s`
/// A [`MinimizerScheduler`] with [`LenTimeMulTestcaseScore`] to prioritize quick and small [`Testcase`]`s`
/// that exercise all the entries registered in the [`MapIndexesMetadata`].
pub type IndexesLenTimeMinimizerScheduler<CS, I, S> =
MinimizerScheduler<CS, LenTimeMulFavFactor<I>, I, MapIndexesMetadata, S>;
MinimizerScheduler<CS, LenTimeMulTestcaseScore<I, S>, I, MapIndexesMetadata, S>;


@ -9,8 +9,8 @@ pub use probabilistic_sampling::ProbabilitySamplingScheduler;
pub mod accounting;
pub use accounting::CoverageAccountingScheduler;
pub mod fav_factor;
pub use fav_factor::{FavFactor, LenTimeMulFavFactor};
pub mod testcase_score;
pub use testcase_score::{LenTimeMulTestcaseScore, TestcaseScore};
pub mod minimizer;
pub use minimizer::{
@ -18,7 +18,7 @@ pub use minimizer::{
};
pub mod weighted;
pub use weighted::WeightedScheduler;
pub use weighted::{StdWeightedScheduler, WeightedScheduler};
pub mod powersched;
pub use powersched::PowerQueueScheduler;


@ -5,7 +5,7 @@ use crate::{
bolts::rands::Rand,
corpus::Corpus,
inputs::Input,
schedulers::{FavFactor, Scheduler},
schedulers::{Scheduler, TestcaseScore},
state::{HasCorpus, HasMetadata, HasRand},
Error,
};
@ -16,13 +16,13 @@ use serde::{Deserialize, Serialize};
/// Conduct reservoir sampling (probabilistic sampling) over all corpus elements.
#[derive(Debug, Clone)]
pub struct ProbabilitySamplingScheduler<I, S, F>
pub struct ProbabilitySamplingScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
I: Input,
S: HasCorpus<I> + HasMetadata + HasRand,
F: FavFactor<I>,
{
phantom: PhantomData<(I, S, F)>,
phantom: PhantomData<(F, I, S)>,
}
/// A state metadata holding a map of probability of corpus elements.
@ -53,11 +53,11 @@ impl Default for ProbabilityMetadata {
}
}
impl<I, S, F> ProbabilitySamplingScheduler<I, S, F>
impl<F, I, S> ProbabilitySamplingScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
I: Input,
S: HasCorpus<I> + HasMetadata + HasRand,
F: FavFactor<I>,
{
/// Creates a new [`struct@ProbabilitySamplingScheduler`]
#[must_use]
@ -71,8 +71,8 @@ where
#[allow(clippy::cast_precision_loss)]
#[allow(clippy::unused_self)]
pub fn store_probability(&self, state: &mut S, idx: usize) -> Result<(), Error> {
let factor = F::compute(&mut *state.corpus().get(idx)?.borrow_mut())?;
if factor == 0 {
let factor = F::compute(&mut *state.corpus().get(idx)?.borrow_mut(), state)?;
if factor == 0.0 {
return Err(Error::IllegalState(
"Infinity probability calculated for probabilistic sampling scheduler".into(),
));
@ -81,18 +81,18 @@ where
.metadata_mut()
.get_mut::<ProbabilityMetadata>()
.unwrap();
let prob = 1.0 / (factor as f64);
let prob = 1.0 / factor;
meta.map.insert(idx, prob);
meta.total_probability += prob;
Ok(())
}
}
impl<I, S, F> Scheduler<I, S> for ProbabilitySamplingScheduler<I, S, F>
impl<F, I, S> Scheduler<I, S> for ProbabilitySamplingScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
I: Input,
S: HasCorpus<I> + HasMetadata + HasRand,
F: FavFactor<I>,
{
fn on_add(&self, state: &mut S, idx: usize) -> Result<(), Error> {
if state.metadata().get::<ProbabilityMetadata>().is_none() {
@ -122,11 +122,11 @@ where
}
}
impl<I, S, F> Default for ProbabilitySamplingScheduler<I, S, F>
impl<F, I, S> Default for ProbabilitySamplingScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
I: Input,
S: HasCorpus<I> + HasMetadata + HasRand,
F: FavFactor<I>,
{
fn default() -> Self {
Self::new()
@ -142,13 +142,13 @@ mod tests {
bolts::rands::StdRand,
corpus::{Corpus, InMemoryCorpus, Testcase},
inputs::{bytes::BytesInput, Input},
schedulers::{FavFactor, ProbabilitySamplingScheduler, Scheduler},
state::StdState,
schedulers::{ProbabilitySamplingScheduler, Scheduler, TestcaseScore},
state::{HasCorpus, HasMetadata, StdState},
Error,
};
use core::marker::PhantomData;
const FACTOR: u64 = 1337;
const FACTOR: f64 = 1337.0;
#[derive(Debug, Clone)]
pub struct UniformDistribution<I>
@ -158,17 +158,18 @@ mod tests {
phantom: PhantomData<I>,
}
impl<I> FavFactor<I> for UniformDistribution<I>
impl<I, S> TestcaseScore<I, S> for UniformDistribution<I>
where
I: Input,
S: HasMetadata + HasCorpus<I>,
{
fn compute(_: &mut Testcase<I>) -> Result<u64, Error> {
fn compute(_: &mut Testcase<I>, _state: &S) -> Result<f64, Error> {
Ok(FACTOR)
}
}
pub type UniformProbabilitySamplingScheduler<I, S> =
ProbabilitySamplingScheduler<I, S, UniformDistribution<I>>;
ProbabilitySamplingScheduler<UniformDistribution<I>, I, S>;
#[test]
fn test_prob_sampling() {


@ -0,0 +1,355 @@
//! The `TestcaseScore` is an evaluator providing scores of corpus items.
use crate::{
bolts::{HasLen, HasRefCnt},
corpus::{Corpus, PowerScheduleTestcaseMetaData, Testcase},
feedbacks::MapIndexesMetadata,
inputs::Input,
schedulers::{
minimizer::{IsFavoredMetadata, TopRatedsMetadata},
powersched::{PowerSchedule, PowerScheduleMetadata},
},
state::{HasCorpus, HasMetadata},
Error,
};
use alloc::string::{String, ToString};
use core::marker::PhantomData;
/// Compute the score of a [`Testcase`]. Lower is better.
pub trait TestcaseScore<I, S>
where
I: Input,
S: HasMetadata + HasCorpus<I>,
{
/// Computes the score of a [`Testcase`]. Lower is better.
fn compute(entry: &mut Testcase<I>, state: &S) -> Result<f64, Error>;
}
/// Multiply the testcase size with the execution time.
/// This favors small and quick testcases.
#[derive(Debug, Clone)]
pub struct LenTimeMulTestcaseScore<I, S>
where
I: Input + HasLen,
S: HasMetadata + HasCorpus<I>,
{
phantom: PhantomData<(I, S)>,
}
impl<I, S> TestcaseScore<I, S> for LenTimeMulTestcaseScore<I, S>
where
I: Input + HasLen,
S: HasMetadata + HasCorpus<I>,
{
#[allow(clippy::cast_precision_loss, clippy::cast_lossless)]
fn compute(entry: &mut Testcase<I>, _state: &S) -> Result<f64, Error> {
// TODO maybe enforce entry.exec_time().is_some()
Ok(entry.exec_time().map_or(1, |d| d.as_millis()) as f64 * entry.cached_len()? as f64)
}
}
/// Constants for powerschedules
const POWER_BETA: f64 = 1.0;
const MAX_FACTOR: f64 = POWER_BETA * 32.0;
const HAVOC_MAX_MULT: f64 = 64.0;
/// The power assigned to each corpus entry
/// This result is used for power scheduling
#[derive(Debug, Clone)]
pub struct CorpusPowerTestcaseScore<I, S>
where
I: Input + HasLen,
S: HasMetadata + HasCorpus<I>,
{
phantom: PhantomData<(I, S)>,
}
impl<I, S> TestcaseScore<I, S> for CorpusPowerTestcaseScore<I, S>
where
I: Input + HasLen,
S: HasMetadata + HasCorpus<I>,
{
/// Compute the `power` we assign to each corpus entry
#[allow(
clippy::cast_precision_loss,
clippy::too_many_lines,
clippy::cast_sign_loss
)]
fn compute(entry: &mut Testcase<I>, state: &S) -> Result<f64, Error> {
let psmeta = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let fuzz_mu = if psmeta.strat() == PowerSchedule::COE {
let corpus = state.corpus();
let mut n_paths = 0;
let mut v = 0.0;
for idx in 0..corpus.count() {
let n_fuzz_entry = corpus
.get(idx)?
.borrow()
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| {
Error::KeyNotFound("PowerScheduleTestData not found".to_string())
})?
.n_fuzz_entry();
v += libm::log2(f64::from(psmeta.n_fuzz()[n_fuzz_entry]));
n_paths += 1;
}
if n_paths == 0 {
return Err(Error::Unknown(String::from("Queue state corrupt")));
}
v /= f64::from(n_paths);
v
} else {
0.0
};
let mut perf_score = 100.0;
let q_exec_us = entry
.exec_time()
.ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))?
.as_nanos() as f64;
let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64;
let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries();
let favored = entry.has_metadata::<IsFavoredMetadata>();
let tcmeta = entry
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| {
Error::KeyNotFound("PowerScheduleTestcaseMetaData not found".to_string())
})?;
if q_exec_us * 0.1 > avg_exec_us {
perf_score = 10.0;
} else if q_exec_us * 0.2 > avg_exec_us {
perf_score = 25.0;
} else if q_exec_us * 0.5 > avg_exec_us {
perf_score = 50.0;
} else if q_exec_us * 0.75 > avg_exec_us {
perf_score = 75.0;
} else if q_exec_us * 4.0 < avg_exec_us {
perf_score = 300.0;
} else if q_exec_us * 3.0 < avg_exec_us {
perf_score = 200.0;
} else if q_exec_us * 2.0 < avg_exec_us {
perf_score = 150.0;
}
let q_bitmap_size = tcmeta.bitmap_size() as f64;
if q_bitmap_size * 0.3 > avg_bitmap_size as f64 {
perf_score *= 3.0;
} else if q_bitmap_size * 0.5 > avg_bitmap_size as f64 {
perf_score *= 2.0;
} else if q_bitmap_size * 0.75 > avg_bitmap_size as f64 {
perf_score *= 1.5;
} else if q_bitmap_size * 3.0 < avg_bitmap_size as f64 {
perf_score *= 0.25;
} else if q_bitmap_size * 2.0 < avg_bitmap_size as f64 {
perf_score *= 0.5;
} else if q_bitmap_size * 1.5 < avg_bitmap_size as f64 {
perf_score *= 0.75;
}
if tcmeta.handicap() >= 4 {
perf_score *= 4.0;
// tcmeta.set_handicap(tcmeta.handicap() - 4);
} else if tcmeta.handicap() > 0 {
perf_score *= 2.0;
// tcmeta.set_handicap(tcmeta.handicap() - 1);
}
if tcmeta.depth() >= 4 && tcmeta.depth() < 8 {
perf_score *= 2.0;
} else if tcmeta.depth() >= 8 && tcmeta.depth() < 14 {
perf_score *= 3.0;
} else if tcmeta.depth() >= 14 && tcmeta.depth() < 25 {
perf_score *= 4.0;
} else if tcmeta.depth() >= 25 {
perf_score *= 5.0;
}
let mut factor: f64 = 1.0;
// COE and FAST schedules are fairly different from what is described in the original thesis;
// this implementation follows the changes made in this pull request: https://github.com/AFLplusplus/AFLplusplus/pull/568
match psmeta.strat() {
PowerSchedule::EXPLORE => {
// Nothing happens in EXPLORE
}
PowerSchedule::EXPLOIT => {
factor = MAX_FACTOR;
}
PowerSchedule::COE => {
if libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])) > fuzz_mu
&& !favored
{
// Never skip favorites.
factor = 0.0;
}
}
PowerSchedule::FAST => {
if tcmeta.fuzz_level() != 0 {
let lg = libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]));
match lg {
f if f < 2.0 => {
factor = 4.0;
}
f if (2.0..4.0).contains(&f) => {
factor = 3.0;
}
f if (4.0..5.0).contains(&f) => {
factor = 2.0;
}
f if (6.0..7.0).contains(&f) => {
if !favored {
factor = 0.8;
}
}
f if (7.0..8.0).contains(&f) => {
if !favored {
factor = 0.6;
}
}
f if f >= 8.0 => {
if !favored {
factor = 0.4;
}
}
_ => {
factor = 1.0;
}
}
if favored {
factor *= 1.15;
}
}
}
PowerSchedule::LIN => {
factor = (tcmeta.fuzz_level() as f64)
/ f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1);
}
PowerSchedule::QUAD => {
factor = ((tcmeta.fuzz_level() * tcmeta.fuzz_level()) as f64)
/ f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1);
}
}
if psmeta.strat() != PowerSchedule::EXPLORE {
if factor > MAX_FACTOR {
factor = MAX_FACTOR;
}
perf_score *= factor / POWER_BETA;
}
// Lower bound when the strat is COE.
if psmeta.strat() == PowerSchedule::COE && perf_score < 1.0 {
perf_score = 1.0;
}
// Upper bound
if perf_score > HAVOC_MAX_MULT * 100.0 {
perf_score = HAVOC_MAX_MULT * 100.0;
}
Ok(perf_score)
}
}
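Condensing the arithmetic of `CorpusPowerTestcaseScore::compute` above (a reading of the code as written, not a separate specification):

perf_score = min(HAVOC_MAX_MULT * 100, B_speed * M_bitmap * M_handicap * M_depth * factor / POWER_BETA)

where B_speed in {10, 25, 50, 75, 100, 150, 200, 300} is chosen from the entry's execution time relative to the corpus average, M_bitmap in {0.25, 0.5, 0.75, 1, 1.5, 2, 3} from its bitmap size relative to the average, M_handicap in {1, 2, 4} and M_depth in {1, 2, 3, 4, 5} from the testcase metadata, and factor (clamped to MAX_FACTOR) depends on the power schedule; EXPLORE skips the final scaling, and COE additionally floors the result at 1.0.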
/// The weight for each corpus entry
/// This result is used for corpus scheduling
#[derive(Debug, Clone)]
pub struct CorpusWeightTestcaseScore<I, S>
where
I: Input + HasLen,
S: HasMetadata + HasCorpus<I>,
{
phantom: PhantomData<(I, S)>,
}
impl<I, S> TestcaseScore<I, S> for CorpusWeightTestcaseScore<I, S>
where
I: Input + HasLen,
S: HasMetadata + HasCorpus<I>,
{
/// Compute the `weight` used in weighted corpus entry selection algo
#[allow(clippy::cast_precision_loss, clippy::cast_lossless)]
fn compute(entry: &mut Testcase<I>, state: &S) -> Result<f64, Error> {
let mut weight = 1.0;
let psmeta = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let tcmeta = entry
.metadata()
.get::<PowerScheduleTestcaseMetaData>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?;
// This means that this testcase has never gone through the calibration stage before;
// in this case we'll just return the default weight
if tcmeta.fuzz_level() == 0 || psmeta.cycles() == 0 {
return Ok(weight);
}
let q_exec_us = entry
.exec_time()
.ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))?
.as_nanos() as f64;
let favored = entry.has_metadata::<IsFavoredMetadata>();
let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64;
let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries();
let q_bitmap_size = tcmeta.bitmap_size() as f64;
match psmeta.strat() {
PowerSchedule::FAST | PowerSchedule::COE | PowerSchedule::LIN | PowerSchedule::QUAD => {
let hits = psmeta.n_fuzz()[tcmeta.n_fuzz_entry()];
if hits > 0 {
weight *= libm::log10(f64::from(hits)) + 1.0;
}
}
// EXPLORE and EXPLOIT fall into this
_ => {}
}
weight *= avg_exec_us / q_exec_us;
weight *= libm::log2(q_bitmap_size) / (avg_bitmap_size as f64);
let tc_ref = match entry.metadata().get::<MapIndexesMetadata>() {
Some(meta) => meta.refcnt() as f64,
None => 0.0,
};
let avg_top_size = state
.metadata()
.get::<TopRatedsMetadata>()
.ok_or_else(|| Error::KeyNotFound("TopRatedsMetadata not found".to_string()))?
.map()
.len() as f64;
weight *= 1.0 + (tc_ref / avg_top_size);
if favored {
weight *= 5.0;
}
// was it fuzzed before?
if tcmeta.fuzz_level() == 0 {
weight *= 2.0;
}
assert!(weight.is_normal());
Ok(weight)
}
}
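Likewise, the weight produced by `CorpusWeightTestcaseScore::compute` above reduces to roughly the following (again a condensed reading of the code): entries that never went through calibration (`fuzz_level == 0` or no completed cycles) return the default weight of 1.0; otherwise

weight = (1 + log10(hits)) * (avg_exec_us / q_exec_us) * (log2(q_bitmap_size) / avg_bitmap_size) * (1 + refcnt / |top_rated|) * (5 if favored, else 1)

where the `log10(hits)` term only applies under FAST, COE, LIN and QUAD and only when `hits > 0`, `hits` is the global `n_fuzz` counter for the entry's path, `refcnt` comes from the entry's `MapIndexesMetadata`, and `|top_rated|` is the number of entries in `TopRatedsMetadata`.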


@ -10,7 +10,11 @@ use crate::{
bolts::rands::Rand,
corpus::{Corpus, PowerScheduleTestcaseMetaData},
inputs::Input,
schedulers::{powersched::PowerScheduleMetadata, Scheduler},
schedulers::{
powersched::PowerScheduleMetadata,
testcase_score::{CorpusWeightTestcaseScore, TestcaseScore},
Scheduler,
},
state::{HasCorpus, HasMetadata, HasRand},
Error,
};
@ -84,12 +88,13 @@ crate::impl_serdeany!(WeightedScheduleMetadata);
/// A corpus scheduler using power schedules with weighted queue item selection algo.
#[derive(Clone, Debug)]
pub struct WeightedScheduler<I, S> {
phantom: PhantomData<(I, S)>,
pub struct WeightedScheduler<F, I, S> {
phantom: PhantomData<(F, I, S)>,
}
impl<I, S> Default for WeightedScheduler<I, S>
impl<F, I, S> Default for WeightedScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
I: Input,
S: HasCorpus<I> + HasMetadata + HasRand,
{
@ -98,8 +103,9 @@ where
}
}
impl<I, S> WeightedScheduler<I, S>
impl<F, I, S> WeightedScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
I: Input,
S: HasCorpus<I> + HasMetadata + HasRand,
{
@ -132,8 +138,8 @@ where
let mut sum: f64 = 0.0;
for (i, item) in weights.iter_mut().enumerate().take(n) {
let testcase = state.corpus().get(i)?.borrow();
let weight = testcase.compute_weight(state)?;
let mut testcase = state.corpus().get(i)?.borrow_mut();
let weight = F::compute(&mut *testcase, state)?;
*item = weight;
sum += weight;
}
@ -199,8 +205,9 @@ where
}
}
impl<I, S> Scheduler<I, S> for WeightedScheduler<I, S>
impl<F, I, S> Scheduler<I, S> for WeightedScheduler<F, I, S>
where
F: TestcaseScore<I, S>,
S: HasCorpus<I> + HasMetadata + HasRand,
I: Input,
{
@ -283,3 +290,6 @@ where
}
}
}
/// The standard corpus weight, same as aflpp
pub type StdWeightedScheduler<I, S> = WeightedScheduler<CorpusWeightTestcaseScore<I, S>, I, S>;
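Because `WeightedScheduler` is now generic over `F: TestcaseScore<I, S>`, alternative weightings can be plugged in the same way `StdWeightedScheduler` is defined above. A hypothetical alias reusing the illustrative `ExecTimeScore` sketched near the top of this page:

/// Hypothetical: weight queue entries purely by their execution time.
pub type ExecTimeWeightedScheduler<I, S> = WeightedScheduler<ExecTimeScore<I>, I, S>;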


@ -17,7 +17,7 @@ pub mod calibrate;
pub use calibrate::CalibrationStage;
pub mod power;
pub use power::PowerMutationalStage;
pub use power::{PowerMutationalStage, StdPowerMutationalStage};
pub mod generalization;
pub use generalization::GeneralizationStage;


@ -10,16 +10,21 @@ use crate::{
inputs::Input,
mutators::Mutator,
observers::{MapObserver, ObserversTuple},
schedulers::powersched::{PowerSchedule, PowerScheduleMetadata},
schedulers::{
powersched::{PowerSchedule, PowerScheduleMetadata},
testcase_score::CorpusPowerTestcaseScore,
TestcaseScore,
},
stages::{MutationalStage, Stage},
state::{HasClientPerfMonitor, HasCorpus, HasMetadata},
Error,
};
/// The mutational stage using power schedules
#[derive(Clone, Debug)]
pub struct PowerMutationalStage<E, EM, I, M, O, OT, S, Z>
pub struct PowerMutationalStage<E, F, EM, I, M, O, OT, S, Z>
where
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
F: TestcaseScore<I, S>,
I: Input,
M: Mutator<I, S>,
O: MapObserver,
@ -30,13 +35,14 @@ where
map_observer_name: String,
mutator: M,
#[allow(clippy::type_complexity)]
phantom: PhantomData<(E, EM, I, O, OT, S, Z)>,
phantom: PhantomData<(E, F, EM, I, O, OT, S, Z)>,
}
impl<E, EM, I, M, O, OT, S, Z> MutationalStage<E, EM, I, M, S, Z>
for PowerMutationalStage<E, EM, I, M, O, OT, S, Z>
impl<E, F, EM, I, M, O, OT, S, Z> MutationalStage<E, EM, I, M, S, Z>
for PowerMutationalStage<E, F, EM, I, M, O, OT, S, Z>
where
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
F: TestcaseScore<I, S>,
I: Input,
M: Mutator<I, S>,
O: MapObserver,
@ -57,16 +63,11 @@ where
}
/// Gets the number of iterations as a random number
#[allow(clippy::cast_sign_loss)]
fn iterations(&self, state: &mut S, corpus_idx: usize) -> Result<usize, Error> {
// Calculate score
let score = state
.corpus()
.get(corpus_idx)?
.borrow()
.calculate_score(state);
// Update handicap
let mut testcase = state.corpus().get(corpus_idx)?.borrow_mut();
let score = F::compute(&mut *testcase, state)? as usize;
let tcmeta = testcase
.metadata_mut()
.get_mut::<PowerScheduleTestcaseMetaData>()
@ -79,7 +80,7 @@ where
tcmeta.set_handicap(tcmeta.handicap() - 1);
}
score
Ok(score)
}
#[allow(clippy::cast_possible_wrap)]
@ -141,9 +142,11 @@ where
}
}
impl<E, EM, I, M, O, OT, S, Z> Stage<E, EM, S, Z> for PowerMutationalStage<E, EM, I, M, O, OT, S, Z>
impl<E, F, EM, I, M, O, OT, S, Z> Stage<E, EM, S, Z>
for PowerMutationalStage<E, F, EM, I, M, O, OT, S, Z>
where
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
F: TestcaseScore<I, S>,
I: Input,
M: Mutator<I, S>,
O: MapObserver,
@ -166,9 +169,10 @@ where
}
}
impl<E, EM, I, M, O, OT, S, Z> PowerMutationalStage<E, EM, I, M, O, OT, S, Z>
impl<E, F, EM, I, M, O, OT, S, Z> PowerMutationalStage<E, F, EM, I, M, O, OT, S, Z>
where
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
F: TestcaseScore<I, S>,
I: Input,
M: Mutator<I, S>,
O: MapObserver,
@ -186,3 +190,7 @@ where
}
}
}
/// The standard powerscheduling stage
pub type StdPowerMutationalStage<E, EM, I, M, O, OT, S, Z> =
PowerMutationalStage<E, CorpusPowerTestcaseScore<I, S>, EM, I, M, O, OT, S, Z>;