diff --git a/fuzzers/fuzzbench/src/lib.rs b/fuzzers/fuzzbench/src/lib.rs index 3de293f85f..ea66b95e2b 100644 --- a/fuzzers/fuzzbench/src/lib.rs +++ b/fuzzers/fuzzbench/src/lib.rs @@ -43,7 +43,8 @@ use libafl::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler, }, stages::{ - calibrate::CalibrationStage, power::PowerMutationalStage, StdMutationalStage, TracingStage, + calibrate::CalibrationStage, power::StdPowerMutationalStage, StdMutationalStage, + TracingStage, }, state::{HasCorpus, HasMetadata, StdState}, Error, @@ -308,7 +309,7 @@ fn fuzz( let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?; let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new()); diff --git a/fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs b/fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs index 90699fe579..0f5e74c471 100644 --- a/fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs +++ b/fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs @@ -40,7 +40,7 @@ use libafl::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler, }, stages::{ - calibrate::CalibrationStage, power::PowerMutationalStage, ShadowTracingStage, + calibrate::CalibrationStage, power::StdPowerMutationalStage, ShadowTracingStage, StdMutationalStage, }, state::{HasCorpus, HasMetadata, StdState}, @@ -280,7 +280,7 @@ fn fuzz( let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?; let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new()); diff --git a/fuzzers/fuzzbench_qemu/src/fuzzer.rs b/fuzzers/fuzzbench_qemu/src/fuzzer.rs index b8da496d40..8b55bfa9b7 100644 --- a/fuzzers/fuzzbench_qemu/src/fuzzer.rs +++ b/fuzzers/fuzzbench_qemu/src/fuzzer.rs @@ -40,7 +40,7 @@ use libafl::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler, }, stages::{ - calibrate::CalibrationStage, power::PowerMutationalStage, ShadowTracingStage, + calibrate::CalibrationStage, power::StdPowerMutationalStage, ShadowTracingStage, StdMutationalStage, }, state::{HasCorpus, HasMetadata, StdState}, @@ -293,7 +293,7 @@ fn fuzz( let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?; let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new()); diff --git a/fuzzers/fuzzbench_selected/src/lib.rs b/fuzzers/fuzzbench_selected/src/lib.rs index bbc632d3ec..8b37c44367 100644 --- a/fuzzers/fuzzbench_selected/src/lib.rs +++ b/fuzzers/fuzzbench_selected/src/lib.rs @@ -39,9 +39,12 @@ use libafl::{ StdMOptMutator, StdScheduledMutator, Tokens, }, observers::{HitcountsMapObserver, StdMapObserver, TimeObserver}, - schedulers::{powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, WeightedScheduler}, 
+ schedulers::{ + powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler, + }, stages::{ - calibrate::CalibrationStage, power::PowerMutationalStage, StdMutationalStage, TracingStage, + calibrate::CalibrationStage, power::StdPowerMutationalStage, StdMutationalStage, + TracingStage, }, state::{HasCorpus, HasMetadata, StdState}, Error, @@ -306,10 +309,10 @@ fn fuzz( let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?; let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); // A minimization+queue policy to get testcasess from the corpus - let scheduler = IndexesLenTimeMinimizerScheduler::new(WeightedScheduler::new()); + let scheduler = IndexesLenTimeMinimizerScheduler::new(StdWeightedScheduler::new()); // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); diff --git a/fuzzers/fuzzbench_text/src/lib.rs b/fuzzers/fuzzbench_text/src/lib.rs index 4672f8389a..7c25070c08 100644 --- a/fuzzers/fuzzbench_text/src/lib.rs +++ b/fuzzers/fuzzbench_text/src/lib.rs @@ -49,7 +49,7 @@ use libafl::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler, }, stages::{ - calibrate::CalibrationStage, power::PowerMutationalStage, GeneralizationStage, + calibrate::CalibrationStage, power::StdPowerMutationalStage, GeneralizationStage, StdMutationalStage, TracingStage, }, state::{HasCorpus, HasMetadata, StdState}, @@ -370,7 +370,7 @@ fn fuzz_binary( let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?; let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new(PowerQueueScheduler::new()); @@ -575,7 +575,7 @@ fn fuzz_text( let mutator = StdMOptMutator::new(&mut state, havoc_mutations().merge(tokens_mutations()), 5)?; let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); let grimoire_mutator = StdScheduledMutator::with_max_iterations( tuple_list!( diff --git a/fuzzers/libfuzzer_libpng/src/lib.rs b/fuzzers/libfuzzer_libpng/src/lib.rs index 97ca7a9e30..782fae3cde 100644 --- a/fuzzers/libfuzzer_libpng/src/lib.rs +++ b/fuzzers/libfuzzer_libpng/src/lib.rs @@ -25,8 +25,10 @@ use libafl::{ mutators::scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, mutators::token_mutations::Tokens, observers::{HitcountsMapObserver, StdMapObserver, TimeObserver}, - schedulers::{powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, WeightedScheduler}, - stages::{calibrate::CalibrationStage, power::PowerMutationalStage}, + schedulers::{ + powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler, + }, + stages::{calibrate::CalibrationStage, power::StdPowerMutationalStage}, state::{HasCorpus, HasMetadata, StdState}, Error, }; @@ -129,12 +131,12 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let calibration = CalibrationStage::new(&edges_observer); let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + 
StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); let mut stages = tuple_list!(calibration, power); // A minimization+queue policy to get testcasess from the corpus - let scheduler = IndexesLenTimeMinimizerScheduler::new(WeightedScheduler::new()); + let scheduler = IndexesLenTimeMinimizerScheduler::new(StdWeightedScheduler::new()); // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); diff --git a/fuzzers/tutorial/src/lib.rs b/fuzzers/tutorial/src/lib.rs index 59aae2f9f6..2c4a5e5fa6 100644 --- a/fuzzers/tutorial/src/lib.rs +++ b/fuzzers/tutorial/src/lib.rs @@ -18,7 +18,7 @@ use libafl::{ monitors::MultiMonitor, observers::{HitcountsMapObserver, StdMapObserver, TimeObserver}, schedulers::{powersched::PowerSchedule, PowerQueueScheduler}, - stages::{calibrate::CalibrationStage, power::PowerMutationalStage}, + stages::{calibrate::CalibrationStage, power::StdPowerMutationalStage}, state::{HasCorpus, StdState}, Error, }; @@ -127,7 +127,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let calibration = CalibrationStage::new(&edges_observer); let power = - PowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); + StdPowerMutationalStage::new(&mut state, mutator, &edges_observer, PowerSchedule::FAST); let mut stages = tuple_list!(calibration, power); diff --git a/fuzzers/tutorial/src/metadata.rs b/fuzzers/tutorial/src/metadata.rs index 820dce15fa..de865833f1 100644 --- a/fuzzers/tutorial/src/metadata.rs +++ b/fuzzers/tutorial/src/metadata.rs @@ -5,8 +5,8 @@ use libafl::{ executors::ExitKind, feedbacks::{Feedback, MapIndexesMetadata}, observers::ObserversTuple, - schedulers::{FavFactor, MinimizerScheduler}, - state::{HasClientPerfMonitor, HasMetadata}, + schedulers::{MinimizerScheduler, TestcaseScore}, + state::{HasClientPerfMonitor, HasCorpus, HasMetadata}, Error, SerdeAny, }; @@ -19,19 +19,22 @@ pub struct PacketLenMetadata { pub length: u64, } -pub struct PacketLenFavFactor {} +pub struct PacketLenTestcaseScore {} -impl FavFactor for PacketLenFavFactor { - fn compute(entry: &mut Testcase) -> Result { +impl TestcaseScore for PacketLenTestcaseScore +where + S: HasCorpus + HasMetadata, +{ + fn compute(entry: &mut Testcase, _state: &S) -> Result { Ok(entry .metadata() .get::() - .map_or(1, |m| m.length)) + .map_or(1, |m| m.length) as f64) } } pub type PacketLenMinimizerScheduler = - MinimizerScheduler; + MinimizerScheduler; #[derive(Serialize, Deserialize, Default, Clone, Debug)] pub struct PacketLenFeedback { diff --git a/libafl/src/corpus/testcase.rs b/libafl/src/corpus/testcase.rs index 0271adb239..e73dc27292 100644 --- a/libafl/src/corpus/testcase.rs +++ b/libafl/src/corpus/testcase.rs @@ -1,20 +1,14 @@ //! The testcase is a struct embedded in each corpus. //! It will contain a respective input, and metadata. 
-use alloc::string::{String, ToString}; +use alloc::string::String; use core::{convert::Into, default::Default, option::Option, time::Duration}; use serde::{Deserialize, Serialize}; use crate::{ - bolts::{serdeany::SerdeAnyMap, HasLen, HasRefCnt}, - corpus::Corpus, - feedbacks::MapIndexesMetadata, + bolts::{serdeany::SerdeAnyMap, HasLen}, inputs::Input, - schedulers::{ - minimizer::{IsFavoredMetadata, TopRatedsMetadata}, - powersched::{PowerSchedule, PowerScheduleMetadata}, - }, - state::{HasCorpus, HasMetadata}, + state::HasMetadata, Error, }; @@ -58,11 +52,6 @@ where } } -/// Constants for powerschedules -const POWER_BETA: f64 = 1.0; -const MAX_FACTOR: f64 = POWER_BETA * 32.0; -const HAVOC_MAX_MULT: f64 = 64.0; - /// Impl of a testcase impl Testcase where @@ -212,280 +201,6 @@ where ..Testcase::default() } } - - /// Compute the `weight` used in weighted corpus entry selection algo - #[allow(clippy::cast_precision_loss, clippy::cast_lossless)] - pub fn compute_weight(&self, state: &S) -> Result - where - S: HasCorpus + HasMetadata, - { - let mut weight = 1.0; - let psmeta = state - .metadata() - .get::() - .ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?; - - let tcmeta = self - .metadata() - .get::() - .ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?; - - // This means that this testcase has never gone through the calibration stage before1, - // In this case we'll just return the default weight - if tcmeta.fuzz_level() == 0 || psmeta.cycles() == 0 { - return Ok(weight); - } - - let q_exec_us = self - .exec_time() - .ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))? - .as_nanos() as f64; - let favored = self.has_metadata::(); - - let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64; - let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries(); - - let q_bitmap_size = tcmeta.bitmap_size() as f64; - - match psmeta.strat() { - PowerSchedule::FAST | PowerSchedule::COE | PowerSchedule::LIN | PowerSchedule::QUAD => { - let hits = psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]; - if hits > 0 { - weight *= libm::log10(f64::from(hits)) + 1.0; - } - } - // EXPLORE and EXPLOIT fall into this - _ => {} - } - - weight *= avg_exec_us / q_exec_us; - weight *= libm::log2(q_bitmap_size) / (avg_bitmap_size as f64); - - let tc_ref = match self.metadata().get::() { - Some(meta) => meta.refcnt() as f64, - None => 0.0, - }; - - let avg_top_size = state - .metadata() - .get::() - .ok_or_else(|| Error::KeyNotFound("TopRatedsMetadata not found".to_string()))? - .map() - .len() as f64; - weight *= 1.0 + (tc_ref / avg_top_size); - - if favored { - weight *= 5.0; - } - - // was it fuzzed before? - if tcmeta.fuzz_level() == 0 { - weight *= 2.0; - } - - assert!(weight.is_normal()); - - Ok(weight) - } - - /// Compute the `power` we assign to each corpus entry - #[inline] - #[allow( - clippy::cast_precision_loss, - clippy::too_many_lines, - clippy::cast_sign_loss - )] - pub fn calculate_score(&self, state: &S) -> Result - where - S: HasCorpus + HasMetadata, - { - let psmeta = state - .metadata() - .get::() - .ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?; - - let fuzz_mu = if psmeta.strat() == PowerSchedule::COE { - let corpus = state.corpus(); - let mut n_paths = 0; - let mut v = 0.0; - for idx in 0..corpus.count() { - let n_fuzz_entry = corpus - .get(idx)? 
- .borrow() - .metadata() - .get::() - .ok_or_else(|| { - Error::KeyNotFound("PowerScheduleTestData not found".to_string()) - })? - .n_fuzz_entry(); - v += libm::log2(f64::from(psmeta.n_fuzz()[n_fuzz_entry])); - n_paths += 1; - } - - if n_paths == 0 { - return Err(Error::Unknown(String::from("Queue state corrput"))); - } - - v /= f64::from(n_paths); - v - } else { - 0.0 - }; - - let mut perf_score = 100.0; - let q_exec_us = self - .exec_time() - .ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))? - .as_nanos() as f64; - - let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64; - let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries(); - - let favored = self.has_metadata::(); - let tcmeta = self - .metadata() - .get::() - .ok_or_else(|| { - Error::KeyNotFound("PowerScheduleTestcaseMetaData not found".to_string()) - })?; - - if q_exec_us * 0.1 > avg_exec_us { - perf_score = 10.0; - } else if q_exec_us * 0.2 > avg_exec_us { - perf_score = 25.0; - } else if q_exec_us * 0.5 > avg_exec_us { - perf_score = 50.0; - } else if q_exec_us * 0.75 > avg_exec_us { - perf_score = 75.0; - } else if q_exec_us * 4.0 < avg_exec_us { - perf_score = 300.0; - } else if q_exec_us * 3.0 < avg_exec_us { - perf_score = 200.0; - } else if q_exec_us * 2.0 < avg_exec_us { - perf_score = 150.0; - } - - let q_bitmap_size = tcmeta.bitmap_size() as f64; - if q_bitmap_size * 0.3 > avg_bitmap_size as f64 { - perf_score *= 3.0; - } else if q_bitmap_size * 0.5 > avg_bitmap_size as f64 { - perf_score *= 2.0; - } else if q_bitmap_size * 0.75 > avg_bitmap_size as f64 { - perf_score *= 1.5; - } else if q_bitmap_size * 3.0 < avg_bitmap_size as f64 { - perf_score *= 0.25; - } else if q_bitmap_size * 2.0 < avg_bitmap_size as f64 { - perf_score *= 0.5; - } else if q_bitmap_size * 1.5 < avg_bitmap_size as f64 { - perf_score *= 0.75; - } - - if tcmeta.handicap() >= 4 { - perf_score *= 4.0; - // tcmeta.set_handicap(tcmeta.handicap() - 4); - } else if tcmeta.handicap() > 0 { - perf_score *= 2.0; - // tcmeta.set_handicap(tcmeta.handicap() - 1); - } - - if tcmeta.depth() >= 4 && tcmeta.depth() < 8 { - perf_score *= 2.0; - } else if tcmeta.depth() >= 8 && tcmeta.depth() < 14 { - perf_score *= 3.0; - } else if tcmeta.depth() >= 14 && tcmeta.depth() < 25 { - perf_score *= 4.0; - } else if tcmeta.depth() >= 25 { - perf_score *= 5.0; - } - - let mut factor: f64 = 1.0; - - // COE and Fast schedule are fairly different from what are described in the original thesis, - // This implementation follows the changes made in this pull request https://github.com/AFLplusplus/AFLplusplus/pull/568 - match psmeta.strat() { - PowerSchedule::EXPLORE => { - // Nothing happens in EXPLORE - } - PowerSchedule::EXPLOIT => { - factor = MAX_FACTOR; - } - PowerSchedule::COE => { - if libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])) > fuzz_mu - && !favored - { - // Never skip favorites. 
- factor = 0.0; - } - } - PowerSchedule::FAST => { - if tcmeta.fuzz_level() != 0 { - let lg = libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])); - - match lg { - f if f < 2.0 => { - factor = 4.0; - } - f if (2.0..4.0).contains(&f) => { - factor = 3.0; - } - f if (4.0..5.0).contains(&f) => { - factor = 2.0; - } - f if (6.0..7.0).contains(&f) => { - if !favored { - factor = 0.8; - } - } - f if (7.0..8.0).contains(&f) => { - if !favored { - factor = 0.6; - } - } - f if f >= 8.0 => { - if !favored { - factor = 0.4; - } - } - _ => { - factor = 1.0; - } - } - - if favored { - factor *= 1.15; - } - } - } - PowerSchedule::LIN => { - factor = (tcmeta.fuzz_level() as f64) - / f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1); - } - PowerSchedule::QUAD => { - factor = ((tcmeta.fuzz_level() * tcmeta.fuzz_level()) as f64) - / f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1); - } - } - - if psmeta.strat() != PowerSchedule::EXPLORE { - if factor > MAX_FACTOR { - factor = MAX_FACTOR; - } - - perf_score *= factor / POWER_BETA; - } - - // Lower bound if the strat is not COE. - if psmeta.strat() == PowerSchedule::COE && perf_score < 1.0 { - perf_score = 1.0; - } - - // Upper bound - if perf_score > HAVOC_MAX_MULT * 100.0 { - perf_score = HAVOC_MAX_MULT * 100.0; - } - - Ok(perf_score as usize) - } } impl Default for Testcase diff --git a/libafl/src/schedulers/accounting.rs b/libafl/src/schedulers/accounting.rs index 3652404ef1..ee31da7bba 100644 --- a/libafl/src/schedulers/accounting.rs +++ b/libafl/src/schedulers/accounting.rs @@ -7,7 +7,7 @@ use crate::{ inputs::Input, schedulers::{ minimizer::{IsFavoredMetadata, MinimizerScheduler, DEFAULT_SKIP_NON_FAVORED_PROB}, - LenTimeMulFavFactor, Scheduler, + LenTimeMulTestcaseScore, Scheduler, }, state::{HasCorpus, HasMetadata, HasRand}, Error, @@ -100,7 +100,7 @@ where { accounting_map: &'a [u32], skip_non_favored_prob: u64, - inner: MinimizerScheduler, I, MapIndexesMetadata, S>, + inner: MinimizerScheduler, I, MapIndexesMetadata, S>, } impl<'a, CS, I, S> Scheduler for CoverageAccountingScheduler<'a, CS, I, S> diff --git a/libafl/src/schedulers/fav_factor.rs b/libafl/src/schedulers/fav_factor.rs deleted file mode 100644 index d7adcb097f..0000000000 --- a/libafl/src/schedulers/fav_factor.rs +++ /dev/null @@ -1,34 +0,0 @@ -//! The `FavFactor` is an evaluator providing scores of corpus items. - -use crate::{bolts::HasLen, corpus::Testcase, inputs::Input, Error}; - -use core::marker::PhantomData; - -/// Compute the favor factor of a [`Testcase`]. Lower is better. -pub trait FavFactor -where - I: Input, -{ - /// Computes the favor factor of a [`Testcase`]. Lower is better. - fn compute(entry: &mut Testcase) -> Result; -} - -/// Multiply the testcase size with the execution time. -/// This favors small and quick testcases. -#[derive(Debug, Clone)] -pub struct LenTimeMulFavFactor -where - I: Input + HasLen, -{ - phantom: PhantomData, -} - -impl FavFactor for LenTimeMulFavFactor -where - I: Input + HasLen, -{ - fn compute(entry: &mut Testcase) -> Result { - // TODO maybe enforce entry.exec_time().is_some() - Ok(entry.exec_time().map_or(1, |d| d.as_millis()) as u64 * entry.cached_len()? 
as u64) - } -} diff --git a/libafl/src/schedulers/minimizer.rs b/libafl/src/schedulers/minimizer.rs index 3e7425f0de..c05b1b0b35 100644 --- a/libafl/src/schedulers/minimizer.rs +++ b/libafl/src/schedulers/minimizer.rs @@ -6,7 +6,7 @@ use crate::{ corpus::{Corpus, Testcase}, feedbacks::MapIndexesMetadata, inputs::Input, - schedulers::{FavFactor, LenTimeMulFavFactor, Scheduler}, + schedulers::{LenTimeMulTestcaseScore, Scheduler, TestcaseScore}, state::{HasCorpus, HasMetadata, HasRand}, Error, }; @@ -57,12 +57,12 @@ impl Default for TopRatedsMetadata { /// The [`MinimizerScheduler`] employs a genetic algorithm to compute a subset of the /// corpus that exercise all the requested features (e.g. all the coverage seen so far) -/// prioritizing [`Testcase`]`s` using [`FavFactor`] +/// prioritizing [`Testcase`]`s` using [`TestcaseScore`] #[derive(Debug, Clone)] pub struct MinimizerScheduler where CS: Scheduler, - F: FavFactor, + F: TestcaseScore, I: Input, M: AsSlice + SerdeAny + HasRefCnt, S: HasCorpus + HasMetadata, @@ -75,7 +75,7 @@ where impl Scheduler for MinimizerScheduler where CS: Scheduler, - F: FavFactor, + F: TestcaseScore, I: Input, M: AsSlice + SerdeAny + HasRefCnt, S: HasCorpus + HasMetadata + HasRand, @@ -123,7 +123,7 @@ where impl MinimizerScheduler where CS: Scheduler, - F: FavFactor, + F: TestcaseScore, I: Input, M: AsSlice + SerdeAny + HasRefCnt, S: HasCorpus + HasMetadata + HasRand, @@ -140,7 +140,7 @@ where let mut new_favoreds = vec![]; { let mut entry = state.corpus().get(idx)?.borrow_mut(); - let factor = F::compute(&mut *entry)?; + let factor = F::compute(&mut *entry, state)?; let meta = entry.metadata_mut().get_mut::().ok_or_else(|| { Error::KeyNotFound(format!( "Metadata needed for MinimizerScheduler not found in testcase #{}", @@ -156,7 +156,7 @@ where .get(elem) { let mut old = state.corpus().get(*old_idx)?.borrow_mut(); - if factor > F::compute(&mut *old)? { + if factor > F::compute(&mut *old, state)? { continue; } @@ -261,11 +261,11 @@ where } } -/// A [`MinimizerScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s`. +/// A [`MinimizerScheduler`] with [`LenTimeMulTestcaseScore`] to prioritize quick and small [`Testcase`]`s`. pub type LenTimeMinimizerScheduler = - MinimizerScheduler, I, M, S>; + MinimizerScheduler, I, M, S>; -/// A [`MinimizerScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s` +/// A [`MinimizerScheduler`] with [`LenTimeMulTestcaseScore`] to prioritize quick and small [`Testcase`]`s` /// that exercise all the entries registered in the [`MapIndexesMetadata`]. 
pub type IndexesLenTimeMinimizerScheduler = - MinimizerScheduler, I, MapIndexesMetadata, S>; + MinimizerScheduler, I, MapIndexesMetadata, S>; diff --git a/libafl/src/schedulers/mod.rs b/libafl/src/schedulers/mod.rs index ee65dcb787..25d3ffa47d 100644 --- a/libafl/src/schedulers/mod.rs +++ b/libafl/src/schedulers/mod.rs @@ -9,8 +9,8 @@ pub use probabilistic_sampling::ProbabilitySamplingScheduler; pub mod accounting; pub use accounting::CoverageAccountingScheduler; -pub mod fav_factor; -pub use fav_factor::{FavFactor, LenTimeMulFavFactor}; +pub mod testcase_score; +pub use testcase_score::{LenTimeMulTestcaseScore, TestcaseScore}; pub mod minimizer; pub use minimizer::{ @@ -18,7 +18,7 @@ pub use minimizer::{ }; pub mod weighted; -pub use weighted::WeightedScheduler; +pub use weighted::{StdWeightedScheduler, WeightedScheduler}; pub mod powersched; pub use powersched::PowerQueueScheduler; diff --git a/libafl/src/schedulers/probabilistic_sampling.rs b/libafl/src/schedulers/probabilistic_sampling.rs index c5bef87c6c..f5fd3d0b5a 100644 --- a/libafl/src/schedulers/probabilistic_sampling.rs +++ b/libafl/src/schedulers/probabilistic_sampling.rs @@ -5,7 +5,7 @@ use crate::{ bolts::rands::Rand, corpus::Corpus, inputs::Input, - schedulers::{FavFactor, Scheduler}, + schedulers::{Scheduler, TestcaseScore}, state::{HasCorpus, HasMetadata, HasRand}, Error, }; @@ -16,13 +16,13 @@ use serde::{Deserialize, Serialize}; /// Conduct reservoir sampling (probabilistic sampling) over all corpus elements. #[derive(Debug, Clone)] -pub struct ProbabilitySamplingScheduler +pub struct ProbabilitySamplingScheduler where + F: TestcaseScore, I: Input, S: HasCorpus + HasMetadata + HasRand, - F: FavFactor, { - phantom: PhantomData<(I, S, F)>, + phantom: PhantomData<(F, I, S)>, } /// A state metadata holding a map of probability of corpus elements. 
@@ -53,11 +53,11 @@ impl Default for ProbabilityMetadata { } } -impl ProbabilitySamplingScheduler +impl ProbabilitySamplingScheduler where + F: TestcaseScore, I: Input, S: HasCorpus + HasMetadata + HasRand, - F: FavFactor, { /// Creates a new [`struct@ProbabilitySamplingScheduler`] #[must_use] @@ -71,8 +71,8 @@ where #[allow(clippy::cast_precision_loss)] #[allow(clippy::unused_self)] pub fn store_probability(&self, state: &mut S, idx: usize) -> Result<(), Error> { - let factor = F::compute(&mut *state.corpus().get(idx)?.borrow_mut())?; - if factor == 0 { + let factor = F::compute(&mut *state.corpus().get(idx)?.borrow_mut(), state)?; + if factor == 0.0 { return Err(Error::IllegalState( "Infinity probability calculated for probabilistic sampling scheduler".into(), )); @@ -81,18 +81,18 @@ where .metadata_mut() .get_mut::() .unwrap(); - let prob = 1.0 / (factor as f64); + let prob = 1.0 / factor; meta.map.insert(idx, prob); meta.total_probability += prob; Ok(()) } } -impl Scheduler for ProbabilitySamplingScheduler +impl Scheduler for ProbabilitySamplingScheduler where + F: TestcaseScore, I: Input, S: HasCorpus + HasMetadata + HasRand, - F: FavFactor, { fn on_add(&self, state: &mut S, idx: usize) -> Result<(), Error> { if state.metadata().get::().is_none() { @@ -122,11 +122,11 @@ where } } -impl Default for ProbabilitySamplingScheduler +impl Default for ProbabilitySamplingScheduler where + F: TestcaseScore, I: Input, S: HasCorpus + HasMetadata + HasRand, - F: FavFactor, { fn default() -> Self { Self::new() @@ -142,13 +142,13 @@ mod tests { bolts::rands::StdRand, corpus::{Corpus, InMemoryCorpus, Testcase}, inputs::{bytes::BytesInput, Input}, - schedulers::{FavFactor, ProbabilitySamplingScheduler, Scheduler}, - state::StdState, + schedulers::{ProbabilitySamplingScheduler, Scheduler, TestcaseScore}, + state::{HasCorpus, HasMetadata, StdState}, Error, }; use core::marker::PhantomData; - const FACTOR: u64 = 1337; + const FACTOR: f64 = 1337.0; #[derive(Debug, Clone)] pub struct UniformDistribution @@ -158,17 +158,18 @@ mod tests { phantom: PhantomData, } - impl FavFactor for UniformDistribution + impl TestcaseScore for UniformDistribution where I: Input, + S: HasMetadata + HasCorpus, { - fn compute(_: &mut Testcase) -> Result { + fn compute(_: &mut Testcase, _state: &S) -> Result { Ok(FACTOR) } } pub type UniformProbabilitySamplingScheduler = - ProbabilitySamplingScheduler>; + ProbabilitySamplingScheduler, I, S>; #[test] fn test_prob_sampling() { diff --git a/libafl/src/schedulers/testcase_score.rs b/libafl/src/schedulers/testcase_score.rs new file mode 100644 index 0000000000..8482c3fb24 --- /dev/null +++ b/libafl/src/schedulers/testcase_score.rs @@ -0,0 +1,355 @@ +//! The `TestcaseScore` is an evaluator providing scores of corpus items. +use crate::{ + bolts::{HasLen, HasRefCnt}, + corpus::{Corpus, PowerScheduleTestcaseMetaData, Testcase}, + feedbacks::MapIndexesMetadata, + inputs::Input, + schedulers::{ + minimizer::{IsFavoredMetadata, TopRatedsMetadata}, + powersched::{PowerSchedule, PowerScheduleMetadata}, + }, + state::{HasCorpus, HasMetadata}, + Error, +}; +use alloc::string::{String, ToString}; + +use core::marker::PhantomData; + +/// Compute the favor factor of a [`Testcase`]. Lower is better. +pub trait TestcaseScore +where + I: Input, + S: HasMetadata + HasCorpus, +{ + /// Computes the favor factor of a [`Testcase`]. Lower is better. + fn compute(entry: &mut Testcase, state: &S) -> Result; +} + +/// Multiply the testcase size with the execution time. 
+/// This favors small and quick testcases. +#[derive(Debug, Clone)] +pub struct LenTimeMulTestcaseScore +where + I: Input + HasLen, + S: HasMetadata + HasCorpus, +{ + phantom: PhantomData<(I, S)>, +} + +impl TestcaseScore for LenTimeMulTestcaseScore +where + I: Input + HasLen, + S: HasMetadata + HasCorpus, +{ + #[allow(clippy::cast_precision_loss, clippy::cast_lossless)] + fn compute(entry: &mut Testcase, _state: &S) -> Result { + // TODO maybe enforce entry.exec_time().is_some() + Ok(entry.exec_time().map_or(1, |d| d.as_millis()) as f64 * entry.cached_len()? as f64) + } +} + +/// Constants for powerschedules +const POWER_BETA: f64 = 1.0; +const MAX_FACTOR: f64 = POWER_BETA * 32.0; +const HAVOC_MAX_MULT: f64 = 64.0; + +/// The power assigned to each corpus entry +/// This result is used for power scheduling +#[derive(Debug, Clone)] +pub struct CorpusPowerTestcaseScore +where + I: Input + HasLen, + S: HasMetadata + HasCorpus, +{ + phantom: PhantomData<(I, S)>, +} + +impl TestcaseScore for CorpusPowerTestcaseScore +where + I: Input + HasLen, + S: HasMetadata + HasCorpus, +{ + /// Compute the `power` we assign to each corpus entry + #[allow( + clippy::cast_precision_loss, + clippy::too_many_lines, + clippy::cast_sign_loss + )] + fn compute(entry: &mut Testcase, state: &S) -> Result { + let psmeta = state + .metadata() + .get::() + .ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?; + + let fuzz_mu = if psmeta.strat() == PowerSchedule::COE { + let corpus = state.corpus(); + let mut n_paths = 0; + let mut v = 0.0; + for idx in 0..corpus.count() { + let n_fuzz_entry = corpus + .get(idx)? + .borrow() + .metadata() + .get::() + .ok_or_else(|| { + Error::KeyNotFound("PowerScheduleTestData not found".to_string()) + })? + .n_fuzz_entry(); + v += libm::log2(f64::from(psmeta.n_fuzz()[n_fuzz_entry])); + n_paths += 1; + } + + if n_paths == 0 { + return Err(Error::Unknown(String::from("Queue state corrput"))); + } + + v /= f64::from(n_paths); + v + } else { + 0.0 + }; + + let mut perf_score = 100.0; + let q_exec_us = entry + .exec_time() + .ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))? 
+ .as_nanos() as f64; + + let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64; + let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries(); + + let favored = entry.has_metadata::(); + let tcmeta = entry + .metadata() + .get::() + .ok_or_else(|| { + Error::KeyNotFound("PowerScheduleTestcaseMetaData not found".to_string()) + })?; + + if q_exec_us * 0.1 > avg_exec_us { + perf_score = 10.0; + } else if q_exec_us * 0.2 > avg_exec_us { + perf_score = 25.0; + } else if q_exec_us * 0.5 > avg_exec_us { + perf_score = 50.0; + } else if q_exec_us * 0.75 > avg_exec_us { + perf_score = 75.0; + } else if q_exec_us * 4.0 < avg_exec_us { + perf_score = 300.0; + } else if q_exec_us * 3.0 < avg_exec_us { + perf_score = 200.0; + } else if q_exec_us * 2.0 < avg_exec_us { + perf_score = 150.0; + } + + let q_bitmap_size = tcmeta.bitmap_size() as f64; + if q_bitmap_size * 0.3 > avg_bitmap_size as f64 { + perf_score *= 3.0; + } else if q_bitmap_size * 0.5 > avg_bitmap_size as f64 { + perf_score *= 2.0; + } else if q_bitmap_size * 0.75 > avg_bitmap_size as f64 { + perf_score *= 1.5; + } else if q_bitmap_size * 3.0 < avg_bitmap_size as f64 { + perf_score *= 0.25; + } else if q_bitmap_size * 2.0 < avg_bitmap_size as f64 { + perf_score *= 0.5; + } else if q_bitmap_size * 1.5 < avg_bitmap_size as f64 { + perf_score *= 0.75; + } + + if tcmeta.handicap() >= 4 { + perf_score *= 4.0; + // tcmeta.set_handicap(tcmeta.handicap() - 4); + } else if tcmeta.handicap() > 0 { + perf_score *= 2.0; + // tcmeta.set_handicap(tcmeta.handicap() - 1); + } + + if tcmeta.depth() >= 4 && tcmeta.depth() < 8 { + perf_score *= 2.0; + } else if tcmeta.depth() >= 8 && tcmeta.depth() < 14 { + perf_score *= 3.0; + } else if tcmeta.depth() >= 14 && tcmeta.depth() < 25 { + perf_score *= 4.0; + } else if tcmeta.depth() >= 25 { + perf_score *= 5.0; + } + + let mut factor: f64 = 1.0; + + // COE and Fast schedule are fairly different from what are described in the original thesis, + // This implementation follows the changes made in this pull request https://github.com/AFLplusplus/AFLplusplus/pull/568 + match psmeta.strat() { + PowerSchedule::EXPLORE => { + // Nothing happens in EXPLORE + } + PowerSchedule::EXPLOIT => { + factor = MAX_FACTOR; + } + PowerSchedule::COE => { + if libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])) > fuzz_mu + && !favored + { + // Never skip favorites. + factor = 0.0; + } + } + PowerSchedule::FAST => { + if tcmeta.fuzz_level() != 0 { + let lg = libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])); + + match lg { + f if f < 2.0 => { + factor = 4.0; + } + f if (2.0..4.0).contains(&f) => { + factor = 3.0; + } + f if (4.0..5.0).contains(&f) => { + factor = 2.0; + } + f if (6.0..7.0).contains(&f) => { + if !favored { + factor = 0.8; + } + } + f if (7.0..8.0).contains(&f) => { + if !favored { + factor = 0.6; + } + } + f if f >= 8.0 => { + if !favored { + factor = 0.4; + } + } + _ => { + factor = 1.0; + } + } + + if favored { + factor *= 1.15; + } + } + } + PowerSchedule::LIN => { + factor = (tcmeta.fuzz_level() as f64) + / f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1); + } + PowerSchedule::QUAD => { + factor = ((tcmeta.fuzz_level() * tcmeta.fuzz_level()) as f64) + / f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1); + } + } + + if psmeta.strat() != PowerSchedule::EXPLORE { + if factor > MAX_FACTOR { + factor = MAX_FACTOR; + } + + perf_score *= factor / POWER_BETA; + } + + // Lower bound if the strat is not COE. 
+ if psmeta.strat() == PowerSchedule::COE && perf_score < 1.0 { + perf_score = 1.0; + } + + // Upper bound + if perf_score > HAVOC_MAX_MULT * 100.0 { + perf_score = HAVOC_MAX_MULT * 100.0; + } + + Ok(perf_score) + } +} + +/// The weight for each corpus entry +/// This result is used for corpus scheduling +#[derive(Debug, Clone)] +pub struct CorpusWeightTestcaseScore +where + I: Input + HasLen, + S: HasMetadata + HasCorpus, +{ + phantom: PhantomData<(I, S)>, +} + +impl TestcaseScore for CorpusWeightTestcaseScore +where + I: Input + HasLen, + S: HasMetadata + HasCorpus, +{ + /// Compute the `weight` used in weighted corpus entry selection algo + #[allow(clippy::cast_precision_loss, clippy::cast_lossless)] + fn compute(entry: &mut Testcase, state: &S) -> Result { + let mut weight = 1.0; + let psmeta = state + .metadata() + .get::() + .ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?; + + let tcmeta = entry + .metadata() + .get::() + .ok_or_else(|| Error::KeyNotFound("PowerScheduleTestData not found".to_string()))?; + + // This means that this testcase has never gone through the calibration stage before1, + // In this case we'll just return the default weight + if tcmeta.fuzz_level() == 0 || psmeta.cycles() == 0 { + return Ok(weight); + } + + let q_exec_us = entry + .exec_time() + .ok_or_else(|| Error::KeyNotFound("exec_time not set".to_string()))? + .as_nanos() as f64; + let favored = entry.has_metadata::(); + + let avg_exec_us = psmeta.exec_time().as_nanos() as f64 / psmeta.cycles() as f64; + let avg_bitmap_size = psmeta.bitmap_size() / psmeta.bitmap_entries(); + + let q_bitmap_size = tcmeta.bitmap_size() as f64; + + match psmeta.strat() { + PowerSchedule::FAST | PowerSchedule::COE | PowerSchedule::LIN | PowerSchedule::QUAD => { + let hits = psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]; + if hits > 0 { + weight *= libm::log10(f64::from(hits)) + 1.0; + } + } + // EXPLORE and EXPLOIT fall into this + _ => {} + } + + weight *= avg_exec_us / q_exec_us; + weight *= libm::log2(q_bitmap_size) / (avg_bitmap_size as f64); + + let tc_ref = match entry.metadata().get::() { + Some(meta) => meta.refcnt() as f64, + None => 0.0, + }; + + let avg_top_size = state + .metadata() + .get::() + .ok_or_else(|| Error::KeyNotFound("TopRatedsMetadata not found".to_string()))? + .map() + .len() as f64; + weight *= 1.0 + (tc_ref / avg_top_size); + + if favored { + weight *= 5.0; + } + + // was it fuzzed before? + if tcmeta.fuzz_level() == 0 { + weight *= 2.0; + } + + assert!(weight.is_normal()); + + Ok(weight) + } +} diff --git a/libafl/src/schedulers/weighted.rs b/libafl/src/schedulers/weighted.rs index 43a0c084d4..c3ee9ef987 100644 --- a/libafl/src/schedulers/weighted.rs +++ b/libafl/src/schedulers/weighted.rs @@ -10,7 +10,11 @@ use crate::{ bolts::rands::Rand, corpus::{Corpus, PowerScheduleTestcaseMetaData}, inputs::Input, - schedulers::{powersched::PowerScheduleMetadata, Scheduler}, + schedulers::{ + powersched::PowerScheduleMetadata, + testcase_score::{CorpusWeightTestcaseScore, TestcaseScore}, + Scheduler, + }, state::{HasCorpus, HasMetadata, HasRand}, Error, }; @@ -84,12 +88,13 @@ crate::impl_serdeany!(WeightedScheduleMetadata); /// A corpus scheduler using power schedules with weighted queue item selection algo. 
#[derive(Clone, Debug)] -pub struct WeightedScheduler { - phantom: PhantomData<(I, S)>, +pub struct WeightedScheduler { + phantom: PhantomData<(F, I, S)>, } -impl Default for WeightedScheduler +impl Default for WeightedScheduler where + F: TestcaseScore, I: Input, S: HasCorpus + HasMetadata + HasRand, { @@ -98,8 +103,9 @@ where } } -impl WeightedScheduler +impl WeightedScheduler where + F: TestcaseScore, I: Input, S: HasCorpus + HasMetadata + HasRand, { @@ -132,8 +138,8 @@ where let mut sum: f64 = 0.0; for (i, item) in weights.iter_mut().enumerate().take(n) { - let testcase = state.corpus().get(i)?.borrow(); - let weight = testcase.compute_weight(state)?; + let mut testcase = state.corpus().get(i)?.borrow_mut(); + let weight = F::compute(&mut *testcase, state)?; *item = weight; sum += weight; } @@ -199,8 +205,9 @@ where } } -impl Scheduler for WeightedScheduler +impl Scheduler for WeightedScheduler where + F: TestcaseScore, S: HasCorpus + HasMetadata + HasRand, I: Input, { @@ -283,3 +290,6 @@ where } } } + +/// The standard corpus weight, same as aflpp +pub type StdWeightedScheduler = WeightedScheduler, I, S>; diff --git a/libafl/src/stages/mod.rs b/libafl/src/stages/mod.rs index 1f48f72fbf..1373632ab1 100644 --- a/libafl/src/stages/mod.rs +++ b/libafl/src/stages/mod.rs @@ -17,7 +17,7 @@ pub mod calibrate; pub use calibrate::CalibrationStage; pub mod power; -pub use power::PowerMutationalStage; +pub use power::{PowerMutationalStage, StdPowerMutationalStage}; pub mod generalization; pub use generalization::GeneralizationStage; diff --git a/libafl/src/stages/power.rs b/libafl/src/stages/power.rs index 7a0642d2b2..86545aad0e 100644 --- a/libafl/src/stages/power.rs +++ b/libafl/src/stages/power.rs @@ -10,16 +10,21 @@ use crate::{ inputs::Input, mutators::Mutator, observers::{MapObserver, ObserversTuple}, - schedulers::powersched::{PowerSchedule, PowerScheduleMetadata}, + schedulers::{ + powersched::{PowerSchedule, PowerScheduleMetadata}, + testcase_score::CorpusPowerTestcaseScore, + TestcaseScore, + }, stages::{MutationalStage, Stage}, state::{HasClientPerfMonitor, HasCorpus, HasMetadata}, Error, }; /// The mutational stage using power schedules #[derive(Clone, Debug)] -pub struct PowerMutationalStage +pub struct PowerMutationalStage where E: Executor + HasObservers, + F: TestcaseScore, I: Input, M: Mutator, O: MapObserver, @@ -30,13 +35,14 @@ where map_observer_name: String, mutator: M, #[allow(clippy::type_complexity)] - phantom: PhantomData<(E, EM, I, O, OT, S, Z)>, + phantom: PhantomData<(E, F, EM, I, O, OT, S, Z)>, } -impl MutationalStage - for PowerMutationalStage +impl MutationalStage + for PowerMutationalStage where E: Executor + HasObservers, + F: TestcaseScore, I: Input, M: Mutator, O: MapObserver, @@ -57,16 +63,11 @@ where } /// Gets the number of iterations as a random number + #[allow(clippy::cast_sign_loss)] fn iterations(&self, state: &mut S, corpus_idx: usize) -> Result { - // Calculate score - let score = state - .corpus() - .get(corpus_idx)? - .borrow() - .calculate_score(state); - // Update handicap let mut testcase = state.corpus().get(corpus_idx)?.borrow_mut(); + let score = F::compute(&mut *testcase, state)? 
as usize; let tcmeta = testcase .metadata_mut() .get_mut::() @@ -79,7 +80,7 @@ where tcmeta.set_handicap(tcmeta.handicap() - 1); } - score + Ok(score) } #[allow(clippy::cast_possible_wrap)] @@ -141,9 +142,11 @@ where } } -impl Stage for PowerMutationalStage +impl Stage + for PowerMutationalStage where E: Executor + HasObservers, + F: TestcaseScore, I: Input, M: Mutator, O: MapObserver, @@ -166,9 +169,10 @@ where } } -impl PowerMutationalStage +impl PowerMutationalStage where E: Executor + HasObservers, + F: TestcaseScore, I: Input, M: Mutator, O: MapObserver, @@ -186,3 +190,7 @@ where } } } + +/// The standard powerscheduling stage +pub type StdPowerMutationalStage = + PowerMutationalStage, EM, I, M, O, OT, S, Z>;
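
A minimal sketch of the renamed scoring API, mirroring the tutorial's PacketLenTestcaseScore hunk above. The ShortestLenScore type, its length-based score, and the exact angle-bracketed generic parameters are reconstructed by analogy with the bounds shown in the hunks and are illustrative assumptions, not part of the patch:

use libafl::{
    corpus::Testcase,
    feedbacks::MapIndexesMetadata,
    inputs::BytesInput,
    schedulers::{MinimizerScheduler, TestcaseScore},
    state::{HasCorpus, HasMetadata},
    Error,
};

/// Illustrative score: favor shorter testcases (lower is better, returned as f64).
pub struct ShortestLenScore {}

impl<S> TestcaseScore<BytesInput, S> for ShortestLenScore
where
    S: HasCorpus<BytesInput> + HasMetadata,
{
    fn compute(entry: &mut Testcase<BytesInput>, _state: &S) -> Result<f64, Error> {
        // The new signature passes &S so a score can read global state metadata
        // (e.g. PowerScheduleMetadata); this sketch only needs the cached input length.
        Ok(entry.cached_len()? as f64)
    }
}

/// Plug the custom score into a MinimizerScheduler, as the tutorial does for
/// PacketLenTestcaseScore (alias name is hypothetical).
pub type ShortestLenMinimizerScheduler<CS, S> =
    MinimizerScheduler<CS, ShortestLenScore, BytesInput, MapIndexesMetadata, S>;

Passing the state into compute is what lets CorpusWeightTestcaseScore and CorpusPowerTestcaseScore consult PowerScheduleMetadata and TopRatedsMetadata, which is why compute_weight and calculate_score move out of corpus/testcase.rs and into schedulers/testcase_score.rs in this patch.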