Dedup common code in scheduler (#1702)

* dedup common code in scheduler

* del eco

* fixing

* fix
This commit is contained in:
Dongjia "toka" Zhang 2024-01-01 18:22:03 +01:00 committed by GitHub
parent df96bb02ee
commit 2717018601
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 252 additions and 658 deletions

View File

@ -1,431 +0,0 @@
//! The corpus scheduler from `EcoFuzz` (`https://www.usenix.org/conference/usenixsecurity20/presentation/yue`)
use alloc::string::{String, ToString};
use core::marker::PhantomData;
use libafl_bolts::math::integer_sqrt;
use serde::{Deserialize, Serialize};
use crate::{
corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata, Testcase},
observers::{MapObserver, ObserversTuple},
schedulers::{powersched::SchedulerMetadata, testcase_score::TestcaseScore, Scheduler},
state::{HasCorpus, HasExecutions, HasMetadata, HasRand, State, UsesState},
Error,
};
/// The state of the `EcoFuzz` scheduling algorithm.
///
/// The scheduler switches between `Exploration` (unfuzzed entries are picked
/// first) and `Exploitation` (focus on the most promising entries); the same
/// enum is also reused per-testcase to mark whether an entry was exploited.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Copy, Default)]
pub enum EcoState {
    /// Initial state: not classified yet
    #[default]
    None = 0,
    /// Same probability scheduling
    Exploration = 1,
    /// Focused fuzzing scheduling
    Exploitation = 2,
}
/// The testcase Metadata for `EcoScheduler`
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
#[cfg_attr(
    any(not(feature = "serdeany_autoreg"), miri),
    allow(clippy::unsafe_derive_deserialize)
)] // for SerdeAny
pub struct EcoTestcaseMetadata {
    // Total number of mutants executed while this entry was the scheduled one
    mutation_num: u64,
    // Number of corpus entries sharing this entry's `n_fuzz` path hash at add time
    exec_num: u64,
    // How many mutants of this entry hit the same path hash as the entry itself
    exec_by_mutation: u64,
    // New corpus entries discovered during this entry's last fuzzing round
    found: usize,
    // Energy (mutation count) spent on this entry in its last round
    last_energy: u64,
    // Per-entry exploration/exploitation marker
    state: EcoState,
    // 1-based position at which this entry joined the corpus
    serial: u64,
    // Last raw score computed by `EcoTestcaseScore::compute` (before clamping)
    computed_score: f64,
}
libafl_bolts::impl_serdeany!(EcoTestcaseMetadata);
/// The state Metadata for `EcoScheduler`
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
#[cfg_attr(
    any(not(feature = "serdeany_autoreg"), miri),
    allow(clippy::unsafe_derive_deserialize)
)] // for SerdeAny
pub struct EcoMetadata {
    // Current global phase of the algorithm
    state: EcoState,
    // Corpus size observed on the very first scheduling round (set once)
    initial_corpus_count: Option<usize>,
    // `mutation_num` of the entry at the moment it was scheduled
    last_mutation_num: u64,
    // Corpus size at the moment the last entry was scheduled
    last_corpus_count: usize,
    // Total executions at the moment the last entry was scheduled
    last_executions: usize,
    // Sample counter for the running average of `rate` (capped at count / 100)
    calculate_coe: u64,
    // Running average of `regret`, kept within [0.1, 1.5]
    rate: f64,
    // Executions spent per unit of energy for the most recent finding
    regret: f64,
}
libafl_bolts::impl_serdeany!(EcoMetadata);
/// A corpus scheduler implementing `EcoFuzz` (`https://www.usenix.org/conference/usenixsecurity20/presentation/yue`)
#[derive(Clone, Debug)]
pub struct EcoScheduler<O, S> {
    // Name of the `MapObserver` used to look the observer up after evaluation
    map_observer_name: String,
    // Path hash of the last evaluated input (index into `SchedulerMetadata::n_fuzz`)
    last_hash: usize,
    // Marker for the observer and state types; no runtime data
    phantom: PhantomData<(O, S)>,
}
impl<O, S> EcoScheduler<O, S>
where
S: HasCorpus + HasMetadata + HasRand + HasExecutions + HasTestcase,
O: MapObserver,
{
/// Create a new [`EcoScheduler`] without any power schedule
#[must_use]
pub fn new(state: &mut S, map_observer: &O) -> Self {
if !state.has_metadata::<SchedulerMetadata>() {
state.add_metadata(SchedulerMetadata::new(None));
}
if !state.has_metadata::<EcoMetadata>() {
state.add_metadata(EcoMetadata::default());
}
Self {
map_observer_name: map_observer.name().to_string(),
last_hash: 0,
phantom: PhantomData,
}
}
#[allow(clippy::cast_precision_loss)]
fn handle_previous(id: CorpusId, state: &mut S) -> Result<(), Error> {
let count = state.corpus().count();
let (last_corpus_count, last_mutation_num, regret) = {
let m = state.metadata_mut::<EcoMetadata>()?;
(m.last_corpus_count, m.last_mutation_num, m.regret)
};
{
let mut testcase = state.testcase_mut(id)?;
let tcmeta = testcase.metadata_mut::<EcoTestcaseMetadata>()?;
debug_assert!(tcmeta.mutation_num >= last_mutation_num);
tcmeta.last_energy = tcmeta.mutation_num - last_mutation_num;
tcmeta.found = count - last_corpus_count;
// Set was_fuzzed for the old current
};
let meta = state.metadata_mut::<EcoMetadata>()?;
meta.rate =
((meta.rate * meta.calculate_coe as f64) + regret) / (meta.calculate_coe as f64 + 1.0);
meta.calculate_coe += 1;
if meta.calculate_coe > count as u64 / 100 {
meta.calculate_coe = count as u64 / 100;
}
if meta.rate > 1.5 {
meta.rate = 1.5;
} else if meta.rate < 0.1 {
meta.rate = 0.1;
}
Ok(())
}
fn first_iteration(state: &mut S) -> Result<(), Error> {
let count = state.corpus().count();
state
.metadata_mut::<EcoMetadata>()?
.initial_corpus_count
.get_or_insert(count);
Ok(())
}
/// Create a new alias table when the fuzzer finds a new corpus entry
fn schedule(state: &mut S) -> Result<CorpusId, Error> {
// println!("{:#?}", state.metadata::<EcoMetadata>());
for id in state.corpus().ids() {
let was_fuzzed = state.testcase(id)?.scheduled_count() > 0;
if !was_fuzzed {
let selection = Some(id);
state.metadata_mut::<EcoMetadata>()?.state = EcoState::Exploration;
#[allow(clippy::unnecessary_literal_unwrap)] // false positive
return Ok(selection.expect("Error in the algorithm, this cannot be None"));
}
}
state.metadata_mut::<EcoMetadata>()?.state = EcoState::Exploitation;
let mut cur = state.corpus().first();
while let Some(id) = cur {
let testcase_state = state.testcase(id)?.metadata::<EcoTestcaseMetadata>()?.state;
if testcase_state != EcoState::Exploitation {
break;
}
cur = state.corpus().next(id);
}
if cur.is_none() {
for id in state.corpus().ids() {
state
.testcase_mut(id)?
.metadata_mut::<EcoTestcaseMetadata>()?
.state = EcoState::None;
}
cur = state.corpus().first();
}
let mut selection = cur.unwrap();
let mut selection_meta = state
.testcase(selection)?
.metadata::<EcoTestcaseMetadata>()?
.clone();
for id in state.corpus().ids() {
let testcase = state.testcase(id)?;
let meta = testcase.metadata::<EcoTestcaseMetadata>()?;
if meta.exec_by_mutation
* selection_meta.mutation_num
* integer_sqrt(selection_meta.serial)
< selection_meta.exec_by_mutation * meta.mutation_num * integer_sqrt(meta.serial)
&& meta.state == EcoState::None
{
selection = id;
selection_meta = meta.clone();
}
}
// println!("selection_meta {:#?}", selection_meta);
Ok(selection)
}
}
// The scheduler is parameterized directly over the fuzzer's state type.
impl<O, S> UsesState for EcoScheduler<O, S>
where
    S: State,
{
    type State = S;
}
impl<O, S> Scheduler for EcoScheduler<O, S>
where
    S: HasCorpus + HasMetadata + HasRand + HasExecutions + HasTestcase + State,
    O: MapObserver,
{
    /// Called when a [`Testcase`] is added to the corpus.
    ///
    /// Computes the new entry's depth and `regret`, attaches the scheduler
    /// metadata, and records how many entries share its path hash.
    #[allow(clippy::cast_precision_loss)]
    fn on_add(&mut self, state: &mut S, idx: CorpusId) -> Result<(), Error> {
        let current_idx = *state.corpus().current();
        // Depth of the new entry is its parent's depth + 1 (1 for a root entry).
        let mut depth = match current_idx {
            Some(parent_idx) => state
                .testcase_mut(parent_idx)?
                .metadata_mut::<SchedulerTestcaseMetadata>()?
                .depth(),
            None => 0,
        };
        // assert!(self.last_hash != 0);
        let cur_exec = *state.executions();
        let last_exec = state.metadata::<EcoMetadata>()?.last_executions;
        // Energy the parent spent in its previous round; if it has none yet,
        // fall back to the executions since the last scheduling decision.
        let last_energy = if let Some(parent_idx) = current_idx {
            let e = state
                .testcase(parent_idx)?
                .metadata::<EcoTestcaseMetadata>()?
                .last_energy;
            if e == 0 {
                (cur_exec - last_exec) as u64
            } else {
                e
            }
        } else {
            (cur_exec - last_exec) as u64
        };
        // Regret: executions spent per unit of energy for this new find.
        let mut regret = (cur_exec - last_exec) as f64 / last_energy as f64;
        if regret == 0.0 {
            regret = 1.1;
        }
        state.metadata_mut::<EcoMetadata>()?.regret = regret;
        // Attach a `SchedulerTestcaseMetadata` to the queue entry.
        depth += 1;
        {
            let mut testcase = state.testcase_mut(idx)?;
            testcase.add_metadata(SchedulerTestcaseMetadata::with_n_fuzz_entry(
                depth,
                self.last_hash,
            ));
            testcase.set_parent_id_optional(current_idx);
        }
        // Add the testcase metadata for this scheduler
        state
            .testcase_mut(idx)?
            .add_metadata(EcoTestcaseMetadata::default());
        // Count how many corpus entries map to the same path hash as the
        // input that produced this entry.
        let mut exec_num = 0;
        for id in state.corpus().ids() {
            let entry = state
                .testcase(id)?
                .metadata::<SchedulerTestcaseMetadata>()?
                .n_fuzz_entry();
            if entry == self.last_hash {
                exec_num += 1;
            }
        }
        let mut tc = state.testcase_mut(idx)?;
        let tcmeta = tc.metadata_mut::<EcoTestcaseMetadata>()?;
        tcmeta.exec_num = exec_num;
        // `serial` is the 1-based position at which this entry joined.
        tcmeta.serial = (state.corpus().count() as u64).saturating_add(1);
        Ok(())
    }

    /// Called after every evaluation: bumps the global path-frequency counter
    /// for the observed path hash and the current entry's mutation counters.
    fn on_evaluation<OT>(
        &mut self,
        state: &mut S,
        _input: &S::Input,
        observers: &OT,
    ) -> Result<(), Error>
    where
        OT: ObserversTuple<Self::State>,
    {
        let observer = observers
            .match_name::<O>(&self.map_observer_name)
            .ok_or_else(|| Error::key_not_found("MapObserver not found".to_string()))?;
        // Reduce the map hash to an index into the `n_fuzz` table.
        let mut hash = observer.hash() as usize;
        let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
        hash %= psmeta.n_fuzz().len();
        // Update the path frequency
        psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1);
        if let Some(id) = *state.corpus().current() {
            state
                .testcase_mut(id)?
                .metadata_mut::<EcoTestcaseMetadata>()?
                .mutation_num += 1;
            // If the mutant hit the same path hash as its origin entry, count
            // it as an execution "by mutation" for that entry.
            let entry = state
                .testcase(id)?
                .metadata::<SchedulerTestcaseMetadata>()?
                .n_fuzz_entry();
            if entry == hash {
                state
                    .testcase_mut(id)?
                    .metadata_mut::<EcoTestcaseMetadata>()?
                    .exec_by_mutation += 1;
            }
        }
        self.last_hash = hash;
        Ok(())
    }

    /// Pick the next entry: finish book-keeping for the previous one, run the
    /// `EcoFuzz` schedule, then snapshot the counters needed next round.
    fn next(&mut self, state: &mut S) -> Result<CorpusId, Error> {
        if let Some(id) = *state.corpus().current() {
            Self::handle_previous(id, state)?;
        } else {
            Self::first_iteration(state)?;
        }
        let id = Self::schedule(state)?;
        self.set_current_scheduled(state, Some(id))?;
        let count = state.corpus().count();
        let executions = *state.executions();
        let last_mutation_num = state
            .testcase(id)?
            .metadata::<EcoTestcaseMetadata>()?
            .mutation_num;
        let meta = state.metadata_mut::<EcoMetadata>()?;
        meta.last_corpus_count = count;
        meta.last_mutation_num = last_mutation_num;
        // TODO in theory it should be assigned at the beginning of the mutational stage
        // we must not count executions done in other stages
        meta.last_executions = executions;
        Ok(id)
    }
}
/// The weight for each corpus entry
/// This result is used for corpus scheduling
#[derive(Debug, Clone)]
pub struct EcoTestcaseScore<S> {
    // Marker for the state type; no runtime data is stored.
    phantom: PhantomData<S>,
}
impl<S> TestcaseScore<S> for EcoTestcaseScore<S>
where
    S: HasCorpus + HasMetadata + HasExecutions,
{
    /// Compute the `weight` used in weighted corpus entry selection algo.
    ///
    /// The score is `energy * rate`, where `energy` depends on the current
    /// phase and the entry's history, clamped so it never drops below 1.0.
    #[allow(clippy::cast_precision_loss, clippy::cast_lossless)]
    fn compute(state: &S, entry: &mut Testcase<S::Input>) -> Result<f64, Error> {
        // subtract # initial inputs to the corpus count
        let mut energy = 0;
        let (cur_state, rate, initial_corpus_count) = {
            let meta = state.metadata::<EcoMetadata>()?;
            (meta.state, meta.rate, meta.initial_corpus_count)
        };
        let initial = initial_corpus_count.unwrap_or(0);
        // Average executions per newly found corpus entry; when nothing new
        // was found yet, divide by the whole corpus instead (the difference
        // would be zero).
        let mut average_cost: u64 = if state.corpus().count() == initial {
            *state.executions() as u64 / state.corpus().count() as u64
        } else {
            *state.executions() as u64 / (state.corpus().count() - initial) as u64
        };
        if average_cost == 0 {
            // Fallback when there are fewer executions than entries.
            average_cost = 1024;
        }
        let meta = entry.metadata_mut::<EcoTestcaseMetadata>()?;
        if cur_state == EcoState::Exploitation {
            // Mark the entry as exploited; energy scales with its last round,
            // capped at 16x the average cost, doubled if it found nothing.
            meta.state = EcoState::Exploitation;
            if meta.found == 0 {
                energy = core::cmp::min(2 * meta.last_energy, 16 * average_cost);
            } else {
                energy = core::cmp::min(meta.last_energy, 16 * average_cost);
            }
        }
        // Precedence: `(exploitation && energy == 0) || !exploitation`.
        if cur_state == EcoState::Exploitation && energy == 0 || cur_state != EcoState::Exploitation
        {
            // Entries whose path hash is shared by many corpus entries
            // receive a smaller fraction of the average cost.
            if meta.exec_num > average_cost {
                energy = average_cost / 4;
            } else if meta.exec_num > average_cost / 2 {
                energy = average_cost / 2;
            } else {
                energy = average_cost;
            }
        }
        let mut score = energy as f64 * rate;
        // Remember the raw score; the returned value is clamped to >= 1.0.
        meta.computed_score = score;
        if score < 1.0 {
            score = 1.0;
        }
        Ok(score)
    }
}

View File

@ -1,6 +1,9 @@
//! Schedule the access to the Corpus.
use alloc::borrow::ToOwned;
use alloc::{
borrow::ToOwned,
string::{String, ToString},
};
use core::marker::PhantomData;
pub mod testcase_score;
@ -15,7 +18,7 @@ pub use minimizer::{
};
pub mod powersched;
pub use powersched::PowerQueueScheduler;
pub use powersched::{PowerQueueScheduler, SchedulerMetadata};
pub mod probabilistic_sampling;
pub use probabilistic_sampling::ProbabilitySamplingScheduler;
@ -26,23 +29,20 @@ pub use accounting::CoverageAccountingScheduler;
pub mod weighted;
pub use weighted::{StdWeightedScheduler, WeightedScheduler};
pub mod ecofuzz;
pub use ecofuzz::{EcoMetadata, EcoScheduler, EcoState, EcoTestcaseMetadata, EcoTestcaseScore};
pub mod tuneable;
use libafl_bolts::rands::Rand;
pub use tuneable::*;
use crate::{
corpus::{Corpus, CorpusId, HasTestcase, Testcase},
corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata, Testcase},
inputs::UsesInput,
observers::ObserversTuple,
observers::{MapObserver, ObserversTuple},
random_corpus_id,
state::{HasCorpus, HasRand, State, UsesState},
state::{HasCorpus, HasMetadata, HasRand, State, UsesState},
Error,
};
/// The scheduler also implemnts `on_remove` and `on_replace` if it implements this stage.
/// The scheduler also implements `on_remove` and `on_replace` if it implements this stage.
pub trait RemovableScheduler: Scheduler
where
Self::State: HasCorpus,
@ -68,6 +68,171 @@ where
}
}
/// Define the metadata operations when removing a testcase from an AFL-style scheduler
pub trait HasAFLRemovableScheduler: RemovableScheduler
where
    Self::State: HasCorpus + HasMetadata + HasTestcase,
{
    /// Adjusting metadata when removing the testcase.
    ///
    /// # Errors
    /// Fails if `prev` is `None` (the scheduler needs the removed entry to
    /// reweight) or if the required metadata is missing.
    #[allow(clippy::cast_precision_loss)]
    fn on_remove_metadata(
        &mut self,
        state: &mut Self::State,
        _idx: CorpusId,
        prev: &Option<Testcase<<Self::State as UsesInput>::Input>>,
    ) -> Result<(), Error> {
        let prev = prev.as_ref().ok_or_else(|| {
            Error::illegal_argument(
                "Power schedulers must be aware of the removed corpus entry for reweighting.",
            )
        })?;

        let prev_meta = prev.metadata::<SchedulerTestcaseMetadata>()?;

        // Use these to adjust `SchedulerMetadata`
        let (prev_total_time, prev_cycles) = prev_meta.cycle_and_time();
        let prev_bitmap_size = prev_meta.bitmap_size();
        let prev_bitmap_size_log = libm::log2(prev_bitmap_size as f64);
        let psmeta = state.metadata_mut::<SchedulerMetadata>()?;

        // Subtract the removed entry's contribution from the global stats.
        psmeta.set_exec_time(psmeta.exec_time() - prev_total_time);
        psmeta.set_cycles(psmeta.cycles() - (prev_cycles as u64));
        psmeta.set_bitmap_size(psmeta.bitmap_size() - prev_bitmap_size);
        psmeta.set_bitmap_size_log(psmeta.bitmap_size_log() - prev_bitmap_size_log);
        psmeta.set_bitmap_entries(psmeta.bitmap_entries() - 1);

        Ok(())
    }

    /// Adjusting metadata when replacing a corpus entry.
    #[allow(clippy::cast_precision_loss)]
    fn on_replace_metadata(
        &mut self,
        state: &mut Self::State,
        idx: CorpusId,
        prev: &Testcase<<Self::State as UsesInput>::Input>,
    ) -> Result<(), Error> {
        let prev_meta = prev.metadata::<SchedulerTestcaseMetadata>()?;

        // Next depth is + 1
        let prev_depth = prev_meta.depth() + 1;

        // Use these to adjust `SchedulerMetadata`
        let (prev_total_time, prev_cycles) = prev_meta.cycle_and_time();
        let prev_bitmap_size = prev_meta.bitmap_size();
        let prev_bitmap_size_log = libm::log2(prev_bitmap_size as f64);

        let psmeta = state.metadata_mut::<SchedulerMetadata>()?;

        // We won't add a new one because it'll get added when it gets executed in calibration next time.
        psmeta.set_exec_time(psmeta.exec_time() - prev_total_time);
        psmeta.set_cycles(psmeta.cycles() - (prev_cycles as u64));
        psmeta.set_bitmap_size(psmeta.bitmap_size() - prev_bitmap_size);
        psmeta.set_bitmap_size_log(psmeta.bitmap_size_log() - prev_bitmap_size_log);
        psmeta.set_bitmap_entries(psmeta.bitmap_entries() - 1);

        // Re-attach fresh scheduler metadata at the inherited depth.
        state
            .corpus()
            .get(idx)?
            .borrow_mut()
            .add_metadata(SchedulerTestcaseMetadata::new(prev_depth));
        Ok(())
    }
}
/// Defines the common metadata operations for the AFL-style schedulers
pub trait HasAFLSchedulerMetadata<O, S>: Scheduler
where
    Self::State: HasCorpus + HasMetadata + HasTestcase,
    O: MapObserver,
{
    /// Return the last hash
    fn last_hash(&self) -> usize;

    /// Set the last hash
    fn set_last_hash(&mut self, value: usize);

    /// Get the observer map observer name
    fn map_observer_name(&self) -> &String;

    /// Called when a [`Testcase`] is added to the corpus
    fn on_add_metadata(&self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> {
        let current_idx = *state.corpus().current();

        // Depth of the new entry is its parent's depth + 1 (1 for a root entry).
        let mut depth = match current_idx {
            Some(parent_idx) => state
                .testcase(parent_idx)?
                .metadata::<SchedulerTestcaseMetadata>()?
                .depth(),
            None => 0,
        };

        // TODO increase perf_score when finding new things like in AFL
        // https://github.com/google/AFL/blob/master/afl-fuzz.c#L6547

        // Attach a `SchedulerTestcaseMetadata` to the queue entry.
        depth += 1;
        let mut testcase = state.testcase_mut(idx)?;
        testcase.add_metadata(SchedulerTestcaseMetadata::with_n_fuzz_entry(
            depth,
            self.last_hash(),
        ));
        testcase.set_parent_id_optional(current_idx);
        Ok(())
    }

    /// Called when a [`Testcase`] is evaluated
    fn on_evaluation_metadata<OT>(
        &mut self,
        state: &mut Self::State,
        _input: &<Self::State as UsesInput>::Input,
        observers: &OT,
    ) -> Result<(), Error>
    where
        OT: ObserversTuple<Self::State>,
    {
        let observer = observers
            .match_name::<O>(self.map_observer_name())
            .ok_or_else(|| Error::key_not_found("MapObserver not found".to_string()))?;

        // Reduce the map hash to an index into the `n_fuzz` table.
        let mut hash = observer.hash() as usize;

        let psmeta = state.metadata_mut::<SchedulerMetadata>()?;

        hash %= psmeta.n_fuzz().len();
        // Update the path frequency
        psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1);

        self.set_last_hash(hash);

        Ok(())
    }

    /// Called when choosing the next [`Testcase`]
    fn on_next_metadata(
        &mut self,
        state: &mut Self::State,
        _next_idx: Option<CorpusId>,
    ) -> Result<(), Error> {
        let current_idx = *state.corpus().current();

        if let Some(idx) = current_idx {
            let mut testcase = state.testcase_mut(idx)?;
            let tcmeta = testcase.metadata_mut::<SchedulerTestcaseMetadata>()?;

            // Decay the handicap of the entry that was just fuzzed.
            if tcmeta.handicap() >= 4 {
                tcmeta.set_handicap(tcmeta.handicap() - 4);
            } else if tcmeta.handicap() > 0 {
                tcmeta.set_handicap(tcmeta.handicap() - 1);
            }
        }

        Ok(())
    }
}
/// The scheduler define how the fuzzer requests a testcase from the corpus.
/// It has hooks to corpus add/replace/remove to allow complex scheduling algorithms to collect data.
pub trait Scheduler: UsesState

View File

@ -9,10 +9,12 @@ use core::{marker::PhantomData, time::Duration};
use serde::{Deserialize, Serialize};
use crate::{
corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata, Testcase},
corpus::{Corpus, CorpusId, HasTestcase, Testcase},
inputs::UsesInput,
observers::{MapObserver, ObserversTuple},
schedulers::{RemovableScheduler, Scheduler},
schedulers::{
HasAFLRemovableScheduler, HasAFLSchedulerMetadata, RemovableScheduler, Scheduler,
},
state::{HasCorpus, HasMetadata, State, UsesState},
Error,
};
@ -167,6 +169,8 @@ pub enum PowerSchedule {
}
/// A corpus scheduler using power schedules
/// Note that this corpus is merely holding the metadata necessary for the power calculation
/// and here we DON'T actually calculate the power (we do it in the stage)
#[derive(Clone, Debug)]
pub struct PowerQueueScheduler<O, S> {
strat: PowerSchedule,
@ -182,72 +186,52 @@ where
type State = S;
}
impl<O, S> HasAFLRemovableScheduler for PowerQueueScheduler<O, S>
where
S: State + HasTestcase + HasMetadata + HasCorpus,
O: MapObserver,
{
}
impl<O, S> RemovableScheduler for PowerQueueScheduler<O, S>
where
S: HasCorpus + HasMetadata + HasTestcase + State,
O: MapObserver,
{
#[allow(clippy::cast_precision_loss)]
fn on_remove(
&mut self,
state: &mut Self::State,
idx: CorpusId,
prev: &Option<Testcase<<Self::State as UsesInput>::Input>>,
) -> Result<(), Error> {
self.on_remove_metadata(state, idx, prev)
}
fn on_replace(
&mut self,
state: &mut Self::State,
idx: CorpusId,
prev: &Testcase<<Self::State as UsesInput>::Input>,
) -> Result<(), Error> {
let prev_meta = prev.metadata::<SchedulerTestcaseMetadata>()?;
self.on_replace_metadata(state, idx, prev)
}
}
// Next depth is + 1
let prev_depth = prev_meta.depth() + 1;
// Use these to adjust `SchedulerMetadata`
let (prev_total_time, prev_cycles) = prev_meta.cycle_and_time();
let prev_bitmap_size = prev_meta.bitmap_size();
let prev_bitmap_size_log = libm::log2(prev_bitmap_size as f64);
let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
// We won't add new one because it'll get added when it gets executed in calirbation next time.
psmeta.set_exec_time(psmeta.exec_time() - prev_total_time);
psmeta.set_cycles(psmeta.cycles() - (prev_cycles as u64));
psmeta.set_bitmap_size(psmeta.bitmap_size() - prev_bitmap_size);
psmeta.set_bitmap_size_log(psmeta.bitmap_size_log() - prev_bitmap_size_log);
psmeta.set_bitmap_entries(psmeta.bitmap_entries() - 1);
state
.testcase_mut(idx)?
.add_metadata(SchedulerTestcaseMetadata::new(prev_depth));
Ok(())
impl<O, S> HasAFLSchedulerMetadata<O, S> for PowerQueueScheduler<O, S>
where
S: HasCorpus + HasMetadata + HasTestcase + State,
O: MapObserver,
{
fn last_hash(&self) -> usize {
self.last_hash
}
#[allow(clippy::cast_precision_loss)]
fn on_remove(
&mut self,
state: &mut Self::State,
_idx: CorpusId,
prev: &Option<Testcase<<Self::State as UsesInput>::Input>>,
) -> Result<(), Error> {
let prev = prev.as_ref().ok_or_else(|| {
Error::illegal_argument(
"Power schedulers must be aware of the removed corpus entry for reweighting.",
)
})?;
fn set_last_hash(&mut self, hash: usize) {
self.last_hash = hash;
}
let prev_meta = prev.metadata::<SchedulerTestcaseMetadata>()?;
// Use these to adjust `SchedulerMetadata`
let (prev_total_time, prev_cycles) = prev_meta.cycle_and_time();
let prev_bitmap_size = prev_meta.bitmap_size();
let prev_bitmap_size_log = libm::log2(prev_bitmap_size as f64);
let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
psmeta.set_exec_time(psmeta.exec_time() - prev_total_time);
psmeta.set_cycles(psmeta.cycles() - (prev_cycles as u64));
psmeta.set_bitmap_size(psmeta.bitmap_size() - prev_bitmap_size);
psmeta.set_bitmap_size_log(psmeta.bitmap_size_log() - prev_bitmap_size_log);
psmeta.set_bitmap_entries(psmeta.bitmap_entries() - 1);
Ok(())
fn map_observer_name(&self) -> &String {
&self.map_observer_name
}
}
@ -258,54 +242,19 @@ where
{
/// Called when a [`Testcase`] is added to the corpus
fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> {
let current_idx = *state.corpus().current();
let mut depth = match current_idx {
Some(parent_idx) => state
.testcase(parent_idx)?
.metadata::<SchedulerTestcaseMetadata>()?
.depth(),
None => 0,
};
// TODO increase perf_score when finding new things like in AFL
// https://github.com/google/AFL/blob/master/afl-fuzz.c#L6547
// Attach a `SchedulerTestcaseMetadata` to the queue entry.
depth += 1;
let mut testcase = state.testcase_mut(idx)?;
testcase.add_metadata(SchedulerTestcaseMetadata::with_n_fuzz_entry(
depth,
self.last_hash,
));
testcase.set_parent_id_optional(current_idx);
Ok(())
self.on_add_metadata(state, idx)
}
fn on_evaluation<OT>(
&mut self,
state: &mut Self::State,
_input: &<Self::State as UsesInput>::Input,
input: &<Self::State as UsesInput>::Input,
observers: &OT,
) -> Result<(), Error>
where
OT: ObserversTuple<Self::State>,
{
let observer = observers
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::key_not_found("MapObserver not found".to_string()))?;
let mut hash = observer.hash() as usize;
let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
hash %= psmeta.n_fuzz().len();
// Update the path frequency
psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1);
self.last_hash = hash;
Ok(())
self.on_evaluation_metadata(state, input, observers)
}
fn next(&mut self, state: &mut Self::State) -> Result<CorpusId, Error> {
@ -336,18 +285,7 @@ where
state: &mut Self::State,
next_idx: Option<CorpusId>,
) -> Result<(), Error> {
let current_idx = *state.corpus().current();
if let Some(idx) = current_idx {
let mut testcase = state.testcase_mut(idx)?;
let tcmeta = testcase.metadata_mut::<SchedulerTestcaseMetadata>()?;
if tcmeta.handicap() >= 4 {
tcmeta.set_handicap(tcmeta.handicap() - 4);
} else if tcmeta.handicap() > 0 {
tcmeta.set_handicap(tcmeta.handicap() - 1);
}
}
self.on_next_metadata(state, next_idx)?;
*state.corpus_mut().current_mut() = next_idx;
Ok(())

View File

@ -8,17 +8,15 @@ use hashbrown::HashMap;
use libafl_bolts::rands::Rand;
use serde::{Deserialize, Serialize};
#[cfg(doc)]
use crate::corpus::Testcase;
use crate::{
corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata},
corpus::{Corpus, CorpusId, HasTestcase, Testcase},
inputs::UsesInput,
observers::{MapObserver, ObserversTuple},
random_corpus_id,
schedulers::{
powersched::{PowerSchedule, SchedulerMetadata},
testcase_score::{CorpusWeightTestcaseScore, TestcaseScore},
RemovableScheduler, Scheduler,
HasAFLRemovableScheduler, HasAFLSchedulerMetadata, RemovableScheduler, Scheduler,
},
state::{HasCorpus, HasMetadata, HasRand, State, UsesState},
Error,
@ -229,75 +227,55 @@ where
type State = S;
}
impl<F, O, S> HasAFLRemovableScheduler for WeightedScheduler<F, O, S>
where
F: TestcaseScore<S>,
S: State + HasTestcase + HasMetadata + HasCorpus + HasRand,
O: MapObserver,
{
}
impl<F, O, S> RemovableScheduler for WeightedScheduler<F, O, S>
where
F: TestcaseScore<S>,
O: MapObserver,
S: HasCorpus + HasMetadata + HasRand + HasTestcase + State,
{
#[allow(clippy::cast_precision_loss)]
fn on_remove(
&mut self,
state: &mut Self::State,
_idx: CorpusId,
prev: &Option<crate::corpus::Testcase<<Self::State as UsesInput>::Input>>,
idx: CorpusId,
prev: &Option<Testcase<<Self::State as UsesInput>::Input>>,
) -> Result<(), Error> {
let prev = prev.as_ref().ok_or_else(|| {
Error::illegal_argument(
"Power schedulers must be aware of the removed corpus entry for reweighting.",
)
})?;
let prev_meta = prev.metadata::<SchedulerTestcaseMetadata>()?;
// Use these to adjust `SchedulerMetadata`
let (prev_total_time, prev_cycles) = prev_meta.cycle_and_time();
let prev_bitmap_size = prev_meta.bitmap_size();
let prev_bitmap_size_log = libm::log2(prev_bitmap_size as f64);
let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
psmeta.set_exec_time(psmeta.exec_time() - prev_total_time);
psmeta.set_cycles(psmeta.cycles() - (prev_cycles as u64));
psmeta.set_bitmap_size(psmeta.bitmap_size() - prev_bitmap_size);
psmeta.set_bitmap_size_log(psmeta.bitmap_size_log() - prev_bitmap_size_log);
psmeta.set_bitmap_entries(psmeta.bitmap_entries() - 1);
Ok(())
self.on_remove_metadata(state, idx, prev)
}
#[allow(clippy::cast_precision_loss)]
fn on_replace(
&mut self,
state: &mut Self::State,
idx: CorpusId,
prev: &crate::corpus::Testcase<<Self::State as UsesInput>::Input>,
prev: &Testcase<<Self::State as UsesInput>::Input>,
) -> Result<(), Error> {
let prev_meta = prev.metadata::<SchedulerTestcaseMetadata>()?;
self.on_replace_metadata(state, idx, prev)
}
}
// Next depth is + 1
let prev_depth = prev_meta.depth() + 1;
impl<F, O, S> HasAFLSchedulerMetadata<O, S> for WeightedScheduler<F, O, S>
where
F: TestcaseScore<S>,
S: HasCorpus + HasMetadata + HasTestcase + HasRand + State,
O: MapObserver,
{
fn last_hash(&self) -> usize {
self.last_hash
}
// Use these to adjust `SchedulerMetadata`
let (prev_total_time, prev_cycles) = prev_meta.cycle_and_time();
let prev_bitmap_size = prev_meta.bitmap_size();
let prev_bitmap_size_log = libm::log2(prev_bitmap_size as f64);
fn set_last_hash(&mut self, hash: usize) {
self.last_hash = hash;
}
let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
// We won't add new one because it'll get added when it gets executed in calirbation next time.
psmeta.set_exec_time(psmeta.exec_time() - prev_total_time);
psmeta.set_cycles(psmeta.cycles() - (prev_cycles as u64));
psmeta.set_bitmap_size(psmeta.bitmap_size() - prev_bitmap_size);
psmeta.set_bitmap_size_log(psmeta.bitmap_size_log() - prev_bitmap_size_log);
psmeta.set_bitmap_entries(psmeta.bitmap_entries() - 1);
state
.corpus()
.get(idx)?
.borrow_mut()
.add_metadata(SchedulerTestcaseMetadata::new(prev_depth));
Ok(())
fn map_observer_name(&self) -> &String {
&self.map_observer_name
}
}
@ -309,59 +287,20 @@ where
{
/// Called when a [`Testcase`] is added to the corpus
fn on_add(&mut self, state: &mut S, idx: CorpusId) -> Result<(), Error> {
let current_idx = *state.corpus().current();
let mut depth = match current_idx {
Some(parent_idx) => state
.testcase_mut(parent_idx)?
.metadata_mut::<SchedulerTestcaseMetadata>()?
.depth(),
None => 0,
};
// Attach a `SchedulerTestcaseMetadata` to the queue entry.
depth += 1;
{
let mut testcase = state.corpus().get(idx)?.borrow_mut();
testcase.add_metadata(SchedulerTestcaseMetadata::with_n_fuzz_entry(
depth,
self.last_hash,
));
testcase.set_parent_id_optional(current_idx);
}
// TODO increase perf_score when finding new things like in AFL
// https://github.com/google/AFL/blob/master/afl-fuzz.c#L6547
// Recreate the alias table
self.create_alias_table(state)?;
Ok(())
self.on_add_metadata(state, idx)?;
self.create_alias_table(state)
}
fn on_evaluation<OT>(
&mut self,
state: &mut Self::State,
_input: &<Self::State as UsesInput>::Input,
input: &<Self::State as UsesInput>::Input,
observers: &OT,
) -> Result<(), Error>
where
OT: ObserversTuple<Self::State>,
{
let observer = observers
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::key_not_found("MapObserver not found".to_string()))?;
let mut hash = observer.hash() as usize;
let psmeta = state.metadata_mut::<SchedulerMetadata>()?;
hash %= psmeta.n_fuzz().len();
// Update the path frequency
psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1);
self.last_hash = hash;
Ok(())
self.on_evaluation_metadata(state, input, observers)
}
#[allow(clippy::similar_names, clippy::cast_precision_loss)]
@ -409,18 +348,7 @@ where
state: &mut Self::State,
next_idx: Option<CorpusId>,
) -> Result<(), Error> {
let current_idx = *state.corpus().current();
if let Some(idx) = current_idx {
let mut testcase = state.testcase_mut(idx)?;
let tcmeta = testcase.metadata_mut::<SchedulerTestcaseMetadata>()?;
if tcmeta.handicap() >= 4 {
tcmeta.set_handicap(tcmeta.handicap() - 4);
} else if tcmeta.handicap() > 0 {
tcmeta.set_handicap(tcmeta.handicap() - 1);
}
}
self.on_next_metadata(state, next_idx)?;
*state.corpus_mut().current_mut() = next_idx;
Ok(())

View File

@ -7,9 +7,7 @@ use crate::{
executors::{Executor, HasObservers},
fuzzer::Evaluator,
mutators::Mutator,
schedulers::{
ecofuzz::EcoTestcaseScore, testcase_score::CorpusPowerTestcaseScore, TestcaseScore,
},
schedulers::{testcase_score::CorpusPowerTestcaseScore, TestcaseScore},
stages::{mutational::MutatedTransform, MutationalStage, Stage},
state::{HasCorpus, HasMetadata, HasRand, UsesState},
Error,
@ -124,7 +122,3 @@ where
/// The standard powerscheduling stage
pub type StdPowerMutationalStage<E, EM, I, M, Z> =
PowerMutationalStage<E, CorpusPowerTestcaseScore<<E as UsesState>::State>, EM, I, M, Z>;
/// Ecofuzz scheduling stage
pub type EcoPowerMutationalStage<E, EM, I, M, Z> =
PowerMutationalStage<E, EcoTestcaseScore<<E as UsesState>::State>, EM, I, M, Z>;