Re-Rename Retrying Restart Helper (#2340)

Dominik Maier 2024-06-26 23:49:22 +02:00 committed by GitHub
parent 3616cc6a55
commit ea6e440762
12 changed files with 67 additions and 67 deletions
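In short, this is a mechanical rename: `StdRestartHelper` becomes `RetryCountRestartHelper` and `NestedStageStdRestartHelper` becomes `NestedStageRetryCountRestartHelper`, with every call site, `impl_serdeany!`/registry registration, doc reference, and test updated accordingly (additions and deletions pair up one-for-one). For orientation, the call pattern repeated throughout the diff looks roughly like the fragment below; it is not a standalone stage, and the stage type and its `name` field stand for whatever concrete stage implements it:

    // Typical restart handling inside a Stage impl, as seen throughout this diff.
    fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
        // Allow up to 3 tries on the current testcase before skipping it.
        RetryCountRestartHelper::should_restart(state, &self.name, 3)
    }

    fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
        // Forget the per-testcase retry progress once the stage finished.
        RetryCountRestartHelper::clear_progress(state, &self.name)
    }

Deterministic stages call `RetryCountRestartHelper::no_retry(state, &self.name)` instead, so a testcase that fails there is never retried.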

View File

@@ -11,7 +11,7 @@ use libafl::{
mutators::{havoc_mutations, StdScheduledMutator},
observers::StdMapObserver,
schedulers::QueueScheduler,
stages::{StdRestartHelper, StdMutationalStage},
stages::{RetryCountRestartHelper, StdMutationalStage},
state::{HasSolutions, StdState},
Fuzzer, StdFuzzer,
};
@@ -44,7 +44,7 @@ pub fn fuzz() {
// No concurrency in WASM so these accesses are not racing.
unsafe {
RegistryBuilder::register::<MapFeedbackMetadata<u8>>();
RegistryBuilder::register::<StdRestartHelper>();
RegistryBuilder::register::<RetryCountRestartHelper>();
}
let mut signals = [0u8; 64];

View File

@@ -20,7 +20,7 @@ use crate::{
monitors::{AggregatorOps, UserStats, UserStatsValue},
observers::{MapObserver, ObserversTuple},
schedulers::powersched::SchedulerMetadata,
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
state::{HasCorpus, HasCurrentTestcase, HasExecutions, UsesState},
Error, HasMetadata, HasNamedMetadata,
};
@@ -353,7 +353,7 @@ where
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// The calibration stage disallows restarts:
// if a testcase in the queue causes a crash/timeout, we need to remove it from the queue immediately.
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
// todo
// remove this guy from corpus queue
@@ -361,7 +361,7 @@ where
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
// TODO: Make sure this is the correct way / there may be a better way?
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -19,7 +19,7 @@ use crate::{
inputs::HasMutatorBytes,
mutators::mutations::buffer_copy,
observers::{MapObserver, ObserversTuple},
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
state::{HasCorpus, HasCurrentTestcase, HasRand, UsesState},
Error, HasMetadata, HasNamedMetadata,
};
@@ -112,11 +112,11 @@ where
// This is a deterministic stage.
// Once it has failed, don't retry:
// it will just fail again.
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -19,7 +19,7 @@ use crate::state::HasClientPerfMonitor;
use crate::{
executors::{Executor, HasObservers},
observers::concolic::ConcolicObserver,
stages::{Stage, StdRestartHelper, TracingStage},
stages::{RetryCountRestartHelper, Stage, TracingStage},
state::{HasCorpus, HasCurrentTestcase, HasExecutions, UsesState},
Error, HasMetadata, HasNamedMetadata,
};
@@ -88,11 +88,11 @@ where
// This is a deterministic stage.
// Once it has failed, don't retry:
// it will just fail again.
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}
@@ -436,12 +436,12 @@ where
// This is a deterministic stage.
// Once it has failed, don't retry:
// it will just fail again.
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
#[inline]
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -19,7 +19,7 @@ use crate::{
mark_feature_time,
observers::{CanTrack, MapObserver, ObserversTuple},
require_novelties_tracking,
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
start_timer,
state::{HasCorpus, HasExecutions, UsesState},
Error, HasMetadata, HasNamedMetadata,
@@ -328,13 +328,13 @@ where
#[inline]
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// TODO: We need to be able to resume better if something crashes or times out
StdRestartHelper::should_restart(state, &self.name, 3)
RetryCountRestartHelper::should_restart(state, &self.name, 3)
}
#[inline]
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
// TODO: We need to be able to resume better if something crashes or times out
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -10,9 +10,9 @@ use crate::{
/// Progress for nested stages. This merely enters/exits the inner stage's scope.
#[derive(Debug)]
pub struct NestedStageStdRestartHelper;
pub struct NestedStageRetryCountRestartHelper;
impl NestedStageStdRestartHelper {
impl NestedStageRetryCountRestartHelper {
fn should_restart<S, ST>(state: &mut S, _stage: &ST) -> Result<bool, Error>
where
S: HasNestedStageStatus,
@@ -71,11 +71,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
NestedStageStdRestartHelper::should_restart(state, self)
NestedStageRetryCountRestartHelper::should_restart(state, self)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
NestedStageStdRestartHelper::clear_progress(state, self)
NestedStageRetryCountRestartHelper::clear_progress(state, self)
}
}
@@ -135,11 +135,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
NestedStageStdRestartHelper::should_restart(state, self)
NestedStageRetryCountRestartHelper::should_restart(state, self)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
NestedStageStdRestartHelper::clear_progress(state, self)
NestedStageRetryCountRestartHelper::clear_progress(state, self)
}
}
@@ -220,11 +220,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
NestedStageStdRestartHelper::should_restart(state, self)
NestedStageRetryCountRestartHelper::should_restart(state, self)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
NestedStageStdRestartHelper::clear_progress(state, self)
NestedStageRetryCountRestartHelper::clear_progress(state, self)
}
}
@@ -281,11 +281,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
NestedStageStdRestartHelper::should_restart(state, self)
NestedStageRetryCountRestartHelper::should_restart(state, self)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
NestedStageStdRestartHelper::clear_progress(state, self)
NestedStageRetryCountRestartHelper::clear_progress(state, self)
}
}
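The doc comment above describes `NestedStageRetryCountRestartHelper` as merely entering/exiting the inner stage's scope rather than counting retries itself. Below is a minimal, self-contained sketch of that idea; the type, fields, and logic are assumptions made for illustration, not LibAFL's actual implementation (which operates on the fuzzer state via `HasNestedStageStatus`):

    // Hypothetical model: a nested restart helper that keeps no retry count of
    // its own and only tracks whether the wrapped (inner) stage's scope is open.
    #[derive(Debug, Default)]
    struct NestedScopeStatus {
        depth: usize,
    }

    impl NestedScopeStatus {
        // Entering the wrapper stage: record the scope and always allow running.
        fn should_restart(&mut self) -> bool {
            self.depth += 1;
            true
        }

        // Leaving the wrapper stage: close the scope again.
        fn clear_progress(&mut self) {
            self.depth = self.depth.saturating_sub(1);
        }
    }

    fn main() {
        let mut status = NestedScopeStatus::default();
        assert!(status.should_restart()); // enter the inner stage's scope
        status.clear_progress();          // exit it again
        assert_eq!(status.depth, 0);
    }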

View File

@@ -336,12 +336,12 @@ where
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// There's no restart safety in the content of the closure.
// don't restart
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
#[inline]
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}
@@ -481,26 +481,26 @@ where
#[inline]
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// TODO: Proper restart handling - call post_exec at the right time, etc...
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
#[inline]
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}
/// Progress which permits a fixed number of resumes per round of fuzzing. If this limit is ever
/// exceeded, the input will no longer be executed by this stage.
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct StdRestartHelper {
pub struct RetryCountRestartHelper {
tries_remaining: Option<usize>,
skipped: HashSet<CorpusId>,
}
impl_serdeany!(StdRestartHelper);
impl_serdeany!(RetryCountRestartHelper);
impl StdRestartHelper {
impl RetryCountRestartHelper {
/// Don't allow restart
pub fn no_retry<S>(state: &mut S, name: &str) -> Result<bool, Error>
where
@@ -518,7 +518,7 @@ impl StdRestartHelper {
{
let corpus_id = state.current_corpus_id()?.ok_or_else(|| {
Error::illegal_state(
"No current_corpus_id set in State, but called StdRestartHelper::should_skip",
"No current_corpus_id set in State, but called RetryCountRestartHelper::should_skip",
)
})?;
@@ -689,7 +689,7 @@ pub mod test {
use crate::{
corpus::{Corpus, HasCurrentCorpusId, Testcase},
inputs::NopInput,
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
state::{test::test_std_state, HasCorpus, State, UsesState},
HasMetadata,
};
@@ -776,7 +776,7 @@ pub mod test {
// No concurrency per testcase
#[cfg(any(not(feature = "serdeany_autoreg"), miri))]
unsafe {
StdRestartHelper::register();
RetryCountRestartHelper::register();
}
struct StageWithOneTry;
@@ -797,37 +797,37 @@ pub mod test {
for _ in 0..10 {
// used normally, no retries means we never skip
assert!(StdRestartHelper::should_restart(
assert!(RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
1
)?);
StdRestartHelper::clear_progress(&mut state, stage.name())?;
RetryCountRestartHelper::clear_progress(&mut state, stage.name())?;
}
for _ in 0..10 {
// used normally, only one retry means we never skip
assert!(StdRestartHelper::should_restart(
assert!(RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
2
)?);
assert!(StdRestartHelper::should_restart(
assert!(RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
2
)?);
StdRestartHelper::clear_progress(&mut state, stage.name())?;
RetryCountRestartHelper::clear_progress(&mut state, stage.name())?;
}
assert!(StdRestartHelper::should_restart(
assert!(RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
2
)?);
// task failed, let's resume
// we still have one more try!
assert!(StdRestartHelper::should_restart(
assert!(RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
2
@@ -835,20 +835,20 @@ pub mod test {
// task failed, let's resume
// out of retries, so now we skip
assert!(!StdRestartHelper::should_restart(
assert!(!RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
2
)?);
StdRestartHelper::clear_progress(&mut state, stage.name())?;
RetryCountRestartHelper::clear_progress(&mut state, stage.name())?;
// we previously exhausted this testcase's retries, so we skip
assert!(!StdRestartHelper::should_restart(
assert!(!RetryCountRestartHelper::should_restart(
&mut state,
stage.name(),
2
)?);
StdRestartHelper::clear_progress(&mut state, stage.name())?;
RetryCountRestartHelper::clear_progress(&mut state, stage.name())?;
Ok(())
}
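The test above pins down the renamed helper's semantics: with `max_retries = 2`, `should_restart` answers `true` twice for the same testcase and `false` afterwards, and a testcase whose retries were exhausted stays skipped even after `clear_progress`. The sketch below reproduces that behavior in a self-contained way; the field names mirror the struct in this diff (`tries_remaining`, `skipped`), but the logic is a simplified stand-in for illustration, not LibAFL's implementation (which keys the data per stage name and stores it as named metadata in the state):

    use std::collections::HashSet;

    // Stand-in for LibAFL's CorpusId, just for this sketch.
    type CorpusId = usize;

    #[derive(Debug, Default)]
    struct RetryCounter {
        tries_remaining: Option<usize>,
        skipped: HashSet<CorpusId>,
    }

    impl RetryCounter {
        // True while the current testcase still has tries left this round.
        fn should_restart(&mut self, id: CorpusId, max_retries: usize) -> bool {
            if self.skipped.contains(&id) {
                return false; // exhausted in an earlier round, keep skipping
            }
            let remaining = self.tries_remaining.unwrap_or(max_retries);
            if remaining > 0 {
                self.tries_remaining = Some(remaining - 1);
                true
            } else {
                // Out of tries: remember this testcase and skip it from now on.
                self.skipped.insert(id);
                self.tries_remaining = None;
                false
            }
        }

        // Called when the stage finished normally: reset the per-round counter,
        // but keep the set of permanently skipped testcases.
        fn clear_progress(&mut self) {
            self.tries_remaining = None;
        }
    }

    fn main() {
        let mut helper = RetryCounter::default();
        // Mirrors the test above: two tries allowed, the third call skips.
        assert!(helper.should_restart(0, 2));
        assert!(helper.should_restart(0, 2));
        assert!(!helper.should_restart(0, 2));
        helper.clear_progress();
        // Retries were exhausted, so this testcase stays skipped afterwards.
        assert!(!helper.should_restart(0, 2));
    }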

View File

@@ -15,7 +15,7 @@ use crate::{
inputs::Input,
mark_feature_time,
mutators::{MultiMutator, MutationResult, Mutator},
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
start_timer,
state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasRand, UsesState},
Error, HasMetadata, HasNamedMetadata,
@@ -237,11 +237,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
StdRestartHelper::should_restart(state, &self.name, 3)
RetryCountRestartHelper::should_restart(state, &self.name, 3)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}
@@ -335,12 +335,12 @@ where
#[inline]
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// Make sure we don't get stuck crashing on a single testcase
StdRestartHelper::should_restart(state, &self.name, 3)
RetryCountRestartHelper::should_restart(state, &self.name, 3)
}
#[inline]
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
#[inline]

View File

@@ -13,7 +13,7 @@ use crate::{
fuzzer::Evaluator,
mutators::Mutator,
schedulers::{testcase_score::CorpusPowerTestcaseScore, TestcaseScore},
stages::{mutational::MutatedTransform, MutationalStage, Stage, StdRestartHelper},
stages::{mutational::MutatedTransform, MutationalStage, RetryCountRestartHelper, Stage},
state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasRand, UsesState},
Error, HasMetadata, HasNamedMetadata,
};
@@ -103,11 +103,11 @@ where
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// Make sure we don't get stuck crashing on a single testcase
StdRestartHelper::should_restart(state, &self.name, 3)
RetryCountRestartHelper::should_restart(state, &self.name, 3)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -20,7 +20,7 @@ use crate::{
executors::{Executor, ExitKind, HasObservers},
fuzzer::{Evaluator, EvaluatorObservers, ExecutionProcessor},
inputs::{Input, InputConverter, UsesInput},
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
state::{HasCorpus, HasExecutions, HasRand, State, UsesState},
Error, HasMetadata, HasNamedMetadata,
};
@@ -152,12 +152,12 @@ where
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// TODO: Needs proper crash handling for when an imported testcase crashes
// For now, make sure we don't get stuck crashing on this testcase
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
#[inline]
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -12,7 +12,7 @@ use crate::{
executors::{Executor, HasObservers, ShadowExecutor},
mark_feature_time,
observers::ObserversTuple,
stages::{Stage, StdRestartHelper},
stages::{RetryCountRestartHelper, Stage},
start_timer,
state::{HasCorpus, HasCurrentTestcase, HasExecutions, State, UsesState},
Error, HasNamedMetadata,
@@ -45,7 +45,7 @@ where
{
#[allow(rustdoc::broken_intra_doc_links)]
/// Perform tracing on the given `CorpusId`. Useful if you are wrapping [`TracingStage`] with your
/// own stage and you need to manage [`super::NestedStageStdRestartHelper`] differently
/// own stage and you need to manage [`super::NestedStageRetryCountRestartHelper`] differently
/// see [`super::ConcolicTracingStage`]'s implementation as an example of usage.
pub fn trace(
&mut self,
@@ -100,11 +100,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}
@@ -220,11 +220,11 @@ where
}
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}

View File

@@ -5,7 +5,7 @@ use libafl::{
executors::{Executor, HasObservers},
inputs::{BytesInput, UsesInput},
observers::ObserversTuple,
stages::{colorization::TaintMetadata, Stage, StdRestartHelper},
stages::{colorization::TaintMetadata, RetryCountRestartHelper, Stage},
state::{HasCorpus, HasCurrentTestcase, HasExecutions, UsesState},
Error, HasMetadata, HasNamedMetadata,
};
@@ -127,12 +127,12 @@ where
fn should_restart(&mut self, state: &mut Self::State) -> Result<bool, Error> {
// Tracing stage is always deterministic
// don't restart
StdRestartHelper::no_retry(state, &self.name)
RetryCountRestartHelper::no_retry(state, &self.name)
}
fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
// TODO: this may need better resumption? (Or is it always used with a forkserver?)
StdRestartHelper::clear_progress(state, &self.name)
RetryCountRestartHelper::clear_progress(state, &self.name)
}
}