Merge branch 'main' of github.com:AFLplusplus/LibAFL into main

Andrea Fioraldi 2021-12-08 16:32:36 +01:00
commit 7c7c7e679f
19 changed files with 394 additions and 88 deletions

View File

@ -63,7 +63,7 @@ jobs:
# cargo-hack's --feature-powerset would be nice here but libafl has too many knobs
- name: Check each feature
# Skipping python as it has to be built with the `maturin` tool
run: cargo hack check --feature-powerset --depth=2 --exclude-features=agpl,nautilus,python,sancov_pcguard_edges --no-dev-deps
run: cargo hack check --feature-powerset --depth=2 --exclude-features=agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386 --no-dev-deps
# pcguard edges and pcguard hitcounts are not compatible and we need to build them separately
- name: Check pcguard edges
run: cargo check --features=sancov_pcguard_edges

View File

@ -34,14 +34,12 @@ pub fn main() {
Arg::new("executable")
.about("The instrumented binary we want to fuzz")
.required(true)
.index(1)
.takes_value(true),
)
.arg(
Arg::new("in")
.about("The directory to read initial inputs from ('seeds')")
.required(true)
.index(2)
.takes_value(true),
)
.arg(
@ -51,6 +49,11 @@ pub fn main() {
.long("timeout")
.default_value("1200"),
)
.arg(
Arg::new("arguments")
.setting(clap::ArgSettings::MultipleValues)
.takes_value(true),
)
.get_matches();
let corpus_dirs = vec![PathBuf::from(res.value_of("in").unwrap().to_string())];
@ -124,10 +127,15 @@ pub fn main() {
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
// Create the executor for the forkserver
let args = match res.values_of("arguments") {
Some(vec) => vec.map(|s| s.to_string()).collect::<Vec<String>>().to_vec(),
None => [].to_vec(),
};
let mut executor = TimeoutForkserverExecutor::new(
ForkserverExecutor::new(
res.value_of("executable").unwrap().to_string(),
&[],
&args,
true,
tuple_list!(edges_observer, time_observer),
)
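Side note on the `arguments` handling added above: the explicit `match` plus `.to_vec()` can be written more compactly. A minimal, self-contained sketch of the same pattern, assuming the clap 3 `ArgMatches` value `res` from this fuzzer:

// Collect the optional trailing program arguments into a Vec<String>,
// falling back to an empty Vec when none were passed on the command line.
let args: Vec<String> = res
    .values_of("arguments")
    .map(|vals| vals.map(String::from).collect())
    .unwrap_or_default();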

View File

@ -22,7 +22,9 @@ use libafl::{
shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge},
},
corpus::{Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler},
corpus::{
Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, PowerQueueCorpusScheduler,
},
events::SimpleRestartingEventManager,
executors::{inprocess::InProcessExecutor, ExitKind, TimeoutExecutor},
feedback_or,
@ -36,7 +38,11 @@ use libafl::{
tokens_mutations, Tokens,
},
observers::{StdMapObserver, TimeObserver},
stages::{StdMutationalStage, TracingStage},
stages::{
calibrate::CalibrationStage,
power::{PowerMutationalStage, PowerSchedule},
StdMutationalStage, TracingStage,
},
state::{HasCorpus, HasMetadata, StdState},
Error,
};
@ -248,8 +254,14 @@ fn fuzz(
println!("Warning: LLVMFuzzerInitialize failed with -1")
}
let calibration = CalibrationStage::new(&mut state, &edges_observer);
let mutator = StdScheduledMutator::new(havoc_mutations());
let power = PowerMutationalStage::new(mutator, PowerSchedule::FAST, &edges_observer);
let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(PowerQueueCorpusScheduler::new());
// A minimization+queue policy to get testcases from the corpus
let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new());
//let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new());
// A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
@ -297,7 +309,7 @@ fn fuzz(
let mutational = StdMutationalStage::new(mutator);
// The order of the stages matters!
let mut stages = tuple_list!(tracing, i2s, mutational);
let mut stages = tuple_list!(calibration, tracing, i2s, mutational, power);
// Read tokens
if let Some(tokenfile) = tokenfile {
@ -324,6 +336,7 @@ fn fuzz(
dup2(null_fd, io::stdout().as_raw_fd())?;
dup2(null_fd, io::stderr().as_raw_fd())?;
}
// reopen file to make sure we're at the end
log.replace(
OpenOptions::new()

View File

@ -16,4 +16,4 @@ debug = true
[dependencies]
libafl = { path = "../../libafl/" }
libafl_qemu = { path = "../../libafl_qemu/" }
libafl_qemu = { path = "../../libafl_qemu/", features = ["x86_64"] }

View File

@ -177,14 +177,18 @@ where
monitor.display(event.name().to_string(), client_id);
Ok(BrokerEventResult::Forward)
}
Event::UpdateExecutions {
Event::UpdateExecStats {
time,
executions,
stability,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
let client = monitor.client_stats_mut_for(client_id);
client.update_executions(*executions as u64, *time);
if let Some(stability) = stability {
client.update_stability(*stability);
}
monitor.display(event.name().to_string(), client_id);
Ok(BrokerEventResult::Handled)
}
@ -202,6 +206,7 @@ where
Event::UpdatePerfMonitor {
time,
executions,
stability,
introspection_monitor,
phantom: _,
} => {
@ -213,6 +218,10 @@ where
// Update the normal monitor for this client
client.update_executions(*executions as u64, *time);
if let Some(stability) = stability {
client.update_stability(*stability);
}
// Update the performance monitor for this client
client.update_introspection_monitor((**introspection_monitor).clone());

View File

@ -176,9 +176,11 @@ where
executions: usize,
},
/// New stats event to monitor.
UpdateExecutions {
UpdateExecStats {
/// The time of generation of the [`Event`]
time: Duration,
/// The stability of this fuzzer node, if known
stability: Option<f32>,
/// The executions of this client
executions: usize,
/// [`PhantomData`]
@ -198,10 +200,10 @@ where
UpdatePerfMonitor {
/// The time of generation of the event
time: Duration,
/// The executions of this client
executions: usize,
/// The stability of this fuzzer node, if known
stability: Option<f32>,
/// Current performance statistics
introspection_monitor: Box<ClientPerfMonitor>,
@ -243,8 +245,9 @@ where
time: _,
executions: _,
} => "Testcase",
Event::UpdateExecutions {
Event::UpdateExecStats {
time: _,
stability: _,
executions: _,
phantom: _,
}
@ -257,6 +260,7 @@ where
Event::UpdatePerfMonitor {
time: _,
executions: _,
stability: _,
introspection_monitor: _,
phantom: _,
} => "PerfMonitor",
@ -288,6 +292,24 @@ where
/// This should not happen for normal use-cases.
fn fire<S>(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>;
/// Send off an [`Event::Log`] event to the broker
/// This is a shortcut for [`EventFirer::fire`] with [`Event::Log`] as argument.
fn log<S>(
&mut self,
state: &mut S,
severity_level: LogSeverity,
message: String,
) -> Result<(), Error> {
self.fire(
state,
Event::Log {
severity_level,
message,
phantom: PhantomData,
},
)
}
/// Serialize all observers for this type and manager
fn serialize_observers<OT, S>(&mut self, observers: &OT) -> Result<Vec<u8>, Error>
where
@ -320,6 +342,7 @@ where
S: HasExecutions + HasClientPerfMonitor,
{
let executions = *state.executions();
let stability = *state.stability();
let cur = current_time();
// default to 0 here to avoid crashes on clock skew
if cur.checked_sub(last_report_time).unwrap_or_default() > monitor_timeout {
@ -327,8 +350,9 @@ where
#[cfg(not(feature = "introspection"))]
self.fire(
state,
Event::UpdateExecutions {
Event::UpdateExecStats {
executions,
stability,
time: cur,
phantom: PhantomData,
},
@ -348,6 +372,7 @@ where
Event::UpdatePerfMonitor {
executions,
time: cur,
stability,
introspection_monitor: Box::new(state.introspection_monitor().clone()),
phantom: PhantomData,
},
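For reference, a hedged one-line usage sketch of the new `log` shortcut added above (assuming an event manager `mgr` and a state `state` in scope, as in the calibration stage later in this diff):

// Equivalent to firing Event::Log { severity_level, message, .. } by hand.
mgr.log(state, LogSeverity::Warn, "Corpus entry errored on execution!".into())?;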

View File

@ -150,15 +150,20 @@ where
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
Event::UpdateExecutions {
Event::UpdateExecStats {
time,
executions,
stability,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
monitor
.client_stats_mut_for(0)
.update_executions(*executions as u64, *time);
let client = monitor.client_stats_mut_for(0);
client.update_executions(*executions as u64, *time);
if let Some(stability) = stability {
client.update_stability(*stability);
}
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}
@ -177,13 +182,17 @@ where
Event::UpdatePerfMonitor {
time,
executions,
stability,
introspection_monitor,
phantom: _,
} => {
// TODO: The monitor buffer should be added on client add.
monitor.client_stats_mut()[0].update_executions(*executions as u64, *time);
monitor.client_stats_mut()[0]
.update_introspection_monitor((**introspection_monitor).clone());
let client = &mut monitor.client_stats_mut()[0];
client.update_executions(*executions as u64, *time);
client.update_introspection_monitor((**introspection_monitor).clone());
if let Some(stability) = stability {
client.update_stability(*stability);
}
monitor.display(event.name().to_string(), 0);
Ok(BrokerEventResult::Handled)
}

View File

@ -218,7 +218,6 @@ impl InProcessHandlers {
}
}
#[must_use]
pub fn new<E, EM, I, OC, OF, OT, S, Z>() -> Result<Self, Error>
where
I: Input,

View File

@ -40,7 +40,7 @@ use crate::{
use serde::{Deserialize, Serialize};
/// How an execution finished.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub enum ExitKind {
/// The run exited normally.
Ok,

View File

@ -118,6 +118,29 @@ impl<E> TimeoutExecutor<E> {
}
}
#[cfg(unix)]
pub fn set_timeout(&mut self, exec_tmout: Duration) {
let milli_sec = exec_tmout.as_millis();
let it_value = Timeval {
tv_sec: (milli_sec / 1000) as i64,
tv_usec: (milli_sec % 1000) as i64,
};
let it_interval = Timeval {
tv_sec: 0,
tv_usec: 0,
};
let itimerval = Itimerval {
it_interval,
it_value,
};
self.itimerval = itimerval;
}
#[cfg(windows)]
pub fn set_timeout(&mut self, exec_tmout: Duration) {
self.milli_sec = exec_tmout.as_millis() as u32;
}
/// Retrieve the inner `Executor` that is wrapped by this `TimeoutExecutor`.
pub fn inner(&mut self) -> &mut E {
&mut self.executor
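A hedged usage sketch for the new `set_timeout` setter; the `TimeoutExecutor::new(inner, timeout)` constructor and the `inner_executor` value are assumed from the existing LibAFL API and are not part of this diff:

use std::time::Duration;

// Wrap an existing executor with a 5s timeout, then tighten it later at runtime.
let mut executor = TimeoutExecutor::new(inner_executor, Duration::from_secs(5));
executor.set_timeout(Duration::from_millis(1500));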

View File

@ -243,7 +243,9 @@ where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
{
fn reset(&mut self) -> Result<(), Error> {
self.history_map.iter_mut().for_each(|x| *x = T::default());
self.history_map
.iter_mut()
.for_each(|x| *x = T::min_value());
Ok(())
}
}
@ -266,7 +268,7 @@ where
#[must_use]
pub fn new(name: &'static str, map_size: usize) -> Self {
Self {
history_map: vec![T::default(); map_size],
history_map: vec![T::min_value(); map_size],
name: name.to_string(),
}
}
@ -278,7 +280,7 @@ where
T: Debug,
{
Self {
history_map: vec![T::default(); map_observer.len()],
history_map: vec![T::min_value(); map_observer.len()],
name: map_observer.name().to_string(),
}
}
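The switch from `T::default()` to `T::min_value()` above only changes behavior for signed map types (for unsigned types both are zero); a minimal, self-contained check using the same `num_traits` bound:

use num_traits::PrimInt; // same bound as the MapFeedbackState impl above

fn fresh_history<T: PrimInt>(len: usize) -> Vec<T> {
    // Start from the smallest representable value, as the diff now does.
    vec![T::min_value(); len]
}

fn main() {
    assert_eq!(u8::default(), u8::min_value()); // unsigned maps: unchanged (both 0)
    assert_ne!(i8::default(), i8::min_value()); // signed maps: -128 instead of 0
    let h: Vec<i8> = fresh_history(4);
    assert!(h.iter().all(|&x| x == i8::min_value()));
}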

View File

@ -3,15 +3,13 @@
pub mod multi;
pub use multi::MultiMonitor;
use serde::{Deserialize, Serialize};
use alloc::{string::String, vec::Vec};
use alloc::{
string::{String, ToString},
vec::Vec,
};
use core::{fmt, time, time::Duration};
use hashbrown::HashMap;
#[cfg(feature = "introspection")]
use alloc::string::ToString;
use serde::{Deserialize, Serialize};
use crate::bolts::{current_time, format_duration_hms};
@ -59,7 +57,8 @@ pub struct ClientStats {
pub last_execs_per_sec: f32,
/// User-defined monitor
pub user_monitor: HashMap<String, UserStats>,
/// The client's stability, if we ever received a stability value
pub stability: Option<f32>,
/// Client performance statistics
#[cfg(feature = "introspection")]
pub introspection_monitor: ClientPerfMonitor,
@ -89,6 +88,11 @@ impl ClientStats {
self.objective_size = objective_size;
}
/// We got new information about stability for this client; insert it.
pub fn update_stability(&mut self, stability: f32) {
self.stability = Some(stability);
}
/// Get the calculated executions per second for this client
#[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)]
pub fn execs_per_sec(&mut self, cur_time: time::Duration) -> u64 {
@ -150,6 +154,24 @@ pub trait Monitor {
/// show the monitor to the user
fn display(&mut self, event_msg: String, sender_id: u32);
/// Show the stability
fn stability(&self) -> Option<f32> {
let mut stability_total = 0_f32;
let mut num = 0_usize;
for stat in self.client_stats() {
if let Some(stability) = stat.stability {
stability_total += stability;
num += 1;
}
}
if num == 0 {
None
} else {
#[allow(clippy::cast_precision_loss)]
Some(stability_total / num as f32)
}
}
/// Amount of elements in the corpus (combined for all children)
fn corpus_size(&self) -> u64 {
self.client_stats()
@ -269,13 +291,18 @@ where
fn display(&mut self, event_msg: String, sender_id: u32) {
let fmt = format!(
"[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
"[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, {} exec/sec: {}",
event_msg,
sender_id,
format_duration_hms(&(current_time() - self.start_time)),
self.client_stats().len(),
self.corpus_size(),
self.objective_size(),
if let Some(stability) = self.stability() {
format!(", stability: {:.2}", stability)
} else {
"".to_string()
},
self.total_execs(),
self.execs_per_sec()
);
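The `stability()` default implementation above averages only the clients that ever reported a value; a tiny self-contained sketch of that aggregation with hypothetical numbers:

// Two of three clients reported stability; the None client is ignored.
let per_client = [Some(0.98_f32), None, Some(0.90)];
let (sum, num) = per_client
    .iter()
    .flatten()
    .fold((0.0_f32, 0_usize), |(s, n), &v| (s + v, n + 1));
let overall = if num == 0 { None } else { Some(sum / num as f32) };
assert!((overall.unwrap() - 0.94).abs() < 1e-6); // (0.98 + 0.90) / 2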

View File

@ -3,12 +3,14 @@
use crate::{
bolts::current_time,
corpus::{Corpus, PowerScheduleTestcaseMetaData},
executors::{Executor, HasObservers},
events::{EventFirer, LogSeverity},
executors::{Executor, ExitKind, HasObservers},
feedbacks::{FeedbackStatesTuple, MapFeedbackState},
fuzzer::Evaluator,
inputs::Input,
observers::{MapObserver, ObserversTuple},
stages::Stage,
state::{HasCorpus, HasMetadata},
state::{HasClientPerfMonitor, HasCorpus, HasFeedbackStates, HasMetadata},
Error,
};
use alloc::{
@ -20,11 +22,13 @@ use num_traits::PrimInt;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
pub struct CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
pub struct CalibrationStage<C, E, EM, FT, I, O, OT, S, T, Z>
where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I>,
FT: FeedbackStatesTuple,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
@ -34,53 +38,145 @@ where
map_observer_name: String,
stage_max: usize,
#[allow(clippy::type_complexity)]
phantom: PhantomData<(C, E, EM, I, O, OT, S, T, Z)>,
phantom: PhantomData<(C, E, EM, FT, I, O, OT, S, T, Z)>,
}
const CAL_STAGE_MAX: usize = 8;
const CAL_STAGE_START: usize = 4;
const CAL_STAGE_MAX: usize = 16;
impl<C, E, EM, I, O, OT, S, T, Z> Stage<E, EM, S, Z>
for CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
impl<C, E, EM, FT, I, O, OT, S, T, Z> Stage<E, EM, S, Z>
for CalibrationStage<C, E, EM, FT, I, O, OT, S, T, Z>
where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I>,
FT: FeedbackStatesTuple,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
S: HasCorpus<C, I> + HasMetadata,
S: HasCorpus<C, I> + HasMetadata + HasFeedbackStates<FT> + HasClientPerfMonitor,
Z: Evaluator<E, EM, I, S>,
{
#[inline]
#[allow(clippy::let_and_return)]
#[allow(clippy::let_and_return, clippy::too_many_lines)]
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut S,
manager: &mut EM,
mgr: &mut EM,
corpus_idx: usize,
) -> Result<(), Error> {
let iter = self.stage_max;
let mut iter = self.stage_max;
let handicap = state
.metadata()
.get::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?
.queue_cycles;
let input = state
.corpus()
.get(corpus_idx)?
.borrow_mut()
.load_input()?
.clone();
let start = current_time();
// Run once to get the initial calibration map
executor.observers_mut().pre_exec_all(state, &input)?;
let mut start = current_time();
for _i in 0..iter {
let mut total_time = if executor.run_target(fuzzer, state, mgr, &input)? == ExitKind::Ok {
current_time() - start
} else {
mgr.log(
state,
LogSeverity::Warn,
"Corpus entry errored on execution!".into(),
)?;
// assume one second as default time
Duration::from_secs(1)
};
let map_first = &executor
.observers()
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::KeyNotFound("MapObserver not found".to_string()))?
.map()
.unwrap()
.to_vec();
// Run CAL_STAGE_START - 1 more times, increasing by 2 every time a run
// is found to be unstable, up to CAL_STAGE_MAX total runs.
let mut i = 1;
let mut has_errors = false;
let mut unstable_entries: usize = 0;
let map_len: usize = map_first.len();
while i < iter {
let input = state
.corpus()
.get(corpus_idx)?
.borrow_mut()
.load_input()?
.clone();
let _ = executor.run_target(fuzzer, state, manager, &input)?;
executor.observers_mut().pre_exec_all(state, &input)?;
start = current_time();
if executor.run_target(fuzzer, state, mgr, &input)? != ExitKind::Ok {
if !has_errors {
mgr.log(
state,
LogSeverity::Warn,
"Corpus entry errored on execution!".into(),
)?;
has_errors = true;
if iter < CAL_STAGE_MAX {
iter += 2;
};
}
continue;
};
total_time += current_time() - start;
let map = &executor
.observers()
.match_name::<O>(&self.map_observer_name)
.ok_or_else(|| Error::KeyNotFound("MapObserver not found".to_string()))?
.map()
.unwrap()
.to_vec();
let history_map = &mut state
.feedback_states_mut()
.match_name_mut::<MapFeedbackState<T>>(&self.map_observer_name)
.unwrap()
.history_map;
for j in 0..map_len {
if map_first[j] != map[j] && history_map[j] != T::max_value() {
history_map[j] = T::max_value();
unstable_entries += 1;
};
}
i += 1;
}
let end = current_time();
#[allow(clippy::cast_precision_loss)]
if unstable_entries != 0 {
*state.stability_mut() = Some((map_len - unstable_entries) as f32 / (map_len as f32));
if iter < CAL_STAGE_MAX {
iter += 2;
}
};
let psmeta = state
.metadata_mut()
.get_mut::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
let map = executor
.observers()
@ -89,20 +185,14 @@ where
let bitmap_size = map.count_bytes();
let psmeta = state
.metadata_mut()
.get_mut::<PowerScheduleMetadata>()
.ok_or_else(|| Error::KeyNotFound("PowerScheduleMetadata not found".to_string()))?;
psmeta.set_exec_time(psmeta.exec_time() + (end - start));
psmeta.set_exec_time(psmeta.exec_time() + total_time);
psmeta.set_cycles(psmeta.cycles() + (iter as u64));
psmeta.set_bitmap_size(psmeta.bitmap_size() + bitmap_size);
psmeta.set_bitmap_entries(psmeta.bitmap_entries() + 1);
// println!("psmeta: {:#?}", psmeta);
let mut testcase = state.corpus().get(corpus_idx)?.borrow_mut();
testcase.set_exec_time((end - start) / (iter as u32));
testcase.set_exec_time(total_time / (iter as u32));
// println!("time: {:#?}", testcase.exec_time());
let data = testcase
.metadata_mut()
@ -208,11 +298,13 @@ impl PowerScheduleMetadata {
crate::impl_serdeany!(PowerScheduleMetadata);
impl<C, E, I, EM, O, OT, S, T, Z> CalibrationStage<C, E, EM, I, O, OT, S, T, Z>
impl<C, E, EM, FT, I, O, OT, S, T, Z> CalibrationStage<C, E, EM, FT, I, O, OT, S, T, Z>
where
T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug,
C: Corpus<I>,
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
EM: EventFirer<I>,
FT: FeedbackStatesTuple,
I: Input,
O: MapObserver<T>,
OT: ObserversTuple<I, S>,
@ -223,7 +315,7 @@ where
state.add_metadata::<PowerScheduleMetadata>(PowerScheduleMetadata::new());
Self {
map_observer_name: map_observer_name.name().to_string(),
stage_max: CAL_STAGE_MAX,
stage_max: CAL_STAGE_START,
phantom: PhantomData,
}
}
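To make the stability bookkeeping above easier to follow, here is a minimal, self-contained sketch of the same accounting (hypothetical map contents; the real code additionally pins unstable entries to `T::max_value()` in the feedback's history map):

// Compare each later calibration run against the first one and count
// coverage-map entries that ever differ ("unstable" entries).
fn stability(map_first: &[u8], later_runs: &[Vec<u8>]) -> Option<f32> {
    let map_len = map_first.len();
    let mut unstable = vec![false; map_len];
    for map in later_runs {
        for j in 0..map_len {
            if map[j] != map_first[j] {
                unstable[j] = true;
            }
        }
    }
    let unstable_entries = unstable.iter().filter(|&&u| u).count();
    if unstable_entries == 0 {
        None // as above, stability is only recorded once instability was observed
    } else {
        Some((map_len - unstable_entries) as f32 / map_len as f32)
    }
}

fn main() {
    let first = vec![1_u8, 0, 3, 0];
    let runs = vec![vec![1, 0, 3, 0], vec![1, 2, 3, 0]]; // one of four entries flips
    assert_eq!(stability(&first, &runs), Some(0.75));
}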

View File

@ -246,7 +246,7 @@ fn generate_mutations(iter: impl Iterator<Item = (SymExprRef, SymExpr)>) -> Vec<
assert_eq!(bits_to_insert % 8, 0, "can only insert full bytes");
let after_len = (u64::from(target.get_size()) / 8) - offset - (bits_to_insert / 8);
Some(
std::array::IntoIter::new([
[
if offset == 0 {
None
} else {
@ -267,7 +267,8 @@ fn generate_mutations(iter: impl Iterator<Item = (SymExprRef, SymExpr)>) -> Vec<
false,
))
},
])
]
.into_iter()
.reduce(|acc: Option<BV>, val: Option<BV>| match (acc, val) {
(Some(prev), Some(next)) => Some(prev.concat(&next)),
(Some(prev), None) => Some(prev),

View File

@ -31,21 +31,16 @@ pub mod sync;
#[cfg(feature = "std")]
pub use sync::*;
use crate::events::EventFirer;
use crate::events::EventRestarter;
use crate::events::HasEventManagerId;
use crate::events::ProgressReporter;
use crate::state::HasExecutions;
use crate::{
bolts::rands::Rand,
corpus::Corpus,
corpus::CorpusScheduler,
executors::Executor,
executors::HasObservers,
corpus::{Corpus, CorpusScheduler},
events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter},
executors::{Executor, HasObservers},
inputs::Input,
observers::ObserversTuple,
state::HasRand,
state::{HasClientPerfMonitor, HasCorpus},
state::{HasClientPerfMonitor, HasCorpus, HasExecutions, HasRand},
Error, EvaluatorObservers, ExecutesInput, ExecutionProcessor, HasCorpusScheduler,
};
use core::{convert::From, marker::PhantomData};

View File

@ -78,6 +78,12 @@ pub trait HasClientPerfMonitor {
/// Mutatable ref to [`ClientPerfMonitor`]
fn introspection_monitor_mut(&mut self) -> &mut ClientPerfMonitor;
/// This node's stability
fn stability(&self) -> &Option<f32>;
/// This node's stability (mut)
fn stability_mut(&mut self) -> &mut Option<f32>;
}
/// Trait for elements offering metadata
@ -163,6 +169,8 @@ where
metadata: SerdeAnyMap,
/// MaxSize testcase size for mutators that appreciate it
max_size: usize,
/// The stability of the current fuzzing process
stability: Option<f32>,
/// Performance statistics for this fuzzer
#[cfg(feature = "introspection")]
@ -552,6 +560,7 @@ where
Self {
rand,
executions: 0,
stability: None,
start_time: Duration::from_millis(0),
metadata: SerdeAnyMap::default(),
corpus,
@ -581,6 +590,18 @@ where
fn introspection_monitor_mut(&mut self) -> &mut ClientPerfMonitor {
&mut self.introspection_monitor
}
/// This node's stability
#[inline]
fn stability(&self) -> &Option<f32> {
&self.stability
}
/// This node's stability (mut)
#[inline]
fn stability_mut(&mut self) -> &mut Option<f32> {
&mut self.stability
}
}
#[cfg(not(feature = "introspection"))]
@ -599,4 +620,16 @@ where
fn introspection_monitor_mut(&mut self) -> &mut ClientPerfMonitor {
unimplemented!()
}
/// This node's stability
#[inline]
fn stability(&self) -> &Option<f32> {
&self.stability
}
/// This node's stability (mut)
#[inline]
fn stability_mut(&mut self) -> &mut Option<f32> {
&mut self.stability
}
}

View File

@ -14,6 +14,14 @@ edition = "2021"
python = ["pyo3", "pyo3-build-config"]
default = []
# The following architecture features are mutually exclusive.
x86_64 = [] # build qemu for x86_64 (default)
i386 = [] # build qemu for i386
arm = [] # build qemu for arm
aarch64 = [] # build qemu for aarch64
clippy = [] # special feature for clippy, don't use in normal projects
[dependencies]
libafl = { path = "../libafl", version = "0.7.0" }
libafl_targets = { path = "../libafl_targets", version = "0.7.0" }

View File

@ -11,12 +11,24 @@ fn build_dep_check(tools: &[&str]) {
}
}
#[macro_export]
macro_rules! assert_unique_feature {
() => {};
($first:tt $(,$rest:tt)*) => {
$(
#[cfg(not(feature = "clippy"))] // ignore multiple enabled arch features when building for clippy
#[cfg(all(feature = $first, feature = $rest))]
compile_error!(concat!("features \"", $first, "\" and \"", $rest, "\" cannot be used together"));
)*
assert_unique_feature!($($rest),*);
}
}
#[allow(clippy::too_many_lines)]
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=src/asan-giovese.c");
println!("cargo:rerun-if-changed=src/asan-giovese.h");
println!("cargo:rerun-if-env-changed=CPU_TARGET");
println!("cargo:rerun-if-env-changed=CROSS_CC");
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
@ -24,13 +36,40 @@ fn main() {
return;
}
// Make sure that at most one architecture feature is set
// Else, we default to `x86_64` - having a default makes CI easier :)
assert_unique_feature!("arm", "aarch64", "i386", "x86_64");
#[cfg(not(any(
feature = "arm",
feature = "aarch64",
feature = "i386",
feature = "x86_64"
)))]
println!(
"cargo:warning=No architecture feature enabled for libafl_qemu, supported: arm, aarch64, i386, x86_64 - defaulting to x86_64"
);
let cpu_target = if cfg!(feature = "clippy") {
// assume x86_64 for clippy
"x86_64"
} else if cfg!(feature = "arm") {
"arm"
} else if cfg!(feature = "aarch64") {
"aarch64"
} else if cfg!(feature = "i386") {
"368"
} else {
// if cfg!(feature = "x86_64") {
"x86_64"
/*} else {
panic!("No architecture feture enabled for libafl_qemu");
*/
};
let jobs = env::var("CARGO_BUILD_JOBS");
let cpu_target = env::var("CPU_TARGET").unwrap_or_else(|_| {
println!("cargo:warning=CPU_TARGET is not set, default to x86_64");
"x86_64".to_owned()
});
let cross_cc = env::var("CROSS_CC").unwrap_or_else(|_| {
println!("cargo:warning=CROSS_CC is not set, default to cc (things can go wrong if CPU_TARGET is not the host arch)");
println!("cargo:warning=CROSS_CC is not set, default to cc (things can go wrong if the selected cpu target ({}) is not the host arch ({}))", cpu_target, env::consts::ARCH);
"cc".to_owned()
});
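For clarity, the `assert_unique_feature!` macro defined earlier in this build script expands to one pairwise guard per feature combination (the `not(feature = "clippy")` attribute is omitted here for brevity); roughly:

// First recursion level for assert_unique_feature!("arm", "aarch64", "i386", "x86_64"):
#[cfg(all(feature = "arm", feature = "aarch64"))]
compile_error!("features \"arm\" and \"aarch64\" cannot be used together");
#[cfg(all(feature = "arm", feature = "i386"))]
compile_error!("features \"arm\" and \"i386\" cannot be used together");
#[cfg(all(feature = "arm", feature = "x86_64"))]
compile_error!("features \"arm\" and \"x86_64\" cannot be used together");
// ...then the macro recurses on ("aarch64", "i386", "x86_64") until the list is empty,
// covering all six pairs, so enabling any two architecture features aborts the build.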

View File

@ -1,17 +1,40 @@
use std::env;
#[cfg(feature = "aarch64")]
pub mod aarch64;
pub mod arm;
pub mod i386;
pub mod x86_64;
#[cfg(cpu_target = "aarch64")]
#[cfg(all(feature = "aarch64", not(feature = "clippy")))]
pub use aarch64::*;
#[cfg(cpu_target = "arm")]
#[cfg(feature = "arm")]
pub mod arm;
#[cfg(all(feature = "arm", not(feature = "clippy")))]
pub use arm::*;
#[cfg(cpu_target = "i386")]
#[cfg(feature = "i386")]
pub mod i386;
#[cfg(all(feature = "i386", not(feature = "clippy")))]
pub use i386::*;
#[cfg(cpu_target = "x86_64")]
// We default to x86_64, having a default makes CI easier :)
#[cfg(any(
feature = "x86_64",
not(any(
feature = "arm",
feature = "aarch64",
feature = "i386",
feature = "x86_64"
))
))]
pub mod x86_64;
#[cfg(any(
feature = "x86_64",
not(any(
feature = "arm",
feature = "aarch64",
feature = "i386",
feature = "x86_64"
))
))]
pub use x86_64::*;
pub mod elf;