change Stability calculation formula to AFL++'s (#2275)

* change Stability calculation formula to AFL++'s (the new ratio is sketched below)

* clippy

* use MapFeedbackMetadata instead of recalculating filled entries in map

* calculate filled entries if MapFeedbackMetadata is not available
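
For reference, the stat shown to users is now computed the AFL++ way: stability = (filled entries - unstable entries) / filled entries, where a "filled" entry is a map index that was hit at least once. A minimal sketch of that ratio as a standalone helper (the function name and signature are illustrative, not part of the LibAFL API):

    /// Illustrative only: AFL++-style stability as a fraction in [0.0, 1.0].
    /// `filled_entries` is the number of map indexes hit at least once;
    /// `unstable_entries` is the number of those indexes whose value differed
    /// between calibration runs.
    fn stability(filled_entries: usize, unstable_entries: usize) -> f64 {
        assert!(filled_entries > 0, "stability is undefined for an empty map");
        assert!(unstable_entries <= filled_entries);
        (filled_entries - unstable_entries) as f64 / filled_entries as f64
    }

    // Example: 3 unstable entries out of 200 filled entries -> 0.985.
    // The old formula divided by the whole map size, which inflated the value.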

---------

Co-authored-by: Dongjia "toka" Zhang <tokazerkje@outlook.com>
Author: Aarnav
Date:   2024-06-07 14:56:37 +02:00 (committed by GitHub)
Commit: 477941e0e2
Parent: 2cc33464fa


@@ -23,8 +23,7 @@ use crate::{
 };
 
 /// The metadata to keep unstable entries
-/// In libafl, the stability is the number of the unstable entries divided by the size of the map
-/// This is different from AFL++, which shows the number of the unstable entries divided by the number of filled entries.
+/// Formula is same as AFL++: number of unstable entries divided by the number of filled entries.
 #[cfg_attr(
     any(not(feature = "serdeany_autoreg"), miri),
     allow(clippy::unsafe_derive_deserialize)
@@ -32,7 +31,7 @@ use crate::{
 #[derive(Serialize, Deserialize, Clone, Debug)]
 pub struct UnstableEntriesMetadata {
     unstable_entries: HashSet<usize>,
-    map_len: usize,
+    filled_entries_count: usize,
 }
 
 impl_serdeany!(UnstableEntriesMetadata);
@@ -42,7 +41,7 @@ impl UnstableEntriesMetadata {
     pub fn new() -> Self {
         Self {
             unstable_entries: HashSet::new(),
-            map_len: 0,
+            filled_entries_count: 0,
         }
     }
 
@@ -54,8 +53,8 @@ impl UnstableEntriesMetadata {
 
     /// Getter
     #[must_use]
-    pub fn map_len(&self) -> usize {
-        self.map_len
+    pub fn filled_entries_count(&self) -> usize {
+        self.filled_entries_count
     }
 }
 
@@ -152,12 +151,27 @@ where
             .observers_mut()
             .post_exec_all(state, &input, &exit_kind)?;
 
-        let map_first = &executor.observers()[&self.map_observer_handle]
-            .as_ref()
-            .to_vec();
+        let observers = &executor.observers();
+        let map_first = observers[&self.map_observer_handle].as_ref();
+        let map_first_filled_count = match state
+            .named_metadata_map()
+            .get::<MapFeedbackMetadata<O::Entry>>(&self.map_name)
+        {
+            Some(metadata) => metadata.num_covered_map_indexes,
+            None => map_first.count_bytes().try_into().map_err(|len| {
+                Error::illegal_state(
+                    format!(
+                        "map's filled entry count ({}) is greater than usize::MAX ({})",
+                        len,
+                        usize::MAX,
+                    )
+                    .as_str(),
+                )
+            })?,
+        };
+        let map_first_entries = map_first.to_vec();
+        let map_first_len = map_first.to_vec().len();
 
         let mut unstable_entries: Vec<usize> = vec![];
-        let map_len: usize = map_first.len();
         // Run CAL_STAGE_START - 1 times, increase by 2 for every time a new
         // run is found to be unstable or to crash with CAL_STAGE_MAX total runs.
         let mut i = 1;
@@ -203,11 +217,11 @@ where
                     .unwrap()
                     .history_map;
 
-                if history_map.len() < map_len {
-                    history_map.resize(map_len, O::Entry::default());
+                if history_map.len() < map_first_len {
+                    history_map.resize(map_first_len, O::Entry::default());
                 }
 
-                for (idx, (first, (cur, history))) in map_first
+                for (idx, (first, (cur, history))) in map_first_entries
                     .iter()
                     .zip(map.iter().zip(history_map.iter_mut()))
                     .enumerate()
@@ -230,11 +244,11 @@ where
         if unstable_found {
             let metadata = state.metadata_or_insert_with(UnstableEntriesMetadata::new);
 
-            // If we see new stable entries executing this new corpus entries, then merge with the existing one
+            // If we see new unstable entries executing this new corpus entries, then merge with the existing one
            for item in unstable_entries {
                 metadata.unstable_entries.insert(item); // Insert newly found items
             }
-            metadata.map_len = map_len;
+            metadata.filled_entries_count = map_first_filled_count;
         } else if !state.has_metadata::<UnstableEntriesMetadata>() {
             send_default_stability = true;
             state.add_metadata(UnstableEntriesMetadata::new());
@@ -299,16 +313,18 @@ where
         if unstable_found {
             if let Some(meta) = state.metadata_map().get::<UnstableEntriesMetadata>() {
                 let unstable_entries = meta.unstable_entries().len();
-                let map_len = meta.map_len();
-                debug_assert_ne!(map_len, 0, "The map_len must never be 0");
+                debug_assert_ne!(
+                    map_first_filled_count, 0,
+                    "The map's filled count must never be 0"
+                );
                 mgr.fire(
                     state,
                     Event::UpdateUserStats {
                         name: Cow::from("stability"),
                         value: UserStats::new(
                             UserStatsValue::Ratio(
-                                (map_len - unstable_entries) as u64,
-                                map_len as u64,
+                                (map_first_filled_count - unstable_entries) as u64,
+                                map_first_filled_count as u64,
                             ),
                             AggregatorOps::Avg,
                         ),
@@ -322,7 +338,10 @@ where
                 Event::UpdateUserStats {
                     name: Cow::from("stability"),
                     value: UserStats::new(
-                        UserStatsValue::Ratio(map_len as u64, map_len as u64),
+                        UserStatsValue::Ratio(
+                            map_first_filled_count as u64,
+                            map_first_filled_count as u64,
+                        ),
                         AggregatorOps::Avg,
                     ),
                     phantom: PhantomData,
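
Usage note on the denominator above: the filled-entry count is read from MapFeedbackMetadata::num_covered_map_indexes when that metadata is present in the state, and is otherwise recomputed from the observer's map via count_bytes(), with a try_into() guard because count_bytes() returns a u64. A rough standalone sketch of that fallback, where the `cached_count`/`map` pair is a hypothetical stand-in for the state metadata and the coverage map:

    /// Illustrative only: prefer a count that a map feedback already tracked,
    /// otherwise count the non-zero entries of the coverage map directly.
    fn filled_entries_count(cached_count: Option<usize>, map: &[u8]) -> usize {
        match cached_count {
            // Fast path: reuse the number of covered indexes tracked elsewhere.
            Some(count) => count,
            // Slow path: walk the map once and count every entry that was hit.
            None => map.iter().filter(|&&entry| entry != 0).count(),
        }
    }

Either way, the result feeds the UserStatsValue::Ratio events above, so monitors report stability as (filled - unstable) / filled.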