Add global stats cache (#2956)

* Add global stats cache

* Fix

* Make clippy happy

* Merge manager files; Implement default for GlobalStats

* fmt code

* Use &Cow to avoid unnecessary clone

* Avoid push_str without cleanup
EvianZhang 2025-02-10 23:11:36 +08:00 committed by GitHub
parent 404227d1a3
commit 800b8b417d
11 changed files with 755 additions and 400 deletions
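
The core of this change: `ClientStatsManager` now keeps a `cached_global_stats: Option<GlobalStats>` that is reset to `None` whenever a client reports a change to its crucial stats, so monitors stop re-aggregating all clients on every display call. A minimal, self-contained sketch of the pattern (names loosely mirror the diff; this is not the LibAFL code itself):

struct Manager {
    per_client_execs: Vec<u64>,
    cached_total: Option<u64>, // erased whenever a client is updated
}

impl Manager {
    fn update_client(&mut self, id: usize, execs: u64) {
        self.per_client_execs[id] = execs;
        self.cached_total = None; // crucial stats changed: invalidate the cache
    }

    fn total_execs(&mut self) -> u64 {
        if self.cached_total.is_none() {
            // Recompute only on the first read after an invalidation
            self.cached_total = Some(self.per_client_execs.iter().sum());
        }
        self.cached_total.unwrap()
    }
}

fn main() {
    let mut m = Manager { per_client_execs: vec![10, 20], cached_total: None };
    assert_eq!(m.total_execs(), 30); // computed once, then served from the cache
    m.update_client(0, 15); // invalidates
    assert_eq!(m.total_execs(), 35); // recomputed lazily
}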

View File

@@ -12,11 +12,7 @@ use libafl_bolts::ClientId;
pub fn main() {
let mut monitor = TuiMonitor::builder().build();
let _client_stats = ClientStats {
corpus_size: 1024,
executions: 512,
..ClientStats::default()
};
let _client_stats = ClientStats::default();
let mut client_stats_manager = ClientStatsManager::default();
monitor.display(&mut client_stats_manager, "Test", ClientId(0));
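
Since the `ClientStats` fields are private after this change, values that were previously set through a struct literal go through updater methods instead. A hedged sketch (method names are taken from this diff; the exact module path and feature-gated signature are assumptions):

use libafl::statistics::ClientStats;
use libafl_bolts::current_time;

fn make_stats() -> ClientStats {
    let mut stats = ClientStats::default();
    stats.update_corpus_size(1024); // replaces the old `corpus_size: 1024` field init
    stats.update_executions(512, current_time()); // replaces `executions: 512`
    stats
}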

View File

@@ -8,7 +8,7 @@ use std::{
path::PathBuf,
};
use libafl_bolts::{current_time, format_duration_hms, ClientId};
use libafl_bolts::{current_time, ClientId};
use serde_json::json;
use crate::{monitors::Monitor, statistics::manager::ClientStatsManager};
@@ -33,6 +33,8 @@ impl Monitor for OnDiskTomlMonitor {
if cur_time - self.last_update >= self.update_interval {
self.last_update = cur_time;
let global_stats = client_stats_manager.global_stats();
let mut file = File::create(&self.filename).expect("Failed to open the Toml file");
write!(
&mut file,
@@ -46,12 +48,12 @@
executions = {}
exec_sec = {}
",
format_duration_hms(&(cur_time - client_stats_manager.start_time())),
client_stats_manager.client_stats_count(),
client_stats_manager.corpus_size(),
client_stats_manager.objective_size(),
client_stats_manager.total_execs(),
client_stats_manager.execs_per_sec()
global_stats.run_time_pretty,
global_stats.client_stats_count,
global_stats.corpus_size,
global_stats.objective_size,
global_stats.total_execs,
global_stats.execs_per_sec
)
.expect("Failed to write to the Toml file");
@@ -73,11 +75,15 @@ objectives = {}
executions = {}
exec_sec = {}
",
i, client.corpus_size, client.objective_size, client.executions, exec_sec
i,
client.corpus_size(),
client.objective_size(),
client.executions(),
exec_sec
)
.expect("Failed to write to the Toml file");
for (key, val) in &client.user_stats {
for (key, val) in client.user_stats() {
let k: String = key
.chars()
.map(|c| if c.is_whitespace() { '_' } else { c })
@@ -172,13 +178,14 @@ where
.open(&self.path)
.expect("Failed to open logging file");
let global_stats = client_stats_manager.global_stats();
let line = json!({
"run_time": current_time() - client_stats_manager.start_time(),
"clients": client_stats_manager.client_stats_count(),
"corpus": client_stats_manager.corpus_size(),
"objectives": client_stats_manager.objective_size(),
"executions": client_stats_manager.total_execs(),
"exec_sec": client_stats_manager.execs_per_sec(),
"run_time": global_stats.run_time,
"clients": global_stats.client_stats_count,
"corpus": global_stats.corpus_size,
"objectives": global_stats.objective_size,
"executions": global_stats.total_execs,
"exec_sec": global_stats.execs_per_sec,
"client_stats": client_stats_manager.client_stats(),
});
writeln!(&file, "{line}").expect("Unable to write Json to file");
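
This is the refactor that repeats across every monitor in this PR: fetch one `GlobalStats` snapshot via `global_stats()` and read plain fields, instead of calling five separate aggregation methods per display. A hedged sketch of what each monitor body now boils down to (module path assumed):

use libafl::statistics::manager::ClientStatsManager;

fn print_globals(client_stats_manager: &mut ClientStatsManager, event_msg: &str) {
    // One (possibly cached) snapshot instead of five aggregation passes
    let global_stats = client_stats_manager.global_stats();
    println!(
        "[{event_msg}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
        global_stats.run_time_pretty,
        global_stats.client_stats_count,
        global_stats.corpus_size,
        global_stats.objective_size,
        global_stats.total_execs,
        global_stats.execs_per_sec_pretty,
    );
}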

View File

@@ -47,13 +47,14 @@ impl Monitor for OnDiskJsonAggregateMonitor {
.open(&self.json_path)
.expect("Failed to open JSON logging file");
let global_stats = client_stats_manager.global_stats();
let mut json_value = json!({
"run_time": (cur_time - client_stats_manager.start_time()).as_secs(),
"clients": client_stats_manager.client_stats_count(),
"corpus": client_stats_manager.corpus_size(),
"objectives": client_stats_manager.objective_size(),
"executions": client_stats_manager.total_execs(),
"exec_sec": client_stats_manager.execs_per_sec(),
"run_time": global_stats.run_time.as_secs(),
"clients": global_stats.client_stats_count,
"corpus": global_stats.corpus_size,
"objectives": global_stats.objective_size,
"executions": global_stats.total_execs,
"exec_sec": global_stats.execs_per_sec,
});
// Add all aggregated values directly to the root
@@ -62,7 +63,7 @@ impl Monitor for OnDiskJsonAggregateMonitor {
client_stats_manager
.aggregated()
.iter()
.map(|(k, v)| (k.clone(), json!(v))),
.map(|(k, v)| (k.clone().into_owned(), json!(v))),
);
}
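
`aggregated()` now yields `Cow<'static, str>` keys, hence the `into_owned()` above when building the `serde_json` map, which needs owned `String` keys. A standalone illustration of why the `Cow` stays cheap until that point:

use std::borrow::Cow;

fn main() {
    let key: Cow<'static, str> = Cow::Borrowed("corpus");
    // Cloning a Cow::Borrowed copies the reference, not the string;
    // into_owned() allocates a String only where one is really required.
    let owned: String = key.clone().into_owned();
    assert_eq!(owned, "corpus");
}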

View File

@@ -26,7 +26,7 @@ use alloc::fmt::Debug;
use alloc::vec::Vec;
use core::{fmt, fmt::Write, time::Duration};
use libafl_bolts::{current_time, format_duration_hms, ClientId};
use libafl_bolts::ClientId;
#[cfg(all(feature = "prometheus_monitor", feature = "std"))]
pub use prometheus::PrometheusMonitor;
@@ -96,21 +96,22 @@ impl Monitor for SimplePrintingMonitor {
sender_id: ClientId,
) {
let mut userstats = client_stats_manager.client_stats()[sender_id.0 as usize]
.user_stats
.user_stats()
.iter()
.map(|(key, value)| format!("{key}: {value}"))
.collect::<Vec<_>>();
userstats.sort();
let global_stats = client_stats_manager.global_stats();
println!(
"[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}, {}",
event_msg,
sender_id.0,
format_duration_hms(&(current_time() - client_stats_manager.start_time())),
client_stats_manager.client_stats_count(),
client_stats_manager.corpus_size(),
client_stats_manager.objective_size(),
client_stats_manager.total_execs(),
client_stats_manager.execs_per_sec_pretty(),
global_stats.run_time_pretty,
global_stats.client_stats_count,
global_stats.corpus_size,
global_stats.objective_size,
global_stats.total_execs,
global_stats.execs_per_sec_pretty,
userstats.join(", ")
);
@@ -158,22 +159,23 @@ where
event_msg: &str,
sender_id: ClientId,
) {
let global_stats = client_stats_manager.global_stats();
let mut fmt = format!(
"[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
event_msg,
sender_id.0,
format_duration_hms(&(current_time() - client_stats_manager.start_time())),
client_stats_manager.client_stats_count(),
client_stats_manager.corpus_size(),
client_stats_manager.objective_size(),
client_stats_manager.total_execs(),
client_stats_manager.execs_per_sec_pretty()
global_stats.run_time_pretty,
global_stats.client_stats_count,
global_stats.corpus_size,
global_stats.objective_size,
global_stats.total_execs,
global_stats.execs_per_sec_pretty
);
if self.print_user_monitor {
client_stats_manager.client_stats_insert(sender_id);
let client = client_stats_manager.client_stats_for(sender_id);
for (key, val) in &client.user_stats {
for (key, val) in client.user_stats() {
write!(fmt, ", {key}: {val}").unwrap();
}
}
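
For reference, the user-stats rendering used by these printing monitors is a plain sort-and-join; a standalone rendition:

fn main() {
    let mut userstats: Vec<String> = [("edges", "12%"), ("execs", "100")]
        .iter()
        .map(|(key, value)| format!("{key}: {value}"))
        .collect();
    userstats.sort(); // stable, alphabetical display order
    println!("{}", userstats.join(", ")); // "edges: 12%, execs: 100"
}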

View File

@@ -6,7 +6,7 @@ use core::{
time::Duration,
};
use libafl_bolts::{current_time, format_duration_hms, ClientId};
use libafl_bolts::{current_time, ClientId};
use crate::{monitors::Monitor, statistics::manager::ClientStatsManager};
@@ -45,15 +45,16 @@ where
String::new()
};
let head = format!("{event_msg}{pad} {sender}");
let global_stats = client_stats_manager.global_stats();
let mut global_fmt = format!(
"[{}] (GLOBAL) run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
head,
format_duration_hms(&(current_time() - client_stats_manager.start_time())),
client_stats_manager.client_stats_count(),
client_stats_manager.corpus_size(),
client_stats_manager.objective_size(),
client_stats_manager.total_execs(),
client_stats_manager.execs_per_sec_pretty()
global_stats.run_time_pretty,
global_stats.client_stats_count,
global_stats.corpus_size,
global_stats.objective_size,
global_stats.total_execs,
global_stats.execs_per_sec_pretty
);
for (key, val) in client_stats_manager.aggregated() {
write!(global_fmt, ", {key}: {val}").unwrap();
@@ -70,9 +71,13 @@ where
let pad = " ".repeat(head.len());
let mut fmt = format!(
" {} (CLIENT) corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
pad, client.corpus_size, client.objective_size, client.executions, exec_sec
pad,
client.corpus_size(),
client.objective_size(),
client.executions(),
exec_sec
);
for (key, val) in &client.user_stats {
for (key, val) in client.user_stats() {
write!(fmt, ", {key}: {val}").unwrap();
}
(self.print_fn)(&fmt);
@@ -84,7 +89,7 @@ where
for (i, client) in client_stats_manager
.client_stats()
.iter()
.filter(|x| x.enabled)
.filter(|x| x.enabled())
.enumerate()
{
let fmt = format!("Client {:03}:\n{}", i + 1, client.introspection_stats);

View File

@@ -37,7 +37,7 @@ use std::{
// using thread in order to start the HTTP server in a separate thread
use futures::executor::block_on;
use libafl_bolts::{current_time, format_duration_hms, ClientId};
use libafl_bolts::{current_time, ClientId};
// using the official rust client library for Prometheus: https://github.com/prometheus/client_rust
use prometheus_client::{
encoding::{text::encode, EncodeLabelSet},
@@ -102,8 +102,9 @@ where
// require a fair bit of logic to handle "amount to increment given
// time since last observation"
let global_stats = client_stats_manager.global_stats();
// Global (aggregated) metrics
let corpus_size = client_stats_manager.corpus_size();
let corpus_size = global_stats.corpus_size;
self.prometheus_global_stats
.corpus_count
.get_or_create(&Labels {
@@ -112,7 +113,7 @@
})
.set(corpus_size.try_into().unwrap());
let objective_size = client_stats_manager.objective_size();
let objective_size = global_stats.objective_size;
self.prometheus_global_stats
.objective_count
.get_or_create(&Labels {
@@ -121,7 +122,7 @@
})
.set(objective_size.try_into().unwrap());
let total_execs = client_stats_manager.total_execs();
let total_execs = global_stats.total_execs;
self.prometheus_global_stats
.executions
.get_or_create(&Labels {
@@ -130,7 +131,7 @@
})
.set(total_execs.try_into().unwrap());
let execs_per_sec = client_stats_manager.execs_per_sec();
let execs_per_sec = global_stats.execs_per_sec;
self.prometheus_global_stats
.exec_rate
.get_or_create(&Labels {
@@ -139,7 +140,7 @@
})
.set(execs_per_sec);
let run_time = (current_time() - client_stats_manager.start_time()).as_secs();
let run_time = global_stats.run_time.as_secs();
self.prometheus_global_stats
.runtime
.get_or_create(&Labels {
@@ -148,10 +149,7 @@
})
.set(run_time.try_into().unwrap()); // run time in seconds, which can be converted to a time format by Grafana or similar
let total_clients = client_stats_manager
.client_stats_count()
.try_into()
.unwrap(); // convert usize to u64 (unlikely that # of clients will be > 2^64 -1...)
let total_clients = global_stats.client_stats_count.try_into().unwrap(); // convert usize to u64 (unlikely that # of clients will be > 2^64 -1...)
self.prometheus_global_stats
.clients_count
.get_or_create(&Labels {
@@ -164,12 +162,12 @@
let mut global_fmt = format!(
"[Prometheus] [{} #GLOBAL] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
event_msg,
format_duration_hms(&(current_time() - client_stats_manager.start_time())),
client_stats_manager.client_stats_count(),
client_stats_manager.corpus_size(),
client_stats_manager.objective_size(),
client_stats_manager.total_execs(),
client_stats_manager.execs_per_sec_pretty()
global_stats.run_time_pretty,
global_stats.client_stats_count,
global_stats.corpus_size,
global_stats.objective_size,
global_stats.total_execs,
global_stats.execs_per_sec_pretty
);
for (key, val) in client_stats_manager.aggregated() {
// print global aggregated custom stats
@@ -204,7 +202,7 @@ where
.custom_stat
.get_or_create(&Labels {
client: Cow::from("global"),
stat: Cow::from(key.clone()),
stat: key.clone(),
})
.set(value);
}
@@ -223,7 +221,7 @@ where
client: Cow::from(sender_id.0.to_string()),
stat: Cow::from(""),
})
.set(cur_client_clone.corpus_size.try_into().unwrap());
.set(cur_client_clone.corpus_size().try_into().unwrap());
self.prometheus_client_stats
.objective_count
@@ -231,7 +229,7 @@
client: Cow::from(sender_id.0.to_string()),
stat: Cow::from(""),
})
.set(cur_client_clone.objective_size.try_into().unwrap());
.set(cur_client_clone.objective_size().try_into().unwrap());
self.prometheus_client_stats
.executions
@@ -239,7 +237,7 @@
client: Cow::from(sender_id.0.to_string()),
stat: Cow::from(""),
})
.set(cur_client_clone.executions.try_into().unwrap());
.set(cur_client_clone.executions().try_into().unwrap());
self.prometheus_client_stats
.exec_rate
@@ -249,7 +247,7 @@
})
.set(cur_client_clone.execs_per_sec(current_time()));
let client_run_time = (current_time() - cur_client_clone.start_time).as_secs();
let client_run_time = (current_time() - cur_client_clone.start_time()).as_secs();
self.prometheus_client_stats
.runtime
.get_or_create(&Labels {
@@ -270,13 +268,13 @@ where
"[Prometheus] [{} #{}] corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
event_msg,
sender_id.0,
client.corpus_size,
client.objective_size,
client.executions,
client.corpus_size(),
client.objective_size(),
client.executions(),
cur_client_clone.execs_per_sec_pretty(current_time())
);
for (key, val) in cur_client_clone.user_stats {
for (key, val) in cur_client_clone.user_stats() {
// print the custom stats for each client
write!(fmt, ", {key}: {val}").unwrap();
// Update metrics added to the user_stats hashmap by feedback event-fires

View File

@@ -3,7 +3,6 @@
//! It's based on [ratatui](https://ratatui.rs/)
use alloc::{borrow::Cow, boxed::Box, string::ToString};
use core::cmp;
use std::{
collections::VecDeque,
fmt::Write as _,
@@ -25,7 +24,6 @@ use crossterm::{
use hashbrown::HashMap;
use libafl_bolts::{current_time, format_duration_hms, ClientId};
use ratatui::{backend::CrosstermBackend, Terminal};
use serde_json::Value;
use typed_builder::TypedBuilder;
#[cfg(feature = "introspection")]
@@ -33,9 +31,8 @@ use crate::statistics::perf_stats::{ClientPerfStats, PerfFeature};
use crate::{
monitors::Monitor,
statistics::{
manager::ClientStatsManager,
user_stats::{AggregatorOps, UserStats, UserStatsValue},
ClientStats,
manager::ClientStatsManager, user_stats::UserStats, ClientStats, ItemGeometry,
ProcessTiming,
},
};
@@ -209,50 +206,6 @@ impl PerfTuiContext {
}
}
/// Data struct to process timings
#[derive(Debug, Default, Clone)]
pub struct ProcessTiming {
/// The start time
pub client_start_time: Duration,
/// The executions speed
pub exec_speed: String,
/// Timing of the last new corpus entry
pub last_new_entry: Duration,
/// Timing of the last new solution
pub last_saved_solution: Duration,
}
impl ProcessTiming {
/// Create a new [`ProcessTiming`] struct
fn new() -> Self {
Self {
exec_speed: "0".to_string(),
..Default::default()
}
}
}
/// The geometry of a single data point
#[expect(missing_docs)]
#[derive(Debug, Default, Clone)]
pub struct ItemGeometry {
pub pending: u64,
pub pend_fav: u64,
pub own_finds: u64,
pub imported: u64,
pub stability: String,
}
impl ItemGeometry {
/// Create a new [`ItemGeometry`]
fn new() -> Self {
Self {
stability: "0%".to_string(),
..Default::default()
}
}
}
/// The context for a single client tracked in this [`TuiMonitor`]
#[expect(missing_docs)]
#[derive(Debug, Default, Clone)]
@@ -272,53 +225,16 @@ pub struct ClientTuiContext {
impl ClientTuiContext {
/// Grab data for a single client
pub fn grab_data(&mut self, client: &ClientStats, exec_sec: String) {
self.corpus = client.corpus_size;
self.objectives = client.objective_size;
self.executions = client.executions;
self.process_timing.client_start_time = client.start_time;
self.process_timing.last_new_entry = if client.last_corpus_time > client.start_time {
client.last_corpus_time - client.start_time
} else {
Duration::default()
};
pub fn grab_data(&mut self, client: &mut ClientStats) {
self.corpus = client.corpus_size();
self.objectives = client.objective_size();
self.executions = client.executions();
self.process_timing = client.process_timing();
self.process_timing.last_saved_solution = if client.last_objective_time > client.start_time
{
client.last_objective_time - client.start_time
} else {
Duration::default()
};
self.map_density = client.map_density();
self.item_geometry = client.item_geometry();
self.process_timing.exec_speed = exec_sec;
self.map_density = client
.get_user_stats("edges")
.map_or("0%".to_string(), ToString::to_string);
let default_json = serde_json::json!({
"pending": 0,
"pend_fav": 0,
"imported": 0,
"own_finds": 0,
});
let afl_stats = client
.get_user_stats("AflStats")
.map_or(default_json.to_string(), ToString::to_string);
let afl_stats_json: Value =
serde_json::from_str(afl_stats.as_str()).unwrap_or(default_json);
self.item_geometry.pending = afl_stats_json["pending"].as_u64().unwrap_or_default();
self.item_geometry.pend_fav = afl_stats_json["pend_fav"].as_u64().unwrap_or_default();
self.item_geometry.imported = afl_stats_json["imported"].as_u64().unwrap_or_default();
self.item_geometry.own_finds = afl_stats_json["own_finds"].as_u64().unwrap_or_default();
let stability = client
.get_user_stats("stability")
.map_or("0%".to_string(), ToString::to_string);
self.item_geometry.stability = stability;
for (key, val) in &client.user_stats {
for (key, val) in client.user_stats() {
self.user_stats.insert(key.clone(), val.clone());
}
}
@@ -412,27 +328,29 @@ impl Monitor for TuiMonitor {
let cur_time = current_time();
{
let global_stats = client_stats_manager.global_stats();
// TODO implement floating-point support for TimedStat
let execsec = client_stats_manager.execs_per_sec() as u64;
let totalexec = client_stats_manager.total_execs();
let run_time = cur_time - client_stats_manager.start_time();
let total_process_timing = get_process_timing(client_stats_manager);
let execsec = global_stats.execs_per_sec as u64;
let totalexec = global_stats.total_execs;
let run_time = global_stats.run_time;
let exec_per_sec_pretty = global_stats.execs_per_sec_pretty.clone();
let mut ctx = self.context.write().unwrap();
ctx.total_process_timing = total_process_timing;
ctx.total_corpus_count = global_stats.corpus_size;
ctx.total_solutions = global_stats.objective_size;
ctx.corpus_size_timed
.add(run_time, client_stats_manager.corpus_size());
.add(run_time, global_stats.corpus_size);
ctx.objective_size_timed
.add(run_time, client_stats_manager.objective_size());
.add(run_time, global_stats.objective_size);
let total_process_timing = client_stats_manager.process_timing(exec_per_sec_pretty);
ctx.total_process_timing = total_process_timing;
ctx.execs_per_sec_timed.add(run_time, execsec);
ctx.start_time = client_stats_manager.start_time();
ctx.total_execs = totalexec;
ctx.clients_num = client_stats_manager.client_stats().len();
ctx.total_map_density = get_map_density(client_stats_manager);
ctx.total_solutions = client_stats_manager.objective_size();
ctx.total_map_density = client_stats_manager.map_density();
ctx.total_cycles_done = 0;
ctx.total_corpus_count = client_stats_manager.corpus_size();
ctx.total_item_geometry = get_item_geometry(client_stats_manager);
ctx.total_item_geometry = client_stats_manager.item_geometry();
}
client_stats_manager.client_stats_insert(sender_id);
@@ -449,9 +367,13 @@
let head = format!("{event_msg}{pad} {sender}");
let mut fmt = format!(
"[{}] corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
head, client.corpus_size, client.objective_size, client.executions, exec_sec
head,
client.corpus_size(),
client.objective_size(),
client.executions(),
exec_sec
);
for (key, val) in &client.user_stats {
for (key, val) in client.user_stats() {
write!(fmt, ", {key}: {val}").unwrap();
}
for (key, val) in client_stats_manager.aggregated() {
@@ -459,12 +381,13 @@
}
{
let client = &client_stats_manager.client_stats()[sender_id.0 as usize];
let mut ctx = self.context.write().unwrap();
client_stats_manager.update_client_stats_for(sender_id, |client| {
ctx.clients
.entry(sender_id.0 as usize)
.or_default()
.grab_data(client, exec_sec);
.grab_data(client);
});
while ctx.client_logs.len() >= DEFAULT_LOGS_NUMBER {
ctx.client_logs.pop_front();
}
@@ -477,7 +400,7 @@
for (i, client) in client_stats_manager
.client_stats()
.iter()
.filter(|x| x.enabled)
.filter(|x| x.enabled())
.enumerate()
{
self.context
@@ -554,86 +477,6 @@ impl TuiMonitor {
}
}
fn get_process_timing(client_stats_manager: &mut ClientStatsManager) -> ProcessTiming {
let mut total_process_timing = ProcessTiming::new();
total_process_timing.exec_speed = client_stats_manager.execs_per_sec_pretty();
if client_stats_manager.client_stats().len() > 1 {
let mut new_path_time = Duration::default();
let mut new_objectives_time = Duration::default();
for client in client_stats_manager
.client_stats()
.iter()
.filter(|client| client.enabled)
{
new_path_time = client.last_corpus_time.max(new_path_time);
new_objectives_time = client.last_objective_time.max(new_objectives_time);
}
if new_path_time > client_stats_manager.start_time() {
total_process_timing.last_new_entry = new_path_time - client_stats_manager.start_time();
}
if new_objectives_time > client_stats_manager.start_time() {
total_process_timing.last_saved_solution =
new_objectives_time - client_stats_manager.start_time();
}
}
total_process_timing
}
fn get_map_density(client_stats_manager: &ClientStatsManager) -> String {
client_stats_manager
.client_stats()
.iter()
.filter(|client| client.enabled)
.filter_map(|client| client.get_user_stats("edges"))
.map(ToString::to_string)
.fold("0%".to_string(), cmp::max)
}
fn get_item_geometry(client_stats_manager: &ClientStatsManager) -> ItemGeometry {
let mut total_item_geometry = ItemGeometry::new();
if client_stats_manager.client_stats().len() < 2 {
return total_item_geometry;
}
let mut ratio_a: u64 = 0;
let mut ratio_b: u64 = 0;
for client in client_stats_manager
.client_stats()
.iter()
.filter(|client| client.enabled)
{
let afl_stats = client
.get_user_stats("AflStats")
.map_or("None".to_string(), ToString::to_string);
let stability = client.get_user_stats("stability").map_or(
UserStats::new(UserStatsValue::Ratio(0, 100), AggregatorOps::Avg),
Clone::clone,
);
if afl_stats != "None" {
let default_json = serde_json::json!({
"pending": 0,
"pend_fav": 0,
"imported": 0,
"own_finds": 0,
});
let afl_stats_json: Value =
serde_json::from_str(afl_stats.as_str()).unwrap_or(default_json);
total_item_geometry.pending += afl_stats_json["pending"].as_u64().unwrap_or_default();
total_item_geometry.pend_fav += afl_stats_json["pend_fav"].as_u64().unwrap_or_default();
total_item_geometry.own_finds +=
afl_stats_json["own_finds"].as_u64().unwrap_or_default();
total_item_geometry.imported += afl_stats_json["imported"].as_u64().unwrap_or_default();
}
if let UserStatsValue::Ratio(a, b) = stability.value() {
ratio_a += a;
ratio_b += b;
}
}
total_item_geometry.stability = format!("{}%", ratio_a * 100 / ratio_b);
total_item_geometry
}
fn run_tui_thread<W: Write + Send + Sync + 'static>(
context: Arc<RwLock<TuiContext>>,
tick_rate: Duration,
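
The free functions deleted above (`get_process_timing`, `get_map_density`, `get_item_geometry`) survive as methods: run-wide totals on `ClientStatsManager`, per-client values on `ClientStats`. A hedged usage sketch (module path assumed; `item_geometry` is `std`-only per this diff):

use libafl::statistics::manager::ClientStatsManager;

fn grab_totals(client_stats_manager: &mut ClientStatsManager) {
    // The pretty string is cloned out of the cached snapshot first, because
    // process_timing() takes it by value.
    let exec_per_sec_pretty = client_stats_manager.global_stats().execs_per_sec_pretty.clone();
    let timing = client_stats_manager.process_timing(exec_per_sec_pretty); // totals across clients
    let density = client_stats_manager.map_density(); // max of the clients' "edges" stats
    let geometry = client_stats_manager.item_geometry(); // summed "AflStats" counters
    let _ = (timing, density, geometry);
}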

View File

@@ -1,22 +1,36 @@
//! Client statistics manager
use alloc::{string::String, vec::Vec};
use core::time::Duration;
use alloc::{
borrow::Cow,
string::{String, ToString},
vec::Vec,
};
use core::{cmp, time::Duration};
use hashbrown::HashMap;
use libafl_bolts::{current_time, ClientId};
use serde::{Deserialize, Serialize};
use libafl_bolts::{current_time, format_duration_hms, ClientId};
#[cfg(feature = "std")]
use serde_json::Value;
use super::{user_stats::UserStatsValue, ClientStats};
use super::{user_stats::UserStatsValue, ClientStats, ProcessTiming};
#[cfg(feature = "std")]
use super::{
user_stats::{AggregatorOps, UserStats},
ItemGeometry,
};
/// Manager of all client's statistics
#[derive(Serialize, Deserialize, Debug)]
#[derive(Debug)]
pub struct ClientStatsManager {
client_stats: Vec<ClientStats>,
/// Aggregated user stats value.
///
/// This map is updated by the event manager and read by monitors to display user-defined stats.
pub(super) cached_aggregated_user_stats: HashMap<String, UserStatsValue>,
pub(super) cached_aggregated_user_stats: HashMap<Cow<'static, str>, UserStatsValue>,
/// Cached global stats.
///
/// This is reset to `None` every time a client's crucial stats are updated.
cached_global_stats: Option<GlobalStats>,
start_time: Duration,
}
@@ -27,10 +41,86 @@ impl ClientStatsManager {
Self {
client_stats: vec![],
cached_aggregated_user_stats: HashMap::new(),
cached_global_stats: None,
start_time: current_time(),
}
}
/// Get all client stats
#[must_use]
pub fn client_stats(&self) -> &[ClientStats] {
&self.client_stats
}
/// The client monitor for a specific id, creating new if it doesn't exist
pub fn client_stats_insert(&mut self, client_id: ClientId) {
let total_client_stat_count = self.client_stats().len();
for _ in total_client_stat_count..=(client_id.0) as usize {
self.client_stats.push(ClientStats {
enabled: false,
last_window_time: Duration::from_secs(0),
start_time: Duration::from_secs(0),
..ClientStats::default()
});
}
if total_client_stat_count <= client_id.0 as usize {
// The client count changed!
self.cached_global_stats = None;
}
self.update_client_stats_for(client_id, |new_stat| {
if !new_stat.enabled {
let timestamp = current_time();
// I have never seen this man in my life
new_stat.start_time = timestamp;
new_stat.last_window_time = timestamp;
new_stat.enabled = true;
new_stat.stats_status.basic_stats_updated = true;
}
});
}
/// Update a specific client's stats.
///
/// This will potentially clear the global stats cache.
pub fn update_client_stats_for<T, F: FnOnce(&mut ClientStats) -> T>(
&mut self,
client_id: ClientId,
update: F,
) -> T {
let client_stat = &mut self.client_stats[client_id.0 as usize];
client_stat.clear_stats_status();
let res = update(client_stat);
if client_stat.stats_status.basic_stats_updated {
self.cached_global_stats = None;
}
res
}
/// Update all client stats. This will clear all previous client stats, and fill in the new client stats.
///
/// This will clear the global stats cache.
pub fn update_all_client_stats(&mut self, new_client_stats: Vec<ClientStats>) {
self.client_stats = new_client_stats;
self.cached_global_stats = None;
}
/// Get immutable reference to client stats
#[must_use]
pub fn client_stats_for(&self, client_id: ClientId) -> &ClientStats {
&self.client_stats()[client_id.0 as usize]
}
/// Aggregate user-defined stats
#[allow(clippy::ptr_arg)]
pub fn aggregate(&mut self, name: &Cow<'static, str>) {
super::user_stats::aggregate_user_stats(self, name);
}
/// Get aggregated user-defined stats
#[must_use]
pub fn aggregated(&self) -> &HashMap<Cow<'static, str>, UserStatsValue> {
&self.cached_aggregated_user_stats
}
/// Time this fuzzing run started
#[must_use]
pub fn start_time(&self) -> Duration {
@@ -42,117 +132,122 @@ impl ClientStatsManager {
self.start_time = time;
}
/// Get all client stats
#[must_use]
pub fn client_stats(&self) -> &[ClientStats] {
&self.client_stats
}
/// Get all client stats
pub fn client_stats_mut(&mut self) -> &mut Vec<ClientStats> {
&mut self.client_stats
}
/// Amount of elements in the corpus (combined for all children)
#[must_use]
pub fn corpus_size(&self) -> u64 {
self.client_stats()
.iter()
.fold(0_u64, |acc, x| acc + x.corpus_size)
}
/// Count the number of enabled client stats
#[must_use]
pub fn client_stats_count(&self) -> usize {
self.client_stats()
/// Get global stats.
///
/// The global stats are cached until the underlying client stats are modified.
pub fn global_stats(&mut self) -> &GlobalStats {
let global_stats = self.cached_global_stats.get_or_insert_with(|| GlobalStats {
client_stats_count: self
.client_stats
.iter()
.filter(|client| client.enabled)
.count()
}
/// Amount of elements in the objectives (combined for all children)
#[must_use]
pub fn objective_size(&self) -> u64 {
self.client_stats()
.count(),
corpus_size: self
.client_stats
.iter()
.fold(0_u64, |acc, x| acc + x.objective_size)
}
/// Total executions
#[inline]
#[must_use]
pub fn total_execs(&self) -> u64 {
self.client_stats()
.fold(0_u64, |acc, x| acc + x.corpus_size),
objective_size: self
.client_stats
.iter()
.fold(0_u64, |acc, x| acc + x.executions)
}
.fold(0_u64, |acc, x| acc + x.objective_size),
total_execs: self
.client_stats
.iter()
.fold(0_u64, |acc, x| acc + x.executions),
..GlobalStats::default()
});
/// Executions per second
#[inline]
pub fn execs_per_sec(&mut self) -> f64 {
// Time-related data are always re-computed, since they depend on the current time.
let cur_time = current_time();
self.client_stats_mut()
global_stats.run_time = cur_time - self.start_time;
global_stats.run_time_pretty = format_duration_hms(&global_stats.run_time);
global_stats.execs_per_sec = self
.client_stats
.iter_mut()
.fold(0.0, |acc, x| acc + x.execs_per_sec(cur_time))
.fold(0.0, |acc, x| acc + x.execs_per_sec(cur_time));
global_stats.execs_per_sec_pretty = super::prettify_float(global_stats.execs_per_sec);
global_stats
}
/// Executions per second
pub fn execs_per_sec_pretty(&mut self) -> String {
super::prettify_float(self.execs_per_sec())
}
/// The client monitor for a specific id, creating a new one if it doesn't exist
pub fn client_stats_insert(&mut self, client_id: ClientId) {
let total_client_stat_count = self.client_stats().len();
for _ in total_client_stat_count..=(client_id.0) as usize {
self.client_stats_mut().push(ClientStats {
enabled: false,
last_window_time: Duration::from_secs(0),
start_time: Duration::from_secs(0),
..ClientStats::default()
});
}
self.update_client_stats_for(client_id, |new_stat| {
if !new_stat.enabled {
let timestamp = current_time();
// I have never seen this man in my life
new_stat.start_time = timestamp;
new_stat.last_window_time = timestamp;
new_stat.enabled = true;
}
});
}
/// Update a specific client's stats.
pub fn update_client_stats_for<T, F: FnOnce(&mut ClientStats) -> T>(
&mut self,
client_id: ClientId,
update: F,
) -> T {
let client_stat = &mut self.client_stats_mut()[client_id.0 as usize];
update(client_stat)
}
/// Update all client stats. This will clear all previous client stats, and fill in the new client stats.
pub fn update_all_client_stats(&mut self, new_client_stats: Vec<ClientStats>) {
*self.client_stats_mut() = new_client_stats;
}
/// Get immutable reference to client stats
/// Get process timing. `execs_per_sec_pretty` can be retrieved from `GlobalStats`.
#[must_use]
pub fn client_stats_for(&self, client_id: ClientId) -> &ClientStats {
&self.client_stats()[client_id.0 as usize]
pub fn process_timing(&self, execs_per_sec_pretty: String) -> ProcessTiming {
let mut total_process_timing = ProcessTiming::new();
total_process_timing.exec_speed = execs_per_sec_pretty;
if self.client_stats().len() > 1 {
let mut new_path_time = Duration::default();
let mut new_objectives_time = Duration::default();
for client in self.client_stats().iter().filter(|client| client.enabled()) {
new_path_time = client.last_corpus_time().max(new_path_time);
new_objectives_time = client.last_objective_time().max(new_objectives_time);
}
if new_path_time > self.start_time() {
total_process_timing.last_new_entry = new_path_time - self.start_time();
}
if new_objectives_time > self.start_time() {
total_process_timing.last_saved_solution = new_objectives_time - self.start_time();
}
}
total_process_timing
}
/// Aggregate user-defined stats
pub fn aggregate(&mut self, name: &str) {
super::user_stats::aggregate_user_stats(self, name);
}
/// Get aggregated user-defined stats
/// Get map density
#[must_use]
pub fn aggregated(&self) -> &HashMap<String, UserStatsValue> {
&self.cached_aggregated_user_stats
pub fn map_density(&self) -> String {
self.client_stats()
.iter()
.filter(|client| client.enabled())
.filter_map(|client| client.get_user_stats("edges"))
.map(ToString::to_string)
.fold("0%".to_string(), cmp::max)
}
/// Get item geometry
#[cfg(feature = "std")]
#[must_use]
pub fn item_geometry(&self) -> ItemGeometry {
let mut total_item_geometry = ItemGeometry::new();
if self.client_stats().len() < 2 {
return total_item_geometry;
}
let mut ratio_a: u64 = 0;
let mut ratio_b: u64 = 0;
for client in self.client_stats().iter().filter(|client| client.enabled()) {
let afl_stats = client
.get_user_stats("AflStats")
.map_or("None".to_string(), ToString::to_string);
let stability = client.get_user_stats("stability").map_or(
UserStats::new(UserStatsValue::Ratio(0, 100), AggregatorOps::Avg),
Clone::clone,
);
if afl_stats != "None" {
let default_json = serde_json::json!({
"pending": 0,
"pend_fav": 0,
"imported": 0,
"own_finds": 0,
});
let afl_stats_json: Value =
serde_json::from_str(afl_stats.as_str()).unwrap_or(default_json);
total_item_geometry.pending +=
afl_stats_json["pending"].as_u64().unwrap_or_default();
total_item_geometry.pend_fav +=
afl_stats_json["pend_fav"].as_u64().unwrap_or_default();
total_item_geometry.own_finds +=
afl_stats_json["own_finds"].as_u64().unwrap_or_default();
total_item_geometry.imported +=
afl_stats_json["imported"].as_u64().unwrap_or_default();
}
if let UserStatsValue::Ratio(a, b) = stability.value() {
ratio_a += a;
ratio_b += b;
}
}
total_item_geometry.stability = format!("{}%", ratio_a * 100 / ratio_b);
total_item_geometry
}
}
@@ -161,3 +256,24 @@ impl Default for ClientStatsManager {
Self::new()
}
}
/// Global statistics which aggregate the client stats.
#[derive(Debug, Default)]
pub struct GlobalStats {
/// Run time since the fuzzing run started
pub run_time: Duration,
/// Pretty-printed run time
pub run_time_pretty: String,
/// Number of enabled client stats
pub client_stats_count: usize,
/// Amount of elements in the corpus (combined for all children)
pub corpus_size: u64,
/// Amount of elements in the objectives (combined for all children)
pub objective_size: u64,
/// Total executions
pub total_execs: u64,
/// Executions per second
pub execs_per_sec: f64,
/// Pretty-printed executions per second
pub execs_per_sec_pretty: String,
}
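
Putting the new manager API together: every mutation is routed through `update_client_stats_for`, whose `basic_stats_updated` flag is what erases the cache. A hedged sketch of the intended flow (module path assumed):

use libafl::statistics::manager::ClientStatsManager;
use libafl_bolts::ClientId;

fn record_corpus_growth(client_stats_manager: &mut ClientStatsManager) {
    let id = ClientId(0);
    client_stats_manager.client_stats_insert(id); // ensure the slot exists and is enabled
    client_stats_manager.update_client_stats_for(id, |client| {
        client.update_corpus_size(42); // sets basic_stats_updated, erasing the cache
    });
    // The first read after the invalidation recomputes the snapshot
    assert_eq!(client_stats_manager.global_stats().corpus_size, 42);
}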

View File

@@ -0,0 +1,173 @@
//! Global statistics available for Monitors to use
use alloc::string::{String, ToString};
use core::{cmp, time::Duration};
use libafl_bolts::{current_time, format_duration_hms};
#[cfg(feature = "std")]
use serde_json::Value;
use super::ClientStatsManager;
use crate::statistics::ProcessTiming;
#[cfg(feature = "std")]
use crate::statistics::{
user_stats::{AggregatorOps, UserStats, UserStatsValue},
ItemGeometry,
};
impl ClientStatsManager {
/// Time this fuzzing run started
#[must_use]
pub fn start_time(&self) -> Duration {
self.start_time
}
/// Time this fuzzing run started
pub fn set_start_time(&mut self, time: Duration) {
self.start_time = time;
}
/// Get global stats.
///
/// The global stats are cached until the underlying client stats are modified.
pub fn global_stats(&mut self) -> &GlobalStats {
let global_stats = self.cached_global_stats.get_or_insert_with(|| GlobalStats {
run_time: Duration::ZERO,
run_time_pretty: String::new(),
client_stats_count: self
.client_stats
.iter()
.filter(|client| client.enabled)
.count(),
corpus_size: self
.client_stats
.iter()
.fold(0_u64, |acc, x| acc + x.corpus_size),
objective_size: self
.client_stats
.iter()
.fold(0_u64, |acc, x| acc + x.objective_size),
total_execs: self
.client_stats
.iter()
.fold(0_u64, |acc, x| acc + x.executions),
execs_per_sec: 0.0,
execs_per_sec_pretty: String::new(),
});
// Time-related data are always re-computed, since they depend on the current time.
let cur_time = current_time();
global_stats.run_time = cur_time - self.start_time;
global_stats.run_time_pretty = format_duration_hms(&global_stats.run_time);
global_stats.execs_per_sec = self
.client_stats
.iter_mut()
.fold(0.0, |acc, x| acc + x.execs_per_sec(cur_time));
global_stats
.execs_per_sec_pretty
.push_str(&super::super::prettify_float(global_stats.execs_per_sec));
global_stats
}
/// Get process timing. `execs_per_sec_pretty` can be retrieved from `GlobalStats`.
#[must_use]
pub fn process_timing(&self, execs_per_sec_pretty: String) -> ProcessTiming {
let mut total_process_timing = ProcessTiming::new();
total_process_timing.exec_speed = execs_per_sec_pretty;
if self.client_stats().len() > 1 {
let mut new_path_time = Duration::default();
let mut new_objectives_time = Duration::default();
for client in self.client_stats().iter().filter(|client| client.enabled()) {
new_path_time = client.last_corpus_time().max(new_path_time);
new_objectives_time = client.last_objective_time().max(new_objectives_time);
}
if new_path_time > self.start_time() {
total_process_timing.last_new_entry = new_path_time - self.start_time();
}
if new_objectives_time > self.start_time() {
total_process_timing.last_saved_solution = new_objectives_time - self.start_time();
}
}
total_process_timing
}
/// Get map density
#[must_use]
pub fn map_density(&self) -> String {
self.client_stats()
.iter()
.filter(|client| client.enabled())
.filter_map(|client| client.get_user_stats("edges"))
.map(ToString::to_string)
.fold("0%".to_string(), cmp::max)
}
/// Get item geometry
#[cfg(feature = "std")]
#[must_use]
pub fn item_geometry(&self) -> ItemGeometry {
let mut total_item_geometry = ItemGeometry::new();
if self.client_stats().len() < 2 {
return total_item_geometry;
}
let mut ratio_a: u64 = 0;
let mut ratio_b: u64 = 0;
for client in self.client_stats().iter().filter(|client| client.enabled()) {
let afl_stats = client
.get_user_stats("AflStats")
.map_or("None".to_string(), ToString::to_string);
let stability = client.get_user_stats("stability").map_or(
UserStats::new(UserStatsValue::Ratio(0, 100), AggregatorOps::Avg),
Clone::clone,
);
if afl_stats != "None" {
let default_json = serde_json::json!({
"pending": 0,
"pend_fav": 0,
"imported": 0,
"own_finds": 0,
});
let afl_stats_json: Value =
serde_json::from_str(afl_stats.as_str()).unwrap_or(default_json);
total_item_geometry.pending +=
afl_stats_json["pending"].as_u64().unwrap_or_default();
total_item_geometry.pend_fav +=
afl_stats_json["pend_fav"].as_u64().unwrap_or_default();
total_item_geometry.own_finds +=
afl_stats_json["own_finds"].as_u64().unwrap_or_default();
total_item_geometry.imported +=
afl_stats_json["imported"].as_u64().unwrap_or_default();
}
if let UserStatsValue::Ratio(a, b) = stability.value() {
ratio_a += a;
ratio_b += b;
}
}
total_item_geometry.stability = format!("{}%", ratio_a * 100 / ratio_b);
total_item_geometry
}
}
/// Global statistics which aggregate the client stats.
#[derive(Debug)]
pub struct GlobalStats {
/// Run time since the fuzzing run started
pub run_time: Duration,
/// Pretty-printed run time
pub run_time_pretty: String,
/// Number of enabled client stats
pub client_stats_count: usize,
/// Amount of elements in the corpus (combined for all children)
pub corpus_size: u64,
/// Amount of elements in the objectives (combined for all children)
pub objective_size: u64,
/// Total executions
pub total_execs: u64,
/// Executions per second
pub execs_per_sec: f64,
/// Pretty-printed executions per second
pub execs_per_sec_pretty: String,
}

View File

@@ -5,7 +5,10 @@ pub mod manager;
pub mod perf_stats;
pub mod user_stats;
use alloc::{borrow::Cow, string::String};
use alloc::{
borrow::Cow,
string::{String, ToString},
};
use core::time::Duration;
use hashbrown::HashMap;
@@ -13,6 +16,8 @@ use libafl_bolts::current_time;
#[cfg(feature = "introspection")]
use perf_stats::ClientPerfStats;
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
use serde_json::Value;
use user_stats::UserStats;
#[cfg(feature = "afl_exec_sec")]
@@ -22,38 +27,164 @@ const CLIENT_STATS_TIME_WINDOW_SECS: u64 = 5; // 5 seconds
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ClientStats {
/// If this client is enabled. This is set to `true` the first time we see this client.
pub enabled: bool,
enabled: bool,
// monitor (maybe we need a separate struct?)
/// The corpus size for this client
pub corpus_size: u64,
corpus_size: u64,
/// The time for the last update of the corpus size
pub last_corpus_time: Duration,
last_corpus_time: Duration,
/// The total executions for this client
pub executions: u64,
executions: u64,
/// The number of executions of the previous state, in case a client decreases the number of executions (e.g. when restarting without saving the state)
pub prev_state_executions: u64,
prev_state_executions: u64,
/// The size of the objectives corpus for this client
pub objective_size: u64,
objective_size: u64,
/// The time for the last update of the objective size
pub last_objective_time: Duration,
last_objective_time: Duration,
/// The last reported executions for this client
#[cfg(feature = "afl_exec_sec")]
pub last_window_executions: u64,
last_window_executions: u64,
/// The last executions per sec
#[cfg(feature = "afl_exec_sec")]
pub last_execs_per_sec: f64,
last_execs_per_sec: f64,
/// The last time we got this information
pub last_window_time: Duration,
last_window_time: Duration,
/// the start time of the client
pub start_time: Duration,
start_time: Duration,
/// User-defined stats
pub user_stats: HashMap<Cow<'static, str>, UserStats>,
user_stats: HashMap<Cow<'static, str>, UserStats>,
/// Client performance statistics
#[cfg(feature = "introspection")]
pub introspection_stats: ClientPerfStats,
// This field is marked as skip_serializing and skip_deserializing, which
// means that when deserializing, it is filled with its default value, i.e.
// "all stats updated". This helps prevent anything unexpected: whenever we
// find that everything needs an update, we always invalidate the cache.
/// Status of the current client stats. This field is used to check
/// the validity of the currently cached global stats.
#[serde(skip_serializing, skip_deserializing)]
stats_status: ClientStatsStatus,
}
/// Status of the client stats
#[derive(Debug, Clone)]
struct ClientStatsStatus {
/// Basic stats, which could affect the global stats, have been updated
basic_stats_updated: bool,
}
impl Default for ClientStatsStatus {
fn default() -> Self {
ClientStatsStatus {
basic_stats_updated: true,
}
}
}
/// Data struct to process timings
#[derive(Debug, Default, Clone)]
pub struct ProcessTiming {
/// The start time
pub client_start_time: Duration,
/// The executions speed
pub exec_speed: String,
/// Timing of the last new corpus entry
pub last_new_entry: Duration,
/// Timing of the last new solution
pub last_saved_solution: Duration,
}
impl ProcessTiming {
/// Create a new [`ProcessTiming`] struct
#[must_use]
pub fn new() -> Self {
Self {
exec_speed: "0".to_string(),
..Default::default()
}
}
}
/// The geometry of a single data point
#[expect(missing_docs)]
#[derive(Debug, Default, Clone)]
pub struct ItemGeometry {
pub pending: u64,
pub pend_fav: u64,
pub own_finds: u64,
pub imported: u64,
pub stability: String,
}
impl ItemGeometry {
/// Create a new [`ItemGeometry`]
#[must_use]
pub fn new() -> Self {
Self {
stability: "0%".to_string(),
..Default::default()
}
}
}
impl ClientStats {
/// If this client is enabled. This is set to `true` the first time we see this client.
#[must_use]
pub fn enabled(&self) -> bool {
self.enabled
}
/// The corpus size for this client
#[must_use]
pub fn corpus_size(&self) -> u64 {
self.corpus_size
}
/// The time for the last update of the corpus size
#[must_use]
pub fn last_corpus_time(&self) -> Duration {
self.last_corpus_time
}
/// The total executions for this client
#[must_use]
pub fn executions(&self) -> u64 {
self.executions
}
/// The number of executions of the previous state, in case a client decreases the number of executions (e.g. when restarting without saving the state)
#[must_use]
pub fn prev_state_executions(&self) -> u64 {
self.prev_state_executions
}
/// The size of the objectives corpus for this client
#[must_use]
pub fn objective_size(&self) -> u64 {
self.objective_size
}
/// The time for the last update of the objective size
#[must_use]
pub fn last_objective_time(&self) -> Duration {
self.last_objective_time
}
/// The last time we got this information
#[must_use]
pub fn last_window_time(&self) -> Duration {
self.last_window_time
}
/// the start time of the client
#[must_use]
pub fn start_time(&self) -> Duration {
self.start_time
}
/// User-defined stats
#[must_use]
pub fn user_stats(&self) -> &HashMap<Cow<'static, str>, UserStats> {
&self.user_stats
}
/// Clear the current stats status. This is used before the user updates `ClientStats`.
fn clear_stats_status(&mut self) {
self.stats_status.basic_stats_updated = false;
}
/// We got new information about executions for this client, insert it.
#[cfg(feature = "afl_exec_sec")]
pub fn update_executions(&mut self, executions: u64, cur_time: Duration) {
@@ -70,6 +201,7 @@ impl ClientStats {
self.prev_state_executions = self.executions;
}
self.executions = self.prev_state_executions + executions;
self.stats_status.basic_stats_updated = true;
}
/// We got new information about executions for this client, insert it.
@@ -80,19 +212,24 @@ impl ClientStats {
self.prev_state_executions = self.executions;
}
self.executions = self.prev_state_executions + executions;
self.stats_status.basic_stats_updated = true;
}
/// We got new information about the corpus size for this client, insert it.
pub fn update_corpus_size(&mut self, corpus_size: u64) {
self.corpus_size = corpus_size;
self.last_corpus_time = current_time();
self.stats_status.basic_stats_updated = true;
}
/// We got new information about the objective corpus size for this client, insert it.
pub fn update_objective_size(&mut self, objective_size: u64) {
self.objective_size = objective_size;
self.stats_status.basic_stats_updated = true;
}
// This will not update the stats status, since the value this function changes
// does not affect global stats.
/// Get the calculated executions per second for this client
#[expect(clippy::cast_precision_loss, clippy::cast_sign_loss)]
#[cfg(feature = "afl_exec_sec")]
@@ -124,6 +261,7 @@ impl ClientStats {
self.last_execs_per_sec
}
// This will not update the stats status, since no value is changed
/// Get the calculated executions per second for this client
#[expect(clippy::cast_precision_loss, clippy::cast_sign_loss)]
#[cfg(not(feature = "afl_exec_sec"))]
@@ -142,11 +280,15 @@ impl ClientStats {
(self.executions as f64) / elapsed
}
// This will not update the stats status, since the value this function changes
// does not affect global stats.
/// Executions per second
pub fn execs_per_sec_pretty(&mut self, cur_time: Duration) -> String {
prettify_float(self.execs_per_sec(cur_time))
}
// This will not update the stats status, since the value this function changes
// does not affect global stats.
/// Update the user-defined stat with name and value
pub fn update_user_stats(
&mut self,
@@ -156,8 +298,8 @@ impl ClientStats {
self.user_stats.insert(name, value)
}
#[must_use]
/// Get a user-defined stat using the name
#[must_use]
pub fn get_user_stats(&self, name: &str) -> Option<&UserStats> {
self.user_stats.get(name)
}
@@ -167,6 +309,72 @@ impl ClientStats {
pub fn update_introspection_stats(&mut self, introspection_stats: ClientPerfStats) {
self.introspection_stats = introspection_stats;
}
/// Get the process timing of the current client.
pub fn process_timing(&mut self) -> ProcessTiming {
let client_start_time = self.start_time();
let last_new_entry = if self.last_corpus_time() > self.start_time() {
self.last_corpus_time() - self.start_time()
} else {
Duration::default()
};
let last_saved_solution = if self.last_objective_time() > self.start_time() {
self.last_objective_time() - self.start_time()
} else {
Duration::default()
};
let exec_speed = self.execs_per_sec_pretty(current_time());
ProcessTiming {
client_start_time,
exec_speed,
last_new_entry,
last_saved_solution,
}
}
/// Get the map density of the current client
#[must_use]
pub fn map_density(&self) -> String {
self.get_user_stats("edges")
.map_or("0%".to_string(), ToString::to_string)
}
/// Get the item geometry of the current client
#[cfg(feature = "std")]
#[must_use]
pub fn item_geometry(&self) -> ItemGeometry {
let default_json = serde_json::json!({
"pending": 0,
"pend_fav": 0,
"imported": 0,
"own_finds": 0,
});
let afl_stats = self
.get_user_stats("AflStats")
.map_or(default_json.to_string(), ToString::to_string);
let afl_stats_json: Value =
serde_json::from_str(afl_stats.as_str()).unwrap_or(default_json);
let pending = afl_stats_json["pending"].as_u64().unwrap_or_default();
let pend_fav = afl_stats_json["pend_fav"].as_u64().unwrap_or_default();
let imported = afl_stats_json["imported"].as_u64().unwrap_or_default();
let own_finds = afl_stats_json["own_finds"].as_u64().unwrap_or_default();
let stability = self
.get_user_stats("stability")
.map_or("0%".to_string(), ToString::to_string);
ItemGeometry {
pending,
pend_fav,
own_finds,
imported,
stability,
}
}
}
/// Prettifies float values for human-readable output
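
The `item_geometry()` logic above leans on `unwrap_or`/`unwrap_or_default` fallbacks so that a missing or malformed "AflStats" entry degrades to zeros instead of panicking. A standalone illustration of that parsing:

use serde_json::Value;

fn main() {
    let default_json = serde_json::json!({
        "pending": 0, "pend_fav": 0, "imported": 0, "own_finds": 0,
    });
    let reported = r#"{"pending": 5, "pend_fav": 2, "imported": 1, "own_finds": 7}"#;
    // Malformed input falls back to the defaults rather than panicking
    let afl_stats_json: Value = serde_json::from_str(reported).unwrap_or(default_json);
    assert_eq!(afl_stats_json["pending"].as_u64().unwrap_or_default(), 5);
    assert_eq!(afl_stats_json["missing"].as_u64().unwrap_or_default(), 0);
}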

View File

@@ -1,7 +1,7 @@
//! User-defined statistics
mod user_stats_value;
use alloc::string::ToString;
use alloc::borrow::Cow;
use core::fmt;
use serde::{Deserialize, Serialize};
@@ -58,12 +58,18 @@ pub enum AggregatorOps {
Max,
}
// clippy::ptr_arg is allowed here to avoid one unnecessary deep clone when
// inserting the name into the user_stats HashMap.
/// Aggregate user statistics according to their ops
pub(super) fn aggregate_user_stats(client_stats_manager: &mut ClientStatsManager, name: &str) {
#[allow(clippy::ptr_arg)]
pub(super) fn aggregate_user_stats(
client_stats_manager: &mut ClientStatsManager,
name: &Cow<'static, str>,
) {
let mut gather = client_stats_manager
.client_stats()
.iter()
.filter_map(|client| client.user_stats.get(name));
.filter_map(|client| client.user_stats.get(name.as_ref()));
let gather_count = gather.clone().count();
@@ -119,5 +125,5 @@ pub(super) fn aggregate_user_stats(client_stats_manager: &mut ClientStatsManager
client_stats_manager
.cached_aggregated_user_stats
.insert(name.to_string(), init);
.insert(name.clone(), init);
}
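
Call sites now hand `aggregate` a `&Cow<'static, str>`; with a borrowed static name, the final `name.clone()` into the cache map copies a pointer instead of reallocating the string. A hedged sketch of a call site (module path assumed):

use std::borrow::Cow;

use libafl::statistics::manager::ClientStatsManager;

fn aggregate_edges(client_stats_manager: &mut ClientStatsManager) {
    let name: Cow<'static, str> = Cow::Borrowed("edges");
    // Cheap for Cow::Borrowed: no deep string copy on insert
    client_stats_manager.aggregate(&name);
}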