From 800f2c87885b528690cbd1f7f2597cd99ad25584 Mon Sep 17 00:00:00 2001
From: Alwin Berger
Date: Fri, 21 Feb 2025 18:29:44 +0100
Subject: [PATCH] feedback for job woet

---
 fuzzers/FRET/Cargo.toml             |  8 +++-
 fuzzers/FRET/src/systemstate/mod.rs | 17 +++++---
 fuzzers/FRET/src/systemstate/stg.rs | 62 +++++++++++++++++++----------
 3 files changed, 58 insertions(+), 29 deletions(-)

diff --git a/fuzzers/FRET/Cargo.toml b/fuzzers/FRET/Cargo.toml
index 302aa229bf..8399b58f65 100644
--- a/fuzzers/FRET/Cargo.toml
+++ b/fuzzers/FRET/Cargo.toml
@@ -27,9 +27,12 @@ trace_reads = [ "trace_stg", "trace_job_response_times" ]
 # feedbacks
 feed_stg = [ "trace_stg", "observe_systemstate" ]
 feed_stg_edge = [ "feed_stg"]
+feed_stg_abb_woet = [ "feed_stg"]
 feed_stg_pathhash = [ "feed_stg"]
 feed_stg_abbhash = [ "feed_stg"]
 feed_stg_aggregatehash = [ "feed_stg"]
+feed_job_woet = [ "trace_job_response_times"]
+feed_job_wort = [ "trace_job_response_times"]
 mutate_stg = [ "observe_systemstate", "trace_reads" ]
 feed_longest = [ ]
 feed_afl = [ "observe_edges" ]
@@ -47,10 +50,11 @@ sched_stg_pathhash = ['sched_stg'] # every path in the stg
 sched_stg_abbhash = ['sched_stg'] # every path of abbs
 sched_stg_aggregatehash = ['sched_stg'] # every aggregated path (order independent)
 # overall_configs
-config_genetic = ["gensize_100","feed_genetic","sched_genetic","trace_stg"]
+config_genetic = ["feed_genetic","sched_genetic","trace_stg"]
 config_afl = ["feed_afl","sched_afl","trace_stg"]
 config_frafl = ["feed_afl","sched_afl","feed_longest","trace_stg"]
-config_stg = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"]
+config_stg = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg","feed_job_wort"]
+config_stg_woet = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg","feed_job_wort","feed_job_woet","feed_stg_abb_woet"]
 # config_stg_aggregate = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"]
 config_stg_abbpath = ["feed_stg_abbhash","sched_stg_abbhash","mutate_stg"]
 config_stg_edge = ["feed_stg_edge","sched_stg_edge","mutate_stg"]
diff --git a/fuzzers/FRET/src/systemstate/mod.rs b/fuzzers/FRET/src/systemstate/mod.rs
index 29cfc08012..ce2166d4af 100644
--- a/fuzzers/FRET/src/systemstate/mod.rs
+++ b/fuzzers/FRET/src/systemstate/mod.rs
@@ -263,10 +263,11 @@ impl RTOSJob {
 #[derive(Debug, Default, Serialize, Deserialize, Clone)]
 pub struct RTOSTask {
     pub name: String,
-    pub worst_bytes: Vec<u8>,
+    pub woet_bytes: Vec<u8>,
     pub woet_ticks: u64,
     pub woet_per_abb: Vec,
     pub abbs: Vec,
+    pub wort_ticks: u64,
     hash_cache: u64
 }
@@ -299,14 +300,19 @@ impl RTOSTask {
             self.hash_cache
         }
     }
+    /// Update woet (time, inputs) and wort (time only) if the new instance is better
     pub fn try_update(&mut self, other: &RTOSJob) -> bool {
         assert_eq!(self.get_hash(), other.get_hash_cached());
         let mut ret = false;
         if other.exec_ticks > self.woet_ticks {
             self.woet_ticks = other.exec_ticks;
             self.woet_per_abb = other.ticks_per_abb.clone();
-            self.worst_bytes = other.mem_reads.iter().sorted_by(|a,b| a.0.cmp(&b.0)).map(|x| x.1).collect();
-            ret = true;
+            self.woet_bytes = other.mem_reads.iter().sorted_by(|a,b| a.0.cmp(&b.0)).map(|x| x.1).collect();
+            ret |= true;
+        }
+        if other.response_time() > self.wort_ticks {
+            self.wort_ticks = other.response_time();
+            ret |= true;
         }
         ret
     }
@@ -314,16 +320,17 @@ pub fn from_instance(input: &RTOSJob) -> Self {
         let c = input.get_hash_cached();
         Self {
             name: input.name.clone(),
-            worst_bytes: input.mem_reads.iter().map(|x| x.1.clone()).collect(),
+            woet_bytes: input.mem_reads.iter().map(|x| x.1.clone()).collect(),
             woet_ticks: input.exec_ticks,
             woet_per_abb: input.ticks_per_abb.clone(),
             abbs: input.abbs.clone(),
+            wort_ticks: input.response_time(),
             hash_cache: c
         }
     }
     pub fn map_bytes_onto(&self, input: &RTOSJob, offset: Option<u32>) -> Vec<(u32,u8)> {
         if input.mem_reads.len() == 0 {return vec![];}
-        let ret = input.mem_reads.iter().take(self.worst_bytes.len()).enumerate().filter_map(|(idx,(addr,oldbyte))| if self.worst_bytes[idx]!=*oldbyte {Some((*addr-offset.unwrap_or_default(), self.worst_bytes[idx]))} else {None}).collect();
+        let ret = input.mem_reads.iter().take(self.woet_bytes.len()).enumerate().filter_map(|(idx,(addr,oldbyte))| if self.woet_bytes[idx]!=*oldbyte {Some((*addr-offset.unwrap_or_default(), self.woet_bytes[idx]))} else {None}).collect();
         // eprintln!("Mapped: {:?}", ret);
         ret
     }
diff --git a/fuzzers/FRET/src/systemstate/stg.rs b/fuzzers/FRET/src/systemstate/stg.rs
index 92692a8052..0096df6d3d 100644
--- a/fuzzers/FRET/src/systemstate/stg.rs
+++ b/fuzzers/FRET/src/systemstate/stg.rs
@@ -167,9 +167,9 @@ where
     entrypoint: NodeIndex,
     exitpoint: NodeIndex,
     // Metadata about aggregated traces. aggegated meaning, order has been removed
-    worst_observed_per_aggegated_path: HashMap,u64>,
-    worst_observed_per_abb_path: HashMap,
-    worst_observed_per_stg_path: HashMap,
+    wort_per_aggegated_path: HashMap,u64>,
+    wort_per_abb_path: HashMap,
+    wort_per_stg_path: HashMap,
     worst_abb_exec_count: HashMap,
     // Metadata about job instances
     pub worst_task_jobs: HashMap<u64, RTOSTask>,
@@ -207,9 +207,9 @@ where
             stgnode_index: index,
             entrypoint,
             exitpoint,
-            worst_observed_per_aggegated_path: HashMap::new(),
-            worst_observed_per_abb_path: HashMap::new(),
-            worst_observed_per_stg_path: HashMap::new(),
+            wort_per_aggegated_path: HashMap::new(),
+            wort_per_abb_path: HashMap::new(),
+            wort_per_stg_path: HashMap::new(),
             worst_abb_exec_count: HashMap::new(),
             systemstate_index,
             state_abb_hash_index,
@@ -389,7 +389,7 @@ where
 }
 #[cfg(feature = "feed_stg")]
 const INTEREST_EDGE : bool = true;
-#[cfg(feature = "feed_stg")]
+#[cfg(feature = "feed_stg_abb_woet")]
 const INTEREST_EDGE_WEIGHT : bool = true;
 #[cfg(feature = "feed_stg")]
 const INTEREST_NODE : bool = true;
@@ -399,10 +399,14 @@ const INTEREST_PATH : bool = true;
 const INTEREST_ABBPATH : bool = true;
 #[cfg(feature = "feed_stg_aggregatehash")]
 const INTEREST_AGGREGATE : bool = true;
+#[cfg(feature = "feed_job_wort")]
+pub const INTEREST_JOB_RT : bool = true;
+#[cfg(feature = "feed_job_woet")]
+pub const INTEREST_JOB_ET : bool = true;
 
 #[cfg(not(feature = "feed_stg"))]
 const INTEREST_EDGE : bool = false;
-#[cfg(not(feature = "feed_stg"))]
+#[cfg(not(feature = "feed_stg_abb_woet"))]
 const INTEREST_EDGE_WEIGHT : bool = true;
 #[cfg(not(feature = "feed_stg"))]
 const INTEREST_NODE : bool = false;
@@ -412,8 +416,10 @@ const INTEREST_PATH : bool = false;
 const INTEREST_ABBPATH : bool = false;
 #[cfg(not(feature = "feed_stg_aggregatehash"))]
 const INTEREST_AGGREGATE : bool = false;
-
-const INTEREST_JOB_INSTANCE : bool = true;
+#[cfg(not(feature = "feed_job_wort"))]
+pub const INTEREST_JOB_RT : bool = false;
+#[cfg(not(feature = "feed_job_woet"))]
+pub const INTEREST_JOB_ET : bool = false;
 
 fn set_observer_map(trace : &Vec) {
     // dbg!(trace);
@@ -598,9 +604,11 @@ where
         let last_runtime = clock_observer.last_runtime();
         #[cfg(feature = "trace_job_response_times")]
-        let worst_jobs = trace.worst_jobs_per_task_by_response_time();
+        let worst_jobs_rt = trace.worst_jobs_per_task_by_response_time();
         #[cfg(feature = "trace_job_response_times")]
-        let worst_select_job = if let Some(t) = self.select_task.as_ref() {worst_jobs.get(t)} else {None};
+        let worst_jobs_et = trace.worst_jobs_per_task_by_exec_time();
+        #[cfg(feature = "trace_job_response_times")]
+        let worst_select_job = if let Some(t) = self.select_task.as_ref() {worst_jobs_rt.get(t)} else {None};
         #[cfg(feature = "trace_job_response_times")]
         let last_runtime = if let Some(t) = self.select_task.as_ref() {worst_select_job.map_or(0, |x| x.response_time())} else {last_runtime};
@@ -627,8 +635,9 @@ where
         set_observer_map(&edgetrace.iter().map(|x| x.0).collect::>());
 
         // --------------------------------- Update job instances
-        for i in worst_jobs.iter() {
-            interesting |= INTEREST_JOB_INSTANCE && if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
+        #[cfg(feature = "trace_job_response_times")]
+        for i in worst_jobs_rt.iter() {
+            interesting |= INTEREST_JOB_RT & if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
                 // eprintln!("Job instance already present");
                 x.try_update(i.1)
             } else {
@@ -637,26 +646,35 @@ where
                 true
             }
         };
+        #[cfg(feature = "trace_job_response_times")]
+        for i in worst_jobs_et.iter() {
+            interesting |= INTEREST_JOB_ET & if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
+                x.try_update(i.1)
+            } else {
+                feedbackstate.worst_task_jobs.insert(i.1.get_hash_cached(), RTOSTask::from_instance(&i.1));
+                true
+            }
+        };
         self.last_job_trace = Some(trace.jobs().clone());
         // dbg!(&observer.job_instances);
         {
             let h = get_generic_hash(&edgetrace);
-            if let Some(x) = feedbackstate.worst_observed_per_stg_path.get_mut(&h) {
+            if let Some(x) = feedbackstate.wort_per_stg_path.get_mut(&h) {
                 let t = last_runtime;
                 if t > *x {
                     *x = t;
                     interesting |= INTEREST_PATH;
                 }
             } else {
-                feedbackstate.worst_observed_per_stg_path.insert(h, last_runtime);
+                feedbackstate.wort_per_stg_path.insert(h, last_runtime);
                 updated = true;
                 interesting |= INTEREST_PATH;
             }
         }
 
         #[cfg(not(feature = "trace_job_response_times"))]
-        let tmp = StgFeedback::abbs_in_exec_order(&observer.last_trace);
+        let tmp = StgFeedback::::abbs_in_exec_order(&trace.intervals());
         #[cfg(feature = "trace_job_response_times")]
         let tmp = {
             if let Some(worst_instance) = worst_select_job {
@@ -675,14 +693,14 @@ where
             let h = get_generic_hash(&tmp);
             self.last_abbs_hash = Some(h);
             // order of execution is relevant
-            if let Some(x) = feedbackstate.worst_observed_per_abb_path.get_mut(&h) {
+            if let Some(x) = feedbackstate.wort_per_abb_path.get_mut(&h) {
                 let t = last_runtime;
                 if t > *x {
                     *x = t;
                     interesting |= INTEREST_ABBPATH;
                 }
             } else {
-                feedbackstate.worst_observed_per_abb_path.insert(h, last_runtime);
+                feedbackstate.wort_per_abb_path.insert(h, last_runtime);
                 interesting |= INTEREST_ABBPATH;
             }
         }
@@ -706,14 +724,14 @@ where
             self.last_top_abb_hashes = Some(top_indices);
             self.last_aggregate_hash = Some(get_generic_hash(&_tmp));
 
-            if let Some(x) = feedbackstate.worst_observed_per_aggegated_path.get_mut(&_tmp) {
+            if let Some(x) = feedbackstate.wort_per_aggegated_path.get_mut(&_tmp) {
                 let t = last_runtime;
                 if t > *x {
                     *x = t;
                     interesting |= INTEREST_AGGREGATE;
                 }
             } else {
-                feedbackstate.worst_observed_per_aggegated_path.insert(_tmp, last_runtime);
+                feedbackstate.wort_per_aggegated_path.insert(_tmp, last_runtime);
                 interesting |= INTEREST_AGGREGATE;
             }
         }
@@ -737,7 +755,7 @@ where
                 .create(true)
                 .append(true)
                 .open(dp).expect("Could not open stgsize");
-            writeln!(file, "{},{},{},{},{}", feedbackstate.graph.edge_count(), feedbackstate.graph.node_count(), feedbackstate.worst_observed_per_aggegated_path.len(),feedbackstate.worst_observed_per_stg_path.len(), timestamp).expect("Write to dump failed");
+            writeln!(file, "{},{},{},{},{}", feedbackstate.graph.edge_count(), feedbackstate.graph.node_count(), feedbackstate.wort_per_aggegated_path.len(),feedbackstate.wort_per_stg_path.len(), timestamp).expect("Write to dump failed");
             }
         }
         Ok(interesting)
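--
Review note, not part of the patch: the sketch below is a minimal, self-contained illustration of the bookkeeping that the new `wort_ticks` field adds next to `woet_ticks` (worst observed response time vs. worst observed execution time). `Job` and `TaskRecord` are simplified stand-ins for `RTOSJob`/`RTOSTask`, which also carry name hashes, memory reads and per-ABB data; only the update logic of `try_update` is mirrored here, under the assumption that response time is measured from release to completion.

/// Simplified stand-ins for RTOSJob/RTOSTask; field names here are illustrative.
#[derive(Debug, Default)]
struct Job {
    exec_ticks: u64,   // pure execution time of this job instance
    release_tick: u64, // when the job became ready
    finish_tick: u64,  // when the job completed
}

impl Job {
    /// Response time = completion minus release (includes preemption/blocking).
    fn response_time(&self) -> u64 {
        self.finish_tick - self.release_tick
    }
}

#[derive(Debug, Default)]
struct TaskRecord {
    woet_ticks: u64, // worst observed execution time
    wort_ticks: u64, // worst observed response time
}

impl TaskRecord {
    /// Mirrors the patched RTOSTask::try_update: the two bounds are updated
    /// independently, and the result is `true` if either bound was raised.
    fn try_update(&mut self, job: &Job) -> bool {
        let mut ret = false;
        if job.exec_ticks > self.woet_ticks {
            self.woet_ticks = job.exec_ticks;
            ret |= true;
        }
        if job.response_time() > self.wort_ticks {
            self.wort_ticks = job.response_time();
            ret |= true;
        }
        ret
    }
}

fn main() {
    let mut rec = TaskRecord::default();
    // The first job sets both baselines.
    assert!(rec.try_update(&Job { exec_ticks: 90, release_tick: 0, finish_tick: 95 }));
    // A shorter but heavily preempted job raises only the response-time bound.
    assert!(rec.try_update(&Job { exec_ticks: 40, release_tick: 0, finish_tick: 200 }));
    assert_eq!((rec.woet_ticks, rec.wort_ticks), (90, 200));
}

Keeping the two bounds independent is what lets `feed_job_wort` and `feed_job_woet` act as separate feedback signals: an input is interesting if it raises either the response-time or the execution-time record of a task.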