feedback for job woet

This commit is contained in:
Alwin Berger 2025-02-21 18:29:44 +01:00
parent ca8d9fdf0a
commit 800f2c8788
3 changed files with 58 additions and 29 deletions

View File

@ -27,9 +27,12 @@ trace_reads = [ "trace_stg", "trace_job_response_times" ]
# feedbacks # feedbacks
feed_stg = [ "trace_stg", "observe_systemstate" ] feed_stg = [ "trace_stg", "observe_systemstate" ]
feed_stg_edge = [ "feed_stg"] feed_stg_edge = [ "feed_stg"]
feed_stg_abb_woet = [ "feed_stg"]
feed_stg_pathhash = [ "feed_stg"] feed_stg_pathhash = [ "feed_stg"]
feed_stg_abbhash = [ "feed_stg"] feed_stg_abbhash = [ "feed_stg"]
feed_stg_aggregatehash = [ "feed_stg"] feed_stg_aggregatehash = [ "feed_stg"]
feed_job_woet = [ "trace_job_response_times"]
feed_job_wort = [ "trace_job_response_times"]
mutate_stg = [ "observe_systemstate", "trace_reads" ] mutate_stg = [ "observe_systemstate", "trace_reads" ]
feed_longest = [ ] feed_longest = [ ]
feed_afl = [ "observe_edges" ] feed_afl = [ "observe_edges" ]
@ -47,10 +50,11 @@ sched_stg_pathhash = ['sched_stg'] # every path in the stg
sched_stg_abbhash = ['sched_stg'] # every path of abbs sched_stg_abbhash = ['sched_stg'] # every path of abbs
sched_stg_aggregatehash = ['sched_stg'] # every aggregated path (order independent) sched_stg_aggregatehash = ['sched_stg'] # every aggregated path (order independent)
# overall_configs # overall_configs
config_genetic = ["gensize_100","feed_genetic","sched_genetic","trace_stg"] config_genetic = ["feed_genetic","sched_genetic","trace_stg"]
config_afl = ["feed_afl","sched_afl","trace_stg"] config_afl = ["feed_afl","sched_afl","trace_stg"]
config_frafl = ["feed_afl","sched_afl","feed_longest","trace_stg"] config_frafl = ["feed_afl","sched_afl","feed_longest","trace_stg"]
config_stg = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"] config_stg = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg","feed_job_wort"]
config_stg_woet = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg","feed_job_wort","feed_job_woet","feed_stg_abb_woet"]
# config_stg_aggregate = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"] # config_stg_aggregate = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"]
config_stg_abbpath = ["feed_stg_abbhash","sched_stg_abbhash","mutate_stg"] config_stg_abbpath = ["feed_stg_abbhash","sched_stg_abbhash","mutate_stg"]
config_stg_edge = ["feed_stg_edge","sched_stg_edge","mutate_stg"] config_stg_edge = ["feed_stg_edge","sched_stg_edge","mutate_stg"]

View File

@ -263,10 +263,11 @@ impl RTOSJob {
#[derive(Debug, Default, Serialize, Deserialize, Clone)] #[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RTOSTask { pub struct RTOSTask {
pub name: String, pub name: String,
pub worst_bytes: Vec<u8>, pub woet_bytes: Vec<u8>,
pub woet_ticks: u64, pub woet_ticks: u64,
pub woet_per_abb: Vec<u64>, pub woet_per_abb: Vec<u64>,
pub abbs: Vec<AtomicBasicBlock>, pub abbs: Vec<AtomicBasicBlock>,
pub wort_ticks: u64,
hash_cache: u64 hash_cache: u64
} }
@ -299,14 +300,19 @@ impl RTOSTask {
self.hash_cache self.hash_cache
} }
} }
/// Update WOET (worst observed execution time; stores the time and the triggering inputs) and WORT (worst observed response time; time only) if the new instance is better
pub fn try_update(&mut self, other: &RTOSJob) -> bool { pub fn try_update(&mut self, other: &RTOSJob) -> bool {
assert_eq!(self.get_hash(), other.get_hash_cached()); assert_eq!(self.get_hash(), other.get_hash_cached());
let mut ret = false; let mut ret = false;
if other.exec_ticks > self.woet_ticks { if other.exec_ticks > self.woet_ticks {
self.woet_ticks = other.exec_ticks; self.woet_ticks = other.exec_ticks;
self.woet_per_abb = other.ticks_per_abb.clone(); self.woet_per_abb = other.ticks_per_abb.clone();
self.worst_bytes = other.mem_reads.iter().sorted_by(|a,b| a.0.cmp(&b.0)).map(|x| x.1).collect(); self.woet_bytes = other.mem_reads.iter().sorted_by(|a,b| a.0.cmp(&b.0)).map(|x| x.1).collect();
ret = true; ret |= true;
}
if other.response_time() > self.wort_ticks {
self.wort_ticks = other.response_time();
ret |= true;
} }
ret ret
} }
@ -314,16 +320,17 @@ impl RTOSTask {
let c = input.get_hash_cached(); let c = input.get_hash_cached();
Self { Self {
name: input.name.clone(), name: input.name.clone(),
worst_bytes: input.mem_reads.iter().map(|x| x.1.clone()).collect(), woet_bytes: input.mem_reads.iter().map(|x| x.1.clone()).collect(),
woet_ticks: input.exec_ticks, woet_ticks: input.exec_ticks,
woet_per_abb: input.ticks_per_abb.clone(), woet_per_abb: input.ticks_per_abb.clone(),
abbs: input.abbs.clone(), abbs: input.abbs.clone(),
wort_ticks: input.response_time(),
hash_cache: c hash_cache: c
} }
} }
pub fn map_bytes_onto(&self, input: &RTOSJob, offset: Option<u32>) -> Vec<(u32,u8)> { pub fn map_bytes_onto(&self, input: &RTOSJob, offset: Option<u32>) -> Vec<(u32,u8)> {
if input.mem_reads.len() == 0 {return vec![];} if input.mem_reads.len() == 0 {return vec![];}
let ret = input.mem_reads.iter().take(self.worst_bytes.len()).enumerate().filter_map(|(idx,(addr,oldbyte))| if self.worst_bytes[idx]!=*oldbyte {Some((*addr-offset.unwrap_or_default(), self.worst_bytes[idx]))} else {None}).collect(); let ret = input.mem_reads.iter().take(self.woet_bytes.len()).enumerate().filter_map(|(idx,(addr,oldbyte))| if self.woet_bytes[idx]!=*oldbyte {Some((*addr-offset.unwrap_or_default(), self.woet_bytes[idx]))} else {None}).collect();
// eprintln!("Mapped: {:?}", ret); // eprintln!("Mapped: {:?}", ret);
ret ret
} }

View File

@ -167,9 +167,9 @@ where
entrypoint: NodeIndex, entrypoint: NodeIndex,
exitpoint: NodeIndex, exitpoint: NodeIndex,
// Metadata about aggregated traces. aggregated meaning, order has been removed // Metadata about aggregated traces. aggregated meaning, order has been removed
worst_observed_per_aggegated_path: HashMap<Vec<AtomicBasicBlock>,u64>, wort_per_aggegated_path: HashMap<Vec<AtomicBasicBlock>,u64>,
worst_observed_per_abb_path: HashMap<u64,u64>, wort_per_abb_path: HashMap<u64,u64>,
worst_observed_per_stg_path: HashMap<u64,u64>, wort_per_stg_path: HashMap<u64,u64>,
worst_abb_exec_count: HashMap<AtomicBasicBlock, usize>, worst_abb_exec_count: HashMap<AtomicBasicBlock, usize>,
// Metadata about job instances // Metadata about job instances
pub worst_task_jobs: HashMap<u64, RTOSTask>, pub worst_task_jobs: HashMap<u64, RTOSTask>,
@ -207,9 +207,9 @@ where
stgnode_index: index, stgnode_index: index,
entrypoint, entrypoint,
exitpoint, exitpoint,
worst_observed_per_aggegated_path: HashMap::new(), wort_per_aggegated_path: HashMap::new(),
worst_observed_per_abb_path: HashMap::new(), wort_per_abb_path: HashMap::new(),
worst_observed_per_stg_path: HashMap::new(), wort_per_stg_path: HashMap::new(),
worst_abb_exec_count: HashMap::new(), worst_abb_exec_count: HashMap::new(),
systemstate_index, systemstate_index,
state_abb_hash_index, state_abb_hash_index,
@ -389,7 +389,7 @@ where
} }
#[cfg(feature = "feed_stg")] #[cfg(feature = "feed_stg")]
const INTEREST_EDGE : bool = true; const INTEREST_EDGE : bool = true;
#[cfg(feature = "feed_stg")] #[cfg(feature = "feed_stg_abb_woet")]
const INTEREST_EDGE_WEIGHT : bool = true; const INTEREST_EDGE_WEIGHT : bool = true;
#[cfg(feature = "feed_stg")] #[cfg(feature = "feed_stg")]
const INTEREST_NODE : bool = true; const INTEREST_NODE : bool = true;
@ -399,10 +399,14 @@ const INTEREST_PATH : bool = true;
const INTEREST_ABBPATH : bool = true; const INTEREST_ABBPATH : bool = true;
#[cfg(feature = "feed_stg_aggregatehash")] #[cfg(feature = "feed_stg_aggregatehash")]
const INTEREST_AGGREGATE : bool = true; const INTEREST_AGGREGATE : bool = true;
#[cfg(feature = "feed_job_wort")]
pub const INTEREST_JOB_RT : bool = true;
#[cfg(feature = "feed_job_woet")]
pub const INTEREST_JOB_ET : bool = true;
#[cfg(not(feature = "feed_stg"))] #[cfg(not(feature = "feed_stg"))]
const INTEREST_EDGE : bool = false; const INTEREST_EDGE : bool = false;
#[cfg(not(feature = "feed_stg"))] #[cfg(not(feature = "feed_stg_abb_woet"))]
const INTEREST_EDGE_WEIGHT : bool = true; const INTEREST_EDGE_WEIGHT : bool = true;
#[cfg(not(feature = "feed_stg"))] #[cfg(not(feature = "feed_stg"))]
const INTEREST_NODE : bool = false; const INTEREST_NODE : bool = false;
@ -412,8 +416,10 @@ const INTEREST_PATH : bool = false;
const INTEREST_ABBPATH : bool = false; const INTEREST_ABBPATH : bool = false;
#[cfg(not(feature = "feed_stg_aggregatehash"))] #[cfg(not(feature = "feed_stg_aggregatehash"))]
const INTEREST_AGGREGATE : bool = false; const INTEREST_AGGREGATE : bool = false;
#[cfg(not(feature = "feed_job_wort"))]
const INTEREST_JOB_INSTANCE : bool = true; pub const INTEREST_JOB_RT : bool = false;
#[cfg(not(feature = "feed_job_woet"))]
pub const INTEREST_JOB_ET : bool = false;
fn set_observer_map(trace : &Vec<EdgeIndex>) { fn set_observer_map(trace : &Vec<EdgeIndex>) {
// dbg!(trace); // dbg!(trace);
@ -598,9 +604,11 @@ where
let last_runtime = clock_observer.last_runtime(); let last_runtime = clock_observer.last_runtime();
#[cfg(feature = "trace_job_response_times")] #[cfg(feature = "trace_job_response_times")]
let worst_jobs = trace.worst_jobs_per_task_by_response_time(); let worst_jobs_rt = trace.worst_jobs_per_task_by_response_time();
#[cfg(feature = "trace_job_response_times")] #[cfg(feature = "trace_job_response_times")]
let worst_select_job = if let Some(t) = self.select_task.as_ref() {worst_jobs.get(t)} else {None}; let worst_jobs_et = trace.worst_jobs_per_task_by_exec_time();
#[cfg(feature = "trace_job_response_times")]
let worst_select_job = if let Some(t) = self.select_task.as_ref() {worst_jobs_rt.get(t)} else {None};
#[cfg(feature = "trace_job_response_times")] #[cfg(feature = "trace_job_response_times")]
let last_runtime = if let Some(t) = self.select_task.as_ref() {worst_select_job.map_or(0, |x| x.response_time())} else {last_runtime}; let last_runtime = if let Some(t) = self.select_task.as_ref() {worst_select_job.map_or(0, |x| x.response_time())} else {last_runtime};
@ -627,8 +635,9 @@ where
set_observer_map(&edgetrace.iter().map(|x| x.0).collect::<Vec<_>>()); set_observer_map(&edgetrace.iter().map(|x| x.0).collect::<Vec<_>>());
// --------------------------------- Update job instances // --------------------------------- Update job instances
for i in worst_jobs.iter() { #[cfg(feature = "trace_job_response_times")]
interesting |= INTEREST_JOB_INSTANCE && if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) { for i in worst_jobs_rt.iter() {
interesting |= INTEREST_JOB_RT & if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
// eprintln!("Job instance already present"); // eprintln!("Job instance already present");
x.try_update(i.1) x.try_update(i.1)
} else { } else {
@ -637,26 +646,35 @@ where
true true
} }
}; };
#[cfg(feature = "trace_job_response_times")]
for i in worst_jobs_et.iter() {
interesting |= INTEREST_JOB_ET & if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
x.try_update(i.1)
} else {
feedbackstate.worst_task_jobs.insert(i.1.get_hash_cached(), RTOSTask::from_instance(&i.1));
true
}
};
self.last_job_trace = Some(trace.jobs().clone()); self.last_job_trace = Some(trace.jobs().clone());
// dbg!(&observer.job_instances); // dbg!(&observer.job_instances);
{ {
let h = get_generic_hash(&edgetrace); let h = get_generic_hash(&edgetrace);
if let Some(x) = feedbackstate.worst_observed_per_stg_path.get_mut(&h) { if let Some(x) = feedbackstate.wort_per_stg_path.get_mut(&h) {
let t = last_runtime; let t = last_runtime;
if t > *x { if t > *x {
*x = t; *x = t;
interesting |= INTEREST_PATH; interesting |= INTEREST_PATH;
} }
} else { } else {
feedbackstate.worst_observed_per_stg_path.insert(h, last_runtime); feedbackstate.wort_per_stg_path.insert(h, last_runtime);
updated = true; updated = true;
interesting |= INTEREST_PATH; interesting |= INTEREST_PATH;
} }
} }
#[cfg(not(feature = "trace_job_response_times"))] #[cfg(not(feature = "trace_job_response_times"))]
let tmp = StgFeedback::abbs_in_exec_order(&observer.last_trace); let tmp = StgFeedback::<SYS>::abbs_in_exec_order(&trace.intervals());
#[cfg(feature = "trace_job_response_times")] #[cfg(feature = "trace_job_response_times")]
let tmp = { let tmp = {
if let Some(worst_instance) = worst_select_job { if let Some(worst_instance) = worst_select_job {
@ -675,14 +693,14 @@ where
let h = get_generic_hash(&tmp); let h = get_generic_hash(&tmp);
self.last_abbs_hash = Some(h); self.last_abbs_hash = Some(h);
// order of execution is relevant // order of execution is relevant
if let Some(x) = feedbackstate.worst_observed_per_abb_path.get_mut(&h) { if let Some(x) = feedbackstate.wort_per_abb_path.get_mut(&h) {
let t = last_runtime; let t = last_runtime;
if t > *x { if t > *x {
*x = t; *x = t;
interesting |= INTEREST_ABBPATH; interesting |= INTEREST_ABBPATH;
} }
} else { } else {
feedbackstate.worst_observed_per_abb_path.insert(h, last_runtime); feedbackstate.wort_per_abb_path.insert(h, last_runtime);
interesting |= INTEREST_ABBPATH; interesting |= INTEREST_ABBPATH;
} }
} }
@ -706,14 +724,14 @@ where
self.last_top_abb_hashes = Some(top_indices); self.last_top_abb_hashes = Some(top_indices);
self.last_aggregate_hash = Some(get_generic_hash(&_tmp)); self.last_aggregate_hash = Some(get_generic_hash(&_tmp));
if let Some(x) = feedbackstate.worst_observed_per_aggegated_path.get_mut(&_tmp) { if let Some(x) = feedbackstate.wort_per_aggegated_path.get_mut(&_tmp) {
let t = last_runtime; let t = last_runtime;
if t > *x { if t > *x {
*x = t; *x = t;
interesting |= INTEREST_AGGREGATE; interesting |= INTEREST_AGGREGATE;
} }
} else { } else {
feedbackstate.worst_observed_per_aggegated_path.insert(_tmp, last_runtime); feedbackstate.wort_per_aggegated_path.insert(_tmp, last_runtime);
interesting |= INTEREST_AGGREGATE; interesting |= INTEREST_AGGREGATE;
} }
} }
@ -737,7 +755,7 @@ where
.create(true) .create(true)
.append(true) .append(true)
.open(dp).expect("Could not open stgsize"); .open(dp).expect("Could not open stgsize");
writeln!(file, "{},{},{},{},{}", feedbackstate.graph.edge_count(), feedbackstate.graph.node_count(), feedbackstate.worst_observed_per_aggegated_path.len(),feedbackstate.worst_observed_per_stg_path.len(), timestamp).expect("Write to dump failed"); writeln!(file, "{},{},{},{},{}", feedbackstate.graph.edge_count(), feedbackstate.graph.node_count(), feedbackstate.wort_per_aggegated_path.len(),feedbackstate.wort_per_stg_path.len(), timestamp).expect("Write to dump failed");
} }
} }
Ok(interesting) Ok(interesting)