minor refactoring

This commit is contained in:
Alwin Berger 2025-05-28 11:46:22 +00:00
parent 3ff617e4a9
commit 27811aaaca
7 changed files with 993 additions and 742 deletions

View File

@ -0,0 +1,234 @@
# install.packages(c("mosaic", "dplyr", "DBI", "tikzDevice", "colorspace", "heatmaply", "RColorBrewer", "RSQLite"))
# Plot FRET's improvement over competing fuzzers from a benchmark sqlite DB.
# Usage: Rscript <this file> <bench.sqlite> <output-dir>
library("mosaic")
library("dplyr")
library("DBI")
library("tikzDevice") # Add this line to include the tikzDevice library
library("colorspace")
library("heatmaply")
library("RColorBrewer")
args = commandArgs(trailingOnly=TRUE)
# Map internal tool identifiers to display names.
TOOL_TRANSLATION <- list(
  feedgeneration100 = "evolution",
  frafl = "coverage",
  random = "random",
  stgwoet = "FRET"
)
# Known worst-case response times per benchmark case (instruction counts).
# All currently zeroed; see inline notes on how each was obtained.
KNOWN_WCRT <- list(
  waters_seq_bytes=0, # via INSERT_WC
  waters_seq_int=0, # via INSERT_WC + manual interrupt
  #waters_seq_int=219542, # via INSERT_WC + manual interrupt
  waters_seq_full=0,# via INSERT_WC + manual interrupt
  waters_seq_unsync_full=0,# via INSERT_WC + manual interrupt
  polycopter_seq_dataflow_full=0, # via INSERT_WC + manual interrupt
  polycopter_seq_dataflow_int=0, # via INSERT_WC + manual interrupt
  release_seq_int=0, # via fuzzer, equals to manual interrupts; Bug: Task3 y=0
  release_seq_full=0 # via INSERT_WC + manual interrupt; Bug: Task3 y=0
)
# Static WCRT analysis results (instruction counts).
STATIC_WCRT <- list(
  waters_seq_bytes=256632,
  waters_seq_int=256632,
  waters_seq_full=256632,
  waters_seq_unsync_full=272091,
  polycopter_seq_dataflow_full=373628,
  polycopter_seq_dataflow_int=373628,
  release_seq_int=921360,
  release_seq_full=921360
)
# ISNS_PER_US = (10**3)/(2**5)
# print(list(sapply(STATIC_WCRT, function(x) x/ISNS_PER_US)))
# quit()
# NOTE(review): this second assignment deliberately overrides the real static
# WCRT values above with zeros (disables the static-bound reference lines).
STATIC_WCRT <- list(
  waters_seq_bytes=0,
  waters_seq_int=0,
  waters_seq_full=0,
  waters_seq_unsync_full=0,
  polycopter_seq_dataflow_full=0,
  polycopter_seq_dataflow_int=0,
  release_seq_int=0,
  release_seq_full=0
)
# Per-case lower y-axis bound (currently all 0).
MIN_Y <- list(
  waters_seq_bytes=0,
  waters_seq_int=0,
  waters_seq_full=0,
  waters_seq_unsync_full=0,
  polycopter_seq_dataflow_full=0,
  polycopter_seq_dataflow_int=0,
  release_seq_int=0,
  release_seq_full=0
)
# Per-case legend placement (currently all "bottomright").
LEG_POS <- list(
  waters_seq_bytes="bottomright",
  waters_seq_int="bottomright",
  waters_seq_full="bottomright",
  waters_seq_unsync_full="bottomright",
  polycopter_seq_dataflow_full="bottomright",
  polycopter_seq_dataflow_int="bottomright",
  release_seq_int="bottomright",
  release_seq_full="bottomright"
)
# Map raw task-level case names to human-readable legend labels.
NAME_MAP <- list(
  watersIc11_seq_full="t1 10ms",
  watersIc12_seq_full="t2 10ms",
  watersIc13_seq_full="t3 10ms",
  watersIc14_seq_full="t4 10ms",
  watersIc31_seq_full="t5 spro",
  watersIc32_seq_full="t6 2ms",
  watersIc33_seq_full="t7 50ms",
  watersIc21_seq_full="t9 100ms",
  watersIc22_seq_full="t10 10ms",
  watersIc23_seq_full="t11 2ms"
)
# Read the first command line argument as an sqlite file
if (length(args) > 0) {
  sqlite_file <- args[1]
  con <- dbConnect(RSQLite::SQLite(), sqlite_file)
} else {
  print("No sqlite file provided, assume defaults")
  # Fallback: local bench.sqlite, output into ./remote
  args = c("bench.sqlite", "remote")
  sqlite_file <- args[1]
  con <- dbConnect(RSQLite::SQLite(), sqlite_file)
}
combos <- dbGetQuery(con, "SELECT * FROM combos")
# Distinct case and tool names (GROUP BY used as DISTINCT).
casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename")
toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname")
# Convert a two-column table (value, timestamp) into step-plot line segments.
#
# For each row i the output contains two points sharing the row's value:
#   (previous timestamp, value_i) and (timestamp_i, value_i),
# starting from X = 0, so plotting the result with lines() draws a staircase.
#
# ml:       data.frame or matrix; column 1 = value (Y), column 2 = timestamp (X).
# casename: unused; kept so existing callers passing it keep working.
#
# Returns a numeric matrix with columns X and Y (2 rows per input row),
# or NULL for empty input (same as the original loop version).
#
# Rewritten without rbind-in-a-loop, which copied the matrix on every
# iteration (O(n^2)); this version is vectorized.
ml2lines <- function(ml, casename) {
  n <- dim(ml)[1]
  if (n == 0) {
    return(NULL)
  }
  vals <- ml[, 1]
  ts <- ml[, 2]
  # X alternates: segment start (previous timestamp, 0 for the first row)
  # then segment end (this row's timestamp). rbind + as.vector interleaves.
  xs <- as.vector(rbind(c(0, ts[-n]), ts))
  ys <- rep(vals, each = 2)
  cbind(X = xs, Y = ys)
}
# Color palette: 8 colors from the RdYlGn scale.
# NOTE(review): RdYlGn() is presumably provided by heatmaply/colorspace —
# confirm which package exports it, RColorBrewer alone does not.
BREW=RdYlGn(8)
# BREW=Spectral(4)
# MY_COLORS <- c(BREW[[4]], BREW[[3]], BREW[[2]], BREW[[1]], "cyan", "pink", "gray", "orange", "black", "yellow","brown")
MY_COLORS=BREW
# draw limit
max_x <- 12      # x axis: hours
min_y <- -2800   # y axis: microseconds of improvement (can be negative)
max_y <- 2500
LEGEND_POS = "bottomright"
# Instructions per microsecond, used to convert instruction counts to time.
ISNS_PER_US = (10**3)/(2**5)
print(casenames[['casename']])
# Legend labels: mapped display name if known, otherwise the raw case name.
# NOTE(review): `%||%` needs rlang (loaded transitively?) or base R >= 4.4 —
# verify it is actually in scope here.
legend_names <- sapply(casenames[['casename']], function(x) NAME_MAP[[x]] %||% x)
legend_colors <- BREW
# 10 solid entries, then dotted/dashed; assumes at most 12 legend entries.
legend_styles <- c(rep("solid",10),"dotted","dashed")
h_ = 300
w_ = h_*4/3
# Output device: PNG into the directory given as args[2].
png(file=sprintf("%s/all_tasks.png", args[2]), width=w_, height=h_)
#tikz(file=sprintf("%s/all_tasks.tex", args[2]), width=0.6*w_/72, height=0.6*h_/72)
#pdf(file=sprintf("%s/all_tasks.pdf", args[2]), width=w_/72, height=h_/72)
# plot setup
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
# Empty canvas; the per-case curves are added by draw_plot() later.
plot(c(0,max_x),c(min_y,max_y), col='white', xlab="Time [h]", ylab="FRET's improvement over competitors [µs]", pch='.')
# Draw FRET's advantage over the best competitor for one benchmark case.
#
# data:     named list of per-tool result tables (columns include
#           'timestamp', 'max', 'median'); expected names: stgwoet (= FRET),
#           feedgeneration100, frafl, random.
# casename: currently unused; kept for interface compatibility.
# color:    line color for this case.
#
# Side effect: adds two curves (solid = max, dashed = median) to the
# currently open plot device. Returns nothing meaningful.
draw_plot <- function(data, casename, color) {
  # evo, cov, random, fret
  # Pre-calculate all malines and medlines (step-line representations).
  malines_list <- list()
  medlines_list <- list()
  for (n in seq_along(data)) {
    d <- data[[n]]
    malines_list[[names(data)[n]]] <- ml2lines(d[c('max','timestamp')])
    medlines_list[[names(data)[n]]] <- ml2lines(d[c('median','timestamp')])
  }
  # Plot the difference between FRET (stgwoet) and the pointwise best of the
  # two competitors. BUGFIX: the original guard only checked 'stgwoet' and
  # 'feedgeneration100' but indexed 'frafl' unconditionally, crashing when
  # that tool was missing for a case; all three are required now.
  if (all(c("stgwoet", "feedgeneration100", "frafl") %in% names(malines_list))) {
    fret_malines <- malines_list[["stgwoet"]]
    compare_malines1 <- malines_list[["feedgeneration100"]]
    compare_malines2 <- malines_list[["frafl"]]
    fret_medlines <- medlines_list[["stgwoet"]]
    compare_medlines1 <- medlines_list[["feedgeneration100"]]
    compare_medlines2 <- medlines_list[["frafl"]]
    # Ensure all have the same number of rows and matching X by truncating
    # to the shortest series.
    min_len <- min(nrow(fret_malines), nrow(compare_malines1), nrow(compare_malines2))
    # For each point, take the max of the two compare malines.
    compare_max_Y <- pmax(compare_malines1[1:min_len, "Y"], compare_malines2[1:min_len, "Y"])
    diff_lines_ma <- data.frame(
      X = fret_malines[1:min_len, "X"],
      Y = fret_malines[1:min_len, "Y"] - compare_max_Y
    )
    lines(diff_lines_ma, col=color, lty="solid", lwd=2)
    # Same for medlines.
    compare_max_med_Y <- pmax(compare_medlines1[1:min_len, "Y"], compare_medlines2[1:min_len, "Y"])
    diff_lines_med <- data.frame(
      X = fret_medlines[1:min_len, "X"],
      Y = fret_medlines[1:min_len, "Y"] - compare_max_med_Y
    )
    lines(diff_lines_med, col=color, lty="dashed", lwd=2)
  }
}
# ---- Main loop: draw one difference curve per benchmark case ----------------
for (i in seq_len(length(casenames[['casename']]))) {
  cn <- casenames[['casename']][i]
  color <- MY_COLORS[i]
  # NOTE(review): case names are interpolated into SQL via sprintf; they come
  # from our own combos table, but dbBind()/parameterized queries would be safer.
  tables <- dbGetQuery(con, sprintf("SELECT * FROM combos WHERE casename == '%s'", cn[[1]]))
  table_list <- list()
  # BUGFIX: seq_len(nrow(...)) instead of 1:nrow(...) — with zero rows the
  # old form iterated over c(1, 0) and queried a nonexistent table.
  for (row in seq_len(nrow(tables))) {
    table_name <- tables[row, 'fullname']
    tool_name <- tables[row, 'toolname']
    table_data <- dbGetQuery(con, sprintf("SELECT * FROM '%s'", table_name))
    table_list[[tool_name]] <- table_data
  }
  # Convert timestamp from microseconds to hours and instruction counts
  # to microseconds (ISNS_PER_US).
  for (n in seq_len(length(table_list))) {
    table_list[[n]]$timestamp <- table_list[[n]]$timestamp / 3600000
    table_list[[n]]$min <- table_list[[n]]$min / ISNS_PER_US
    table_list[[n]]$max <- table_list[[n]]$max / ISNS_PER_US
    table_list[[n]]$median <- table_list[[n]]$median / ISNS_PER_US
    table_list[[n]]$mean <- table_list[[n]]$mean / ISNS_PER_US
    table_list[[n]]$sdiv <- table_list[[n]]$sdiv / ISNS_PER_US
  }
  table_list <- table_list[c('stgwoet', 'feedgeneration100', 'frafl', 'random')] # manual re-order
  table_list <- table_list[!sapply(table_list, is.null)] # remove NULL entries
  draw_plot(table_list, cn[[1]], color)
}
# One legend for all cases, then close the device and the DB connection.
legend(LEGEND_POS, legend=legend_names,
  col=legend_colors,
  lty=legend_styles,
  lwd=2)
par(las = 2, mar = c(10, 5, 1, 1))
dev.off()
dbDisconnect(con)

View File

@ -1,3 +1,4 @@
#![allow(unused_imports)]
//! A fuzzer using qemu in systemmode for binary-only coverage of kernels
//!
use core::time::Duration;

View File

@ -1,8 +1,7 @@
use hashbrown::HashMap;
use libafl_bolts::prelude::{SerdeAny, SerdeAnyMap};
use libafl_qemu::{elf::EasyElf, read_user_reg_unchecked, GuestAddr, GuestPhysAddr};
use std::cmp::min;
use std::ops::Range;
use std::{cmp::min, hash::{DefaultHasher, Hash, Hasher}, ops::Range};
use crate::{
fuzzer::{DO_NUM_INTERRUPT, FIRST_INT},
@ -274,3 +273,12 @@ pub fn abb_profile(
/// Reborrow a mutable reference as a shared one.
///
/// Useful where type inference needs an explicit `&T` from a `&mut T`.
pub fn unmut<T>(x: &mut T) -> &T {
    // A plain reborrow coerces `&mut T` to `&T` in return position.
    x
}
/// Hash any `Hash` value with the standard library's `DefaultHasher`.
///
/// Deterministic within (and across) process runs, since `DefaultHasher::new()`
/// uses fixed keys; equal inputs always produce equal hashes.
pub fn get_generic_hash<H>(input: &H) -> u64
where
    H: Hash,
{
    let mut hasher = DefaultHasher::new();
    input.hash(&mut hasher);
    hasher.finish()
}

View File

@ -28,6 +28,7 @@ use libafl::{executors::ExitKind, observers::ObserversTuple, common::HasMetadata
use serde::{Deserialize, Serialize};
use std::marker::PhantomData;
use super::helpers::get_generic_hash;
use super::helpers::metadata_insert_or_update_get;
use super::target_os::SystemState;
use super::AtomicBasicBlock;
@ -442,15 +443,6 @@ fn set_observer_map(trace : &Vec<EdgeIndex>) {
}
}
fn get_generic_hash<H>(input: &H) -> u64
where
H: Hash,
{
let mut s = DefaultHasher::new();
input.hash(&mut s);
s.finish()
}
/// Takes: trace of intervals
/// Returns: hashmap of abb instance id to (execution time, memory accesses)
fn execinterval_to_abb_instances(trace: &Vec<ExecInterval>, read_trace: &Vec<Vec<(u32, u8)>>) -> HashMap<usize, (u64, Vec<(u32, u8)>)>{

View File

@ -1,3 +1,4 @@
#![allow(non_camel_case_types)]
use libafl_qemu::GuestAddr;
use qemu_module::{FreeRTOSSystemStateHelper, MEM_READ};
use serde::{Deserialize, Serialize};
@ -10,6 +11,7 @@ use crate::{
pub mod bindings;
pub mod qemu_module;
pub mod config;
pub mod post_processing;
use bindings::*;
use super::QemuLookup;
@ -281,10 +283,10 @@ fn trigger_collection(
} else {
systemstate.read_invalid = true;
}
systemstate.mem_reads = unsafe { MEM_READ.take().unwrap_or_default() };
systemstate.mem_reads = unsafe { std::mem::replace((&raw mut MEM_READ).as_mut().unwrap(), vec![])};
unsafe {
CURRENT_SYSTEMSTATE_VEC.push(systemstate);
(&raw mut CURRENT_SYSTEMSTATE_VEC).as_mut().unwrap().push(systemstate);
}
}
@ -545,7 +547,7 @@ libafl_bolts::impl_serdeany!(RefinedTCB);
libafl_bolts::impl_serdeany!(FreeRTOSSystemState);
libafl_bolts::impl_serdeany!(FreeRTOSSystem);
fn get_task_names(trace: &Vec<FreeRTOSSystemState>) -> HashSet<String> {
pub(crate) fn get_task_names(trace: &Vec<FreeRTOSSystemState>) -> HashSet<String> {
let mut ret: HashSet<_, _> = HashSet::new();
for state in trace {
ret.insert(state.current_task.task_name.to_string());

View File

@ -0,0 +1,707 @@
use std::{cell::RefCell, collections::VecDeque, rc::Rc};
use freertos::USR_ISR_SYMBOLS;
use hashbrown::HashMap;
use crate::systemstate::{
target_os::{freertos::FreeRTOSStruct::*, *},
AtomicBasicBlock, CaptureEvent,
};
use super::{
bindings::*,
compute_hash, ExecInterval, FreeRTOSStruct, FreeRTOSSystemState,
FreeRTOSSystemStateContext, RawFreeRTOSSystemState, RefinedTCB,
};
//============================= Parsing helpers
/// Parse a `List_t` containing `TCB_t` into `Vec<TCB_t>` from cache. Consumes the elements from cache.
///
/// `list`: raw FreeRTOS list header (reads `uxNumberOfItems` and `pxIndex`).
/// `dump`: guest-address -> raw struct cache captured from target memory;
///         every list item and owning TCB visited here is `remove`d from it.
///
/// Returns the owned TCBs; the item reached via `pxIndex` is appended last.
///
/// Panics when the cache is missing an entry or holds an unexpected variant —
/// that indicates a corrupt or incomplete memory capture.
/// (Fix: corrected "invaild" -> "invalid" in the two expect messages.)
pub fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32, FreeRTOSStruct>) -> Vec<TCB_t> {
    let mut ret: Vec<TCB_t> = Vec::new();
    // An empty list owns no items; nothing to consume.
    if list.uxNumberOfItems == 0 {
        return ret;
    }
    // pxIndex may reference a real ListItem or the MiniListItem sentinel;
    // in the latter case follow pxNext once to reach a real ListItem.
    let last_list_item = match dump
        .remove(&list.pxIndex)
        .expect("List_t entry was not in Hashmap")
    {
        List_Item_struct(li) => li,
        List_MiniItem_struct(mli) => match dump
            .remove(&mli.pxNext)
            .expect("MiniListItem pointer invalid")
        {
            List_Item_struct(li) => li,
            _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
        },
        _ => panic!("List_t entry was not a ListItem"),
    };
    let mut next_index = last_list_item.pxNext;
    let last_tcb = match dump
        .remove(&last_list_item.pvOwner)
        .expect("ListItem Owner not in Hashmap")
    {
        TCB_struct(t) => t,
        _ => panic!("List content does not equal type"),
    };
    // Walk the remaining uxNumberOfItems - 1 entries following pxNext.
    for _ in 0..list.uxNumberOfItems - 1 {
        let next_list_item = match dump
            .remove(&next_index)
            .expect("List_t entry was not in Hashmap")
        {
            List_Item_struct(li) => li,
            List_MiniItem_struct(mli) => match dump
                .remove(&mli.pxNext)
                .expect("MiniListItem pointer invalid")
            {
                List_Item_struct(li) => li,
                _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
            },
            _ => panic!("List_t entry was not a ListItem"),
        };
        match dump
            .remove(&next_list_item.pvOwner)
            .expect("ListItem Owner not in Hashmap")
        {
            TCB_struct(t) => ret.push(t),
            _ => panic!("List content does not equal type"),
        }
        next_index = next_list_item.pxNext;
    }
    ret.push(last_tcb);
    ret
}
//============================= State refinement
/// Drains a List of raw SystemStates to produce a refined trace
/// returns:
/// - a Vec of FreeRTOSSystemState
/// - a Vec of FreeRTOSSystemStateContext (qemu_tick, (capture_event, capture_name), edge, mem_reads)
///
/// The two returned Vecs are index-aligned: element n of both describes the
/// same raw snapshot. `input` is consumed (drained) so raw buffers are freed.
pub(crate) fn refine_system_states(
    mut input: Vec<RawFreeRTOSSystemState>,
) -> (Vec<FreeRTOSSystemState>, Vec<FreeRTOSSystemStateContext>) {
    let mut ret = (Vec::<_>::new(), Vec::<_>::new());
    for mut i in input.drain(..) {
        let cur = RefinedTCB::from_tcb_owned(i.current_tcb);
        // println!("Refine: {} {:?} {:?} {:x}-{:x}", cur.task_name, i.capture_point.0, i.capture_point.1.to_string(), i.edge.0, i.edge.1);
        // collect ready list
        // Priority lists are flattened in reverse order — presumably so the
        // highest-priority tasks come first; TODO confirm intended ordering.
        let mut collector = Vec::<RefinedTCB>::new();
        for j in i.prio_ready_lists.into_iter().rev() {
            let mut tmp = tcb_list_to_vec_cached(j, &mut i.dumping_ground)
                .iter()
                .map(|x| RefinedTCB::from_tcb(x))
                .collect();
            collector.append(&mut tmp);
        }
        // collect delay list
        // Normal and tick-overflow delay lists are merged, then sorted by task
        // name to get a canonical order independent of capture timing.
        let mut delay_list: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(i.delay_list, &mut i.dumping_ground)
                .iter()
                .map(|x| RefinedTCB::from_tcb(x))
                .collect();
        let mut delay_list_overflow: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(i.delay_list_overflow, &mut i.dumping_ground)
                .iter()
                .map(|x| RefinedTCB::from_tcb(x))
                .collect();
        delay_list.append(&mut delay_list_overflow);
        delay_list.sort_by(|a, b| a.task_name.cmp(&b.task_name));
        ret.0.push(FreeRTOSSystemState {
            current_task: cur,
            ready_list_after: collector,
            delay_list_after: delay_list,
            read_invalid: i.read_invalid,
            // input_counter: i.input_counter,//+IRQ_INPUT_BYTES_NUMBER,
        });
        ret.1.push(FreeRTOSSystemStateContext {
            qemu_tick: i.qemu_tick,
            capture_point: (i.capture_point.0, i.capture_point.1.to_string()),
            edge: i.edge,
            mem_reads: i.mem_reads,
        });
    }
    return ret;
}
/// Transform the states and metadata into a list of ExecIntervals, along with a HashMap of states, a list of HashSets marking memory reads and a bool indicating success
/// returns:
/// - a Vec of ExecIntervals
/// - a Vec of HashSets marking memory reads during these intervals
/// - a HashMap of ReducedFreeRTOSSystemStates by hash
/// - a bool indicating success
///
/// An interval spans two consecutive snapshots; its execution level is
/// derived from the capture event that STARTED it (see `isr_stack` below).
pub(crate) fn states2intervals(
    trace: Vec<FreeRTOSSystemState>,
    meta: Vec<FreeRTOSSystemStateContext>,
) -> (
    Vec<ExecInterval>,
    Vec<Vec<(u32, u8)>>,
    HashMap<u64, FreeRTOSSystemState>,
    bool,
) {
    if trace.len() == 0 {
        return (Vec::new(), Vec::new(), HashMap::new(), true);
    }
    let mut isr_stack: VecDeque<u8> = VecDeque::from([]); // 2+ = ISR, 1 = systemcall, 0 = APP. Trace starts with an ISREnd and executes the app
    // Per-task level (0 = app, 1 = inside an API call), keyed by task name.
    let mut level_of_task: HashMap<&str, u8> = HashMap::new();
    let mut ret: Vec<ExecInterval> = vec![];
    let mut reads: Vec<Vec<(u32, u8)>> = vec![];
    let mut edges: Vec<(u32, u32)> = vec![];
    // States are deduplicated by hash; `table` maps hash -> full state.
    let mut last_hash: u64 = compute_hash(&trace[0]);
    let mut table: HashMap<u64, FreeRTOSSystemState> = HashMap::new();
    table.insert(last_hash, trace[0].clone());
    for i in 0..trace.len() - 1 {
        let curr_name = trace[i].current_task().task_name().as_str();
        // let mut interval_name = curr_name; // Name of the interval, either the task name or the isr/api funtion name
        let level = match meta[i].capture_point.0 {
            CaptureEvent::APIEnd => {
                // API end always exits towards the app
                if !level_of_task.contains_key(curr_name) {
                    level_of_task.insert(curr_name, 0);
                }
                *level_of_task.get_mut(curr_name).unwrap() = 0;
                0
            }
            CaptureEvent::APIStart => {
                // API start can only be called in the app
                if !level_of_task.contains_key(curr_name) {
                    // Should not happen, apps start from an ISR End. Some input exhibited this behavior for unknown reasons
                    level_of_task.insert(curr_name, 0);
                }
                *level_of_task.get_mut(curr_name).unwrap() = 1;
                // interval_name = &meta[i].2;
                1
            }
            CaptureEvent::ISREnd => {
                // special case where the next block is an app start
                if !level_of_task.contains_key(curr_name) {
                    level_of_task.insert(curr_name, 0);
                }
                // nested isr, TODO: Test level > 2
                if isr_stack.len() > 1 {
                    // interval_name = ""; // We can't know which isr is running
                    isr_stack.pop_back().unwrap();
                    *isr_stack.back().unwrap()
                } else {
                    isr_stack.pop_back();
                    // possibly go back to an api call that is still running for this task
                    if level_of_task.get(curr_name).unwrap() == &1 {
                        // interval_name = ""; // We can't know which api is running
                    }
                    *level_of_task.get(curr_name).unwrap()
                }
            }
            CaptureEvent::ISRStart => {
                // special case for isrs which do not capture their end
                // if meta[i].2 == "ISR_0_Handler" {
                // &2
                // } else {
                // regular case
                // interval_name = &meta[i].2;
                if isr_stack.len() > 0 {
                    // Nested interrupt: one level above the current ISR.
                    let l = *isr_stack.back().unwrap();
                    isr_stack.push_back(l + 1);
                    l + 1
                } else {
                    isr_stack.push_back(2);
                    2
                }
                // }
            }
            // Sentinel for unexpected capture events (e.g. Start/End/Undefined);
            // intervals at level 100 fall outside all normal level checks.
            _ => 100,
        };
        // if trace[i].2 == CaptureEvent::End {break;}
        let next_hash = compute_hash(&trace[i + 1]);
        if !table.contains_key(&next_hash) {
            table.insert(next_hash, trace[i + 1].clone());
        }
        ret.push(ExecInterval {
            start_tick: meta[i].qemu_tick,
            end_tick: meta[i + 1].qemu_tick,
            start_state: last_hash,
            end_state: next_hash,
            start_capture: meta[i].capture_point.clone(),
            end_capture: meta[i + 1].capture_point.clone(),
            level: level,
            abb: None,
        });
        reads.push(meta[i + 1].mem_reads.clone());
        last_hash = next_hash;
        // Edge covering this interval: exit point of snapshot i to entry
        // point of snapshot i+1.
        edges.push((meta[i].edge.1, meta[i + 1].edge.0));
    }
    let t = add_abb_info(&mut ret, &table, &edges);
    (ret, reads, table, t)
}
/// Marks which abbs were executed at each interval
///
/// Reconstructs atomic basic blocks (ABBs) from the interval trace: each
/// interval is assigned an ABB, where interrupted blocks that resume later
/// share one `Rc<RefCell<AtomicBasicBlock>>` instance. Open (not yet ended)
/// blocks are tracked by (expected return address, task name) — ISRs (level
/// >= 2) all share the key "" since they are distinguished by nesting only.
///
/// Returns `true` when the trace was fully consistent; `false` when a block
/// start was observed while another was still open at the same key, or a
/// continuation had no matching start.
pub(crate) fn add_abb_info(
    trace: &mut Vec<ExecInterval>,
    state_table: &HashMap<u64, FreeRTOSSystemState>,
    edges: &Vec<(u32, u32)>,
) -> bool {
    let mut id_count = 0;
    let mut ret = true;
    // Tasks whose very first app block has been seen (started via PendSV).
    let mut task_has_started: HashSet<&String> = HashSet::new();
    // ABB per interval; shared Rc for continued blocks.
    let mut wip_abb_trace: Vec<Rc<RefCell<AtomicBasicBlock>>> = vec![];
    // let mut open_abb_at_this_task_or_level : HashMap<(u8,&str),usize> = HashMap::new();
    // Maps (return address where the block will resume, task key) -> index of
    // the interval that opened the block.
    let mut open_abb_at_this_ret_addr_and_task: HashMap<(u32, &str), usize> = HashMap::new();
    for i in 0..trace.len() {
        let curr_name = state_table[&trace[i].start_state].current_task().task_name();
        // let last : Option<&usize> = last_abb_start_of_task.get(&curr_name);
        // let open_abb = open_abb_at_this_task_or_level.get(&(trace[i].level, if trace[i].level<2 {&curr_name} else {""})).to_owned(); // apps/apis are differentiated by task name, isrs by nested level
        let open_abb = open_abb_at_this_ret_addr_and_task
            .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
            .to_owned(); // apps/apis are differentiated by task name, isrs by nested level
        // println!("Edge {:x}-{:x}", edges[i].0.unwrap_or(0xffff), edges[i].1.unwrap_or(0xffff));
        // --- Part 1: what kind of block does this interval START? ---
        match trace[i].start_capture.0 {
            // generic api abb start
            CaptureEvent::APIStart => {
                // assert_eq!(open_abb, None);
                // A still-open block here means the trace is inconsistent.
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: Some(trace[i].start_capture.1.clone()),
                })));
                id_count += 1;
            }
            // generic isr abb start
            CaptureEvent::ISRStart => {
                // assert_eq!(open_abb, None);
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: Some(trace[i].start_capture.1.clone()),
                })));
                id_count += 1;
            }
            // generic app abb start
            CaptureEvent::APIEnd => {
                // assert_eq!(open_abb, None);
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    // App blocks are named after the task; ISR-level blocks
                    // get no name here.
                    instance_name: if trace[i].level < 2 {
                        Some(curr_name.clone().clone())
                    } else {
                        None
                    },
                })));
                id_count += 1;
            }
            // generic continued blocks
            CaptureEvent::ISREnd => {
                // special case app abb start
                // The first PendSV return for a task marks the task's very
                // first app block, starting at address 0.
                if trace[i].start_capture.1 == "xPortPendSVHandler"
                    && !task_has_started.contains(&curr_name)
                {
                    // assert_eq!(open_abb, None);
                    ret &= open_abb.is_none();
                    wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                        start: 0,
                        ends: HashSet::new(),
                        level: if trace[i].level < 2 {
                            trace[i].level
                        } else {
                            2
                        },
                        instance_id: id_count,
                        instance_name: Some(curr_name.clone().clone()),
                    })));
                    id_count += 1;
                    open_abb_at_this_ret_addr_and_task.insert(
                        (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                        i,
                    );
                    task_has_started.insert(&curr_name);
                } else {
                    // Resume the block that was interrupted at this address.
                    if let Some(last) = open_abb_at_this_ret_addr_and_task
                        .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
                    {
                        let last = last.clone(); // required to drop immutable reference
                        wip_abb_trace.push(wip_abb_trace[last].clone());
                        // if the abb is interrupted again, it will need to continue at edge[i].1
                        open_abb_at_this_ret_addr_and_task.remove(&(
                            edges[i].0,
                            if trace[i].level < 2 { &curr_name } else { "" },
                        ));
                        open_abb_at_this_ret_addr_and_task.insert(
                            (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                            last,
                        ); // order matters!
                    } else {
                        // panic!();
                        // println!("Continued block with no start {} {} {:?} {:?} {:x}-{:x} {} {}", curr_name, trace[i].start_tick, trace[i].start_capture, trace[i].end_capture, edges[i].0, edges[i].1, task_has_started.contains(curr_name),trace[i].level);
                        // println!("{:x?}", open_abb_at_this_ret_addr_and_task);
                        // No matching start: record failure but synthesize a
                        // block so indices stay aligned with `trace`.
                        ret = false;
                        wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                            start: edges[i].1,
                            ends: HashSet::new(),
                            level: if trace[i].level < 2 {
                                trace[i].level
                            } else {
                                2
                            },
                            instance_id: id_count,
                            instance_name: if trace[i].level < 1 {
                                Some(curr_name.clone().clone())
                            } else {
                                None
                            },
                        })));
                        id_count += 1;
                    }
                }
            }
            _ => panic!("Undefined block start"),
        }
        // --- Part 2: does this interval END the current block? ---
        match trace[i].end_capture.0 {
            // generic app abb end
            CaptureEvent::APIStart => {
                let _t = &wip_abb_trace[i];
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // generic api abb end
            CaptureEvent::APIEnd => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // generic isr abb end
            CaptureEvent::ISREnd => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // end anything
            CaptureEvent::End => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // An ISR start does not end the interrupted block; it resumes later.
            CaptureEvent::ISRStart => (),
            _ => panic!("Undefined block end"),
        }
        // println!("{} {} {:x}-{:x} {:x}-{:x} {:?} {:?} {}",curr_name, trace[i].level, edges[i].0, edges[i].1, ((*wip_abb_trace[i])).borrow().start, ((*wip_abb_trace[i])).borrow().ends.iter().next().unwrap_or(&0xffff), trace[i].start_capture, trace[i].end_capture, trace[i].start_tick);
        // println!("{:x?}", open_abb_at_this_ret_addr_and_task);
    }
    // drop(open_abb_at_this_task_or_level);
    // Materialize the shared Rc blocks into the intervals.
    for i in 0..trace.len() {
        trace[i].abb = Some((*wip_abb_trace[i]).borrow().clone());
    }
    return ret;
}
//============================================= Task release times
// Find all task release times.
//
// Returns (qemu_tick, task_name) pairs. Releases are detected from three
// sources: the initial PendSV return (all initially-ready tasks), timer/user
// ISR blocks that move a task from the delay list to the ready list, and API
// calls that wake another task. Heuristics below work around invalid state
// reads (race conditions during capture) and nested interrupts.
pub(crate) fn get_releases(
    trace: &Vec<ExecInterval>,
    states: &HashMap<u64, FreeRTOSSystemState>,
) -> Vec<(u64, String)> {
    let mut ret = Vec::new();
    let mut initial_released = false;
    for (_n, i) in trace.iter().enumerate() {
        // The first release starts from xPortPendSVHandler
        if !initial_released
            && i.start_capture.0 == CaptureEvent::ISREnd
            && i.start_capture.1 == "xPortPendSVHandler"
        {
            let start_state = states.get(&i.start_state).expect("State not found");
            initial_released = true;
            // Everything already on a ready list counts as released at t0.
            start_state.get_ready_lists().iter().for_each(|x| {
                ret.push((i.start_tick, x.task_name().clone()));
            });
            continue;
        }
        // A timed release is SysTickHandler isr block that moves a task from the delay list to the ready list.
        if i.start_capture.0 == CaptureEvent::ISRStart
            && (i.start_capture.1 == "xPortSysTickHandler"
                || USR_ISR_SYMBOLS.contains(&i.start_capture.1.as_str()))
        {
            // detect race-conditions, get start and end state from the nearest valid intervals
            if states
                .get(&i.start_state)
                .map(|x| x.read_invalid)
                .unwrap_or(true)
            {
                // Search backwards for the nearest interval with a valid
                // start state...
                let mut start_index = None;
                for n in 1.._n {
                    if let Some(interval_start) = trace.get(_n - n) {
                        let start_state = states.get(&interval_start.start_state).unwrap();
                        if !start_state.read_invalid {
                            start_index = Some(_n - n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                // ...and forwards for the nearest valid end state.
                let mut end_index = None;
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        let end_state = states.get(&interval_end.end_state).unwrap();
                        if !end_state.read_invalid {
                            end_index = Some(n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                if let Some(Some(start_state)) =
                    start_index.map(|x| states.get(&trace[x].start_state))
                {
                    if let Some(Some(end_state)) =
                        end_index.map(|x| states.get(&trace[x].end_state))
                    {
                        // A task newly present in the ready list (and not the
                        // running task on either side) was released here.
                        end_state.ready_list_after.iter().for_each(|x| {
                            if x.task_name != end_state.current_task.task_name
                                && x.task_name != start_state.current_task.task_name
                                && !start_state
                                    .ready_list_after
                                    .iter()
                                    .any(|y| x.task_name == y.task_name)
                            {
                                ret.push((i.end_tick, x.task_name.clone()));
                            }
                        });
                    }
                }
            } else
            // canonical case, userspace -> isr -> userspace
            if i.end_capture.0 == CaptureEvent::ISREnd {
                let start_state = states.get(&i.start_state).expect("State not found");
                let end_state = states.get(&i.end_state).expect("State not found");
                end_state.ready_list_after.iter().for_each(|x| {
                    if x.task_name != end_state.current_task.task_name
                        && x.task_name != start_state.current_task.task_name
                        && !start_state
                            .ready_list_after
                            .iter()
                            .any(|y| x.task_name == y.task_name)
                    {
                        ret.push((i.end_tick, x.task_name.clone()));
                    }
                });
                // start_state.delay_list_after.iter().for_each(|x| {
                // if !end_state.delay_list_after.iter().any(|y| x.task_name == y.task_name) {
                // ret.push((i.end_tick, x.task_name.clone()));
                // }
                // });
            } else if i.end_capture.0 == CaptureEvent::ISRStart {
                // Nested interrupts. Fast-forward to the end of the original interrupt, or the first valid state thereafter
                // TODO: this may cause the same release to be registered multiple times
                let mut isr_has_ended = false;
                let start_state = states.get(&i.start_state).expect("State not found");
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        if interval_end.end_capture.1 == i.start_capture.1 || isr_has_ended {
                            let end_state = states.get(&interval_end.end_state).unwrap();
                            isr_has_ended = true;
                            if !end_state.read_invalid {
                                end_state.ready_list_after.iter().for_each(|x| {
                                    if x.task_name != end_state.current_task.task_name
                                        && x.task_name != start_state.current_task.task_name
                                        && !start_state
                                            .ready_list_after
                                            .iter()
                                            .any(|y| x.task_name == y.task_name)
                                    {
                                        ret.push((i.end_tick, x.task_name.clone()));
                                    }
                                });
                                break;
                            }
                        }
                    } else {
                        break;
                    }
                }
                // if let Some(interval_end) = trace.get(_n+2) {
                // if interval_end.start_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.1 == i.start_capture.1 {
                // let start_state = states.get(&i.start_state).expect("State not found");
                // let end_state = states.get(&interval_end.end_state).expect("State not found");
                // end_state.ready_list_after.iter().for_each(|x| {
                // if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
                // ret.push((i.end_tick, x.task_name.clone()));
                // }
                // });
                // }
                // }
            }
        }
        // Release driven by an API call. This produces a lot of false positives, as a job may block multiple times per instance. Despite this, aperiodic jobs cannot be modeled otherwise. If we assume the first release is the real one, we can filter out the rest.
        if i.start_capture.0 == CaptureEvent::APIStart {
            let api_start_state = states.get(&i.start_state).expect("State not found");
            // Find the state at the end of this API call (or the first
            // return to a level-0 app block, where scheduling is settled).
            let api_end_state = {
                let mut end_index = _n;
                for n in (_n)..trace.len() {
                    if trace[n].end_capture.0 == CaptureEvent::APIEnd
                        || trace[n].end_capture.0 == CaptureEvent::End
                    {
                        end_index = n;
                        break;
                    } else if n > _n && trace[n].level == 0 {
                        // API Start -> ISR Start+End -> APP Continue
                        end_index = n - 1; // any return to a regular app block is a fair point of comparison for the ready list, because scheduling has been performed
                        break;
                    }
                }
                states
                    .get(&trace[end_index].end_state)
                    .expect("State not found")
            };
            api_end_state.ready_list_after.iter().for_each(|x| {
                if x.task_name != api_start_state.current_task.task_name
                    && !api_start_state
                        .ready_list_after
                        .iter()
                        .any(|y| x.task_name == y.task_name)
                {
                    ret.push((i.end_tick, x.task_name.clone()));
                    // eprintln!("Task {} released by API call at {:.1}ms", x.task_name, crate::time::clock::tick_to_time(i.end_tick).as_micros() as f32/1000.0);
                }
            });
        }
    }
    ret
}
/// Pair task releases with their responses.
///
/// `rel`:  (tick, task_name) release events, chronological.
/// `resp`: (tick, task_name) response events, chronological.
///
/// Returns `(pairs, maybe_error)` where each pair is
/// (release_tick, response_tick, task_name) and `maybe_error` flags
/// inconsistencies (e.g. a response with no plausible release) that the
/// tolerance heuristics below could not explain away.
pub(crate) fn get_release_response_pairs(
    rel: &Vec<(u64, String)>,
    resp: &Vec<(u64, String)>,
) -> (Vec<(u64, u64, String)>, bool) {
    let mut maybe_error = false;
    let mut ret = Vec::new();
    // Tasks currently released but not yet responded: task -> release tick.
    let mut ready: HashMap<&String, u64> = HashMap::new();
    // Last seen response tick per task, used as a fallback release time.
    let mut last_response: HashMap<&String, u64> = HashMap::new();
    let mut r = rel.iter().peekable();
    let mut d = resp.iter().peekable();
    loop {
        while let Some(peek_rel) = r.peek() {
            // Fill releases as soon as possible
            if !ready.contains_key(&peek_rel.1) {
                ready.insert(&peek_rel.1, peek_rel.0);
                r.next();
            } else {
                if let Some(peek_resp) = d.peek() {
                    if peek_resp.0 > peek_rel.0 {
                        // multiple releases before response
                        // It is unclear which release is real
                        // maybe_error = true;
                        // eprintln!("Task {} released multiple times before response ({:.1}ms and {:.1}ms)", peek_rel.1, crate::time::clock::tick_to_time(ready[&peek_rel.1]).as_micros()/1000, crate::time::clock::tick_to_time(peek_rel.0).as_micros()/1000);
                        // ready.insert(&peek_rel.1, peek_rel.0);
                        // Keep the earliest release; drop this duplicate.
                        r.next();
                    } else {
                        // releases have overtaken responses, wait until the ready list clears up a bit
                        break;
                    }
                } else {
                    // no more responses
                    break;
                }
            }
        }
        if let Some(next_resp) = d.next() {
            if ready.contains_key(&next_resp.1) {
                if ready[&next_resp.1] >= next_resp.0 {
                    // Release recorded at/after the response: suspicious
                    // ordering, fall back to the previous response time.
                    if let Some(lr) = last_response.get(&next_resp.1) {
                        if u128::abs_diff(
                            crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                            crate::time::clock::tick_to_time(*lr).as_micros(),
                        ) > 500
                        {
                            // tolerate pending notifications for 500us
                            maybe_error = true;
                            // eprintln!("Task {} response at {:.1}ms before next release at {:.1}ms. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(ready[&next_resp.1]).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
                        }
                        // Sometimes a task is released immediately after a response. This might not be detected.
                        // Assume that the release occured with the last response
                        ret.push((*lr, next_resp.0, next_resp.1.clone()));
                        last_response.insert(&next_resp.1, next_resp.0);
                    } else {
                        maybe_error = true;
                        // eprintln!("Task {} released after response", next_resp.1);
                    }
                } else {
                    // assert!(peek_resp.0 >= ready[&peek_resp.1]);
                    // Normal case: release precedes response; emit the pair.
                    last_response.insert(&next_resp.1, next_resp.0);
                    ret.push((ready[&next_resp.1], next_resp.0, next_resp.1.clone()));
                    ready.remove(&next_resp.1);
                }
            } else {
                // Response without a pending release; fall back to the last
                // response time if one exists.
                if let Some(lr) = last_response.get(&next_resp.1) {
                    if u128::abs_diff(
                        crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                        crate::time::clock::tick_to_time(*lr).as_micros(),
                    ) > 1000
                    { // tolerate pending notifications for 1ms
                        // maybe_error = true;
                        // eprintln!("Task {} response at {:.1}ms not found in ready list. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
                    }
                    // Sometimes a task is released immediately after a response (e.g. pending notification). This might not be detected.
                    // Assume that the release occured with the last response
                    ret.push((*lr, next_resp.0, next_resp.1.clone()));
                    last_response.insert(&next_resp.1, next_resp.0);
                } else {
                    maybe_error = true;
                    // eprintln!("Task {} response at {:.1}ms not found in ready list", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0);
                }
            }
        } else {
            // TODO: should remaining released tasks be counted as finished?
            return (ret, maybe_error);
        }
    }
}

View File

@ -1,6 +1,6 @@
use std::{cell::RefCell, collections::VecDeque, ops::Range, rc::Rc};
use std::ops::Range;
use freertos::{FreeRTOSTraceMetadata, USR_ISR_SYMBOLS};
use freertos::FreeRTOSTraceMetadata;
use hashbrown::HashMap;
use libafl::{
@ -15,14 +15,12 @@ use libafl_qemu::{
use crate::{fuzzer::MAX_INPUT_SIZE, systemstate::{
helpers::{get_icount, in_any_range, read_rec_return_stackframe},
target_os::{freertos::FreeRTOSStruct::*, *},
AtomicBasicBlock, CaptureEvent, RTOSJob,
target_os::*,
CaptureEvent, RTOSJob,
}};
use super::{
bindings::{self, *},
compute_hash, trigger_collection, ExecInterval, FreeRTOSStruct, FreeRTOSSystemState,
FreeRTOSSystemStateContext, RawFreeRTOSSystemState, RefinedTCB, CURRENT_SYSTEMSTATE_VEC,
bindings::{self, *}, post_processing::{get_release_response_pairs, get_releases, refine_system_states, states2intervals}, trigger_collection, CURRENT_SYSTEMSTATE_VEC
};
//============================= Qemu Helper
@ -141,8 +139,8 @@ where
ET: EmulatorModuleTuple<S>,
{
unsafe {
CURRENT_SYSTEMSTATE_VEC.clear();
JOBS_DONE.clear();
(&raw mut CURRENT_SYSTEMSTATE_VEC).as_mut().unwrap().clear();
(&raw mut JOBS_DONE).as_mut().unwrap().clear();
}
if state.has_metadata::<FreeRTOSTraceMetadata>() {
state.remove_metadata::<FreeRTOSTraceMetadata>();
@ -161,39 +159,38 @@ where
ET: EmulatorModuleTuple<S>,
{
let mut need_to_debug = false;
if unsafe { CURRENT_SYSTEMSTATE_VEC.len() } == 0 {
let current_systemstate_vec = unsafe { (&raw mut CURRENT_SYSTEMSTATE_VEC).as_mut().unwrap() };
if { current_systemstate_vec.len() } == 0 {
eprintln!("No system states captured, aborting");
return;
}
// Collect the final system state
trigger_collection(&emulator_modules.qemu(), (0, 0), CaptureEvent::End, self);
unsafe {
let c = emulator_modules.qemu().cpu_from_index(0);
let pc = c.read_reg::<i32>(15).unwrap();
CURRENT_SYSTEMSTATE_VEC[CURRENT_SYSTEMSTATE_VEC.len() - 1].edge = (pc, 0);
CURRENT_SYSTEMSTATE_VEC[CURRENT_SYSTEMSTATE_VEC.len() - 1].capture_point =
(CaptureEvent::End, "Breakpoint".to_string());
}
let c = emulator_modules.qemu().cpu_from_index(0);
let pc = c.read_reg::<i32>(15).unwrap();
let last = current_systemstate_vec.last_mut().unwrap();
last.edge = (pc, 0);
last.capture_point =(CaptureEvent::End, "Breakpoint".to_string());
// Find the first ISREnd of vPortSVCHandler (start of the first task) and drop anything before
unsafe {
let mut index = 0;
while index < CURRENT_SYSTEMSTATE_VEC.len() {
if CaptureEvent::ISREnd == CURRENT_SYSTEMSTATE_VEC[index].capture_point.0
while index < current_systemstate_vec.len() {
if CaptureEvent::ISREnd == current_systemstate_vec[index].capture_point.0
&& CURRENT_SYSTEMSTATE_VEC[index].capture_point.1 == "xPortPendSVHandler"
{
break;
}
index += 1;
}
drop(CURRENT_SYSTEMSTATE_VEC.drain(..index));
if CURRENT_SYSTEMSTATE_VEC.len() == 1 {
drop(current_systemstate_vec.drain(..index));
if current_systemstate_vec.len() == 1 {
eprintln!("No system states captured, aborting");
return;
}
}
// Start refining the state trace
let (refined_states, metadata) =
refine_system_states(unsafe { CURRENT_SYSTEMSTATE_VEC.split_off(0) });
refine_system_states(current_systemstate_vec.split_off(0));
let (intervals, mem_reads, dumped_states, success) =
states2intervals(refined_states.clone(), metadata);
need_to_debug |= !success;
@ -202,7 +199,7 @@ where
#[cfg(feature = "trace_job_response_times")]
let jobs = {
let releases = get_releases(&intervals, &dumped_states);
let responses = unsafe { JOBS_DONE.split_off(0) };
let responses = unsafe { std::mem::take((&raw mut JOBS_DONE).as_mut().unwrap()) };
let (job_spans, do_report) = get_release_response_pairs(&releases, &responses);
need_to_debug |= do_report;
@ -309,7 +306,7 @@ pub fn job_done_hook<QT, S>(
.filter(|x| *x != '\0')
.collect::<String>();
unsafe {
JOBS_DONE.push((get_icount(&emulator), name));
(&raw mut JOBS_DONE).as_mut().unwrap().push((get_icount(&emulator), name));
}
}
@ -446,7 +443,7 @@ where
}
static mut INPUT_MEM: Range<GuestAddr> = 0..0;
pub static mut MEM_READ: Option<Vec<(GuestAddr, u8)>> = None;
pub static mut MEM_READ: Vec<(GuestAddr, u8)> = vec![];
#[allow(unused)]
pub fn trace_reads<QT, S>(
@ -454,714 +451,24 @@ pub fn trace_reads<QT, S>(
_state: Option<&mut S>,
_id: u64,
addr: GuestAddr,
_size: usize,
size: usize,
) where
S: UsesInput,
QT: EmulatorModuleTuple<S>,
{
if unsafe { INPUT_MEM.contains(&addr) } {
let emulator = hooks.qemu();
let mut buf: [u8; 1] = [0];
unsafe {
emulator.read_mem(addr, &mut buf);
}
if unsafe { MEM_READ.is_none() } {
unsafe { MEM_READ = Some(Vec::from([(addr, buf[0])])) };
} else {
unsafe { MEM_READ.as_mut().unwrap().push((addr, buf[0])) };
}
// println!("exec_read {:x} {}", addr, size);
if size == 0 {
return;
}
}
//============================= Parsing helpers
/// Parse a `List_t` containing `TCB_t` into a `Vec<TCB_t>` from cache.
/// Consumes (removes) the referenced elements from `dump`.
///
/// FreeRTOS lists are circular: `pxIndex` points at the "last visited" item,
/// so traversal follows `pxNext` from there and the owner of the indexed item
/// itself is appended after the loop, preserving the list's logical order.
/// A `MiniListItem` (the list sentinel) is skipped by following its `pxNext`.
///
/// Panics if the cached dump is missing an entry or a pointer resolves to an
/// unexpected struct kind.
fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32, FreeRTOSStruct>) -> Vec<TCB_t> {
    let mut ret: Vec<TCB_t> = Vec::new();
    if list.uxNumberOfItems == 0 {
        return ret;
    }
    // Resolve the item pxIndex points at, skipping the sentinel if present.
    let last_list_item = match dump
        .remove(&list.pxIndex)
        .expect("List_t entry was not in Hashmap")
    {
        List_Item_struct(li) => li,
        List_MiniItem_struct(mli) => match dump
            .remove(&mli.pxNext)
            .expect("MiniListItem pointer invalid")
        {
            List_Item_struct(li) => li,
            _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
        },
        _ => panic!("List_t entry was not a ListItem"),
    };
    let mut next_index = last_list_item.pxNext;
    // Owner of the indexed item; pushed last to keep link order.
    let last_tcb = match dump
        .remove(&last_list_item.pvOwner)
        .expect("ListItem Owner not in Hashmap")
    {
        TCB_struct(t) => t,
        _ => panic!("List content does not equal type"),
    };
    // Walk the remaining uxNumberOfItems - 1 entries in link order.
    for _ in 0..list.uxNumberOfItems - 1 {
        let next_list_item = match dump
            .remove(&next_index)
            .expect("List_t entry was not in Hashmap")
        {
            List_Item_struct(li) => li,
            List_MiniItem_struct(mli) => match dump
                .remove(&mli.pxNext)
                .expect("MiniListItem pointer invalid")
            {
                List_Item_struct(li) => li,
                _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
            },
            _ => panic!("List_t entry was not a ListItem"),
        };
        match dump
            .remove(&next_list_item.pvOwner)
            .expect("ListItem Owner not in Hashmap")
        {
            TCB_struct(t) => ret.push(t),
            _ => panic!("List content does not equal type"),
        }
        next_index = next_list_item.pxNext;
    }
    ret.push(last_tcb);
    ret
}
//============================= State refinement
/// Drains a list of raw system states and produces a refined trace.
///
/// Returns two parallel vectors:
/// - `FreeRTOSSystemState`: the scheduler-visible state (current task,
///   flattened ready list, merged+sorted delay list, read-validity flag)
/// - `FreeRTOSSystemStateContext`: capture metadata
///   (qemu_tick, (capture_event, capture_name), edge, mem_reads)
fn refine_system_states(
    mut input: Vec<RawFreeRTOSSystemState>,
) -> (Vec<FreeRTOSSystemState>, Vec<FreeRTOSSystemStateContext>) {
    let mut states = Vec::new();
    let mut contexts = Vec::new();
    for mut raw in input.drain(..) {
        let current = RefinedTCB::from_tcb_owned(raw.current_tcb);
        // Flatten the per-priority ready lists, highest priority first.
        let mut ready_tasks = Vec::<RefinedTCB>::new();
        for prio_list in raw.prio_ready_lists.into_iter().rev() {
            ready_tasks.extend(
                tcb_list_to_vec_cached(prio_list, &mut raw.dumping_ground)
                    .iter()
                    .map(|tcb| RefinedTCB::from_tcb(tcb)),
            );
        }
        // Merge both delay lists (normal + tick-overflow), then sort by name.
        let mut delayed: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(raw.delay_list, &mut raw.dumping_ground)
                .iter()
                .map(|tcb| RefinedTCB::from_tcb(tcb))
                .collect();
        delayed.extend(
            tcb_list_to_vec_cached(raw.delay_list_overflow, &mut raw.dumping_ground)
                .iter()
                .map(|tcb| RefinedTCB::from_tcb(tcb)),
        );
        delayed.sort_by(|a, b| a.task_name.cmp(&b.task_name));
        states.push(FreeRTOSSystemState {
            current_task: current,
            ready_list_after: ready_tasks,
            delay_list_after: delayed,
            read_invalid: raw.read_invalid,
        });
        contexts.push(FreeRTOSSystemStateContext {
            qemu_tick: raw.qemu_tick,
            capture_point: (raw.capture_point.0, raw.capture_point.1.to_string()),
            edge: raw.edge,
            mem_reads: raw.mem_reads,
        });
    }
    (states, contexts)
}
/// Transform the states and metadata into a list of ExecIntervals, along with a HashMap of states, a list of HashSets marking memory reads and a bool indicating success
/// returns:
/// - a Vec of ExecIntervals
/// - a Vec of HashSets marking memory reads during these intervals
/// - a HashMap of ReducedFreeRTOSSystemStates by hash
/// - a bool indicating success
fn states2intervals(
trace: Vec<FreeRTOSSystemState>,
meta: Vec<FreeRTOSSystemStateContext>,
) -> (
Vec<ExecInterval>,
Vec<Vec<(u32, u8)>>,
HashMap<u64, FreeRTOSSystemState>,
bool,
) {
if trace.len() == 0 {
return (Vec::new(), Vec::new(), HashMap::new(), true);
}
let mut isr_stack: VecDeque<u8> = VecDeque::from([]); // 2+ = ISR, 1 = systemcall, 0 = APP. Trace starts with an ISREnd and executes the app
let mut level_of_task: HashMap<&str, u8> = HashMap::new();
let mut ret: Vec<ExecInterval> = vec![];
let mut reads: Vec<Vec<(u32, u8)>> = vec![];
let mut edges: Vec<(u32, u32)> = vec![];
let mut last_hash: u64 = compute_hash(&trace[0]);
let mut table: HashMap<u64, FreeRTOSSystemState> = HashMap::new();
table.insert(last_hash, trace[0].clone());
for i in 0..trace.len() - 1 {
let curr_name = trace[i].current_task().task_name().as_str();
// let mut interval_name = curr_name; // Name of the interval, either the task name or the isr/api funtion name
let level = match meta[i].capture_point.0 {
CaptureEvent::APIEnd => {
// API end always exits towards the app
if !level_of_task.contains_key(curr_name) {
level_of_task.insert(curr_name, 0);
}
*level_of_task.get_mut(curr_name).unwrap() = 0;
0
let input_mem = unsafe { (&raw const INPUT_MEM).as_ref().unwrap() };
let mut buf = vec![0u8; size];
let emulator = hooks.qemu();
unsafe {
emulator.read_mem(addr, &mut buf);
for (i, &byte) in buf.iter().enumerate() {
let curr_addr = addr.wrapping_add(i as GuestAddr);
if input_mem.contains(&curr_addr) {
(&raw mut MEM_READ).as_mut().unwrap().push((curr_addr, byte));
}
CaptureEvent::APIStart => {
// API start can only be called in the app
if !level_of_task.contains_key(curr_name) {
// Should not happen, apps start from an ISR End. Some input exibited this behavior for unknown reasons
level_of_task.insert(curr_name, 0);
}
*level_of_task.get_mut(curr_name).unwrap() = 1;
// interval_name = &meta[i].2;
1
}
CaptureEvent::ISREnd => {
// special case where the next block is an app start
if !level_of_task.contains_key(curr_name) {
level_of_task.insert(curr_name, 0);
}
// nested isr, TODO: Test level > 2
if isr_stack.len() > 1 {
// interval_name = ""; // We can't know which isr is running
isr_stack.pop_back().unwrap();
*isr_stack.back().unwrap()
} else {
isr_stack.pop_back();
// possibly go back to an api call that is still running for this task
if level_of_task.get(curr_name).unwrap() == &1 {
// interval_name = ""; // We can't know which api is running
}
*level_of_task.get(curr_name).unwrap()
}
}
CaptureEvent::ISRStart => {
// special case for isrs which do not capture their end
// if meta[i].2 == "ISR_0_Handler" {
// &2
// } else {
// regular case
// interval_name = &meta[i].2;
if isr_stack.len() > 0 {
let l = *isr_stack.back().unwrap();
isr_stack.push_back(l + 1);
l + 1
} else {
isr_stack.push_back(2);
2
}
// }
}
_ => 100,
};
// if trace[i].2 == CaptureEvent::End {break;}
let next_hash = compute_hash(&trace[i + 1]);
if !table.contains_key(&next_hash) {
table.insert(next_hash, trace[i + 1].clone());
}
ret.push(ExecInterval {
start_tick: meta[i].qemu_tick,
end_tick: meta[i + 1].qemu_tick,
start_state: last_hash,
end_state: next_hash,
start_capture: meta[i].capture_point.clone(),
end_capture: meta[i + 1].capture_point.clone(),
level: level,
abb: None,
});
reads.push(meta[i + 1].mem_reads.clone());
last_hash = next_hash;
edges.push((meta[i].edge.1, meta[i + 1].edge.0));
}
let t = add_abb_info(&mut ret, &table, &edges);
(ret, reads, table, t)
}
/// Marks which atomic basic block (ABB) was executed during each interval.
///
/// Shared `Rc<RefCell<AtomicBasicBlock>>` instances let an ABB that is
/// interrupted and later resumed be represented by the same object in all of
/// its intervals. Open (unfinished) ABBs are keyed by the return address they
/// will resume at, plus the task name (apps/APIs, level < 2) or "" (ISRs).
///
/// Returns `false` if the trace was inconsistent, e.g. a block continued
/// without a recorded start or a new block opened over a still-open one.
fn add_abb_info(
    trace: &mut Vec<ExecInterval>,
    table: &HashMap<u64, FreeRTOSSystemState>,
    edges: &Vec<(u32, u32)>,
) -> bool {
    let mut id_count = 0;
    let mut ret = true;
    let mut task_has_started: HashSet<String> = HashSet::new();
    let mut wip_abb_trace: Vec<Rc<RefCell<AtomicBasicBlock>>> = vec![];
    let mut open_abb_at_this_ret_addr_and_task: HashMap<(u32, &str), usize> = HashMap::new();
    for i in 0..trace.len() {
        let curr_name = &table[&trace[i].start_state].current_task().task_name();
        // apps/apis are differentiated by task name, isrs by nested level
        let open_abb = open_abb_at_this_ret_addr_and_task
            .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
            .to_owned();
        match trace[i].start_capture.0 {
            // Generic api/isr abb start: both cases record the capture name as
            // the instance name, so they share one arm.
            CaptureEvent::APIStart | CaptureEvent::ISRStart => {
                // A block opening over a still-open one is an inconsistency.
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: Some(trace[i].start_capture.1.clone()),
                })));
                id_count += 1;
            }
            // Generic app abb start (returning from an API call).
            CaptureEvent::APIEnd => {
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: if trace[i].level < 2 {
                        Some(curr_name.clone().clone())
                    } else {
                        None
                    },
                })));
                id_count += 1;
            }
            // Generic continued blocks.
            CaptureEvent::ISREnd => {
                // Special case: the very first app abb of a task starts at the
                // end of the context-switch ISR.
                if trace[i].start_capture.1 == "xPortPendSVHandler"
                    && !task_has_started.contains(curr_name.clone())
                {
                    ret &= open_abb.is_none();
                    wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                        start: 0,
                        ends: HashSet::new(),
                        level: if trace[i].level < 2 {
                            trace[i].level
                        } else {
                            2
                        },
                        instance_id: id_count,
                        instance_name: Some(curr_name.clone().clone()),
                    })));
                    id_count += 1;
                    open_abb_at_this_ret_addr_and_task.insert(
                        (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                        i,
                    );
                    task_has_started.insert(curr_name.clone().clone());
                } else {
                    if let Some(last) = open_abb_at_this_ret_addr_and_task
                        .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
                    {
                        let last = last.clone(); // required to drop immutable reference
                        // Continue the interrupted ABB: share its Rc.
                        wip_abb_trace.push(wip_abb_trace[last].clone());
                        // If the abb is interrupted again, it will need to
                        // continue at edges[i].1.
                        open_abb_at_this_ret_addr_and_task.remove(&(
                            edges[i].0,
                            if trace[i].level < 2 { &curr_name } else { "" },
                        ));
                        open_abb_at_this_ret_addr_and_task.insert(
                            (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                            last,
                        ); // order matters!
                    } else {
                        // Continued block with no recorded start: mark the
                        // trace inconsistent and open a fresh block instead.
                        ret = false;
                        wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                            start: edges[i].1,
                            ends: HashSet::new(),
                            level: if trace[i].level < 2 {
                                trace[i].level
                            } else {
                                2
                            },
                            instance_id: id_count,
                            instance_name: if trace[i].level < 1 {
                                Some(curr_name.clone().clone())
                            } else {
                                None
                            },
                        })));
                        id_count += 1;
                    }
                }
            }
            _ => panic!("Undefined block start"),
        }
        match trace[i].end_capture.0 {
            // Any capture that terminates the current block records the end
            // address and closes the bookkeeping entry; the bodies were
            // identical, so the arms are merged.
            CaptureEvent::APIStart
            | CaptureEvent::APIEnd
            | CaptureEvent::ISREnd
            | CaptureEvent::End => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // An ISR start interrupts but does not end the current block.
            CaptureEvent::ISRStart => (),
            _ => panic!("Undefined block end"),
        }
    }
    // Materialize the shared ABBs into the intervals.
    for i in 0..trace.len() {
        trace[i].abb = Some((*wip_abb_trace[i]).borrow().clone());
    }
    return ret;
}
//============================================= Task release times
/// Find all task release times in the interval trace.
///
/// A release is detected when a task newly appears in a ready list across an
/// interval boundary. Three cases are handled:
/// - the initial release of all ready tasks at the first
///   `xPortPendSVHandler` ISREnd,
/// - timed releases inside tick/user ISRs (with fallback scans to the nearest
///   valid states when a race-condition invalidated the captured state),
/// - releases driven by API calls (may over-report; see comment below).
///
/// returns: (release_tick, task_name) pairs in trace order.
fn get_releases(
    trace: &Vec<ExecInterval>,
    states: &HashMap<u64, FreeRTOSSystemState>,
) -> Vec<(u64, String)> {
    let mut ret = Vec::new();
    let mut initial_released = false;
    for (_n, i) in trace.iter().enumerate() {
        // The first release starts from xPortPendSVHandler
        if !initial_released
            && i.start_capture.0 == CaptureEvent::ISREnd
            && i.start_capture.1 == "xPortPendSVHandler"
        {
            let start_state = states.get(&i.start_state).expect("State not found");
            initial_released = true;
            // Everything already in the ready lists counts as released now.
            start_state.get_ready_lists().iter().for_each(|x| {
                ret.push((i.start_tick, x.task_name().clone()));
            });
            continue;
        }
        // A timed release is SysTickHandler isr block that moves a task from the delay list to the ready list.
        if i.start_capture.0 == CaptureEvent::ISRStart
            && (i.start_capture.1 == "xPortSysTickHandler"
                || USR_ISR_SYMBOLS.contains(&i.start_capture.1.as_str()))
        {
            // detect race-conditions, get start and end state from the nearest valid intervals
            if states
                .get(&i.start_state)
                .map(|x| x.read_invalid)
                .unwrap_or(true)
            {
                // Scan backwards for the nearest interval with a valid start state.
                let mut start_index = None;
                for n in 1.._n {
                    if let Some(interval_start) = trace.get(_n - n) {
                        let start_state = states.get(&interval_start.start_state).unwrap();
                        if !start_state.read_invalid {
                            start_index = Some(_n - n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                // Scan forwards for the nearest interval with a valid end state.
                let mut end_index = None;
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        let end_state = states.get(&interval_end.end_state).unwrap();
                        if !end_state.read_invalid {
                            end_index = Some(n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                if let Some(Some(start_state)) =
                    start_index.map(|x| states.get(&trace[x].start_state))
                {
                    if let Some(Some(end_state)) =
                        end_index.map(|x| states.get(&trace[x].end_state))
                    {
                        // A task that is ready after but was neither running
                        // nor ready before was released by this ISR.
                        end_state.ready_list_after.iter().for_each(|x| {
                            if x.task_name != end_state.current_task.task_name
                                && x.task_name != start_state.current_task.task_name
                                && !start_state
                                    .ready_list_after
                                    .iter()
                                    .any(|y| x.task_name == y.task_name)
                            {
                                ret.push((i.end_tick, x.task_name.clone()));
                            }
                        });
                    }
                }
            } else
            // canonical case, userspace -> isr -> userspace
            if i.end_capture.0 == CaptureEvent::ISREnd {
                let start_state = states.get(&i.start_state).expect("State not found");
                let end_state = states.get(&i.end_state).expect("State not found");
                end_state.ready_list_after.iter().for_each(|x| {
                    if x.task_name != end_state.current_task.task_name
                        && x.task_name != start_state.current_task.task_name
                        && !start_state
                            .ready_list_after
                            .iter()
                            .any(|y| x.task_name == y.task_name)
                    {
                        ret.push((i.end_tick, x.task_name.clone()));
                    }
                });
            } else if i.end_capture.0 == CaptureEvent::ISRStart {
                // Nested interrupts. Fast-forward to the end of the original interrupt, or the first valid state thereafter
                // TODO: this may cause the same release to be registered multiple times
                let mut isr_has_ended = false;
                let start_state = states.get(&i.start_state).expect("State not found");
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        if interval_end.end_capture.1 == i.start_capture.1 || isr_has_ended {
                            let end_state = states.get(&interval_end.end_state).unwrap();
                            isr_has_ended = true;
                            if !end_state.read_invalid {
                                end_state.ready_list_after.iter().for_each(|x| {
                                    if x.task_name != end_state.current_task.task_name
                                        && x.task_name != start_state.current_task.task_name
                                        && !start_state
                                            .ready_list_after
                                            .iter()
                                            .any(|y| x.task_name == y.task_name)
                                    {
                                        ret.push((i.end_tick, x.task_name.clone()));
                                    }
                                });
                                break;
                            }
                        }
                    } else {
                        break;
                    }
                }
            }
        }
        // Release driven by an API call. This produces a lot of false positives, as a job may block multiple times per instance. Despite this, aperiodic jobs not be modeled otherwise. If we assume the first release is the real one, we can filter out the rest.
        if i.start_capture.0 == CaptureEvent::APIStart {
            let api_start_state = states.get(&i.start_state).expect("State not found");
            let api_end_state = {
                // Find where the API call's effect on the ready list can be
                // observed: its APIEnd/End, or the first app-level interval.
                let mut end_index = _n;
                for n in (_n)..trace.len() {
                    if trace[n].end_capture.0 == CaptureEvent::APIEnd
                        || trace[n].end_capture.0 == CaptureEvent::End
                    {
                        end_index = n;
                        break;
                    } else if n > _n && trace[n].level == 0 {
                        // API Start -> ISR Start+End -> APP Continue
                        end_index = n - 1; // any return to a regular app block is a fair point of comparison for the ready list, because scheduling has been performed
                        break;
                    }
                }
                states
                    .get(&trace[end_index].end_state)
                    .expect("State not found")
            };
            api_end_state.ready_list_after.iter().for_each(|x| {
                if x.task_name != api_start_state.current_task.task_name
                    && !api_start_state
                        .ready_list_after
                        .iter()
                        .any(|y| x.task_name == y.task_name)
                {
                    ret.push((i.end_tick, x.task_name.clone()));
                }
            });
        }
    }
    ret
}
/// Pair task releases with their responses to form job spans.
///
/// `rel`  : (tick, task_name) release events, ascending by tick
/// `resp` : (tick, task_name) response/completion events, ascending by tick
///
/// returns:
/// - a Vec of (release_tick, response_tick, task_name) job spans
/// - a bool flagging that the pairing may be erroneous (diagnostic only)
fn get_release_response_pairs(
    rel: &Vec<(u64, String)>,
    resp: &Vec<(u64, String)>,
) -> (Vec<(u64, u64, String)>, bool) {
    let mut maybe_error = false;
    let mut ret = Vec::new();
    // Tasks currently released but not yet responded: task -> release tick.
    let mut ready: HashMap<&String, u64> = HashMap::new();
    // Last response tick per task; used as a fallback release point.
    let mut last_response: HashMap<&String, u64> = HashMap::new();
    let mut r = rel.iter().peekable();
    let mut d = resp.iter().peekable();
    loop {
        // Fill releases as soon as possible
        while let Some(peek_rel) = r.peek() {
            if !ready.contains_key(&peek_rel.1) {
                ready.insert(&peek_rel.1, peek_rel.0);
                r.next();
            } else {
                if let Some(peek_resp) = d.peek() {
                    if peek_resp.0 > peek_rel.0 {
                        // multiple releases before response
                        // It is unclear which release is real; keep the first
                        // one and drop this one.
                        // maybe_error = true;
                        // ready.insert(&peek_rel.1, peek_rel.0);
                        r.next();
                    } else {
                        // releases have overtaken responses, wait until the ready list clears up a bit
                        break;
                    }
                } else {
                    // no more responses
                    break;
                }
            }
        }
        if let Some(next_resp) = d.next() {
            if ready.contains_key(&next_resp.1) {
                if ready[&next_resp.1] >= next_resp.0 {
                    // Response precedes the recorded release: fall back to the
                    // task's last response as the release point.
                    if let Some(lr) = last_response.get(&next_resp.1) {
                        if u128::abs_diff(
                            crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                            crate::time::clock::tick_to_time(*lr).as_micros(),
                        ) > 500
                        {
                            // tolerate pending notifications for 500us
                            maybe_error = true;
                        }
                        // Sometimes a task is released immediately after a response. This might not be detected.
                        // Assume that the release occured with the last response
                        ret.push((*lr, next_resp.0, next_resp.1.clone()));
                        last_response.insert(&next_resp.1, next_resp.0);
                    } else {
                        // No previous response to fall back to.
                        maybe_error = true;
                    }
                } else {
                    // Canonical case: release strictly before response.
                    last_response.insert(&next_resp.1, next_resp.0);
                    ret.push((ready[&next_resp.1], next_resp.0, next_resp.1.clone()));
                    ready.remove(&next_resp.1);
                }
            } else {
                // Response for a task with no pending release.
                if let Some(lr) = last_response.get(&next_resp.1) {
                    if u128::abs_diff(
                        crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                        crate::time::clock::tick_to_time(*lr).as_micros(),
                    ) > 1000
                    { // tolerate pending notifications for 1ms
                        // NOTE(review): this check currently has no effect (the
                        // error flag is commented out); kept for symmetry with
                        // the 500us case above.
                        // maybe_error = true;
                    }
                    // Sometimes a task is released immediately after a response (e.g. pending notification). This might not be detected.
                    // Assume that the release occured with the last response
                    ret.push((*lr, next_resp.0, next_resp.1.clone()));
                    last_response.insert(&next_resp.1, next_resp.0);
                } else {
                    maybe_error = true;
                }
            }
        } else {
            // TODO: should remaining released tasks be counted as finished?
            return (ret, maybe_error);
        }
    }
}
}