This commit is contained in:
Alwin Berger 2025-05-21 08:30:17 +00:00
parent 65623a630c
commit f2bbbdccdd
11 changed files with 809 additions and 1229 deletions

2
.envrc
View File

@ -1 +1 @@
use flake
use nix

58
README.md Normal file
View File

@ -0,0 +1,58 @@
# FRET
## Structure
* LibAFL-based fuzzer under `LibAFL/fuzzers/FRET`
* FreeRTOS demos under `FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC`
* QEMU instrumentation under `qemu-libafl-bridge`
## HowTo
### Development environment
`nix develop` or `nix-shell`
### Build FRET
```sh
cd LibAFL/fuzzers/FRET
# First time and after changes to QEMU
sh -c "unset CUSTOM_QEMU_NO_BUILD CUSTOM_QEMU_NO_CONFIGURE && cargo build"
# Afterwards, simply use
cargo build
```
### Build additional tools
```sh
# Trace analysis tool
cd state2gantt && cargo build && cd -
# Benchmark evaluation tool
cd LibAFL/fuzzers/FRET/benchmark/number_cruncher && cargo build && cd -
```
### Build FreeRTOS Demos
```sh
cd LibAFL/fuzzers/FRET/benchmark
sh build_all_demos.sh
```
### Example usage
* Build the demos and additional tools first
```sh
cd LibAFL/fuzzers/FRET
# Help for arguments
cargo run -- --help
# Example
export DUMP=$(mktemp -d)
dd if=/dev/random of=$DUMP/input bs=8K count=1
# fuzz for 10 seconds
cargo run -- -k benchmark/build/waters_seq_full.elf -c benchmark/target_symbols.csv -n $DUMP/output -tag fuzz -t 10 --seed 123456
# Produce a trace for the worst case found
cargo run -- -k benchmark/build/waters_seq_full.elf -c benchmark/target_symbols.csv -n $DUMP/show -tr showmap -i $DUMP/output.case
# plot the result
../../../state2gantt/driver.sh $DUMP/show.trace.ron
# view the gantt chart
open $DUMP/show_job.html
```
### Perform canned benchmarks
* Build the demos and additional tools first
* Select a benchmark set in `LibAFL/fuzzers/FRET/benchmark/Snakefile`
```sh
# $BENCHDIR
cd LibAFL/fuzzers/FRET/benchmark
# e.g.
snakemake -c 128 set48 set64 set128
# plot the results
sh plot_all_benchmarks.sh
sh plot_all_traces.sh
```

View File

@ -19,6 +19,7 @@
sphinx_rtd_theme
# other python packages
];
R-with-my-packages = pkgs.rWrapper.override{ packages = with pkgs.rPackages; [ ggplot2 readr dplyr plotly mosaic DBI tikzDevice colorspace heatmaply RColorBrewer RSQLite languageserver ]; };
in
with pkgs;
rec {
@ -47,13 +48,15 @@
# generate bindings from RTOS to Rust
rust-bindgen
# compare libafl edges
packages.edge_compare
#packages.edge_compare
# Debugging
ddd
# visualization
graphviz
#rstudioWrapper # prefer host packages for R
#R
R-with-my-packages
pandoc
# dependencies for mosaic
freetype
fontconfig
@ -75,6 +78,10 @@
# export CROSS_CC=arm-none-eabi-gcc
export LIBCLANG_PATH=${llvmPackages_19.libclang.lib}/lib
export BENCHDIR=bench_default
export PICO_SDK_PATH=$(pwd)/pico-sdk
export FREERTOS_KERNEL_PATH=$(pwd)/FreeRTOS-Kernel
mkdir -p $TMPDIR
'';
};

947
graph2viz/Cargo.lock generated

File diff suppressed because it is too large Load Diff

3
input_serde/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
target
*.case
*.edit

View File

@ -51,7 +51,7 @@ fn unfold_input(input : &MultipartInput<BytesInput>) -> HashMap<String,Either<Ve
res.insert(name.to_string(),Left(part.bytes().to_vec()));
} else {
// let times = unsafe{std::mem::transmute::<&[u8], &[u32]>(&part.bytes()[0..4*(part.bytes().len()/4)])}.to_vec();
println!("name {} len {}", name, part.bytes().len());
eprintln!("name {} len {}", name, part.bytes().len());
let mut times = part.bytes().chunks(4).filter(|x| x.len()==4).map(|x| u32::from_le_bytes(x.try_into().unwrap())).collect::<Vec<_>>();
times.sort_unstable();
res.insert(name.to_string(),Right(times));

947
state2gantt/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -8,7 +8,7 @@ edition = "2021"
[dependencies]
fret = { path = "../LibAFL/fuzzers/FRET" }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
# hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
# petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"

View File

@ -1,4 +1,5 @@
#!/bin/sh
ROOTDIR=~/code/work/FRETv3
if [ -z "$1" ]; then exit 1; fi
OFILE_A="$(dirname "$1")/$(basename -s .trace.ron "$1")_job.csv"
OFILE_B="$(dirname "$1")/$(basename -s .trace.ron "$1")_instance.csv"
@ -7,6 +8,7 @@ if [ -n "$2" ]; then
EXTRA="-t $2"
fi
rm -f "$OFILE_A" "$OFILE_B"
~/code/FRET/state2gantt/target/debug/state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
# ~/code/FRET/state2gantt/plot.r "$OFILE_A" html
~/code/FRET/state2gantt/plot_response.r "$OFILE_A" "$OFILE_B" html
echo $ROOTDIR/state2gantt/target/debug/state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
$ROOTDIR/state2gantt/target/debug/state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
echo $ROOTDIR/state2gantt/plot_response.r "$OFILE_A" "$OFILE_B" html
$ROOTDIR/state2gantt/plot_response.r "$OFILE_A" "$OFILE_B" html

View File

@ -1,5 +1,6 @@
#!/usr/bin/env Rscript
# Load necessary libraries
#install.packages(c(ggplot2,readr,dplyr,plotly))
library(ggplot2)
library(readr)
library(dplyr)
@ -58,6 +59,8 @@ create_gantt_chart <- function(csv_file_a, csv_file_b, MIN_WIDTH, output_format
"<br>",
"State:", df$state,
"<br>",
"ABB:", df$abb,
"<br>",
"End:", df$end
)
df_b$label <- paste(
@ -84,14 +87,12 @@ create_gantt_chart <- function(csv_file_a, csv_file_b, MIN_WIDTH, output_format
aes(x = start, y = name),
color = "red", size = 1)
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
# Handle output format
if (!is.null(output_format)) {
output_file <- sub("\\.csv$", paste0(".", output_format), csv_file_a)
if (output_format == "html") {
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
htmlwidgets::saveWidget(p_interactive, output_file)
} else if (output_format == "png") {
ggsave(output_file, plot = p, device = "png")
@ -99,6 +100,8 @@ create_gantt_chart <- function(csv_file_a, csv_file_b, MIN_WIDTH, output_format
stop("Invalid output format. Use 'html' or 'png'.")
}
} else {
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
# Print the interactive Gantt chart
print(p_interactive)
}

View File

@ -1,7 +1,7 @@
use std::collections::HashMap;
use hashbrown::HashMap;
use std::path::PathBuf;
use std::{env,fs};
use fret::systemstate::{ExecInterval, JobInstance, ReducedFreeRTOSSystemState};
use fret::systemstate::{ExecInterval, RTOSJob, target_os::SystemTraceData, target_os::freertos::FreeRTOSTraceMetadata, target_os::SystemState, target_os::TaskControlBlock};
use std::io::Write;
use clap::Parser;
use itertools::Itertools;
@ -29,7 +29,7 @@ struct Config {
task: Option<String>,
/// Translate times to microseconds
#[arg(short, long, default_value = "false")]
#[arg(short, long)]
micros: bool,
}
@ -52,23 +52,29 @@ fn main() {
.append(false)
.open(x).expect("Could not create file"));
let mut level_per_task : HashMap<&String, u32> = HashMap::new();
let mut level_per_task : HashMap<String, u32> = HashMap::new();
let mut trace : (Vec<ExecInterval>, HashMap<u64, ReducedFreeRTOSSystemState>, Vec<JobInstance>, HashMap<String, HashMap<u32, (usize, usize, u64)>>) = ron::from_str(&String::from_utf8_lossy(&raw_input)).expect("Can not parse HashMap");
for s in &trace.0 {
// Store priority per task
let trace : FreeRTOSTraceMetadata = ron::from_str(&String::from_utf8_lossy(&raw_input)).expect("Can not parse HashMap");
// task_name -> (abb_addr -> (interval_count, exec_count, exec_time, woet))
let mut abb_profile : HashMap<String, HashMap<u32, (usize, usize, u64, u64)>> = trace.select_abb_profile(conf.task.clone());
for s in trace.intervals() {
if s.level == 0 {
level_per_task.insert(&trace.1[&s.start_state].current_task.task_name,trace.1[&s.start_state].current_task.priority);
let t = trace.states_map()[&s.start_state].current_task();
level_per_task.insert(t.task_name().clone(),t.base_priority);
}
}
let limits = conf.task.as_ref().map(|task| trace.2.iter().filter_map(move |x| if &x.name == task {Some(x)} else {None}).max_by_key(|x| x.response-x.release)).flatten().map(|x| x.release..x.response);
// Range of longest selected job
let limits = conf.task.as_ref().map(|task| trace.worst_jobs_per_task_by_response_time().get(task).map(|x| x.release..x.response)).flatten();
if let Some(limits) = &limits {
println!("Limits: {} - {}",limits.start,limits.end);
}
activation_file.as_mut().map(|x| writeln!(x,"start,end,prio,name,state_id,state").expect("Could not write to file"));
for s in trace.0.iter_mut() {
let mut intervals = trace.intervals().clone();
activation_file.as_mut().map(|x| writeln!(x,"start,end,prio,name,state_id,state,abb").expect("Could not write to file"));
for s in intervals.iter_mut() {
if let Some(l) = &limits {
if s.start_tick > l.end || s.end_tick < l.start {
continue;
@ -76,16 +82,17 @@ fn main() {
s.start_tick = s.start_tick.max(l.start);
s.end_tick = s.end_tick.min(l.end);
}
let start_tick = if conf.micros {s.start_tick / fret::time::clock::QEMU_ISNS_PER_USEC as u64} else {s.start_tick};
let end_tick = if conf.micros {s.end_tick / fret::time::clock::QEMU_ISNS_PER_USEC as u64} else {s.end_tick};
let state = &trace.1[&s.start_state];
let start_tick = if conf.micros {s.start_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.start_tick as f32};
let end_tick = if conf.micros {s.end_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.end_tick as f32};
let state = &trace.states_map()[&s.start_state];
if s.level == 0 {
activation_file.as_mut().map(|x| writeln!(x,"{},{},{},{},{:X},{}",start_tick,end_tick,trace.1[&s.start_state].current_task.priority,trace.1[&s.start_state].current_task.task_name, state.get_hash()>>48, state).expect("Could not write to file"));
activation_file.as_mut().map(|x| writeln!(x,"{},{},{},{},{:X},{},{}",start_tick,end_tick,trace.states_map()[&s.start_state].current_task().priority,trace.states_map()[&s.start_state].current_task().task_name, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX) ).expect("Could not write to file"));
} else {
activation_file.as_mut().map(|x| writeln!(x,"{},{},-{},{},{:X},{}",start_tick,end_tick,s.level,s.start_capture.1, state.get_hash()>>48, state).expect("Could not write to file"));
activation_file.as_mut().map(|x| writeln!(x,"{},{},-{},{},{:X},{},{}",start_tick,end_tick,s.level,s.start_capture.1, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX)).expect("Could not write to file"));
}
}
let mut jobs = trace.jobs().clone();
/* Write all job instances from release to response */
let instance_file = instance_path.map(|x| std::fs::OpenOptions::new()
.read(false)
@ -96,7 +103,7 @@ fn main() {
if let Some(mut file) = instance_file {
writeln!(file,"start,end,prio,name").expect("Could not write to file");
for s in trace.2.iter_mut() {
for s in jobs.iter_mut() {
if limits.as_ref().map(|x| !x.contains(&s.release) && !x.contains(&s.response) ).unwrap_or(false) {
continue;
}
@ -121,13 +128,13 @@ fn main() {
if let Some(mut file) = abb_file {
conf.micros = true;
if trace.3.is_empty() {
if abb_profile.is_empty() {
return;
}
writeln!(file,"name,addr,active,finish,micros").expect("Could not write to file");
for (name, rest) in trace.3.iter_mut().sorted_by_key(|x| x.0) {
rest.iter().sorted_by_key(|x| x.0).for_each(|(addr, (active, finish, time))| {
writeln!(file,"{},{},{},{},{:.1}",name,addr,active,finish,if conf.micros {*time as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*time as f64}).expect("Could not write to file");
writeln!(file,"name,addr,active,finish,micros,woet").expect("Could not write to file");
for (name, rest) in abb_profile.iter_mut().sorted_by_key(|x| x.0) {
rest.iter().sorted_by_key(|x| x.0).for_each(|(addr, (active, finish, time, woet))| {
writeln!(file,"{},{},{},{},{},{}",name,addr,active,finish,if conf.micros {*time as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*time as f64}, if conf.micros {*woet as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*woet as f64}).expect("Could not write to file");
});
}
}