move tools, update README

This commit is contained in:
Alwin Berger 2025-08-12 11:43:30 +00:00
parent fbe99d22a6
commit e1179c479a
31 changed files with 160 additions and 6877 deletions

21
Docker/Containerfile Normal file
View File

@ -0,0 +1,21 @@
# Development container: Ubuntu + Nix (Determinate installer) + sshd,
# driven by systemd so nix-daemon and ssh start at container boot.
FROM ubuntu:rolling
# Nix + systemd
RUN apt update -y
RUN apt install curl systemd zsh -y
# Determinate Systems installer; the nix build sandbox is disabled because
# it is typically unusable inside an unprivileged container.
RUN curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install linux \
--extra-conf "sandbox = false" \
--no-start-daemon \
--no-confirm
ENV PATH="${PATH}:/nix/var/nix/profiles/default/bin"
# Smoke-test the nix installation at image build time.
RUN nix run nixpkgs#hello
RUN systemctl enable nix-daemon.service
# Boot systemd as PID 1 so the services enabled above actually run.
CMD [ "/usr/lib/systemd/systemd" ]
# SSH
RUN apt update && apt install -y openssh-server
# Root login is permitted, but only via key-based authentication.
RUN echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config
RUN echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config
RUN systemctl enable ssh
# FRET packages
COPY ./fret.list /tmp/apt.list
# Install every non-comment line of fret.list in a single apt transaction.
RUN apt install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y $(grep -vE "^\s*#" /tmp/apt.list | tr "\n" " ")
RUN rm /tmp/apt.list

42
Docker/README.md Normal file
View File

@ -0,0 +1,42 @@
# Develop using a Container
Use podman or docker to provide the environment.
## Building the Container
```sh
docker build -t fretenv .
```
## Creating Container
```sh
docker create --name fretbuilder -v fretnixstore:/nix/store -v ../:/root/FRET localhost/fretenv:latest
```
The volume ``fretnixstore`` is optional. It is used to cache the packages installed using nix in case you need to re-create the container.
Additionally, you can pass the following options to access the environment over ssh:
```sh
-p 2222:22 # 2222 is the ssh port of the container
-v $SOMEWHERE:/root # somewhere with a .ssh directory
```
## Starting the Container
```sh
docker start fretbuilder
```
## Entering the Container
```sh
docker exec -it fretbuilder bash
```
## Using Nix
```sh
cd ~/FRET
nix develop # or nix-shell
```
If you want to load the nix-shell automatically:
```sh
eval "$(direnv hook bash)"
direnv allow
```
## Removing the Environment
```sh
docker stop fretbuilder
docker container rm fretbuilder
docker image rm fretenv
```
## Potential Issues
If you run into a limit on threads when using podman, use ``podman create --pids-limit=8192 ...``

20
Docker/fret.list Normal file
View File

@ -0,0 +1,20 @@
# Essentials
git
vim
zsh
rsync
wget
tmux
direnv
htop
gdb
ffmpeg
# FRET
sqlite3
build-essential
python3-pandas
python3-jinja2
python3-matplotlib
python3-numpy
python3-reportbug
python3-scipy

1
Docker/prep_sys.sh Normal file
View File

@ -0,0 +1 @@
# Create the base build container: high memory limits, sshd published on
# localhost:8224, the nix store and the FRET checkout mounted from scratch space.
# NOTE(review): paths, limits and the image tag are site-specific — adjust per host.
podman create --memory 350g --memory-swap 351g --name basepod -p 127.0.0.1:8224:22 -v basestore:/nix/store -v /fs/scratch/alwin/containers/home:/root:z -v /fs/scratch/alwin/FRET:/root/FRET:z localhost/ubuntu-nix-basepod:latest

View File

@ -5,8 +5,13 @@
* FreeRTOS demos under `FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC`
* QEMU instrumentation under `qemu-libafl-bridge`
## HowTo
### Development environment
`nix develop` or `nix-shell`
### Development environment using nix
Use `nix develop` or `nix-shell` to enter a shell with all required tools.
### Development environment using podman/docker
If you don't have nix installed, you can use it through a container.
See Docker/README.md.
### Potential Issues
If you encounter errors where a temporary directory is not found, use `mkdir -p $TMPDIR`
### Build FRET
```sh
cd LibAFL/fuzzers/FRET
@ -17,15 +22,13 @@ cargo build
```
### Build additional tools
```sh
# Trace analysis tool
cd state2gantt && cargo build && cd -
# Benchmark evaluation tool
cd LibAFL/fuzzers/FRET/benchmark/number_cruncher && cargo build && cd -
LibAFL/fuzzers/FRET/tools/build.sh
```
### Build FreeRTOS Demos
```sh
cd LibAFL/fuzzers/FRET/benchmark
sh build_all_demos.sh
# see LibAFL/fuzzers/FRET/benchmark/build
```
### Example usage
* Build the demos and additional tools first
@ -48,12 +51,20 @@ open $DUMP/show_job.html
### Perform canned benchmarks
* Build the demos and additional tools first
* Select a benchmark set in `LibAFL/fuzzers/FRET/benchmark/Snakefile`
* Hardware Requirements:
- Recommendation: 512GiB of RAM with 64 physical cores
- About 8GB of RAM per Job on average are required to prevent OOMs
- The set used for the paper consists of ~270 Jobs, so you will need about five days to reproduce the results
```sh
# $BENCHDIR
cd LibAFL/fuzzers/FRET/benchmark
# e.g.
snakemake -c 128 set48 set64 set128
# optional
export BENCHDIR="eval_$(date -I)"
# Reproduce the evals in the paper e.g.
snakemake --cores 64 eval_bytes eval_int eval_full waters_multi
# plot the results
sh plot_all_benchmarks.sh
# See images in $BENCHDIR
sh plot_all_traces.sh
# See HTML files in $BENCHDIR/timedump/*/ for traces of the worst cases
```

View File

@ -1,6 +0,0 @@
*.axf
*.qcow2
demo
*.ron
*.bsp
target

220
edge_compare/Cargo.lock generated
View File

@ -1,220 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "clap"
version = "3.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d76c22c9b9b215eeb8d016ad3a90417bd13cb24cf8142756e6472445876cab7"
dependencies = [
"atty",
"bitflags",
"indexmap",
"os_str_bytes",
"strsim",
"termcolor",
"textwrap",
]
[[package]]
name = "edge_compare"
version = "0.1.0"
dependencies = [
"clap",
"ron",
"serde",
]
[[package]]
name = "hashbrown"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "indexmap"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223"
dependencies = [
"autocfg",
"hashbrown",
]
[[package]]
name = "libc"
version = "0.2.119"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
[[package]]
name = "memchr"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
[[package]]
name = "os_str_bytes"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64"
dependencies = [
"memchr",
]
[[package]]
name = "proc-macro2"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145"
dependencies = [
"proc-macro2",
]
[[package]]
name = "ron"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b861ecaade43ac97886a512b360d01d66be9f41f3c61088b42cedf92e03d678"
dependencies = [
"base64",
"bitflags",
"serde",
]
[[package]]
name = "serde"
version = "1.0.136"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.136"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "syn"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "termcolor"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
dependencies = [
"winapi-util",
]
[[package]]
name = "textwrap"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80"
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

View File

@ -1,17 +0,0 @@
[package]
name = "edge_compare"
version = "0.1.0"
authors = [ "Alwin Berger <alwin.berger@tu-dortmund.de>" ]
edition = "2021"
[features]
default = ["std"]
std = []
[profile.release]
debug = true
[dependencies]
clap = { version = "3.1.1", features = ["default"] }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
ron = "0.7" # write serialized data - including hashmaps

View File

@ -1,71 +0,0 @@
use std::collections::HashMap;
use std::path::PathBuf;
use clap::Arg;
use clap::App;
use std::{env,fs};
// Entry point: parse two RON-serialized edge maps (keyed by (from, to) edge,
// valued by hit count) and print set-style comparison statistics:
// a\b, a∩b with equal counts, a∩b with differing counts, b\a, union size,
// and the Jaccard index over the key sets.
fn main() {
// clap v3 builder-style argument parsing; both map paths are mandatory.
let res = match App::new("edge_compare")
.version("0.1.0")
.author("Alwin Berger")
.about("Compare Serialized Edge-Maps.")
.arg(
Arg::new("a")
.short('a')
.long("map-a")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("b")
.short('b')
.long("map-b")
.required(true)
.takes_value(true),
)
.try_get_matches_from(env::args())
{
Ok(res) => res,
Err(err) => {
// On bad arguments, print a usage hint instead of panicking.
println!(
"Syntax: {}, --map-a <input> --map-b <input>\n{:?}",
env::current_exe()
.unwrap_or_else(|_| "fuzzer".into())
.to_string_lossy(),
err.info,
);
return;
}
};
let path_a = PathBuf::from(res.value_of("a").unwrap().to_string());
let path_b = PathBuf::from(res.value_of("b").unwrap().to_string());
let raw_a = fs::read(path_a).expect("Can not read dumped edges a");
let hmap_a : HashMap<(u64,u64),u64> = ron::from_str(&String::from_utf8_lossy(&raw_a)).expect("Can not parse HashMap");
let raw_b = fs::read(path_b).expect("Can not read dumped edges b");
let hmap_b : HashMap<(u64,u64),u64> = ron::from_str(&String::from_utf8_lossy(&raw_b)).expect("Can not parse HashMap");
// Partition A's edges into: in-both-with-equal-count, in-both-with-
// differing-count, and only-in-A.
let mut a_and_b = Vec::<((u64,u64),u64)>::new();
let mut a_and_b_differ = Vec::<((u64,u64),(u64,u64))>::new();
let mut a_sans_b = Vec::<((u64,u64),u64)>::new();
for i_a in hmap_a.clone() {
match hmap_b.get(&i_a.0) {
None => a_sans_b.push(i_a),
Some(x) => if i_a.1 == *x {
a_and_b.push(i_a);
} else {
a_and_b_differ.push((i_a.0,(i_a.1,*x)));
}
}
}
// Edges present only in B.
let b_sans_a : Vec<((u64,u64),u64)> = hmap_b.into_iter().filter(|x| !hmap_a.contains_key(&x.0) ).collect();
println!("a_sans_b: {:#?}\na_and_b_differ: {:#?}\nb_sans_a: {:#?}",&a_sans_b,&a_and_b_differ,&b_sans_a);
// "avb" is the union size; the last value is the Jaccard index computed on
// shared keys (count differences still count as shared) over the union.
println!("Stats: a\\b: {} a&=b: {} a&!=b: {} b\\a: {} avb: {} jaccarde: {}",
a_sans_b.len(),a_and_b.len(),a_and_b_differ.len(),b_sans_a.len(),
a_and_b.len()+a_and_b_differ.len()+a_sans_b.len()+b_sans_a.len(),
(a_and_b.len()+a_and_b_differ.len())as f64/(a_and_b.len()+a_and_b_differ.len()+a_sans_b.len()+b_sans_a.len()) as f64);
}

View File

@ -1,4 +0,0 @@
*.csv
*.png
*.pdf
target

2003
graph2viz/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,14 +0,0 @@
[package]
name = "graph2viz"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fret = { path = "../LibAFL/fuzzers/FRET" }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible
petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"

View File

@ -1,4 +0,0 @@
all(%):
target/debug/state2gantt $%_afl.ron > $%_afl.csv
target/debug/state2gantt $%_state.ron > $%_state.csv
target/debug/state2gantt $%_random.ron > $%_random.csv

View File

@ -1,71 +0,0 @@
use std::path::PathBuf;
use std::{env,fs};
use fret::systemstate::graph::SysGraphFeedbackState;
use petgraph::Direction::{Outgoing, Incoming};
use petgraph::dot::{Dot, Config};
// Load a RON-serialized SysGraphFeedbackState (path given as argv[1]),
// contract straight-line chains of nodes, print shape statistics via dbg!,
// and emit the simplified graph as Graphviz dot on stdout.
fn main() {
let args : Vec<String> = env::args().collect();
let path_a = PathBuf::from(args[1].clone());
let raw_a = fs::read(path_a).expect("Can not read dumped traces b");
// let path_b = PathBuf::from(args[2].clone());
let feedbackstate : SysGraphFeedbackState = ron::from_str(&String::from_utf8_lossy(&raw_a)).expect("Can not parse HashMap");
let mut splits = 0;
let mut unites = 0;
let mut g = feedbackstate.graph;
dbg!(g.node_count());
let mut straight = 0;
let mut stub = 0;
let mut done = false;
// Repeatedly remove nodes with exactly one predecessor and one successor,
// bridging prev -> next. The scan restarts after every removal because
// remove_node invalidates node indices; loop until a fixed point.
while !done {
done = true;
for i in g.node_indices() {
let li = g.neighbors_directed(i, Incoming).count();
let lo = g.neighbors_directed(i, Outgoing).count();
if li == 1 && lo == 1 {
let prev = g.neighbors_directed(i, Incoming).into_iter().next().unwrap();
let next = g.neighbors_directed(i, Outgoing).into_iter().next().unwrap();
// Self-loops (prev == next) are left untouched.
if prev != next {
g.update_edge(prev, next, ());
g.remove_node(i);
straight+=1;
done = false;
break;
}
}
}
}
// Count merge points (unites), branch points (splits) and dead ends (stubs).
for i in g.node_indices() {
let li = g.neighbors_directed(i, Incoming).count();
if li>1 {
unites += 1;
}
let lo = g.neighbors_directed(i, Outgoing).count();
if lo>1 {
splits += 1;
}
if li == 0 || lo == 0 {
// g.remove_node(i);
stub += 1;
}
}
dbg!(splits);
dbg!(unites);
dbg!(straight);
dbg!(stub);
// Relabel nodes with a human-readable description for the dot output.
let newgraph = g.map(
|_, n| n.pretty_print(),
// |_, n| format!("{} {:?}",n.get_taskname(),n.get_input_counts().iter().min().unwrap_or(&0)),
|_, e| e,
);
// let tempg = format!("{:?}",Dot::with_config(&newgraph, &[Config::EdgeNoLabel]));
// Strip the escaping that Debug-formatting adds so dot renders cleanly.
let f = format!("{:?}",Dot::with_config(&newgraph, &[Config::EdgeNoLabel]));
let f = f.replace("\\\\n", "\n");
let f = f.replace("\\\"", "");
println!("{}",f);
}

View File

@ -1 +0,0 @@
target

View File

@ -1,3 +0,0 @@
target
*.case
*.edit

1868
input_serde/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,21 +0,0 @@
[package]
name = "input_serde"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fret = { path = "../LibAFL/fuzzers/FRET" }
libafl = { path = "../LibAFL/libafl" }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
# petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"
clap = "4.5.17"
itertools = "0.13.0"
either = { version = "1.13.0", features = ["serde"] }
postcard = { version = "1.0.10", features = [
"alloc",
], default-features = false } # no_std compatible serde serialization format

View File

@ -1,149 +0,0 @@
use either::Either::{self, Left, Right};
use hashbrown::HashMap;
use rand::rngs::StdRng;
use std::path::PathBuf;
use std::{env,fs};
use fret::systemstate::{ExecInterval, RTOSJob, target_os::SystemTraceData, target_os::freertos::FreeRTOSTraceMetadata, target_os::SystemState, target_os::TaskControlBlock, helpers::interrupt_times_to_input_bytes};
use libafl::inputs::multi::MultipartInput;
use libafl::inputs::{BytesInput, Input};
use std::io::Write;
use clap::Parser;
use itertools::{assert_equal, join, Itertools};
use rand::RngCore;
use libafl::inputs::HasMutatorBytes;
const MAX_NUM_INTERRUPT: usize = 128;
const NUM_INTERRUPT_SOURCES: usize = 6; // Keep in sync with qemu-libafl-bridge/hw/timer/armv7m_systick.c:319 and FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/init/startup.c:216
pub const QEMU_ICOUNT_SHIFT: u32 = 5;
pub const QEMU_ISNS_PER_SEC: u32 = u32::pow(10, 9) / u32::pow(2, QEMU_ICOUNT_SHIFT);
pub const QEMU_ISNS_PER_USEC: f32 = QEMU_ISNS_PER_SEC as f32 / 1000000.0;
/// Command-line options for the test-case (de)serialization tool.
#[derive(Parser)]
struct Config {
/// Input Case
#[arg(short, long, value_name = "FILE")]
case: PathBuf,
/// Input format ("case", "edit", "ron" or "raw"); auto-detected when absent
#[arg(short, long, value_name = "FORMAT")]
input_format: Option<String>,
/// Output format ("edit", "ron" or "case")
#[arg(short, long, value_name = "FORMAT", default_value = "edit")]
format: String,
}
/// Setup the interrupt inputs. Noop if interrupts are not fuzzed
/// Setup the interrupt inputs. Noop if interrupts are not fuzzed:
/// for every interrupt slot, add a zero-filled `isr_<i>_times` part
/// unless a part with that name already exists.
fn setup_interrupt_inputs(mut input : MultipartInput<BytesInput>) -> MultipartInput<BytesInput> {
    for idx in 0..MAX_NUM_INTERRUPT {
        let part_name = format!("isr_{}_times", idx);
        let already_present = input.parts_by_name(&part_name).next().is_some();
        if !already_present {
            // One u32 (4 bytes) per potential interrupt, all zeroed.
            let zeroed = vec![0u8; MAX_NUM_INTERRUPT * 4];
            input.add_part(part_name, BytesInput::new(zeroed));
        }
    }
    input
}
/// Expand a multipart input into an editable map: the raw "bytes" part is
/// copied verbatim, every other part is decoded as sorted little-endian u32
/// interrupt times.
fn unfold_input(input : &MultipartInput<BytesInput>) -> HashMap<String,Either<Vec<u8>,Vec<u32>>> {
    let mut unfolded = HashMap::new();
    for (part_name, part) in input.iter() {
        if part_name == "bytes" {
            // Payload bytes pass through untouched.
            unfolded.insert(part_name.to_string(), Left(part.bytes().to_vec()));
        } else {
            eprintln!("name {} len {}", part_name, part.bytes().len());
            // Decode complete 4-byte little-endian words; a trailing partial
            // word is discarded, exactly like the filter on chunk length.
            let mut decoded: Vec<u32> = part
                .bytes()
                .chunks_exact(4)
                .map(|word| u32::from_le_bytes(word.try_into().unwrap()))
                .collect();
            decoded.sort_unstable();
            unfolded.insert(part_name.to_string(), Right(decoded));
        }
    }
    unfolded
}
/// Inverse of `unfold_input`: re-encode the editable map back into a
/// multipart input, turning interrupt-time lists back into byte parts.
fn fold_input(input : HashMap<String,Either<Vec<u8>,Vec<u32>>>) -> MultipartInput<BytesInput> {
    let mut folded = MultipartInput::new();
    for (part_name, payload) in input {
        let encoded = match payload {
            Left(raw) => raw,
            Right(times) => interrupt_times_to_input_bytes(&times),
        };
        folded.add_part(part_name, BytesInput::new(encoded));
    }
    folded
}
// Entry point: load a fuzzing test case in one of several on-disk formats
// (explicit via --input-format, otherwise auto-detected by trying each
// format in turn) and re-emit it in the requested output format on stdout.
fn main() {
let conf = Config::parse();
let show_input = match conf.input_format {
Some(x) => {
match x.as_str() {
"case" => {
eprintln!("Interpreting input file as multipart input");
MultipartInput::from_file(conf.case.as_os_str()).unwrap()
},
"edit" => {
let bytes = fs::read(conf.case).expect("Can not read input file");
let input_str = String::from_utf8_lossy(&bytes);
eprintln!("Interpreting input file as custom edit input");
fold_input(ron::from_str::<HashMap<String,Either<Vec<u8>,Vec<u32>>>>(&input_str).expect("Failed to parse input"))
},
"ron" => {
let bytes = fs::read(conf.case).expect("Can not read input file");
let input_str = String::from_utf8_lossy(&bytes);
eprintln!("Interpreting input file as raw ron input");
ron::from_str::<MultipartInput<BytesInput>>(&input_str).expect("Failed to parse input")
},
"raw" => {
// Raw byte blobs get the zeroed interrupt parts added.
let bytes = fs::read(conf.case).expect("Can not read input file");
setup_interrupt_inputs(MultipartInput::from([("bytes",BytesInput::new(bytes))]))
},
x => panic!("Unknown input format: {}", x),
}
}
// No explicit format: fall through the formats most-specific first —
// multipart case file, then the editable RON map, then raw RON, then
// finally plain bytes.
Option::None => match MultipartInput::from_file(conf.case.as_os_str()) {
Ok(x) => {
eprintln!("Interpreting input file as multipart input");
x
},
Err(_) => {
let bytes = fs::read(conf.case).expect("Can not read input file");
let input_str = String::from_utf8_lossy(&bytes);
match ron::from_str::<HashMap<String,Either<Vec<u8>,Vec<u32>>>>(&input_str) {
Ok(x) => {
eprintln!("Interpreting input file as custom edit input");
fold_input(x)
},
Err(_) => {
match ron::from_str::<MultipartInput<BytesInput>>(&input_str) {
Ok(x) => {
eprintln!("Interpreting input file as raw ron input");
x
},
Err(_) => {
eprintln!("Interpreting input file as raw input");
setup_interrupt_inputs(MultipartInput::from([("bytes",BytesInput::new(bytes))]))
}
}
}
}
}
}
};
// let uf = unfold_input(&show_input);
// println!("{:?}", show_input);
// Serialize in the requested output format; "case" writes binary postcard
// to stdout, the others print text.
match conf.format.as_str() {
"edit" => {
let output = ron::to_string(&unfold_input(&show_input)).expect("Could not serialize input");
println!("{}", output);
},
"ron" => {
let output = ron::to_string(&show_input).expect("Could not serialize input");
println!("{}", output);
},
"case" => {
let output = postcard::to_allocvec(&show_input).expect("Could not serialize input");
std::io::stdout().write_all(&output).expect("Could not write output");
},
_ => panic!("Unknown format")
}
}

26
one_time_setup.sh Executable file
View File

@ -0,0 +1,26 @@
#!/usr/bin/env bash
# One-time project setup: builds QEMU, the helper tools and the benchmark
# kernels. Must run inside the nix dev shell so build dependencies resolve.
if [[ -z "$INSIDE_DEVSHELL" ]]; then
echo "This script should be run inside a nix-shell. Run 'nix develop' or 'nix-shell' first."
exit 1
fi
# Always use the script's directory as the working directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
# Ensure that all sources are up-to-date
#git submodule update --init --recursive
# The central directory for the benchmarks
cd LibAFL/fuzzers/FRET/benchmark
# one-time setup
# build QEMU for the first time
snakemake -c 1 rebuild_qemu
# Build helper tools to aid the analysis of the benchmarks
snakemake -c 1 build_tools
# Build the kernels for the benchmarks
snakemake -c 1 build_kernels
cd -

31
run_eval.sh Executable file
View File

@ -0,0 +1,31 @@
#!/usr/bin/env bash
# Run the full benchmark evaluation from the paper via snakemake.
# Configuration
export CORES=64 # Number of physical cores
export RUNTIME=86400 # 24 hours in seconds
export TARGET_REPLICA_NUMBER=9
export RANDOM_REPLICA_NUMBER=1
export MULTIJOB_REPLICA_NUMBER=3
if [[ -z "$INSIDE_DEVSHELL" ]]; then
echo "This script should be run inside a nix-shell. Run 'nix develop' or 'nix-shell' first."
exit 1
fi
# Always operate relative to the script's own directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
cd LibAFL/fuzzers/FRET/benchmark
# Date-stamped output directory for this evaluation run
export BENCHDIR="eval_$(date -I)"
# NOTE(review): every snakemake call below passes -n (dry-run), so nothing is
# actually executed — confirm whether -n should be removed for a real run.
# prepare all fuzzer configurations
snakemake --keep-incomplete -n --cores $CORES all_bins
# Run the eval examples from the paper
snakemake --keep-incomplete -n --cores $CORES eval_bytes eval_int eval_full waters_multi
# plot the results
snakemake --keep-incomplete -n --cores $CORES plot_benchmarks
# See images in $BENCHDIR
snakemake --keep-incomplete -n --cores $CORES plot_traces
# See HTML files in $BENCHDIR/timedump/*/ for traces of the worst cases
cd -

View File

@ -1,4 +0,0 @@
*.csv
*.png
*.pdf
target

1946
state2gantt/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,16 +0,0 @@
[package]
name = "state2gantt"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fret = { path = "../LibAFL/fuzzers/FRET" }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
# petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"
clap = "4.5.17"
itertools = "0.13.0"

View File

@ -1,4 +0,0 @@
all(%):
target/debug/state2gantt $%_afl.ron > $%_afl.csv
target/debug/state2gantt $%_state.ron > $%_state.csv
target/debug/state2gantt $%_random.ron > $%_random.csv

View File

@ -1,14 +0,0 @@
#!/bin/sh
# Convert one .trace.ron file into job/instance/abb CSVs via state2gantt and
# render an HTML response-time plot next to the input file.
# $1: path to the .trace.ron input; $2 (optional): task name forwarded as -t.
ROOTDIR=~/code/work/FRETv3
if [ -z "$1" ]; then exit 1; fi
# Derive the three output file names from the input's basename.
OFILE_A="$(dirname "$1")/$(basename -s .trace.ron "$1")_job.csv"
OFILE_B="$(dirname "$1")/$(basename -s .trace.ron "$1")_instance.csv"
OFILE_C="$(dirname "$1")/$(basename -s .trace.ron "$1")_abbs.csv"
if [ -n "$2" ]; then
EXTRA="-t $2"
fi
rm -f "$OFILE_A" "$OFILE_B"
# NOTE(review): $1 and $EXTRA are left unquoted so $EXTRA word-splits into
# "-t <task>"; paths containing whitespace will break — confirm acceptable.
echo $ROOTDIR/state2gantt/target/debug/state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
$ROOTDIR/state2gantt/target/debug/state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
echo $ROOTDIR/state2gantt/plot_response.r "$OFILE_A" "$OFILE_B" html
$ROOTDIR/state2gantt/plot_response.r "$OFILE_A" "$OFILE_B" html

View File

@ -1,44 +0,0 @@
# Plot a task-trace CSV as a simple Gantt-style chart using base R graphics.
# Usage: Rscript plot_trace.r [trace.csv] — with an argument a PNG is written
# next to the input; without one a hard-coded default trace is displayed.
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
filename="~/code/FRET/state2gantt/trace.csv"
} else {
filename=args[1]
}
trace <- read.csv(filename)
task_ids = unique(trace[[3]]) # assume this has descending prio order
# Map a task name to a numeric priority from its position in task_ids.
prio_from_name <- function(t) {
1 + length(task_ids) - Position(function(y) y==t, task_ids)
}
trace[[3]]=sapply(trace[[3]], prio_from_name )
# 16:9 output dimensions
width = 710
height = (9/16) * width
if (length(args)>0) { png(file=sprintf("%s.png",filename), width=width, height=height) }
# prepare an empty plot
plot(c(trace[[2]][1],trace[[2]][length(trace[[2]])]),
c(0,length(task_ids)),
col = "white", xlab = "", ylab = "")
# draw all segments
segments(x0 = trace$start,
y0 = trace$name,
x1 = trace$end,
y1 = trace$name,
lwd = 3)
# Re-draw all segments of a given priority in a highlight colour.
highlight_prio <- function(p,col) {
interest = trace[which(trace[[3]] == p),]
segments(x0 = interest$start,
y0 = interest$name,
x1 = interest$end,
y1 = interest$name,
lwd = 3,
col = col)
}
highlight_prio(1,"red")
#highlight_prio(2,"green")
if (length(args)>0) { dev.off() }

View File

@ -1,87 +0,0 @@
#!/usr/bin/env Rscript
# Render a task-activation CSV as an interactive (html) or static (png)
# Gantt chart; segments shorter than MIN_WIDTH are marked with red dots.
# Load necessary libraries
library(ggplot2)
library(readr)
library(dplyr)
library(plotly)
# Time conversion: values are shifted QEMU icount ticks, scaled by TIMESCALE.
# NOTE(review): keep QEMU_SHIFT in sync with the tracing tools' icount shift.
QEMU_SHIFT<-5
TIMESCALE<-1000000
# Function to create a Gantt chart with dots on short segments
create_gantt_chart <- function(csv_file, MIN_WIDTH, output_format = NULL) {
# Read the CSV file
df <- read_csv(csv_file)
# Ensure start and end columns are treated as integers
df <- df %>%
mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE,
end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE)
# Calculate the segment width
df <- df %>%
mutate(width = end - start)
# Sort the DataFrame by 'prio' column in descending order
df <- df %>%
arrange(prio)
# Hover label shown by plotly for each segment.
df$label <- paste(
"Start:", df$start,
"<br>",
"Prio:", df$prio,
"<br>",
"Name:", df$name,
"<br>",
"State:", df$state_id,
"<br>",
"State:", df$state,
"<br>",
"End:", df$end
)
# Create the Gantt chart with ggplot2
p <- ggplot(df, aes(x = start, xend = end, y = reorder(name, prio), yend = name, text = label)) +
geom_segment(aes(color = factor(prio)), size = 6) +
labs(title = "Gantt Chart", x = "Time Step", y = "Task", color = "Priority") +
theme_minimal()
# Add dots on segments shorter than MIN_WIDTH
p <- p + geom_point(data = df %>% filter(width < MIN_WIDTH),
aes(x = start, y = name),
color = "red", size = 1)
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
# Handle output format
if (!is.null(output_format)) {
# Output file: same path as the CSV with the extension swapped.
output_file <- sub("\\.csv$", paste0(".", output_format), csv_file)
if (output_format == "html") {
htmlwidgets::saveWidget(p_interactive, output_file)
} else if (output_format == "png") {
ggsave(output_file, plot = p, device = "png")
} else {
stop("Invalid output format. Use 'html' or 'png'.")
}
} else {
# Print the interactive Gantt chart
print(p_interactive)
}
}
# Main execution
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1 || length(args) > 2) {
stop("Usage: Rscript script.R <csv_file> [output_format]")
} else {
csv_file <- args[1]
if (length(args) == 2) {
output_format <- args[2]
} else {
output_format <- NULL
}
}
MIN_WIDTH <- 500 # You can set your desired minimum width here
create_gantt_chart(csv_file, MIN_WIDTH, output_format)

View File

@ -1,29 +0,0 @@
import plotly.figure_factory as ff
import plotly.express as px
import pandas as pd
import sys
# Render a CSV of task activations (columns: name, start, end, prio) as a
# plotly Gantt chart and write it to plot.html in the working directory.
def create_gantt_chart(csv_file):
# Read the CSV file
df = pd.read_csv(csv_file)
# Highest-priority tasks first so they appear at the top of the chart.
df.sort_values(by='prio', inplace=True, ascending=False)
# Prepare the data for the Gantt chart
gantt_data = [
dict(Task=row['name'], Start=row['start'], Finish=row['end'], Priority=row['prio'])
for _, row in df.iterrows()
]
# Create the Gantt chart
fig = ff.create_gantt(gantt_data, group_tasks=True)
# Show the Gantt chart
fig.write_html("plot.html")
# CLI entry point: exactly one argument, the CSV file to plot.
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python script.py <csv_file>")
sys.exit(1)
csv_file = sys.argv[1]
create_gantt_chart(csv_file)

View File

@ -1,132 +0,0 @@
#!/usr/bin/env Rscript
# Render a job-activation CSV plus a release/response-interval CSV as an
# interactive (html) or static (png) Gantt chart, optionally cropped to a
# [startpoint, endpoint] time window.
# Load necessary libraries
#install.packages(c(ggplot2,readr,dplyr,plotly))
library(ggplot2)
library(readr)
library(dplyr)
library(plotly)
# Time conversion: values are shifted QEMU icount ticks, scaled by TIMESCALE.
# NOTE(review): keep QEMU_SHIFT in sync with the tracing tools' icount shift.
QEMU_SHIFT<-5
TIMESCALE<-1000000
# Function to create a Gantt chart with dots on short segments
create_gantt_chart <- function(csv_file_a, csv_file_b, MIN_WIDTH, output_format = NULL, startpoint, endpoint) {
# Read the CSV file
df <- read_csv(csv_file_a)
# df_b <- read_csv(csv_file_b)
# Force numeric columns; 'name' stays character even when it looks numeric.
df_b <- read_csv(csv_file_b, col_types = cols(.default = "d", name = col_character()))
# df <- df %>% bind_rows(df_b)
# Cut out everything outside the range
df <- df %>%
filter(end >= startpoint & start <= endpoint) %>% rowwise %>% mutate(end = min(end, endpoint), start = max(start, startpoint))
df_b <- df_b %>%
filter(end >= startpoint & start <= endpoint) %>% rowwise %>% mutate(end = min(end, endpoint), start = max(start, startpoint))
# Add a placeholder for all tasks that don't have job instances in the range
s <- min(df$start)
placeholder <- df_b %>% mutate(start = s, end = s)
df <- df %>% bind_rows(placeholder)
# Ensure start and end columns are treated as integers
df <- df %>%
mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE,
end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE)
df_b <- df_b %>%
mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE,
end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE)
# Calculate the segment width
df <- df %>%
mutate(width = end - start)
# Sort the DataFrame by 'prio' column in descending order
df <- df %>%
arrange(prio)
# Add labels to segments
df$label <- paste(
"Start:", df$start,
"<br>",
"Prio:", df$prio,
"<br>",
"Name:", df$name,
"<br>",
"Id:", df$state_id,
"<br>",
"State:", df$state,
"<br>",
"ABB:", df$abb,
"<br>",
"End:", df$end
)
df_b$label <- paste(
"Start:", df_b$start,
"<br>",
"End:", df_b$end
)
# Create the Gantt chart with ggplot2
p <- ggplot(df, aes(x = start, xend = end, y = reorder(name, prio), yend = name, text = label)) +
geom_segment(aes(color = factor(prio)), size = 6) +
labs(title = "Gantt Chart", x = "Time Step", y = "Task", color = "Priority") +
theme_minimal()
# Plot Ranges
p <- p + geom_segment(data = df_b, aes(color = factor(prio)), size = 1)
p <- p + geom_point(data = df_b,
aes(x = end, y = name),
color = "blue", size = 2)
# Add dots on segments shorter than MIN_WIDTH
p <- p + geom_point(data = df %>% filter(width < MIN_WIDTH & width > 0),
aes(x = start, y = name),
color = "red", size = 1)
# Handle output format
if (!is.null(output_format)) {
# Output file: csv_file_a's path with the extension swapped.
output_file <- sub("\\.csv$", paste0(".", output_format), csv_file_a)
if (output_format == "html") {
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
htmlwidgets::saveWidget(p_interactive, output_file)
} else if (output_format == "png") {
ggsave(output_file, plot = p, device = "png")
} else {
stop("Invalid output format. Use 'html' or 'png'.")
}
} else {
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
# Print the interactive Gantt chart
print(p_interactive)
}
}
# Main execution
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2 || length(args) > 5) {
stop("Usage: Rscript script.R <csv_file> <csv_file> [output_format] [<strt> <end>]")
} else {
csv_file_a <- args[1]
csv_file_b <- args[2]
if (length(args) >= 3) {
output_format <- args[3]
} else {
output_format <- NULL
}
if (length(args) >= 5) {
start <- as.integer(args[4])
end <- as.integer(args[5])
} else {
# Default window: the whole trace
start <- 0
end <- Inf
}
}
MIN_WIDTH <- 500 # You can set your desired minimum width here
create_gantt_chart(csv_file_a, csv_file_b, MIN_WIDTH, output_format, start, end)

View File

@ -1,141 +0,0 @@
use hashbrown::HashMap;
use std::path::PathBuf;
use std::{env,fs};
use fret::systemstate::{ExecInterval, RTOSJob, target_os::SystemTraceData, target_os::freertos::FreeRTOSTraceMetadata, target_os::SystemState, target_os::TaskControlBlock};
use std::io::Write;
use clap::Parser;
use itertools::Itertools;
// Command-line interface of the trace-export tool.
// CAUTION: with `#[derive(Parser)]` the `///` doc comments double as clap
// help text and the field names determine the flag names — changing either
// changes the CLI, so they are left untouched here.
#[derive(Parser)]
struct Config {
/// Input Trace
#[arg(short, long, value_name = "FILE")]
input_trace: PathBuf, // RON-serialized FreeRTOSTraceMetadata dump (see main)
/// Output for activations
#[arg(short, long, value_name = "FILE")]
activation: Option<PathBuf>, // CSV of execution intervals; not written if omitted
/// Output for Release-Response intervals
#[arg(short, long, value_name = "FILE")]
response: Option<PathBuf>, // CSV of job instances; not written if omitted
/// Output abbs by task
#[arg(short, long, value_name = "FILE")]
per_task: Option<PathBuf>, // CSV of the per-task ABB profile; not written if omitted
/// Focussed Task
#[arg(short, long, value_name = "TASK")]
task: Option<String>, // clamps outputs to this task's worst-job window
/// Translate times to microseconds
#[arg(short, long)]
micros: bool, // divide tick counts by QEMU_ISNS_PER_USEC in the outputs
}
/// Entry point of the trace-export tool.
///
/// Reads a RON-serialized `FreeRTOSTraceMetadata` dump (`--input-trace`)
/// and writes up to three optional CSV reports:
/// * `--activation`: one row per execution interval
///   (`start,end,prio,name,state_id,state,abb`)
/// * `--response`: one row per job instance, release to response
///   (`start,end,prio,name`)
/// * `--per-task`: aggregated ABB profile per task
///   (`name,addr,active,finish,micros,woet`)
///
/// With `--task`, all outputs are clamped to the `release..response` window
/// of that task's worst job by response time. With `--micros`, tick counts
/// are converted to microseconds via `QEMU_ISNS_PER_USEC`.
fn main() {
let mut conf = Config::parse();
let input_path = conf.input_trace;
// Read the whole dump up front; it is parsed as RON below.
let raw_input = fs::read(input_path).expect("Can not read dumped traces");
let activation_path = conf.activation;
let instance_path = conf.response;
let abb_path = conf.per_task;
/* Write all execution intervals */
// Each output is optional: `map` only opens the file when a path was given.
let mut activation_file = activation_path.map(|x| std::fs::OpenOptions::new()
.read(false)
.write(true)
.create(true)
.append(false)
.open(x).expect("Could not create file"));
// task name -> priority, collected from top-level (level == 0) intervals;
// used below to label job instances in the --response output.
let mut level_per_task : HashMap<String, u32> = HashMap::new();
// Store priority per task
let trace : FreeRTOSTraceMetadata = ron::from_str(&String::from_utf8_lossy(&raw_input)).expect("Can not parse HashMap");
// task_name -> (abb_addr -> (interval_count, exec_count, exec_time, woet))
let mut abb_profile : HashMap<String, HashMap<u32, (usize, usize, u64, u64)>> = trace.select_abb_profile(conf.task.clone());
for s in trace.intervals() {
if s.level == 0 {
let t = trace.states_map()[&s.start_state].current_task();
// NOTE(review): this records `base_priority`, while the activation CSV
// below prints `priority` for the same tasks — confirm which is intended.
level_per_task.insert(t.task_name().clone(),t.base_priority);
}
}
// Range of longest selected job
// When a focus task was given, limit all outputs to the release..response
// window of its worst job by response time (None if the task has no jobs).
let limits = conf.task.as_ref().map(|task| trace.worst_jobs_per_task_by_response_time().get(task).map(|x| x.release..x.response)).flatten();
if let Some(limits) = &limits {
println!("Limits: {} - {}",limits.start,limits.end);
}
let mut intervals = trace.intervals().clone();
activation_file.as_mut().map(|x| writeln!(x,"start,end,prio,name,state_id,state,abb").expect("Could not write to file"));
for s in intervals.iter_mut() {
if let Some(l) = &limits {
// Drop intervals entirely outside the window; clamp partial overlaps.
if s.start_tick > l.end || s.end_tick < l.start {
continue;
}
s.start_tick = s.start_tick.max(l.start);
s.end_tick = s.end_tick.min(l.end);
}
// Optionally convert instruction ticks to microseconds for the CSV.
let start_tick = if conf.micros {s.start_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.start_tick as f32};
let end_tick = if conf.micros {s.end_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.end_tick as f32};
let state = &trace.states_map()[&s.start_state];
// state.get_hash()>>48 keeps only the top 16 bits as a short state id.
if s.level == 0 {
// Task-level interval: label with the running task's name and priority.
activation_file.as_mut().map(|x| writeln!(x,"{},{},{},{},{:X},{},{}",start_tick,end_tick,trace.states_map()[&s.start_state].current_task().priority,trace.states_map()[&s.start_state].current_task().task_name, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX) ).expect("Could not write to file"));
} else {
// Nested interval (level > 0) — presumably an interrupt/nested context:
// printed with a negated level as "prio" and the capture name as "name".
// TODO(review): confirm the semantics of `start_capture.1`.
activation_file.as_mut().map(|x| writeln!(x,"{},{},-{},{},{:X},{},{}",start_tick,end_tick,s.level,s.start_capture.1, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX)).expect("Could not write to file"));
}
}
let mut jobs = trace.jobs().clone();
/* Write all job instances from release to response */
let instance_file = instance_path.map(|x| std::fs::OpenOptions::new()
.read(false)
.write(true)
.create(true)
.append(false)
.open(x).expect("Could not create file"));
if let Some(mut file) = instance_file {
writeln!(file,"start,end,prio,name").expect("Could not write to file");
for s in jobs.iter_mut() {
// NOTE(review): this first check skips any job whose endpoints both lie
// outside the window — including a job that fully ENCLOSES it, which the
// clamping below would otherwise keep. Confirm that is intended.
if limits.as_ref().map(|x| !x.contains(&s.release) && !x.contains(&s.response) ).unwrap_or(false) {
continue;
}
if let Some(l) = &limits {
// Drop jobs entirely outside the window; clamp partial overlaps.
if s.release > l.end || s.response < l.start {
continue;
}
s.release = s.release.max(l.start);
s.response = s.response.min(l.end);
}
// Job times are written in raw ticks — `--micros` is not applied here.
writeln!(file,"{},{},{},{}",s.release,s.response,level_per_task[&s.name],s.name).expect("Could not write to file");
}
}
/* Write all abbs per task */
let abb_file = abb_path.map(|x| std::fs::OpenOptions::new()
.read(false)
.write(true)
.create(true)
.append(false)
.open(x).expect("Could not create file"));
if let Some(mut file) = abb_file {
// NOTE(review): `micros` is forced on for the ABB report, so the
// `if conf.micros` branches below are always true — confirm intended.
conf.micros = true;
// An empty profile produces no file content at all (not even the header).
if abb_profile.is_empty() {
return;
}
writeln!(file,"name,addr,active,finish,micros,woet").expect("Could not write to file");
// Sort by task name, then by ABB address, for a stable, diffable output.
for (name, rest) in abb_profile.iter_mut().sorted_by_key(|x| x.0) {
rest.iter().sorted_by_key(|x| x.0).for_each(|(addr, (active, finish, time, woet))| {
writeln!(file,"{},{},{},{},{},{}",name,addr,active,finish,if conf.micros {*time as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*time as f64}, if conf.micros {*woet as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*woet as f64}).expect("Could not write to file");
});
}
}
}