diff --git a/fuzzers/FRET/benchmark/Snakefile b/fuzzers/FRET/benchmark/Snakefile index eb8220d15c..d8c01f1859 100644 --- a/fuzzers/FRET/benchmark/Snakefile +++ b/fuzzers/FRET/benchmark/Snakefile @@ -20,6 +20,12 @@ rule rebuild_qemu: shell: "unset CUSTOM_QEMU_NO_BUILD CUSTOM_QEMU_NO_CONFIGURE && cargo build" +rule build_tools: + output: + directory("../tools/bin") + shell: + "../tools/build.sh" + rule build_default: input: "../Cargo.toml", @@ -214,6 +220,7 @@ rule run_showmap: rule transform_trace: input: "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.trace.ron", + "../tools/bin" output: "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.csv", "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv", @@ -231,19 +238,20 @@ rule transform_trace: bkp=line['return_function'] select_task=line['select_task'] script=""" - echo $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task} - $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task} + echo ../tools/bin/state2gantt -i {input[0]} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task} + ../tools/bin/state2gantt -i {input[0]} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task} """ shell(script) rule trace2gantt: input: "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.csv", - "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv" + "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv", + "../tools/bin" output: "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.html", shell: - "Rscript $(pwd)/../../../../state2gantt/plot_response.r {input[0]} {input[1]} html" + "../tools/bin/plot_gantt.r {input[0]} {input[1]} html" rule quicktest: params: @@ -251,50 +259,6 @@ rule quicktest: input: expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stg', 'random'], 
target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 1 ))), -# main scenarios -# main competitors: 10 -# frafl: 10 -# random: 5 - -# low prio scenarios -# main competitors: 8 -# frafl: 8 -# random: 5 - -rule set128: - params: - benchdir=benchdir - input: - # waters full - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 5 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 10 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 5 ))), - # release full - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['release'], variant=['_seq_full'], num=range(0,int( 10 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['release'], variant=['_seq_full'], num=range(0,int( 10 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['release'], variant=['_seq_full'], num=range(0,int( 5 ))), - # release int (low prio) - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['release'], variant=['_seq_int'], num=range(0,int( 5 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random', 'frafl'], target=['release'], variant=['_seq_int'], num=range(0,int( 5 ))), - -rule set48: - params: - benchdir=benchdir - input: - # polycopter full - 
expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 12 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 12 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 10 ))), - - -rule set64: - params: - benchdir=benchdir - input: - # waters int+bytes (low prio) - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet', 'frafl'], target=['waters'], variant=['_seq_int', '_seq_bytes'], num=range(0,int( 8 ))), - expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_int', '_seq_bytes'], num=range(0,int( 5 ))), - rule eval_bytes: params: benchdir=benchdir @@ -348,3 +312,11 @@ rule clean: rule full_clean: shell: "rm -rf {benchdir}/bins & rm -rf {benchdir}/timedump" + +rule plot_benchmarks: + shell: + "bash scripts/plot_all_benchmarks.sh {benchdir}" + +rule plot_traces: + shell: + "bash scripts/plot_all_traces.sh {benchdir}" \ No newline at end of file diff --git a/fuzzers/FRET/benchmark/old_plot_all_benchmarks.sh b/fuzzers/FRET/benchmark/old_plot_all_benchmarks.sh deleted file mode 100644 index 9ab34addff..0000000000 --- a/fuzzers/FRET/benchmark/old_plot_all_benchmarks.sh +++ /dev/null @@ -1,35 +0,0 @@ -BDIR=remote -plot () { - [ ! 
-f ../benchmark/$BDIR/${1}${2}_all.png ] && Rscript plot_multi.r $BDIR/timedump ${1}${2} ../benchmark/$BDIR -} - -# Only bytes - -export SUFFIX="_seq_bytes" - -plot waters $SUFFIX -#plot release $SUFFIX -plot copter $SUFFIX -#plot interact $SUFFIX - -# Only interrupts - -export SUFFIX="_seq_int" - -plot waters $SUFFIX -plot release $SUFFIX -plot copter $SUFFIX -#plot interact $SUFFIX - -# Full - -export SUFFIX="_seq_full" - -plot waters $SUFFIX -#plot release $SUFFIX -plot copter $SUFFIX -#plot interact $SUFFIX - -plot copter "_seq_stateless_full" - -plot copter "_par_full" diff --git a/fuzzers/FRET/benchmark/plot_comparison.r b/fuzzers/FRET/benchmark/plot_comparison.r deleted file mode 100644 index 53d6ae4604..0000000000 --- a/fuzzers/FRET/benchmark/plot_comparison.r +++ /dev/null @@ -1,83 +0,0 @@ -library("mosaic") -args = commandArgs(trailingOnly=TRUE) - -#myolors=c("#339933","#0066ff","#993300") # grün, balu, rot -myolors=c("dark green","dark blue","dark red", "yellow") # grün, balu, rot - -if (length(args)==0) { - runtype="timedump" - target="waters" - filename_1=sprintf("%s.png",target) - filename_2=sprintf("%s_maxline.png",target) - filename_3=sprintf("%s_hist.png",target) -} else { - runtype=args[1] - target=args[2] - filename_1=sprintf("%s.png",args[2]) - filename_2=sprintf("%s_maxline.png",args[2]) - filename_3=sprintf("%s_hist.png",args[2]) - # filename_1=args[3] -} - -file_1=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_state",runtype,target) -file_2=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_afl",runtype,target) -file_3=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_random",runtype,target) -file_4=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_graph",runtype,target) -timetrace <- read.table(file_1, quote="\"", comment.char="") -timetrace_afl <- read.table(file_2, quote="\"", comment.char="") -timetrace_rand <- read.table(file_3, quote="\"", comment.char="") -timetrace_graph <- read.table(file_4, quote="\"", 
comment.char="") -timetrace[[2]]=seq_len(length(timetrace[[1]])) -timetrace_afl[[2]]=seq_len(length(timetrace_afl[[1]])) -timetrace_rand[[2]]=seq_len(length(timetrace_rand[[1]])) -timetrace_graph[[2]]=seq_len(length(timetrace_graph[[1]])) -names(timetrace)[1] <- "timetrace" -names(timetrace)[2] <- "iter" -names(timetrace_afl)[1] <- "timetrace" -names(timetrace_afl)[2] <- "iter" -names(timetrace_rand)[1] <- "timetrace" -names(timetrace_rand)[2] <- "iter" -names(timetrace_graph)[1] <- "timetrace" -names(timetrace_graph)[2] <- "iter" - -png(file=filename_1) -# pdf(file=filename_1,width=8, height=8) -plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.') -points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.') -points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.') -points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.') -abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1]) -abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2]) -abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3]) -dev.off() - -png(file=filename_3) -gf_histogram(~ timetrace,data=timetrace, fill=myolors[1]) %>% -gf_histogram(~ timetrace,data=timetrace_afl, fill=myolors[2]) %>% -gf_histogram(~ timetrace,data=timetrace_rand, fill=myolors[3]) %>% -gf_histogram(~ timetrace,data=timetrace_graph, fill=myolors[4]) -dev.off() - -# Takes a flat list -trace2maxline <- function(tr) { - maxline = tr - for (var in seq_len(length(maxline))[2:length(maxline)]) { - maxline[var] = max(maxline[var],maxline[var-1]) - } - #plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET") - return(maxline) -} -timetrace[[1]] <- trace2maxline(timetrace[[1]]) -timetrace_afl[[1]] <- trace2maxline(timetrace_afl[[1]]) -timetrace_rand[[1]] <- trace2maxline(timetrace_rand[[1]]) -timetrace_graph[[1]] <- trace2maxline(timetrace_graph[[1]]) - -png(file=filename_2) -plot(timetrace[[2]],timetrace[[1]], col=myolors[1], 
xlab="iters", ylab="wcet", pch='.') -points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.') -points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.') -points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.') -#abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1]) -#abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2]) -#abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3]) -dev.off() \ No newline at end of file diff --git a/fuzzers/FRET/benchmark/plot_multi.r b/fuzzers/FRET/benchmark/plot_multi.r deleted file mode 100644 index 8776376947..0000000000 --- a/fuzzers/FRET/benchmark/plot_multi.r +++ /dev/null @@ -1,340 +0,0 @@ -# install.packages(c("mosaic", "dplyr", "foreach", "doParallel")) -library("mosaic") -library("dplyr") -library("foreach") -library("doParallel") - -#setup parallel backend to use many processors -cores=detectCores() -cl <- makeCluster(cores[1]-4) #not to overload your computer -registerDoParallel(cl) - -args = commandArgs(trailingOnly=TRUE) - -if (length(args)==0) { - runtype="remote" - #target="waters" - target="waters" - #target="waters_int" - #target="watersv2_int" - outputpath="../benchmark" - #MY_SELECTION <- c('state', 'afl', 'graph', 'random') - SAVE_FILE=TRUE -} else { - runtype=args[1] - target=args[2] - outputpath=args[3] - #MY_SELECTION <- args[4:length(args)] - #if (length(MY_SELECTION) == 0) - # MY_SELECTION<-NULL - SAVE_FILE=TRUE - print(runtype) - print(target) - print(outputpath) -} -worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0, gen3=0, copter_par_full=164311) -worst_case <- worst_cases[[target]] -if (is.null(worst_case)) { - worst_case = 0 -} - -#MY_COLORS=c("green","blue","red", "orange", "pink", "black") -MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown") -BENCHDIR=sprintf("../benchmark/%s",runtype) -BASENAMES=Filter(function(x) x!="" && 
substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE)) -PATTERNS="%s#[0-9]*.time$" -#RIBBON='sd' -#RIBBON='span' -RIBBON='both' -DRAW_WC = worst_case > 0 -LEGEND_POS="bottomright" -#LEGEND_POS="bottomright" -CONTINUE_LINE_TO_END=FALSE - -# https://www.r-bloggers.com/2013/04/how-to-change-the-alpha-value-of-colours-in-r/ -alpha <- function(col, alpha=1){ - if(missing(col)) - stop("Please provide a vector of colours.") - apply(sapply(col, col2rgb)/255, 2, - function(x) - rgb(x[1], x[2], x[3], alpha=alpha)) -} - -# Trimm a list of data frames to common length -trim_data <- function(input,len=NULL) { - if (is.null(len)) { - len <- min(sapply(input, function(v) dim(v)[1])) - } - return(lapply(input, function(d) slice_head(d,n=len))) -} - -length_of_data <- function(input) { - min(sapply(input, function(v) dim(v)[1])) -} - -# Takes a flat list -trace2maxline <- function(tr) { - maxline = tr - for (var in seq_len(length(maxline))[2:length(maxline)]) { - #if (maxline[var]>1000000000) { - # maxline[var]=maxline[var-1] - #} else { - maxline[var] = max(maxline[var],maxline[var-1]) - #} - } - #plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET") - return(maxline) -} - -# Take a list of data frames, output same form but maxlines -data2maxlines <- function(tr) { - min_length <- min(sapply(tr, function(v) dim(v)[1])) - maxline <- tr - for (var in seq_len(length(tr))) { - maxline[[var]][[1]]=trace2maxline(tr[[var]][[1]]) - } - return(maxline) -} -# Take a multi-column data frame, output same form but maxlines -frame2maxlines <- function(tr) { - for (var in seq_len(length(tr))) { - tr[[var]]=trace2maxline(tr[[var]]) - } - return(tr) -} - -trace2maxpoints <- function(tr) { - minval = tr[1,1] - collect = tr[1,] - for (i in seq_len(dim(tr)[1])) { - if (minval < tr[i,1]) { - collect = rbind(collect,tr[i,]) - minval = tr[i,1] - } - } - tmp = tr[dim(tr)[1],] - tmp[1] = minval[1] - collect = rbind(collect,tmp) - return(collect) -} - -sample_maxpoints <- function(tr,po) { 
- index = 1 - collect=NULL - endpoint = dim(tr)[1] - for (p in po) { - if (p<=tr[1,2]) { - tmp = tr[index,] - tmp[2] = p - collect = rbind(collect, tmp) - } else if (p>=tr[endpoint,2]) { - tmp = tr[endpoint,] - tmp[2] = p - collect = rbind(collect, tmp) - } else { - for (i in seq(index,endpoint)-1) { - if (p >= tr[i,2] && p 0) { - runtypetables_reduced <- foreach(i=seq_len(length(runtypefiles))) %dopar% { - rtable = read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i))) - trace2maxpoints(rtable) - } - #runtypetables <- lapply(seq_len(length(runtypefiles)), - # function(i)read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i)))) - #runtypetables_reduced <- lapply(runtypetables, trace2maxpoints) - runtypetables_reduced - #all_runtypetables = c(all_runtypetables, list(runtypetables_reduced)) - } -} -all_runtypetables = all_runtypetables[lapply(all_runtypetables, length) > 0] -all_min_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% { - bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1) - ret = data.frame(min(unlist(lapply(rtt, function(v) v[dim(v)[1],2])))) - names(ret)[1] = bn - ret/(3600 * 1000) -} -all_max_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% { - bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1) - ret = data.frame(max(unlist(lapply(rtt, function(v) v[dim(v)[1],2])))) - names(ret)[1] = bn - ret/(3600 * 1000) -} -all_points = sort(unique(Reduce(c, lapply(all_runtypetables, function(v) Reduce(c, lapply(v, function(w) w[[2]])))))) -all_maxlines <- foreach (rtt=all_runtypetables) %do% { - bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1) - runtypetables_sampled = foreach(v=rtt) %dopar% { - sample_maxpoints(v, all_points)[1] - } - #runtypetables_sampled = lapply(rtt, function(v) sample_maxpoints(v, all_points)[1]) - tmp_frame <- Reduce(cbind, runtypetables_sampled) - statframe <- data.frame(rowMeans(tmp_frame),apply(tmp_frame, 1, 
sd),apply(tmp_frame, 1, min),apply(tmp_frame, 1, max), apply(tmp_frame, 1, median)) - names(statframe) <- c(bn, sprintf("%s_sd",bn), sprintf("%s_min",bn), sprintf("%s_max",bn), sprintf("%s_med",bn)) - #statframe[sprintf("%s_times",bn)] = all_points - round(statframe) - #all_maxlines = c(all_maxlines, list(round(statframe))) -} -one_frame<-data.frame(all_maxlines) -one_frame[length(one_frame)+1] <- all_points/(3600 * 1000) -names(one_frame)[length(one_frame)] <- 'time' - -typenames = names(one_frame)[which(names(one_frame) != 'time')] -typenames = typenames[which(!endsWith(typenames, "_sd"))] -typenames = typenames[which(!endsWith(typenames, "_med"))] -ylow=min(one_frame[typenames]) -yhigh=max(one_frame[typenames],worst_case) -typenames = typenames[which(!endsWith(typenames, "_min"))] -typenames = typenames[which(!endsWith(typenames, "_max"))] - -ml2lines <- function(ml,lim) { - lines = NULL - last = 0 - for (i in seq_len(dim(ml)[1])) { - if (!CONTINUE_LINE_TO_END && lim 0) { } combos <- dbGetQuery(con, "SELECT * FROM combos") -casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename") +casenames <- dbGetQuery(con, "SELECT casename FROM combos WHERE casename LIKE 'watersIc_%' GROUP BY casename") +#casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename") toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname") ml2lines <- function(ml, casename) { diff --git a/fuzzers/FRET/benchmark/plot_sqlite.r b/fuzzers/FRET/benchmark/scripts/plot_sqlite.r old mode 100644 new mode 100755 similarity index 97% rename from fuzzers/FRET/benchmark/plot_sqlite.r rename to fuzzers/FRET/benchmark/scripts/plot_sqlite.r index 83bacf33f2..a736a73a3d --- a/fuzzers/FRET/benchmark/plot_sqlite.r +++ b/fuzzers/FRET/benchmark/scripts/plot_sqlite.r @@ -89,7 +89,8 @@ if (length(args) > 0) { } combos <- dbGetQuery(con, "SELECT * FROM combos") -casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename") +casenames <- 
dbGetQuery(con, "SELECT casename FROM combos WHERE NOT casename LIKE 'watersIc_%' GROUP BY casename") +# casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename") toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname") ml2lines <- function(ml, casename) { diff --git a/fuzzers/FRET/benchmark/plot_stgsize.r b/fuzzers/FRET/benchmark/scripts/plot_stgsize.r similarity index 100% rename from fuzzers/FRET/benchmark/plot_stgsize.r rename to fuzzers/FRET/benchmark/scripts/plot_stgsize.r diff --git a/fuzzers/FRET/benchmark/plot_stgsize_multi.r b/fuzzers/FRET/benchmark/scripts/plot_stgsize_multi.r similarity index 100% rename from fuzzers/FRET/benchmark/plot_stgsize_multi.r rename to fuzzers/FRET/benchmark/scripts/plot_stgsize_multi.r diff --git a/fuzzers/FRET/tools/.gitignore b/fuzzers/FRET/tools/.gitignore new file mode 100644 index 0000000000..ba077a4031 --- /dev/null +++ b/fuzzers/FRET/tools/.gitignore @@ -0,0 +1 @@ +bin diff --git a/fuzzers/FRET/tools/build.sh b/fuzzers/FRET/tools/build.sh new file mode 100755 index 0000000000..9d07d6f74e --- /dev/null +++ b/fuzzers/FRET/tools/build.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# Always use the script's directory as the working directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +mkdir -p bin + +build() { + if [ -d "$1" ]; then + cd "$1" || exit 1 + cargo build --release + ln -rsf target/release/"$(basename "$1")" ../bin/"$(basename "$1")" + cd - || exit 1 + else + echo "Directory $1 does not exist." 
+ fi +} + +build edge_compare +build graph2viz +build input_serde +build number_cruncher +build state2gantt +ln -rsf state2gantt/gantt_driver bin/gantt_driver +ln -rsf state2gantt/plot_gantt.r bin/plot_gantt.r \ No newline at end of file diff --git a/fuzzers/FRET/tools/edge_compare/.gitignore b/fuzzers/FRET/tools/edge_compare/.gitignore new file mode 100644 index 0000000000..f153339ce3 --- /dev/null +++ b/fuzzers/FRET/tools/edge_compare/.gitignore @@ -0,0 +1,6 @@ +*.axf +*.qcow2 +demo +*.ron +*.bsp +target diff --git a/fuzzers/FRET/tools/edge_compare/Cargo.toml b/fuzzers/FRET/tools/edge_compare/Cargo.toml new file mode 100644 index 0000000000..48cb0b8ebd --- /dev/null +++ b/fuzzers/FRET/tools/edge_compare/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "edge_compare" +version = "0.1.0" +authors = [ "Alwin Berger " ] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +debug = true + +[dependencies] +clap = { version = "3.1.1", features = ["default"] } +serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib +ron = "0.7" # write serialized data - including hashmaps \ No newline at end of file diff --git a/fuzzers/FRET/tools/edge_compare/src/main.rs b/fuzzers/FRET/tools/edge_compare/src/main.rs new file mode 100644 index 0000000000..4e42fcbc9b --- /dev/null +++ b/fuzzers/FRET/tools/edge_compare/src/main.rs @@ -0,0 +1,71 @@ +use std::collections::HashMap; +use std::path::PathBuf; +use clap::Arg; +use clap::App; +use std::{env,fs}; + +fn main() { + let res = match App::new("edge_compare") + .version("0.1.0") + .author("Alwin Berger") + .about("Compare Serialized Edge-Maps.") + .arg( + Arg::new("a") + .short('a') + .long("map-a") + .required(true) + .takes_value(true), + ) + .arg( + Arg::new("b") + .short('b') + .long("map-b") + .required(true) + .takes_value(true), + ) + .try_get_matches_from(env::args()) + { + Ok(res) => res, + Err(err) => { + println!( + "Syntax: {}, --map-a --map-b \n{:?}", + 
env::current_exe() + .unwrap_or_else(|_| "fuzzer".into()) + .to_string_lossy(), + err.info, + ); + return; + } + }; + + let path_a = PathBuf::from(res.value_of("a").unwrap().to_string()); + let path_b = PathBuf::from(res.value_of("b").unwrap().to_string()); + + let raw_a = fs::read(path_a).expect("Can not read dumped edges a"); + let hmap_a : HashMap<(u64,u64),u64> = ron::from_str(&String::from_utf8_lossy(&raw_a)).expect("Can not parse HashMap"); + + let raw_b = fs::read(path_b).expect("Can not read dumped edges b"); + let hmap_b : HashMap<(u64,u64),u64> = ron::from_str(&String::from_utf8_lossy(&raw_b)).expect("Can not parse HashMap"); + + let mut a_and_b = Vec::<((u64,u64),u64)>::new(); + let mut a_and_b_differ = Vec::<((u64,u64),(u64,u64))>::new(); + let mut a_sans_b = Vec::<((u64,u64),u64)>::new(); + + for i_a in hmap_a.clone() { + match hmap_b.get(&i_a.0) { + None => a_sans_b.push(i_a), + Some(x) => if i_a.1 == *x { + a_and_b.push(i_a); + } else { + a_and_b_differ.push((i_a.0,(i_a.1,*x))); + } + } + } + let b_sans_a : Vec<((u64,u64),u64)> = hmap_b.into_iter().filter(|x| !hmap_a.contains_key(&x.0) ).collect(); + + println!("a_sans_b: {:#?}\na_and_b_differ: {:#?}\nb_sans_a: {:#?}",&a_sans_b,&a_and_b_differ,&b_sans_a); + println!("Stats: a\\b: {} a&=b: {} a&!=b: {} b\\a: {} avb: {} jaccarde: {}", + a_sans_b.len(),a_and_b.len(),a_and_b_differ.len(),b_sans_a.len(), + a_and_b.len()+a_and_b_differ.len()+a_sans_b.len()+b_sans_a.len(), + (a_and_b.len()+a_and_b_differ.len())as f64/(a_and_b.len()+a_and_b_differ.len()+a_sans_b.len()+b_sans_a.len()) as f64); +} diff --git a/fuzzers/FRET/tools/graph2viz/.gitignore b/fuzzers/FRET/tools/graph2viz/.gitignore new file mode 100644 index 0000000000..230f5af1a5 --- /dev/null +++ b/fuzzers/FRET/tools/graph2viz/.gitignore @@ -0,0 +1,4 @@ +*.csv +*.png +*.pdf +target diff --git a/fuzzers/FRET/tools/graph2viz/Cargo.toml b/fuzzers/FRET/tools/graph2viz/Cargo.toml new file mode 100644 index 0000000000..d707636663 --- /dev/null +++ 
b/fuzzers/FRET/tools/graph2viz/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "graph2viz" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +fret = { path = "../.." } +serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib +hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible +petgraph = { version="0.6.0", features = ["serde-1"] } +ron = "0.7" # write serialized data - including hashmaps +rand = "0.5" \ No newline at end of file diff --git a/fuzzers/FRET/tools/graph2viz/src/main.rs b/fuzzers/FRET/tools/graph2viz/src/main.rs new file mode 100644 index 0000000000..ae49e6fee3 --- /dev/null +++ b/fuzzers/FRET/tools/graph2viz/src/main.rs @@ -0,0 +1,73 @@ +use std::path::PathBuf; +use std::{env,fs}; +use fret::systemstate::stg::STGFeedbackState; +use fret::systemstate::stg::STGEdge; +use fret::systemstate::{target_os::SystemTraceData, target_os::freertos::FreeRTOSSystem, target_os::SystemState, target_os::TaskControlBlock}; +use petgraph::Direction::{Outgoing, Incoming}; +use petgraph::dot::{Dot, Config}; + +fn main() { + let args : Vec = env::args().collect(); + + let path_a = PathBuf::from(args[1].clone()); + let raw_a = fs::read(path_a).expect("Can not read dumped traces b"); + // let path_b = PathBuf::from(args[2].clone()); + + let feedbackstate : STGFeedbackState = ron::from_str(&String::from_utf8_lossy(&raw_a)).expect("Can not parse HashMap"); + + let mut splits = 0; + let mut unites = 0; + let mut g = feedbackstate.graph; + dbg!(g.node_count()); + let mut straight = 0; + let mut stub = 0; + let mut done = false; + while !done { + done = true; + for i in g.node_indices() { + let li = g.neighbors_directed(i, Incoming).count(); + let lo = g.neighbors_directed(i, Outgoing).count(); + if li == 1 && lo == 1 { + let prev = g.neighbors_directed(i, 
Incoming).into_iter().next().unwrap(); + let next = g.neighbors_directed(i, Outgoing).into_iter().next().unwrap(); + if prev != next { + g.update_edge(prev, next, STGEdge::default()); + g.remove_node(i); + straight+=1; + done = false; + break; + } + } + } + } + for i in g.node_indices() { + let li = g.neighbors_directed(i, Incoming).count(); + if li>1 { + unites += 1; + } + let lo = g.neighbors_directed(i, Outgoing).count(); + if lo>1 { + splits += 1; + } + if li == 0 || lo == 0 { + // g.remove_node(i); + stub += 1; + } + } + dbg!(splits); + dbg!(unites); + dbg!(straight); + dbg!(stub); + + let newgraph = g.map( + |_, n| n._pretty_print(&feedbackstate.systemstate_index), + // |_, n| format!("{} {:?}",n.get_taskname(),n.get_input_counts().iter().min().unwrap_or(&0)), + |_, e| e, + ); + // let tempg = format!("{:?}",Dot::with_config(&newgraph, &[Config::EdgeNoLabel])); + let f = format!("{:?}",Dot::with_config(&newgraph, &[Config::EdgeNoLabel])); + let f = f.replace("\\\\n", "\n"); + let f = f.replace("\\\"", ""); + println!("{}",f); + +} diff --git a/fuzzers/FRET/tools/input_serde/.gitignore b/fuzzers/FRET/tools/input_serde/.gitignore new file mode 100644 index 0000000000..732450b396 --- /dev/null +++ b/fuzzers/FRET/tools/input_serde/.gitignore @@ -0,0 +1,3 @@ +target +*.case +*.edit diff --git a/fuzzers/FRET/tools/input_serde/Cargo.toml b/fuzzers/FRET/tools/input_serde/Cargo.toml new file mode 100644 index 0000000000..6ee9fb3cad --- /dev/null +++ b/fuzzers/FRET/tools/input_serde/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "input_serde" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +fret = { path = "../.." 
} +libafl = { path = "../../../../libafl" } +serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib +hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible +# petgraph = { version="0.6.0", features = ["serde-1"] } +ron = "0.7" # write serialized data - including hashmaps +rand = "0.5" +clap = "4.5.17" +itertools = "0.13.0" +either = { version = "1.13.0", features = ["serde"] } +postcard = { version = "1.0.10", features = [ + "alloc", +], default-features = false } # no_std compatible serde serialization format diff --git a/fuzzers/FRET/tools/input_serde/src/main.rs b/fuzzers/FRET/tools/input_serde/src/main.rs new file mode 100644 index 0000000000..e6836af026 --- /dev/null +++ b/fuzzers/FRET/tools/input_serde/src/main.rs @@ -0,0 +1,149 @@ +use either::Either::{self, Left, Right}; +use hashbrown::HashMap; +use rand::rngs::StdRng; +use std::path::PathBuf; +use std::{env,fs}; +use fret::systemstate::{ExecInterval, RTOSJob, target_os::SystemTraceData, target_os::freertos::FreeRTOSTraceMetadata, target_os::SystemState, target_os::TaskControlBlock, helpers::interrupt_times_to_input_bytes}; +use libafl::inputs::multi::MultipartInput; +use libafl::inputs::{BytesInput, Input}; +use std::io::Write; +use clap::Parser; +use itertools::{assert_equal, join, Itertools}; +use rand::RngCore; +use libafl::inputs::HasMutatorBytes; + +const MAX_NUM_INTERRUPT: usize = 128; +const NUM_INTERRUPT_SOURCES: usize = 6; // Keep in sync with qemu-libafl-bridge/hw/timer/armv7m_systick.c:319 and FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/init/startup.c:216 +pub const QEMU_ICOUNT_SHIFT: u32 = 5; +pub const QEMU_ISNS_PER_SEC: u32 = u32::pow(10, 9) / u32::pow(2, QEMU_ICOUNT_SHIFT); +pub const QEMU_ISNS_PER_USEC: f32 = QEMU_ISNS_PER_SEC as f32 / 1000000.0; + +#[derive(Parser)] +struct Config { + /// Input Case + #[arg(short, long, value_name = "FILE")] + case: PathBuf, + + /// Input format + #[arg(short, long, 
value_name = "FORMAT")] + input_format: Option, + + /// Output format + #[arg(short, long, value_name = "FORMAT", default_value = "edit")] + format: String, +} + +/// Setup the interrupt inputs. Noop if interrupts are not fuzzed +fn setup_interrupt_inputs(mut input : MultipartInput) -> MultipartInput { + for i in 0..MAX_NUM_INTERRUPT { + let name = format!("isr_{}_times",i); + if input.parts_by_name(&name).next().is_none() { + input.add_part(name, BytesInput::new([0; MAX_NUM_INTERRUPT*4].to_vec())); + } + } + input +} + +fn unfold_input(input : &MultipartInput) -> HashMap,Vec>> { + let mut res = HashMap::new(); + for (name, part) in input.iter() { + if name == "bytes" { + res.insert(name.to_string(),Left(part.bytes().to_vec())); + } else { + // let times = unsafe{std::mem::transmute::<&[u8], &[u32]>(&part.bytes()[0..4*(part.bytes().len()/4)])}.to_vec(); + eprintln!("name {} len {}", name, part.bytes().len()); + let mut times = part.bytes().chunks(4).filter(|x| x.len()==4).map(|x| u32::from_le_bytes(x.try_into().unwrap())).collect::>(); + times.sort_unstable(); + res.insert(name.to_string(),Right(times)); + } + } + res +} + +fn fold_input(input : HashMap,Vec>>) -> MultipartInput { + let mut res = MultipartInput::new(); + for (name, data) in input { + match data { + Left(x) => res.add_part(name, BytesInput::new(x)), + Right(x) => res.add_part(name, BytesInput::new(interrupt_times_to_input_bytes(&x))), + } + } + res +} + + +fn main() { + let conf = Config::parse(); + let show_input = match conf.input_format { + Some(x) => { + match x.as_str() { + "case" => { + eprintln!("Interpreting input file as multipart input"); + MultipartInput::from_file(conf.case.as_os_str()).unwrap() + }, + "edit" => { + let bytes = fs::read(conf.case).expect("Can not read input file"); + let input_str = String::from_utf8_lossy(&bytes); + eprintln!("Interpreting input file as custom edit input"); + fold_input(ron::from_str::,Vec>>>(&input_str).expect("Failed to parse input")) + }, + "ron" => { 
+ let bytes = fs::read(conf.case).expect("Can not read input file"); + let input_str = String::from_utf8_lossy(&bytes); + eprintln!("Interpreting input file as raw ron input"); + ron::from_str::>(&input_str).expect("Failed to parse input") + }, + "raw" => { + let bytes = fs::read(conf.case).expect("Can not read input file"); + setup_interrupt_inputs(MultipartInput::from([("bytes",BytesInput::new(bytes))])) + }, + x => panic!("Unknown input format: {}", x), + } + } + Option::None => match MultipartInput::from_file(conf.case.as_os_str()) { + Ok(x) => { + eprintln!("Interpreting input file as multipart input"); + x + }, + Err(_) => { + let bytes = fs::read(conf.case).expect("Can not read input file"); + let input_str = String::from_utf8_lossy(&bytes); + match ron::from_str::,Vec>>>(&input_str) { + Ok(x) => { + eprintln!("Interpreting input file as custom edit input"); + fold_input(x) + }, + Err(_) => { + match ron::from_str::>(&input_str) { + Ok(x) => { + eprintln!("Interpreting input file as raw ron input"); + x + }, + Err(_) => { + eprintln!("Interpreting input file as raw input"); + setup_interrupt_inputs(MultipartInput::from([("bytes",BytesInput::new(bytes))])) + } + } + } + } + } + } + }; + // let uf = unfold_input(&show_input); + // println!("{:?}", show_input); + match conf.format.as_str() { + "edit" => { + let output = ron::to_string(&unfold_input(&show_input)).expect("Could not serialize input"); + println!("{}", output); + }, + "ron" => { + let output = ron::to_string(&show_input).expect("Could not serialize input"); + println!("{}", output); + }, + "case" => { + let output = postcard::to_allocvec(&show_input).expect("Could not serialize input"); + std::io::stdout().write_all(&output).expect("Could not write output"); + }, + _ => panic!("Unknown format") + } +} diff --git a/fuzzers/FRET/benchmark/number_cruncher/.gitignore b/fuzzers/FRET/tools/number_cruncher/.gitignore similarity index 56% rename from fuzzers/FRET/benchmark/number_cruncher/.gitignore rename 
to fuzzers/FRET/tools/number_cruncher/.gitignore index 9b1dffd90f..436d28b38a 100644 --- a/fuzzers/FRET/benchmark/number_cruncher/.gitignore +++ b/fuzzers/FRET/tools/number_cruncher/.gitignore @@ -1 +1,2 @@ *.sqlite +target diff --git a/fuzzers/FRET/benchmark/number_cruncher/Cargo.toml b/fuzzers/FRET/tools/number_cruncher/Cargo.toml similarity index 100% rename from fuzzers/FRET/benchmark/number_cruncher/Cargo.toml rename to fuzzers/FRET/tools/number_cruncher/Cargo.toml diff --git a/fuzzers/FRET/benchmark/number_cruncher/src/main.rs b/fuzzers/FRET/tools/number_cruncher/src/main.rs similarity index 100% rename from fuzzers/FRET/benchmark/number_cruncher/src/main.rs rename to fuzzers/FRET/tools/number_cruncher/src/main.rs diff --git a/fuzzers/FRET/tools/state2gantt/.gitignore b/fuzzers/FRET/tools/state2gantt/.gitignore new file mode 100644 index 0000000000..230f5af1a5 --- /dev/null +++ b/fuzzers/FRET/tools/state2gantt/.gitignore @@ -0,0 +1,4 @@ +*.csv +*.png +*.pdf +target diff --git a/fuzzers/FRET/tools/state2gantt/Cargo.toml b/fuzzers/FRET/tools/state2gantt/Cargo.toml new file mode 100644 index 0000000000..2eac0598c3 --- /dev/null +++ b/fuzzers/FRET/tools/state2gantt/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "state2gantt" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +fret = { path = "../.." 
} +serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib +hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible +# petgraph = { version="0.6.0", features = ["serde-1"] } +ron = "0.7" # write serialized data - including hashmaps +rand = "0.5" +clap = "4.5.17" +itertools = "0.13.0" diff --git a/fuzzers/FRET/tools/state2gantt/gantt_driver b/fuzzers/FRET/tools/state2gantt/gantt_driver new file mode 100755 index 0000000000..c769999a03 --- /dev/null +++ b/fuzzers/FRET/tools/state2gantt/gantt_driver @@ -0,0 +1,13 @@ +#!/bin/sh +if [ -z "$1" ]; then exit 1; fi +OFILE_A="$(dirname "$1")/$(basename -s .trace.ron "$1")_job.csv" +OFILE_B="$(dirname "$1")/$(basename -s .trace.ron "$1")_instance.csv" +OFILE_C="$(dirname "$1")/$(basename -s .trace.ron "$1")_abbs.csv" +if [ -n "$2" ]; then +EXTRA="-t $2" +fi +rm -f "$OFILE_A" "$OFILE_B" +echo state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA +state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA +echo plot_response.r "$OFILE_A" "$OFILE_B" html +plot_response.r "$OFILE_A" "$OFILE_B" html diff --git a/fuzzers/FRET/tools/state2gantt/plot_gantt.r b/fuzzers/FRET/tools/state2gantt/plot_gantt.r new file mode 100755 index 0000000000..42d7be6526 --- /dev/null +++ b/fuzzers/FRET/tools/state2gantt/plot_gantt.r @@ -0,0 +1,132 @@ +#!/usr/bin/env Rscript +# Load necessary libraries +#install.packages(c(ggplot2,readr,dplyr,plotly)) +library(ggplot2) +library(readr) +library(dplyr) +library(plotly) + +QEMU_SHIFT<-5 +TIMESCALE<-1000000 + +# Function to create a Gantt chart with dots on short segments +create_gantt_chart <- function(csv_file_a, csv_file_b, MIN_WIDTH, output_format = NULL, startpoint, endpoint) { + # Read the CSV file + df <- read_csv(csv_file_a) + # df_b <- read_csv(csv_file_b) + df_b <- read_csv(csv_file_b, col_types = cols(.default = "d", name = col_character())) + # df <- df %>% bind_rows(df_b) + + # Cut out everything 
outside the range + df <- df %>% + filter(end >= startpoint & start <= endpoint) %>% rowwise %>% mutate(end = min(end, endpoint), start = max(start, startpoint)) + + df_b <- df_b %>% + filter(end >= startpoint & start <= endpoint) %>% rowwise %>% mutate(end = min(end, endpoint), start = max(start, startpoint)) + + # Add a placeholder for all tasks that don't have job instances in the range + s <- min(df$start) + placeholder <- df_b %>% mutate(start = s, end = s) + df <- df %>% bind_rows(placeholder) + + + # Ensure start and end columns are treated as integers + df <- df %>% + mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE, + end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE) + + df_b <- df_b %>% + mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE, + end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE) + + # Calculate the segment width + df <- df %>% + mutate(width = end - start) + + # Sort the DataFrame by 'prio' column in descending order + df <- df %>% + arrange(prio) + + # Add labels to segments + df$label <- paste( + "Start:", df$start, + "
", + "Prio:", df$prio, + "
", + "Name:", df$name, + "
", + "Id:", df$state_id, + "
", + "State:", df$state, + "
", + "ABB:", df$abb, + "
", + "End:", df$end + ) + df_b$label <- paste( + "Start:", df_b$start, + "
", + "End:", df_b$end + ) + + # Create the Gantt chart with ggplot2 + p <- ggplot(df, aes(x = start, xend = end, y = reorder(name, prio), yend = name, text = label)) + + geom_segment(aes(color = factor(prio)), size = 6) + + labs(title = "Gantt Chart", x = "Time Step", y = "Task", color = "Priority") + + theme_minimal() + + # Plot Ranges + p <- p + geom_segment(data = df_b, aes(color = factor(prio)), size = 1) + + p <- p + geom_point(data = df_b, + aes(x = end, y = name), + color = "blue", size = 2) + + # Add dots on segments shorter than MIN_WIDTH + p <- p + geom_point(data = df %>% filter(width < MIN_WIDTH & width > 0), + aes(x = start, y = name), + color = "red", size = 1) + + # Handle output format + if (!is.null(output_format)) { + output_file <- sub("\\.csv$", paste0(".", output_format), csv_file_a) + if (output_format == "html") { + # Convert the ggplot object to a plotly object for interactivity + p_interactive <- ggplotly(p) + htmlwidgets::saveWidget(p_interactive, output_file) + } else if (output_format == "png") { + ggsave(output_file, plot = p, device = "png") + } else { + stop("Invalid output format. 
Use 'html' or 'png'.") + } + } else { + # Convert the ggplot object to a plotly object for interactivity + p_interactive <- ggplotly(p) + # Print the interactive Gantt chart + print(p_interactive) + } +} + +# Main execution +args <- commandArgs(trailingOnly = TRUE) +if (length(args) < 2 || length(args) > 5) { + stop("Usage: Rscript script.R <csv_file_a> <csv_file_b> [output_format] [<start> <end>]") +} else { + csv_file_a <- args[1] + csv_file_b <- args[2] + if (length(args) >= 3) { + output_format <- args[3] + } else { + output_format <- NULL + } + if (length(args) >= 5) { + start <- as.integer(args[4]) + end <- as.integer(args[5]) + } else { + start <- 0 + end <- Inf + } +} + +MIN_WIDTH <- 500 # You can set your desired minimum width here +create_gantt_chart(csv_file_a, csv_file_b, MIN_WIDTH, output_format, start, end) diff --git a/fuzzers/FRET/tools/state2gantt/src/main.rs b/fuzzers/FRET/tools/state2gantt/src/main.rs new file mode 100644 index 0000000000..554764dc82 --- /dev/null +++ b/fuzzers/FRET/tools/state2gantt/src/main.rs @@ -0,0 +1,142 @@ +use hashbrown::HashMap; +use std::borrow::Cow; +use std::path::PathBuf; +use std::fs; +use fret::systemstate::{target_os::SystemTraceData, target_os::freertos::FreeRTOSTraceMetadata, target_os::SystemState, target_os::TaskControlBlock}; +use std::io::Write; +use clap::Parser; +use itertools::Itertools; + +#[derive(Parser)] +struct Config { + /// Input Trace + #[arg(short, long, value_name = "FILE")] + input_trace: PathBuf, + + /// Output for activations + #[arg(short, long, value_name = "FILE")] + activation: Option<PathBuf>, + + /// Output for Release-Response intervals + #[arg(short, long, value_name = "FILE")] + response: Option<PathBuf>, + + /// Output abbs by task + #[arg(short, long, value_name = "FILE")] + per_task: Option<PathBuf>, + + /// Focussed Task + #[arg(short, long, value_name = "TASK")] + task: Option<String>, + + /// Translate times to microseconds + #[arg(short, long)] + micros: bool, +} + +fn main() { + // let args : Vec<String> = env::args().collect(); + let mut conf = 
Config::parse(); + + let input_path = conf.input_trace; + let raw_input = fs::read(input_path).expect("Can not read dumped traces"); + + let activation_path = conf.activation; + let instance_path = conf.response; + let abb_path = conf.per_task; + + /* Write all execution intervals */ + let mut activation_file = activation_path.map(|x| std::fs::OpenOptions::new() + .read(false) + .write(true) + .create(true) + .append(false) + .open(x).expect("Could not create file")); + + let mut level_per_task : HashMap = HashMap::new(); + + + // Store priority per task + let trace : FreeRTOSTraceMetadata = ron::from_str(&String::from_utf8_lossy(&raw_input)).expect("Can not parse HashMap"); + // task_name -> (abb_addr -> (interval_count, exec_count, exec_time, woet)) + let mut abb_profile : HashMap, HashMap> = trace.select_abb_profile(conf.task.clone()); + for s in trace.intervals() { + if s.level == 0 { + let t = trace.states_map()[&s.start_state].current_task(); + level_per_task.insert(t.task_name().clone(),t.base_priority); + } + } + + // Range of longest selected job + let limits = conf.task.as_ref().map(|task| trace.worst_jobs_per_task_by_response_time().get(task).map(|x| x.release..x.response)).flatten(); + if let Some(limits) = &limits { + println!("Limits: {} - {}",limits.start,limits.end); + } + + let mut intervals = trace.intervals().clone(); + activation_file.as_mut().map(|x| writeln!(x,"start,end,prio,name,state_id,state,abb").expect("Could not write to file")); + for s in intervals.iter_mut() { + if let Some(l) = &limits { + if s.start_tick > l.end || s.end_tick < l.start { + continue; + } + s.start_tick = s.start_tick.max(l.start); + s.end_tick = s.end_tick.min(l.end); + } + let start_tick = if conf.micros {s.start_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.start_tick as f32}; + let end_tick = if conf.micros {s.end_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.end_tick as f32}; + let state = &trace.states_map()[&s.start_state]; + if 
s.level == 0 { + activation_file.as_mut().map(|x| writeln!(x,"{},{},{},{},{:X},{},{}",start_tick,end_tick,trace.states_map()[&s.start_state].current_task().priority,trace.states_map()[&s.start_state].current_task().task_name, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX) ).expect("Could not write to file")); + } else { + activation_file.as_mut().map(|x| writeln!(x,"{},{},-{},{},{:X},{},{}",start_tick,end_tick,s.level,s.start_capture.1, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX)).expect("Could not write to file")); + } + } + + let mut jobs = trace.jobs().clone(); + /* Write all job instances from release to response */ + let instance_file = instance_path.map(|x| std::fs::OpenOptions::new() + .read(false) + .write(true) + .create(true) + .append(false) + .open(x).expect("Could not create file")); + + if let Some(mut file) = instance_file { + writeln!(file,"start,end,prio,name").expect("Could not write to file"); + for s in jobs.iter_mut() { + if limits.as_ref().map(|x| !x.contains(&s.release) && !x.contains(&s.response) ).unwrap_or(false) { + continue; + } + if let Some(l) = &limits { + if s.release > l.end || s.response < l.start { + continue; + } + s.release = s.release.max(l.start); + s.response = s.response.min(l.end); + } + writeln!(file,"{},{},{},{}",s.release,s.response,level_per_task[&s.name],s.name).expect("Could not write to file"); + } + } + + /* Write all abbs per task */ + let abb_file = abb_path.map(|x| std::fs::OpenOptions::new() + .read(false) + .write(true) + .create(true) + .append(false) + .open(x).expect("Could not create file")); + + if let Some(mut file) = abb_file { + conf.micros = true; + if abb_profile.is_empty() { + return; + } + writeln!(file,"name,addr,active,finish,micros,woet").expect("Could not write to file"); + for (name, rest) in abb_profile.iter_mut().sorted_by_key(|x| x.0) { + rest.iter().sorted_by_key(|x| x.0).for_each(|(addr, (active, finish, 
time, woet))| { + writeln!(file,"{},{},{},{},{},{}",name,addr,active,finish,if conf.micros {*time as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*time as f64}, if conf.micros {*woet as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*woet as f64}).expect("Could not write to file"); + }); + } + } +}