add scripts

This commit is contained in:
Alwin Berger 2025-08-14 10:24:17 +00:00
parent 061f0f2d8c
commit 990533a9a3
41 changed files with 1624 additions and 670 deletions

View File

@ -10,4 +10,7 @@ bins
 .snakemake
 *.zip
 *.tar.*
 *.sqlite
+eval*
+test_*
+bench_*

View File

@ -4,7 +4,15 @@ envvars:
"BENCHDIR" "BENCHDIR"
def_flags="--release --no-default-features --features std,snapshot_fast,restarting,do_hash_notify_state,do_hash_notify_value,fuzz_int,trace_job_response_times" def_flags="--release --no-default-features --features std,snapshot_fast,restarting,do_hash_notify_state,do_hash_notify_value,fuzz_int,trace_job_response_times"
benchdir=os.environ["BENCHDIR"] benchdir=os.environ["BENCHDIR"]
RUNTIME=(3600*24) RUNTIME=int(os.environ["RUNTIME"]) if "RUNTIME" in os.environ else (3600*24)
TARGET_SET=['feedgeneration100', 'stgwoet', 'frafl']
TARGET_REPLICA_NUMBER=int(os.environ["TARGET_REPLICA_NUMBER"]) if "TARGET_REPLICA_NUMBER" in os.environ else 10
RANDOM_REPLICA_NUMBER=int(os.environ["RANDOM_REPLICA_NUMBER"]) if "RANDOM_REPLICA_NUMBER" in os.environ else 1
MULTIJOB_REPLICA_NUMBER=int(os.environ["MULTIJOB_REPLICA_NUMBER"]) if "MULTIJOB_REPLICA_NUMBER" in os.environ else 3
rule build_kernels:
shell:
"bash scripts/build_all_demos.sh"
 rule copy_kernel:
     input:
@ -18,6 +26,12 @@ rule rebuild_qemu:
     shell:
         "unset CUSTOM_QEMU_NO_BUILD CUSTOM_QEMU_NO_CONFIGURE && cargo build"
+rule build_tools:
+    output:
+        directory("../tools/bin")
+    shell:
+        "../tools/build.sh"
 rule build_default:
     input:
         "../Cargo.toml",
@ -212,6 +226,7 @@ rule run_showmap:
 rule transform_trace:
     input:
         "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.trace.ron",
+        "../tools/bin"
     output:
         "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.csv",
         "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv",
@ -229,19 +244,20 @@ rule transform_trace:
         bkp=line['return_function']
         select_task=line['select_task']
         script="""
-            echo $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
-            $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
+            echo ../tools/bin/state2gantt -i {input[0]} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
+            ../tools/bin/state2gantt -i {input[0]} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
         """
         shell(script)
 rule trace2gantt:
     input:
         "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.csv",
-        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv"
+        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv",
+        "../tools/bin"
     output:
         "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.html",
     shell:
-        "Rscript $(pwd)/../../../../state2gantt/plot_response.r {input[0]} {input[1]} html"
+        "../tools/bin/plot_gantt.r {input[0]} {input[1]} html"
 rule quicktest:
     params:
@ -249,56 +265,51 @@ rule quicktest:
     input:
         expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stg', 'random'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 1 ))),
-# main scenarios
-# main competitors: 10
-# frafl: 10
-# random: 5
+rule eval_bytes:
+    params:
+        benchdir=benchdir
+    input:
+        # waters bytes
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=TARGET_SET, target=['waters'], variant=['_seq_bytes'], num=range(0,int( TARGET_REPLICA_NUMBER ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_bytes'], num=range(0,int( RANDOM_REPLICA_NUMBER ))),
+        # polycopter full
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=TARGET_SET, target=['polycopter'], variant=['_seq_full'], num=range(0,int( TARGET_REPLICA_NUMBER ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['polycopter'], variant=['_seq_full'], num=range(0,int( RANDOM_REPLICA_NUMBER ))),
-# low prio scenarios
-# main competitors: 8
-# frafl: 8
-# random: 5
+rule eval_int:
+    params:
+        benchdir=benchdir
+    input:
+        # waters int
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=TARGET_SET, target=['waters'], variant=['_seq_int'], num=range(0,int( TARGET_REPLICA_NUMBER ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_int'], num=range(0,int( RANDOM_REPLICA_NUMBER ))),
+        # release int
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=TARGET_SET, target=['release'], variant=['_seq_int'], num=range(0,int( TARGET_REPLICA_NUMBER ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['release'], variant=['_seq_int'], num=range(0,int( RANDOM_REPLICA_NUMBER ))),
-rule set128:
+rule eval_full:
     params:
         benchdir=benchdir
     input:
         # waters full
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 10 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 10 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 5 ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=TARGET_SET, target=['waters'], variant=['_seq_full'], num=range(0,int( TARGET_REPLICA_NUMBER ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_full'], num=range(0,int( RANDOM_REPLICA_NUMBER ))),
         # release full
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['release'], variant=['_seq_full'], num=range(0,int( 10 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['release'], variant=['_seq_full'], num=range(0,int( 10 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['release'], variant=['_seq_full'], num=range(0,int( 5 ))),
-        # release int (low prio)
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['release'], variant=['_seq_int'], num=range(0,int( 5 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random', 'frafl'], target=['release'], variant=['_seq_int'], num=range(0,int( 5 ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=TARGET_SET, target=['release'], variant=['_seq_full'], num=range(0,int( TARGET_REPLICA_NUMBER ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['release'], variant=['_seq_full'], num=range(0,int( RANDOM_REPLICA_NUMBER ))),
-rule set48:
+rule waters_multi:
     params:
         benchdir=benchdir
     input:
-        # polycopter full
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 12 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 12 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 10 ))),
-rule set64:
-    params:
-        benchdir=benchdir
-    input:
-        # waters int+bytes (low prio)
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet', 'frafl'], target=['waters'], variant=['_seq_int', '_seq_bytes'], num=range(0,int( 8 ))),
-        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_int', '_seq_bytes'], num=range(0,int( 5 ))),
+        expand("{benchdir}/timedump/{fuzzer}/{target}{chain}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet', 'frafl'], target=['waters'], chain=['Ic11','Ic12','Ic13','Ic14','Ic21','Ic22','Ic23','Ic32','Ic33'], variant=['_seq_full'], num=range(0,int( MULTIJOB_REPLICA_NUMBER ))), # 'Ic31'
 rule all_bins:
     params:
         benchdir=benchdir
     input:
-        expand("{benchdir}/bins/target_{target}", benchdir=benchdir, target=['random','frafl','stg','stgwoet','feedgeneration100','genetic100'])
+        expand("{benchdir}/bins/target_{target}", benchdir=benchdir, target=TARGET_SET+['random'])
 rule clean:
     shell:
@ -306,4 +317,12 @@ rule clean:
 rule full_clean:
     shell:
         "rm -rf {benchdir}/bins & rm -rf {benchdir}/timedump"
+rule plot_benchmarks:
+    shell:
+        "bash scripts/plot_all_benchmarks.sh {benchdir}"
+rule plot_traces:
+    shell:
+        "bash scripts/plot_all_traces.sh {benchdir}"

View File

@ -1,35 +0,0 @@
BDIR=remote
plot () {
[ ! -f ../benchmark/$BDIR/${1}${2}_all.png ] && Rscript plot_multi.r $BDIR/timedump ${1}${2} ../benchmark/$BDIR
}
# Only bytes
export SUFFIX="_seq_bytes"
plot waters $SUFFIX
#plot release $SUFFIX
plot copter $SUFFIX
#plot interact $SUFFIX
# Only interrupts
export SUFFIX="_seq_int"
plot waters $SUFFIX
plot release $SUFFIX
plot copter $SUFFIX
#plot interact $SUFFIX
# Full
export SUFFIX="_seq_full"
plot waters $SUFFIX
#plot release $SUFFIX
plot copter $SUFFIX
#plot interact $SUFFIX
plot copter "_seq_stateless_full"
plot copter "_par_full"

View File

@ -1,8 +0,0 @@
#!/bin/sh
if [[ -n "$1" ]]; then
TARGET="$1"
else
TARGET=$BENCHDIR
fi
number_cruncher/target/debug/number_cruncher -i $TARGET/timedump -o $TARGET/bench.sqlite
Rscript plot_sqlite.r $TARGET/bench.sqlite $TARGET

View File

@ -1,30 +0,0 @@
get_max_nodecount () {
rm -f sizecomp && for sizefile in remote/timedump/**/$1*.stgsize;do echo "$(tail -n 1 $sizefile),${sizefile}" >> sizecomp; done; sort -n sizecomp | tail -n 1
}
get_largest_files () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
}
perform () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
echo $T | cut -d',' -f6 | xargs -I {} ./plot_stgsize.r {}
mv "$(echo $T | cut -d',' -f6 | xargs -I {} basename -s .stgsize {})_nodes.png" $1_nodes.png
}
# perform copter
# perform release
# perform waters
A=$(get_largest_files copter)
B=$(get_largest_files release)
C=$(get_largest_files waters)
A_="$(echo $A | sed 's/copter/UAV w. hid. com./')"
B_="$(echo $B | sed 's/release/Async. rel./')"
C_="$(echo $C | sed 's/waters/Waters ind. ch./')"
echo $A_ $B_ $C_
cp $A "$A_"
cp $B "$B_"
cp $C "$C_"
./plot_stgsize_multi.r "$A_" "$B_" "$C_"

View File

@ -1,83 +0,0 @@
library("mosaic")
args = commandArgs(trailingOnly=TRUE)
#myolors=c("#339933","#0066ff","#993300") # green, blue, red
myolors=c("dark green","dark blue","dark red", "yellow") # green, blue, red, yellow
if (length(args)==0) {
runtype="timedump"
target="waters"
filename_1=sprintf("%s.png",target)
filename_2=sprintf("%s_maxline.png",target)
filename_3=sprintf("%s_hist.png",target)
} else {
runtype=args[1]
target=args[2]
filename_1=sprintf("%s.png",args[2])
filename_2=sprintf("%s_maxline.png",args[2])
filename_3=sprintf("%s_hist.png",args[2])
# filename_1=args[3]
}
file_1=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_state",runtype,target)
file_2=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_afl",runtype,target)
file_3=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_random",runtype,target)
file_4=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_graph",runtype,target)
timetrace <- read.table(file_1, quote="\"", comment.char="")
timetrace_afl <- read.table(file_2, quote="\"", comment.char="")
timetrace_rand <- read.table(file_3, quote="\"", comment.char="")
timetrace_graph <- read.table(file_4, quote="\"", comment.char="")
timetrace[[2]]=seq_len(length(timetrace[[1]]))
timetrace_afl[[2]]=seq_len(length(timetrace_afl[[1]]))
timetrace_rand[[2]]=seq_len(length(timetrace_rand[[1]]))
timetrace_graph[[2]]=seq_len(length(timetrace_graph[[1]]))
names(timetrace)[1] <- "timetrace"
names(timetrace)[2] <- "iter"
names(timetrace_afl)[1] <- "timetrace"
names(timetrace_afl)[2] <- "iter"
names(timetrace_rand)[1] <- "timetrace"
names(timetrace_rand)[2] <- "iter"
names(timetrace_graph)[1] <- "timetrace"
names(timetrace_graph)[2] <- "iter"
png(file=filename_1)
# pdf(file=filename_1,width=8, height=8)
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
dev.off()
png(file=filename_3)
gf_histogram(~ timetrace,data=timetrace, fill=myolors[1]) %>%
gf_histogram(~ timetrace,data=timetrace_afl, fill=myolors[2]) %>%
gf_histogram(~ timetrace,data=timetrace_rand, fill=myolors[3]) %>%
gf_histogram(~ timetrace,data=timetrace_graph, fill=myolors[4])
dev.off()
# Takes a flat list
trace2maxline <- function(tr) {
maxline = tr
for (var in seq_len(length(maxline))[2:length(maxline)]) {
maxline[var] = max(maxline[var],maxline[var-1])
}
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
return(maxline)
}
timetrace[[1]] <- trace2maxline(timetrace[[1]])
timetrace_afl[[1]] <- trace2maxline(timetrace_afl[[1]])
timetrace_rand[[1]] <- trace2maxline(timetrace_rand[[1]])
timetrace_graph[[1]] <- trace2maxline(timetrace_graph[[1]])
png(file=filename_2)
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
#abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
#abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
#abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
dev.off()

View File

@ -1,339 +0,0 @@
library("mosaic")
library("dplyr")
library("foreach")
library("doParallel")
#setup parallel backend to use many processors
cores=detectCores()
cl <- makeCluster(cores[1]-4) #not to overload your computer
registerDoParallel(cl)
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
runtype="remote"
#target="waters"
target="waters"
#target="waters_int"
#target="watersv2_int"
outputpath="../benchmark"
#MY_SELECTION <- c('state', 'afl', 'graph', 'random')
SAVE_FILE=TRUE
} else {
runtype=args[1]
target=args[2]
outputpath=args[3]
#MY_SELECTION <- args[4:length(args)]
#if (length(MY_SELECTION) == 0)
# MY_SELECTION<-NULL
SAVE_FILE=TRUE
print(runtype)
print(target)
print(outputpath)
}
worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0, gen3=0)
worst_case <- worst_cases[[target]]
if (is.null(worst_case)) {
worst_case = 0
}
#MY_COLORS=c("green","blue","red", "orange", "pink", "black")
MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
BENCHDIR=sprintf("../benchmark/%s",runtype)
BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE))
PATTERNS="%s#[0-9]*.time$"
#RIBBON='sd'
#RIBBON='span'
RIBBON='both'
DRAW_WC = worst_case > 0
LEGEND_POS="bottomright"
#LEGEND_POS="bottomright"
CONTINUE_LINE_TO_END=FALSE
# https://www.r-bloggers.com/2013/04/how-to-change-the-alpha-value-of-colours-in-r/
alpha <- function(col, alpha=1){
if(missing(col))
stop("Please provide a vector of colours.")
apply(sapply(col, col2rgb)/255, 2,
function(x)
rgb(x[1], x[2], x[3], alpha=alpha))
}
# Trimm a list of data frames to common length
trim_data <- function(input,len=NULL) {
if (is.null(len)) {
len <- min(sapply(input, function(v) dim(v)[1]))
}
return(lapply(input, function(d) slice_head(d,n=len)))
}
length_of_data <- function(input) {
min(sapply(input, function(v) dim(v)[1]))
}
# Takes a flat list
trace2maxline <- function(tr) {
maxline = tr
for (var in seq_len(length(maxline))[2:length(maxline)]) {
#if (maxline[var]>1000000000) {
# maxline[var]=maxline[var-1]
#} else {
maxline[var] = max(maxline[var],maxline[var-1])
#}
}
#plot(seq_len(length(maxline)),maxline,"l",xlab="Index",ylab="WOET")
return(maxline)
}
# Take a list of data frames, output same form but maxlines
data2maxlines <- function(tr) {
min_length <- min(sapply(tr, function(v) dim(v)[1]))
maxline <- tr
for (var in seq_len(length(tr))) {
maxline[[var]][[1]]=trace2maxline(tr[[var]][[1]])
}
return(maxline)
}
# Take a multi-column data frame, output same form but maxlines
frame2maxlines <- function(tr) {
for (var in seq_len(length(tr))) {
tr[[var]]=trace2maxline(tr[[var]])
}
return(tr)
}
trace2maxpoints <- function(tr) {
minval = tr[1,1]
collect = tr[1,]
for (i in seq_len(dim(tr)[1])) {
if (minval < tr[i,1]) {
collect = rbind(collect,tr[i,])
minval = tr[i,1]
}
}
tmp = tr[dim(tr)[1],]
tmp[1] = minval[1]
collect = rbind(collect,tmp)
return(collect)
}
sample_maxpoints <- function(tr,po) {
index = 1
collect=NULL
endpoint = dim(tr)[1]
for (p in po) {
if (p<=tr[1,2]) {
tmp = tr[index,]
tmp[2] = p
collect = rbind(collect, tmp)
} else if (p>=tr[endpoint,2]) {
tmp = tr[endpoint,]
tmp[2] = p
collect = rbind(collect, tmp)
} else {
for (i in seq(index,endpoint)-1) {
if (p >= tr[i,2] && p<tr[i+1,2]) {
tmp = tr[i,]
tmp[2] = p
collect = rbind(collect, tmp)
index = i
break
}
}
}
}
return(collect)
}
#https://www.r-bloggers.com/2012/01/parallel-r-loops-for-windows-and-linux/
all_runtypetables <- foreach (bn=BASENAMES) %do% {
runtypefiles <- list.files(file.path(BENCHDIR,bn),pattern=sprintf(PATTERNS,target),full.names = TRUE)
if (length(runtypefiles) > 0) {
runtypetables_reduced <- foreach(i=seq_len(length(runtypefiles))) %dopar% {
rtable = read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i)))
trace2maxpoints(rtable)
}
#runtypetables <- lapply(seq_len(length(runtypefiles)),
# function(i)read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i))))
#runtypetables_reduced <- lapply(runtypetables, trace2maxpoints)
runtypetables_reduced
#all_runtypetables = c(all_runtypetables, list(runtypetables_reduced))
}
}
all_runtypetables = all_runtypetables[lapply(all_runtypetables, length) > 0]
all_min_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
ret = data.frame(min(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
ret/(3600 * 1000)
}
all_max_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
ret = data.frame(max(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
ret/(3600 * 1000)
}
all_points = sort(unique(Reduce(c, lapply(all_runtypetables, function(v) Reduce(c, lapply(v, function(w) w[[2]]))))))
all_maxlines <- foreach (rtt=all_runtypetables) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
runtypetables_sampled = foreach(v=rtt) %dopar% {
sample_maxpoints(v, all_points)[1]
}
#runtypetables_sampled = lapply(rtt, function(v) sample_maxpoints(v, all_points)[1])
tmp_frame <- Reduce(cbind, runtypetables_sampled)
statframe <- data.frame(rowMeans(tmp_frame),apply(tmp_frame, 1, sd),apply(tmp_frame, 1, min),apply(tmp_frame, 1, max), apply(tmp_frame, 1, median))
names(statframe) <- c(bn, sprintf("%s_sd",bn), sprintf("%s_min",bn), sprintf("%s_max",bn), sprintf("%s_med",bn))
#statframe[sprintf("%s_times",bn)] = all_points
round(statframe)
#all_maxlines = c(all_maxlines, list(round(statframe)))
}
one_frame<-data.frame(all_maxlines)
one_frame[length(one_frame)+1] <- all_points/(3600 * 1000)
names(one_frame)[length(one_frame)] <- 'time'
typenames = names(one_frame)[which(names(one_frame) != 'time')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
ylow=min(one_frame[typenames])
yhigh=max(one_frame[typenames],worst_case)
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
ml2lines <- function(ml,lim) {
lines = NULL
last = 0
for (i in seq_len(dim(ml)[1])) {
if (!CONTINUE_LINE_TO_END && lim<ml[i,2]) {
break
}
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
last = ml[i,2]
}
return(lines)
}
plotting <- function(selection, filename, MY_COLORS_) {
# filter out names of iters and sd cols
typenames = names(one_frame)[which(names(one_frame) != 'times')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
typenames = selection[which(selection %in% typenames)]
if (length(typenames) == 0) {return()}
h_ = 380
w_ = h_*4/3
if (SAVE_FILE) {png(file=sprintf("%s/%s_%s.png",outputpath,target,filename), width=w_, height=h_)}
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(0,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WCRT estimate [insn]", pch='.')
for (t in seq_len(length(typenames))) {
#proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),]
#points(proj[c('iters',typenames[t])], col=MY_COLORS_[t], pch='.')
avglines = ml2lines(one_frame[c(typenames[t],'time')],all_max_points[typenames[t]])
#lines(avglines, col=MY_COLORS_[t])
medlines = ml2lines(one_frame[c(sprintf("%s_med",typenames[t]),'time')],all_max_points[typenames[t]])
lines(medlines, col=MY_COLORS_[t], lty='solid')
milines = NULL
malines = NULL
milines = ml2lines(one_frame[c(sprintf("%s_min",typenames[t]),'time')],all_max_points[typenames[t]])
malines = ml2lines(one_frame[c(sprintf("%s_max",typenames[t]),'time')],all_max_points[typenames[t]])
if (exists("RIBBON") && ( RIBBON=='max' )) {
#lines(milines, col=MY_COLORS_[t], lty='dashed')
lines(malines, col=MY_COLORS_[t], lty='dashed')
#points(proj[c('iters',sprintf("%s_min",typenames[t]))], col=MY_COLORS_[t], pch='.')
#points(proj[c('iters',sprintf("%s_max",typenames[t]))], col=MY_COLORS_[t], pch='.')
}
if (exists("RIBBON") && RIBBON != '') {
for (i in seq_len(dim(avglines)[1]-1)) {
if (RIBBON=='both') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
if (FALSE && RIBBON=='span') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
#if (FALSE && RIBBON=='both' || RIBBON=='sd') {
# # draw sd
# x_l <- avglines[i,][['X']]
# x_r <- avglines[i+1,][['X']]
# y_l <- avglines[i,][['Y']]-one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# y_h <- avglines[i,][['Y']]+one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# if (x_r != x_l) {
# rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
# }
#}
#sd_ <- row[sprintf("%s_sd",typenames[t])][[1]]
#min_ <- row[sprintf("%s_min",typenames[t])][[1]]
#max_ <- row[sprintf("%s_max",typenames[t])][[1]]
#if (exists("RIBBON")) {
# switch (RIBBON,
# 'sd' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03)),
# 'both' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.05)),
# 'span' = #arrows(x_, min_, x_, max_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03))
# )
#}
##arrows(x_, y_-sd_, x_, y_+sd_, length=0.05, angle=90, code=3, col=alpha(MY_COLORS[t], alpha=0.1))
}
}
}
leglines=typenames
if (DRAW_WC) {
lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted')
leglines=c(typenames, 'worst observed')
}
legend(LEGEND_POS, legend=leglines,#"bottomright",
col=c(MY_COLORS_[1:length(typenames)],"black"),
lty=c(rep("solid",length(typenames)),"dotted"))
if (SAVE_FILE) {
dev.new()
par(las = 2, mar = c(10, 5, 1, 1))
dev.off()
}
}
stopCluster(cl)
par(mar=c(3.8,3.8,0,0))
par(oma=c(0,0,0,0))
#RIBBON='both'
#MY_SELECTION = c('state_int','generation100_int')
#MY_SELECTION = c('state','frafl')
if (exists("MY_SELECTION")) {
plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)])
} else {
# MY_SELECTION=c('state', 'afl', 'random', 'feedlongest', 'feedgeneration', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'afl_int', 'random_int', 'feedlongest_int', 'feedgeneration_int', 'feedgeneration10_int')
#MY_SELECTION=c('state', 'frAFL', 'statenohash', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'frAFL_int', 'statenohash_int', 'feedgeneration10_int')
MY_SELECTION=typenames
RIBBON='both'
for (i in seq_len(length(MY_SELECTION))) {
n <- MY_SELECTION[i]
plotting(c(n), n, c(MY_COLORS[i]))
}
RIBBON='max'
plotting(MY_SELECTION,'all', MY_COLORS)
}
for (t in seq_len(length(typenames))) {
li = one_frame[dim(one_frame)[1],]
pear = (li[[typenames[[t]]]]-li[[sprintf("%s_med",typenames[[t]])]])/li[[sprintf("%s_sd",typenames[[t]])]]
print(sprintf("%s pearson: %g",typenames[[t]],pear))
}

View File

@ -1,130 +0,0 @@
library("mosaic")
library("dplyr")
library("DBI")
args = commandArgs(trailingOnly=TRUE)
KNOWN_WCRT <- list(
waters_seq_bytes=219542, # via INSERT_WC
waters_seq_int=219542, # via INSERT_WC + manual interrupt
waters_seq_full=219542,# via INSERT_WC + manual interrupt
polycopter_seq_dataflow_full=343493, # via INSERT_WC + manual interrupt
polycopter_seq_dataflow_int=343493, # via INSERT_WC + manual interrupt
release_seq_int=645885, # via INSERT_WC + manual interrupt
release_seq_full=645885 # via INSERT_WC + manual interrupt
)
STATIC_WCRT <- list(
#waters_seq_bytes=,
waters_seq_int=270789
#waters_seq_full=,
#polycopter_seq_dataflow_full=, # via INSERT_WC + manual interrupt
#polycopter_seq_dataflow_int=, # via INSERT_WC + manual interrupt
#release_seq_int=, # via INSERT_WC + manual interrupt
#release_seq_full= # via INSERT_WC + manual interrupt
)
# Read the first command line argument as an sqlite file
if (length(args) > 0) {
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
} else {
print("No sqlite file provided, assume defaults")
args = c("bench.sqlite", "remote")
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
}
combos <- dbGetQuery(con, "SELECT * FROM combos")
casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename")
toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname")
ml2lines <- function(ml, casename) {
lines = NULL
last = 0
for (i in seq_len(dim(ml)[1])) {
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
last = ml[i,2]
}
return(lines)
}
draw_plot <- function(data, casename) {
MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
LEGEND_POS="bottomright"
ISNS_PER_US = (10**3)/(2**5)
# Convert timestamp from microseconds to hours
for (n in seq_len(length(data))) {
data[[n]]$timestamp <- data[[n]]$timestamp / 3600000
data[[n]]$min <- data[[n]]$min / ISNS_PER_US
data[[n]]$max <- data[[n]]$max / ISNS_PER_US
data[[n]]$median <- data[[n]]$median / ISNS_PER_US
data[[n]]$mean <- data[[n]]$mean / ISNS_PER_US
data[[n]]$sdiv <- data[[n]]$sdiv / ISNS_PER_US
}
wcrt = KNOWN_WCRT[[casename]]
if (!is.null(wcrt)) {
wcrt = wcrt / ISNS_PER_US
} else {
wcrt = 0
}
# draw limits
max_x <- max(sapply(data, function(tbl) max(tbl$timestamp, na.rm = TRUE)))
max_y <- max(wcrt,max(sapply(data, function(tbl) max(tbl$max, na.rm = TRUE))))
min_y <- min(sapply(data, function(tbl) min(tbl$min, na.rm = TRUE)))
# plot setup
h_ = 380
w_ = h_*4/3
png(file=sprintf("%s/sql_%s.png", args[2],casename), width=w_, height=h_)
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(0,max_x),c(min_y,max_y), col='white', xlab="Time [h]", ylab="WCRT estimate [us]", pch='.')
# plot data
for (n in seq_len(length(data))) {
d <- data[[n]]
malines = ml2lines(d[c('max','timestamp')])
lines(malines, col=MY_COLORS[[n]], lty='dashed')
medlines = ml2lines(d[c('median','timestamp')])
lines(medlines, col=MY_COLORS[[n]], lty='solid')
milines = ml2lines(d[c('min','timestamp')])
lines(milines, col=MY_COLORS[[n]], lty='dashed')
}
legend_names <- names(data)
legend_colors <- c(MY_COLORS[1:length(data)],"black")
legend_styles <- c(rep("solid",length(data)),"dotted")
if (wcrt > 0) {
abline(h=wcrt, col='black', lty='dotted')
legend_names <- c(names(data), "WCRT")
}
legend(LEGEND_POS, legend=legend_names,#"bottomright",
col=legend_colors,
lty=legend_styles)
par(las = 2, mar = c(10, 5, 1, 1))
dev.off()
}
print(casenames[['casename']])
for (cn in casenames[['casename']]) {
tables <- dbGetQuery(con, sprintf("SELECT * FROM combos WHERE casename == '%s'", cn[[1]]))
table_list <- list()
for (row in 1:nrow(tables)) {
table_name <- tables[row, 'fullname']
tool_name <- tables[row, 'toolname']
table_data <- dbGetQuery(con, sprintf("SELECT * FROM '%s'", table_name))
table_list[[tool_name]] <- table_data
}
draw_plot(table_list, cn[[1]])
}
dbDisconnect(con)

View File

@ -1,8 +1,12 @@
 #!/usr/bin/env bash
+export INSERT_WC=${2:-0}
+export BUILD_DIR=${1:-build}
+mkdir -p $BUILD_DIR
 build () {
-    make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC clean && make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC $1=1 IGNORE_INTERRUPTS=$IGNORE_INTERRUPTS IGNORE_BYTES=$IGNORE_BYTES IGNORE_INTERNAL_STATE=$IGNORE_INTERNAL_STATE
+    make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC clean && make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC $1=1 IGNORE_INTERRUPTS=$IGNORE_INTERRUPTS IGNORE_BYTES=$IGNORE_BYTES IGNORE_INTERNAL_STATE=$IGNORE_INTERNAL_STATE INSERT_WC=$INSERT_WC $EXTRA_MAKE_ARGS
-    cp ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/build/RTOSDemo.axf build/$(echo $1 | cut -d_ -f1 | tr '[:upper:]' '[:lower:]')$2.elf
+    cp ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/build/RTOSDemo.axf $BUILD_DIR/$(echo $1 | cut -d_ -f1 | tr '[:upper:]' '[:lower:]')$EXTRA_NAME_SUFFIX$2.elf
 }
 # INSERT_WC=1
 mkdir -p build
@ -109,3 +113,15 @@ export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_seq_unsync_full"
 export SPECIAL_CFLAGS="-DWATERS_UNSYNCHRONIZED=1"
 build WATERS_DEMO $SUFFIX
 unset SPECIAL_CFLAGS
+# Create copies with special names
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc12_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc13_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc14_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc11_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc21_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc22_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc23_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc31_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc32_seq_full.elf
+cp -f $BUILD_DIR/waters_seq_full.elf $BUILD_DIR/watersIc33_seq_full.elf

View File

@ -0,0 +1,8 @@
#!/usr/bin/env bash
find $1 -type 'f' -iname "${2}#*.log" | while IFS="" read -r p || [ -n "$p" ]
do
LINE=$(tail -n 100 $p | grep -io "run time: .* corpus: [0-9]*" | tail -n 1)
echo $p: $LINE
LINE=$(grep -i "interesting corpus elements" $p | tail -n 1)
echo $p: $LINE
done
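Invocation sketch (directory and target prefix illustrative; assumes the script above is saved as scripts/grep_logs.sh):

    bash scripts/grep_logs.sh remote/timedump waters_seq_full

For each matching log it prints the last fuzzer status line ("run time: ... corpus: N") and the last "interesting corpus elements" line.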

View File

@ -0,0 +1,14 @@
#!/usr/bin/env bash
if [[ -n "$1" ]]; then
TARGET="$1"
else
TARGET=$BENCHDIR
fi
# Check if bench.sqlite needs to be updated (note: find -name matches a glob, not a regex)
if [[ ! -f $TARGET/bench.sqlite || $(find $TARGET/timedump -name '*.time' -newer $TARGET/bench.sqlite | wc -l) -gt 0 ]]; then
number_cruncher -i $TARGET/timedump -o $TARGET/bench.sqlite
fi
Rscript scripts/plot_sqlite.r $TARGET/bench.sqlite $TARGET
Rscript scripts/plot_diffs.r $TARGET/bench.sqlite $TARGET
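Usage sketch (argument illustrative; the script calls a bare number_cruncher, so the tool must be on PATH, e.g. via the ../tools/bin directory populated by build.sh below):

    PATH="$PWD/../tools/bin:$PATH" bash scripts/plot_all_benchmarks.sh remote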

View File

@ -0,0 +1,33 @@
get_max_nodecount () {
rm -f sizecomp && for sizefile in $BENCHDIR/timedump/**/$1*.stgsize;do echo "$(tail -n 1 $sizefile),${sizefile}" >> sizecomp; done; sort -n sizecomp | tail -n 1
}
get_largest_files () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
}
perform () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
echo $T | cut -d',' -f6 | xargs -I {} ./plot_stgsize.r {}
mv "$(echo $T | cut -d',' -f6 | xargs -I {} basename -s .stgsize {})_nodes.png" $1_nodes.png
}
# perform copter
# perform release
# perform waters
A=$(get_largest_files polycopter_seq_dataflow_full)
B=$(get_largest_files release_seq_full)
C=$(get_largest_files waters_seq_full)
# A_="$(echo $A | sed 's/polycopter_seq_dataflow_full/UAV w. hid. com./')"
# B_="$(echo $B | sed 's/release_seq_full/Async. rel./')"
# C_="$(echo $C | sed 's/waters_seq_full/Waters ind. ch./')"
A_="UAV"
B_="Async. rel."
C_="Waters ind. ch."
echo $A_ $B_ $C_
cp $A "$A_"
cp $B "$B_"
cp $C "$C_"
./plot_stgsize_multi.r "$A_" "$B_" "$C_"
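Usage sketch (assumes bash and that $BENCHDIR contains timedump/<fuzzer>/*.stgsize files from a finished run):

    BENCHDIR=remote bash scripts/plot_all_stgsize.sh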

View File

@ -0,0 +1,235 @@
# install.packages(c("mosaic", "dplyr", "DBI", "tikzDevice", "colorspace", "heatmaply", "RColorBrewer", "RSQLite"))
library("mosaic")
library("dplyr")
library("DBI")
library("tikzDevice") # Add this line to include the tikzDevice library
library("colorspace")
library("heatmaply")
library("RColorBrewer")
args = commandArgs(trailingOnly=TRUE)
TOOL_TRANSLATION <- list(
feedgeneration100 = "evolution",
frafl = "coverage",
random = "random",
stgwoet = "FRET"
)
KNOWN_WCRT <- list(
waters_seq_bytes=0, # via INSERT_WC
waters_seq_int=0, # via INSERT_WC + manual interrupt
#waters_seq_int=219542, # via INSERT_WC + manual interrupt
waters_seq_full=0,# via INSERT_WC + manual interrupt
waters_seq_unsync_full=0,# via INSERT_WC + manual interrupt
polycopter_seq_dataflow_full=0, # via INSERT_WC + manual interrupt
polycopter_seq_dataflow_int=0, # via INSERT_WC + manual interrupt
release_seq_int=0, # via fuzzer, equal to manual interrupts; Bug: Task3 y=0
release_seq_full=0 # via INSERT_WC + manual interrupt; Bug: Task3 y=0
)
STATIC_WCRT <- list(
waters_seq_bytes=256632,
waters_seq_int=256632,
waters_seq_full=256632,
waters_seq_unsync_full=272091,
polycopter_seq_dataflow_full=373628,
polycopter_seq_dataflow_int=373628,
release_seq_int=921360,
release_seq_full=921360
)
# ISNS_PER_US = (10**3)/(2**5)
# print(list(sapply(STATIC_WCRT, function(x) x/ISNS_PER_US)))
# quit()
STATIC_WCRT <- list(
waters_seq_bytes=0,
waters_seq_int=0,
waters_seq_full=0,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=0,
release_seq_full=0
)
MIN_Y <- list(
waters_seq_bytes=0,
waters_seq_int=0,
waters_seq_full=0,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=0,
release_seq_full=0
)
LEG_POS <- list(
waters_seq_bytes="bottomright",
waters_seq_int="bottomright",
waters_seq_full="bottomright",
waters_seq_unsync_full="bottomright",
polycopter_seq_dataflow_full="bottomright",
polycopter_seq_dataflow_int="bottomright",
release_seq_int="bottomright",
release_seq_full="bottomright"
)
NAME_MAP <- list(
watersIc11_seq_full="t1 10ms",
watersIc12_seq_full="t2 10ms",
watersIc13_seq_full="t3 10ms",
watersIc14_seq_full="t4 10ms",
watersIc31_seq_full="t5 spro",
watersIc32_seq_full="t6 2ms",
watersIc33_seq_full="t7 50ms",
watersIc21_seq_full="t9 100ms",
watersIc22_seq_full="t10 10ms",
watersIc23_seq_full="t11 2ms"
)
# Read the first command line argument as an sqlite file
if (length(args) > 0) {
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
} else {
print("No sqlite file provided, assume defaults")
args = c("bench.sqlite", "remote")
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
}
combos <- dbGetQuery(con, "SELECT * FROM combos")
casenames <- dbGetQuery(con, "SELECT casename FROM combos WHERE casename LIKE 'watersIc_%' GROUP BY casename")
#casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename")
toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname")
ml2lines <- function(ml, casename) {
lines = NULL
last = 0
for (i in seq_len(dim(ml)[1])) {
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
last = ml[i,2]
}
return(lines)
}
# BREW=RdYlGn(8)
BREW=Spectral(8)
# MY_COLORS <- c(BREW[[4]], BREW[[3]], BREW[[2]], BREW[[1]], "cyan", "pink", "gray", "orange", "black", "yellow","brown")
MY_COLORS=BREW
# draw limit
max_x <- 12
min_y <- -2500
max_y <- 2500
LEGEND_POS = "bottomright"
ISNS_PER_US = (10**3)/(2**5)
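# i.e. 31.25 instructions per µs: one icount tick is 2^5 ns (QEMU icount shift 5), and 10^3 ns make one µs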
print(casenames[['casename']])
legend_names <- sapply(casenames[['casename']], function(x) NAME_MAP[[x]] %||% x)
legend_colors <- BREW
legend_styles <- c(rep("solid",10),"dotted","dashed")
h_ = 300
w_ = h_*4/3
png(file=sprintf("%s/all_tasks.png", args[2]), width=w_, height=h_)
#tikz(file=sprintf("%s/all_tasks.tex", args[2]), width=0.6*w_/72, height=0.6*h_/72)
#pdf(file=sprintf("%s/all_tasks.pdf", args[2]), width=w_/72, height=h_/72)
# plot setup
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(0,max_x),c(min_y,max_y), col='white', xlab="Time [h]", ylab="FRET's improvement over competitors [µs]", pch='.')
draw_plot <- function(data, casename, color) {
# evo, cov, random, fret
# Pre-calculate all malines and medlines
malines_list <- list()
medlines_list <- list()
for (n in seq_along(data)) {
d <- data[[n]]
malines_list[[names(data)[n]]] <- ml2lines(d[c('max','timestamp')])
medlines_list[[names(data)[n]]] <- ml2lines(d[c('median','timestamp')])
}
# Plot the difference between malines['stgwoet'] (FRET) and malines['random']
if ("stgwoet" %in% names(malines_list) && "feedgeneration100" %in% names(malines_list)) {
fret_malines <- malines_list[["stgwoet"]]
compare_malines1 <- malines_list[["feedgeneration100"]]
compare_malines2 <- malines_list[["frafl"]]
fret_medlines <- medlines_list[["stgwoet"]]
compare_medlines1 <- medlines_list[["feedgeneration100"]]
compare_medlines2 <- medlines_list[["frafl"]]
# Ensure all have the same number of rows and matching X
min_len <- min(nrow(fret_malines), nrow(compare_malines1), nrow(compare_malines2))
# For each point, take the max of the two compare malines
compare_max_Y <- pmax(compare_malines1[1:min_len, "Y"], compare_malines2[1:min_len, "Y"])
diff_lines_ma <- data.frame(
X = fret_malines[1:min_len, "X"],
Y = fret_malines[1:min_len, "Y"] - compare_max_Y
)
lines(diff_lines_ma, col=color, lty="solid", lwd=2)
# Same for medlines
compare_max_med_Y <- pmax(compare_medlines1[1:min_len, "Y"], compare_medlines2[1:min_len, "Y"])
diff_lines_med <- data.frame(
X = fret_medlines[1:min_len, "X"],
Y = fret_medlines[1:min_len, "Y"] - compare_max_med_Y
)
lines(diff_lines_med, col=color, lty="dashed", lwd=2)
}
}
for (i in seq_len(length(casenames[['casename']]))) {
cn =casenames[['casename']][i]
color = MY_COLORS[i]
tables <- dbGetQuery(con, sprintf("SELECT * FROM combos WHERE casename == '%s'", cn[[1]]))
table_list <- list()
for (row in 1:nrow(tables)) {
table_name <- tables[row, 'fullname']
tool_name <- tables[row, 'toolname']
table_data <- dbGetQuery(con, sprintf("SELECT * FROM '%s'", table_name))
table_list[[tool_name]] <- table_data
}
# Convert timestamp from microseconds to hours
for (n in seq_len(length(table_list))) {
table_list[[n]]$timestamp <- table_list[[n]]$timestamp / 3600000
table_list[[n]]$min <- table_list[[n]]$min / ISNS_PER_US
table_list[[n]]$max <- table_list[[n]]$max / ISNS_PER_US
table_list[[n]]$median <- table_list[[n]]$median / ISNS_PER_US
table_list[[n]]$mean <- table_list[[n]]$mean / ISNS_PER_US
table_list[[n]]$sdiv <- table_list[[n]]$sdiv / ISNS_PER_US
}
table_list <- table_list[c('stgwoet', 'feedgeneration100', 'frafl', 'random')] # manual re-order
table_list <- table_list[!sapply(table_list, is.null)] # remove NULL entries
draw_plot(table_list, cn[[1]], color)
}
legend(LEGEND_POS, legend=legend_names,#"bottomright",
col=legend_colors,
lty=legend_styles,
lwd=2, ncol=2)
par(las = 2, mar = c(10, 5, 1, 1))
# png
## normal
dev.off()
dbDisconnect(con)
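For reference, this matches the call in scripts/plot_all_benchmarks.sh above, so args[1] is the sqlite database and args[2] the output directory for all_tasks.png:

    Rscript scripts/plot_diffs.r remote/bench.sqlite remote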

View File

@ -0,0 +1,238 @@
# install.packages(c("mosaic", "dplyr", "DBI", "tikzDevice", "colorspace", "heatmaply", "RColorBrewer", "RSQLite"))
library("mosaic")
library("dplyr")
library("DBI")
library("tikzDevice") # Add this line to include the tikzDevice library
library("colorspace")
library("heatmaply")
library("RColorBrewer")
args = commandArgs(trailingOnly=TRUE)
TOOL_TRANSLATION <- list(
feedgeneration100 = "evolution",
frafl = "coverage",
random = "random",
stgwoet = "FRET"
)
KNOWN_WCRT <- list(
waters_seq_bytes=212252, # via INSERT_WC
waters_seq_int=0, # via INSERT_WC + manual interrupt
#waters_seq_int=219542, # via INSERT_WC + manual interrupt
waters_seq_full=219542,# via INSERT_WC + manual interrupt
waters_seq_unsync_full=234439,# via INSERT_WC + manual interrupt
polycopter_seq_dataflow_full=174866, # via INSERT_WC + manual interrupt
polycopter_seq_dataflow_int=174866, # via INSERT_WC + manual interrupt
release_seq_int=582699, # via fuzzer, equal to manual interrupts; Bug: Task3 y=0
release_seq_full=614583 # via INSERT_WC + manual interrupt; Bug: Task3 y=0
)
STATIC_WCRT <- list(
waters_seq_bytes=256632,
waters_seq_int=256632,
waters_seq_full=256632,
waters_seq_unsync_full=272091,
polycopter_seq_dataflow_full=373628,
polycopter_seq_dataflow_int=373628,
release_seq_int=921360,
release_seq_full=921360
)
# ISNS_PER_US = (10**3)/(2**5)
# print(list(sapply(STATIC_WCRT, function(x) x/ISNS_PER_US)))
# quit()
STATIC_WCRT <- list(
waters_seq_bytes=0,
waters_seq_int=0,
waters_seq_full=0,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=0,
release_seq_full=0
)
MIN_Y <- list(
waters_seq_bytes=5250,
waters_seq_int=5700,
waters_seq_full=5250,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=16500,
release_seq_full=16500
)
LEG_POS <- list(
waters_seq_bytes="bottomright",
waters_seq_int="bottomright",
waters_seq_full="bottomright",
waters_seq_unsync_full="bottomright",
polycopter_seq_dataflow_full="bottomright",
polycopter_seq_dataflow_int="bottomright",
release_seq_int="bottomright",
release_seq_full="bottomright"
)
# Read the first command line argument as an sqlite file
if (length(args) > 0) {
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
} else {
print("No sqlite file provided, assume defaults")
args = c("bench.sqlite", "remote")
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
}
combos <- dbGetQuery(con, "SELECT * FROM combos")
casenames <- dbGetQuery(con, "SELECT casename FROM combos WHERE NOT casename LIKE 'watersIc_%' GROUP BY casename")
# casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename")
toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname")
ml2lines <- function(ml, casename) {
lines = NULL
last = 0
for (i in seq_len(dim(ml)[1])) {
lines = rbind(lines, cbind(X=last, Y=ml[i,1]))
lines = rbind(lines, cbind(X=ml[i,2], Y=ml[i,1]))
last = ml[i,2]
}
return(lines)
}
BREW=RdYlGn(4)
# BREW=Spectral(4)
draw_plot <- function(data, casename) {
# evo, cov, random, fret
MY_COLORS <- c(BREW[[4]], BREW[[3]], BREW[[2]], BREW[[1]], "cyan", "pink", "gray", "orange", "black", "yellow","brown")
# MY_COLORS <- c("orange", "blue", "red", "green", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
# MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
LEGEND_POS=LEG_POS[[casename]]
if (is.null(LEGEND_POS)) {
LEGEND_POS = "bottomright"
}
ISNS_PER_US = (10**3)/(2**5)
# Convert timestamp from microseconds to hours
for (n in seq_len(length(data))) {
data[[n]]$timestamp <- data[[n]]$timestamp / 3600000
data[[n]]$min <- data[[n]]$min / ISNS_PER_US
data[[n]]$max <- data[[n]]$max / ISNS_PER_US
data[[n]]$median <- data[[n]]$median / ISNS_PER_US
data[[n]]$mean <- data[[n]]$mean / ISNS_PER_US
data[[n]]$sdiv <- data[[n]]$sdiv / ISNS_PER_US
}
data <- data[c('stgwoet', 'feedgeneration100', 'frafl', 'random')] # manual re-order
data <- data[!sapply(data, is.null)] # remove NULL entries
wcrt = KNOWN_WCRT[[casename]]
if (!is.null(wcrt)) {
wcrt = wcrt / ISNS_PER_US
} else {
wcrt = 0
}
static_wcrt = STATIC_WCRT[[casename]]
if (!is.null(static_wcrt)) {
static_wcrt = static_wcrt / ISNS_PER_US
} else {
static_wcrt = 0
}
# draw limits
max_x <- max(sapply(data, function(tbl) max(tbl$timestamp, na.rm = TRUE)))
max_x <- min(max_x, 24) # quick fix, cap to 24h
max_y <- max(wcrt,max(sapply(data, function(tbl) max(tbl$max, na.rm = TRUE))))
min_y <- min(sapply(data, function(tbl) min(tbl$min, na.rm = TRUE)))
min_y <- max(min_y, MIN_Y[[casename]])
# draw static wcrt
max_y <- max(max_y, static_wcrt)
# plot setup
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(0,max_x),c(min_y,max_y), col='white', xlab="Time [h]", ylab="WORT [µs]", pch='.')
# plot data
for (n in seq_len(length(data))) {
d <- data[[n]]
malines = ml2lines(d[c('max','timestamp')])
lines(malines, col=MY_COLORS[[n]], lty='solid', lwd=2) # Increase line width
medlines = ml2lines(d[c('median','timestamp')])
lines(medlines, col=MY_COLORS[[n]], lty='dashed', lwd=2) # Increase line width
# milines = ml2lines(d[c('min','timestamp')])
# lines(milines, col=MY_COLORS[[n]], lty='dashed', lwd=2) # Increase line width
}
legend_names <- sapply(names(data), function(n) TOOL_TRANSLATION[[n]])
legend_colors <- c(MY_COLORS[1:length(data)],"grey","grey")
legend_styles <- c(rep("solid",length(data)),"dotted","dashed")
if (wcrt > 0) {
# abline(h=wcrt, col='grey', lty='dotted', lwd=3)
abline(h=max(wcrt,max(sapply(data, function(tbl) max(tbl$max, na.rm = TRUE)))), col='grey', lty='dotted', lwd=3) # If the manual WCRT was slightly too low
legend_names <- c(legend_names, "WCRT")
}
if (static_wcrt > 0) {
abline(h=static_wcrt, col='grey', lty='dashed', lwd=3)
legend_names <- c(legend_names, "static bound")
}
# legend(LEGEND_POS, legend=legend_names,#"bottomright",
# col=legend_colors,
# lty=legend_styles,
# lwd=2)
par(las = 2, mar = c(10, 5, 1, 1))
}
print(casenames[['casename']])
for (cn in casenames[['casename']]) {
tables <- dbGetQuery(con, sprintf("SELECT * FROM combos WHERE casename == '%s'", cn[[1]]))
table_list <- list()
for (row in 1:nrow(tables)) {
table_name <- tables[row, 'fullname']
tool_name <- tables[row, 'toolname']
table_data <- dbGetQuery(con, sprintf("SELECT * FROM '%s'", table_name))
table_list[[tool_name]] <- table_data
}
h_ = 300
w_ = h_*4/3
# png
## normal
png(file=sprintf("%s/sql_%s.png", args[2],cn[[1]]), width=w_, height=h_)
draw_plot(table_list, cn[[1]])
dev.off()
# ## wide
# png(file=sprintf("%s/sql_%s_wide.png", args[2],cn[[1]]), width=2*w_, height=h_)
# draw_plot(table_list, cn[[1]])
# dev.off()
# # tikz
# ## normal
# tikz(file=sprintf("%s/sql_%s.tex", args[2],cn[[1]]), width=0.6*w_/72, height=0.6*h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
# ## wide
# tikz(file=sprintf("%s/sql_%s_wide.tex", args[2],cn[[1]]), width=(w_*2)/72, height=h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
# # pdf
# ## normal
# pdf(file=sprintf("%s/sql_%s.pdf", args[2],cn[[1]]), width=w_/72, height=h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
# ## wide
# pdf(file=sprintf("%s/sql_%s_wide.pdf", args[2],cn[[1]]), width=2*w_/72, height=h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
}
dbDisconnect(con)
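As with plot_diffs.r, the expected invocation (per plot_all_benchmarks.sh above) is:

    Rscript scripts/plot_sqlite.r remote/bench.sqlite remote

which writes one sql_<casename>.png per non-watersIc case into the output directory.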

View File

@ -25,7 +25,7 @@ plot_multiple_files <- function(file_paths) {
   theme_minimal()
   # Save the plot
-  ggsave("stg_node_sizes.png", plot = p + theme_bw(base_size = 10), width = 4, height = 2.5, dpi = 300, units = "in", device = "png")
+  ggsave("stg_node_sizes.png", plot = p + theme_bw(base_size = 10), width = 4, height = 1.5, dpi = 300, units = "in", device = "png")
 }
 # Example usage

View File

@ -46,4 +46,15 @@ polycopter_seq_dataflow_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#2
 polycopter_seq_dataflow_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,
 watersc14_par_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C14,0#1000
 watersc14_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C14,0#1000
 waters_seq_unsync_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
+watersgen1_par_bytes,main_waters,FUZZ_INPUT,40960,trigger_Qemu_break,T_24,0#10000;1#10000;2#10000;3#10000
+watersIc11_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C11,0#1000
+watersIc12_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C12,0#1000
+watersIc13_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
+watersIc14_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C14,0#1000
+watersIc21_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C21,0#1000
+watersIc22_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C22,0#1000
+watersIc23_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C23,0#1000
+watersIc31_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C31,0#1000
+watersIc32_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C32,0#1000
+watersIc33_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C33,0#1000

(Rendered table view omitted; it duplicates the CSV rows above. Header row: kernel, main_function, input_symbol, input_size, return_function, select_task, interrupts.)

fuzzers/FRET/tools/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
bin

fuzzers/FRET/tools/build.sh Executable file
View File

@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Always use the script's directory as the working directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
mkdir -p bin
build() {
if [ -d "$1" ]; then
cd "$1" || exit 1
cargo build --release
ln -rsf target/release/"$(basename "$1")" ../bin/"$(basename "$1")"
cd - || exit 1
else
echo "Directory $1 does not exist."
fi
}
build edge_compare
build graph2viz
build input_serde
build number_cruncher
build state2gantt
ln -rsf state2gantt/gantt_driver bin/gantt_driver
ln -rsf state2gantt/plot_gantt.r bin/plot_gantt.r
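After a successful run from fuzzers/FRET/tools, bin/ should hold one symlink per crate plus the two state2gantt helpers; a quick check (sketch):

    ./build.sh && ls bin
    edge_compare  gantt_driver  graph2viz  input_serde  number_cruncher  plot_gantt.r  state2gantt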

View File

@ -0,0 +1,6 @@
*.axf
*.qcow2
demo
*.ron
*.bsp
target

View File

@ -0,0 +1,17 @@
[package]
name = "edge_compare"
version = "0.1.0"
authors = [ "Alwin Berger <alwin.berger@tu-dortmund.de>" ]
edition = "2021"
[features]
default = ["std"]
std = []
[profile.release]
debug = true
[dependencies]
clap = { version = "3.1.1", features = ["default"] }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
ron = "0.7" # write serialized data - including hashmaps

View File

@ -0,0 +1,71 @@
use std::collections::HashMap;
use std::path::PathBuf;
use clap::Arg;
use clap::App;
use std::{env,fs};
fn main() {
let res = match App::new("edge_compare")
.version("0.1.0")
.author("Alwin Berger")
.about("Compare Serialized Edge-Maps.")
.arg(
Arg::new("a")
.short('a')
.long("map-a")
.required(true)
.takes_value(true),
)
.arg(
Arg::new("b")
.short('b')
.long("map-b")
.required(true)
.takes_value(true),
)
.try_get_matches_from(env::args())
{
Ok(res) => res,
Err(err) => {
println!(
"Syntax: {}, --map-a <input> --map-b <input>\n{:?}",
env::current_exe()
.unwrap_or_else(|_| "fuzzer".into())
.to_string_lossy(),
err.info,
);
return;
}
};
let path_a = PathBuf::from(res.value_of("a").unwrap().to_string());
let path_b = PathBuf::from(res.value_of("b").unwrap().to_string());
let raw_a = fs::read(path_a).expect("Can not read dumped edges a");
let hmap_a : HashMap<(u64,u64),u64> = ron::from_str(&String::from_utf8_lossy(&raw_a)).expect("Can not parse HashMap");
let raw_b = fs::read(path_b).expect("Can not read dumped edges b");
let hmap_b : HashMap<(u64,u64),u64> = ron::from_str(&String::from_utf8_lossy(&raw_b)).expect("Can not parse HashMap");
let mut a_and_b = Vec::<((u64,u64),u64)>::new();
let mut a_and_b_differ = Vec::<((u64,u64),(u64,u64))>::new();
let mut a_sans_b = Vec::<((u64,u64),u64)>::new();
for i_a in hmap_a.clone() {
match hmap_b.get(&i_a.0) {
None => a_sans_b.push(i_a),
Some(x) => if i_a.1 == *x {
a_and_b.push(i_a);
} else {
a_and_b_differ.push((i_a.0,(i_a.1,*x)));
}
}
}
let b_sans_a : Vec<((u64,u64),u64)> = hmap_b.into_iter().filter(|x| !hmap_a.contains_key(&x.0) ).collect();
println!("a_sans_b: {:#?}\na_and_b_differ: {:#?}\nb_sans_a: {:#?}",&a_sans_b,&a_and_b_differ,&b_sans_a);
println!("Stats: a\\b: {} a&=b: {} a&!=b: {} b\\a: {} avb: {} jaccarde: {}",
a_sans_b.len(),a_and_b.len(),a_and_b_differ.len(),b_sans_a.len(),
a_and_b.len()+a_and_b_differ.len()+a_sans_b.len()+b_sans_a.len(),
(a_and_b.len()+a_and_b_differ.len())as f64/(a_and_b.len()+a_and_b_differ.len()+a_sans_b.len()+b_sans_a.len()) as f64);
}
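Usage sketch (file names illustrative; each map is a RON-serialized HashMap<(u64,u64),u64>, as parsed above):

    bin/edge_compare --map-a run1_edges.ron --map-b run2_edges.ron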

View File

@ -0,0 +1,4 @@
*.csv
*.png
*.pdf
target

View File

@ -0,0 +1,14 @@
[package]
name = "graph2viz"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fret = { path = "../.." }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.12", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible
petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"

View File

@ -0,0 +1,4 @@
all(%):
target/debug/state2gantt $%_afl.ron > $%_afl.csv
target/debug/state2gantt $%_state.ron > $%_state.csv
target/debug/state2gantt $%_random.ron > $%_random.csv

View File

@ -0,0 +1,71 @@
use std::path::PathBuf;
use std::{env,fs};
use fret::systemstate::{stg::STGFeedbackState,stg::STGEdge,target_os::freertos::FreeRTOSSystem};
use petgraph::Direction::{Outgoing, Incoming};
use petgraph::dot::{Dot, Config};
fn main() {
let args : Vec<String> = env::args().collect();
let path_a = PathBuf::from(args[1].clone());
let raw_a = fs::read(path_a).expect("Can not read dumped feedback state");
// let path_b = PathBuf::from(args[2].clone());
let feedbackstate : STGFeedbackState<FreeRTOSSystem> = ron::from_str(&String::from_utf8_lossy(&raw_a)).expect("Can not parse STGFeedbackState");
let mut splits = 0;
let mut unites = 0;
let mut g = feedbackstate.graph;
dbg!(g.node_count());
let mut straight = 0;
let mut stub = 0;
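// Repeatedly contract "straight" nodes with exactly one predecessor and one
// successor, re-linking prev -> next; restart the scan after each removal,
// since remove_node invalidates node indices.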
let mut done = false;
while !done {
done = true;
for i in g.node_indices() {
let li = g.neighbors_directed(i, Incoming).count();
let lo = g.neighbors_directed(i, Outgoing).count();
if li == 1 && lo == 1 {
let prev = g.neighbors_directed(i, Incoming).into_iter().next().unwrap();
let next = g.neighbors_directed(i, Outgoing).into_iter().next().unwrap();
if prev != next {
g.update_edge(prev, next, STGEdge::default());
g.remove_node(i);
straight+=1;
done = false;
break;
}
}
}
}
for i in g.node_indices() {
let li = g.neighbors_directed(i, Incoming).count();
if li>1 {
unites += 1;
}
let lo = g.neighbors_directed(i, Outgoing).count();
if lo>1 {
splits += 1;
}
if li == 0 || lo == 0 {
// g.remove_node(i);
stub += 1;
}
}
dbg!(splits);
dbg!(unites);
dbg!(straight);
dbg!(stub);
let newgraph = g.map(
|_, n| n._pretty_print(),
// |_, n| format!("{} {:?}",n.get_taskname(),n.get_input_counts().iter().min().unwrap_or(&0)),
|_, e| e,
);
// let tempg = format!("{:?}",Dot::with_config(&newgraph, &[Config::EdgeNoLabel]));
let f = format!("{:?}",Dot::with_config(&newgraph, &[Config::EdgeNoLabel]));
let f = f.replace("\\\\n", "\n");
let f = f.replace("\\\"", "");
println!("{}",f);
}

View File

@ -0,0 +1,3 @@
target
*.case
*.edit

View File

@ -0,0 +1,21 @@
[package]
name = "input_serde"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fret = { path = "../.." }
libafl = { path = "../../../../libafl" }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
# petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"
clap = "4.5.17"
itertools = "0.13.0"
either = { version = "1.13.0", features = ["serde"] }
postcard = { version = "1.0.10", features = [
"alloc",
], default-features = false } # no_std compatible serde serialization format

View File

@ -0,0 +1,149 @@
use either::Either::{self, Left, Right};
use hashbrown::HashMap;
use std::path::PathBuf;
use std::fs;
use fret::systemstate::helpers::interrupt_times_to_input_bytes;
use libafl::inputs::multi::MultipartInput;
use libafl::inputs::{BytesInput, Input};
use std::io::Write;
use clap::Parser;
use libafl::inputs::HasMutatorBytes;
const MAX_NUM_INTERRUPT: usize = 128;
const NUM_INTERRUPT_SOURCES: usize = 6; // Keep in sync with qemu-libafl-bridge/hw/timer/armv7m_systick.c:319 and FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/init/startup.c:216
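// With an icount shift of 5, every guest instruction advances the virtual
// clock by 2^5 ns, i.e. 10^9 / 2^5 = 31_250_000 instructions per virtual second.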
pub const QEMU_ICOUNT_SHIFT: u32 = 5;
pub const QEMU_ISNS_PER_SEC: u32 = u32::pow(10, 9) / u32::pow(2, QEMU_ICOUNT_SHIFT);
pub const QEMU_ISNS_PER_USEC: f32 = QEMU_ISNS_PER_SEC as f32 / 1000000.0;
#[derive(Parser)]
struct Config {
/// Input Case
#[arg(short, long, value_name = "FILE")]
case: PathBuf,
/// Input format
#[arg(short, long, value_name = "FORMAT")]
input_format: Option<String>,
/// Output format
#[arg(short, long, value_name = "FORMAT", default_value = "edit")]
format: String,
}
/// Set up the interrupt inputs. No-op if interrupts are not fuzzed
fn setup_interrupt_inputs(mut input : MultipartInput<BytesInput>) -> MultipartInput<BytesInput> {
for i in 0..MAX_NUM_INTERRUPT {
let name = format!("isr_{}_times",i);
if input.parts_by_name(&name).next().is_none() {
input.add_part(name, BytesInput::new([0; MAX_NUM_INTERRUPT*4].to_vec()));
}
}
input
}
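/// Split a multipart input into an editable map: the "bytes" part stays raw,
/// every other part is decoded as sorted little-endian u32 interrupt times.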
fn unfold_input(input : &MultipartInput<BytesInput>) -> HashMap<String,Either<Vec<u8>,Vec<u32>>> {
let mut res = HashMap::new();
for (name, part) in input.iter() {
if name == "bytes" {
res.insert(name.to_string(),Left(part.bytes().to_vec()));
} else {
// let times = unsafe{std::mem::transmute::<&[u8], &[u32]>(&part.bytes()[0..4*(part.bytes().len()/4)])}.to_vec();
eprintln!("name {} len {}", name, part.bytes().len());
let mut times = part.bytes().chunks(4).filter(|x| x.len()==4).map(|x| u32::from_le_bytes(x.try_into().unwrap())).collect::<Vec<_>>();
times.sort_unstable();
res.insert(name.to_string(),Right(times));
}
}
res
}
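/// Inverse of unfold_input: re-encode interrupt-time vectors into byte parts.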
fn fold_input(input : HashMap<String,Either<Vec<u8>,Vec<u32>>>) -> MultipartInput<BytesInput> {
let mut res = MultipartInput::new();
for (name, data) in input {
match data {
Left(x) => res.add_part(name, BytesInput::new(x)),
Right(x) => res.add_part(name, BytesInput::new(interrupt_times_to_input_bytes(&x))),
}
}
res
}
fn main() {
let conf = Config::parse();
let show_input = match conf.input_format {
Some(x) => {
match x.as_str() {
"case" => {
eprintln!("Interpreting input file as multipart input");
MultipartInput::from_file(conf.case.as_os_str()).unwrap()
},
"edit" => {
let bytes = fs::read(conf.case).expect("Can not read input file");
let input_str = String::from_utf8_lossy(&bytes);
eprintln!("Interpreting input file as custom edit input");
fold_input(ron::from_str::<HashMap<String,Either<Vec<u8>,Vec<u32>>>>(&input_str).expect("Failed to parse input"))
},
"ron" => {
let bytes = fs::read(conf.case).expect("Can not read input file");
let input_str = String::from_utf8_lossy(&bytes);
eprintln!("Interpreting input file as raw ron input");
ron::from_str::<MultipartInput<BytesInput>>(&input_str).expect("Failed to parse input")
},
"raw" => {
let bytes = fs::read(conf.case).expect("Can not read input file");
setup_interrupt_inputs(MultipartInput::from([("bytes",BytesInput::new(bytes))]))
},
x => panic!("Unknown input format: {}", x),
}
}
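// No explicit format given: probe the formats in order - multipart case file,
// custom edit RON, raw RON multipart - and fall back to plain bytes.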
Option::None => match MultipartInput::from_file(conf.case.as_os_str()) {
Ok(x) => {
eprintln!("Interpreting input file as multipart input");
x
},
Err(_) => {
let bytes = fs::read(conf.case).expect("Can not read input file");
let input_str = String::from_utf8_lossy(&bytes);
match ron::from_str::<HashMap<String,Either<Vec<u8>,Vec<u32>>>>(&input_str) {
Ok(x) => {
eprintln!("Interpreting input file as custom edit input");
fold_input(x)
},
Err(_) => {
match ron::from_str::<MultipartInput<BytesInput>>(&input_str) {
Ok(x) => {
eprintln!("Interpreting input file as raw ron input");
x
},
Err(_) => {
eprintln!("Interpreting input file as raw input");
setup_interrupt_inputs(MultipartInput::from([("bytes",BytesInput::new(bytes))]))
}
}
}
}
}
}
};
// let uf = unfold_input(&show_input);
// println!("{:?}", show_input);
match conf.format.as_str() {
"edit" => {
let output = ron::to_string(&unfold_input(&show_input)).expect("Could not serialize input");
println!("{}", output);
},
"ron" => {
let output = ron::to_string(&show_input).expect("Could not serialize input");
println!("{}", output);
},
"case" => {
let output = postcard::to_allocvec(&show_input).expect("Could not serialize input");
std::io::stdout().write_all(&output).expect("Could not write output");
},
_ => panic!("Unknown format")
}
}

View File

@ -0,0 +1,2 @@
*.sqlite
target

View File

@ -0,0 +1,11 @@
[package]
name = "number_cruncher"
version = "0.1.0"
edition = "2021"
[dependencies]
clap = { version = "4.5.28", features = ["derive"] }
itertools = "0.14.0"
rayon = "1.10.0"
regex = "1.11.1"
rusqlite = "0.33.0"

View File

@ -0,0 +1,295 @@
use clap::Parser;
use itertools::Itertools;
use rayon::prelude::*;
use std::fs;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::Path;
use std::path::PathBuf;
use rusqlite::Connection;
use std::collections::HashMap;
#[derive(clap::ValueEnum, Clone, PartialEq)]
enum Endpoint {
AllMin,
ToolMin,
ToolMax,
Max
}
#[derive(Parser)]
struct Config {
/// Input
#[arg(short, long, value_name = "DIR")]
input: PathBuf,
/// Output
#[arg(short, long, value_name = "FILE", default_value = "out.sqlite")]
output: PathBuf,
/// End each group after the first termination
#[arg(short, long, default_value = "max")]
end_early: Endpoint,
}
fn visit_dirs(
dir: &Path,
results: &mut Vec<(PathBuf, String, String, String)>,
) -> std::io::Result<()> {
if dir.is_dir() {
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
visit_dirs(&path, results)?;
} else if path.extension().and_then(|s| s.to_str()) == Some("time") {
if let Some(file_name) = path.file_name().and_then(|s| s.to_str()) {
let re = regex::Regex::new(r".*#[0-9]+\.time$").unwrap();
if re.is_match(file_name) {
if let Some(dir_name) = path
.parent()
.and_then(|p| p.file_name())
.and_then(|s| s.to_str())
{
{
let mut file_stem =
path.file_stem().unwrap().to_str().unwrap().split("#");
let case_name = file_stem.next().unwrap();
let case_number = file_stem.next().unwrap();
results.push((
path.clone(),
dir_name.to_string(),
case_name.to_string(),
case_number.to_string(),
));
}
}
}
}
}
}
}
Ok(())
}
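/// Parse a `.time` CSV of (value, timestamp) rows, keeping only rows that raise
/// the running maximum; the first point is anchored at time 0 and a final point
/// records the last timestamp seen.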
fn maxpoints_of_file(file_path: &Path) -> io::Result<Vec<(usize, usize)>> {
let file = File::open(file_path)?;
let reader = BufReader::new(file);
let mut results = Vec::new();
let mut watermark = 0;
let mut last_timestamp = 0;
for line in reader.lines() {
let line = line?;
let mut parts = line.split(',');
if let (Some(first_str), Some(second_str)) = (parts.next(), parts.next()) {
let first: usize = first_str.trim().parse().unwrap();
let second: usize = second_str.trim().parse().unwrap();
if first > watermark {
results.push((first, second));
watermark = first;
}
last_timestamp = second;
}
}
if results.len() > 1 {
results[0].1 = 0;
results.push((results[results.len() - 1].0, last_timestamp));
}
if results.is_empty() {
results.push((0, 0));
results.push((0, last_timestamp));
}
Ok(results)
}
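/// Sample the watermark step function at the given sorted timestamps: for each
/// requested timestamp, emit the watermark value that was current at that time.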
fn sample_maxpoints(points: &Vec<(usize, usize)>, samples: &Vec<usize>) -> Vec<(usize, usize)> {
let mut todo = samples.iter().peekable();
let mut ret = Vec::new();
for i in 0..points.len() {
if todo.peek().is_none() {
// Done
break;
}
while let Some(&&peek) = todo.peek() {
if peek >= points[i].1 && (i+1 >= points.len() || peek < points[i+1].1) {
// End or inside the interval
ret.push((points[i].0, peek));
todo.next();
} else if peek < points[i].1 {
if i == 0 {
// Before the first interval, just take the first
ret.push((points[i].0, peek));
todo.next();
} else {
// Already passed
eprintln!("WARNING Skipped: {}", todo.next().unwrap());
}
} else {
// Not yet
break;
}
}
}
ret
}
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
fn mean(data: &[usize]) -> Option<f64> {
let sum = data.iter().sum::<usize>() as f64;
let count = data.len();
match count {
positive if positive > 0 => Some(sum / count as f64),
_ => None,
}
}
fn median(data: &[usize]) -> Option<f64> {
let mut data = data.to_vec();
data.sort();
let size = data.len();
if size == 0 {
return None;
}
match size {
even if even % 2 == 0 => {
let fst_med = data[(even / 2) - 1];
let snd_med = data[even / 2];
fst_med.checked_add(snd_med).map(|x| x as f64 / 2.0)
},
odd => data.get(odd / 2).map(|x| *x as f64)
}
}
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
fn std_deviation(data: &[usize]) -> Option<f64> {
match (mean(data), data.len()) {
(Some(data_mean), count) if count > 0 => {
let variance = data
.iter()
.map(|value| {
let diff = data_mean - (*value as f64);
diff * diff
})
.sum::<f64>()
/ count as f64;
Some(variance.sqrt())
}
_ => None,
}
}
fn main() {
let conf = Config::parse();
let mut results = Vec::new();
if let Err(e) = visit_dirs(&conf.input, &mut results) {
eprintln!("Error reading directories: {}", e);
}
println!("Files: {:?}", results);
let mut connection = Connection::open(conf.output).unwrap();
connection.execute("DROP TABLE IF EXISTS combos", ()).unwrap();
connection.execute("CREATE TABLE IF NOT EXISTS combos (casename TEXT, toolname TEXT, fullname TEXT PRIMARY KEY)", ()).unwrap();
let mut points: Vec<_> = results
.par_iter()
.map(|(path, fuzzer, case, n)| {
(
case,
fuzzer,
n.parse::<usize>().unwrap(),
maxpoints_of_file(path).unwrap(),
)
})
.collect();
let mut last_common_point = points.iter().map(|x| x.3.last().expect(&format!("Missing maxpoint for {}", x.0)).1).min().unwrap();
points.sort_by_key(|x| x.0); // by case for grouping
for (case, casegroup) in &points.into_iter().chunk_by(|x| x.0) {
let casegroup = casegroup.collect::<Vec<_>>();
let last_case_point = casegroup.iter().map(|x| x.3.last().unwrap().1).min().unwrap();
println!("Processing case {}: {}", case, casegroup.len());
let mut timestamps = Vec::new();
for (_, _, _, points) in &casegroup {
timestamps.extend(points.iter().map(|(_, t)| *t));
}
timestamps.sort();
if matches!(conf.end_early, Endpoint::AllMin) {
// Don't sample anything after the shortest run
timestamps = timestamps.into_iter().filter(|x| x<=&last_common_point).collect();
}
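// Per tool, the shortest and longest replica runtimes bound where sampling may
// stop for the ToolMin / ToolMax endpoint modes.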
let least_runtime_per_tool = casegroup.iter().map(|g| (g.1, g.2, g.3.last().unwrap().1)).sorted_by_key(|x| x.0).chunk_by(|x| x.0).into_iter().map(|(tool, toolgroup)| (tool, toolgroup.min_by_key(|y| y.2))).collect::<HashMap<_,_>>();
let longest_runtime_per_tool = casegroup.iter().map(|g| (g.1, g.2, g.3.last().unwrap().1)).sorted_by_key(|x| x.0).chunk_by(|x| x.0).into_iter().map(|(tool, toolgroup)| (tool, toolgroup.max_by_key(|y| y.2))).collect::<HashMap<_,_>>();
timestamps.dedup();
let mut maxpoints_per_tool = casegroup
.par_iter()
.map(|g| (g.0, g.1, g.2, sample_maxpoints(&g.3, &timestamps)))
.collect::<Vec<_>>();
maxpoints_per_tool.sort_by_key(|x| x.1); // by tool
for (tool, toolgroup) in &maxpoints_per_tool.into_iter().chunk_by(|x| x.1) {
let toolgroup = toolgroup.collect::<Vec<_>>();
println!("Processing tool {}: {}", tool, toolgroup.len());
let mut lowest_common_length = toolgroup
.iter()
.map(|(_, _, _, points)| points.len())
.min()
.unwrap();
if conf.end_early == Endpoint::ToolMin {
lowest_common_length = timestamps.binary_search(&least_runtime_per_tool[tool].unwrap().2).unwrap();
}
if conf.end_early == Endpoint::ToolMax {
lowest_common_length = std::cmp::min(lowest_common_length, timestamps.binary_search(&longest_runtime_per_tool[tool].unwrap().2).unwrap());
}
let time_min_max_med_mean_sdiv : Vec<(usize,usize,usize,f64,f64,f64)> = (0..lowest_common_length)
.into_par_iter()
.map(|i| {
let slice = toolgroup.iter().map(|(_, _, _, p)| p[i].0).collect::<Vec<_>>();
assert_eq!(slice.len(), toolgroup.len());
(
toolgroup[0].3[i].1,
*slice.iter().min().unwrap_or(&0),
*slice.iter().max().unwrap_or(&0),
median(&slice).unwrap_or(0.0),
mean(&slice).unwrap_or(0.0),
std_deviation(&slice).unwrap_or(0.0),
)
})
.collect::<Vec<_>>();
// Save to db
connection.execute("INSERT INTO combos (casename, toolname, fullname) VALUES (?, ?, ?)", (case, tool, format!("{}${}",case, tool))).unwrap();
connection.execute(&format!("DROP TABLE IF EXISTS {}${}", case, tool), ()).unwrap();
connection.execute(&format!("CREATE TABLE IF NOT EXISTS {}${} (timestamp INTEGER PRIMARY KEY, min INTEGER, max INTEGER, median REAL, mean REAL, sdiv REAL)", case, tool), ()).unwrap();
// Start a transaction
let transaction = connection.transaction().unwrap();
let mut stmt = transaction.prepare(&format!(
"INSERT INTO {}${} (timestamp , min , max , median , mean , sdiv ) VALUES (?, ?, ?, ?, ?, ?)",
case, tool
)).unwrap();
for (timestamp, min, max, median, mean, sdiv) in time_min_max_med_mean_sdiv {
stmt.execute([(timestamp as i64).to_string(), (min as i64).to_string(), (max as i64).to_string(), median.to_string(), mean.to_string(), sdiv.to_string()]).unwrap();
}
drop(stmt);
// Commit the transaction
transaction.commit().unwrap();
}
}
}

View File

@ -0,0 +1,4 @@
*.csv
*.png
*.pdf
target

View File

@ -0,0 +1,16 @@
[package]
name = "state2gantt"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
fret = { path = "../.." }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
# petgraph = { version="0.6.0", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"
clap = "4.5.17"
itertools = "0.13.0"

View File

@ -0,0 +1,13 @@
#!/bin/sh
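# Usage: $0 <case>.trace.ron [task]
# Converts a trace into job/instance/ABB CSVs via state2gantt, then renders an
# interactive Gantt chart with plot_gantt.r.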
if [ -z "$1" ]; then exit 1; fi
OFILE_A="$(dirname "$1")/$(basename -s .trace.ron "$1")_job.csv"
OFILE_B="$(dirname "$1")/$(basename -s .trace.ron "$1")_instance.csv"
OFILE_C="$(dirname "$1")/$(basename -s .trace.ron "$1")_abbs.csv"
if [ -n "$2" ]; then
EXTRA="-t $2"
fi
rm -f "$OFILE_A" "$OFILE_B"
echo state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
state2gantt -i $1 -a "$OFILE_A" -r "$OFILE_B" -p "$OFILE_C" $EXTRA
echo plot_gantt.r "$OFILE_A" "$OFILE_B" html
plot_gantt.r "$OFILE_A" "$OFILE_B" html

View File

@ -0,0 +1,132 @@
#!/usr/bin/env Rscript
# Load necessary libraries
#install.packages(c("ggplot2", "readr", "dplyr", "plotly"))
library(ggplot2)
library(readr)
library(dplyr)
library(plotly)
QEMU_SHIFT<-5
TIMESCALE<-1000000
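# Each instruction advances QEMU's icount clock by 2^QEMU_SHIFT ns, so
# (ticks * 2**QEMU_SHIFT) / TIMESCALE converts tick counts to milliseconds.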
# Function to create a Gantt chart with dots on short segments
create_gantt_chart <- function(csv_file_a, csv_file_b, MIN_WIDTH, output_format = NULL, startpoint, endpoint) {
# Read the CSV file
df <- read_csv(csv_file_a)
# df_b <- read_csv(csv_file_b)
df_b <- read_csv(csv_file_b, col_types = cols(.default = "d", name = col_character()))
# df <- df %>% bind_rows(df_b)
# Cut out everything outside the range
df <- df %>%
filter(end >= startpoint & start <= endpoint) %>% rowwise %>% mutate(end = min(end, endpoint), start = max(start, startpoint))
df_b <- df_b %>%
filter(end >= startpoint & start <= endpoint) %>% rowwise %>% mutate(end = min(end, endpoint), start = max(start, startpoint))
# Add a placeholder for all tasks that don't have job instances in the range
s <- min(df$start)
placeholder <- df_b %>% mutate(start = s, end = s)
df <- df %>% bind_rows(placeholder)
# Ensure start and end columns are treated as integers
df <- df %>%
mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE,
end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE)
df_b <- df_b %>%
mutate(start = (as.integer(start) * 2**QEMU_SHIFT)/TIMESCALE,
end = (as.integer(end) * 2**QEMU_SHIFT)/TIMESCALE)
# Calculate the segment width
df <- df %>%
mutate(width = end - start)
# Sort the DataFrame by 'prio' column in descending order
df <- df %>%
arrange(prio)
# Add labels to segments
df$label <- paste(
"Start:", df$start,
"<br>",
"Prio:", df$prio,
"<br>",
"Name:", df$name,
"<br>",
"Id:", df$state_id,
"<br>",
"State:", df$state,
"<br>",
"ABB:", df$abb,
"<br>",
"End:", df$end
)
df_b$label <- paste(
"Start:", df_b$start,
"<br>",
"End:", df_b$end
)
# Create the Gantt chart with ggplot2
p <- ggplot(df, aes(x = start, xend = end, y = reorder(name, prio), yend = name, text = label)) +
geom_segment(aes(color = factor(prio)), size = 6) +
labs(title = "Gantt Chart", x = "Time Step", y = "Task", color = "Priority") +
theme_minimal()
# Plot Ranges
p <- p + geom_segment(data = df_b, aes(color = factor(prio)), size = 1)
p <- p + geom_point(data = df_b,
aes(x = end, y = name),
color = "blue", size = 2)
# Add dots on segments shorter than MIN_WIDTH
p <- p + geom_point(data = df %>% filter(width < MIN_WIDTH & width > 0),
aes(x = start, y = name),
color = "red", size = 1)
# Handle output format
if (!is.null(output_format)) {
output_file <- sub("\\.csv$", paste0(".", output_format), csv_file_a)
if (output_format == "html") {
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
htmlwidgets::saveWidget(p_interactive, output_file)
} else if (output_format == "png") {
ggsave(output_file, plot = p, device = "png")
} else {
stop("Invalid output format. Use 'html' or 'png'.")
}
} else {
# Convert the ggplot object to a plotly object for interactivity
p_interactive <- ggplotly(p)
# Print the interactive Gantt chart
print(p_interactive)
}
}
# Main execution
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2 || length(args) > 5) {
stop("Usage: Rscript script.R <csv_file> <csv_file> [output_format] [<strt> <end>]")
} else {
csv_file_a <- args[1]
csv_file_b <- args[2]
if (length(args) >= 3) {
output_format <- args[3]
} else {
output_format <- NULL
}
if (length(args) >= 5) {
start <- as.integer(args[4])
end <- as.integer(args[5])
} else {
start <- 0
end <- Inf
}
}
MIN_WIDTH <- 500 # You can set your desired minimum width here
create_gantt_chart(csv_file_a, csv_file_b, MIN_WIDTH, output_format, start, end)

View File

@ -0,0 +1,142 @@
use hashbrown::HashMap;
use std::borrow::Cow;
use std::path::PathBuf;
use std::fs;
use fret::systemstate::{target_os::SystemTraceData, target_os::freertos::FreeRTOSTraceMetadata, target_os::SystemState, target_os::TaskControlBlock};
use std::io::Write;
use clap::Parser;
use itertools::Itertools;
#[derive(Parser)]
struct Config {
/// Input Trace
#[arg(short, long, value_name = "FILE")]
input_trace: PathBuf,
/// Output for activations
#[arg(short, long, value_name = "FILE")]
activation: Option<PathBuf>,
/// Output for Release-Response intervals
#[arg(short, long, value_name = "FILE")]
response: Option<PathBuf>,
/// Output abbs by task
#[arg(short, long, value_name = "FILE")]
per_task: Option<PathBuf>,
/// Focussed Task
#[arg(short, long, value_name = "TASK")]
task: Option<String>,
/// Translate times to microseconds
#[arg(short, long)]
micros: bool,
}
fn main() {
// let args : Vec<String> = env::args().collect();
let mut conf = Config::parse();
let input_path = conf.input_trace;
let raw_input = fs::read(input_path).expect("Can not read dumped traces");
let activation_path = conf.activation;
let instance_path = conf.response;
let abb_path = conf.per_task;
/* Write all execution intervals */
let mut activation_file = activation_path.map(|x| std::fs::OpenOptions::new()
.read(false)
.write(true)
.create(true)
.truncate(true) // truncate instead of append(false), so re-runs do not leave stale bytes behind
.open(x).expect("Could not create file"));
let mut level_per_task : HashMap<String, u32> = HashMap::new();
// Store priority per task
let trace : FreeRTOSTraceMetadata = ron::from_str(&String::from_utf8_lossy(&raw_input)).expect("Can not parse FreeRTOSTraceMetadata");
// task_name -> (abb_addr -> (interval_count, exec_count, exec_time, woet))
let mut abb_profile : HashMap<Cow<'static, str>, HashMap<u32, (usize, usize, u64, u64)>> = trace.select_abb_profile(conf.task.clone());
for s in trace.intervals() {
if s.level == 0 {
let t = trace.states_map()[&s.start_state].current_task();
level_per_task.insert(t.task_name().clone(),t.base_priority);
}
}
// Range of longest selected job
let limits = conf.task.as_ref().map(|task| trace.worst_jobs_per_task_by_response_time().get(task).map(|x| x.release..x.response)).flatten();
if let Some(limits) = &limits {
println!("Limits: {} - {}",limits.start,limits.end);
}
let mut intervals = trace.intervals().clone();
activation_file.as_mut().map(|x| writeln!(x,"start,end,prio,name,state_id,state,abb").expect("Could not write to file"));
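// Clamp each interval to the focussed job's release-response window (if any);
// ISR intervals (level > 0) are emitted with a negative prio to set them apart.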
for s in intervals.iter_mut() {
if let Some(l) = &limits {
if s.start_tick > l.end || s.end_tick < l.start {
continue;
}
s.start_tick = s.start_tick.max(l.start);
s.end_tick = s.end_tick.min(l.end);
}
let start_tick = if conf.micros {s.start_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.start_tick as f32};
let end_tick = if conf.micros {s.end_tick as f32 / fret::time::clock::QEMU_ISNS_PER_USEC} else {s.end_tick as f32};
let state = &trace.states_map()[&s.start_state];
if s.level == 0 {
activation_file.as_mut().map(|x| writeln!(x,"{},{},{},{},{:X},{},{}",start_tick,end_tick,trace.states_map()[&s.start_state].current_task().priority,trace.states_map()[&s.start_state].current_task().task_name, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX) ).expect("Could not write to file"));
} else {
activation_file.as_mut().map(|x| writeln!(x,"{},{},-{},{},{:X},{},{}",start_tick,end_tick,s.level,s.start_capture.1, state.get_hash()>>48, state, s.abb.as_ref().map(|x| x.get_start()).unwrap_or(u32::MAX)).expect("Could not write to file"));
}
}
let mut jobs = trace.jobs().clone();
/* Write all job instances from release to response */
let instance_file = instance_path.map(|x| std::fs::OpenOptions::new()
.read(false)
.write(true)
.create(true)
.truncate(true)
.open(x).expect("Could not create file"));
if let Some(mut file) = instance_file {
writeln!(file,"start,end,prio,name").expect("Could not write to file");
for s in jobs.iter_mut() {
if limits.as_ref().map(|x| !x.contains(&s.release) && !x.contains(&s.response) ).unwrap_or(false) {
continue;
}
if let Some(l) = &limits {
if s.release > l.end || s.response < l.start {
continue;
}
s.release = s.release.max(l.start);
s.response = s.response.min(l.end);
}
writeln!(file,"{},{},{},{}",s.release,s.response,level_per_task[&s.name],s.name).expect("Could not write to file");
}
}
/* Write all abbs per task */
let abb_file = abb_path.map(|x| std::fs::OpenOptions::new()
.read(false)
.write(true)
.create(true)
.truncate(true)
.open(x).expect("Could not create file"));
if let Some(mut file) = abb_file {
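// The ABB profile is always reported in microseconds, regardless of --micros.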
conf.micros = true;
if abb_profile.is_empty() {
return;
}
writeln!(file,"name,addr,active,finish,micros,woet").expect("Could not write to file");
for (name, rest) in abb_profile.iter_mut().sorted_by_key(|x| x.0) {
rest.iter().sorted_by_key(|x| x.0).for_each(|(addr, (active, finish, time, woet))| {
writeln!(file,"{},{},{},{},{},{}",name,addr,active,finish,if conf.micros {*time as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*time as f64}, if conf.micros {*woet as f64 / fret::time::clock::QEMU_ISNS_PER_USEC as f64} else {*woet as f64}).expect("Could not write to file");
});
}
}
}