Merge branch 'fret_141' into fret_153

This commit is contained in:
Alwin Berger 2025-07-28 12:03:09 +00:00
commit 9bbc5e7c78
73 changed files with 9685 additions and 9 deletions

4
fuzzers/FRET/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
# Local build/run artifacts of the FRET fuzzer checkout — never commit these.
*.qcow2
corpus
*.axf
demo

83
fuzzers/FRET/Cargo.toml Normal file
View File

@ -0,0 +1,83 @@
[package]
name = "fret"
version = "0.8.2"
authors = ["Alwin Berger <alwin.berger@tu-dortmund.de>"]
edition = "2021"
# Feature flags compose a fuzzer configuration: execution environment, what to
# observe/trace, which feedbacks and schedulers to use, plus bundled presets.
[features]
default = ["std", "snapshot_fast", "restarting", "do_hash_notify_state", "do_hash_notify_value", "config_stg", "fuzz_int", "shortcut", "trace_job_response_times", "observe_systemstate_unordered" ]
std = []
# Exec environment basics
snapshot_restore = []
snapshot_fast = [ "snapshot_restore" ]
singlecore = []
restarting = ['singlecore']
run_until_saturation = []
fuzz_int = []
shortcut = []
# information capture
observe_edges = [] # observe cfg edges
observe_hitcounts = [ "observe_edges" ] # reduces edge granularity
observe_systemstate = []
observe_systemstate_unordered = []
do_hash_notify_state = []
do_hash_notify_value = []
trace_job_response_times = [ "trace_stg" ]
trace_stg = [ "observe_systemstate" ]
trace_reads = [ "trace_stg", "trace_job_response_times" ]
# feedbacks
feed_stg = [ "trace_stg", "observe_systemstate" ]
feed_stg_edge = [ "feed_stg"]
feed_stg_abb_woet = [ "feed_stg"]
feed_stg_pathhash = [ "feed_stg"]
feed_stg_abbhash = [ "feed_stg"]
feed_stg_aggregatehash = [ "feed_stg"]
feed_job_woet = [ "trace_job_response_times"]
feed_job_wort = [ "trace_job_response_times"]
mutate_stg = [ "observe_systemstate", "trace_reads" ]
feed_longest = [ ]
feed_afl = [ "observe_edges" ]
feed_genetic = []
gensize_1 = [ ]
gensize_10 = [ ]
gensize_100 = [ ]
gensize_1000 = [ ]
# schedulers
sched_genetic = []
sched_afl = []
sched_stg = []
sched_stg_edge = ['sched_stg'] # every edge in the stg
sched_stg_pathhash = ['sched_stg'] # every path in the stg
sched_stg_abbhash = ['sched_stg'] # every path of abbs
sched_stg_aggregatehash = ['sched_stg'] # every aggregated path (order independent)
# overall_configs
config_genetic = ["feed_genetic","sched_genetic","trace_stg"]
config_afl = ["feed_afl","sched_afl","trace_stg"]
config_frafl = ["feed_afl","sched_afl","feed_longest","trace_stg"]
config_stg = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg","feed_job_wort"]
config_stg_woet = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg","feed_job_wort","feed_job_woet","feed_stg_abb_woet"]
# config_stg_aggregate = ["feed_stg_aggregatehash","sched_stg_aggregatehash","mutate_stg"]
config_stg_abbpath = ["feed_stg_abbhash","sched_stg_abbhash","mutate_stg"]
config_stg_edge = ["feed_stg_edge","sched_stg_edge","mutate_stg"]
# Release profile keeps debug info so traces/symbols stay usable.
[profile.release]
lto = true
codegen-units = 1
debug = true
[dependencies]
libafl = { path = "../../libafl/", features = ["multipart_inputs", "prelude"] }
libafl_bolts = { path = "../../libafl_bolts/" }
libafl_targets = { path = "../../libafl_targets/" }
libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"], default-features = false }
serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
petgraph = { version="0.6.5", features = ["serde-1"] }
ron = "0.7" # write serialized data - including hashmaps
rand = "0.5"
clap = { version = "4.4.11", features = ["derive"] }
csv = "1.3.0"
log = "0.4"
simple_moving_average = "1.0.2"
itertools = "0.13.0"

16
fuzzers/FRET/benchmark/.gitignore vendored Normal file
View File

@ -0,0 +1,16 @@
# Benchmark working-directory outputs (dumps, corpora, plots, archives).
*dump
timedump*
corpora
build
mnt
.R*
*.png
*.pdf
bins
.snakemake
*.zip
*.tar.*
*.sqlite
eval*
test_*
bench_*

View File

@ -0,0 +1,57 @@
# Benchmark campaign driver. NOTE: recipe lines must be TAB-indented.
# Wall-clock budget per fuzzing run, in seconds (2 h).
TIME=7200
# Produce a seed corpus for one target by running the fuzzer in seed-dump mode.
# Target metadata (entry symbol, input symbol/size, stop breakpoint) comes from
# the target_symbols.csv row whose first column matches the target name.
corpora/%/seed:
	mkdir -p $$(dirname $@)
	LINE=$$(grep "^$$(basename $*)" target_symbols.csv); \
	export \
	KERNEL=benchmark/build/$*.elf \
	FUZZ_MAIN=$$(echo $$LINE | cut -d, -f2) \
	FUZZ_INPUT=$$(echo $$LINE | cut -d, -f3) \
	FUZZ_INPUT_LEN=$$(echo $$LINE | cut -d, -f4) \
	BREAKPOINT=$$(echo $$LINE | cut -d, -f5) \
	SEED_DIR=benchmark/corpora/$* \
	DUMP_SEED=seed; \
	../fuzzer.sh
# Run one timed campaign for a target; dump + log land under timedump/.
# NOTE(review): the '+' arguments appear to be positional placeholders for
# fuzzer.sh — confirm against fuzzer.sh's argument handling.
timedump/%$(FUZZ_RANDOM)$(SUFFIX): corpora/%/seed
	mkdir -p $$(dirname $@)
	LINE=$$(grep "^$$(basename $*)" target_symbols.csv); \
	export \
	KERNEL=benchmark/build/$*.elf \
	FUZZ_MAIN=$$(echo $$LINE | cut -d, -f2) \
	FUZZ_INPUT=$$(echo $$LINE | cut -d, -f3) \
	FUZZ_INPUT_LEN=$$(echo $$LINE | cut -d, -f4) \
	BREAKPOINT=$$(echo $$LINE | cut -d, -f5) \
	SEED_RANDOM=1 \
	TIME_DUMP=benchmark/$@ \
	CASE_DUMP=benchmark/$@; \
	../fuzzer.sh + + + + + $(TIME) + + + > $@_log
#SEED_DIR=benchmark/corpora/$*
# Aggregate targets grouped by benchmark suite.
all_sequential: timedump/sequential/mpeg2$(FUZZ_RANDOM) timedump/sequential/dijkstra$(FUZZ_RANDOM) timedump/sequential/epic$(FUZZ_RANDOM) \
	timedump/sequential/g723_enc$(FUZZ_RANDOM) timedump/sequential/audiobeam$(FUZZ_RANDOM) \
	timedump/sequential/gsm_enc$(FUZZ_RANDOM)
all_kernel: timedump/kernel/bsort$(FUZZ_RANDOM) timedump/kernel/insertsort$(FUZZ_RANDOM) #timedump/kernel/fft$(FUZZ_RANDOM)
all_app: timedump/app/lift$(FUZZ_RANDOM)
all_system: timedump/lift$(FUZZ_RANDOM)$(SUFFIX)
all_period: timedump/waters$(FUZZ_RANDOM)$(SUFFIX)
tacle_rtos: timedump/tacle_rtos$(FUZZ_RANDOM)
# Render comparison plots from results mounted under mnt/.
graphics:
	Rscript --vanilla plot_comparison.r mnt/timedump/sequential audiobeam
	Rscript --vanilla plot_comparison.r mnt/timedump/sequential dijkstra
	Rscript --vanilla plot_comparison.r mnt/timedump/sequential epic
	Rscript --vanilla plot_comparison.r mnt/timedump/sequential g723_enc
#	Rscript --vanilla plot_comparison.r mnt/timedump/sequential gsm_enc
#	Rscript --vanilla plot_comparison.r mnt/timedump/sequential huff_dec
	Rscript --vanilla plot_comparison.r mnt/timedump/sequential mpeg2
#	Rscript --vanilla plot_comparison.r mnt/timedump/sequential rijndael_dec
#	Rscript --vanilla plot_comparison.r mnt/timedump/sequential rijndael_enc
clean:
	rm -rf corpora timedump

View File

@ -0,0 +1,309 @@
# Snakemake pipeline driving the FRET benchmark campaign: builds fuzzer
# variants, runs them against the target kernels, and post-processes dumps.
import csv
import os
# BENCHDIR must point at the working directory for builds/bins/timedumps.
envvars:
    "BENCHDIR"
# Base cargo flags shared by all fuzzer builds; each rule appends its features.
def_flags="--release --no-default-features --features std,snapshot_fast,restarting,do_hash_notify_state,do_hash_notify_value,fuzz_int,trace_job_response_times,observe_systemstate_unordered"
benchdir=os.environ["BENCHDIR"]
# Per-run fuzzing budget in seconds (24 h).
RUNTIME=(3600*24)
# Stage a built target kernel image into the benchmark working directory.
rule copy_kernel:
    input:
        "build/{target}.elf"
    output:
        "{benchdir}/build/{target}.elf"
    shell:
        "mkdir -p {benchdir}/build && cp {input} {output}"
# Force a from-scratch rebuild of the patched QEMU bundled with libafl_qemu.
rule rebuild_qemu:
    shell:
        "unset CUSTOM_QEMU_NO_BUILD CUSTOM_QEMU_NO_CONFIGURE && cargo build"
# One build rule per fuzzer variant. All variants start from a reflink copy of
# target_default's build dir so incremental compilation artifacts are reused;
# only the appended feature set differs.
# NOTE(review): feedgeneration1/10 append "feed_genetic,..." while
# feedgeneration100/1000 append "config_genetic,..." — confirm this asymmetry
# is intentional.
rule build_default:
    input:
        "../Cargo.toml",
        "../src"
    output:
        directory("{benchdir}/bins/target_default")
    shell:
        "cargo build --target-dir {output} {def_flags}"
rule build_showmap:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_showmap")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg"
rule build_random:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_random")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},feed_longest"
rule build_frafl:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_frafl")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_frafl,feed_longest"
rule build_afl:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_afl")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_afl"
rule build_stg:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_stg")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg"
rule build_stgwoet:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_stgwoet")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg_woet"
rule build_stg_abbpath:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_stg_abbpath")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg_abbpath"
rule build_stg_edge:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_stg_edge")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_stg_edge"
rule build_feedgeneration1:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_feedgeneration1")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},feed_genetic,gensize_1"
rule build_feedgeneration10:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_feedgeneration10")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},feed_genetic,gensize_10"
rule build_feedgeneration100:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_feedgeneration100")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,gensize_100"
rule build_genetic100:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_genetic100")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,mutate_stg,gensize_100"
rule build_feedgeneration1000:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_feedgeneration1000")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,gensize_1000"
rule build_genetic1000:
    input:
        "{benchdir}/bins/target_default"
    output:
        directory("{benchdir}/bins/target_genetic1000")
    shell:
        "cp -r -a --reflink=auto {input} {output} && cargo build --target-dir {output} {def_flags},config_genetic,mutate_stg,gensize_1000"
# Execute one fuzzing run: {fuzzer} binary on {target}, repetition {num}.
# Target metadata is looked up in target_symbols.csv (the '\ufeffkernel' key
# works around a BOM in the CSV header). Fuzzers whose name contains "random"
# get the --random flag; `set +e`/`exit 0` keep Snakemake going on fuzzer
# crashes so partial campaigns still produce the other outputs.
rule run_bench:
    input:
        "{benchdir}/build/{target}.elf",
        "{benchdir}/bins/target_{fuzzer}"
    output:
        multiext("{benchdir}/timedump/{fuzzer}/{target}#{num}", ".time", ".log") # , ".case"
    run:
        with open('target_symbols.csv') as csvfile:
            reader = csv.DictReader(csvfile)
            line = next((x for x in reader if x['\ufeffkernel']==wildcards.target), None)
            if line == None:
                return False
            kernel=line['\ufeffkernel']
            fuzz_main=line['main_function']
            fuzz_input=line['input_symbol']
            fuzz_len=line['input_size']
            bkp=line['return_function']
            select_task=line['select_task']
            if wildcards.fuzzer.find('random') >= 0:
                script="""
                export RUST_BACKTRACE=1
                mkdir -p $(dirname {output[0]})
                set +e
                echo $(pwd)/{input[1]}/release/fret -n $(pwd)/{benchdir}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz --random -t {RUNTIME} -s {wildcards.num}
                $(pwd)/{input[1]}/release/fret -n $(pwd)/{benchdir}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz --random -t {RUNTIME} -s {wildcards.num} > {output[1]} 2>&1
                exit 0
                """
            else:
                script="""
                export RUST_BACKTRACE=1
                mkdir -p $(dirname {output[0]})
                set +e
                echo $(pwd)/{input[1]}/release/fret -n $(pwd)/{benchdir}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num}
                $(pwd)/{input[1]}/release/fret -n $(pwd)/{benchdir}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num} > {output[1]} 2>&1
                exit 0
                """
            shell(script)
# Replay a saved worst case (.case) through the showmap binary to regenerate
# its trace (.trace.ron) and timing (.time) artifacts.
rule run_showmap:
    input:
        "{benchdir}/build/{target}.elf",
        "{benchdir}/bins/target_showmap",
        "{benchdir}/timedump/{fuzzer}/{target}#{num}.case"
    output:
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.trace.ron",
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.time",
    run:
        with open('target_symbols.csv') as csvfile:
            reader = csv.DictReader(csvfile)
            line = next((x for x in reader if x['\ufeffkernel']==wildcards.target), None)
            if line == None:
                return False
            kernel=line['\ufeffkernel']
            fuzz_main=line['main_function']
            fuzz_input=line['input_symbol']
            fuzz_len=line['input_size']
            bkp=line['return_function']
            select_task=line['select_task']
            script="""
            export FUZZER=$(pwd)/{input[1]}/release/fret
            mkdir -p $(dirname {output})
            set +e
            echo $FUZZER -n $(pwd)/{benchdir}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num}_case -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv showmap -i {input[2]}
            $FUZZER -n $(pwd)/{benchdir}/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num}_case -s {select_task} -t -a -r -g -k {input[0]} -c ./target_symbols.csv showmap -i {input[2]}
            exit 0
            """
            # Random-driven cases need the same env flag they were fuzzed with.
            if wildcards.fuzzer.find('random') >= 0:
                script="export FUZZ_RANDOM=1\n"+script
            shell(script)
# Convert a RON trace into the three CSVs (jobs, response times, abbs)
# consumed by the gantt plotting tooling, via the external state2gantt binary.
rule transform_trace:
    input:
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.trace.ron",
    output:
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.csv",
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv",
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.abbs.csv"
    run:
        with open('target_symbols.csv') as csvfile:
            reader = csv.DictReader(csvfile)
            line = next((x for x in reader if x['\ufeffkernel']==wildcards.target), None)
            if line == None:
                return False
            kernel=line['\ufeffkernel']
            fuzz_main=line['main_function']
            fuzz_input=line['input_symbol']
            fuzz_len=line['input_size']
            bkp=line['return_function']
            select_task=line['select_task']
            script="""
            echo $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
            $(pwd)/../../../../state2gantt/target/debug/state2gantt -i {input} -a {output[0]} -r {output[1]} -p {output[2]} -t {select_task}
            """
            shell(script)
# Render the jobs/response CSVs of one case into an interactive HTML gantt.
rule trace2gantt:
    input:
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.csv",
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.resp.csv"
    output:
        "{benchdir}/timedump/{fuzzer}/{target}#{num}_case.jobs.html",
    shell:
        "Rscript $(pwd)/../../../../state2gantt/plot_response.r {input[0]} {input[1]} html"
# Aggregate campaign definitions. Each set expands to the .time outputs it
# needs; set names reflect the core-count budget they were sized for.
rule quicktest:
    params:
        benchdir=benchdir
    input:
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stg', 'random'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 1 ))),
# main scenarios
# main competitors: 10
# frafl: 10
# random: 5
# low prio scenarios
# main competitors: 8
# frafl: 8
# random: 5
rule set128:
    params:
        benchdir=benchdir
    input:
        # waters full
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 10 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 10 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_full', '_seq_unsync_full'], num=range(0,int( 5 ))),
        # release full
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['release'], variant=['_seq_full'], num=range(0,int( 10 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['release'], variant=['_seq_full'], num=range(0,int( 10 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['release'], variant=['_seq_full'], num=range(0,int( 5 ))),
        # release int (low prio)
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['release'], variant=['_seq_int'], num=range(0,int( 5 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random', 'frafl'], target=['release'], variant=['_seq_int'], num=range(0,int( 5 ))),
rule set48:
    params:
        benchdir=benchdir
    input:
        # polycopter full
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 12 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['frafl'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 12 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['polycopter'], variant=['_seq_dataflow_full'], num=range(0,int( 10 ))),
rule set64:
    params:
        benchdir=benchdir
    input:
        # waters int+bytes (low prio)
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['feedgeneration100', 'stgwoet', 'frafl'], target=['waters'], variant=['_seq_int', '_seq_bytes'], num=range(0,int( 8 ))),
        expand("{benchdir}/timedump/{fuzzer}/{target}{variant}#{num}.time", benchdir=benchdir, fuzzer=['random'], target=['waters'], variant=['_seq_int', '_seq_bytes'], num=range(0,int( 5 ))),
# Build every fuzzer binary without running anything.
rule all_bins:
    params:
        benchdir=benchdir
    input:
        expand("{benchdir}/bins/target_{target}", benchdir=benchdir, target=['random','frafl','stg','stgwoet','feedgeneration100','genetic100'])
rule clean:
    shell:
        "rm -rf {benchdir}/timedump"
rule full_clean:
    shell:
        "rm -rf {benchdir}/bins & rm -rf {benchdir}/timedump"

View File

@ -0,0 +1,5 @@
#!/bin/sh
# Rename fuzzer-named result directories to their paper-facing names.
# Each mv only runs if the source directory exists.
export TOPLEVEL="remote/timedump"
[ -d "$TOPLEVEL/feedgeneration100" ] && mv $TOPLEVEL/feedgeneration100 $TOPLEVEL/evolutionary
[ -d "$TOPLEVEL/stg" ] && mv $TOPLEVEL/stg $TOPLEVEL/fret
[ -d "$TOPLEVEL/frafl" ] && mv $TOPLEVEL/frafl $TOPLEVEL/coverage

View File

@ -0,0 +1,15 @@
# Build every fuzzer variant into its own target dir.
# NOTE(review): no --release here, so binaries land in bins/target_*/debug —
# consistent with the plotting helper that calls number_cruncher/target/debug;
# confirm this is intentional and not a missing flag.
def_flags="--no-default-features --features std,snapshot_fast,restarting,do_hash_notify_state,trace_job_response_times,fuzz_int"
set -e
cargo build --target-dir ./bins/target_showmap ${def_flags},config_stg
cargo build --target-dir ./bins/target_random ${def_flags},feed_longest
cargo build --target-dir ./bins/target_frafl ${def_flags},config_frafl,feed_longest
cargo build --target-dir ./bins/target_afl ${def_flags},config_afl,observe_hitcounts
cargo build --target-dir ./bins/target_stg ${def_flags},config_stg
cargo build --target-dir ./bins/target_stgpath ${def_flags},feed_stg_abbhash,sched_stg_abbhash,mutate_stg
cargo build --target-dir ./bins/target_feedgeneration1 ${def_flags},feed_genetic,gensize_1
cargo build --target-dir ./bins/target_feedgeneration10 ${def_flags},feed_genetic,gensize_10
cargo build --target-dir ./bins/target_feedgeneration100 ${def_flags},feed_genetic,gensize_100
cargo build --target-dir ./bins/target_feedgeneration1000 ${def_flags},feed_genetic,gensize_1000
cargo build --target-dir ./bins/target_genetic100 ${def_flags},feed_genetic,mutate_stg,gensize_100
cargo build --target-dir ./bins/target_genetic1000 ${def_flags},feed_genetic,mutate_stg,gensize_1000

View File

@ -0,0 +1,125 @@
#!/usr/bin/env bash
# Build every FreeRTOS demo kernel variant used by the benchmarks.
# Usage: ./script [BUILD_DIR] [INSERT_WC]
# Variant behavior is driven by env vars consumed by the demo Makefile:
#   PARTITION_INPUT, IGNORE_INTERRUPTS, IGNORE_BYTES, IGNORE_INTERNAL_STATE,
#   SPECIAL_CFLAGS, EXTRA_MAKE_ARGS; SUFFIX names the output elf.
export INSERT_WC=${2:-0}
export BUILD_DIR=${1:-build}
mkdir -p $BUILD_DIR
# build <DEMO_MACRO> <suffix>: clean-build the demo with the current env and
# copy the .axf out as <demo-lowercase><EXTRA_NAME_SUFFIX><suffix>.elf.
build () {
    make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC clean && make -C ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC $1=1 IGNORE_INTERRUPTS=$IGNORE_INTERRUPTS IGNORE_BYTES=$IGNORE_BYTES IGNORE_INTERNAL_STATE=$IGNORE_INTERNAL_STATE INSERT_WC=$INSERT_WC $EXTRA_MAKE_ARGS
    cp ../../../../FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/build/RTOSDemo.axf $BUILD_DIR/$(echo $1 | cut -d_ -f1 | tr '[:upper:]' '[:lower:]')$EXTRA_NAME_SUFFIX$2.elf
}
mkdir -p build
# Sequential inputs!
export PARTITION_INPUT=0
unset SPECIAL_CFLAGS
# Baseline
## Don't keep rng states
export IGNORE_INTERNAL_STATE=1
### Only bytes
export IGNORE_INTERRUPTS=1 IGNORE_BYTES=0 SUFFIX="_seq_bytes"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
### Only interrupts
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=1 SUFFIX="_seq_int"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
### Full
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_seq_full"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
build POLYCOPTER_DEMO $SUFFIX
# Stateful -> presumably bad for us
## keep rng states
export IGNORE_INTERNAL_STATE=0
### Full
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_seq_stateful_full"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
# Paritioned inputs
export PARTITION_INPUT=1
# Alternative input scheme
## Don't keep rng states
export IGNORE_INTERNAL_STATE=1
### Only bytes
export IGNORE_INTERRUPTS=1 IGNORE_BYTES=0 SUFFIX="_par_bytes"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
### Only interrupts
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=1 SUFFIX="_par_int"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
### Full
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_par_full"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
build POLYCOPTER_DEMO $SUFFIX
# Stateful -> presumably bad for us
## keep rng states
export IGNORE_INTERNAL_STATE=0
### Full
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_par_stateful_full"
build WATERS_DEMO $SUFFIX
build RELEASE_DEMO $SUFFIX
build COPTER_DEMO $SUFFIX
# Stateful -> presumably bad for us
## keep rng states
export IGNORE_INTERNAL_STATE=0
export PARTITION_INPUT=0
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_seq_stateful_full"
build POLYCOPTER_DEMO $SUFFIX
# stateless + dataflow
export PARTITION_INPUT=0
export IGNORE_INTERNAL_STATE=1
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_seq_dataflow_full"
export SPECIAL_CFLAGS="-DCOPTER_DATAFLOW=1"
build POLYCOPTER_DEMO $SUFFIX
unset SPECIAL_CFLAGS
export PARTITION_INPUT=0
export IGNORE_INTERNAL_STATE=1
export IGNORE_INTERRUPTS=1 IGNORE_BYTES=0 SUFFIX="_seq_dataflow_bytes"
export SPECIAL_CFLAGS="-DCOPTER_DATAFLOW=1"
build POLYCOPTER_DEMO $SUFFIX
unset SPECIAL_CFLAGS
# stateless + dataflow
export PARTITION_INPUT=1
export IGNORE_INTERNAL_STATE=1
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_par_dataflow_full"
export SPECIAL_CFLAGS="-DCOPTER_DATAFLOW=1"
build POLYCOPTER_DEMO $SUFFIX
unset SPECIAL_CFLAGS
# special waters with no synchronization
export PARTITION_INPUT=0
export IGNORE_INTERNAL_STATE=1
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_seq_unsync_full"
export SPECIAL_CFLAGS="-DWATERS_UNSYNCHRONIZED=1"
build WATERS_DEMO $SUFFIX
unset SPECIAL_CFLAGS
# special generated waters 2015
export PARTITION_INPUT=0
export IGNORE_INTERNAL_STATE=1
export IGNORE_INTERRUPTS=0 IGNORE_BYTES=0 SUFFIX="_par_bytes"
export EXTRA_MAKE_ARGS="SEED=1"
export EXTRA_NAME_SUFFIX="1"
build WATERSGEN_DEMO $SUFFIX
unset EXTRA_MAKE_ARGS
unset EXTRA_NAME_SUFFIX

View File

@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Summarize fuzzer logs: for every "<case>#N.log" under dir $1 matching case
# prefix $2, print the final status and corpus-count lines of each log.
find $1 -type 'f' -iname "${2}#*.log" | while IFS="" read -r p || [ -n "$p" ]
do
    # Last "run time: ... corpus: N" status line within the log's tail.
    LINE=$(tail -n 100 $p | grep -io "run time: .* corpus: [0-9]*" | tail -n 1)
    echo $p: $LINE
    # Last "interesting corpus elements" line anywhere in the log.
    LINE=$(grep -i "interesting corpus elements" $p | tail -n 1)
    echo $p: $LINE
done

View File

@ -0,0 +1 @@
*.sqlite

View File

@ -0,0 +1,11 @@
# number_cruncher: offline tool that reduces raw .time dumps into per-(case,
# tool) statistics tables in a SQLite database for plotting.
[package]
name = "number_cruncher"
version = "0.1.0"
edition = "2021"
[dependencies]
clap = { version = "4.5.28", features = ["derive"] }
itertools = "0.14.0"
rayon = "1.10.0"
regex = "1.11.1"
rusqlite = "0.33.0"

View File

@ -0,0 +1,295 @@
use clap::parser::ValueSource;
use clap::Parser;
use itertools::Group;
use itertools::Itertools;
use rayon::iter::ParallelBridge;
use rayon::prelude::*;
use rayon::result;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::io::{self, BufRead, BufReader};
use std::path::Path;
use std::path::PathBuf;
use rusqlite::{params, Connection, Result};
use std::collections::HashMap;
// How far along the time axis each case/tool group is sampled before its
// statistics table is cut off (selected via --end-early).
// Plain `//` comments are used on purpose: `///` docs would change the
// clap-generated --help output.
#[derive(clap::ValueEnum, Clone, PartialEq)]
enum Endpoint {
    // Stop at the earliest end-timestamp across ALL runs of a case.
    AllMin,
    // Stop at the shortest run of the tool itself.
    ToolMin,
    // Cap at the longest run of the tool itself.
    ToolMax,
    // No cutoff; aggregate every sample index common to the tool's runs.
    Max
}
// CLI arguments. The existing `///` comments double as clap help text, so
// added documentation uses plain `//` to keep --help output unchanged.
#[derive(Parser)]
struct Config {
    /// Input
    // Directory tree of "<tool>/<case>#<num>.time" dumps to crunch.
    #[arg(short, long, value_name = "DIR")]
    input: PathBuf,
    /// Output
    // SQLite database to (re)create with per-case/tool statistics tables.
    #[arg(short, long, value_name = "FILE", default_value = "out.sqlite")]
    output: PathBuf,
    /// End each group after the first termination
    // Matched case-insensitively against kebab-case variant names (e.g. "all-min").
    #[arg(short, long, default_value = "max")]
    end_early: Endpoint,
}
/// Recursively walk `dir`, collecting every `<case>#<num>.time` dump file.
///
/// Each hit is pushed onto `results` as
/// `(path, parent-dir-name, case-name, case-number)` — the parent directory
/// names the fuzzer/tool that produced the dump.
///
/// # Errors
/// Propagates any `std::io::Error` from reading directory entries.
fn visit_dirs(
    dir: &Path,
    results: &mut Vec<(PathBuf, String, String, String)>,
) -> std::io::Result<()> {
    if dir.is_dir() {
        // Compile the filename filter once per directory instead of once per
        // matching file (regex compilation is expensive; see clippy's
        // `regex_creation_in_loops`).
        let re = regex::Regex::new(r".*#[0-9]+\.time$").unwrap();
        for entry in fs::read_dir(dir)? {
            let entry = entry?;
            let path = entry.path();
            if path.is_dir() {
                // Descend into per-tool / per-case subdirectories.
                visit_dirs(&path, results)?;
            } else if path.extension().and_then(|s| s.to_str()) == Some("time") {
                if let Some(file_name) = path.file_name().and_then(|s| s.to_str()) {
                    if re.is_match(file_name) {
                        if let Some(dir_name) = path
                            .parent()
                            .and_then(|p| p.file_name())
                            .and_then(|s| s.to_str())
                        {
                            // The stem has the shape "<case>#<num>"; split it.
                            let mut file_stem =
                                path.file_stem().unwrap().to_str().unwrap().split("#");
                            let case_name = file_stem.next().unwrap();
                            let case_number = file_stem.next().unwrap();
                            results.push((
                                path.clone(),
                                dir_name.to_string(),
                                case_name.to_string(),
                                case_number.to_string(),
                            ));
                        }
                    }
                }
            }
        }
    }
    Ok(())
}
/// Parse a `.time` dump (CSV lines of `watermark,timestamp`) into the rising
/// watermark curve: only rows whose first column strictly exceeds every
/// earlier first column are kept.
///
/// Post-processing shapes the curve for plotting: when more than one point
/// survives, the first point's timestamp is pinned to 0 and the final
/// watermark is repeated at the last timestamp seen, so the curve spans the
/// whole run. An empty file yields the two points (0, 0) and (0, 0).
///
/// # Errors
/// Propagates I/O errors from opening or reading the file.
fn maxpoints_of_file(file_path: &Path) -> io::Result<Vec<(usize, usize)>> {
    let reader = BufReader::new(File::open(file_path)?);
    let mut curve: Vec<(usize, usize)> = Vec::new();
    let mut highest_so_far = 0;
    let mut final_time = 0;
    for line in reader.lines() {
        let row = line?;
        let mut cols = row.split(',');
        if let (Some(raw_value), Some(raw_time)) = (cols.next(), cols.next()) {
            // NOTE: malformed numbers panic, matching the rest of this tool.
            let value: usize = raw_value.trim().parse().unwrap();
            let time: usize = raw_time.trim().parse().unwrap();
            if value > highest_so_far {
                curve.push((value, time));
                highest_so_far = value;
            }
            final_time = time;
        }
    }
    if curve.len() > 1 {
        curve[0].1 = 0;
        let last_value = curve[curve.len() - 1].0;
        curve.push((last_value, final_time));
    }
    if curve.is_empty() {
        curve.push((0, 0));
        curve.push((0, final_time));
    }
    Ok(curve)
}
/// Resample a watermark curve onto the sample grid `samples` (ascending
/// timestamps): each sample timestamp gets the watermark value of the curve
/// interval it falls into.
///
/// Samples earlier than the first interval reuse the first watermark; samples
/// that fall before an interval we have already moved past are dropped with a
/// warning on stderr.
fn sample_maxpoints(points: &Vec<(usize, usize)>, samples: &Vec<usize>) -> Vec<(usize, usize)> {
    let mut sampled = Vec::new();
    let mut pending = samples.iter().peekable();
    'intervals: for (idx, &(value, start)) in points.iter().enumerate() {
        // The interval owned by points[idx] ends where points[idx+1] begins.
        let next_start = points.get(idx + 1).map(|p| p.1);
        loop {
            let ts = match pending.peek() {
                None => break 'intervals, // every sample placed
                Some(&&t) => t,
            };
            let inside = ts >= start && next_start.map_or(true, |ns| ts < ns);
            if inside {
                sampled.push((value, ts));
                pending.next();
            } else if ts < start {
                if idx == 0 {
                    // Before the first interval: fall back to the first value.
                    sampled.push((value, ts));
                    pending.next();
                } else {
                    eprintln!("WARNING Skipped: {}", pending.next().unwrap());
                }
            } else {
                // Sample belongs to a later interval; advance the outer loop.
                continue 'intervals;
            }
        }
    }
    sampled
}
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
/// Arithmetic mean of `data`; `None` for an empty slice.
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
fn mean(data: &[usize]) -> Option<f64> {
    if data.is_empty() {
        return None;
    }
    let total: usize = data.iter().sum();
    Some(total as f64 / data.len() as f64)
}
/// Median of `data`; `None` for an empty slice, and also `None` when the two
/// middle values of an even-length slice overflow `usize` on addition
/// (preserved via `checked_add`).
fn median(data: &[usize]) -> Option<f64> {
    let mut sorted = data.to_vec();
    sorted.sort();
    let n = sorted.len();
    if n == 0 {
        return None;
    }
    if n % 2 == 1 {
        sorted.get(n / 2).map(|&mid| mid as f64)
    } else {
        let lower = sorted[n / 2 - 1];
        let upper = sorted[n / 2];
        lower.checked_add(upper).map(|sum| sum as f64 / 2.0)
    }
}
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
/// Population standard deviation of `data` (divides by N, not N-1);
/// `None` for an empty slice.
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
fn std_deviation(data: &[usize]) -> Option<f64> {
    if data.is_empty() {
        return None;
    }
    let count = data.len() as f64;
    let avg = data.iter().sum::<usize>() as f64 / count;
    let squared_diffs: f64 = data
        .iter()
        .map(|&value| {
            let diff = avg - value as f64;
            diff * diff
        })
        .sum();
    Some((squared_diffs / count).sqrt())
}
fn main() {
    // Collect every (path, tool, case, run-number) .time dump under the input dir.
    let conf = Config::parse();
    let mut results = Vec::new();
    if let Err(e) = visit_dirs(&conf.input, &mut results) {
        eprintln!("Error reading directories: {}", e);
    }
    println!("Files: {:?}", results);
    // Fresh bookkeeping table: one row per (case, tool) combo, pointing at the
    // "<case>$<tool>" data table created for it below.
    let mut connection = Connection::open(conf.output).unwrap();
    connection.execute("DROP TABLE IF EXISTS combos", ()).unwrap();
    connection.execute("CREATE TABLE IF NOT EXISTS combos (casename TEXT, toolname TEXT, fullname TEXT PRIMARY KEY)", ()).unwrap();
    // Parse all dump files into watermark curves in parallel.
    let mut points: Vec<_> = results
        .par_iter()
        .map(|(path, fuzzer, case, n)| {
            (
                case,
                fuzzer,
                n.parse::<usize>().unwrap(),
                maxpoints_of_file(path).unwrap(),
            )
        })
        .collect();
    // Earliest end-timestamp across ALL runs; used by the AllMin cutoff below.
    let mut last_common_point = points.iter().map(|x| x.3.last().expect(&format!("Missing maxpoint for {}", x.0)).1).min().unwrap();
    points.sort_by_key(|x| x.0); // by case for grouping
    for (case, casegroup) in &points.into_iter().chunk_by(|x| x.0) {
        let casegroup = casegroup.collect::<Vec<_>>();
        // NOTE(review): computed but never read below — candidate for removal.
        let last_case_point = casegroup.iter().map(|x| x.3.last().unwrap().1).min().unwrap();
        println!("Processing case {}: {}", case, casegroup.len());
        // Union of every timestamp seen in any run of this case: the shared sample grid.
        let mut timestamps = Vec::new();
        for (_, _, _, points) in &casegroup {
            timestamps.extend(points.iter().map(|(_, t)| *t));
        }
        timestamps.sort();
        if matches!(conf.end_early, Endpoint::AllMin) {
            // Don't sample anything after the shortest run
            timestamps = timestamps.into_iter().filter(|x| x<=&last_common_point).collect();
        }
        // Per tool: the run ending earliest / latest (drives ToolMin/ToolMax cutoffs).
        let least_runtime_per_tool = casegroup.iter().map(|g| (g.1, g.2, g.3.last().unwrap().1)).sorted_by_key(|x| x.0).chunk_by(|x| x.0).into_iter().map(|(tool, toolgroup)| (tool, toolgroup.min_by_key(|y| y.2))).collect::<HashMap<_,_>>();
        let longest_runtime_per_tool = casegroup.iter().map(|g| (g.1, g.2, g.3.last().unwrap().1)).sorted_by_key(|x| x.0).chunk_by(|x| x.0).into_iter().map(|(tool, toolgroup)| (tool, toolgroup.max_by_key(|y| y.2))).collect::<HashMap<_,_>>();
        timestamps.dedup();
        // Resample every run's watermark curve onto the shared timestamp grid.
        let mut maxpoints_per_tool = casegroup
            .par_iter()
            .map(|g| (g.0, g.1, g.2, sample_maxpoints(&g.3, &timestamps)))
            .collect::<Vec<_>>();
        maxpoints_per_tool.sort_by_key(|x| x.1); // by tool
        for (tool, toolgroup) in &maxpoints_per_tool.into_iter().chunk_by(|x| x.1) {
            let toolgroup = toolgroup.collect::<Vec<_>>();
            println!("Processing tool {}: {}", tool, toolgroup.len());
            // Aggregate only up to the shortest resampled curve so each grid
            // index exists in every run of this tool.
            let mut lowest_common_length = toolgroup
                .iter()
                .map(|(_, _, _, points)| points.len())
                .min()
                .unwrap();
            if conf.end_early == Endpoint::ToolMin {
                lowest_common_length = timestamps.binary_search(&least_runtime_per_tool[tool].unwrap().2).unwrap();
            }
            if conf.end_early == Endpoint::ToolMax {
                lowest_common_length = std::cmp::min(lowest_common_length, timestamps.binary_search(&longest_runtime_per_tool[tool].unwrap().2).unwrap());
            }
            // Per grid index: (timestamp, min, max, median, mean, stddev) across this tool's runs.
            let time_min_max_med_mean_sdiv : Vec<(usize,usize,usize,f64,f64,f64)> = (0..lowest_common_length)
                .into_par_iter()
                .map(|i| {
                    let slice = toolgroup.iter().map(|(_, _, _, p)| p[i].0).collect::<Vec<_>>();
                    assert_eq!(slice.len(), toolgroup.len());
                    (
                        toolgroup[0].3[i].1,
                        *slice.iter().min().unwrap_or(&0),
                        *slice.iter().max().unwrap_or(&0),
                        median(&slice).unwrap_or(0.0),
                        mean(&slice).unwrap_or(0.0),
                        std_deviation(&slice).unwrap_or(0.0),
                    )
                })
                .collect::<Vec<_>>();
            // Save to db
            connection.execute("INSERT INTO combos (casename, toolname, fullname) VALUES (?, ?, ?)", (case, tool, format!("{}${}",case, tool))).unwrap();
            connection.execute(&format!("DROP TABLE IF EXISTS {}${}", case, tool), ()).unwrap();
            connection.execute(&format!("CREATE TABLE IF NOT EXISTS {}${} (timestamp INTEGER PRIMARY KEY, min INTEGER, max INTEGER, median REAL, mean REAL, sdiv REAL)", case, tool), ()).unwrap();
            // Start a transaction
            let transaction = connection.transaction().unwrap();
            let mut stmt = transaction.prepare(&format!(
                "INSERT INTO {}${} (timestamp , min , max , median , mean , sdiv ) VALUES (?, ?, ?, ?, ?, ?)",
                case, tool
            )).unwrap();
            for (timestamp, min, max, median, mean, sdiv) in time_min_max_med_mean_sdiv {
                stmt.execute([(timestamp as i64).to_string(), (min as i64).to_string(), (max as i64).to_string(), median.to_string(), mean.to_string(), sdiv.to_string()]).unwrap();
            }
            drop(stmt);
            // Commit the transaction
            transaction.commit().unwrap();
        }
    }
}

View File

@ -0,0 +1,35 @@
# Render comparison plots for each target/input-scheme combination, skipping
# any plot whose output PNG already exists.
BDIR=remote
plot () {
    [ ! -f ../benchmark/$BDIR/${1}${2}_all.png ] && Rscript plot_multi.r $BDIR/timedump ${1}${2} ../benchmark/$BDIR
}
# Only bytes
export SUFFIX="_seq_bytes"
plot waters $SUFFIX
#plot release $SUFFIX
plot copter $SUFFIX
#plot interact $SUFFIX
# Only interrupts
export SUFFIX="_seq_int"
plot waters $SUFFIX
plot release $SUFFIX
plot copter $SUFFIX
#plot interact $SUFFIX
# Full
export SUFFIX="_seq_full"
plot waters $SUFFIX
#plot release $SUFFIX
plot copter $SUFFIX
#plot interact $SUFFIX
# NOTE(review): the build script emits "_seq_stateful_full", not
# "_seq_stateless_full" — confirm this suffix is not a typo.
plot copter "_seq_stateless_full"
plot copter "_par_full"

View File

@ -0,0 +1,13 @@
#!/bin/sh
# Rebuild bench.sqlite from the timedump directory when it is stale, then plot.
# Usage: script [target-dir]; falls back to $BENCHDIR when no argument given.
# FIX: the original used [[ ]] (a bashism) under #!/bin/sh, and passed a regex
# to find's -name, which matches globs and therefore never matched anything.
if [ -n "$1" ]; then
    TARGET="$1"
else
    TARGET="$BENCHDIR"
fi
# Regenerate bench.sqlite if it is missing or any '<name>#<n>.time' trace is
# newer than it. -regex matches the whole path, so the leading .* is required.
# The || short-circuits, so -newer never sees a missing bench.sqlite.
if [ ! -f "$TARGET/bench.sqlite" ] || [ "$(find "$TARGET/timedump" -regex '.*[0-9]+\.time' -newer "$TARGET/bench.sqlite" | wc -l)" -gt 0 ]; then
    number_cruncher/target/debug/number_cruncher -i "$TARGET/timedump" -o "$TARGET/bench.sqlite"
fi
Rscript plot_sqlite.r "$TARGET/bench.sqlite" "$TARGET"

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
./sem.sh /tmp/plot reset 20
declare -a PLOTS
COUNT=0
while IFS="" read -r p || [ -n "$p" ];
do
if [[ -z "$p" ]]; then
continue
fi
PLOTS[$COUNT]="$p"
COUNT=$((COUNT+1))
../../../../state2gantt/driver_sem.sh $p &
done < <(find $BENCHDIR/timedump -maxdepth 2 -type 'f' -iregex '.*icounttrace.ron$')

View File

@ -0,0 +1,33 @@
# Helpers for locating, per benchmark, the run whose STG reached the largest
# final node count, and for plotting that run's growth curve.

# get_max_nodecount <prefix>: write "<last stgsize line>,<file>" for every
# matching .stgsize file into ./sizecomp, then print the numerically largest
# entry. Relies on ** recursion (bash with globstar enabled, or zsh).
get_max_nodecount () {
rm -f sizecomp && for sizefile in $BENCHDIR/timedump/**/$1*.stgsize;do echo "$(tail -n 1 $sizefile),${sizefile}" >> sizecomp; done; sort -n sizecomp | tail -n 1
}
# get_largest_files <prefix>: print only the file-path column of the winner.
# NOTE(review): -f6 assumes the final .stgsize line has exactly 5 comma-
# separated fields so the appended path lands in field 6 — confirm the format.
get_largest_files () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
}
# perform <prefix>: plot the winning run's node growth via plot_stgsize.r and
# rename the resulting image to <prefix>_nodes.png.
perform () {
T=$(get_max_nodecount $1)
echo $T | cut -d',' -f6
echo $T | cut -d',' -f6 | xargs -I {} ./plot_stgsize.r {}
mv "$(echo $T | cut -d',' -f6 | xargs -I {} basename -s .stgsize {})_nodes.png" $1_nodes.png
}
# perform copter
# perform release
# perform waters
# Pick the largest run of each of the three paper benchmarks.
A=$(get_largest_files polycopter_seq_dataflow_full)
B=$(get_largest_files release_seq_full)
C=$(get_largest_files waters_seq_full)
# A_="$(echo $A | sed 's/polycopter_seq_dataflow_full/UAV w. hid. com./')"
# B_="$(echo $B | sed 's/release_seq_full/Async. rel./')"
# C_="$(echo $C | sed 's/waters_seq_full/Waters ind. ch./')"
# Human-readable display names; used both as copy targets and plot labels.
A_="UAV"
B_="Async. rel."
C_="Waters ind. ch."
echo $A_ $B_ $C_
# Copy each winning trace to its display name, then plot all three together.
cp $A "$A_"
cp $B "$B_"
cp $C "$C_"
./plot_stgsize_multi.r "$A_" "$B_" "$C_"

View File

@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Collect the expected <case>.jobs.html plot targets for every .case file
# under $BENCHDIR/timedump and hand them all to snakemake in one batch.
declare -a PLOTS
COUNT=0
while IFS="" read -r p || [ -n "$p" ];
do
if [[ -z "$p" ]]; then
continue
fi
# Derive the per-case file stems that live next to the .case file.
N="$(dirname "$p")/$(basename -s .case "$p")"
T="${N}_case.trace.ron"
P="${N}_case"
H="${N}_case.jobs.html"
echo "$COUNT $p -> $H"
IFS=" "
# PLOTS+=("$H")
PLOTS[$COUNT]="$H"
COUNT=$((COUNT+1))
# if [ ! -f "$T" ]; then
# snakemake -c1 "$T"
# fi
# if [ ! -f "$P.html" ]; then
# ~/code/FRET/state2gantt/driver.sh "$T"
# fi
done < <(find $BENCHDIR/timedump -maxdepth 2 -type 'f' -iregex '.*[0-9]+\.case')
echo "${PLOTS[@]}"
# Build every target with up to 20 parallel jobs; keep partial results so a
# failed run can resume.
snakemake -c 20 --rerun-incomplete --keep-incomplete "${PLOTS[@]}"

View File

@ -0,0 +1,83 @@
# Plot the execution-time traces of four fuzzer variants (state, afl, random,
# graph) for one target: raw scatter + linear trends, overlaid histograms.
library("mosaic")
args = commandArgs(trailingOnly=TRUE)
#myolors=c("#339933","#0066ff","#993300") # green, blue, red
myolors=c("dark green","dark blue","dark red", "yellow") # green, blue, red, yellow
# Defaults allow running interactively without arguments.
if (length(args)==0) {
runtype="timedump"
target="waters"
filename_1=sprintf("%s.png",target)
filename_2=sprintf("%s_maxline.png",target)
filename_3=sprintf("%s_hist.png",target)
} else {
# args: <runtype directory> <target name>
runtype=args[1]
target=args[2]
filename_1=sprintf("%s.png",args[2])
filename_2=sprintf("%s_maxline.png",args[2])
filename_3=sprintf("%s_hist.png",args[2])
# filename_1=args[3]
}
# One whitespace-separated value file per fuzzer variant.
file_1=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_state",runtype,target)
file_2=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_afl",runtype,target)
file_3=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_random",runtype,target)
file_4=sprintf("~/code/FRET/LibAFL/fuzzers/FRET/benchmark/%s/%s_graph",runtype,target)
timetrace <- read.table(file_1, quote="\"", comment.char="")
timetrace_afl <- read.table(file_2, quote="\"", comment.char="")
timetrace_rand <- read.table(file_3, quote="\"", comment.char="")
timetrace_graph <- read.table(file_4, quote="\"", comment.char="")
# Add the iteration index as a second column of each trace.
timetrace[[2]]=seq_len(length(timetrace[[1]]))
timetrace_afl[[2]]=seq_len(length(timetrace_afl[[1]]))
timetrace_rand[[2]]=seq_len(length(timetrace_rand[[1]]))
timetrace_graph[[2]]=seq_len(length(timetrace_graph[[1]]))
names(timetrace)[1] <- "timetrace"
names(timetrace)[2] <- "iter"
names(timetrace_afl)[1] <- "timetrace"
names(timetrace_afl)[2] <- "iter"
names(timetrace_rand)[1] <- "timetrace"
names(timetrace_rand)[2] <- "iter"
names(timetrace_graph)[1] <- "timetrace"
names(timetrace_graph)[2] <- "iter"
# Scatter plot of raw times per iteration, one colour per variant.
png(file=filename_1)
# pdf(file=filename_1,width=8, height=8)
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
# Linear trend lines per variant.
# NOTE(review): no abline for timetrace_graph — confirm whether intentional.
abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
dev.off()
# Overlaid histograms of the same four traces.
png(file=filename_3)
gf_histogram(~ timetrace,data=timetrace, fill=myolors[1]) %>%
gf_histogram(~ timetrace,data=timetrace_afl, fill=myolors[2]) %>%
gf_histogram(~ timetrace,data=timetrace_rand, fill=myolors[3]) %>%
gf_histogram(~ timetrace,data=timetrace_graph, fill=myolors[4])
dev.off()
# trace2maxline: turn a flat numeric trace into its running maximum, i.e.
# element i becomes max(tr[1..i]).
# FIX: the previous hand-written loop iterated over 2:length(tr), which
# produces c(2, 1) — and hence an invalid NA index — for inputs of length < 2.
# cummax() computes exactly the same running maximum and handles vectors of
# length 0 and 1 correctly.
trace2maxline <- function(tr) {
  return(cummax(tr))
}
# Replace each raw trace by its running maximum and plot the max-lines.
timetrace[[1]] <- trace2maxline(timetrace[[1]])
timetrace_afl[[1]] <- trace2maxline(timetrace_afl[[1]])
timetrace_rand[[1]] <- trace2maxline(timetrace_rand[[1]])
timetrace_graph[[1]] <- trace2maxline(timetrace_graph[[1]])
png(file=filename_2)
plot(timetrace[[2]],timetrace[[1]], col=myolors[1], xlab="iters", ylab="wcet", pch='.')
points(timetrace_afl[[2]],timetrace_afl[[1]], col=myolors[2], pch='.')
points(timetrace_rand[[2]],timetrace_rand[[1]], col=myolors[3], pch='.')
points(timetrace_graph[[2]],timetrace_graph[[1]], col=myolors[4], pch='.')
#abline(lm(timetrace ~ iter, data=timetrace),col=myolors[1])
#abline(lm(timetrace ~ iter, data=timetrace_afl),col=myolors[2])
#abline(lm(timetrace ~ iter, data=timetrace_rand),col=myolors[3])
dev.off()

View File

@ -0,0 +1,234 @@
# install.packages(c("mosaic", "dplyr", "DBI", "tikzDevice", "colorspace", "heatmaply", "RColorBrewer", "RSQLite"))
# Plot, per benchmark case, FRET's improvement over its strongest competitor
# from the per-tool tables stored in bench.sqlite (see number_cruncher).
library("mosaic")
library("dplyr")
library("DBI")
library("tikzDevice") # Add this line to include the tikzDevice library
library("colorspace")
library("heatmaply")
library("RColorBrewer")
args = commandArgs(trailingOnly=TRUE)
# Map internal tool identifiers to paper-facing names.
TOOL_TRANSLATION <- list(
feedgeneration100 = "evolution",
frafl = "coverage",
random = "random",
stgwoet = "FRET"
)
# Manually determined worst-case response times (all zeroed-out here, i.e.
# no reference line is drawn in this variant of the script).
KNOWN_WCRT <- list(
waters_seq_bytes=0, # via INSERT_WC
waters_seq_int=0, # via INSERT_WC + manual interrupt
#waters_seq_int=219542, # via INSERT_WC + manual interrupt
waters_seq_full=0,# via INSERT_WC + manual interrupt
waters_seq_unsync_full=0,# via INSERT_WC + manual interrupt
polycopter_seq_dataflow_full=0, # via INSERT_WC + manual interrupt
polycopter_seq_dataflow_int=0, # via INSERT_WC + manual interrupt
release_seq_int=0, # via fuzzer, equals to manual interrupts; Bug: Task3 y=0
release_seq_full=0 # via INSERT_WC + manual interrupt; Bug: Task3 y=0
)
# Static analysis bounds (instruction counts); overwritten with zeroes below.
STATIC_WCRT <- list(
waters_seq_bytes=256632,
waters_seq_int=256632,
waters_seq_full=256632,
waters_seq_unsync_full=272091,
polycopter_seq_dataflow_full=373628,
polycopter_seq_dataflow_int=373628,
release_seq_int=921360,
release_seq_full=921360
)
# ISNS_PER_US = (10**3)/(2**5)
# print(list(sapply(STATIC_WCRT, function(x) x/ISNS_PER_US)))
# quit()
# NOTE(review): this second assignment deliberately disables the static
# bounds defined just above — confirm that is intended.
STATIC_WCRT <- list(
waters_seq_bytes=0,
waters_seq_int=0,
waters_seq_full=0,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=0,
release_seq_full=0
)
# Lower y-axis clamp per case (unused at 0).
MIN_Y <- list(
waters_seq_bytes=0,
waters_seq_int=0,
waters_seq_full=0,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=0,
release_seq_full=0
)
# Legend placement per case.
LEG_POS <- list(
waters_seq_bytes="bottomright",
waters_seq_int="bottomright",
waters_seq_full="bottomright",
waters_seq_unsync_full="bottomright",
polycopter_seq_dataflow_full="bottomright",
polycopter_seq_dataflow_int="bottomright",
release_seq_int="bottomright",
release_seq_full="bottomright"
)
# Per-task display names for the waters interrupt-chain cases.
NAME_MAP <- list(
watersIc11_seq_full="t1 10ms",
watersIc12_seq_full="t2 10ms",
watersIc13_seq_full="t3 10ms",
watersIc14_seq_full="t4 10ms",
watersIc31_seq_full="t5 spro",
watersIc32_seq_full="t6 2ms",
watersIc33_seq_full="t7 50ms",
watersIc21_seq_full="t9 100ms",
watersIc22_seq_full="t10 10ms",
watersIc23_seq_full="t11 2ms"
)
# Read the first command line argument as an sqlite file
if (length(args) > 0) {
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
} else {
print("No sqlite file provided, assume defaults")
args = c("bench.sqlite", "remote")
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
}
# The combos table maps (casename, toolname) to the per-run table name.
combos <- dbGetQuery(con, "SELECT * FROM combos")
casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename")
toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname")
# Convert a (value, timestamp) table into step-plot coordinates: each row
# contributes a horizontal segment from the previous timestamp to its own,
# at its value. `casename` is accepted for interface compatibility but unused.
ml2lines <- function(ml, casename) {
  pts <- NULL
  prev_x <- 0
  for (row in seq_len(dim(ml)[1])) {
    y <- ml[row, 1]
    x <- ml[row, 2]
    pts <- rbind(pts, cbind(X = prev_x, Y = y), cbind(X = x, Y = y))
    prev_x <- x
  }
  return(pts)
}
# BREW=RdYlGn(8)
# One colour per case from the Spectral palette.
BREW=Spectral(8)
# MY_COLORS <- c(BREW[[4]], BREW[[3]], BREW[[2]], BREW[[1]], "cyan", "pink", "gray", "orange", "black", "yellow","brown")
MY_COLORS=BREW
# draw limit
# Fixed axis limits: 12 h on x, +/-2500 µs difference on y.
max_x <- 12
min_y <- -2500
max_y <- 2500
LEGEND_POS = "bottomright"
# Instructions per microsecond of the emulated platform.
ISNS_PER_US = (10**3)/(2**5)
print(casenames[['casename']])
# Use the pretty per-task name where available, the raw casename otherwise.
legend_names <- sapply(casenames[['casename']], function(x) NAME_MAP[[x]] %||% x)
legend_colors <- BREW
legend_styles <- c(rep("solid",10),"dotted","dashed")
h_ = 300
w_ = h_*4/3
png(file=sprintf("%s/all_tasks.png", args[2]), width=w_, height=h_)
#tikz(file=sprintf("%s/all_tasks.tex", args[2]), width=0.6*w_/72, height=0.6*h_/72)
#pdf(file=sprintf("%s/all_tasks.pdf", args[2]), width=w_/72, height=h_/72)
# plot setup
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
# Empty canvas; the per-case curves are added by draw_plot below.
plot(c(0,max_x),c(min_y,max_y), col='white', xlab="Time [h]", ylab="FRET's improvement over competitors [µs]", pch='.')
# draw_plot: for one case, draw FRET's advantage over the better of the two
# strongest competitors (evolution, coverage) as step lines onto the current
# plot: solid = difference of running maxima, dashed = difference of medians.
# `data` is a named list of per-tool tables (already unit-converted).
draw_plot <- function(data, casename, color) {
# evo, cov, random, fret
# Pre-calculate all malines and medlines
malines_list <- list()
medlines_list <- list()
for (n in seq_along(data)) {
d <- data[[n]]
malines_list[[names(data)[n]]] <- ml2lines(d[c('max','timestamp')])
medlines_list[[names(data)[n]]] <- ml2lines(d[c('median','timestamp')])
}
# Plot the difference between malines['stgwoet'] (FRET) and malines['random']
# NOTE(review): the comment above says 'random', but the code compares FRET
# against max(feedgeneration100, frafl) — the code appears to be current.
if ("stgwoet" %in% names(malines_list) && "feedgeneration100" %in% names(malines_list)) {
fret_malines <- malines_list[["stgwoet"]]
compare_malines1 <- malines_list[["feedgeneration100"]]
compare_malines2 <- malines_list[["frafl"]]
fret_medlines <- medlines_list[["stgwoet"]]
compare_medlines1 <- medlines_list[["feedgeneration100"]]
compare_medlines2 <- medlines_list[["frafl"]]
# Ensure all have the same number of rows and matching X
min_len <- min(nrow(fret_malines), nrow(compare_malines1), nrow(compare_malines2))
# For each point, take the max of the two compare malines
compare_max_Y <- pmax(compare_malines1[1:min_len, "Y"], compare_malines2[1:min_len, "Y"])
diff_lines_ma <- data.frame(
X = fret_malines[1:min_len, "X"],
Y = fret_malines[1:min_len, "Y"] - compare_max_Y
)
lines(diff_lines_ma, col=color, lty="solid", lwd=2)
# Same for medlines
compare_max_med_Y <- pmax(compare_medlines1[1:min_len, "Y"], compare_medlines2[1:min_len, "Y"])
diff_lines_med <- data.frame(
X = fret_medlines[1:min_len, "X"],
Y = fret_medlines[1:min_len, "Y"] - compare_max_med_Y
)
lines(diff_lines_med, col=color, lty="dashed", lwd=2)
}
}
# Main loop: one colour per case. Load every tool's table for the case,
# convert units, and draw the difference curves onto the shared canvas.
for (i in seq_len(length(casenames[['casename']]))) {
cn =casenames[['casename']][i]
color = MY_COLORS[i]
tables <- dbGetQuery(con, sprintf("SELECT * FROM combos WHERE casename == '%s'", cn[[1]]))
table_list <- list()
for (row in 1:nrow(tables)) {
table_name <- tables[row, 'fullname']
tool_name <- tables[row, 'toolname']
table_data <- dbGetQuery(con, sprintf("SELECT * FROM '%s'", table_name))
table_list[[tool_name]] <- table_data
}
# Convert timestamp from microseconds to hours
# NOTE(review): divisor 3600000 is ms->h; confirm the stored unit (the
# comment above says µs) against the producer in number_cruncher.
for (n in seq_len(length(table_list))) {
table_list[[n]]$timestamp <- table_list[[n]]$timestamp / 3600000
table_list[[n]]$min <- table_list[[n]]$min / ISNS_PER_US
table_list[[n]]$max <- table_list[[n]]$max / ISNS_PER_US
table_list[[n]]$median <- table_list[[n]]$median / ISNS_PER_US
table_list[[n]]$mean <- table_list[[n]]$mean / ISNS_PER_US
table_list[[n]]$sdiv <- table_list[[n]]$sdiv / ISNS_PER_US
}
table_list <- table_list[c('stgwoet', 'feedgeneration100', 'frafl', 'random')] # manual re-order
table_list <- table_list[!sapply(table_list, is.null)] # remove NULL entries
draw_plot(table_list, cn[[1]], color)
}
legend(LEGEND_POS, legend=legend_names,#"bottomright",
col=legend_colors,
lty=legend_styles,
lwd=2, ncol=2)
par(las = 2, mar = c(10, 5, 1, 1))
# png
## normal
dev.off()
dbDisconnect(con)

View File

@ -0,0 +1,340 @@
# install.packages(c("mosaic", "dplyr", "foreach", "doParallel"))
library("mosaic")
library("dplyr")
library("foreach")
library("doParallel")
#setup parallel backend to use many processors
cores=detectCores()
cl <- makeCluster(cores[1]-4) #not to overload your computer
registerDoParallel(cl)
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
runtype="remote"
#target="waters"
target="waters"
#target="waters_int"
#target="watersv2_int"
outputpath="../benchmark"
#MY_SELECTION <- c('state', 'afl', 'graph', 'random')
SAVE_FILE=TRUE
} else {
runtype=args[1]
target=args[2]
outputpath=args[3]
#MY_SELECTION <- args[4:length(args)]
#if (length(MY_SELECTION) == 0)
# MY_SELECTION<-NULL
SAVE_FILE=TRUE
print(runtype)
print(target)
print(outputpath)
}
worst_cases <- list(waters=0, waters_int=0, tmr=405669, micro_longint=0, gen3=0, copter_par_full=164311)
worst_case <- worst_cases[[target]]
if (is.null(worst_case)) {
worst_case = 0
}
#MY_COLORS=c("green","blue","red", "orange", "pink", "black")
MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
BENCHDIR=sprintf("../benchmark/%s",runtype)
BASENAMES=Filter(function(x) x!="" && substr(x,1,1)!='.',list.dirs(BENCHDIR,full.names=FALSE))
PATTERNS="%s#[0-9]*.time$"
#RIBBON='sd'
#RIBBON='span'
RIBBON='both'
DRAW_WC = worst_case > 0
LEGEND_POS="bottomright"
#LEGEND_POS="bottomright"
CONTINUE_LINE_TO_END=FALSE
# https://www.r-bloggers.com/2013/04/how-to-change-the-alpha-value-of-colours-in-r/
# Apply an alpha channel to each colour in `col`, returning hex RGBA strings.
alpha <- function(col, alpha = 1) {
  if (missing(col))
    stop("Please provide a vector of colours.")
  # One column per input colour, channels scaled to [0, 1].
  channels <- sapply(col, col2rgb) / 255
  apply(channels, 2, function(ch) rgb(ch[1], ch[2], ch[3], alpha = alpha))
}
# Trim a list of data frames to a common length (the shortest frame's row
# count, unless `len` is given explicitly).
trim_data <- function(input,len=NULL) {
if (is.null(len)) {
len <- min(sapply(input, function(v) dim(v)[1]))
}
return(lapply(input, function(d) slice_head(d,n=len)))
}
# Row count of the shortest data frame in the list.
length_of_data <- function(input) {
min(sapply(input, function(v) dim(v)[1]))
}
# trace2maxline: turn a flat numeric trace into its running maximum, i.e.
# element i becomes max(tr[1..i]).
# FIX: the previous hand-written loop iterated over 2:length(tr), which
# produces c(2, 1) — and hence an invalid NA index — for inputs of length < 2.
# cummax() computes exactly the same running maximum and handles vectors of
# length 0 and 1 correctly.
trace2maxline <- function(tr) {
  return(cummax(tr))
}
# Take a list of data frames, output same form but maxlines
data2maxlines <- function(tr) {
# NOTE(review): min_length is computed but never used — dead code?
min_length <- min(sapply(tr, function(v) dim(v)[1]))
maxline <- tr
for (var in seq_len(length(tr))) {
maxline[[var]][[1]]=trace2maxline(tr[[var]][[1]])
}
return(maxline)
}
# Take a multi-column data frame, output same form but maxlines
frame2maxlines <- function(tr) {
for (var in seq_len(length(tr))) {
tr[[var]]=trace2maxline(tr[[var]])
}
return(tr)
}
# Reduce a (value, time) trace to the points where the running maximum grows,
# plus a final point carrying the overall maximum at the last timestamp.
trace2maxpoints <- function(tr) {
minval = tr[1,1]
collect = tr[1,]
for (i in seq_len(dim(tr)[1])) {
if (minval < tr[i,1]) {
collect = rbind(collect,tr[i,])
minval = tr[i,1]
}
}
tmp = tr[dim(tr)[1],]
tmp[1] = minval[1]
collect = rbind(collect,tmp)
return(collect)
}
# Resample a reduced max-point trace `tr` (columns: value, time) at the given
# sorted time points `po`: for each p, emit the value in force at time p
# (step-function semantics). `index` memoises the last matched segment, so
# scanning a sorted `po` stays roughly linear overall.
sample_maxpoints <- function(tr, po) {
  index = 1
  collect = NULL
  endpoint = dim(tr)[1]
  for (p in po) {
    if (p <= tr[1, 2]) {
      # At or before the first recorded time: current segment's value.
      tmp = tr[index, ]
      tmp[2] = p
      collect = rbind(collect, tmp)
    } else if (p >= tr[endpoint, 2]) {
      # At or past the last recorded time: the final value holds.
      tmp = tr[endpoint, ]
      tmp[2] = p
      collect = rbind(collect, tmp)
    } else {
      # Find segment i with tr[i,2] <= p < tr[i+1,2].
      # FIX: the original loop used `seq(index, endpoint) - 1`, which starts
      # at index-1 and indexes row 0 on the first pass — that only "worked"
      # because `&&` tolerated a zero-length operand (an error in R >= 4.3).
      # The intended upper bound is endpoint - 1.
      for (i in seq(index, endpoint - 1)) {
        if (p >= tr[i, 2] && p < tr[i + 1, 2]) {
          tmp = tr[i, ]
          tmp[2] = p
          collect = rbind(collect, tmp)
          index = i
          break
        }
      }
    }
  }
  return(collect)
}
#https://www.r-bloggers.com/2012/01/parallel-r-loops-for-windows-and-linux/
# Load every variant's .time traces and reduce each to its max-points.
all_runtypetables <- foreach (bn=BASENAMES) %do% {
runtypefiles <- list.files(file.path(BENCHDIR,bn),pattern=sprintf(PATTERNS,target),full.names = TRUE)
if (length(runtypefiles) > 0) {
runtypetables_reduced <- foreach(i=seq_len(length(runtypefiles))) %dopar% {
rtable = read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i)))
trace2maxpoints(rtable)
}
#runtypetables <- lapply(seq_len(length(runtypefiles)),
# function(i)read.csv(runtypefiles[[i]], col.names=c(sprintf("%s%d",bn,i),sprintf("times%d",i))))
#runtypetables_reduced <- lapply(runtypetables, trace2maxpoints)
runtypetables_reduced
#all_runtypetables = c(all_runtypetables, list(runtypetables_reduced))
}
}
# Drop variants without any matching trace files.
all_runtypetables = all_runtypetables[lapply(all_runtypetables, length) > 0]
# Earliest common end time per variant, in hours (variant name is recovered
# by stripping the trailing run index from the first column name).
all_min_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
ret = data.frame(min(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
ret/(3600 * 1000)
}
# Latest end time per variant, in hours.
all_max_points = foreach(rtt=all_runtypetables,.combine = cbind) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
ret = data.frame(max(unlist(lapply(rtt, function(v) v[dim(v)[1],2]))))
names(ret)[1] = bn
ret/(3600 * 1000)
}
# Union of all sample timestamps across every run of every variant.
all_points = sort(unique(Reduce(c, lapply(all_runtypetables, function(v) Reduce(c, lapply(v, function(w) w[[2]]))))))
# Resample each run at the common timestamps and aggregate per variant into
# mean/sd/min/max/median columns.
all_maxlines <- foreach (rtt=all_runtypetables) %do% {
bn = substr(names(rtt[[1]])[1],1,nchar(names(rtt[[1]])[1])-1)
runtypetables_sampled = foreach(v=rtt) %dopar% {
sample_maxpoints(v, all_points)[1]
}
#runtypetables_sampled = lapply(rtt, function(v) sample_maxpoints(v, all_points)[1])
tmp_frame <- Reduce(cbind, runtypetables_sampled)
statframe <- data.frame(rowMeans(tmp_frame),apply(tmp_frame, 1, sd),apply(tmp_frame, 1, min),apply(tmp_frame, 1, max), apply(tmp_frame, 1, median))
names(statframe) <- c(bn, sprintf("%s_sd",bn), sprintf("%s_min",bn), sprintf("%s_max",bn), sprintf("%s_med",bn))
#statframe[sprintf("%s_times",bn)] = all_points
round(statframe)
#all_maxlines = c(all_maxlines, list(round(statframe)))
}
# Single wide frame: one column group per variant plus a shared 'time' axis.
one_frame<-data.frame(all_maxlines)
one_frame[length(one_frame)+1] <- all_points/(3600 * 1000)
names(one_frame)[length(one_frame)] <- 'time'
# Variant base names (mean columns); y-range includes min/max columns.
typenames = names(one_frame)[which(names(one_frame) != 'time')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
ylow=min(one_frame[typenames])
yhigh=max(one_frame[typenames],worst_case)
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
# Build step-plot segments from a (value, time) table, stopping once the time
# exceeds `lim` unless the global CONTINUE_LINE_TO_END requests the full range.
ml2lines <- function(ml, lim) {
  segs <- NULL
  prev_x <- 0
  for (row in seq_len(dim(ml)[1])) {
    y <- ml[row, 1]
    x <- ml[row, 2]
    if (!CONTINUE_LINE_TO_END && lim < x) {
      break
    }
    segs <- rbind(segs, cbind(X = prev_x, Y = y), cbind(X = x, Y = y))
    prev_x <- x
  }
  return(segs)
}
# plotting: draw the aggregated curves of the selected variants into one PNG.
# Median lines are solid; depending on the RIBBON global, the min/max spread
# is drawn as shaded boxes ('both'/'span') or a dashed max line ('max').
plotting <- function(selection, filename, MY_COLORS_) {
# filter out names of iters and sd cols
typenames = names(one_frame)[which(names(one_frame) != 'times')]
typenames = typenames[which(!endsWith(typenames, "_sd"))]
typenames = typenames[which(!endsWith(typenames, "_med"))]
typenames = typenames[which(!endsWith(typenames, "_min"))]
typenames = typenames[which(!endsWith(typenames, "_max"))]
typenames = selection[which(selection %in% typenames)]
if (length(typenames) == 0) {return()}
h_ = 380
w_ = h_*4/3
if (SAVE_FILE) {png(file=sprintf("%s/%s_%s.png",outputpath,target,filename), width=w_, height=h_)}
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
# Empty canvas covering the full time range and precomputed y-range.
plot(c(0,max(one_frame['time'])),c(ylow,yhigh), col='white', xlab="Time [h]", ylab="WCRT estimate [insn]", pch='.')
for (t in seq_len(length(typenames))) {
#proj = one_frame[seq(1, dim(one_frame)[1], by=max(1, length(one_frame[[1]])/(10*w_))),]
#points(proj[c('iters',typenames[t])], col=MY_COLORS_[t], pch='.')
# Each variant's lines are cut off at its own last data point.
avglines = ml2lines(one_frame[c(typenames[t],'time')],all_max_points[typenames[t]])
#lines(avglines, col=MY_COLORS_[t])
medlines = ml2lines(one_frame[c(sprintf("%s_med",typenames[t]),'time')],all_max_points[typenames[t]])
lines(medlines, col=MY_COLORS_[t], lty='solid')
milines = NULL
malines = NULL
milines = ml2lines(one_frame[c(sprintf("%s_min",typenames[t]),'time')],all_max_points[typenames[t]])
malines = ml2lines(one_frame[c(sprintf("%s_max",typenames[t]),'time')],all_max_points[typenames[t]])
if (exists("RIBBON") && ( RIBBON=='max' )) {
#lines(milines, col=MY_COLORS_[t], lty='dashed')
lines(malines, col=MY_COLORS_[t], lty='dashed')
#points(proj[c('iters',sprintf("%s_min",typenames[t]))], col=MY_COLORS_[t], pch='.')
#points(proj[c('iters',sprintf("%s_max",typenames[t]))], col=MY_COLORS_[t], pch='.')
}
if (exists("RIBBON") && RIBBON != '') {
# Shade the min..max envelope segment by segment.
for (i in seq_len(dim(avglines)[1]-1)) {
if (RIBBON=='both') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
if (FALSE && RIBBON=='span') {
# draw boxes
x_l <- milines[i,][['X']]
x_r <- milines[i+1,][['X']]
y_l <- milines[i,][['Y']]
y_h <- malines[i,][['Y']]
rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
}
#if (FALSE && RIBBON=='both' || RIBBON=='sd') {
# # draw sd
# x_l <- avglines[i,][['X']]
# x_r <- avglines[i+1,][['X']]
# y_l <- avglines[i,][['Y']]-one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# y_h <- avglines[i,][['Y']]+one_frame[ceiling(i/2),][[sprintf("%s_sd",typenames[t])]]
# if (x_r != x_l) {
# rect(x_l, y_l, x_r, y_h, col=alpha(MY_COLORS_[t], alpha=0.1), lwd=0)
# }
#}
#sd_ <- row[sprintf("%s_sd",typenames[t])][[1]]
#min_ <- row[sprintf("%s_min",typenames[t])][[1]]
#max_ <- row[sprintf("%s_max",typenames[t])][[1]]
#if (exists("RIBBON")) {
# switch (RIBBON,
# 'sd' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03)),
# 'both' = arrows(x_, y_-sd_, x_, y_+sd_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.05)),
# 'span' = #arrows(x_, min_, x_, max_, length=0, angle=90, code=3, col=alpha(MY_COLORS_[t], alpha=0.03))
# )
#}
##arrows(x_, y_-sd_, x_, y_+sd_, length=0.05, angle=90, code=3, col=alpha(MY_COLORS[t], alpha=0.1))
}
}
}
leglines=typenames
# Optional dotted reference line for the known worst case.
if (DRAW_WC) {
lines(c(0,length(one_frame[[1]])),y=c(worst_case,worst_case), lty='dotted')
leglines=c(typenames, 'worst observed')
}
legend(LEGEND_POS, legend=leglines,#"bottomright",
col=c(MY_COLORS_[1:length(typenames)],"black"),
lty=c(rep("solid",length(typenames)),"dotted"))
if (SAVE_FILE) {
dev.new()
par(las = 2, mar = c(10, 5, 1, 1))
dev.off()
}
}
# All parallel work is done; shut the worker cluster down before plotting.
stopCluster(cl)
par(mar=c(3.8,3.8,0,0))
par(oma=c(0,0,0,0))
#RIBBON='both'
#MY_SELECTION = c('state_int','generation100_int')
#MY_SELECTION = c('state','frafl')
# With an explicit selection draw a single combined plot; otherwise draw one
# plot per variant plus an 'all' overview.
if (exists("MY_SELECTION")) {
plotting(MY_SELECTION, 'custom', MY_COLORS[c(1,2)])
} else {
# MY_SELECTION=c('state', 'afl', 'random', 'feedlongest', 'feedgeneration', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'afl_int', 'random_int', 'feedlongest_int', 'feedgeneration_int', 'feedgeneration10_int')
#MY_SELECTION=c('state', 'frAFL', 'statenohash', 'feedgeneration10')
#MY_SELECTION=c('state_int', 'frAFL_int', 'statenohash_int', 'feedgeneration10_int')
MY_SELECTION=typenames
RIBBON='both'
for (i in seq_len(length(MY_SELECTION))) {
n <- MY_SELECTION[i]
plotting(c(n), n, c(MY_COLORS[i]))
}
RIBBON='max'
plotting(MY_SELECTION,'all', MY_COLORS)
}
# Print (mean - median) / sd of each variant's final row.
# NOTE(review): labelled "pearson" — this resembles Pearson's skewness
# coefficient (sans factor 3), not a correlation; confirm intent.
for (t in seq_len(length(typenames))) {
li = one_frame[dim(one_frame)[1],]
pear = (li[[typenames[[t]]]]-li[[sprintf("%s_med",typenames[[t]])]])/li[[sprintf("%s_sd",typenames[[t]])]]
print(sprintf("%s pearson: %g",typenames[[t]],pear))
}

View File

@ -0,0 +1,237 @@
# install.packages(c("mosaic", "dplyr", "DBI", "tikzDevice", "colorspace", "heatmaply", "RColorBrewer", "RSQLite"))
library("mosaic")
library("dplyr")
library("DBI")
library("tikzDevice") # Add this line to include the tikzDevice library
library("colorspace")
library("heatmaply")
library("RColorBrewer")
args = commandArgs(trailingOnly=TRUE)
TOOL_TRANSLATION <- list(
feedgeneration100 = "evolution",
frafl = "coverage",
random = "random",
stgwoet = "FRET"
)
KNOWN_WCRT <- list(
waters_seq_bytes=212252, # via INSERT_WC
waters_seq_int=0, # via INSERT_WC + manual interrupt
#waters_seq_int=219542, # via INSERT_WC + manual interrupt
waters_seq_full=219542,# via INSERT_WC + manual interrupt
waters_seq_unsync_full=234439,# via INSERT_WC + manual interrupt
polycopter_seq_dataflow_full=174866, # via INSERT_WC + manual interrupt
polycopter_seq_dataflow_int=174866, # via INSERT_WC + manual interrupt
release_seq_int=582699, # via fuzzer, equals to manual interrupts; Bug: Task3 y=0
release_seq_full=614583 # via INSERT_WC + manual interrupt; Bug: Task3 y=0
)
STATIC_WCRT <- list(
waters_seq_bytes=256632,
waters_seq_int=256632,
waters_seq_full=256632,
waters_seq_unsync_full=272091,
polycopter_seq_dataflow_full=373628,
polycopter_seq_dataflow_int=373628,
release_seq_int=921360,
release_seq_full=921360
)
# ISNS_PER_US = (10**3)/(2**5)
# print(list(sapply(STATIC_WCRT, function(x) x/ISNS_PER_US)))
# quit()
STATIC_WCRT <- list(
waters_seq_bytes=0,
waters_seq_int=0,
waters_seq_full=0,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=0,
release_seq_full=0
)
MIN_Y <- list(
waters_seq_bytes=5250,
waters_seq_int=5700,
waters_seq_full=5250,
waters_seq_unsync_full=0,
polycopter_seq_dataflow_full=0,
polycopter_seq_dataflow_int=0,
release_seq_int=16500,
release_seq_full=16500
)
LEG_POS <- list(
waters_seq_bytes="bottomright",
waters_seq_int="bottomright",
waters_seq_full="bottomright",
waters_seq_unsync_full="bottomright",
polycopter_seq_dataflow_full="bottomright",
polycopter_seq_dataflow_int="bottomright",
release_seq_int="bottomright",
release_seq_full="bottomright"
)
# Read the first command line argument as an sqlite file
if (length(args) > 0) {
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
} else {
print("No sqlite file provided, assume defaults")
args = c("bench.sqlite", "remote")
sqlite_file <- args[1]
con <- dbConnect(RSQLite::SQLite(), sqlite_file)
}
combos <- dbGetQuery(con, "SELECT * FROM combos")
casenames <- dbGetQuery(con, "SELECT casename FROM combos GROUP BY casename")
toolnames <- dbGetQuery(con, "SELECT toolname FROM combos GROUP BY toolname")
# Turn a (value, timestamp) table into step-plot coordinates: every row adds
# a horizontal segment from the previous timestamp to its own, at its value.
# `casename` is kept for interface compatibility but is not used.
ml2lines <- function(ml, casename) {
  coords <- NULL
  prev_t <- 0
  for (idx in seq_len(dim(ml)[1])) {
    val <- ml[idx, 1]
    ts <- ml[idx, 2]
    coords <- rbind(coords, cbind(X = prev_t, Y = val), cbind(X = ts, Y = val))
    prev_t <- ts
  }
  return(coords)
}
# Four-colour palette: one colour per tool (FRET, evolution, coverage, random).
BREW=RdYlGn(4)
# BREW=Spectral(4)
# draw_plot: render one case's per-tool WORT curves into the active device:
# solid = running maximum, dashed = median; optional grey reference lines for
# the known WCRT and the static bound.
draw_plot <- function(data, casename) {
# evo, cov, random, fret
MY_COLORS <- c(BREW[[4]], BREW[[3]], BREW[[2]], BREW[[1]], "cyan", "pink", "gray", "orange", "black", "yellow","brown")
# MY_COLORS <- c("orange", "blue", "red", "green", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
# MY_COLORS <- c("green", "blue", "red", "magenta", "orange", "cyan", "pink", "gray", "orange", "black", "yellow","brown")
LEGEND_POS=LEG_POS[[casename]]
if (is.null(LEGEND_POS)) {
LEGEND_POS = "bottomright"
}
# Instructions per microsecond of the emulated platform.
ISNS_PER_US = (10**3)/(2**5)
# Convert timestamp from microseconds to hours
# NOTE(review): divisor 3600000 is ms->h; confirm the stored unit (the
# comment above says µs) against the producer in number_cruncher.
for (n in seq_len(length(data))) {
data[[n]]$timestamp <- data[[n]]$timestamp / 3600000
data[[n]]$min <- data[[n]]$min / ISNS_PER_US
data[[n]]$max <- data[[n]]$max / ISNS_PER_US
data[[n]]$median <- data[[n]]$median / ISNS_PER_US
data[[n]]$mean <- data[[n]]$mean / ISNS_PER_US
data[[n]]$sdiv <- data[[n]]$sdiv / ISNS_PER_US
}
data <- data[c('stgwoet', 'feedgeneration100', 'frafl', 'random')] # manual re-order
data <- data[!sapply(data, is.null)] # remove NULL entries
wcrt = KNOWN_WCRT[[casename]]
if (!is.null(wcrt)) {
wcrt = wcrt / ISNS_PER_US
} else {
wcrt = 0
}
static_wcrt = STATIC_WCRT[[casename]]
if (!is.null(static_wcrt)) {
static_wcrt = static_wcrt / ISNS_PER_US
} else {
static_wcrt = 0
}
# draw limits
max_x <- max(sapply(data, function(tbl) max(tbl$timestamp, na.rm = TRUE)))
max_x <- min(max_x, 24) # quick fix, cap to 16h
min_y <- max(wcrt,max(sapply(data, function(tbl) max(tbl$max, na.rm = TRUE))))
max_y <- max(wcrt,max(sapply(data, function(tbl) max(tbl$max, na.rm = TRUE))))
min_y <- min(sapply(data, function(tbl) min(tbl$min, na.rm = TRUE)))
min_y <- max(min_y, MIN_Y[[casename]])
# draw static wcrt
max_y <- max(max_y, static_wcrt)
# plot setup
par(mar=c(4,4,1,1))
par(oma=c(0,0,0,0))
plot(c(0,max_x),c(min_y,max_y), col='white', xlab="Time [h]", ylab="WORT [µs]", pch='.')
# plot data
for (n in seq_len(length(data))) {
d <- data[[n]]
malines = ml2lines(d[c('max','timestamp')])
lines(malines, col=MY_COLORS[[n]], lty='solid', lwd=2) # Increase line width
medlines = ml2lines(d[c('median','timestamp')])
lines(medlines, col=MY_COLORS[[n]], lty='dashed', lwd=2) # Increase line width
# milines = ml2lines(d[c('min','timestamp')])
# lines(milines, col=MY_COLORS[[n]], lty='dashed', lwd=2) # Increase line width
}
legend_names <- sapply(names(data), function(n) TOOL_TRANSLATION[[n]])
legend_colors <- c(MY_COLORS[1:length(data)],"grey","grey")
legend_styles <- c(rep("solid",length(data)),"dotted","dashed")
if (wcrt > 0) {
# abline(h=wcrt, col='grey', lty='dotted', lwd=3)
abline(h=max(wcrt,max(sapply(data, function(tbl) max(tbl$max, na.rm = TRUE)))), col='grey', lty='dotted', lwd=3) # If the manual WCRT was slightly too low
legend_names <- c(legend_names, "WCRT")
}
if (static_wcrt > 0) {
abline(h=static_wcrt, col='grey', lty='dashed', lwd=3)
legend_names <- c(legend_names, "static bound")
}
# legend(LEGEND_POS, legend=legend_names,#"bottomright",
# col=legend_colors,
# lty=legend_styles,
# lwd=2)
par(las = 2, mar = c(10, 5, 1, 1))
}
print(casenames[['casename']])
# Main loop: one PNG per case, containing every tool's curves for that case.
for (cn in casenames[['casename']]) {
tables <- dbGetQuery(con, sprintf("SELECT * FROM combos WHERE casename == '%s'", cn[[1]]))
table_list <- list()
for (row in 1:nrow(tables)) {
table_name <- tables[row, 'fullname']
tool_name <- tables[row, 'toolname']
table_data <- dbGetQuery(con, sprintf("SELECT * FROM '%s'", table_name))
table_list[[tool_name]] <- table_data
}
h_ = 300
w_ = h_*4/3
# png
## normal
png(file=sprintf("%s/sql_%s.png", args[2],cn[[1]]), width=w_, height=h_)
draw_plot(table_list, cn[[1]])
dev.off()
# ## wide
# png(file=sprintf("%s/sql_%s_wide.png", args[2],cn[[1]]), width=2*w_, height=h_)
# draw_plot(table_list, cn[[1]])
# dev.off()
# # tikz
# ## normal
# tikz(file=sprintf("%s/sql_%s.tex", args[2],cn[[1]]), width=0.6*w_/72, height=0.6*h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
# ## wide
# tikz(file=sprintf("%s/sql_%s_wide.tex", args[2],cn[[1]]), width=(w_*2)/72, height=h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
# # pdf
# ## normal
# pdf(file=sprintf("%s/sql_%s.pdf", args[2],cn[[1]]), width=w_/72, height=h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
# ## wide
# pdf(file=sprintf("%s/sql_%s_wide.pdf", args[2],cn[[1]]), width=2*w_/72, height=h_/72)
# draw_plot(table_list, cn[[1]])
# dev.off()
}
dbDisconnect(con)

View File

@ -0,0 +1,23 @@
#!/usr/bin/env Rscript
# Load necessary libraries
library(ggplot2)
# Define the function to load CSV and plot
# Plot the STG node count over runtime for one .stgsize file.
# The CSV is headerless: column V2 is the node count, V5 the runtime in
# milliseconds. The chart is saved as <basename>_nodes.png.
plot_stgsize <- function(file_path) {
  print(file_path)
  # Read the CSV file without headers
  data <- read.csv(file_path, header = FALSE)
  # Convert runtime from milliseconds to hours
  data['V5'] <- data['V5']/(3600*1000)
  # Plot the line chart
  p <- ggplot(data, aes(x = V5, y = V2)) +
    geom_line() +
    labs(x = "runtime [h]", y = "# of nodes") +
    theme_minimal()
  # paste0() with a single literal argument was redundant.
  output_file <- sub("\\.stgsize$", "_nodes.png", file_path)
  # NOTE(review): basename() drops the directory, so the PNG lands in the
  # current working directory, not next to the input file -- confirm intended.
  ggsave(basename(output_file), plot = p + theme_bw(base_size = 10), width = 3.5, height = 2, dpi = 300, units = "in", device = "png")
}
# Entry point: the first CLI argument is the path to a .stgsize file.
cli_args <- commandArgs(trailingOnly = TRUE)
plot_stgsize(cli_args[1])

View File

@ -0,0 +1,33 @@
#!/usr/bin/env Rscript
library(ggplot2)
# Function to plot multiple files
# Plot STG node counts over runtime for several .stgsize files in one
# chart, one colored line per application (filename prefix before '_').
plot_multiple_files <- function(file_paths) {
  # Read every headerless CSV first and bind once at the end:
  # growing a data.frame with rbind inside the loop is quadratic.
  frames <- lapply(file_paths, function(file_path) {
    data <- read.csv(file_path, header = FALSE)
    # Convert runtime from milliseconds to hours
    data['V5'] <- data['V5']/(3600*1000)
    # Extract the name for the line
    data$application <- sub("_.*", "", basename(file_path))
    data
  })
  all_data <- do.call(rbind, frames)
  # Plot the line chart
  p <- ggplot(all_data, aes(x = V5, y = V2, color = application)) +
    geom_line() +
    labs(x = "runtime [h]", y = "# of nodes") +
    theme_minimal()
  # Save the plot
  ggsave("stg_node_sizes.png", plot = p + theme_bw(base_size = 10), width = 4, height = 1.5, dpi = 300, units = "in", device = "png")
}
# Entry point: each CLI argument is a .stgsize file to include in the plot.
plot_multiple_files(commandArgs(trailingOnly = TRUE))

52
fuzzers/FRET/benchmark/sem.sh Executable file
View File

@ -0,0 +1,52 @@
#!/usr/bin/env bash
# A generic counting semaphore in bash.
# Usage:
#   sem.sh <countfile> reset <n>   -- (re)initialize the counter to n
#   sem.sh <countfile> lock        -- block until a slot is free, then take it
#   sem.sh <countfile> release     -- give a slot back
# The counter lives in <countfile>; the read-modify-write is serialized by
# mkdir on <countfile>_lockdir (mkdir is atomic on POSIX filesystems).
set -e
if [[ $2 = "reset" ]]; then
  if [[ ! "$3" -gt "0" ]]; then echo "Parameter 3: Needs to be a number"; exit 1; fi
  rm -f "$1"
  [[ -d "$1_lockdir" ]] && rmdir "$1_lockdir"
  echo "$3" > "$1"
  exit 0
fi
if [[ ! -f $1 ]]; then echo "Parameter 1: File Does not exist"; exit 1; fi
if [[ $2 != "lock" ]] && [[ $2 != "release" ]] && [[ $2 != "reset" ]]; then echo "Parameter 2: must be lock, release or reset"; exit 1; fi
if [[ $2 = "lock" ]]; then
  SEM=''
  while [[ -z $SEM ]]; do
    # Cheap pre-check without the mutex: don't contend while the counter is 0.
    if (( $(cat "$1") == 0 )); then sleep 1; continue; fi
    if mkdir "$1_lockdir" > /dev/null 2>&1 ; then
      VAL=$(cat "$1")
      if (( $VAL > 0 ))
      then
        SEM=$(sed -i "s@$VAL@$(( $VAL - 1))@w /dev/stdout" "$1")
        echo "Take $VAL -> $SEM"
      fi
      # BUGFIX: always release the mutex before retrying. The old code slept
      # while still holding the lockdir when the counter had dropped to 0
      # between the pre-check and mkdir, deadlocking every other process
      # (release also needs the lockdir to increment the counter).
      rmdir "$1_lockdir"
      if [[ -z $SEM ]]; then sleep 1; fi
    else
      sleep 0.5
    fi
  done
else
  SEM=''
  while [[ -z $SEM ]]; do
    if mkdir "$1_lockdir" > /dev/null 2>&1 ; then
      VAL=$(cat "$1")
      SEM=$(sed -i "s@$VAL@$(( $VAL + 1))@w /dev/stdout" "$1")
      echo "Give $VAL -> $(( $VAL + 1 ))"
      rmdir "$1_lockdir"
    else
      sleep 0.1
    fi
  done
fi

View File

@ -0,0 +1,60 @@
kernel,main_function,input_symbol,input_size,return_function,select_task,interrupts
waters_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_seq_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_seq_bytes,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,
waters_par_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_par_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_par_bytes,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,
waters_seq_stateful_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_seq_stateful_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_seq_stateful_bytes,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,
waters_par_stateful_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_par_stateful_int,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
waters_par_stateful_bytes,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,
release_seq_full,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_seq_full,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_seq_int,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_seq_bytes,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,
release_par_full,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_par_int,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_par_bytes,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,
release_seq_stateful_full,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_seq_stateful_int,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_seq_stateful_bytes,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,
release_par_stateful_full,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_par_stateful_int,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,0#30000;1#5000
release_par_stateful_bytes,main_release,FUZZ_INPUT,4096,trigger_Qemu_break,T3,
copter_seq_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_seq_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_seq_int,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_seq_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,
copter_par_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_par_int,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_par_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,
copter_seq_stateful_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_seq_stateful_int,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_seq_stateful_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,
copter_par_stateful_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_par_stateful_int,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_par_stateful_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,
copter_seq_dataflow_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
copter_par_dataflow_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
polycopter_par_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
polycopter_seq_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
polycopter_par_dataflow_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
polycopter_seq_dataflow_full,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,0#20000
polycopter_seq_dataflow_bytes,main_osek,FUZZ_INPUT,4096,trigger_Qemu_break,FC,
watersc14_par_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C14,0#1000
watersc14_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C14,0#1000
waters_seq_unsync_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
watersgen1_par_bytes,main_waters,FUZZ_INPUT,40960,trigger_Qemu_break,T_24,0#10000;1#10000;2#10000;3#10000
watersIc11_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C11,0#1000
watersIc12_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C12,0#1000
watersIc13_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C13,0#1000
watersIc14_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C14,0#1000
watersIc21_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C21,0#1000
watersc22_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C22,0#1000
watersIc23_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C23,0#1000
watersIc31_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C31,0#1000
watersIc32_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C32,0#1000
watersIc33_seq_full,main_waters,FUZZ_INPUT,4096,trigger_Qemu_break,C33,0#1000
1 kernel main_function input_symbol input_size return_function select_task interrupts
2 waters_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
3 waters_seq_int main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
4 waters_seq_bytes main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13
5 waters_par_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
6 waters_par_int main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
7 waters_par_bytes main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13
8 waters_seq_stateful_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
9 waters_seq_stateful_int main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
10 waters_seq_stateful_bytes main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13
11 waters_par_stateful_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
12 waters_par_stateful_int main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
13 waters_par_stateful_bytes main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13
14 release_seq_full main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
15 release_seq_full main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
16 release_seq_int main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
17 release_seq_bytes main_release FUZZ_INPUT 4096 trigger_Qemu_break T3
18 release_par_full main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
19 release_par_int main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
20 release_par_bytes main_release FUZZ_INPUT 4096 trigger_Qemu_break T3
21 release_seq_stateful_full main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
22 release_seq_stateful_int main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
23 release_seq_stateful_bytes main_release FUZZ_INPUT 4096 trigger_Qemu_break T3
24 release_par_stateful_full main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
25 release_par_stateful_int main_release FUZZ_INPUT 4096 trigger_Qemu_break T3 0#30000;1#5000
26 release_par_stateful_bytes main_release FUZZ_INPUT 4096 trigger_Qemu_break T3
27 copter_seq_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
28 copter_seq_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
29 copter_seq_int main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
30 copter_seq_bytes main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC
31 copter_par_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
32 copter_par_int main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
33 copter_par_bytes main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC
34 copter_seq_stateful_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
35 copter_seq_stateful_int main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
36 copter_seq_stateful_bytes main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC
37 copter_par_stateful_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
38 copter_par_stateful_int main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
39 copter_par_stateful_bytes main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC
40 copter_seq_dataflow_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
41 copter_par_dataflow_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
42 polycopter_par_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
43 polycopter_seq_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
44 polycopter_par_dataflow_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
45 polycopter_seq_dataflow_full main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC 0#20000
46 polycopter_seq_dataflow_bytes main_osek FUZZ_INPUT 4096 trigger_Qemu_break FC
47 watersc14_par_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C14 0#1000
48 watersc14_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C14 0#1000
49 waters_seq_unsync_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
50 watersgen1_par_bytes main_waters FUZZ_INPUT 40960 trigger_Qemu_break T_24 0#10000;1#10000;2#10000;3#10000
51 watersIc11_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C11 0#1000
52 watersIc12_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C12 0#1000
53 watersIc13_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C13 0#1000
54 watersIc14_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C14 0#1000
55 watersIc21_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C21 0#1000
56 watersc22_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C22 0#1000
57 watersIc23_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C23 0#1000
58 watersIc31_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C31 0#1000
59 watersIc32_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C32 0#1000
60 watersIc33_seq_full main_waters FUZZ_INPUT 4096 trigger_Qemu_break C33 0#1000

2
fuzzers/FRET/example/build.sh Executable file
View File

@ -0,0 +1,2 @@
#!/bin/sh
# Build the bare-metal example for the MPS2 Cortex-M3 board.
arm-none-eabi-gcc \
    -ggdb \
    -ffreestanding -nostartfiles -lgcc \
    -T mps2_m3.ld \
    -mcpu=cortex-m3 \
    main.c startup.c \
    -o example.elf

View File

@ -0,0 +1,38 @@
/* Fuzzer breakpoint sink: spins forever so the emulator can stop here.
 * Declared int to match the call site, but it never returns. */
int BREAKPOINT() {
while (1) {
}
}
/* Fuzz target: a deliberately pathological descending selection-sort.
 * The extra `j--` for small values re-visits positions and inflates the
 * worst-case running time, which is what the timing fuzzer hunts for.
 * Do NOT "fix" the sort -- its inefficiency is the point. */
int LLVMFuzzerTestOneInput(unsigned int* Data, unsigned int Size) {
//if (Data[3] == 0) {while(1){}} // cause a timeout
for (int i=0; i<Size; i++) {
// if (Data[i] > 0xFFd0 && Data[i] < 0xFFFF) {return 1;} // cause qemu to crash
for (int j=i+1; j<Size; j++) {
/* zeros are skipped entirely, so they stay in place */
if (Data[j] == 0) {continue;}
if (Data[j]>Data[i]) {
int tmp = Data[i];
Data[i]=Data[j];
Data[j]=tmp;
/* small values force re-examination of the swapped slot */
if (Data[i] <= 100) {j--;}
}
}
}
/* signal completion to the emulator by entering the breakpoint loop */
return BREAKPOINT();
}
/* 50-element seed input; the fuzzer overwrites this buffer through the
 * FUZZ_INPUT symbol before each run. */
unsigned int FUZZ_INPUT[] = {
101,201,700,230,860,
234,980,200,340,678,
230,134,900,236,900,
123,800,123,658,607,
246,804,567,568,207,
407,246,678,457,892,
834,456,878,246,699,
854,234,844,290,125,
324,560,852,928,910,
790,853,345,234,586,
};
/* Run the fuzz target once on the 50-element seed array. */
int main() {
LLVMFuzzerTestOneInput(FUZZ_INPUT, 50);
return 0;
}

View File

@ -0,0 +1,143 @@
/*
* FreeRTOS V202112.00
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
/* Everything (code and data) is linked into one 4 MiB RAM region at 0,
 * so no flash-to-RAM copy is needed at startup. */
MEMORY
{
RAM (xrw) : ORIGIN = 0x00000000, LENGTH = 4M
/* Originally */
/* FLASH (xr) : ORIGIN = 0x00000000, LENGTH = 4M */
/* RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 4M */
}
ENTRY(Reset_Handler)
_Min_Heap_Size = 0x300000 ; /* Required amount of heap. */
_Min_Stack_Size = 0x4000 ; /* Required amount of stack. */
/* 16 core + 48 external vectors, 4 bytes each */
M_VECTOR_RAM_SIZE = (16 + 48) * 4;
/* Initial stack pointer: top of RAM */
_estack = ORIGIN(RAM) + LENGTH(RAM);
SECTIONS
{
/* Vector table must sit at address 0 (see startup.c isr_vector) */
.isr_vector :
{
__vector_table = .;
KEEP(*(.isr_vector))
. = ALIGN(4);
} > RAM /* FLASH */
.text :
{
. = ALIGN(4);
*(.text*)
KEEP (*(.init))
KEEP (*(.fini))
KEEP(*(.eh_frame))
*(.rodata*)
. = ALIGN(4);
_etext = .;
} > RAM /* FLASH */
.ARM.extab :
{
. = ALIGN(4);
*(.ARM.extab* .gnu.linkonce.armextab.*)
. = ALIGN(4);
} >RAM /* FLASH */
.ARM :
{
. = ALIGN(4);
__exidx_start = .;
*(.ARM.exidx* .gnu.linkonce.armexidx.*)
__exidx_end = .;
. = ALIGN(4);
} >RAM /* FLASH */
/* Reserved space for a RAM copy of the vector table (VTOR relocation) */
.interrupts_ram :
{
. = ALIGN(4);
__VECTOR_RAM__ = .;
__interrupts_ram_start__ = .;
. += M_VECTOR_RAM_SIZE;
. = ALIGN(4);
__interrupts_ram_end = .;
} > RAM
_sidata = LOADADDR(.data);
.data : /* AT ( _sidata ) */
{
. = ALIGN(4);
_sdata = .;
*(.data*)
. = ALIGN(4);
_edata = .;
} > RAM /* RAM AT > FLASH */
/* Deliberately not zeroed at startup (NOLOAD, outside .bss) */
.uninitialized (NOLOAD):
{
. = ALIGN(32);
__uninitialized_start = .;
*(.uninitialized)
KEEP(*(.keep.uninitialized))
. = ALIGN(32);
__uninitialized_end = .;
} > RAM
/* Zeroed by Reset_Handler via _sbss/_ebss */
.bss :
{
. = ALIGN(4);
_sbss = .;
__bss_start__ = _sbss;
*(.bss*)
*(COMMON)
. = ALIGN(4);
_ebss = .;
__bss_end__ = _ebss;
} >RAM
.heap :
{
. = ALIGN(8);
PROVIDE ( end = . );
PROVIDE ( _end = . );
_heap_bottom = .;
. = . + _Min_Heap_Size;
_heap_top = .;
. = . + _Min_Stack_Size;
. = ALIGN(8);
} >RAM
/* Set stack top to end of RAM, and stack limit move down by
* size of stack_dummy section */
__StackTop = ORIGIN(RAM) + LENGTH(RAM);
__StackLimit = __StackTop - _Min_Stack_Size;
PROVIDE(__stack = __StackTop);
/* Check if data + heap + stack exceeds RAM limit */
ASSERT(__StackLimit >= _heap_top, "region RAM overflowed with stack")
}

View File

@ -0,0 +1,114 @@
/*
* FreeRTOS V202112.00
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
*/
typedef unsigned int uint32_t;
extern int main();
extern uint32_t _estack, _sidata, _sdata, _edata, _sbss, _ebss;
/* Prevent optimization so gcc does not replace code with memcpy */
__attribute__( ( optimize( "O0" ) ) )
__attribute__( ( naked ) )
/* First code executed after reset: sets up the stack, zeroes .bss and
 * jumps to _start. Naked: must not rely on a compiler stack frame. */
void Reset_Handler( void )
{
/* set stack pointer */
__asm volatile ( "ldr r0, =_estack" );
__asm volatile ( "mov sp, r0" );
/* copy .data section from flash to RAM */
// Not needed for this example, see linker script
// for( uint32_t * src = &_sidata, * dest = &_sdata; dest < &_edata; )
// {
// *dest++ = *src++;
// }
/* zero out .bss section */
for( uint32_t * dest = &_sbss; dest < &_ebss; )
{
*dest++ = 0;
}
/* jump to board initialisation */
void _start( void );
_start();
}
/* Cortex-M vector table, placed at address 0 via the .isr_vector section
 * (see linker script). Only the initial SP and the reset vector are
 * populated; all other entries are 0. */
const uint32_t * isr_vector[] __attribute__( ( section( ".isr_vector" ) ) ) =
{
( uint32_t * ) &_estack,
( uint32_t * ) &Reset_Handler, /* Reset -15 */
0, /* NMI_Handler -14 */
0, /* HardFault_Handler -13 */
0, /* MemManage_Handler -12 */
0, /* BusFault_Handler -11 */
0, /* UsageFault_Handler -10 */
0, /* reserved */
0, /* reserved */
0, /* reserved */
0, /* reserved -6 */
0, /* SVC_Handler -5 */
0, /* DebugMon_Handler -4 */
0, /* reserved */
0, /* PendSV handler -2 */
0, /* SysTick_Handler -1 */
0, /* uart0 receive 0 */
0, /* uart0 transmit */
0, /* uart1 receive */
0, /* uart1 transmit */
0, /* uart 2 receive */
0, /* uart 2 transmit */
0, /* GPIO 0 combined interrupt */
0, /* GPIO 2 combined interrupt */
0, /* Timer 0 */
0, /* Timer 1 */
0, /* Dial Timer */
0, /* SPI0 SPI1 */
0, /* uart overflow 1, 2,3 */
0, /* Ethernet 13 */
};
/* Terminate QEMU via ARM semihosting SYS_EXIT (bkpt 0xab); loops forever
 * if semihosting is unavailable.
 * NOTE(review): r1 is set to ADP_Stopped_ApplicationExit only for
 * status == 0; a non-zero status is passed through as the raw reason
 * code -- confirm that is intended. */
__attribute__( ( naked ) ) void exit(__attribute__((unused)) int status )
{
/* Force qemu to exit using ARM Semihosting */
__asm volatile (
"mov r1, r0\n"
"cmp r1, #0\n"
"bne .notclean\n"
"ldr r1, =0x20026\n" /* ADP_Stopped_ApplicationExit, a clean exit */
".notclean:\n"
"movs r0, #0x18\n" /* SYS_EXIT */
"bkpt 0xab\n"
"end: b end\n"
);
}
/* Minimal C runtime entry, called from Reset_Handler: run main, then
 * terminate the emulator cleanly. */
void _start( void )
{
main( );
exit( 0 );
}

121
fuzzers/FRET/src/cli.rs Normal file
View File

@ -0,0 +1,121 @@
use clap::{Parser, Subcommand};
use std::path::PathBuf;
// Argument parsing ================================================================================
// Top-level CLI of the FRET fuzzer. The `///` doc comments below are the
// user-visible clap help strings (typos "dumed"/"measurments" fixed).
#[derive(Parser,Debug)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
    /// Kernel Image
    #[arg(short, long, value_name = "FILE")]
    pub kernel: PathBuf,
    /// Sets a custom config file
    #[arg(short, long, value_name = "FILE")]
    pub config: PathBuf,
    /// Sets the prefix of dumped files
    #[arg(short='n', long, value_name = "FILENAME")]
    pub dump_name: Option<PathBuf>,
    /// do time dumps
    #[arg(short='t', long)]
    pub dump_times: bool,
    /// do worst-case dumps
    #[arg(short='a', long)]
    pub dump_cases: bool,
    /// do trace dumps (if supported)
    #[arg(short='r', long)]
    pub dump_traces: bool,
    /// do graph dumps (if supported)
    #[arg(short='g', long)]
    pub dump_graph: bool,
    /// select a task for measurements
    #[arg(short='s', long)]
    pub select_task: Option<String>,
    // Subcommand (showmap / fuzz), see `Commands`.
    #[command(subcommand)]
    pub command: Commands,
}
// Subcommands of the fuzzer; the `///` doc comments double as the clap
// help strings, so they are left untouched.
#[derive(Subcommand,Clone,Debug)]
pub enum Commands {
    /// run a single input
    Showmap {
        /// take this input
        #[arg(short, long)]
        input: PathBuf,
    },
    /// start fuzzing campaign
    Fuzz {
        /// disable heuristic
        #[arg(short, long)]
        random: bool,
        /// seed for randomness
        #[arg(short, long)]
        seed: Option<u64>,
        /// runtime in seconds
        #[arg(short, long)]
        time: Option<u64>,
    }
}
/// Load the target configuration and export it as environment variables.
///
/// Two formats are supported:
/// * a plain `VAR=VAL` file (one assignment per non-empty line),
/// * a `.csv` file where the row whose first column equals the kernel's
///   file stem provides `FUZZ_MAIN`, `FUZZ_INPUT`, `FUZZ_INPUT_LEN` and
///   `BREAKPOINT` (columns 1-4).
///
/// Panics when the file cannot be read or a line/record is malformed;
/// a missing CSV row is only reported on stderr.
pub fn set_env_from_config(kernel : &PathBuf, path : &PathBuf) {
    let is_csv = path.as_path().extension().map_or(false, |x| x=="csv");
    if !is_csv {
        let lines = std::fs::read_to_string(path).expect("Config file not found");
        // Every non-empty line must be of the form VAR=VAL.
        for l in lines.lines().filter(|x| !x.is_empty()) {
            let (var, val) = l.split_once('=').expect("Non VAR=VAL line in config");
            std::env::set_var(var, val);
        }
    } else {
        let mut reader = csv::Reader::from_path(path).expect("CSV read from config failed");
        // The kernel's file stem selects the matching CSV row.
        let stem = kernel.as_path().file_stem().expect("Kernel filename error").to_str().unwrap();
        let mut found = false;
        for r in reader.records() {
            let rec = r.expect("CSV entry error");
            if stem == &rec[0] {
                println!("Config from file {:?}", rec);
                found = true;
                std::env::set_var("FUZZ_MAIN", &rec[1]);
                std::env::set_var("FUZZ_INPUT", &rec[2]);
                std::env::set_var("FUZZ_INPUT_LEN", &rec[3]);
                std::env::set_var("BREAKPOINT", &rec[4]);
                break;
            }
        }
        if !found {
            eprintln!("No config found for kernel {:?}", stem);
        }
    }
}
/// Read the per-kernel interrupt configuration from a CSV config file.
///
/// Column 6 of the row matching the kernel's file stem holds entries of
/// the form `source#time`, separated by `;`. Returns the parsed
/// `(source, time)` pairs, or an empty `Vec` when no row matches.
///
/// Panics when `path` is not a `.csv` file or a record fails to parse.
pub fn get_interrupt_config(kernel : &PathBuf, path : &PathBuf) -> Vec<(usize,u32)>{
    let is_csv = path.as_path().extension().map_or(false, |x| x=="csv");
    if !is_csv {
        panic!("Interrupt config must be inside a CSV file");
    }
    let mut reader = csv::Reader::from_path(path).expect("CSV read from config failed");
    // The kernel's file stem selects the matching CSV row.
    let stem = kernel.as_path().file_stem().expect("Kernel filename error").to_str().unwrap();
    for r in reader.records() {
        let rec = r.expect("CSV entry error");
        if stem == &rec[0] {
            let ret: Vec<(usize, u32)> = rec[6]
                .split(';')
                .filter(|x| !x.is_empty())
                .map(|x| {
                    let (src, time) = x.split_once('#').expect("Interrupt config error");
                    (src.parse().expect("Interrupt config error"), time.parse().expect("Interrupt config error"))
                })
                .collect();
            println!("Interrupt config {:?}", ret);
            return ret;
        }
    }
    Vec::new()
}

View File

@ -0,0 +1,95 @@
use hashbrown::HashMap;
use libafl_qemu::{elf::EasyElf, GuestAddr};
use std::env;
use crate::systemstate::helpers::{load_symbol, try_load_symbol};
/// Resolve all ELF symbols the fuzzer needs and collect them by name.
///
/// Mandatory symbols abort on absence (via `load_symbol` / `expect`);
/// optional ones (`FUZZ_MAIN`, `FUZZ_LENGTH`, `FUZZ_POINTER`) are only
/// inserted when found. The fuzz-related symbol names can be overridden
/// through environment variables of the same name.
pub fn get_target_symbols(elf: &EasyElf) -> HashMap<&'static str, GuestAddr> {
    let mut addrs = HashMap::new();
    // Fixed, mandatory section/marker symbols.
    for name in [
        "__APP_CODE_START__",
        "__APP_CODE_END__",
        "__API_CODE_START__",
        "__API_CODE_END__",
        "trigger_job_done",
    ] {
        addrs.insert(name, load_symbol(&elf, name, false));
    }
    crate::systemstate::target_os::freertos::config::add_target_symbols(elf, &mut addrs);
    // the main address where the fuzzer starts
    // if this is set for freeRTOS it has an influence on where the data will have to be written,
    // since the startup routine copies the data segment to its virtual address
    if let Some(main_addr) = elf.resolve_symbol(
        &env::var("FUZZ_MAIN").unwrap_or_else(|_| "FUZZ_MAIN".to_owned()),
        0,
    ) {
        addrs.insert("FUZZ_MAIN", main_addr);
    }
    // The input buffer itself is mandatory.
    addrs.insert(
        "FUZZ_INPUT",
        load_symbol(
            &elf,
            &env::var("FUZZ_INPUT").unwrap_or_else(|_| "FUZZ_INPUT".to_owned()),
            true,
        ),
    );
    // Optional input-length and input-cursor symbols.
    if let Some(len_ptr) = try_load_symbol(
        &elf,
        &env::var("FUZZ_LENGTH").unwrap_or_else(|_| "FUZZ_LENGTH".to_owned()),
        true,
    ) {
        addrs.insert("FUZZ_LENGTH", len_ptr);
    }
    if let Some(counter_ptr) = try_load_symbol(
        &elf,
        &env::var("FUZZ_POINTER").unwrap_or_else(|_| "FUZZ_POINTER".to_owned()),
        true,
    ) {
        addrs.insert("FUZZ_POINTER", counter_ptr);
    }
    addrs.insert(
        "BREAKPOINT",
        elf.resolve_symbol(
            &env::var("BREAKPOINT").unwrap_or_else(|_| "BREAKPOINT".to_owned()),
            0,
        )
        .expect("Symbol or env BREAKPOINT not found"),
    );
    addrs
}
pub fn get_target_ranges(
_elf: &EasyElf,
symbols: &HashMap<&'static str, GuestAddr>,
) -> HashMap<&'static str, std::ops::Range<GuestAddr>> {
let mut ranges = HashMap::new();
ranges.insert(
"APP_CODE",
symbols["__APP_CODE_START__"]..symbols["__APP_CODE_END__"],
);
ranges.insert(
"API_CODE",
symbols["__API_CODE_START__"]..symbols["__API_CODE_END__"],
);
ranges
}

167
fuzzers/FRET/src/debug.rs Normal file
View File

@ -0,0 +1,167 @@
use libafl::*;
use libafl_bolts::*;
use std::borrow::Cow;
use serde::*;
use serde::ser::Serialize;
use libafl::prelude::Feedback;
use libafl::prelude::Testcase;
use libafl::prelude::*;
use std::marker::PhantomData;
/// Marker metadata attached to testcases by [`DebugFeedback`]; used to
/// check that metadata survives between feedback and stage execution.
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
pub struct DebugMetadata {
    // Dummy payload; only the marker's presence matters.
    pub val: bool
}
libafl_bolts::impl_serdeany!(DebugMetadata);
//==================================================================================================
/// The [`DebugFeedback`] reports the same value, always.
/// It can be used to enable or disable feedback results through composition.
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
pub enum DebugFeedback {
    /// Always returns `true`
    True,
    /// Always returns `false`
    False,
}
// Debug countdown: the first 10 executions are reported as interesting.
// NOTE(review): non-atomic `static mut` -- only sound single-threaded.
static mut counter : usize = 10;
impl<EM, I, OT, S> Feedback<EM, I, OT, S> for DebugFeedback
where
    S: State,
{
    /// Reports `true` for the first 10 executions (global `counter`),
    /// then `false` -- regardless of the `True`/`False` variant. Handy to
    /// force a handful of corpus insertions while debugging.
    ///
    /// The previous trailing `Ok((*self).into())` was unreachable (both
    /// branches above return) and has been removed.
    #[inline]
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &I,
        _observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<State = S>,
        OT: ObserversTuple<I, S>,
    {
        // SAFETY: single-threaded debug use only (non-atomic static mut).
        if unsafe { counter } > 0 {
            unsafe { counter -= 1; }
            Ok(true)
        } else {
            Ok(false)
        }
    }
    #[cfg(feature = "track_hit_feedbacks")]
    fn last_result(&self) -> Result<bool, Error> {
        Ok((*self).into())
    }
    /// Attach a [`DebugMetadata`] marker to every saved testcase and log it.
    fn append_metadata(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _observers: &OT,
        testcase: &mut Testcase<<S>::Input>,
    ) -> Result<(), Error>
    where
        OT: ObserversTuple<I, S>,
        EM: EventFirer<State = S>,
    {
        testcase.metadata_map_mut().insert(DebugMetadata { val: true });
        eprintln!("Attach: {:?}",testcase.metadata::<DebugMetadata>());
        Ok(())
    }
}
impl Named for DebugFeedback {
    /// Stable name under which this feedback registers its state.
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        static NAME: Cow<'static, str> = Cow::Borrowed("DebugFeedback");
        &NAME
    }
}
impl DebugFeedback {
    /// Creates a new [`DebugFeedback`] from the given boolean
    #[must_use]
    pub fn new(val: bool) -> Self {
        val.into()
    }
}
impl From<bool> for DebugFeedback {
    /// `true` maps to [`DebugFeedback::True`], `false` to [`DebugFeedback::False`].
    fn from(val: bool) -> Self {
        match val {
            true => Self::True,
            false => Self::False,
        }
    }
}
impl From<DebugFeedback> for bool {
    /// Inverse of the `bool -> DebugFeedback` conversion.
    fn from(value: DebugFeedback) -> Self {
        matches!(value, DebugFeedback::True)
    }
}
//==================================================================================================
/// Debug stage: prints the [`DebugMetadata`] attached to the current
/// testcase, to verify metadata round-trips through the corpus.
#[derive(Clone, Debug, Default)]
pub struct DebugStage<E, OT> {
    // Ties the stage to an executor/observer type pair; holds no data.
    #[allow(clippy::type_complexity)]
    phantom: PhantomData<(E, OT)>,
}
// Forward the associated `State` type from the executor.
impl<E, OT> UsesState for DebugStage<E, OT>
where
    E: UsesState,
{
    type State = E::State;
}
impl<E, OT> DebugStage<E, OT>
{
    /// Construct a new, stateless `DebugStage`.
    pub fn new() -> Self {
        Self { phantom: PhantomData}
    }
}
impl<E, EM, OT, Z> Stage<E, EM, Z> for DebugStage<E, OT>
where
E: Executor<EM, Z> + HasObservers<Observers = OT>,
EM: EventFirer<State = Self::State>,
OT: ObserversTuple<Self::State>,
Self::State: HasCorpus + HasMetadata + HasNamedMetadata + HasExecutions,
Z: Evaluator<E, EM, State = Self::State>,
{
fn perform(
&mut self,
fuzzer: &mut Z,
executor: &mut E,
state: &mut Self::State,
manager: &mut EM
) -> Result<(), Error> {
// eprintln!("DebugStage {:?}", state.current_testcase());
let testcase = state.current_testcase()?;
eprintln!("Stage: {:?}",testcase.metadata::<DebugMetadata>());
Ok(())
}
fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result<bool, Error> {
Ok(true)
}
fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> {
Ok(())
}
}

682
fuzzers/FRET/src/fuzzer.rs Normal file
View File

@ -0,0 +1,682 @@
#![allow(unused_imports)]
//! A fuzzer using qemu in systemmode for binary-only coverage of kernels
//!
use core::time::Duration;
use std::{env, path::PathBuf, process::{self, abort}, io::{Read, Write}, fs::{self, OpenOptions}, cmp::{min, max}, mem::transmute_copy, ptr::addr_of_mut, ffi::OsStr};
use hashbrown::HashMap;
use libafl_bolts::{
core_affinity::Cores, ownedref::OwnedMutSlice, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, tuples::tuple_list, AsSlice, SimpleStderrLogger
};
use libafl::{
common::{HasMetadata, HasNamedMetadata}, corpus::{Corpus, InMemoryCorpus, OnDiskCorpus}, events::{launcher::Launcher, EventConfig}, executors::ExitKind, feedback_or, feedback_or_fast, feedbacks::{CrashFeedback, MaxMapFeedback, TimeoutFeedback}, fuzzer::{Fuzzer, StdFuzzer}, inputs::{multi::MultipartInput, BytesInput, HasTargetBytes, Input, Keyed}, monitors::MultiMonitor, observers::{CanTrack, VariableMapObserver}, prelude::{havoc_mutations, minimizer::TopRatedsMetadata, CorpusId, Generator, HavocScheduledMutator, HitcountsMapObserver, RandBytesGenerator, ScheduledMutator, SimpleEventManager, SimpleMonitor, SimplePrintingMonitor, SimpleRestartingEventManager}, schedulers::QueueScheduler, stages::StdMutationalStage, state::{HasCorpus, StdState}, Error, Evaluator,
};
use libafl_qemu::{
elf::EasyElf, emu::Emulator, modules::{edges::{self}, utils::filters::AddressFilter, EdgeCoverageModule, StdEdgeCoverageModule}, GuestAddr, GuestPhysAddr, QemuExecutor, QemuExitReason, QemuHooks, Regs
};
use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND};
use rand::{SeedableRng, StdRng, Rng};
use crate::{
config::{get_target_ranges, get_target_symbols}, systemstate::{self, feedbacks::{DumpSystraceFeedback, SystraceErrorFeedback}, helpers::{get_function_range, input_bytes_to_interrupt_times, load_symbol, try_load_symbol}, mutational::{InterruptShiftStage, STGSnippetStage}, schedulers::{GenerationScheduler, LongestTraceScheduler}, stg::{stg_map_mut_slice, GraphMaximizerCorpusScheduler, STGEdge, STGNode, StgFeedback, MAX_STG_NUM}, target_os::freertos::{config::get_range_groups, qemu_module::FreeRTOSSystemStateHelper, FreeRTOSSystem}}, time::{
clock::{ClockTimeFeedback, IcHist, QemuClockIncreaseFeedback, QemuClockObserver, FUZZ_START_TIMESTAMP, QEMU_ICOUNT_SHIFT, QEMU_ISNS_PER_MSEC, QEMU_ISNS_PER_USEC}, qemustate::QemuStateRestoreHelper, worst::{AlwaysTrueFeedback, ExecTimeIncFeedback, RateLimitedMonitor, TimeMaximizerCorpusScheduler, TimeProbMassScheduler, TimeStateMaximizerCorpusScheduler}
}
};
use std::time::SystemTime;
use petgraph::dot::Dot;
use crate::systemstate::stg::STGFeedbackState;
use libafl::inputs::HasMutatorBytes;
use libafl_qemu::Qemu;
use crate::cli::Cli;
use crate::cli::Commands;
use crate::cli::set_env_from_config;
use clap::Parser;
use log;
use rand::RngCore;
use crate::templates;
use std::ops::Range;
// Constants ================================================================================
// Seed for the fuzzer RNG; overridable at startup via the SEED_RANDOM env var.
pub static mut RNG_SEED: u64 = 1;
// Interrupt trigger times below this tick count are discarded (treated as disabled).
pub const FIRST_INT : u32 = 200000;
// Capacity of the per-source interrupt schedule shared with QEMU.
pub const MAX_NUM_INTERRUPT: usize = 128;
pub const NUM_INTERRUPT_SOURCES: usize = 6; // Keep in sync with qemu-libafl-bridge/hw/timer/armv7m_systick.c:319 and FreeRTOS/FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC/init/startup.c:216
// How many interrupt times are actually decoded from a fuzz input part.
pub const DO_NUM_INTERRUPT: usize = 128;
// Maximum number of input bytes written into the guest; overridable via FUZZ_INPUT_LEN.
pub static mut MAX_INPUT_SIZE: usize = 1024;
/// Collect the address ranges of all function symbols in `elf` whose start
/// address lies inside `range`, keyed by symbol name.
///
/// Symbols without a resolvable range (see [`get_function_range`]) are skipped.
pub fn get_all_fn_symbol_ranges(elf: &EasyElf, range: std::ops::Range<GuestAddr>) -> HashMap<String,std::ops::Range<GuestAddr>> {
    let mut ret: HashMap<String, std::ops::Range<GuestAddr>> = HashMap::new();
    let gob = elf.goblin();
    // Only function symbols starting inside the requested range are considered.
    let mut funcs: Vec<_> = gob
        .syms
        .iter()
        .filter(|x| x.is_function() && range.contains(&x.st_value.try_into().unwrap()))
        .collect();
    funcs.sort_unstable_by_key(|x| x.st_value);
    for sym in &funcs {
        if let Some(sym_name) = gob.strtab.get_at(sym.st_name) {
            // if ISR_SYMBOLS.contains(&sym_name) {continue;}; // skip select symbols, which correspond to ISR-safe system calls
            if let Some(r) = get_function_range(elf, sym_name) {
                ret.insert(sym_name.to_string(), r);
            }
        }
    }
    ret
}
#[allow(unused)]
extern "C" {
    // Interrupt schedule shared with the patched QEMU: per interrupt source,
    // the (sorted) instruction-count offsets at which an interrupt fires.
    static mut libafl_interrupt_offsets : [[u32; MAX_NUM_INTERRUPT]; NUM_INTERRUPT_SOURCES];
    // Number of valid entries in each row of `libafl_interrupt_offsets`.
    static mut libafl_num_interrupts : [u64; NUM_INTERRUPT_SOURCES];
}
/// Takes a state, cli and a suffix, writes out the current worst case.
///
/// Scans the whole corpus for the testcase with the longest execution time and
/// serializes its input to `<dump_name>.<suffix>` (default extension "case").
/// No-op unless `--dump-cases` was requested.
macro_rules! do_dump_case {
    ( $s:expr,$cli:expr, $c:expr) => {
        if ($cli.dump_cases) {
            let dump_path = $cli.dump_name.clone().unwrap().with_extension(if $c=="" {"case"} else {$c});
            println!("Dumping worst case to {:?}", &dump_path);
            let corpus = $s.corpus();
            let mut worst = Duration::new(0,0);
            let mut worst_input = None;
            // Linear scan for the corpus entry with the maximum recorded exec time.
            for i in 0..corpus.count() {
                let tc = corpus.get(corpus.nth(i.into())).expect("Could not get element from corpus").borrow();
                if worst < tc.exec_time().expect("Testcase missing duration") {
                    worst_input = Some(tc.input().as_ref().unwrap().clone());
                    worst = tc.exec_time().expect("Testcase missing duration");
                }
            }
            if let Some(wi) = worst_input {
                // Fail loudly instead of silently dropping the dump
                // (consistent with DumpSystraceFeedback::is_interesting).
                wi.to_file(dump_path).expect("Could not dump testcase");
            }
        }
    }
}
/// Takes a state, cli and a suffix, appends icount history
macro_rules! do_dump_times {
    ($state:expr, $cli:expr, $c:expr) => {
        if $cli.dump_times {
            // Default extension is "time" unless a custom suffix is given.
            let dump_path = $cli.dump_name.clone().unwrap().with_extension(if $c=="" {"time"} else {$c});
            let mut file = std::fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .append(true)
                .open(dump_path).expect("Could not open timedump");
            // Drain the recorded history so repeated invocations append each
            // entry only once. Entries are written as "i.0,i.1" per line —
            // presumably (icount, wall-clock ms) pairs; confirm in time::clock.
            if let Ok(ichist) = $state.metadata_mut::<IcHist>() {
                for i in ichist.0.drain(..) {
                    writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
                }
            }
        }
    };
}
/// Takes a state and a bool, writes out the current graph
macro_rules! do_dump_stg {
    ($state:expr, $cli:expr, $c:expr) => {
        // Only available when STG tracing is compiled in.
        #[cfg(feature = "trace_stg")]
        if $cli.dump_graph {
            let dump_path = $cli.dump_name.clone().unwrap().with_extension(if $c=="" {"dot"} else {$c});
            println!("Dumping graph to {:?}", &dump_path);
            if let Some(md) = $state.named_metadata_map_mut().get_mut::<STGFeedbackState<FreeRTOSSystem>>("stgfeedbackstate") {
                // Map every node/edge through its color_print representation,
                // then render to Graphviz DOT.
                let out = md.graph.map(|_i,x| x.color_print(&md.systemstate_index), |_i,x| x.color_print());
                let outs = Dot::with_config(&out, &[]).to_string();
                // Unescape quotes and turn ';' separators into literal "\n"
                // so multi-field labels stay readable in the .dot output.
                let outs = outs.replace("\\\"","\"");
                let outs = outs.replace(';',"\\n");
                fs::write(dump_path,outs).expect("Failed to write graph");
            }
        }
    };
}
/// Takes a state and a bool, writes out top rated inputs.
///
/// Serializes the minimizer's `TopRatedsMetadata` map (feature index ->
/// favored corpus id) as RON to `<dump_name>.<suffix>` (default "toprated").
/// No-op unless `--dump-cases` was requested.
macro_rules! do_dump_toprated {
    ($state:expr, $cli:expr, $c:expr) => {
        if $cli.dump_cases {
            {
                let dump_path = $cli.dump_name.clone().unwrap().with_extension(if $c=="" {"toprated"} else {$c});
                println!("Dumping toprated to {:?}", &dump_path);
                if let Some(md) = $state.metadata_map_mut().get_mut::<TopRatedsMetadata>() {
                    // (A previously computed sorted+deduped list of the unique
                    // corpus ids was dead code and has been removed.)
                    fs::write(dump_path,ron::to_string(&md.map).expect("Failed to serialize metadata")).expect("Failed to write graph");
                }
            }
        }
    };
}
// Fuzzer setup ================================================================================
/// Top-level fuzzer entry point: parses the CLI, sets up QEMU (mps2-an385,
/// cortex-m3) with icount timing, builds the feature-selected observer /
/// feedback / scheduler stack, and dispatches to the requested command
/// (Showmap or Fuzz). Also handles the DUMP_SEED special mode and the
/// singlecore/multicore launch variants.
#[allow(unused)]
pub fn fuzz() {
    log::set_max_level(log::LevelFilter::Info);
    SimpleStderrLogger::set_logger().unwrap();
    let cli = Cli::parse();
    dbg!(&cli);
    // Propagate per-target config into environment variables before anything reads them.
    set_env_from_config(&cli.kernel, &cli.config);
    let interrupt_config = crate::cli::get_interrupt_config(&cli.kernel, &cli.config);
    unsafe {FUZZ_START_TIMESTAMP = SystemTime::now();}
    // All dump flags require a base file name to derive output paths from.
    if cli.dump_name.is_none() && (cli.dump_times || cli.dump_cases || cli.dump_traces || cli.dump_graph) {
        panic!("Dump name not give but dump is requested");
    }
    let mut starttime = std::time::Instant::now();
    // Hardcoded parameters
    let timeout = Duration::from_secs(10);
    let broker_port = 1337;
    let cores = Cores::from_cmdline("1").unwrap();
    let corpus_dirs = [PathBuf::from("./corpus")];
    let objective_dir = PathBuf::from(cli.dump_name.clone().map(|x| x.with_extension("crashes")).unwrap_or("./crashes".try_into().unwrap()));
    let mut elf_buffer = Vec::new();
    let elf = EasyElf::from_file(
        &cli.kernel,
        &mut elf_buffer,
    )
    .unwrap();
    // Resolve all target-specific symbols/ranges once, up front.
    let TARGET_SYMBOLS: HashMap<&'static str, GuestAddr> = get_target_symbols(&elf);
    let TARGET_RANGES: HashMap<&'static str, Range<GuestAddr>> = get_target_ranges(&elf, &TARGET_SYMBOLS);
    let TARGET_GROUPS: HashMap<&'static str, HashMap<String, Range<GuestAddr>>> = get_range_groups(&elf, &TARGET_SYMBOLS, &TARGET_RANGES);
    unsafe {
        libafl_num_interrupts = [0; NUM_INTERRUPT_SOURCES];
    }
    if let Ok(input_len) = env::var("FUZZ_INPUT_LEN") {
        unsafe {MAX_INPUT_SIZE = str::parse::<usize>(&input_len).expect("FUZZ_INPUT_LEN was not a number");}
    }
    unsafe {dbg!(MAX_INPUT_SIZE);}
    if let Ok(seed) = env::var("SEED_RANDOM") {
        unsafe {RNG_SEED = str::parse::<u64>(&seed).expect("SEED_RANDOM must be an integer.");}
    }
    // ISR function ranges are excluded from edge coverage below.
    let denylist: Vec<_> = TARGET_GROUPS["ISR_FN"].values().map(|x| x.clone()).collect();
    // let denylist = AddressFilter::deny_list(denylist); // do not count isr jumps, which are useless
    /// Setup the interrupt inputs. Noop if interrupts are not fuzzed
    fn setup_interrupt_inputs(mut input : MultipartInput<BytesInput, String>, interrupt_config : &Vec<(usize,u32)>, mut random: Option<&mut StdRng>) -> MultipartInput<BytesInput, String> {
        #[cfg(feature = "fuzz_int")]
        for (i,_) in interrupt_config {
            let name = format!("isr_{}_times",i);
            // Only add a part if the input does not already carry one for this source.
            if input.with_key(&name).next().is_none() {
                if let Some(random) = random.as_mut() {
                    input.append_part((name, BytesInput::new((0..MAX_NUM_INTERRUPT).map(|_| (random.next_u32()%(100*QEMU_ISNS_PER_MSEC)).to_le_bytes()).flatten().collect())));
                } else {
                    input.append_part((name, BytesInput::new([0; MAX_NUM_INTERRUPT*4].to_vec())));
                }
            }
        }
        input
    }
    // Client setup ================================================================================
    let run_client = |state: Option<_>, mut mgr, _core_id| {
        // Initialize QEMU
        let args: Vec<String> = vec![
            "target/debug/fret",
            "-icount",
            &format!("shift={},align=off,sleep=off", QEMU_ICOUNT_SHIFT),
            "-machine",
            "mps2-an385",
            "-cpu",
            "cortex-m3",
            "-monitor",
            "null",
            "-kernel",
            &cli.kernel.as_os_str().to_str().expect("kernel path is not a string"),
            "-serial",
            "null",
            "-nographic",
            "-S",
            // "-semihosting",
            // "--semihosting-config",
            // "enable=on,target=native",
            #[cfg(not(feature = "snapshot_fast"))]
            "-snapshot",
            #[cfg(not(feature = "snapshot_fast"))]
            "-drive",
            #[cfg(not(feature = "snapshot_fast"))]
            "if=none,format=qcow2,file=/dev/null", // dummy drive to avoid qemu error
        ].into_iter().map(String::from).collect();
        let env: Vec<(String, String)> = env::vars().collect();
        let harness_input_addr = TARGET_SYMBOLS["FUZZ_INPUT"];
        let harness_input_length_ptr = TARGET_SYMBOLS.get("FUZZ_LENGTH").copied();
        let harness_breakpoint = TARGET_SYMBOLS["BREAKPOINT"];
        // The wrapped harness function, calling out to the LLVM-style harness
        let mut harness = |emulator: &mut Emulator<_, _, _, _, _, _, _>, state: &mut _, input: &MultipartInput<BytesInput, String>| {
            unsafe {
                #[cfg(feature = "fuzz_int")]
                {
                    // Decode the per-source interrupt schedules from the input
                    // parts into the C-side arrays consumed by QEMU.
                    libafl_interrupt_offsets=[[0;MAX_NUM_INTERRUPT];NUM_INTERRUPT_SOURCES];
                    for &c in &interrupt_config {
                        use libafl_bolts::prelude::OwnedSlice;
                        let (i,_) = c;
                        let name = format!("isr_{}_times",i);
                        let input_bytes = input.with_key(&name).next().map(|x| x.1.target_bytes()).unwrap_or(OwnedSlice::from(vec![]));
                        let t = input_bytes_to_interrupt_times(&input_bytes, c);
                        for j in 0..t.len() {libafl_interrupt_offsets[i][j]=t[j];}
                        libafl_num_interrupts[i]=t.len() as u64;
                    }
                    // println!("Load: {:?}", libafl_interrupt_offsets[0..libafl_num_interrupts].to_vec());
                }
                // Copy the (truncated) "bytes" part into the guest's input buffer.
                let mut bytes = input.with_key(&"bytes".to_string()).next().unwrap().1.target_bytes();
                let mut len = bytes.len();
                if len > MAX_INPUT_SIZE {
                    bytes.truncate(MAX_INPUT_SIZE);
                    len = MAX_INPUT_SIZE;
                }
                // Note: I could not find a difference between write_mem and write_phys_mem for my usecase
                emulator.qemu().write_mem(harness_input_addr, &bytes);
                if let Some(s) = harness_input_length_ptr {
                    emulator.qemu().write_mem(s, &(len as u32).to_le_bytes());
                }
                emulator.run(state, input);
                // If the execution stops at any point other then the designated breakpoint (e.g. a breakpoint on a panic method) we consider it a crash
                let mut pcs = (0..emulator.qemu().num_cpus())
                    .map(|i| emulator.qemu().cpu_from_index(i).expect("CPU index not found"))
                    .map(|cpu| -> Result<u32, _> { cpu.read_reg(Regs::Pc) });
                match pcs
                    .find(|pc| (harness_breakpoint..harness_breakpoint + 5).contains(pc.as_ref().unwrap_or(&0)))
                {
                    Some(_) => ExitKind::Ok,
                    Option::None => ExitKind::Crash,
                }
            }
        };
        // Create an observation channel to keep track of the execution time
        let clock_time_observer = QemuClockObserver::new("clocktime", &cli.select_task); // if cli.dump_times {cli.dump_name.clone().map(|x| x.with_extension("time"))} else {None}
        // Create an observation channel using the coverage map
        #[cfg(feature = "observe_edges")]
        let mut edges_observer = unsafe { VariableMapObserver::from_mut_slice(
            "edges",
            OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE),
            addr_of_mut!(MAX_EDGES_FOUND),
        )};
        // Optionally wrap the edge map to bucket hitcounts (AFL-style).
        #[cfg(feature = "observe_hitcounts")]
        let mut edges_observer = HitcountsMapObserver::new(edges_observer);
        #[cfg(feature = "observe_edges")]
        let mut edges_observer = edges_observer.track_indices();
        #[cfg(feature = "observe_systemstate")]
        let stg_coverage_observer = unsafe { VariableMapObserver::from_mut_slice(
            "stg",
            stg_map_mut_slice(),
            addr_of_mut!(MAX_STG_NUM)
        )}.track_indices();
        // Feedback to rate the interestingness of an input
        // This one is composed by two Feedbacks in OR
        let mut feedback = feedback_or!(
            // Time feedback, this one does not need a feedback state
            ClockTimeFeedback::<FreeRTOSSystem>::new_with_observer(&clock_time_observer, &cli.select_task, if cli.dump_times {cli.dump_name.clone().map(|x| x.with_extension("time"))} else {None})
        );
        // Each enabled feature ORs a further feedback onto the chain,
        // shadowing the previous `feedback` binding.
        #[cfg(feature = "feed_genetic")]
        let mut feedback = feedback_or!(
            feedback,
            AlwaysTrueFeedback::new()
        );
        #[cfg(feature = "feed_afl")]
        let mut feedback = feedback_or!(
            feedback,
            // New maximization map feedback linked to the edges observer and the feedback state
            MaxMapFeedback::new(&edges_observer)
        );
        #[cfg(feature = "feed_longest")]
        let mut feedback = feedback_or!(
            // afl feedback needs to be activated first for MapIndexesMetadata
            feedback,
            // Feedback to reward any input which increses the execution time
            ExecTimeIncFeedback::<FreeRTOSSystem>::new()
        );
        #[cfg(all(feature = "observe_systemstate"))]
        let mut feedback = feedback_or!(
            feedback,
            DumpSystraceFeedback::<FreeRTOSSystem>::with_dump(if cli.dump_traces {cli.dump_name.clone()} else {None})
        );
        #[cfg(feature = "trace_stg")]
        let mut feedback = feedback_or!(
            feedback,
            StgFeedback::<FreeRTOSSystem>::new(cli.select_task.clone(), if cli.dump_graph {cli.dump_name.clone()} else {None})
        );
        #[cfg(feature = "feed_stg_edge")]
        let mut feedback = feedback_or!(
            feedback,
            MaxMapFeedback::new(&stg_coverage_observer)
        );
        // A feedback to choose if an input is producing an error
        let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new(), SystraceErrorFeedback::<FreeRTOSSystem>::new(matches!(cli.command, Commands::Fuzz{..}), Some(10)));
        // If not restarting, create a State from scratch
        let mut state = state.unwrap_or_else(|| {
            StdState::new(
                // RNG
                unsafe {StdRand::with_seed(RNG_SEED) },
                // Corpus that will be evolved, we keep it in memory for performance
                InMemoryCorpus::new(),
                // Corpus in which we store solutions (crashes in this example),
                // on disk so the user can get them after stopping the fuzzer
                OnDiskCorpus::new(objective_dir.clone()).unwrap(),
                // States of the feedbacks.
                // The feedbacks can report the data that should persist in the State.
                &mut feedback,
                // Same for objective feedbacks
                &mut objective,
            )
            .unwrap()
        });
        // A minimization+queue policy to get testcasess from the corpus
        #[cfg(not(any(feature = "sched_afl", feature = "sched_stg", feature = "sched_genetic")))]
        let scheduler = QueueScheduler::new(); // fallback
        #[cfg(feature = "sched_afl",)]
        let scheduler = TimeMaximizerCorpusScheduler::new(&edges_observer,TimeProbMassScheduler::new());
        #[cfg(feature = "sched_stg")]
        let mut scheduler = GraphMaximizerCorpusScheduler::non_metadata_removing(&stg_coverage_observer,TimeProbMassScheduler::new());
        #[cfg(feature = "sched_stg")]
        {
            // Skip non-favored entries with 80% probability.
            scheduler.skip_non_favored_prob = 0.8;
        }
        #[cfg(feature = "sched_genetic")]
        let scheduler = GenerationScheduler::new();
        // A fuzzer with feedbacks and a corpus scheduler
        let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
        // QEMU helper modules, assembled feature-by-feature (innermost first).
        let qhelpers = tuple_list!();
        #[cfg(feature = "observe_systemstate")]
        let qhelpers = (FreeRTOSSystemStateHelper::new(&TARGET_SYMBOLS,&TARGET_RANGES,&TARGET_GROUPS), qhelpers);
        #[cfg(feature = "observe_edges")]
        let qhelpers = (
            StdEdgeCoverageModule::builder()
                .map_observer(edges_observer.as_mut())
                .address_filter(denylist)
                .build()
                .unwrap(), qhelpers);//StdEdgeCoverageModule::new(denylist, FilterList::None), qhelpers);
        let qhelpers = (QemuStateRestoreHelper::new(), qhelpers);
        let emulator = Emulator::empty()
            .qemu_parameters(args)
            .modules(qhelpers)
            .build()
            .expect("Failed to initialise QEMU");
        // If the target declares a FUZZ_MAIN symbol, run up to it once before
        // fuzzing so snapshots start past early boot code.
        if let Some(&main_addr) = TARGET_SYMBOLS.get("FUZZ_MAIN") {
            emulator.qemu().set_breakpoint(main_addr);
            unsafe {
                match emulator.qemu().run() {
                    Ok(QemuExitReason::Breakpoint(_)) => {}
                    _ => panic!("Unexpected QEMU exit."),
                }
            }
            emulator.qemu().remove_breakpoint(main_addr);
        }
        emulator.qemu().set_breakpoint(TARGET_SYMBOLS["BREAKPOINT"]); // BREAKPOINT
        let devices = emulator.qemu().list_devices();
        println!("Devices = {devices:?}");
        // #[cfg(feature = "snapshot_fast")]
        // let initial_snap = Some(emulator.qemu().create_fast_snapshot(true));
        // #[cfg(not(feature = "snapshot_fast"))]
        // let initial_snap = None;
        let observer_list = tuple_list!();
        #[cfg(feature = "observe_systemstate")]
        let observer_list = (stg_coverage_observer, observer_list); // must come after clock
        #[cfg(feature = "observe_edges")]
        let observer_list = (edges_observer, observer_list);
        let observer_list = (clock_time_observer, observer_list);
        // Create a QEMU in-process executor
        let mut executor = QemuExecutor::new(
            emulator,
            &mut harness,
            observer_list,
            &mut fuzzer,
            &mut state,
            &mut mgr,
            timeout
        )
        .expect("Failed to create QemuExecutor");
        executor.break_on_timeout();
        let mutations = havoc_mutations();
        // Setup an havoc mutator with a mutational stage
        let mutator = HavocScheduledMutator::new(mutations);
        let stages = (systemstate::report::SchedulerStatsStage::default(),());
        let stages = (StdMutationalStage::new(mutator), stages);
        #[cfg(feature = "mutate_stg")]
        let mut stages = (STGSnippetStage::<_,_,_,_,_,FreeRTOSSystem>::new(TARGET_SYMBOLS["FUZZ_INPUT"]), stages);
        #[cfg(feature = "fuzz_int")]
        let mut stages = (InterruptShiftStage::<_,_,_,_,_,FreeRTOSSystem>::new(&interrupt_config), stages);
        // ---- Command dispatch: Showmap runs a single input, Fuzz runs the campaign.
        if let Commands::Showmap { input } = cli.command.clone() {
            let s = input.as_os_str();
            // let show_input = BytesInput::new(if s=="-" {
            //     let mut buf = Vec::<u8>::new();
            //     std::io::stdin().read_to_end(&mut buf).expect("Could not read Stdin");
            //     buf
            // } else if s=="$" {
            //     env::var("SHOWMAP_TEXTINPUT").expect("SHOWMAP_TEXTINPUT not set").as_bytes().to_owned()
            // } else {
            //     // fs::read(s).expect("Input file for DO_SHOWMAP can not be read")
            // });
            // Prefer the serialized multipart format; fall back to raw bytes.
            let show_input = match MultipartInput::from_file(input.as_os_str()) {
                Ok(x) => x,
                Err(_) => {
                    println!("Interpreting input file as raw input");
                    setup_interrupt_inputs(MultipartInput::from([("bytes".to_string(),BytesInput::new(fs::read(input).expect("Can not read input file")))]), &interrupt_config, None)
                }
            };
            fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, &show_input)
                .unwrap();
            do_dump_times!(state, &cli, "");
            do_dump_stg!(state, &cli, "");
        } else if let Commands::Fuzz { random, time, seed } = cli.command {
            // Seeding: explicit RNG seed > SEED_DIR env var > on-disk corpus.
            if let Some(se) = seed {
                unsafe {
                    let mut rng = StdRng::seed_from_u64(se);
                    let bound = 10000;
                    #[cfg(feature = "shortcut")]
                    let bound = 100;
                    for _ in 0..bound {
                        let inp2 = BytesInput::new((0..MAX_INPUT_SIZE).map(|_| rng.gen::<u8>()).collect());
                        let inp = setup_interrupt_inputs(MultipartInput::from([("bytes".to_string(),inp2)]), &interrupt_config, Some(&mut rng));
                        fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, &inp).unwrap();
                    }
                }
            }
            else if let Ok(sf) = env::var("SEED_DIR") {
                state
                    .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &[PathBuf::from(&sf)])
                    .unwrap_or_else(|_| {
                        println!("Failed to load initial corpus at {:?}", &corpus_dirs);
                        process::exit(0);
                    });
                println!("We imported {} inputs from seedfile.", state.corpus().count());
            } else if state.corpus().count() < 1 {
                state
                    .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs)
                    .unwrap_or_else(|_| {
                        println!("Failed to load initial corpus at {:?}", &corpus_dirs);
                        process::exit(0);
                    });
                println!("We imported {} inputs from disk.", state.corpus().count());
            }
            // `time` selects between an endless loop and a bounded campaign.
            match time {
                Option::None => {
                    fuzzer
                        .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
                        .unwrap();
                },
                Some(t) => {
                    println!("Iterations {}",t);
                    let num = t;
                    if random { unsafe {
                        println!("Random Fuzzing, ignore corpus");
                        // let mut generator = RandBytesGenerator::new(MAX_INPUT_SIZE);
                        let target_duration = Duration::from_secs(num);
                        let start_time = std::time::Instant::now();
                        let mut rng = StdRng::seed_from_u64(RNG_SEED);
                        while start_time.elapsed() < target_duration {
                            // let inp = generator.generate(&mut state).unwrap();
                            // libafl's generator is too slow
                            let inp2 = BytesInput::new((0..MAX_INPUT_SIZE).map(|_| rng.gen::<u8>()).collect());
                            let inp = setup_interrupt_inputs(MultipartInput::from([("bytes".to_string(),inp2)]), &interrupt_config, Some(&mut rng));
                            fuzzer.evaluate_input(&mut state, &mut executor, &mut mgr, &inp).unwrap();
                        }
                    }} else {
                        // fuzzer
                        //     .fuzz_loop_for_duration(&mut stages, &mut executor, &mut state, &mut mgr, Duration::from_secs(num))
                        //     .unwrap();
                        fuzzer
                            .fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime.checked_add(Duration::from_secs(num)).unwrap())
                            .unwrap();
                        // Optionally keep fuzzing in 30s slices until no new
                        // worst case was found for 3h (10800s).
                        #[cfg(feature = "run_until_saturation")]
                        {
                            let mut dumper = |marker : String| {
                                let d = format!("{}.case",marker);
                                do_dump_case!(state, &cli, &d);
                                let _d = format!("{}.dot",marker);
                                do_dump_stg!(state, &cli, &_d);
                                let d = format!("{}.toprated",marker);
                                do_dump_toprated!(state, &cli, &d);
                            };
                            dumper(format!(".iter_{}",t));
                            do_dump_times!(state, &cli, "");
                            println!("Start running until saturation");
                            let mut last = state.metadata_map().get::<IcHist>().unwrap().1;
                            while SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis() < last.1 + Duration::from_secs(10800).as_millis() {
                                starttime=starttime.checked_add(Duration::from_secs(30)).unwrap();
                                fuzzer
                                    .fuzz_loop_until(&mut stages, &mut executor, &mut state, &mut mgr, starttime)
                                    .unwrap();
                                let after = state.metadata_map().get::<IcHist>().unwrap().1;
                                if after.0 > last.0 {
                                    last=after;
                                }
                                do_dump_case!(state, &cli, "");
                                do_dump_stg!(state, &cli, "");
                                do_dump_toprated!(state, &cli, "");
                            }
                        }
                    }
                    // Final dumps after the campaign finishes.
                    do_dump_times!(state, &cli, "");
                    do_dump_case!(state, &cli, "");
                    do_dump_stg!(state, &cli, "");
                    do_dump_toprated!(state, &cli, "");
                },
            }
        }
        #[cfg(not(feature = "singlecore"))]
        return Ok(());
    };
    // Special case where no fuzzing happens, but standard input is dumped
    if let Ok(input_dump) = env::var("DUMP_SEED") {
        // Initialize QEMU
        let args: Vec<String> = env::args().collect();
        let env: Vec<(String, String)> = env::vars().collect();
        let emu = Qemu::init(&args).expect("Emu creation failed");
        if let Some(&main_addr) = TARGET_SYMBOLS.get("FUZZ_MAIN") {
            emu.set_breakpoint(main_addr); // BREAKPOINT
        }
        unsafe {
            emu.run();
            // Read the guest's in-memory input buffer back out and write it
            // into the seed directory.
            let mut buf = [0u8].repeat(MAX_INPUT_SIZE);
            emu.read_mem(TARGET_SYMBOLS["FUZZ_INPUT"], buf.as_mut_slice());
            let dir = env::var("SEED_DIR").map_or("./corpus".to_string(), |x| x);
            let filename = if input_dump == "" {"input"} else {&input_dump};
            println!("Dumping input to: {}/{}",&dir,filename);
            fs::write(format!("{}/{}",&dir,filename), buf).expect("could not write input dump");
        }
        return
    }
    #[cfg(feature = "singlecore")]
    {
        let monitor = RateLimitedMonitor::new();
        #[cfg(not(feature = "restarting"))]
        {
            let mgr = SimpleEventManager::new(monitor);
            run_client(None, mgr, 0);
        }
        #[cfg(feature = "restarting")]
        {
            let mut shmem_provider = StdShMemProvider::new().unwrap();
            let (state, mgr) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider)
            {
                // The restarting state will spawn the same process again as child, then restarted it each time it crashes.
                Ok(res) => res,
                Err(err) => match err {
                    Error::ShuttingDown => {
                        return;
                    }
                    _ => {
                        panic!("Failed to setup the restarter: {}", err);
                    }
                },
            };
            run_client(state, mgr, 0);
        }
    }
    // else -> multicore
    #[cfg(not(feature = "singlecore"))]
    {
        // The shared memory allocator
        let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory");
        // The stats reporter for the broker
        let monitor = MultiMonitor::new(|s| println!("{}", s));
        // Build and run a Launcher
        match Launcher::builder()
            .shmem_provider(shmem_provider)
            .broker_port(broker_port)
            .configuration(EventConfig::from_build_id())
            .monitor(monitor)
            .run_client(&mut run_client)
            .cores(&cores)
            // .stdout_file(Some("/dev/null"))
            .build()
            .launch()
        {
            Ok(()) => (),
            Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."),
            Err(err) => panic!("Failed to run launcher: {:?}", err),
        }
    }
}

12
fuzzers/FRET/src/lib.rs Normal file
View File

@ -0,0 +1,12 @@
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
pub mod time;
#[cfg(target_os = "linux")]
pub mod systemstate;
#[cfg(target_os = "linux")]
mod cli;
#[cfg(target_os = "linux")]
pub mod templates;
#[cfg(target_os = "linux")]
mod config;

23
fuzzers/FRET/src/main.rs Normal file
View File

@ -0,0 +1,23 @@
//! A libfuzzer-like fuzzer using qemu for binary-only coverage
#[cfg(target_os = "linux")]
mod fuzzer;
#[cfg(target_os = "linux")]
mod time;
#[cfg(target_os = "linux")]
mod systemstate;
#[cfg(target_os = "linux")]
mod cli;
#[cfg(target_os = "linux")]
mod templates;
#[cfg(target_os = "linux")]
mod config;
#[cfg(target_os = "linux")]
pub fn main() {
    // Entry point: hand off to the fuzzer (libafl_qemu only supports Linux).
    fuzzer::fuzz();
}
// Fallback entry point for unsupported platforms.
#[cfg(not(target_os = "linux"))]
pub fn main() {
    panic!("qemu-user and libafl_qemu is only supported on linux!");
}

View File

@ -0,0 +1,12 @@
# System-state heuristics
## Information flow
- ``fuzzer.rs`` resolves symbols and creates ``api_ranges`` and ``isr_ranges``
- ``helpers::QemuSystemStateHelper`` captures a series of ``RawFreeRTOSSystemState``
- ``observers::QemuSystemStateObserver`` divides this into ``ReducedFreeRTOSSystemState`` and ``ExecInterval``, the first contains the raw states and the second contains information about the flow between states
- ``stg::StgFeedback`` builds an STG from the intervals
## Target-specific (systemstate/target_os)
- config ``add_target_symbols`` and ``get_range_groups`` resolve important symbols
- provides a helper (e.g. ``FreeRTOSSystemStateHelper`` ) to capture the state
- collects locally into e.g. ``CURRENT_SYSTEMSTATE_VEC``
- post-processing
- replaces ``SystemTraceData`` in state metadata

View File

@ -0,0 +1,212 @@
use libafl::{
common::HasMetadata,
executors::ExitKind,
feedbacks::Feedback,
observers::ObserversTuple,
state::{HasCorpus, MaybeHasClientPerfMonitor},
Error,
corpus::Corpus,
inputs::Input,
};
use libafl::events::EventFirer;
use libafl_bolts::Named;
use std::path::PathBuf;
use std::time::{Duration, Instant};
use super::target_os::TargetSystem;
use std::borrow::Cow;
use std::marker::PhantomData;
use crate::systemstate::target_os::*;
use libafl::prelude::StateInitializer;
//=========================== Debugging Feedback
/// A [`Feedback`] meant to dump the system-traces for debugging. Depends on [`QemuSystemStateObserver`]
#[derive(Debug)]
pub struct DumpSystraceFeedback<SYS>
where
    SYS: TargetSystem,
{
    // Name reported via `Named`.
    name: Cow<'static, str>,
    // Base path for dump files; `None` disables dumping entirely.
    dumpfile: Option<PathBuf>,
    phantom: PhantomData<SYS>,
    // Construction time, used to label dumps with elapsed hours.
    init_time: Instant,
    // Time of the last dump, for rate limiting; `None` before the first dump.
    last_dump: Option<Instant>,
}
// No feedback state to initialize for this debugging feedback.
impl<S, SYS> StateInitializer<S> for DumpSystraceFeedback<SYS> where SYS: TargetSystem {}
impl<EM, I, OT, S, SYS> Feedback<EM, I, OT, S> for DumpSystraceFeedback<SYS>
where
S: MaybeHasClientPerfMonitor + HasMetadata + HasCorpus<I>,
EM: EventFirer<I, S>,
OT: ObserversTuple<I, S>,
SYS: TargetSystem,
I: Input,
{
fn is_interesting(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &I,
_observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where {
match &self.dumpfile {
Some(s) => {
let time_has_come = self.last_dump.map(|t| Instant::now()-t > Duration::from_secs(600)).unwrap_or(true);
if time_has_come {
self.last_dump = Some(Instant::now());
// Try dumping the worst case
let casename = s.with_file_name(&(s.file_stem().unwrap().to_str().unwrap().to_owned()+&format!("_at_{}h", (Instant::now()-self.init_time).as_secs()/3600))).with_extension("case");
let corpus = state.corpus();
let mut worst = Duration::new(0,0);
let mut worst_input = None;
for i in 0..corpus.count() {
let tc = corpus.get(corpus.nth(i.into())).expect("Could not get element from corpus").borrow();
if worst < tc.exec_time().expect("Testcase missing duration") {
worst_input = Some(tc.input().as_ref().unwrap().clone());
worst = tc.exec_time().expect("Testcase missing duration");
}
}
if let Some(wi) = worst_input {
wi.to_file(casename).expect("Could not dump testcase");
}
// Try dumping the current case
let tracename = s.with_extension("trace.ron");
let trace = state
.metadata::<SYS::TraceData>()
.expect("TraceData not found");
std::fs::write(
tracename,
ron::to_string(trace)
.expect("Error serializing hashmap"),
)
.expect("Can not dump to file");
}
}
Option::None => {
()
}
};
Ok(false)
}
}
impl<SYS> Named for DumpSystraceFeedback<SYS>
where
    SYS: TargetSystem,
{
    /// The feedback's registered name ("Dumpsystemstate").
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl<SYS> DumpSystraceFeedback<SYS>
where
    SYS: TargetSystem,
{
    /// Creates a new [`DumpSystraceFeedback`] that never writes dump files.
    #[allow(unused)]
    pub fn new() -> Self {
        // Delegate so the two constructors cannot drift apart.
        Self::with_dump(None)
    }
    /// Creates a [`DumpSystraceFeedback`]. If `dumpfile` is `Some`, the worst
    /// case and the current trace are periodically dumped to paths derived
    /// from it; `None` disables dumping.
    #[allow(unused)]
    pub fn with_dump(dumpfile: Option<PathBuf>) -> Self {
        Self {
            name: Cow::from("Dumpsystemstate".to_string()),
            dumpfile,
            phantom: PhantomData,
            init_time: std::time::Instant::now(),
            last_dump: None,
        }
    }
}
#[derive(Debug, Default)]
pub struct SystraceErrorFeedback<SYS>
where
    SYS: TargetSystem,
{
    // Name reported via `Named`.
    name: Cow<'static, str>,
    // Whether flagged traces are reported as interesting.
    dump_case: bool,
    // Remaining report budget; `None` disables reporting entirely.
    max_reports: Option<usize>,
    phantom: std::marker::PhantomData<SYS>,
}
// No feedback state to initialize.
impl<S, SYS> StateInitializer<S> for SystraceErrorFeedback<SYS> where SYS: TargetSystem {}
impl<EM, I, OT, S, SYS> Feedback<EM, I, OT, S> for SystraceErrorFeedback<SYS>
where
S: MaybeHasClientPerfMonitor + HasMetadata,
EM: EventFirer<I, S>,
OT: ObserversTuple<I, S>,
SYS: TargetSystem,
{
fn is_interesting(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &I,
_observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where {
#[cfg(feature = "trace_stg")]
{
if let Some(m) = self.max_reports {
if m <= 0 {
return Ok(false);
}
let need_to_debug = state
.metadata::<SYS::TraceData>()
.expect("TraceData not found")
.need_to_debug();
if need_to_debug {
self.max_reports = Some(m - 1);
}
return Ok(self.dump_case && need_to_debug);
} else {
return Ok(false);
}
}
#[cfg(not(feature = "trace_stg"))]
{
return Ok(false);
}
}
}
impl<SYS> Named for SystraceErrorFeedback<SYS>
where
    SYS: TargetSystem,
{
    /// The feedback's registered name ("SystraceErrorFeedback").
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl<SYS> SystraceErrorFeedback<SYS>
where
    SYS: TargetSystem,
{
    /// Creates a new [`SystraceErrorFeedback`].
    ///
    /// `dump_case`: report flagged traces as interesting.
    /// `max_reports`: total report budget; `None` disables reporting.
    #[must_use]
    pub fn new(dump_case: bool, max_reports: Option<usize>) -> Self {
        Self {
            name: Cow::from(String::from("SystraceErrorFeedback")),
            dump_case,
            max_reports,
            phantom: std::marker::PhantomData,
        }
    }
}

View File

@ -0,0 +1,284 @@
use hashbrown::HashMap;
use libafl_bolts::prelude::{SerdeAny, SerdeAnyMap};
use libafl_qemu::{elf::EasyElf, read_user_reg_unchecked, GuestAddr, GuestPhysAddr};
use std::{cmp::min, hash::{DefaultHasher, Hash, Hasher}, ops::Range};
use crate::{
fuzzer::{DO_NUM_INTERRUPT, FIRST_INT},
time::clock::QEMU_ISNS_PER_USEC,
};
use super::ExecInterval;
//============================= API symbols
/// Read ELF program headers to resolve a virtual address to its physical load
/// address. Returns `vaddr` unchanged when no loadable segment contains it.
fn virt2phys(vaddr: GuestPhysAddr, tab: &EasyElf) -> GuestPhysAddr {
    for i in &tab.goblin().program_headers {
        if i.vm_range().contains(&vaddr.try_into().unwrap()) {
            // Rebase from the segment's virtual to its physical base address.
            let ret = vaddr - TryInto::<GuestPhysAddr>::try_into(i.p_vaddr).unwrap()
                + TryInto::<GuestPhysAddr>::try_into(i.p_paddr).unwrap();
            // 2-byte align the result — presumably strips the ARM/Thumb
            // interworking bit; TODO confirm.
            return ret - (ret % 2);
        }
    }
    vaddr
}
/// Lookup a symbol in the ELF file, optionally resolve segment offsets
///
/// # Panics
/// Panics if the symbol cannot be resolved.
pub fn load_symbol(elf: &EasyElf, symbol: &str, do_translation: bool) -> GuestAddr {
    // `unwrap_or_else` avoids building the panic message on the success path
    // (clippy::expect_fun_call).
    try_load_symbol(elf, symbol, do_translation)
        .unwrap_or_else(|| panic!("Symbol {} not found", symbol))
}
/// Lookup a symbol in the ELF file, optionally resolve segment offsets
///
/// Returns `None` if the symbol is unknown. With `do_translation`, the
/// resolved virtual address is translated to its physical load address.
pub fn try_load_symbol(elf: &EasyElf, symbol: &str, do_translation: bool) -> Option<GuestAddr> {
    let ret = elf.resolve_symbol(symbol, 0);
    if do_translation {
        // `Option::map_or(ret, None, |x| Some(..))` simplified to `map`.
        ret.map(|x| virt2phys(x as GuestPhysAddr, elf) as GuestAddr)
    } else {
        ret
    }
}
/// Try looking up the address range of a function in the ELF file
///
/// The end of the range is the start address of the next function symbol
/// (sorted by address). Returns `None` if the symbol is unknown, has no
/// address, or is the last function in the image.
pub fn get_function_range(elf: &EasyElf, symbol: &str) -> Option<std::ops::Range<GuestAddr>> {
    let gob = elf.goblin();
    // All function symbols sorted by start address, used to find the symbol's end.
    let mut funcs: Vec<_> = gob.syms.iter().filter(|x| x.is_function()).collect();
    funcs.sort_unstable_by(|x, y| x.st_value.cmp(&y.st_value));
    for sym in &gob.syms {
        if let Some(sym_name) = gob.strtab.get_at(sym.st_name) {
            if sym_name == symbol {
                if sym.st_value == 0 {
                    // Undefined symbol (no address): no range.
                    return None;
                } else {
                    //#[cfg(cpu_target = "arm")]
                    // Required because of arm interworking addresses aka bit(0) for thumb mode
                    let addr = (sym.st_value as GuestAddr) & !(0x1 as GuestAddr);
                    //#[cfg(not(cpu_target = "arm"))]
                    //let addr = sym.st_value as GuestAddr;
                    // look for first function after addr
                    let sym_end = funcs.iter().find(|x| x.st_value > sym.st_value);
                    if let Some(sym_end) = sym_end {
                        // println!("{} {:#x}..{} {:#x}", gob.strtab.get_at(sym.st_name).unwrap_or(""),addr, gob.strtab.get_at(sym_end.st_name).unwrap_or(""),sym_end.st_value & !0x1);
                        // End is also stripped of the Thumb bit.
                        return Some(addr..((sym_end.st_value & !0x1) as GuestAddr));
                    }
                    // No function after this one: end address unknown.
                    return None;
                };
            }
        }
    }
    return None;
}
/// Check if an address is in any of the ranges
///
/// Returns the first range (in list order) containing `addr`, or `None`.
/// Takes a slice so callers can pass either a `Vec` or an array borrow.
pub fn in_any_range<'a>(
    ranges: &'a [(String, Range<u32>)],
    addr: GuestAddr,
) -> Option<&'a std::ops::Range<GuestAddr>> {
    ranges.iter().map(|(_, r)| r).find(|r| r.contains(&addr))
}
//============================= QEMU related utility functions
/// Read QEMU's current raw instruction count (icount) for CPU 0.
pub fn get_icount(emulator: &libafl_qemu::Qemu) -> u64 {
    unsafe {
        // TODO: investigate why can_do_io is not set sometimes, as this is just a workaround
        let c = emulator.cpu_from_index(0).expect("CPU 0 not found");
        // Temporarily force `can_do_io` so `icount_get_raw()` can be called,
        // then restore the previous value.
        let can_do_io = (*c.raw_ptr()).neg.can_do_io;
        (*c.raw_ptr()).neg.can_do_io = true;
        let r = libafl_qemu::sys::icount_get_raw();
        (*c.raw_ptr()).neg.can_do_io = can_do_io;
        r
    }
}
/// Decode a fuzz-input byte buffer into sorted interrupt trigger times.
///
/// Reads up to `DO_NUM_INTERRUPT` little-endian `u32` ticks from `buf`
/// (stopping at the first incomplete 4-byte chunk). Values below `FIRST_INT`
/// are zeroed (disabled). The list is sorted; an entry closer to its
/// predecessor than the minimum inter-arrival time is zeroed and the list
/// re-sorted. `config` is `(interrupt source index, min inter-arrival in us)`.
pub fn input_bytes_to_interrupt_times(buf: &[u8], config: (usize, u32)) -> Vec<u32> {
    let len = buf.len();
    let mut ret = Vec::with_capacity(min(DO_NUM_INTERRUPT, len / 4));
    for i in 0..DO_NUM_INTERRUPT {
        // Stop once the buffer no longer holds a full 4-byte entry.
        if len < (i + 1) * 4 {
            break;
        }
        let mut buf4b = [0u8; 4];
        buf4b.copy_from_slice(&buf[i * 4..(i + 1) * 4]);
        let mut start_tick = u32::from_le_bytes(buf4b);
        if start_tick < FIRST_INT {
            start_tick = 0;
        }
        ret.push(start_tick);
    }
    ret.sort_unstable();
    // obey the minimum inter arrival time while maintaining the sort
    for i in 0..ret.len() {
        if ret[i] == 0 {
            continue;
        }
        // Only the immediate successor needs checking: zeroed entries move to
        // the front on re-sort. (The original inner `for j` loop always broke
        // on its first iteration, so it reduced to this single comparison.)
        if i + 1 < ret.len()
            && ret[i + 1] - ret[i] < (config.1 as f32 * QEMU_ISNS_PER_USEC) as u32
        {
            // ret[j] = u32::saturating_add(ret[i],config.1 * QEMU_ISNS_PER_USEC);
            ret[i + 1] = 0; // remove the interrupt
            ret.sort_unstable();
        }
    }
    ret
}
/// Serialize interrupt ticks back into raw input bytes: each tick becomes
/// four little-endian bytes, order preserved (inverse of
/// `input_bytes_to_interrupt_times`'s parsing step).
pub fn interrupt_times_to_input_bytes(interrupt_times: &[u32]) -> Vec<u8> {
    interrupt_times
        .iter()
        .flat_map(|t| t.to_le_bytes())
        .collect()
}
/// Resolve the actual return address for a link-register value that may hold
/// a Cortex-M EXC_RETURN magic value (0xFFFFFFFx): in that case the real PC
/// was stacked by exception entry and is read back from the stack frame.
pub fn read_rec_return_stackframe(emu: &libafl_qemu::Qemu, lr: GuestAddr) -> GuestAddr {
    // Clear bit 0 so both spellings of an EXC_RETURN value (e.g. F8/F9) match.
    let lr_ = lr & u32::MAX - 1;
    if lr_ == 0xfffffffc || lr_ == 0xFFFFFFF8 || lr_ == 0xFFFFFFF0 {
        // if 0xFFFFFFF0/1 0xFFFFFFF8/9 -> "main stack" MSP
        let mut buf = [0u8; 4];
        let sp: GuestAddr = if lr_ == 0xfffffffc || lr_ == 0xFFFFFFF0 {
            // PSP
            // NOTE(review): per ARM's EXC_RETURN encoding 0xFFFFFFF1
            // (masked 0xFFFFFFF0) returns to handler mode on the MSP, not the
            // PSP — confirm this grouping is intentional.
            read_user_reg_unchecked(emu) as u32
        } else {
            emu.read_reg(13).unwrap()
        };
        // Stacked frame layout: r0,r1,r2,r3,r12,lr,pc — PC at offset 0x18.
        let ret_pc = sp + 0x18; // https://developer.arm.com/documentation/dui0552/a/the-cortex-m3-processor/exception-model/exception-entry-and-return
        emu.read_mem(ret_pc, buf.as_mut_slice())
            .expect("Failed to read return address");
        return u32::from_le_bytes(buf);
        // elseif 0xfffffffc/d
    } else {
        return lr;
    };
}
//============================= Tracing related utility functions
/// Fetch the metadata entry of type `T`, applying `update` to it when it
/// already exists; otherwise insert the value produced by `default`.
/// Exactly one of the two closures runs. Returns the (possibly new) entry.
pub fn metadata_insert_or_update_get<T>(
    metadata: &mut SerdeAnyMap,
    default: impl FnOnce() -> T,
    update: impl FnOnce(&mut T),
) -> &mut T
where
    T: SerdeAny,
{
    let already_present = metadata.contains::<T>();
    let entry = metadata.get_or_insert_with(default);
    if already_present {
        update(entry);
    }
    entry
}
/// Build an ABB-profile from a stretch of intervals
/// returns mapping: task_name -> (abb_addr -> (interval_count, exec_count, exec_time, woet))
#[allow(unused)]
pub fn abb_profile(
    mut intervals: Vec<ExecInterval>,
) -> HashMap<String, HashMap<u32, (usize, usize, u64, u64)>> {
    let mut ret: HashMap<String, HashMap<u32, (usize, usize, u64, u64)>> = HashMap::new();
    intervals.sort_by_key(|x| x.get_task_name_unchecked());
    intervals
        .chunk_by_mut(|x, y| x.get_task_name_unchecked() == y.get_task_name_unchecked())
        // Iterate over all tasks
        .for_each(|intv_of_task| {
            // Iterate over all intervals of this task
            intv_of_task.sort_by_key(|y| y.abb.as_ref().unwrap().start);
            // Iterate over each abb of this task
            let mut inter_per_abb_of_task: Vec<&mut [ExecInterval]> = intv_of_task
                .chunk_by_mut(|y, z| y.abb.as_ref().unwrap().start == z.abb.as_ref().unwrap().start)
                .collect();
            // arrange the abbs by their start address
            inter_per_abb_of_task
                .iter_mut()
                .for_each(|ivs_of_abb_of_task| {
                    ivs_of_abb_of_task.sort_by_key(|y| y.abb.as_ref().unwrap().instance_id)
                });
            // find the woet for this abb: per instance id sum the exec time,
            // then keep the maximal sum per abb start address
            let abb_woet: HashMap<GuestAddr, u64> = inter_per_abb_of_task
                .iter()
                .map(|ivs_of_abb_of_task| {
                    // group intervals by id, sum up the exec time of the abb instance
                    ivs_of_abb_of_task
                        .chunk_by(
                            |y, z| {
                                y.abb.as_ref().unwrap().instance_id
                                    == z.abb.as_ref().unwrap().instance_id
                            },
                        )
                        .map(|intv_of_abb_with_id| {
                            (
                                intv_of_abb_with_id[0].abb.as_ref().unwrap().start,
                                intv_of_abb_with_id
                                    .iter()
                                    .map(|z| z.get_exec_time())
                                    .sum::<u64>(),
                            )
                        })
                        .max_by_key(|x| x.1)
                        .unwrap()
                })
                .collect();
            // Record (interval_count, completed_exec_count, total_exec_time, woet)
            // per abb; the entry API replaces the former duplicated insert logic
            // in a match on `get_mut`.
            inter_per_abb_of_task.into_iter().for_each(|y| {
                let abb_start = y[0].abb.as_ref().unwrap().start;
                let stats = (
                    y.len(),
                    y.iter().filter(|x| x.is_abb_end()).count(),
                    y.iter().map(|z| z.get_exec_time()).sum::<u64>(),
                    abb_woet[&abb_start],
                );
                ret.entry(y[0].get_task_name_unchecked())
                    .or_default()
                    .insert(abb_start, stats);
            });
        });
    ret
}
/// Reborrow a mutable reference as a shared one (explicit downgrade helper).
pub fn unmut<T>(x: &mut T) -> &T {
    x
}
/// Hash any `Hash`-able value with the std `DefaultHasher` and return the
/// resulting 64-bit digest.
pub fn get_generic_hash<H>(input: &H) -> u64
where
    H: Hash,
{
    let mut hasher = DefaultHasher::default();
    Hash::hash(input, &mut hasher);
    hasher.finish()
}

View File

@ -0,0 +1,344 @@
//! systemstate refers to the state of a FreeRTOS fuzzing target
use std::collections::hash_map::DefaultHasher;
use std::fmt;
use hashbrown::HashSet;
use libafl_bolts::HasRefCnt;
use libafl_qemu::GuestAddr;
use std::hash::Hasher;
use std::hash::Hash;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use itertools::Itertools;
pub mod helpers;
pub mod feedbacks;
pub mod schedulers;
pub mod stg;
pub mod mutational;
pub mod report;
pub mod target_os;
//============================= Struct definitions
/// Kind of capture point observed while tracing; each variant notes which of
/// the (src, dst) pair it carries.
///
/// The original trailing `///` comments attached to the *following* variant,
/// documenting every variant with its predecessor's text — fixed by placing
/// each doc before its variant.
#[derive(Debug, Default, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Hash)]
pub enum CaptureEvent {
    /// src,dst
    APIStart,
    /// src,dst
    APIEnd,
    /// _,dst
    ISRStart,
    /// src,_
    ISREnd,
    /// src,_
    End,
    #[default]
    Undefined,
}
/*
Hierarchy of tracing data:
- RawFreeRTOSSystemState: Raw data from Qemu, represents a particular instant
- ReducedFreeRTOSSystemState: Generalized state of the system, without execution context
- ExecInterval: Some interval of execution between instants
- AtomicBasicBlock: A single-entry multiple-exit region between api calls. May be referenced in multiple intervals.
- RTOSJob: A single execution of a task, records the place and input read
- RTOSTask: Generalized Job instance, records the worst inputs seen so far
*/
// ============================= Interval info
// #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)]
// pub enum ExecLevel {
// APP = 0,
// API = 1,
// ISR = 2,
// }
/// Some interval of execution between two captured instants.
#[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)]
pub struct ExecInterval {
    /// Tick at which the interval began.
    pub start_tick: u64,
    /// Tick at which the interval ended.
    pub end_tick: u64,
    /// Id (hash) of the system state at the start (see `get_hash_index`).
    pub start_state: u64,
    /// Id (hash) of the system state at the end.
    pub end_state: u64,
    /// Capture event (and its name) that opened the interval.
    pub start_capture: (CaptureEvent, String),
    /// Capture event (and its name) that closed the interval.
    pub end_capture: (CaptureEvent, String),
    /// Execution level — presumably 0=APP, 1=API, 2=ISR per the commented-out
    /// `ExecLevel` above; confirm.
    pub level: u8,
    // tick_spend_preempted: u64,
    /// The atomic basic block executed in this interval, once resolved.
    pub abb: Option<AtomicBasicBlock>
}
impl ExecInterval {
    /// Wall-tick duration of this interval.
    pub fn get_exec_time(&self) -> u64 {
        self.end_tick - self.start_tick //-self.tick_spend_preempted
    }
    /// An interval is valid unless both tick stamps were zeroed by `invaildate`.
    pub fn is_valid(&self) -> bool {
        self.start_tick != 0 || self.end_tick != 0
    }
    /// Mark this interval as invalid by zeroing both tick stamps.
    /// (Name keeps the historical typo; renaming would break callers.)
    pub fn invaildate(&mut self) {
        self.start_tick = 0;
        self.end_tick = 0;
    }
    /// Attach this interval to the later one, keep a record of the time spend preempted
    // pub fn try_unite_with_later_interval(&mut self, later_interval : &mut Self) -> bool {
    //     if self.end_state!=later_interval.start_state || self.abb!=later_interval.abb || !self.is_valid() || !later_interval.is_valid() {
    //         return false;
    //     }
    //     // assert_eq!(self.end_state, later_interval.start_state);
    //     // assert_eq!(self.abb, later_interval.abb);
    //     later_interval.tick_spend_preempted += self.tick_spend_preempted + (later_interval.start_tick-self.end_tick);
    //     later_interval.start_tick = self.start_tick;
    //     later_interval.start_state = self.start_state;
    //     self.invaildate();
    //     return true;
    // }
    /// Lookup index for the STG node of this interval:
    /// (state hash at entry, hash of the executed ABB).
    ///
    /// # Panics
    /// Panics if no ABB has been attached yet.
    pub fn get_hash_index(&self) -> (u64, u64) {
        (
            self.start_state,
            self.abb.as_ref().expect("ABB not set").get_hash(),
        )
    }
    /// Name of the task this interval ran in, if the ABB records one.
    /// (`and_then` replaces the former `map(..).flatten()`.)
    pub fn get_task_name(&self) -> Option<String> {
        self.abb.as_ref().and_then(|x| x.instance_name.clone())
    }
    /// Like [`Self::get_task_name`], but falls back to `"unknown"`.
    pub fn get_task_name_unchecked(&self) -> String {
        self.get_task_name().unwrap_or_else(|| "unknown".to_string())
    }
    /// Whether the event that closed this interval also terminates its ABB.
    pub fn is_abb_end(&self) -> bool {
        matches!(
            self.end_capture.0,
            CaptureEvent::APIStart
                | CaptureEvent::APIEnd
                | CaptureEvent::ISREnd
                | CaptureEvent::End
        )
    }
}
// ============================= Atomic Basic Block
/// A single-entry multiple-exit region between api calls. May be referenced in multiple intervals.
#[derive(Default, Serialize, Deserialize, Clone)]
pub struct AtomicBasicBlock {
    /// Entry address of the region.
    start: GuestAddr,
    /// Possible exit addresses.
    ends: HashSet<GuestAddr>,
    /// Execution level (see `ExecInterval::level`).
    level: u8,
    /// Distinguishes repeated executions of the same block
    /// (deliberately ignored by `eq`/`hash`, see the impls below).
    instance_id: usize,
    /// Name of the task this instance belongs to, if known.
    instance_name: Option<String>,
}
impl PartialEq for AtomicBasicBlock {
    /// Structural equality: same region (start/ends), level and task name.
    /// `instance_id` is deliberately excluded — consistent with `Hash`.
    fn eq(&self, other: &Self) -> bool {
        (self.start, self.level, &self.instance_name, &self.ends)
            == (other.start, other.level, &other.instance_name, &other.ends)
    }
}
impl Eq for AtomicBasicBlock {}
impl Hash for AtomicBasicBlock {
fn hash<H: Hasher>(&self, state: &mut H) {
// Use a combination of the start address and the set of ending addresses to compute the hash value
self.start.hash(state);
let mut keys : Vec<_> = self.ends.iter().collect();
keys.sort();
self.level.hash(state);
self.instance_name.hash(state);
keys.hash(state);
}
}
impl fmt::Display for AtomicBasicBlock {
    /// Human-readable form, e.g. `ABB task { level: 1, start: 0x100, ends: [0x120]}`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ends_str = String::new();
        for end in &self.ends {
            // `{:#x}` already emits the `0x` prefix; the former `0x{:#x}`
            // printed a doubled "0x0x…".
            ends_str.push_str(&format!("{:#x}, ", end));
        }
        write!(
            f,
            "ABB {} {{ level: {}, start: {:#x}, ends: [{}]}}",
            &self.instance_name.as_ref().unwrap_or(&"".to_string()),
            self.level,
            self.start,
            ends_str.trim().trim_matches(',')
        )
    }
}
impl fmt::Debug for AtomicBasicBlock {
    /// Same shape as `Display`; the ends already used the correct `{:#x}`
    /// here — only the `start: 0x{:#x}` double-prefix is fixed.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut ends_str = String::new();
        for end in &self.ends {
            ends_str.push_str(&format!("{:#x}, ", end));
        }
        write!(
            f,
            "ABB {} {{ level: {}, start: {:#x}, ends: [{}]}}",
            &self.instance_name.as_ref().unwrap_or(&"".to_string()),
            self.level,
            self.start,
            ends_str.trim().trim_matches(',')
        )
    }
}
impl PartialOrd for AtomicBasicBlock {
    /// Delegates to the total order defined by [`Ord`].
    fn partial_cmp(&self, other: &AtomicBasicBlock) -> Option<std::cmp::Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl Ord for AtomicBasicBlock {
fn cmp(&self, other: &AtomicBasicBlock) -> std::cmp::Ordering {
if self.start.cmp(&other.start) == std::cmp::Ordering::Equal {
if self.level.cmp(&other.level) != std::cmp::Ordering::Equal {
return self.level.cmp(&other.level);
}
// If the start addresses are equal, compare by 'ends'
let end1 = if self.ends.len() == 1 { *self.ends.iter().next().unwrap() as u64 } else {
let mut temp = self.ends.iter().collect::<Vec<_>>().into_iter().collect::<Vec<&GuestAddr>>();
temp.sort_unstable();
let mut h = DefaultHasher::new();
temp.hash(&mut h);
h.finish()
};
let end2 = if other.ends.len() == 1 { *self.ends.iter().next().unwrap() as u64 } else {
let mut temp = other.ends.iter().collect::<Vec<_>>().into_iter().collect::<Vec<&GuestAddr>>();
temp.sort_unstable();
let mut h = DefaultHasher::new();
temp.hash(&mut h);
h.finish()
};
end1.cmp(&end2)
} else {
// If the start addresses are not equal, compare by 'start'
self.start.cmp(&other.start)
}
}
}
impl AtomicBasicBlock {
    /// 64-bit digest of this ABB via its custom `Hash` impl.
    pub fn get_hash(&self) -> u64 {
        let mut hasher = DefaultHasher::new();
        Hash::hash(self, &mut hasher);
        hasher.finish()
    }
    /// Structural equality plus a matching `instance_id`.
    pub fn instance_eq(&self, other: &Self) -> bool {
        self.instance_id == other.instance_id && self == other
    }
    /// Entry address of this block.
    pub fn get_start(&self) -> GuestAddr {
        self.start
    }
}
// Register AtomicBasicBlock with LibAFL's SerdeAny machinery so it can be
// stored in (de)serializable metadata maps.
libafl_bolts::impl_serdeany!(AtomicBasicBlock);
// ============================= Job instances
/// A single execution of a task; records where it ran and the input it read.
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RTOSJob {
    /// Name of the task this job belongs to.
    pub name: String,
    /// Memory reads (address, byte) recorded during execution.
    pub mem_reads: Vec<(u32, u8)>,
    /// Tick at which the job was released.
    pub release: u64,
    /// Tick at which the job responded (see `response_time`).
    pub response: u64,
    /// Ticks actually spent executing.
    pub exec_ticks: u64,
    /// Execution ticks per ABB — presumably parallel to `abbs`; confirm.
    pub ticks_per_abb: Vec<u64>,
    /// Sequence of ABBs executed by this job.
    pub abbs: Vec<AtomicBasicBlock>,
    // Memoized hash over `abbs`; 0 means "not computed yet" (see `get_hash`).
    hash_cache: u64
}
impl PartialEq for RTOSJob {
    /// Job identity is the executed ABB sequence only; timing, reads and
    /// names are ignored.
    fn eq(&self, other: &Self) -> bool {
        self.abbs == other.abbs
    }
}
impl Eq for RTOSJob {}
impl Hash for RTOSJob {
    /// Hash over the ABB sequence — consistent with `eq`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.abbs.hash(state);
    }
}
impl RTOSJob {
    /// Hash over the ABB sequence, memoized in `hash_cache`.
    /// A stored value of 0 means "not computed yet".
    pub fn get_hash(&mut self) -> u64 {
        if self.hash_cache == 0 {
            let mut hasher = DefaultHasher::new();
            self.hash(&mut hasher);
            self.hash_cache = hasher.finish();
        }
        self.hash_cache
    }
    /// Same hash as [`Self::get_hash`], but usable without `&mut self`
    /// (recomputes when the cache is still empty).
    pub fn get_hash_cached(&self) -> u64 {
        match self.hash_cache {
            0 => {
                let mut hasher = DefaultHasher::new();
                self.hash(&mut hasher);
                hasher.finish()
            }
            cached => cached,
        }
    }
    /// Ticks between release and response.
    pub fn response_time(&self) -> u64 {
        self.response - self.release
    }
}
// ============================= Generalized job instances
/// Generalized job instance of a task; records the worst inputs/times seen so far.
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RTOSTask {
    /// Task name.
    pub name: String,
    /// Input bytes of the worst-observed-execution-time run.
    pub woet_bytes: Vec<u8>,
    /// Worst observed execution time in ticks.
    pub woet_ticks: u64,
    /// Per-ABB execution ticks of the worst run.
    pub woet_per_abb: Vec<u64>,
    /// ABB sequence identifying this task variant.
    pub abbs: Vec<AtomicBasicBlock>,
    /// Worst observed response time in ticks.
    pub wort_ticks: u64,
    // Memoized hash over `abbs`; 0 means "not computed yet".
    hash_cache: u64
}
impl PartialEq for RTOSTask {
    /// Identity is the ABB sequence only (mirrors `RTOSJob`).
    fn eq(&self, other: &Self) -> bool {
        self.abbs == other.abbs
    }
}
impl Eq for RTOSTask {}
impl Hash for RTOSTask {
    /// Hash over the ABB sequence — consistent with `eq` and with `RTOSJob`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.abbs.hash(state);
    }
}
impl RTOSTask {
    /// Hash over the ABB sequence, memoized in `hash_cache` (0 = not computed).
    pub fn get_hash(&mut self) -> u64 {
        if self.hash_cache == 0 {
            let mut s = DefaultHasher::new();
            self.hash(&mut s);
            self.hash_cache = s.finish();
        }
        self.hash_cache
    }
    /// Same hash as [`Self::get_hash`], but without mutating the cache.
    pub fn get_hash_cached(&self) -> u64 {
        if self.hash_cache == 0 {
            let mut s = DefaultHasher::new();
            self.hash(&mut s);
            s.finish()
        } else {
            self.hash_cache
        }
    }
    /// Update woet (time, inputs) and wort (time only) if the new instance is better
    pub fn try_update(&mut self, other: &RTOSJob) -> bool {
        assert_eq!(self.get_hash(), other.get_hash_cached());
        let mut ret = false;
        if other.exec_ticks > self.woet_ticks {
            self.woet_ticks = other.exec_ticks;
            self.woet_per_abb = other.ticks_per_abb.clone();
            // Bytes stored sorted by read address.
            // NOTE(review): `from_instance` keeps them in recording order
            // instead — confirm which ordering `map_bytes_onto` relies on.
            self.woet_bytes = other
                .mem_reads
                .iter()
                .sorted_by(|a, b| a.0.cmp(&b.0))
                .map(|x| x.1)
                .collect();
            ret = true;
        }
        if other.response_time() > self.wort_ticks {
            self.wort_ticks = other.response_time();
            ret = true;
        }
        ret
    }
    /// Seed a generalized task record from a single observed job.
    pub fn from_instance(input: &RTOSJob) -> Self {
        Self {
            name: input.name.clone(),
            // `u8` is `Copy`; the former per-byte `.clone()` was redundant.
            woet_bytes: input.mem_reads.iter().map(|x| x.1).collect(),
            woet_ticks: input.exec_ticks,
            woet_per_abb: input.ticks_per_abb.clone(),
            abbs: input.abbs.clone(),
            wort_ticks: input.response_time(),
            hash_cache: input.get_hash_cached(),
        }
    }
    /// Diff the recorded worst-case bytes against `input`'s reads, returning
    /// `(address - offset, byte)` pairs that differ (positionally, capped at
    /// `woet_bytes.len()` entries).
    pub fn map_bytes_onto(&self, input: &RTOSJob, offset: Option<u32>) -> Vec<(u32, u8)> {
        if input.mem_reads.is_empty() {
            return vec![];
        }
        let ret = input
            .mem_reads
            .iter()
            .take(self.woet_bytes.len())
            .enumerate()
            .filter_map(|(idx, (addr, oldbyte))| {
                if self.woet_bytes[idx] != *oldbyte {
                    Some((*addr - offset.unwrap_or_default(), self.woet_bytes[idx]))
                } else {
                    None
                }
            })
            .collect();
        // eprintln!("Mapped: {:?}", ret);
        ret
    }
}
// ============================= Per testcase metadata

View File

@ -0,0 +1,496 @@
//! The [`MutationalStage`] is the default stage used during fuzzing.
//! For the current input, it will perform a range of random mutations, and then run them in the executor.
use core::marker::PhantomData;
use std::cmp::{max, min};
use hashbrown::HashMap;
use itertools::Itertools;
use libafl_bolts::{rands::{
random_seed, Rand, StdRand
}, Named};
use libafl::{
common::{HasMetadata, HasNamedMetadata}, corpus::Testcase, events::{Event, EventFirer, EventWithStats, LogSeverity}, fuzzer::Evaluator, inputs::{HasMutatorBytes, HasTargetBytes, Input, Keyed, MultipartInput}, mark_feature_time, prelude::{new_hash_feedback, stats::{AggregatorOps, UserStats, UserStatsValue}, CorpusId, MutationResult, Mutator}, stages::{Restartable, Stage}, start_timer, state::{HasCorpus, HasExecutions, HasRand, MaybeHasClientPerfMonitor}, Error
};
use petgraph::{graph::NodeIndex, graph::{self, DiGraph}};
use crate::{time::clock::{IcHist, QEMU_ISNS_PER_USEC}, fuzzer::{DO_NUM_INTERRUPT, FIRST_INT, MAX_NUM_INTERRUPT}, systemstate::{stg::{STGFeedbackState, STGNodeMetadata}, CaptureEvent, ExecInterval}};
use libafl::state::HasCurrentTestcase;
use std::borrow::Cow;
use simple_moving_average::SMA;
use super::{helpers::{input_bytes_to_interrupt_times, interrupt_times_to_input_bytes}, stg::{STGEdge, STGNode}, target_os::TargetSystem, RTOSJob};
// pub static mut MINIMUM_INTER_ARRIVAL_TIME : u32 = 1000 /*us*/ * QEMU_ISNS_PER_USEC;
// one isn per 2**4 ns
// virtual insn/sec 62500000 = 1/16 GHz
// 1ms = 62500 insn
// 1us = 62.5 insn
//======================= Custom mutator
/// A node is an interrupt handler iff it has an incoming `ISRStart` edge.
fn is_interrupt_handler<SYS>(graph: &DiGraph<STGNode<SYS>, STGEdge>, node: NodeIndex) -> bool
where
    SYS: TargetSystem,
{
    // `node` already is a `NodeIndex`; the former `node as NodeIndex` self-cast
    // was redundant.
    graph
        .edges_directed(node, petgraph::Direction::Incoming)
        .any(|e| e.weight().event == CaptureEvent::ISRStart)
}
/// Whether `node` has an outgoing `ISRStart` edge to any handler other than
/// the FreeRTOS systick (`xPortSysTickHandler`).
fn has_interrupt_handler_non_systick<SYS>(graph: &DiGraph<STGNode<SYS>, STGEdge>, node: NodeIndex) -> bool
where
    SYS: TargetSystem,
{
    // Redundant `node as NodeIndex` self-cast removed.
    graph
        .edges_directed(node, petgraph::Direction::Outgoing)
        .any(|e| e.weight().event == CaptureEvent::ISRStart && e.weight().name != "xPortSysTickHandler")
}
/// A node qualifies for forcing new branches when it neither already has an
/// outgoing non-systick interrupt edge nor is an interrupt handler itself.
fn is_candidate_for_new_branches<SYS>(graph: &DiGraph<STGNode<SYS>, STGEdge>, node: NodeIndex) -> bool
where
    SYS: TargetSystem,
{
    let already_interrupted = has_interrupt_handler_non_systick(graph, node);
    let handler = is_interrupt_handler(graph, node);
    !(already_interrupted || handler)
}
// TODO: this can be much more efficient, if the graph stored snapshots of the state and input progress was tracked
/// Try to build a new interrupt-time vector that fires an extra interrupt
/// inside an execution interval whose STG node has no non-systick interrupt
/// edge yet (i.e. a spot where an interrupt might open a new branch).
/// Returns `None` when no such spot is found.
pub fn try_force_new_branches<SYS>(interrupt_ticks : &[u32], fbs: &STGFeedbackState<SYS>, meta: &STGNodeMetadata, config: (usize, u32)) -> Option<Vec<u32>>
where
    SYS: TargetSystem,
{
    let mut new = false;
    let mut new_interrupt_times = Vec::new();
    for (num,&interrupt_time) in interrupt_ticks.iter().enumerate() {
        // Earliest tick an extra interrupt may use without violating the
        // minimum inter-arrival time relative to the previous interrupt.
        let lower_bound = if num==0 {FIRST_INT} else {interrupt_ticks[num-1].saturating_add((config.1 as f32 * QEMU_ISNS_PER_USEC) as u32)};
        let next = if interrupt_ticks.len()>num+1 {interrupt_ticks[num+1]} else {u32::MAX};
        for exec_interval in meta.intervals().iter().filter(|x| x.start_tick >= lower_bound as u64 && x.start_tick < next as u64) {
            if !(exec_interval.start_capture.0==CaptureEvent::ISRStart) { // shortcut to skip interrupt handers without node lookup
                let node_index = fbs.state_abb_hash_index.get(&exec_interval.get_hash_index()).unwrap();
                if !has_interrupt_handler_non_systick(&fbs.graph, node_index.clone()) {
                    // NOTE(review): start + (end+start)/4 overshoots the
                    // interval for most ticks — was (end - start)/4 intended?
                    // TODO confirm.
                    let new_time = exec_interval.start_tick.saturating_add((exec_interval.end_tick+exec_interval.start_tick)/4);
                    new_interrupt_times.push(new_time.try_into().expect("ticks > u32"));
                    if (new_time + config.1 as u64) < next as u64 { // the new interrupt is not too close to the next one
                        new_interrupt_times.extend(interrupt_ticks.iter().skip(num).cloned());
                    } else { // the new interrupt is too close to the next one, skip the next one
                        new_interrupt_times.extend(interrupt_ticks.iter().skip(num+1).cloned());
                    }
                    new=true;
                    break;
                }
            }
        }
        if new {break;}
        new_interrupt_times.push(interrupt_time);
    }
    if new {Some(new_interrupt_times)} else {None}
}
/// Stage that mutates the interrupt arrival times of the current testcase and
/// re-runs promising variants through the executor.
#[derive(Clone, Debug)]
pub struct InterruptShiftStage<E, EM, Z, I, S, SYS> {
    #[allow(clippy::type_complexity)]
    phantom: PhantomData<(E, EM, Z, I, S, SYS)>,
    // (isr index, minimum inter-arrival time in µs) per mutable interrupt source
    interrup_config: Vec<(usize,u32)>,
    // moving average of the fraction of reruns that entered the corpus
    success: simple_moving_average::SingleSumSMA<f32, f32, 50>
}
impl<E, EM, Z, I, S, SYS> InterruptShiftStage<E, EM, Z, I, S, SYS>
{
    /// Create a stage from `(isr_index, min_inter_arrival_us)` pairs.
    /// Accepts a slice instead of `&Vec` — existing `&Vec` callers coerce.
    pub fn new(config: &[(usize, u32)]) -> Self {
        Self {
            phantom: PhantomData,
            interrup_config: config.to_vec(),
            success: simple_moving_average::SingleSumSMA::from_zero(1.0),
        }
    }
}
// Process-wide counters for stage statistics; read/written inside `unsafe`
// blocks, single fuzzer thread assumed.
static mut num_stage_execs : u64 = 0;
static mut sum_reruns : u64 = 0;
static mut sum_interesting_reruns : u64 = 0;
impl<E, EM, Z, I, S, SYS> InterruptShiftStage<E, EM, Z, I, S, SYS>
where
    EM: EventFirer<I, S>,
    S: HasNamedMetadata + HasExecutions,
    SYS: TargetSystem,
{
    /// Push the stage's global counters to the event manager as a user-stats
    /// string ("execs -> interesting/reruns percent").
    fn report_stats(&self, state: &mut S, manager: &mut EM) {
        unsafe {
            // Reads the `static mut` counters; single fuzzer thread assumed.
            let _ = manager.fire(
                state,
                EventWithStats::with_current_time(
                    Event::UpdateUserStats {
                        name: Cow::from("InterruptShiftStage"),
                        value: UserStats::new(
                            UserStatsValue::String(Cow::from(format!("{} -> {}/{} {:.1}% ", num_stage_execs, sum_interesting_reruns, sum_reruns, sum_interesting_reruns as f32 * 100.0 / sum_reruns as f32))),
                            AggregatorOps::None,
                        ),
                        phantom: PhantomData,
                    }, *state.executions()
                )
            );
        }
    }
}
impl<E, EM, Z, S, I, SYS> Stage<E, EM, S, Z> for InterruptShiftStage<E, EM, Z, MultipartInput<I, String>, S, SYS>
where
    S: HasRand + HasMetadata + HasNamedMetadata + HasExecutions + HasCorpus<MultipartInput<I, String>> + HasCurrentTestcase<MultipartInput<I, String>>,
    EM: EventFirer<MultipartInput<I, String>, S>,
    SYS: TargetSystem,
    I: Input + Default + HasMutatorBytes + libafl::inputs::ResizableMutator<u8>,
    Z: Evaluator<E, EM, MultipartInput<I, String>, S>,
{
    /// Mutate the per-ISR interrupt arrival times of the current testcase and
    /// re-evaluate it. With `mutate_stg`, one of three strategies is picked per
    /// attempt: full re-randomization, forcing an interrupt into an STG node
    /// without non-systick interrupt edges, or shifting existing interrupts
    /// into alternative trace intervals.
    fn perform(
        &mut self,
        fuzzer: &mut Z,
        executor: &mut E,
        state: &mut S,
        manager: &mut EM
    ) -> Result<(), Error>
    {
        if self.interrup_config.len() == 0 {return Ok(());} // configuration implies no interrupts
        let mut myrand = StdRand::new();
        myrand.set_seed(state.rand_mut().next());
        unsafe {num_stage_execs+=1;}
        let mut rerun_count = 0; // count how many times we rerun the executor
        let mut interesting_rerun_count = 0; // count how many reruns were interesting
        // Try many times to find a mutation that is not already in the corpus
        // (budget scales with the stage's recent success rate).
        let loopbound = max(1, (self.success.get_average()*100.0) as usize);
        for _ in 0..loopbound {
            // Choose which isr to mutate
            let interrup_config = match myrand.choose(&self.interrup_config) {
                Some(s) => s,
                Option::None => {
                    self.report_stats(state, manager);
                    return Ok(())
                }
            };
            let name = format!("isr_{}_times", interrup_config.0);
            // manager.log(state, LogSeverity::Info, format!("Mutation {}/{}", loopbound, loopcount))?;
            let curr_case : std::cell::Ref<Testcase<MultipartInput<_,_>>> = state.current_testcase()?;
            let curr_input = curr_case.input().as_ref().unwrap();
            let mut new_input : MultipartInput<_, _> = curr_input.clone();
            // Fetch (or lazily create) the multipart section holding this
            // ISR's interrupt times.
            let new_interrupt_part : &mut I = if Iterator::next(&mut new_input.with_key(&name)).is_some() {
                Iterator::next(&mut new_input.with_key_mut(&name)).unwrap()
            } else {
                new_input.append_part((String::from(&name), I::default())); Iterator::next(&mut new_input.with_key_mut(&name)).unwrap()
            }.1;
            let old_interrupt_times : Vec<u32> = input_bytes_to_interrupt_times(new_interrupt_part.mutator_bytes(), *interrup_config);
            let mut new_interrupt_times = Vec::with_capacity(MAX_NUM_INTERRUPT);
            let mut do_rerun = false;
            // if state.rand_mut().between(1, 100) <= 50 // only attempt the mutation half of the time
            {
                #[cfg(feature = "mutate_stg")]
                {
                    let metadata = state.metadata_map();
                    let maxtick = {metadata.get::<IcHist>().unwrap().1.0};
                    // Discard the old interrupt bytes; they are rebuilt below.
                    drop(new_interrupt_part.drain(..).collect::<Vec<u8>>());
                    {
                        let choice = myrand.between(1,100);
                        if choice <= 25 || *old_interrupt_times.get(0).unwrap_or(&u32::MAX) as u64 > maxtick { // 0.5*0.25 = 12.5% of the time fully randomize all interrupts
                            do_rerun = true;
                            let hist = metadata.get::<IcHist>().unwrap();
                            let maxtick : u64 = hist.1.0;
                            // let maxtick : u64 = (_input.exec_time().expect("No duration found").as_nanos() >> 4).try_into().unwrap();
                            for _ in 0..myrand.between(0,min(MAX_NUM_INTERRUPT, (maxtick as usize * 3) / (interrup_config.1 as usize * QEMU_ISNS_PER_USEC as usize * 2))) {
                                new_interrupt_times.push(myrand.between(0, min(maxtick, u32::MAX as u64) as usize).try_into().expect("ticks > u32"));
                            }
                        }
                        else if choice <= 75 { // 0.5 * 0.25 = 12.5% of cases
                            let feedbackstate = match state
                                .metadata::<STGFeedbackState<SYS>>() {
                                Ok(s) => s,
                                // NOTE: `Error` is an identifier pattern that
                                // matches anything (here: the Err case, since
                                // Ok matched first) — misleading but correct.
                                Error => {
                                    panic!("STGfeedbackstate not visible")
                                }
                            };
                            if let Some(meta) = curr_case.metadata_map().get::<STGNodeMetadata>() {
                                if let Some(t) = try_force_new_branches(&old_interrupt_times, feedbackstate, meta, *interrup_config) {
                                    do_rerun = true;
                                    new_interrupt_times=t;
                                }
                            }
                        }
                        else { // old version of the alternative search
                            new_interrupt_times = old_interrupt_times.clone();
                            let tmp = curr_case.metadata_map().get::<STGNodeMetadata>();
                            if tmp.is_some() {
                                let trace = tmp.expect("STGNodeMetadata not found");
                                // calculate hits and identify snippets
                                let mut last_m = false;
                                let mut marks : Vec<(&ExecInterval, usize, usize)>= vec![]; // 1: got interrupted, 2: interrupt handler
                                for i in 0..trace.intervals().len() {
                                    let curr = &trace.intervals()[i];
                                    let m = old_interrupt_times.iter().any(|x| (curr.start_tick..curr.end_tick).contains(&(*x as u64)));
                                    if m {
                                        marks.push((curr, i, 1));
                                        // println!("1: {}",curr.current_task.0.task_name);
                                    } else if last_m {
                                        marks.push((curr, i, 2));
                                        // println!("2: {}",curr.current_task.0.task_name);
                                    } else {
                                        marks.push((curr, i, 0));
                                    }
                                    last_m = m;
                                }
                                for i in 0..old_interrupt_times.len() {
                                    // bounds based on minimum inter-arrival time
                                    let mut lb = FIRST_INT;
                                    let mut ub : u32 = trace.intervals()[trace.intervals().len()-1].end_tick.try_into().expect("ticks > u32");
                                    if i > 0 {
                                        // use the new times, because changes to preceding timings are not accounted for yet
                                        lb = u32::saturating_add(new_interrupt_times[i-1], (interrup_config.1 as f32 * QEMU_ISNS_PER_USEC) as u32);
                                    }
                                    if i < old_interrupt_times.len()-1 {
                                        ub = u32::saturating_sub(new_interrupt_times[i+1], (interrup_config.1 as f32 * QEMU_ISNS_PER_USEC) as u32);
                                    }
                                    // get old hit and handler
                                    let old_hit = marks.iter().filter(
                                        |x| x.0.start_tick < (old_interrupt_times[i] as u64) && (old_interrupt_times[i] as u64) < x.0.end_tick
                                    ).next();
                                    let old_handler = match old_hit {
                                        Some(s) => if s.1 < old_interrupt_times.len()-1 && s.1 < marks.len()-1 {
                                            Some(marks[s.1+1])
                                        } else {None},
                                        None => None
                                    };
                                    // find reachable alternatives
                                    let alternatives : Vec<_> = marks.iter().filter(|x|
                                        x.2 != 2 &&
                                        (
                                            x.0.start_tick < (lb as u64) && (lb as u64) < x.0.end_tick
                                            || x.0.start_tick > (lb as u64) && x.0.start_tick < (ub as u64))
                                    ).collect();
                                    // in cases there are no alternatives
                                    if alternatives.len() == 0 {
                                        if old_hit.is_none() {
                                            // choose something random
                                            let untouched : Vec<_> = marks.iter().filter(
                                                |x| x.2 == 0
                                            ).collect();
                                            if untouched.len() > 0 {
                                                let tmp = old_interrupt_times[i];
                                                let choice = myrand.choose(untouched).unwrap();
                                                new_interrupt_times[i] = myrand.between(choice.0.start_tick as usize, choice.0.end_tick as usize)
                                                .try_into().expect("tick > u32");
                                                do_rerun = true;
                                            }
                                            // println!("no alternatives, choose random i: {} {} -> {}",i,tmp,interrupt_offsets[i]);
                                            continue;
                                        } else {
                                            // do nothing
                                            // println!("no alternatives, do nothing i: {} {}",i,interrupt_offsets[i]);
                                            continue;
                                        }
                                    }
                                    let replacement = myrand.choose(alternatives).unwrap();
                                    if (old_hit.map_or(false, |x| x == replacement)) {
                                        // use the old value
                                        // println!("chose old value, do nothing i: {} {}",i,interrupt_offsets[i]);
                                        continue;
                                    } else {
                                        let extra = if (old_hit.map_or(false, |x| x.1 < replacement.1)) {
                                            // move further back, respect old_handler
                                            old_handler.map_or(0, |x| x.0.end_tick - x.0.start_tick)
                                        } else { 0 };
                                        // let tmp = new_interrupt_times[i];
                                        new_interrupt_times[i] = (myrand.between(replacement.0.start_tick as usize,
                                            replacement.0.end_tick as usize) + extra as usize).try_into().expect("ticks > u32");
                                        // println!("chose new alternative, i: {} {} -> {}",i,tmp, interrupt_offsets[i]);
                                        do_rerun = true;
                                    }
                                }
                                // println!("Mutator: {:?}", numbers);
                                // let mut start : u32 = 0;
                                // for i in 0..numbers.len() {
                                //     let tmp = numbers[i];
                                //     numbers[i] = numbers[i]-start;
                                //     start = tmp;
                                // }
                                // NOTE(review): this path also reaches the
                                // unconditional extend below, appending the
                                // serialized times twice — confirm intentional.
                                new_interrupt_part.extend(&interrupt_times_to_input_bytes(&new_interrupt_times));
                            }
                        }
                    }
                }
                #[cfg(not(feature = "mutate_stg"))]
                {
                    if myrand.between(1,100) <= 25 { // we have no hint if interrupt times will change anything
                        do_rerun = true;
                        let metadata = state.metadata_map();
                        let maxtick = {metadata.get::<IcHist>().unwrap().1.0};
                        new_interrupt_times = Vec::with_capacity(MAX_NUM_INTERRUPT);
                        for i in 0..myrand.between(0,min(MAX_NUM_INTERRUPT, (maxtick as usize * 3) / (interrup_config.1 as usize * QEMU_ISNS_PER_USEC as usize * 2))) {
                            new_interrupt_times.push(myrand.between(0, min(maxtick, u32::MAX as u64) as usize).try_into().expect("ticks > u32"));
                        }
                    }
                }
                new_interrupt_part.extend(&interrupt_times_to_input_bytes(&new_interrupt_times));
            }
            drop(curr_case);
            if do_rerun {
                rerun_count+=1;
                let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, &new_input)?;
                if corpus_idx.is_some() { unsafe{interesting_rerun_count+=1;}} else
                if corpus_idx.is_none() && loopbound<=0 { break;}
            } else {if loopbound<=0 {break;}}
        }
        unsafe {
            sum_reruns+=rerun_count;
            sum_interesting_reruns+=interesting_rerun_count;
            if rerun_count>0 {self.success.add_sample(interesting_rerun_count as f32 / rerun_count as f32);}
        }
        self.report_stats(state, manager);
        Ok(())
    }
}
impl<E, EM, Z, S, I, SYS> Restartable<S> for InterruptShiftStage<E, EM, Z, MultipartInput<I, String>, S, SYS>
{
    /// Stateless stage: nothing to clear between runs.
    fn clear_progress(&mut self, _state: &mut S) -> Result<(), libafl_bolts::Error> {
        Ok(())
    }
    /// Stateless stage: never requests a restart.
    fn should_restart(&mut self, _state: &mut S) -> Result<bool, libafl_bolts::Error> {
        Ok(false)
    }
}
/// Stub: intended to splice worst-case snippets into `bytes`.
/// Currently unimplemented — the loop body is `todo!()`, so calling this with
/// a non-empty interval list panics; with an empty list it returns `None`.
pub fn try_worst_snippets<SYS>(bytes : &[u8], fbs: &STGFeedbackState<SYS>, meta: &STGNodeMetadata) -> Option<Vec<u8>>
where
    SYS: TargetSystem,
{
    let mut new = false;
    let mut ret = Vec::new();
    for (num,interval) in meta.intervals().iter().enumerate() {
        todo!();
    }
    if new {Some(ret)} else {None}
}
// Process-wide counters for snippet-stage statistics; accessed inside
// `unsafe` blocks, single fuzzer thread assumed.
static mut num_snippet_stage_execs : u64 = 0;
static mut num_snippet_rerun : u64 = 0;
static mut num_snippet_success : u64 = 0;
/// Stage that overwrites input bytes with the worst-case bytes recorded for
/// each job snippet and re-runs the testcase.
#[derive(Clone, Debug, Default)]
pub struct STGSnippetStage<E, EM, Z, S, I, SYS> {
    #[allow(clippy::type_complexity)]
    phantom: PhantomData<(E, EM, Z, S, I, SYS)>,
    /// Guest address where the fuzz input is mapped; recorded read addresses
    /// are rebased against it (see `RTOSTask::map_bytes_onto`).
    input_addr: u32
}
impl<E, EM, Z, S, I, SYS> STGSnippetStage<E, EM, Z, S, I, SYS>
where
    SYS: TargetSystem,
{
    /// Create a new snippet stage; `input_addr` is the guest address of the
    /// input buffer.
    pub fn new(input_addr: u32) -> Self {
        Self { phantom: PhantomData, input_addr }
    }
}
impl<E, EM, Z, S, I, SYS> STGSnippetStage<E, EM, Z, S, MultipartInput<I, String>, SYS>
where
    EM: EventFirer<MultipartInput<I, String>, S>,
    I: HasMutatorBytes + Default + Clone,
    SYS: TargetSystem,
    S: HasRand + HasCorpus<MultipartInput<I, String>> + HasCurrentTestcase<MultipartInput<I, String>> + HasMetadata + HasExecutions,
    Z: Evaluator<E, EM, MultipartInput<I, String>, S>,
{
    /// Push the stage's global counters to the event manager as a user-stats
    /// string ("execs -> successes/reruns percent").
    fn report_stats(&self, state: &mut S, manager: &mut EM) {
        unsafe {
            // Reads the `static mut` counters; single fuzzer thread assumed.
            let _ = manager.fire(
                state,
                EventWithStats::with_current_time(
                    Event::UpdateUserStats {
                        name: Cow::from("STGSnippetStage"),
                        value: UserStats::new(
                            UserStatsValue::String(Cow::from(format!("{} -> {}/{} {:.1}% ", num_snippet_stage_execs, num_snippet_success, num_snippet_rerun, num_snippet_success as f32 * 100.0 / num_snippet_rerun as f32))),
                            AggregatorOps::None,
                        ),
                        phantom: PhantomData,
                    }, *state.executions()
                ),
            );
        }
    }
}
impl<E, EM, Z, S, I, SYS> Stage<E, EM, S, Z> for STGSnippetStage<E, EM, Z, S, MultipartInput<I, String>, SYS>
where
    EM: EventFirer<MultipartInput<I, String>, S>,
    I: HasMutatorBytes + Default + Clone,
    SYS: TargetSystem,
    S: HasRand + HasCorpus<MultipartInput<I, String>> + HasCurrentTestcase<MultipartInput<I, String>> + HasMetadata + HasExecutions,
    Z: Evaluator<E, EM, MultipartInput<I, String>, S>,
{
    /// For every job of the current testcase, copy the bytes of the recorded
    /// worst instance of the same job type over the "bytes" part of the input
    /// and re-run once anything actually changed.
    fn perform(
        &mut self,
        fuzzer: &mut Z,
        executor: &mut E,
        state: &mut S,
        manager: &mut EM
    ) -> Result<(), Error> {
        // NOTE: `myrand` is currently unused in this stage.
        let mut myrand = StdRand::new();
        myrand.set_seed(state.rand_mut().next());
        let mut do_rerun = false;
        let current_case = state.current_testcase()?;
        let old_input = current_case.input().as_ref().unwrap();
        let mut new_input : MultipartInput<I, String> = old_input.clone();
        let new_bytes = new_input.with_key_mut(&"bytes".to_string()).next().expect("bytes not found in multipart input").1.mutator_bytes_mut();
        // dbg!(current_case.metadata_map());
        // eprintln!("Run mutator {}", current_case.metadata_map().get::<STGNodeMetadata>().is_some());
        if let Some(meta) = current_case.metadata_map().get::<STGNodeMetadata>() {
            let feedbackstate = match state
                .metadata::<STGFeedbackState<SYS>>() {
                Ok(s) => s,
                // NOTE: `Error` is an identifier pattern matching any value
                // (effectively the Err case) — misleading but correct.
                Error => {
                    panic!("STGfeedbackstate not visible")
                }
            };
            // Maximize all snippets
            // dbg!(meta.jobs().len());
            for jobinst in meta.jobs().iter() {
                match feedbackstate.worst_task_jobs.get(&jobinst.get_hash_cached()) {
                    Some(worst) => {
                        let new = worst.map_bytes_onto(jobinst, Some(self.input_addr));
                        do_rerun |= new.len() > 0;
                        for (addr, byte) in new {
                            // Out-of-bounds (rebased) addresses are silently skipped.
                            if (addr as usize) < new_bytes.len() {
                                new_bytes[addr as usize] = byte;
                            }
                        }
                    },
                    Option::None => {}
                }
            }
        }
        drop(current_case);
        unsafe {num_snippet_stage_execs+=1;}
        if do_rerun {
            unsafe {num_snippet_rerun+=1;}
            let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, &new_input)?;
            if corpus_idx.is_some() { unsafe{num_snippet_success+=1};}
        }
        self.report_stats(state, manager);
        Ok(())
    }
}
impl<E, EM, Z, S, I, SYS> Restartable<S> for STGSnippetStage<E, EM, Z, S, MultipartInput<I, String>, SYS>
{
    /// Stateless stage: nothing to clear between runs.
    fn clear_progress(&mut self, _state: &mut S) -> Result<(), libafl_bolts::Error> {
        Ok(())
    }
    /// Stateless stage: never requests a restart.
    fn should_restart(&mut self, _state: &mut S) -> Result<bool, libafl_bolts::Error> {
        Ok(false)
    }
}

View File

@ -0,0 +1,207 @@
//! Stage to compute/report AFL stats
use core::{marker::PhantomData, time::Duration};
use libafl_bolts::current_time;
use itertools::Itertools;
use libafl::{
corpus::{Corpus, HasCurrentCorpusId}, events::EventFirer, schedulers::{minimizer::{IsFavoredMetadata, TopRatedsMetadata}, RemovableScheduler}, stages::{Restartable, Stage}, state::{HasCorpus, HasExecutions, HasImported}, Error, HasMetadata, HasNamedMetadata, HasScheduler
};
use libafl::{
events::Event,
};
use std::borrow::Cow;
use serde_json::json;
use libafl::prelude::mutational::MUTATION_STAGE_ITER;
use libafl::prelude::mutational::MUTATION_STAGE_RETRY;
use libafl::prelude::mutational::MUTATION_STAGE_SUCCESS;
use libafl_bolts::HasLen;
use crate::time::clock::{tick_to_time, time_to_tick, IcHist};
/// The [`SchedulerStatsStage`] periodically reports scheduler/minimizer
/// statistics and (behind the `sched_*` features) prunes the corpus.
#[derive(Debug, Clone)]
pub struct SchedulerStatsStage<E, EM, S, I, Z> {
    // timestamp of the last report that was fired
    last_report_time: Duration,
    // the interval that we report all stats
    stats_report_interval: Duration,
    // type parameters are only used by the trait impls
    phantom: PhantomData<(E, EM, S, I, Z)>,
}
impl<E, EM, S, I, Z> Stage<E, EM, S, Z> for SchedulerStatsStage<E, EM, S, I, Z>
where
    EM: EventFirer<I, S>,
    S: HasMetadata + HasExecutions + HasCorpus<I>,
    Z: HasScheduler<I, S>,
    <Z as HasScheduler<I, S>>::Scheduler: RemovableScheduler<I, S>,
{
    /// Every `stats_report_interval`, report minimizer statistics as a
    /// `UpdateUserStats` event and, when the corpus grows too large relative to
    /// the number of favored entries, prune low-value testcases.
    fn perform(
        &mut self,
        fuzzer: &mut Z,
        _executor: &mut E,
        state: &mut S,
        _manager: &mut EM,
    ) -> Result<(), Error> {
        let cur = current_time();
        if cur.checked_sub(self.last_report_time).unwrap_or_default() > self.stats_report_interval {
            // Worst observed runtime so far (0 when no histogram was recorded yet).
            let wort = tick_to_time(state.metadata_map().get::<IcHist>().unwrap_or(&IcHist::default()).1.0);
            if let Some(meta) = state.metadata_map().get::<TopRatedsMetadata>() {
                // kc: number of covered map entries ("objects");
                // vc: number of distinct favored testcases ("relevant").
                let kc = meta.map.keys().count();
                let mut v : Vec<_> = meta.map.values().cloned().collect();
                v.sort_unstable();
                v.dedup();
                let vc = v.len();
                #[cfg(feature = "std")]
                {
                    use libafl::{events::EventWithStats, prelude::stats::{AggregatorOps, UserStats, UserStatsValue}};
                    let json = json!({
                        "relevant":vc,
                        "objects":kc,
                    });
                    _manager.fire(
                        state,
                        EventWithStats::with_current_time(
                            Event::UpdateUserStats {
                                name: Cow::from("Minimizer"),
                                value: UserStats::new(
                                    UserStatsValue::String(Cow::from(json.to_string())),
                                    AggregatorOps::None,
                                ),
                                phantom: PhantomData,
                            }, *state.executions()
                        ),
                    )?;
                }
                // BUGFIX: this branch previously referenced variables that do not
                // exist in this scope (`pending_size`, `self.own_finds_size`, ...)
                // and could not compile without "std"; log the stats in scope.
                #[cfg(not(feature = "std"))]
                log::info!("relevant: {}, objects: {}", vc, kc);
                self.last_report_time = cur;
                // Experimental pruning
                #[cfg(any(feature = "sched_stg",feature = "sched_afl"))]
                {
                    const MULTI: usize = 10;
                    const PRUNE_THRESHOLD: usize = 20;
                    const PRUNE_MAX_KEEP: usize = 1000;
                    const PRUNE_MIN_KEEP: usize = 100;
                    let cc = state.corpus().count();
                    // Keep a multiple of the favored count, but never fewer than the floor.
                    let to_keep = usize::max(vc*MULTI, PRUNE_MIN_KEEP);
                    let activate = cc > PRUNE_MAX_KEEP || cc > usize::max(vc*PRUNE_THRESHOLD, PRUNE_MIN_KEEP*2);
                    let mut wort_preserved = false;
                    if activate {
                        println!("Pruning corpus, keeping {} / {}", to_keep, cc);
                        let corpus = state.corpus_mut();
                        let currid = corpus.current();
                        // Collect removal candidates: everything except favored entries,
                        // the currently scheduled one, and one testcase realizing the
                        // worst observed runtime (which is always preserved).
                        let ids : Vec<_> = corpus.ids().filter_map(|x| {
                            let tc = corpus.get(x).unwrap().borrow();
                            let md = tc.metadata_map();
                            if !wort_preserved && tc.exec_time() == &Some(wort) && wort>Duration::ZERO {
                                wort_preserved = true; // Keep the worst observed under all circumstances
                                Some((x, tc.exec_time().clone()))
                            } else {
                                if vc < PRUNE_MAX_KEEP && (md.get::<IsFavoredMetadata>().is_some() || &Some(x) == currid || v.contains(&&x)) {
                                    None
                                } else {
                                    Some((x, tc.exec_time().clone()))
                                }
                            }
                        }).sorted_by_key(|x| x.1).take(usize::saturating_sub(corpus.count(),to_keep)).sorted_by_key(|x| x.0).unique().rev().collect();
                        // Remove in descending id order and notify the scheduler.
                        for (cid, _) in ids {
                            let c = state.corpus_mut().remove(cid).unwrap();
                            fuzzer
                                .scheduler_mut()
                                .on_remove(state, cid, &Some(c))?;
                        }
                    }
                }
                // Also report the mutation-stage counters (global statics written
                // by the mutation stages; reading them requires unsafe).
                #[cfg(feature = "std")]
                unsafe {
                    use libafl::{events::EventWithStats, prelude::stats::{AggregatorOps, UserStats, UserStatsValue}};
                    let _ = _manager.fire(
                        state,
                        EventWithStats::with_current_time(
                            Event::UpdateUserStats {
                                name: Cow::from("StdMutationalStage"),
                                value: UserStats::new(
                                    UserStatsValue::String(Cow::from(format!("{} -> {}/{} {:.1}% ", MUTATION_STAGE_ITER, MUTATION_STAGE_SUCCESS, MUTATION_STAGE_RETRY, MUTATION_STAGE_SUCCESS as f32 * 100.0 / MUTATION_STAGE_RETRY as f32))),
                                    AggregatorOps::None,
                                ),
                                phantom: PhantomData,
                            }, *state.executions()
                        )
                    );
                }
            }
        }
        Ok(())
    }
}
impl<E, EM, S, I, Z> SchedulerStatsStage<E, EM, S, I, Z> {
    /// Create a stage that reports scheduler statistics every `interval`.
    #[must_use]
    pub fn new(interval: Duration) -> Self {
        let mut stage = Self::default();
        stage.stats_report_interval = interval;
        stage
    }
}
impl<E, EM, S, I, Z> Default for SchedulerStatsStage<E, EM, S, I, Z> {
    /// The default [`SchedulerStatsStage`]: reports every 3 seconds.
    #[must_use]
    fn default() -> Self {
        Self {
            // pretend we just reported, so the first report fires one interval from now
            last_report_time: current_time(),
            stats_report_interval: Duration::from_secs(3),
            phantom: PhantomData,
        }
    }
}
/// Stats reporting keeps no per-run progress, so restart handling is a no-op.
impl<E, EM, S, I, Z> Restartable<S> for SchedulerStatsStage<E, EM, S, I, Z> {
    /// Nothing to clear.
    fn clear_progress(&mut self, _state: &mut S) -> Result<(), libafl_bolts::Error> {
        Ok(())
    }
    /// Never requests a restart.
    fn should_restart(&mut self, _state: &mut S) -> Result<bool, libafl_bolts::Error> {
        Ok(false)
    }
}

View File

@ -0,0 +1,311 @@
//! The Minimizer schedulers are a family of corpus schedulers that feed the fuzzer
//! with testcases only from a subset of the total corpus.
use core::marker::PhantomData;
use std::{cmp::{max, min}, mem::swap};
use serde::{Deserialize, Serialize};
use libafl_bolts::{rands::Rand, AsIter, HasLen};
use libafl::{
common::HasMetadata, corpus::{Corpus, Testcase}, inputs::Input, prelude::{CanTrack, CorpusId, RemovableScheduler}, schedulers::{minimizer::DEFAULT_SKIP_NON_FAVORED_PROB, Scheduler, TestcaseScore }, state::{HasCorpus, HasRand}, Error, SerdeAny
};
use crate::time::worst::MaxTimeFavFactor;
use super::{stg::STGNodeMetadata, target_os::*};
/// A state metadata holding the length of the longest trace observed so far
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct LongestTracesMetadata {
    /// number of entries in the longest trace seen across all testcases
    pub max_trace_length: usize,
}
impl LongestTracesMetadata {
    /// Wrap an initial maximum trace length.
    fn new(length: usize) -> Self {
        Self {
            max_trace_length: length,
        }
    }
}
/// The [`LongestTraceScheduler`] wraps a base scheduler and probabilistically
/// re-draws candidates whose traces are short relative to the longest trace
/// seen so far, biasing scheduling towards long traces.
#[derive(Debug, Clone)]
pub struct LongestTraceScheduler<CS, SYS> {
    // the wrapped scheduler producing candidate corpus ids
    base: CS,
    // probability of continuing to skip a short-trace candidate (coinflip in `next`)
    skip_non_favored_prob: f64,
    phantom: PhantomData<SYS>,
}
impl<CS, I, SYS> Scheduler<I, CS> for LongestTraceScheduler<CS, SYS>
where
    CS: Scheduler<I, CS> + HasCorpus<I> + HasMetadata + HasRand,
    SYS: TargetSystem,
{
    /// Add an entry to the corpus and return its index
    fn on_add(&mut self, state: &mut CS, idx: CorpusId) -> Result<(), Error> {
        // Record the new testcase's trace length so the global maximum stays current.
        let l = state.corpus()
            .get(idx)?
            .borrow()
            .metadata_map()
            .get::<SYS::TraceData>().map_or(0, |x| x.trace_length());
        self.get_update_trace_length(state,l);
        self.base.on_add(state, idx)
    }
    /// Replaces the testcase at the given idx
    // fn on_replace(
    //     &mut self,
    //     state: &mut CS::State,
    //     idx: CorpusId,
    //     testcase: &Testcase<<CS::State as UsesInput>::Input>,
    // ) -> Result<(), Error> {
    //     let l = state.corpus()
    //         .get(idx)?
    //         .borrow()
    //         .metadata()
    //         .get::<FreeRTOSSystemStateMetadata>().map_or(0, |x| x.trace_length);
    //     self.get_update_trace_length(state, l);
    //     self.base.on_replace(state, idx, testcase)
    // }
    /// Removes an entry from the corpus, returning M if M was present.
    // fn on_remove(
    //     &self,
    //     state: &mut CS::State,
    //     idx: usize,
    //     testcase: &Option<Testcase<<CS::State as UsesInput>::Input>>,
    // ) -> Result<(), Error> {
    //     self.base.on_remove(state, idx, testcase)?;
    //     Ok(())
    // }
    /// Gets the next entry, re-drawing from the base scheduler while the
    /// candidate's trace is short relative to the maximum (proportional skip,
    /// gated by a `skip_non_favored_prob` coinflip).
    fn next(&mut self, state: &mut CS) -> Result<CorpusId, Error> {
        let mut idx = self.base.next(state)?;
        // NOTE(review): this uses the STGNodeMetadata node count as the trace
        // length, while `on_add` uses SYS::TraceData::trace_length — confirm
        // the two measures are comparable.
        while {
            let l = state.corpus()
                .get(idx)?
                .borrow()
                .metadata_map()
                .get::<STGNodeMetadata>().map_or(0, |x| x.nodes().len());
            let m = self.get_update_trace_length(state,l);
            // Skip with probability proportional to how far below the maximum this trace is.
            state.rand_mut().below(std::num::NonZero::new(m as usize+1).unwrap()) > l
        } && state.rand_mut().coinflip(self.skip_non_favored_prob)
        {
            idx = self.base.next(state)?;
        }
        Ok(idx)
    }
    // Delegates bookkeeping to the wrapped scheduler.
    fn set_current_scheduled(
        &mut self,
        state: &mut CS,
        next_id: Option<libafl::corpus::CorpusId>,
    ) -> Result<(), Error> {
        self.base.set_current_scheduled(state, next_id)
    }
}
impl<CS, SYS> LongestTraceScheduler<CS, SYS>
where
    CS: HasMetadata + HasRand,
    SYS: TargetSystem,
{
    /// Record `par` as a candidate maximum trace length and return the
    /// current maximum (after the update), creating the metadata on first use.
    pub fn get_update_trace_length(&self, state: &mut CS, par: usize) -> u64 {
        match state.metadata_map_mut().get_mut::<LongestTracesMetadata>() {
            Some(meta) => {
                // Keep whichever of the stored and the new length is larger.
                if par > meta.max_trace_length {
                    meta.max_trace_length = par;
                }
                meta.max_trace_length as u64
            }
            None => {
                // First observation: create the metadata entry.
                state.add_metadata(LongestTracesMetadata::new(par));
                par as u64
            }
        }
    }
    /// Wrap `base` using the default skip probability.
    #[allow(unused)]
    pub fn new(base: CS) -> Self {
        Self {
            base,
            skip_non_favored_prob: DEFAULT_SKIP_NON_FAVORED_PROB,
            phantom: PhantomData,
        }
    }
}
//==========================================================================================
/// A state metadata holding the current and upcoming generations of testcases
/// (corpus index paired with fav factor) for the generational scheduler
#[derive(Debug, Serialize, Deserialize, SerdeAny, Default)]
pub struct GeneticMetadata {
    /// testcases (corpus index, fav factor) scheduled in the current generation
    pub current_gen: Vec<(usize, f64)>,
    /// position of the next entry to schedule within `current_gen`
    pub current_cursor: usize,
    /// testcases collected for the upcoming generation
    pub next_gen: Vec<(usize, f64)>,
    /// generation counter
    pub gen: usize
}
impl GeneticMetadata {
    /// Start at generation zero with the cursor at the beginning.
    fn new(current_gen: Vec<(usize, f64)>, next_gen: Vec<(usize, f64)>) -> Self {
        Self {
            current_gen,
            current_cursor: 0,
            next_gen,
            gen: 0,
        }
    }
}
/// A generational scheduler: testcases are drawn generation by generation;
/// survivors are selected by fav factor in `next` and the rest removed.
#[derive(Debug, Clone)]
pub struct GenerationScheduler<S> {
    phantom: PhantomData<S>,
    // number of testcases kept per generation (chosen via the gensize_* features)
    gen_size: usize,
}
impl<S> GenerationScheduler<S>
{
    /// Create a scheduler whose generation size is selected by the
    /// `gensize_*` cargo features (100 when none is enabled).
    #[allow(unused)]
    pub fn new() -> Self {
        // The default is gated on "no gensize_* feature" instead of being an
        // unconditional binding, so it no longer shadows the feature choice
        // (and no longer triggers an unused-variable warning).
        #[cfg(not(any(feature = "gensize_1", feature = "gensize_10", feature = "gensize_100", feature = "gensize_1000")))]
        let gen_size = 100;
        #[cfg(feature = "gensize_1")]
        let gen_size = 1;
        #[cfg(feature = "gensize_10")]
        let gen_size = 10;
        #[cfg(feature = "gensize_100")]
        let gen_size = 100;
        #[cfg(feature = "gensize_1000")]
        let gen_size = 1000;
        Self {
            phantom: PhantomData,
            gen_size
        }
    }
}
impl<I, S> Scheduler<I, S> for GenerationScheduler<S>
where
    S: HasCorpus<I> + HasMetadata,
    I: Clone,
{
    /// get first element in current gen,
    /// if current_gen is empty, swap lists, sort by FavFactor, take top k and return first
    fn next(&mut self, state: &mut S) -> Result<CorpusId, Error> {
        // entries retired with the previous generation; removed from the corpus below
        let mut to_remove : Vec<(usize, f64)> = vec![];
        let mut _to_return : usize = 0;
        let corpus_len = state.corpus().count();
        let mut _current_len = 0;
        let gm = state.metadata_map_mut().get_mut::<GeneticMetadata>().expect("Corpus Scheduler empty");
        // println!("index: {} curr: {:?} next: {:?} gen: {} corp: {}", gm.current_cursor, gm.current_gen.len(), gm.next_gen.len(), gm.gen,
        // c);
        match gm.current_gen.get(gm.current_cursor) {
            Some(c) => {
                // still inside the current generation: advance the cursor and return
                _current_len = gm.current_gen.len();
                gm.current_cursor+=1;
                // println!("normal next: {}", (*c).0);
                return Ok((*c).0.into())
            },
            Option::None => {
                // generation exhausted: retire it and promote next_gen
                swap(&mut to_remove, &mut gm.current_gen);
                swap(&mut gm.next_gen, &mut gm.current_gen);
                // sort ascending by fav factor
                gm.current_gen.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
                // gm.current_gen.reverse();
                if gm.current_gen.len() == 0 {panic!("Corpus is empty");}
                // everything beyond the first gen_size entries is retired too
                let d : Vec<(usize, f64)> = gm.current_gen.drain(min(gm.current_gen.len(), self.gen_size)..).collect();
                to_remove.extend(d);
                // move all indices to the left, since all other indices will be deleted
                gm.current_gen.sort_by(|a,b| a.0.cmp(&(*b).0)); // in order of the corpus index
                // for i in 0..gm.current_gen.len() {
                //     gm.current_gen[i] = (i, gm.current_gen[i].1);
                // }
                _to_return = gm.current_gen.get(0).unwrap().0;
                // assert_eq!(to_return, 0);
                gm.current_cursor=1;
                gm.gen+=1;
                _current_len = gm.current_gen.len();
            }
        };
        // removing these elements will move all indices left by to_remove.len()
        // to_remove.sort_by(|x,y| x.0.cmp(&(*y).0));
        // to_remove.reverse();
        let cm = state.corpus_mut();
        // sanity: survivors + retirees must account for the whole corpus
        assert_eq!(corpus_len-to_remove.len(), _current_len);
        assert_ne!(_current_len,0);
        for i in to_remove {
            cm.remove(i.0.into()).unwrap();
        }
        assert_eq!(cm.get(_to_return.into()).is_ok(),true);
        // println!("switch next: {to_return}");
        return Ok(_to_return.into());
    }
    /// Add the new input to the next generation
    fn on_add(
        &mut self,
        state: &mut S,
        idx: CorpusId
    ) -> Result<(), Error> {
        // println!("On Add {idx}");
        // compute the (time-based) fav factor for the new entry and queue it
        let mut tc = state.corpus_mut().get(idx).expect("Newly added testcase not found by index").borrow_mut().clone();
        let ff = MaxTimeFavFactor::compute(state, &mut tc).unwrap();
        if let Some(gm) = state.metadata_map_mut().get_mut::<GeneticMetadata>() {
            gm.next_gen.push((idx.into(),ff));
        } else {
            // first testcase ever: create the metadata
            state.add_metadata(GeneticMetadata::new(vec![], vec![(idx.into(),ff)]));
        }
        Ok(())
    }
    // Nothing to track when a testcase is scheduled.
    fn set_current_scheduled(
        &mut self,
        state: &mut S,
        next_id: Option<libafl::corpus::CorpusId>,
    ) -> Result<(), Error> {
        Ok(())
    }
    // fn on_replace(
    //     &self,
    //     _state: &mut Self::State,
    //     _idx: usize,
    //     _prev: &Testcase<<Self::State as UsesInput>::Input>
    // ) -> Result<(), Error> {
    //     // println!("On Replace {_idx}");
    //     Ok(())
    // }
    // fn on_remove(
    //     &self,
    //     state: &mut Self::State,
    //     idx: usize,
    //     _testcase: &Option<Testcase<<Self::State as UsesInput>::Input>>
    // ) -> Result<(), Error> {
    //     // println!("On Remove {idx}");
    //     if let Some(gm) = state.metadata_mut().get_mut::<GeneticMetadata>() {
    //         gm.next_gen = gm.next_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
    //         gm.current_gen = gm.current_gen.drain(..).into_iter().filter(|x| (*x).0 != idx).collect::<Vec<(usize, f64)>>();
    //     } else {
    //         state.add_metadata(GeneticMetadata::new(vec![], vec![]));
    //     }
    //     Ok(())
    // }
}
impl<I,S> RemovableScheduler<I,S> for GenerationScheduler<S>
where
    S: HasCorpus<I> + HasMetadata,
{
    /// Replacing a testcase requires no bookkeeping for this scheduler.
    // Unused parameters are underscore-prefixed to avoid warnings.
    fn on_replace(
        &mut self,
        _state: &mut S,
        _idx: CorpusId,
        _testcase: &Testcase<I>,
    ) -> Result<(), Error> {
        Ok(())
    }
    /// Removal requires no bookkeeping; generation lists are pruned in `next`.
    fn on_remove(
        &mut self,
        _state: &mut S,
        _idx: CorpusId,
        _testcase: &Option<Testcase<I>>,
    ) -> Result<(), Error> {
        Ok(())
    }
}

View File

@ -0,0 +1,794 @@
use hashbrown::HashSet;
use libafl::inputs::Input;
/// Feedbacks organizing SystemStates as a graph
use libafl_bolts::prelude::SerdeAny;
use libafl_bolts::ownedref::OwnedMutSlice;
use log::Metadata;
use petgraph::graph::EdgeIndex;
use libafl::common::HasNamedMetadata;
use libafl::schedulers::MinimizerScheduler;
use libafl_bolts::HasRefCnt;
use serde::de::DeserializeOwned;
use std::path::PathBuf;
use libafl::corpus::Testcase;
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::hash::Hash;
use libafl::events::EventFirer;
use libafl::state::MaybeHasClientPerfMonitor;
use libafl::feedbacks::Feedback;
use libafl_bolts::Named;
use libafl::Error;
use hashbrown::HashMap;
use libafl::{executors::ExitKind, observers::ObserversTuple, common::HasMetadata};
use serde::{Deserialize, Serialize};
use std::marker::PhantomData;
use super::helpers::get_generic_hash;
use super::helpers::metadata_insert_or_update_get;
use super::target_os::SystemState;
use super::AtomicBasicBlock;
use super::CaptureEvent;
use super::ExecInterval;
use super::RTOSJob;
use super::RTOSTask;
use petgraph::prelude::DiGraph;
use petgraph::graph::NodeIndex;
use petgraph::Direction;
use crate::time::clock::QemuClockObserver;
use crate::time::clock::FUZZ_START_TIMESTAMP;
use crate::time::worst::MaxTimeFavFactor;
use std::time::SystemTime;
use std::{fs::OpenOptions, io::Write};
use std::borrow::Cow;
use std::ops::Deref;
use std::ops::DerefMut;
use std::rc::Rc;
use petgraph::visit::EdgeRef;
use crate::systemstate::target_os::*;
use libafl::prelude::StateInitializer;
//============================= Data Structures
/// A node of the state-transition graph: a hashed system state paired with
/// the atomic basic block executing in that state.
#[derive(Serialize, Deserialize, Clone, Debug, Default, Hash)]
#[serde(bound = "SYS: Serialize, for<'de2> SYS: Deserialize<'de2>")]
pub struct STGNode<SYS>
where
    SYS: TargetSystem,
    for<'de2> SYS: Deserialize<'de2>,
{
    //base: SYS::State,
    // hash of the system state; the full state is kept in
    // STGFeedbackState::systemstate_index
    state: u64,
    // the atomic basic block associated with this node
    abb: AtomicBasicBlock,
    _phantom: PhantomData<SYS>
}
impl<SYS> STGNode<SYS>
where SYS: TargetSystem {
    /// Multi-line debug label: task name, ABB level and address range, and
    /// the target's kernel lists (via `print_lists`).
    pub fn _pretty_print(&self, map: &HashMap<u64, SYS::State>) -> String {
        format!("{}\nl{} {:x}-{:x}\n{}", map[&self.state].current_task().task_name(), self.abb.level, self.abb.start, self.abb.ends.iter().next().unwrap_or_else(||&0xFFFF), map[&self.state].print_lists())
    }
    /// Graphviz node label plus an attribute suffix; fill color encodes the
    /// ABB level (0 = task, 1 = API call, 2 = ISR, other = unknown).
    pub fn color_print(&self, map: &HashMap<u64, SYS::State>) -> String {
        // The strings embed closing/reopening quotes for the dot "label" attribute.
        let color = match self.abb.level {
            1 => "\", shape=box, style=filled, fillcolor=\"lightblue",
            2 => "\", shape=box, style=filled, fillcolor=\"yellow",
            0 => "\", shape=box, style=filled, fillcolor=\"white",
            _ => "\", style=filled, fillcolor=\"lightgray",
        };
        let message = match self.abb.level {
            1 => format!("API Call"),
            2 => format!("ISR"),
            0 => format!("Task: {}",map[&self.state].current_task().task_name()),
            _ => format!(""),
        };
        // Only the top 16 bits of the state hash are shown, to keep labels short.
        let mut label = format!("{}\nABB: {:x}-{:x}\nHash:{:X}\n{}", message, self.abb.start, self.abb.ends.iter().next().unwrap_or_else(||&0xFFFF), self.state>>48, map[&self.state].print_lists());
        label.push_str(color);
        label
    }
    // Hash over both the state hash and the ABB; used as the key in
    // STGFeedbackState::stgnode_index.
    fn get_hash(&self) -> u64 {
        let mut s = DefaultHasher::new();
        self.state.hash(&mut s);
        self.abb.hash(&mut s);
        s.finish()
    }
}
impl<SYS> PartialEq for STGNode<SYS>
where
    SYS: TargetSystem,
{
    // NOTE(review): equality compares only the state hash and ignores `abb`,
    // while the derived `Hash` covers both fields — equal-but-different-abb
    // nodes would hash differently. Confirm this asymmetry is intentional
    // before keying a hash map on STGNode.
    fn eq(&self, other: &STGNode<SYS>) -> bool {
        self.state==other.state
    }
}
/// An edge of the state-transition graph: the capture event that caused the
/// transition plus the worst observation recorded along it.
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct STGEdge
{
    // the kind of capture event triggering this transition
    pub event: CaptureEvent,
    // human-readable name of the event (e.g. the API function)
    pub name: String,
    // worst observed (execution time, memory accesses) along this edge
    pub worst: Option<(u64, Vec<(u32, u8)>)>,
}
impl STGEdge {
    /// Short human-readable label: an event prefix followed by the edge name.
    pub fn _pretty_print(&self) -> String {
        let prefix = match self.event {
            CaptureEvent::APIStart => "Call: ",
            CaptureEvent::APIEnd => "Ret: ",
            CaptureEvent::ISRStart => "Int: ",
            CaptureEvent::ISREnd => "IRet: ",
            CaptureEvent::End => "End: ",
            CaptureEvent::Undefined => "",
        };
        format!("{}{}", prefix, self.name)
    }
    /// Edge name followed by a graphviz attribute suffix encoding the event
    /// kind as a color/line style.
    pub fn color_print(&self) -> String {
        let suffix = match self.event {
            CaptureEvent::APIStart => "\", color=\"blue",
            CaptureEvent::APIEnd => "\", color=\"black",
            CaptureEvent::ISRStart => "\", color=red, style=\"dashed",
            CaptureEvent::ISREnd => "\", color=red, style=\"solid",
            CaptureEvent::End => "",
            CaptureEvent::Undefined => "",
        };
        format!("{}{}", self.name, suffix)
    }
    /// Whether this edge's event terminates an atomic basic block.
    pub fn is_abb_end(&self) -> bool {
        matches!(
            self.event,
            CaptureEvent::APIStart | CaptureEvent::APIEnd | CaptureEvent::ISREnd | CaptureEvent::End
        )
    }
}
impl Hash for STGEdge {
    // Only `event` and `name` are hashed; `worst` is left out, so edges
    // differing only in their recorded worst case hash identically.
    // (Equal values still hash equally, so the Hash/Eq contract holds.)
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.event.hash(state);
        self.name.hash(state);
    }
}
/// Shared Metadata for a systemstate Feedback: the aggregated state-transition
/// graph plus worst-case bookkeeping across all executions.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(bound = "SYS: Serialize, for<'de2> SYS: Deserialize<'de2>")]
pub struct STGFeedbackState<SYS>
where
    SYS: TargetSystem,
    for<'de2> SYS: Deserialize<'de2>,
{
    name: Cow<'static, str>,
    // aggregated traces as a graph
    pub graph: DiGraph<STGNode<SYS>, STGEdge>,
    // state hash -> full system state
    pub systemstate_index: HashMap<u64, SYS::State>,
    // (state hash, abb hash) -> graph node index
    pub state_abb_hash_index: HashMap<(u64, u64), NodeIndex>,
    // STGNode::get_hash() -> graph node index
    stgnode_index: HashMap<u64, NodeIndex>,
    // synthetic start node every trace begins at
    entrypoint: NodeIndex,
    // synthetic end node every trace terminates at
    exitpoint: NodeIndex,
    // Metadata about aggregated traces. aggegated meaning, order has been removed
    // worst observed total runtime (ticks)
    wort: u64,
    wort_per_aggegated_path: HashMap<Vec<AtomicBasicBlock>,u64>,
    wort_per_abb_path: HashMap<u64,u64>,
    wort_per_stg_path: HashMap<u64,u64>,
    worst_abb_exec_count: HashMap<AtomicBasicBlock, usize>,
    // Metadata about job instances
    // job hash -> worst observed instance of that task's job
    pub worst_task_jobs: HashMap<u64, RTOSTask>,
}
// Register the generic metadata type with libafl's SerdeAny registry.
libafl_bolts::impl_serdeany!(STGFeedbackState<SYS: SerdeAny+TargetSystem>);
impl<SYS> Default for STGFeedbackState<SYS>
where
    SYS: TargetSystem,
    for<'de2> SYS: Deserialize<'de2>,
{
    /// Build an empty feedback state containing only the synthetic
    /// "Start" and "End" nodes, with all indices pre-populated for them.
    fn default() -> STGFeedbackState<SYS> {
        let mut graph = DiGraph::new();
        // Two artificial states whose task names mark the trace boundaries.
        let mut entry_state = SYS::State::default();
        let mut exit_state = SYS::State::default();
        *(entry_state.current_task_mut().task_name_mut())="Start".to_string();
        *(exit_state.current_task_mut().task_name_mut())="End".to_string();
        let mut entry : STGNode<SYS> = STGNode::default();
        let mut exit : STGNode<SYS> = STGNode::default();
        entry.state=compute_hash(&entry_state);
        exit.state=compute_hash(&exit_state);
        let systemstate_index = HashMap::from([(entry.state, entry_state), (exit.state, exit_state)]);
        let h_entry = entry.get_hash();
        let h_exit = exit.get_hash();
        let entrypoint = graph.add_node(entry.clone());
        let exitpoint = graph.add_node(exit.clone());
        // Index the two synthetic nodes by (state hash, abb hash) and node hash.
        let state_abb_hash_index = HashMap::from([((entry.state, entry.abb.get_hash()), entrypoint), ((exit.state, exit.abb.get_hash()), exitpoint)]);
        let index = HashMap::from([(h_entry, entrypoint), (h_exit, exitpoint)]);
        STGFeedbackState {
            name: Cow::from("stgfeedbackstate".to_string()),
            graph,
            stgnode_index: index,
            entrypoint,
            exitpoint,
            wort: 0,
            wort_per_aggegated_path: HashMap::new(),
            wort_per_abb_path: HashMap::new(),
            wort_per_stg_path: HashMap::new(),
            worst_abb_exec_count: HashMap::new(),
            systemstate_index,
            state_abb_hash_index,
            worst_task_jobs: HashMap::new(),
        }
    }
}
impl<SYS> Named for STGFeedbackState<SYS>
where
    SYS: TargetSystem,
{
    /// Stable lookup name ("stgfeedbackstate", set in `default`).
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
// Wrapper around Vec<RefinedFreeRTOSSystemState> to attach as Metadata
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct STGNodeMetadata {
nodes: Vec<NodeIndex>,
edges: Vec<EdgeIndex>,
abbs: u64,
aggregate: u64,
top_abb_counts: Vec<u64>,
intervals: Vec<ExecInterval>,
jobs: Vec<RTOSJob>,
indices: Vec<usize>,
tcref: isize,
}
impl STGNodeMetadata {
    /// Build metadata from one decoded trace. Which `indices` are derived
    /// depends on the `sched_stg_*` feature selected at compile time.
    /// `_abb_trace` is currently unused (underscored to silence the warning)
    /// but kept for interface stability.
    pub fn new(nodes: Vec<NodeIndex>, edges: Vec<EdgeIndex>, _abb_trace: Vec<AtomicBasicBlock>, abbs_pathhash: u64, aggregate: u64, top_abb_counts: Vec<u64>, intervals: Vec<ExecInterval>, jobs: Vec<RTOSJob>) -> Self {
        #[allow(unused)]
        let mut indices : Vec<_> = vec![];
        #[cfg(feature = "sched_stg_edge")]
        {
            // one index per distinct STG edge visited
            indices = edges.iter().map(|x| x.index()).collect();
            indices.sort_unstable();
            indices.dedup();
        }
        #[cfg(feature = "sched_stg_pathhash")]
        {
            // a single index: hash of the exact edge path
            indices.push(get_generic_hash(&edges) as usize);
        }
        #[cfg(feature = "sched_stg_abbhash")]
        {
            // a single index: hash of the ABB path
            indices.push(abbs_pathhash as usize);
        }
        #[cfg(feature = "sched_stg_aggregatehash")]
        {
            // indices.push(aggregate as usize);
            // order-independent: hashes of the per-ABB execution counts
            indices = top_abb_counts.iter().map(|x| (*x) as usize).collect();
        }
        Self {indices, intervals, jobs, nodes, abbs: abbs_pathhash, aggregate, top_abb_counts, edges, tcref: 0}
    }
    /// Node path through the STG.
    pub fn nodes(&self) -> &Vec<NodeIndex> {
        &self.nodes
    }
    /// Edge path through the STG.
    pub fn edges(&self) -> &Vec<EdgeIndex> {
        &self.edges
    }
    /// Hash of the ABB path.
    pub fn abbs(&self) -> u64 {
        self.abbs
    }
    /// Order-independent hash of the aggregated path.
    pub fn aggregate(&self) -> u64 {
        self.aggregate
    }
    /// Hashes of the per-ABB execution counts.
    pub fn top_abb_counts(&self) -> &Vec<u64> {
        &self.top_abb_counts
    }
    /// Raw execution intervals of this run.
    pub fn intervals(&self) -> &Vec<ExecInterval> {
        &self.intervals
    }
    /// Decoded job instances of this run.
    pub fn jobs(&self) -> &Vec<RTOSJob> {
        &self.jobs
    }
}
impl Deref for STGNodeMetadata {
    type Target = [usize];
    /// Convert to a slice of the minimizer indices (see `new` for their meaning)
    fn deref(&self) -> &[usize] {
        &self.indices
    }
}
impl DerefMut for STGNodeMetadata {
    /// Convert to a mutable slice of the minimizer indices
    fn deref_mut(&mut self) -> &mut [usize] {
        &mut self.indices
    }
}
// Reference count used by the minimizer scheduler to track how many
// top-rated map entries point at the owning testcase.
impl HasRefCnt for STGNodeMetadata {
    fn refcnt(&self) -> isize {
        self.tcref
    }
    fn refcnt_mut(&mut self) -> &mut isize {
        &mut self.tcref
    }
}
// Register STGNodeMetadata with libafl's SerdeAny registry.
libafl_bolts::impl_serdeany!(STGNodeMetadata);
/// A minimizer scheduler maximizing execution time ([`MaxTimeFavFactor`]),
/// keyed by the indices exposed through [`STGNodeMetadata`].
pub type GraphMaximizerCorpusScheduler<S, I, O> =
    MinimizerScheduler<S, MaxTimeFavFactor, I, STGNodeMetadata,O>;
// AI generated, human verified
/// Count the occurrences of each element, assuming runs of equal elements are
/// contiguous (i.e. the slice is sorted). For unsorted input, a later run of
/// an element overwrites the count of an earlier run.
///
/// Takes any slice (`&Vec<T>` coerces via deref), and the unused `T: Clone`
/// bound was dropped — only references are stored as keys.
fn count_occurrences_sorted<T>(vec: &[T]) -> HashMap<&T, usize>
where
    T: PartialEq + Eq + Hash,
{
    let mut counts = HashMap::new();
    if vec.is_empty() {
        return counts;
    }
    // Walk the slice once, tracking the current run and its length.
    let mut current_obj = &vec[0];
    let mut current_count = 1;
    for obj in vec.iter().skip(1) {
        if obj == current_obj {
            current_count += 1;
        } else {
            counts.insert(current_obj, current_count);
            current_obj = obj;
            current_count = 1;
        }
    }
    // Insert the count of the last run
    counts.insert(current_obj, current_count);
    counts
}
//============================= Graph Feedback
pub const STG_MAP_SIZE: usize = 1<<28; // 512MB (2^28 u16 entries)
// Hit-count map over STG edge indices, written by set_observer_map.
// NOTE(review): `static mut` is only sound when accessed from a single
// thread — confirm no concurrent access exists.
pub static mut STG_MAP: [u16; STG_MAP_SIZE] = [0; STG_MAP_SIZE];
// High-water mark: largest edge index seen so far (used to limit clearing).
pub static mut MAX_STG_NUM: usize = 0;
/// Wrap the global `STG_MAP` in an [`OwnedMutSlice`] for use as an observer map.
///
/// # Safety
/// Creates a mutable alias of the `STG_MAP` static. The caller must ensure no
/// other reference to `STG_MAP` is alive or used concurrently while the
/// returned slice exists.
pub unsafe fn stg_map_mut_slice<'a>() -> OwnedMutSlice<'a, u16> {
    OwnedMutSlice::from_raw_parts_mut(STG_MAP.as_mut_ptr(), STG_MAP.len())
}
/// A Feedback reporting novel System-State Transitions. Depends on [`QemuSystemStateObserver`]
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
#[serde(bound = "SYS: Serialize, for<'de2> SYS: Deserialize<'de2>")]
pub struct StgFeedback<SYS>
where
    SYS: TargetSystem,
    for<'de2> SYS: Deserialize<'de2>,
{
    name: Cow<'static, str>,
    // results of the most recent execution, buffered for later use
    last_node_trace: Option<Vec<NodeIndex>>,
    last_edge_trace: Option<Vec<EdgeIndex>>,
    last_intervals: Option<Vec<ExecInterval>>,
    last_abb_trace: Option<Vec<AtomicBasicBlock>>,
    last_abbs_hash: Option<u64>, // only set, if it was interesting
    last_aggregate_hash: Option<u64>, // only set, if it was interesting
    last_top_abb_hashes: Option<Vec<u64>>, // only set, if it was interesting
    last_job_trace: Option<Vec<RTOSJob>>, // only set, if it was interesting
    // optional path for dumping graph-size statistics (".stgsize" extension)
    dump_path: Option<PathBuf>,
    // if set, this task's worst job response time replaces the raw runtime
    select_task: Option<String>,
    _phantom_data: PhantomData<SYS>,
}
// Compile-time switches selecting which kinds of novelty count as
// "interesting". Each constant is `true` iff its feature is enabled.
#[cfg(feature = "feed_stg")]
const INTEREST_EDGE : bool = true;
#[cfg(feature = "feed_stg_abb_woet")]
const INTEREST_EDGE_WEIGHT : bool = true;
#[cfg(feature = "feed_stg")]
const INTEREST_NODE : bool = true;
#[cfg(feature = "feed_stg_pathhash")]
const INTEREST_PATH : bool = true;
#[cfg(feature = "feed_stg_abbhash")]
const INTEREST_ABBPATH : bool = true;
#[cfg(feature = "feed_stg_aggregatehash")]
const INTEREST_AGGREGATE : bool = true;
#[cfg(feature = "feed_job_wort")]
pub const INTEREST_JOB_RT : bool = true;
#[cfg(feature = "feed_job_woet")]
pub const INTEREST_JOB_ET : bool = true;
#[cfg(not(feature = "feed_stg"))]
const INTEREST_EDGE : bool = false;
// BUGFIX: this was `true`, which made the `feed_stg_abb_woet` gate
// ineffective (new worst edge weights were always considered interesting,
// even with the feature disabled).
#[cfg(not(feature = "feed_stg_abb_woet"))]
const INTEREST_EDGE_WEIGHT : bool = false;
#[cfg(not(feature = "feed_stg"))]
const INTEREST_NODE : bool = false;
#[cfg(not(feature = "feed_stg_pathhash"))]
const INTEREST_PATH : bool = false;
#[cfg(not(feature = "feed_stg_abbhash"))]
const INTEREST_ABBPATH : bool = false;
#[cfg(not(feature = "feed_stg_aggregatehash"))]
const INTEREST_AGGREGATE : bool = false;
#[cfg(not(feature = "feed_job_wort"))]
pub const INTEREST_JOB_RT : bool = false;
#[cfg(not(feature = "feed_job_woet"))]
pub const INTEREST_JOB_ET : bool = false;
fn set_observer_map(trace : &Vec<EdgeIndex>) {
// dbg!(trace);
unsafe {
for i in 0..MAX_STG_NUM {
STG_MAP[i] = 0;
}
for i in trace {
if MAX_STG_NUM < i.index() {
MAX_STG_NUM = i.index();
}
if i.index() < STG_MAP.len() {
STG_MAP[i.index()] = STG_MAP[i.index()].saturating_add(1);
} else {
eprintln!("STG Map index out of bounds: {}", i.index());
}
}
}
}
/// Takes: trace of intervals plus the memory reads observed per interval
/// Returns: hashmap of abb instance id to (accumulated execution time, memory accesses)
fn execinterval_to_abb_instances(trace: &Vec<ExecInterval>, read_trace: &Vec<Vec<(u32, u8)>>) -> HashMap<usize, (u64, Vec<(u32, u8)>)>{
    let mut instance_time: HashMap<usize, (u64, Vec<(u32, u8)>)> = HashMap::new();
    for (i, interval) in trace.iter().enumerate() { // Iterate intervals
        // usize::MAX is the sentinel for "no ABB attached"; such intervals
        // (and any real id equal to the sentinel) are skipped, as before.
        let id = interval.abb.as_ref().map(|abb| abb.instance_id).unwrap_or(usize::MAX);
        if id == usize::MAX {
            continue;
        }
        // Sum execution time and memory accesses per ABB instance; the entry
        // API replaces the previous get_mut + insert double lookup.
        instance_time
            .entry(id)
            .and_modify(|x| {
                x.0 += interval.get_exec_time();
                x.1.extend(read_trace[i].iter().cloned());
            })
            .or_insert_with(|| (interval.get_exec_time(), read_trace[i].clone()));
    }
    instance_time
}
impl<SYS> StgFeedback<SYS>
where
    SYS: TargetSystem,
{
    /// Create the feedback; `select_task` optionally restricts the runtime
    /// metric to one task, `dump_name` enables ".stgsize" statistics dumps.
    pub fn new(select_task: Option<String>, dump_name: Option<PathBuf>) -> Self {
        // Self {name: String::from("STGFeedback"), last_node_trace: None, last_edge_trace: None, last_intervals: None }
        let mut s = Self::default();
        // Register the metadata type so deserialization can find it.
        unsafe{libafl_bolts::prelude::RegistryBuilder::register::<STGFeedbackState<SYS>>()};
        s.dump_path = dump_name.map(|x| x.with_extension("stgsize"));
        s.select_task = select_task;
        s
    }
    /// params:
    /// trace of intervals
    /// trace of memory reads per interval
    /// hashtable of states
    /// feedbackstate
    /// produces:
    /// trace of node indexes representing the path through the graph (with timestamps),
    /// trace of edge indexes (with timestamps),
    /// whether anything interesting was found,
    /// whether the graph was updated
    /// side effect:
    /// the graph gets new nodes and edges; worst edge weights are updated
    fn update_stg_interval(trace: &Vec<ExecInterval>, read_trace: &Vec<Vec<(u32, u8)>>, table: &HashMap<u64, SYS::State>, fbs: &mut STGFeedbackState<SYS>) -> (Vec<(NodeIndex, u64)>, Vec<(EdgeIndex, u64)>, bool, bool) {
        let mut return_node_trace = vec![(fbs.entrypoint, 0)]; // Assuming entrypoint timestamp is 0
        let mut return_edge_trace = vec![];
        let mut interesting = false;
        let mut updated = false;
        if trace.is_empty() {
            return (return_node_trace, return_edge_trace, interesting, updated);
        }
        // per-ABB-instance accumulated (exec time, memory reads)
        let mut instance_time = execinterval_to_abb_instances(trace, read_trace);
        // add all missing state+abb combinations to the graph
        for (_i,interval) in trace.iter().enumerate() { // Iterate intervals
            let start_s = table[&interval.start_state].clone();
            let start_h = compute_hash(&start_s);
            fbs.systemstate_index.insert(start_h, start_s);
            let node : STGNode<SYS> = STGNode {state: start_h, abb: interval.abb.as_ref().unwrap().clone(), _phantom: PhantomData};
            let h_node = node.get_hash();
            let next_idx = if let Some(idx) = fbs.stgnode_index.get(&h_node) {
                // already present
                *idx
            } else {
                // not present
                let h = (start_h, node.abb.get_hash());
                let idx = fbs.graph.add_node(node);
                fbs.stgnode_index.insert(h_node, idx);
                fbs.state_abb_hash_index.insert(h, idx);
                interesting |= INTEREST_NODE;
                updated = true;
                idx
            };
            // connect in graph if edge not present
            let e = fbs.graph.edges_directed(return_node_trace[return_node_trace.len()-1].0, Direction::Outgoing).find(|x| petgraph::visit::EdgeRef::target(x) == next_idx);
            if let Some(e_) = e {
                // edge exists: record it and possibly raise its worst weight
                return_edge_trace.push((petgraph::visit::EdgeRef::id(&e_), interval.start_tick));
                if let Some((time, accesses)) = instance_time.get_mut(&interval.abb.as_ref().unwrap().instance_id) {
                    let ref_ = &mut fbs.graph.edge_weight_mut(e_.id()).unwrap().worst;
                    if ref_.is_some() {
                        let w = ref_.as_mut().unwrap();
                        if w.0 < *time {
                            // new worst case for this edge
                            *w = (*time, accesses.clone());
                            interesting |= INTEREST_EDGE_WEIGHT;
                        };
                    } else {
                        *ref_ = Some((*time, accesses.clone()));
                    }
                }
            } else {
                // new edge: attach the worst observation only if the edge ends an ABB
                let mut e__ = STGEdge{event: interval.start_capture.0, name: interval.start_capture.1.clone(), worst: None};
                if e__.is_abb_end() {
                    if let Some((time,accesses)) = instance_time.get_mut(&interval.abb.as_ref().unwrap().instance_id) {
                        e__.worst = Some((*time, accesses.clone()));
                    }
                }
                let e_ = fbs.graph.add_edge(return_node_trace[return_node_trace.len()-1].0, next_idx, e__);
                return_edge_trace.push((e_, interval.start_tick));
                interesting |= INTEREST_EDGE;
                updated = true;
            }
            return_node_trace.push((next_idx, interval.start_tick));
        }
        // every path terminates at the end
        if !fbs.graph.neighbors_directed(return_node_trace[return_node_trace.len()-1].0, Direction::Outgoing).any(|x| x == fbs.exitpoint) {
            let mut e__ = STGEdge { event: CaptureEvent::End, name: String::from("End"), worst: None };
            if let Some((time, accesses)) = instance_time.get_mut(&trace[trace.len()-1].abb.as_ref().unwrap().instance_id) {
                e__.worst = Some((*time, accesses.clone()));
            }
            let e_ = fbs.graph.add_edge(return_node_trace[return_node_trace.len()-1].0, fbs.exitpoint, e__);
            return_edge_trace.push((e_, trace[trace.len()-1].start_tick));
            interesting |= INTEREST_EDGE;
            updated = true;
        }
        return_node_trace.push((fbs.exitpoint, trace[trace.len()-1].start_tick));
        (return_node_trace, return_edge_trace, interesting, updated)
    }
    /// Extract the ABBs of all intervals that end an ABB, in execution order.
    fn abbs_in_exec_order(trace: &Vec<ExecInterval>) -> Vec<AtomicBasicBlock> {
        let mut ret = Vec::new();
        for i in 0..trace.len() {
            // only capture events that terminate an atomic basic block
            if trace[i].abb != None &&
            (trace[i].end_capture.0 == CaptureEvent::APIStart || trace[i].end_capture.0 == CaptureEvent::APIEnd || trace[i].end_capture.0 == CaptureEvent::End || trace[i].end_capture.0 == CaptureEvent::ISREnd) {
                ret.push(trace[i].abb.as_ref().unwrap().clone());
            }
        }
        ret
    }
}
/// `StgFeedback` requires no per-fuzzer-state initialization; the trait's
/// default implementation is sufficient.
impl<S, SYS> StateInitializer<S> for StgFeedback<SYS>
where
    SYS: TargetSystem,
{}
impl<EM, I, OT, S, SYS> Feedback<EM, I, OT, S> for StgFeedback<SYS>
where
    S: MaybeHasClientPerfMonitor + HasNamedMetadata + HasMetadata,
    I: Default,
    EM: EventFirer<I, S>,
    OT: ObserversTuple<I, S>,
    SYS: TargetSystem,
{
    /// Judge a finished execution by its system-state trace.
    ///
    /// Takes the `TraceData` the tracer left in `state`, folds it into the
    /// global STG kept in the `STGFeedbackState` metadata, and reports the
    /// input as interesting when any (feature-gated) novelty criterion fires:
    /// new/updated STG edges, a new worst overall runtime, new per-task
    /// worst jobs, or new path/aggregate hashes.
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting(
        &mut self,
        state: &mut S,
        _manager: &mut EM,
        _input: &I,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    {
        // TODO: don't remove metadata. work around ownership issues
        let trace : SYS::TraceData = *state.remove_metadata::<SYS::TraceData>().expect("TraceData not found");
        let clock_observer = observers.match_name::<QemuClockObserver<SYS>>("clocktime")
            .expect("QemuClockObserver not found");
        let last_runtime = clock_observer.last_runtime();
        #[cfg(feature = "trace_job_response_times")]
        let worst_jobs_rt = trace.worst_jobs_per_task_by_response_time();
        #[cfg(feature = "trace_job_response_times")]
        let worst_jobs_et = trace.worst_jobs_per_task_by_exec_time();
        // Worst job of the user-selected task (if any) by response time.
        #[cfg(feature = "trace_job_response_times")]
        let worst_select_job = if let Some(t) = self.select_task.as_ref() {worst_jobs_rt.get(t)} else {None};
        // When a task is selected, its worst response time replaces the raw
        // clock reading as the runtime metric (deliberate shadowing).
        // NOTE(review): the `t` binding below is unused; `is_some()` would do.
        #[cfg(feature = "trace_job_response_times")]
        let last_runtime = if let Some(t) = self.select_task.as_ref() {worst_select_job.map_or(0, |x| x.response_time())} else {last_runtime};
        let feedbackstate = state.metadata_map_mut().get_or_insert_with(||{
            STGFeedbackState::<SYS>::default()
        });
        // --------------------------------- Update STG
        let (mut nodetrace, mut edgetrace, mut interesting, mut updated) = StgFeedback::update_stg_interval(trace.intervals(), &trace.mem_reads(), trace.states_map(), feedbackstate);
        // the longest running case is always intersting
        if last_runtime > feedbackstate.wort {
            feedbackstate.wort = last_runtime;
            interesting |= true;
        }
        // Restrict node/edge traces to the window of the selected task's worst
        // job; if a task was selected but produced no job, drop the traces.
        #[cfg(feature = "trace_job_response_times")]
        if let Some(worst_instance) = worst_select_job {
            edgetrace = edgetrace.into_iter().filter(|x| x.1 <= worst_instance.response && x.1 >= worst_instance.release ).collect();
            nodetrace = nodetrace.into_iter().filter(|x| x.1 <= worst_instance.response && x.1 >= worst_instance.release ).collect();
        } else {
            if self.select_task.is_some() { // if nothing was selected, just take the whole trace, otherwise there is nothing interesting here
                edgetrace = Vec::new();
                nodetrace = Vec::new();
            }
        }
        #[cfg(feature = "feed_stg")]
        set_observer_map(&edgetrace.iter().map(|x| x.0).collect::<Vec<_>>());
        // --------------------------------- Update job instances
        // Track per-task worst jobs. The INTEREST_* constants are
        // feature-gated bools, so `&` here is a logical AND, not a bit mask.
        #[cfg(feature = "trace_job_response_times")]
        for i in worst_jobs_rt.iter() {
            interesting |= INTEREST_JOB_RT & if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
                // eprintln!("Job instance already present");
                x.try_update(i.1)
            } else {
                // eprintln!("New Job instance");
                feedbackstate.worst_task_jobs.insert(i.1.get_hash_cached(), RTOSTask::from_instance(&i.1));
                true
            }
        };
        #[cfg(feature = "trace_job_response_times")]
        for i in worst_jobs_et.iter() {
            interesting |= INTEREST_JOB_ET & if let Some(x) = feedbackstate.worst_task_jobs.get_mut(&i.1.get_hash_cached()) {
                x.try_update(i.1)
            } else {
                feedbackstate.worst_task_jobs.insert(i.1.get_hash_cached(), RTOSTask::from_instance(&i.1));
                true
            }
        };
        self.last_job_trace = Some(trace.jobs().clone());
        // dbg!(&observer.job_instances);
        // Record a new worst runtime for this exact STG edge path.
        {
            let h = get_generic_hash(&edgetrace);
            if let Some(x) = feedbackstate.wort_per_stg_path.get_mut(&h) {
                let t = last_runtime;
                if t > *x {
                    *x = t;
                    interesting |= INTEREST_PATH;
                }
            } else {
                feedbackstate.wort_per_stg_path.insert(h, last_runtime);
                updated = true;
                interesting |= INTEREST_PATH;
            }
        }
        // ABB sequence in execution order, optionally narrowed to the selected
        // task's worst-job window.
        #[cfg(not(feature = "trace_job_response_times"))]
        let tmp = StgFeedback::<SYS>::abbs_in_exec_order(&trace.intervals());
        #[cfg(feature = "trace_job_response_times")]
        let tmp = {
            if let Some(worst_instance) = worst_select_job {
                let t = trace.intervals().iter().filter(|x| x.start_tick < worst_instance.response && x.end_tick > worst_instance.release ).cloned().collect();
                StgFeedback::<SYS>::abbs_in_exec_order(&t)
            } else {
                if self.select_task.is_none() { // if nothing was selected, just take the whole trace, otherwise there is nothing interesting here
                    StgFeedback::<SYS>::abbs_in_exec_order(trace.intervals())
                } else {
                    Vec::new()
                }
            }
        };
        if INTEREST_AGGREGATE || INTEREST_ABBPATH {
            if INTEREST_ABBPATH {
                let h = get_generic_hash(&tmp);
                self.last_abbs_hash = Some(h);
                // order of execution is relevant
                if let Some(x) = feedbackstate.wort_per_abb_path.get_mut(&h) {
                    let t = last_runtime;
                    if t > *x {
                        *x = t;
                        interesting |= INTEREST_ABBPATH;
                    }
                } else {
                    feedbackstate.wort_per_abb_path.insert(h, last_runtime);
                    interesting |= INTEREST_ABBPATH;
                }
            }
            if INTEREST_AGGREGATE {
                // aggegation by sorting, order of states is not relevant
                let mut _tmp = tmp.clone();
                _tmp.sort(); // use sort+count, because we need the sorted trace anyways
                let counts = count_occurrences_sorted(&_tmp);
                let mut top_indices = Vec::new();
                if last_runtime >= feedbackstate.wort {
                    top_indices.push(u64::MAX); // pseudo trace to keep worts
                }
                // Remember ABBs whose per-trace execution count hit a new max.
                for (k,c) in counts {
                    if let Some(reference) = feedbackstate.worst_abb_exec_count.get_mut(k) {
                        if *reference < c {
                            *reference = c;
                            top_indices.push(get_generic_hash(k));
                        }
                    } else {
                        top_indices.push(get_generic_hash(k));
                        feedbackstate.worst_abb_exec_count.insert(k.clone(), c);
                    }
                }
                self.last_top_abb_hashes = Some(top_indices);
                self.last_aggregate_hash = Some(get_generic_hash(&_tmp));
                if let Some(x) = feedbackstate.wort_per_aggegated_path.get_mut(&_tmp) {
                    let t = last_runtime;
                    if t > *x {
                        *x = t;
                        interesting |= INTEREST_AGGREGATE;
                    }
                } else {
                    feedbackstate.wort_per_aggegated_path.insert(_tmp, last_runtime);
                    interesting |= INTEREST_AGGREGATE;
                }
            }
        }
        // let out = feedbackstate.graph.map(|i,x| x.pretty_print(), |_,_| "");
        // let outs = Dot::with_config(&out, &[Config::EdgeNoLabel]).to_string();
        // let outs = outs.replace(';',"\\n");
        // fs::write("./mystg.dot",outs).expect("Failed to write graph");
        // Stash the per-run traces so `append_metadata` can attach them to a
        // newly added corpus entry.
        self.last_node_trace = Some(nodetrace.into_iter().map(|x| x.0).collect::<Vec<_>>());
        self.last_edge_trace = Some(edgetrace.into_iter().map(|x| x.0).collect::<Vec<_>>());
        self.last_intervals = Some(trace.intervals().clone());
        self.last_abb_trace = Some(tmp);
        // Optional CSV dump of STG growth (edges, nodes, path counts, time).
        if let Some(dp) = &self.dump_path {
            if updated {
                let timestamp = SystemTime::now().duration_since(unsafe {FUZZ_START_TIMESTAMP}).unwrap().as_millis();
                let mut file = OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .append(true)
                    .open(dp).expect("Could not open stgsize");
                writeln!(file, "{},{},{},{},{}", feedbackstate.graph.edge_count(), feedbackstate.graph.node_count(), feedbackstate.wort_per_aggegated_path.len(),feedbackstate.wort_per_stg_path.len(), timestamp).expect("Write to dump failed");
            }
        }
        // Re-add trace data
        state.add_metadata(trace);
        Ok(interesting)
    }
    /// Append to the testcase the generated metadata in case of a new corpus item
    #[inline]
    fn append_metadata(&mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, testcase: &mut Testcase<I>) -> Result<(), Error> {
        let meta = STGNodeMetadata::new(self.last_node_trace.take().unwrap_or_default(), self.last_edge_trace.take().unwrap_or_default(), self.last_abb_trace.take().unwrap_or_default(), self.last_abbs_hash.take().unwrap_or_default(), self.last_aggregate_hash.take().unwrap_or_default(), self.last_top_abb_hashes.take().unwrap_or_default(), self.last_intervals.take().unwrap_or_default(), self.last_job_trace.take().unwrap_or_default());
        testcase.metadata_map_mut().insert(meta);
        Ok(())
    }
}
impl<SYS> Named for StgFeedback<SYS>
where
    SYS: TargetSystem,
{
    /// Name under which this feedback is registered in LibAFL.
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}

View File

@ -0,0 +1,192 @@
#![allow(non_camel_case_types,non_snake_case,non_upper_case_globals,deref_nullptr,unused)]
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt;
impl fmt::Debug for QueueDefinition__bindgen_ty_1 {
    // A union's active variant cannot be known here, so render a fixed
    // placeholder instead of interpreting the raw bytes.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str("union")
    }
}
/*========== Start of generated Code =============*/
// The guest is a 32-bit target, so every `*_ptr` alias below is a raw 32-bit
// guest address (`c_uint`), not a host pointer.
pub type char_ptr = ::std::os::raw::c_uint;
pub type void_ptr = ::std::os::raw::c_uint;
pub type ListItem_t_ptr = ::std::os::raw::c_uint;
pub type QueueDefinition_ptr = ::std::os::raw::c_uint;
pub type StackType_t_ptr = ::std::os::raw::c_uint;
pub type i_ptr8 = ::std::os::raw::c_uint;
pub type tskTaskControlBlock_ptr = ::std::os::raw::c_uint;
pub type xLIST_ptr = ::std::os::raw::c_uint;
pub type xLIST_ITEM_ptr = ::std::os::raw::c_uint;
/* automatically generated by rust-bindgen 0.71.1 */
// Mirror of the target image's FreeRTOSConfig.h — regenerate with bindgen
// rather than editing by hand; values must match the fuzzed firmware build.
pub const configASSERT_DEFINED: u32 = 1;
pub const configQUEUE_REGISTRY_SIZE: u32 = 20;
pub const configUSE_PREEMPTION: u32 = 1;
pub const configUSE_TIME_SLICING: u32 = 0;
pub const configUSE_PORT_OPTIMISED_TASK_SELECTION: u32 = 0;
pub const configUSE_IDLE_HOOK: u32 = 1;
pub const configUSE_TICK_HOOK: u32 = 1;
pub const configUSE_DAEMON_TASK_STARTUP_HOOK: u32 = 0;
pub const configMAX_TASK_NAME_LEN: u32 = 10;
pub const configUSE_TRACE_FACILITY: u32 = 0;
pub const configUSE_STATS_FORMATTING_FUNCTIONS: u32 = 0;
pub const configUSE_16_BIT_TICKS: u32 = 0;
pub const configIDLE_SHOULD_YIELD: u32 = 1;
pub const configUSE_CO_ROUTINES: u32 = 0;
pub const configMAX_PRIORITIES: u32 = 15;
pub const configMAX_CO_ROUTINE_PRIORITIES: u32 = 2;
pub const configTIMER_QUEUE_LENGTH: u32 = 20;
pub const configTIMER_TASK_PRIORITY: u32 = 14;
pub const configUSE_COUNTING_SEMAPHORES: u32 = 1;
pub const configSUPPORT_DYNAMIC_ALLOCATION: u32 = 1;
pub const configSUPPORT_STATIC_ALLOCATION: u32 = 1;
pub const configNUM_TX_DESCRIPTORS: u32 = 15;
pub const configSTREAM_BUFFER_TRIGGER_LEVEL_TEST_MARGIN: u32 = 2;
pub const configUSE_QUEUE_SETS: u32 = 1;
pub const configUSE_MALLOC_FAILED_HOOK: u32 = 1;
pub const configUSE_MUTEXES: u32 = 1;
pub const configUSE_RECURSIVE_MUTEXES: u32 = 1;
pub const configUSE_TIMERS: u32 = 1;
pub const INCLUDE_vTaskPrioritySet: u32 = 1;
pub const INCLUDE_uxTaskPriorityGet: u32 = 1;
pub const INCLUDE_vTaskDelete: u32 = 1;
pub const INCLUDE_vTaskCleanUpResources: u32 = 0;
pub const INCLUDE_vTaskSuspend: u32 = 1;
pub const INCLUDE_vTaskDelayUntil: u32 = 1;
pub const INCLUDE_vTaskDelay: u32 = 1;
pub const INCLUDE_uxTaskGetStackHighWaterMark: u32 = 1;
pub const INCLUDE_uxTaskGetStackHighWaterMark2: u32 = 1;
pub const INCLUDE_xTaskGetSchedulerState: u32 = 1;
pub const INCLUDE_xTimerGetTimerDaemonTaskHandle: u32 = 1;
pub const INCLUDE_xTaskGetIdleTaskHandle: u32 = 1;
pub const INCLUDE_xTaskGetHandle: u32 = 1;
pub const INCLUDE_eTaskGetState: u32 = 1;
pub const INCLUDE_xSemaphoreGetMutexHolder: u32 = 1;
pub const INCLUDE_xTimerPendFunctionCall: u32 = 1;
pub const INCLUDE_xTaskAbortDelay: u32 = 1;
pub const projCOVERAGE_TEST: u32 = 0;
pub const configKERNEL_INTERRUPT_PRIORITY: u32 = 255;
pub const configMAX_SYSCALL_INTERRUPT_PRIORITY: u32 = 191;
pub const configMAC_INTERRUPT_PRIORITY: u32 = 5;
pub const configUSE_TASK_NOTIFICATIONS: u32 = 1;
pub const configTASK_NOTIFICATION_ARRAY_ENTRIES: u32 = 10;
/* automatically generated by rust-bindgen 0.71.1 */
// `#[repr(C)]` mirrors of the guest's FreeRTOS kernel structures. They are
// deserialized byte-for-byte out of emulated memory, so field order and sizes
// must match the firmware build exactly — regenerate instead of hand-editing.
pub type StackType_t = u32;
pub type UBaseType_t = ::std::os::raw::c_uint;
pub type TickType_t = u32;
// Doubly-linked list node; `pvOwner` points at the owning TCB, `pvContainer`
// back at the list header.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xLIST_ITEM {
    pub xItemValue: TickType_t,
    pub pxNext: xLIST_ITEM_ptr,
    pub pxPrevious: xLIST_ITEM_ptr,
    pub pvOwner: void_ptr,
    pub pvContainer: xLIST_ptr,
}
pub type ListItem_t = xLIST_ITEM;
// Trimmed list node used as the `xListEnd` sentinel inside `xLIST`.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xMINI_LIST_ITEM {
    pub xItemValue: TickType_t,
    pub pxNext: xLIST_ITEM_ptr,
    pub pxPrevious: xLIST_ITEM_ptr,
}
pub type MiniListItem_t = xMINI_LIST_ITEM;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xLIST {
    pub uxNumberOfItems: UBaseType_t,
    pub pxIndex: ListItem_t_ptr,
    pub xListEnd: MiniListItem_t,
}
pub type List_t = xLIST;
pub type TaskHandle_t = tskTaskControlBlock_ptr;
pub const eTaskState_eRunning: eTaskState = 0;
pub const eTaskState_eReady: eTaskState = 1;
pub const eTaskState_eBlocked: eTaskState = 2;
pub const eTaskState_eSuspended: eTaskState = 3;
pub const eTaskState_eDeleted: eTaskState = 4;
pub const eTaskState_eInvalid: eTaskState = 5;
pub type eTaskState = ::std::os::raw::c_uint;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct xTASK_STATUS {
    pub xHandle: TaskHandle_t,
    pub pcTaskName: char_ptr,
    pub xTaskNumber: UBaseType_t,
    pub eCurrentState: eTaskState,
    pub uxCurrentPriority: UBaseType_t,
    pub uxBasePriority: UBaseType_t,
    pub ulRunTimeCounter: u32,
    pub pxStackBase: StackType_t_ptr,
    pub usStackHighWaterMark: u16,
}
pub type TaskStatus_t = xTASK_STATUS;
pub type QueueHandle_t = QueueDefinition_ptr;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct QueuePointers {
    pub pcTail: i_ptr8,
    pub pcReadFrom: i_ptr8,
}
pub type QueuePointers_t = QueuePointers;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct SemaphoreData {
    pub xMutexHolder: TaskHandle_t,
    pub uxRecursiveCallCount: UBaseType_t,
}
pub type SemaphoreData_t = SemaphoreData;
// Queues and semaphores share this structure; the `u` union selects between
// the two interpretations (see `QueueDefinition__bindgen_ty_1`).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct QueueDefinition {
    pub pcHead: i_ptr8,
    pub pcWriteTo: i_ptr8,
    pub u: QueueDefinition__bindgen_ty_1,
    pub xTasksWaitingToSend: List_t,
    pub xTasksWaitingToReceive: List_t,
    pub uxMessagesWaiting: UBaseType_t,
    pub uxLength: UBaseType_t,
    pub uxItemSize: UBaseType_t,
    pub cRxLock: i8,
    pub cTxLock: i8,
    pub ucStaticallyAllocated: u8,
    pub pxQueueSetContainer: QueueDefinition_ptr,
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union QueueDefinition__bindgen_ty_1 {
    pub xQueue: QueuePointers_t,
    pub xSemaphore: SemaphoreData_t,
}
pub type xQUEUE = QueueDefinition;
pub type Queue_t = xQUEUE;
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct QUEUE_REGISTRY_ITEM {
    pub pcQueueName: char_ptr,
    pub xHandle: QueueHandle_t,
}
pub type xQueueRegistryItem = QUEUE_REGISTRY_ITEM;
pub type QueueRegistryItem_t = xQueueRegistryItem;
// Task control block; array sizes come from configMAX_TASK_NAME_LEN (10) and
// configTASK_NOTIFICATION_ARRAY_ENTRIES (10) above.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize)]
pub struct tskTaskControlBlock {
    pub pxTopOfStack: StackType_t_ptr,
    pub xStateListItem: ListItem_t,
    pub xEventListItem: ListItem_t,
    pub uxPriority: UBaseType_t,
    pub pxStack: StackType_t_ptr,
    pub pcTaskName: [::std::os::raw::c_char; 10usize],
    pub uxBasePriority: UBaseType_t,
    pub uxMutexesHeld: UBaseType_t,
    pub ulNotifiedValue: [u32; 10usize],
    pub ucNotifyState: [u8; 10usize],
    pub ucStaticallyAllocated: u8,
    pub ucDelayAborted: u8,
}
pub type tskTCB = tskTaskControlBlock;
pub type TCB_t = tskTCB;

View File

@ -0,0 +1,88 @@
use hashbrown::HashMap;
use libafl_qemu::{elf::EasyElf, GuestAddr};
use crate::{
fuzzer::get_all_fn_symbol_ranges,
systemstate::{helpers::{get_function_range, load_symbol}, target_os::freertos::ISR_SYMBOLS},
};
// Add os-specific symbols to the target symbol hashmap
// Add os-specific symbols to the target symbol hashmap.
//
// All addresses are resolved straight from the ELF symbol table
// (`load_symbol(.., false)`: program headers are not applied). These kernel
// symbols are required for system-state observation.
pub fn add_target_symbols(elf: &EasyElf, addrs: &mut HashMap<&'static str, GuestAddr>) {
    const KERNEL_SYMBOLS: &[&'static str] = &[
        "pxCurrentTCB",
        "pxReadyTasksLists",
        "pxDelayedTaskList",
        "pxOverflowDelayedTaskList",
        "uxSchedulerSuspended",
        "xSchedulerRunning",
        "uxCriticalNesting",
        "xQueueRegistry",
    ];
    for sym in KERNEL_SYMBOLS {
        addrs.insert(sym, load_symbol(elf, sym, false));
    }
}
// Group functions into api, app and isr functions
/// Group the target's functions into API, APP and ISR sets.
///
/// Functions inside the `API_CODE`/`APP_CODE` sections are collected first;
/// any function named in [`ISR_SYMBOLS`] is then moved out of those groups
/// into the ISR group (an APP-section definition takes precedence over an
/// API-section one, matching the original two-pass order). ISR symbols found
/// in neither section are resolved individually via `get_function_range`.
///
/// # Panics
/// Panics if the `API_CODE` or `APP_CODE` range is missing from `ranges`.
pub fn get_range_groups(
    elf: &EasyElf,
    _addrs: &HashMap<&'static str, GuestAddr>,
    ranges: &HashMap<&'static str, std::ops::Range<GuestAddr>>,
) -> HashMap<&'static str, hashbrown::HashMap<String, std::ops::Range<u32>>> {
    let api_range = ranges.get("API_CODE").expect("API_CODE range missing");
    let app_range = ranges.get("APP_CODE").expect("APP_CODE range missing");
    let mut api_fn_ranges = get_all_fn_symbol_ranges(elf, api_range.clone());
    let mut app_fn_ranges = get_all_fn_symbol_ranges(elf, app_range.clone());
    let mut isr_fn_ranges: HashMap<String, std::ops::Range<GuestAddr>> = HashMap::new();
    for sym in ISR_SYMBOLS {
        // Claim ISR entry points from both groups; borrow as &str to avoid the
        // per-lookup String allocation of the old `&x.to_string()` keys.
        let from_api = api_fn_ranges.remove(*sym);
        let from_app = app_fn_ranges.remove(*sym);
        if let Some(range) = from_app.or(from_api) {
            isr_fn_ranges.insert(sym.to_string(), range);
        } else if let Some(range) = get_function_range(elf, sym) {
            // Fallback: symbol lives outside both sections.
            isr_fn_ranges.insert(sym.to_string(), range);
        }
    }
    let mut groups = HashMap::new();
    groups.insert("API_FN", api_fn_ranges);
    groups.insert("APP_FN", app_fn_ranges);
    groups.insert("ISR_FN", isr_fn_ranges);
    groups
}

View File

@ -0,0 +1,556 @@
#![allow(non_camel_case_types)]
use libafl_qemu::GuestAddr;
use qemu_module::{FreeRTOSSystemStateHelper, MEM_READ};
use serde::{Deserialize, Serialize};
use crate::{
impl_emu_lookup,
systemstate::{helpers::get_icount, CaptureEvent},
};
pub mod bindings;
pub mod qemu_module;
pub mod config;
pub mod post_processing;
use bindings::*;
use super::QemuLookup;
use crate::systemstate::target_os::*;
// Constants
// Number of scheduler ready lists; must equal configMAX_PRIORITIES (15) in
// the generated bindings.
const NUM_PRIOS: usize = 15;
//============================================================================= Outside interface
/// FreeRTOS implementation of the fuzzer's target-OS abstraction; carries the
/// raw system-state dumps captured from QEMU during a run.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct FreeRTOSSystem {
    pub raw_trace: Vec<RawFreeRTOSSystemState>,
}
impl TargetSystem for FreeRTOSSystem {
    type State = FreeRTOSSystemState;
    type TCB = RefinedTCB;
    type TraceData = FreeRTOSTraceMetadata;
}
impl TaskControlBlock for RefinedTCB {
    /// Task name decoded from the TCB's fixed-size name buffer.
    fn task_name(&self) -> &String {
        &self.task_name
    }
    fn task_name_mut(&mut self) -> &mut String {
        &mut self.task_name
    }
}
impl SystemState for FreeRTOSSystemState {
    type TCB = RefinedTCB;
    /// Task that was running when this snapshot was captured.
    fn current_task(&self) -> &Self::TCB {
        &self.current_task
    }
    // The `_after` field suffix suggests these are the lists as seen after the
    // capture point — TODO confirm against the capture hooks.
    fn get_ready_lists(&self) -> &Vec<Self::TCB> {
        &self.ready_list_after
    }
    fn get_delay_list(&self) -> &Vec<Self::TCB> {
        &self.delay_list_after
    }
    fn print_lists(&self) -> String {
        self.print_lists()
    }
    fn current_task_mut(&mut self) -> &mut Self::TCB {
        &mut self.current_task
    }
}
//============================================================================= Data structures
/// Tagged union of every guest kernel struct we deserialize from emulated
/// memory; used as the value type of `RawFreeRTOSSystemState::dumping_ground`,
/// keyed by the struct's guest address.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum FreeRTOSStruct {
    TCB_struct(TCB_t),
    List_struct(List_t),
    List_Item_struct(ListItem_t),
    List_MiniItem_struct(MiniListItem_t),
}
// Enable raw guest-memory lookups (`QemuLookup::lookup`) for each binding type.
impl_emu_lookup!(TCB_t);
impl_emu_lookup!(List_t);
impl_emu_lookup!(ListItem_t);
impl_emu_lookup!(MiniListItem_t);
impl_emu_lookup!(void_ptr);
impl_emu_lookup!(TaskStatus_t);
impl_emu_lookup!(QueueRegistryItem_t);
impl_emu_lookup!(Queue_t);
/// All interrupt service routine entry points of the target image, kernel and
/// user alike; used to pull ISR functions out of the API/APP groups.
pub const ISR_SYMBOLS: &'static [&'static str] = &[
    // ISRs
    "Reset_Handler",
    "Default_Handler",
    "Default_Handler2",
    "Default_Handler3",
    "Default_Handler4",
    "Default_Handler5",
    "Default_Handler6",
    "vPortSVCHandler",
    "xPortPendSVHandler",
    "xPortSysTickHandler",
    "ISR_0_Handler",
    "ISR_1_Handler",
    "ISR_2_Handler",
    "ISR_3_Handler",
    "ISR_4_Handler",
    "ISR_5_Handler",
    "ISR_6_Handler",
    "ISR_7_Handler",
    "ISR_8_Handler",
    "ISR_9_Handler",
    "ISR_10_Handler",
    "ISR_11_Handler",
    "ISR_12_Handler",
    "ISR_13_Handler",
];
/// The user-defined (application) ISRs only — the subset of [`ISR_SYMBOLS`]
/// the fuzzer may trigger as interrupt inputs.
pub const USR_ISR_SYMBOLS: &'static [&'static str] = &[
    "ISR_0_Handler",
    "ISR_1_Handler",
    "ISR_2_Handler",
    "ISR_3_Handler",
    "ISR_4_Handler",
    "ISR_5_Handler",
    "ISR_6_Handler",
    "ISR_7_Handler",
    "ISR_8_Handler",
    "ISR_9_Handler",
    "ISR_10_Handler",
    "ISR_11_Handler",
    "ISR_12_Handler",
    "ISR_13_Handler",
];
//============================================================================= Helper functions
/// Walk a FreeRTOS `List_t` in guest memory, caching every visited
/// `ListItem_t`/`MiniListItem_t` and each item's owning `TCB_t` into
/// `systemstate.dumping_ground` (keyed by guest address).
///
/// Returns the list header and `true` on success. If an item does not point
/// back at its containing list the list is assumed to be mid-modification:
/// the returned header gets `uxNumberOfItems` forced to 0 and the flag is
/// `false`.
fn read_freertos_list(
    systemstate: &mut RawFreeRTOSSystemState,
    emulator: &libafl_qemu::Qemu,
    target: GuestAddr,
) -> (List_t, bool) {
    let read: List_t = QemuLookup::lookup(emulator, target);
    let listbytes: GuestAddr = GuestAddr::try_from(std::mem::size_of::<List_t>()).unwrap();
    let mut next_index = read.pxIndex;
    for _j in 0..read.uxNumberOfItems {
        // always jump over the xListEnd marker
        // (addresses inside the header's own span are the sentinel mini-item)
        if (target..target + listbytes).contains(&next_index) {
            let next_item: MiniListItem_t = QemuLookup::lookup(emulator, next_index);
            let new_next_index = next_item.pxNext;
            systemstate
                .dumping_ground
                .insert(next_index, FreeRTOSStruct::List_MiniItem_struct(next_item));
            next_index = new_next_index;
        }
        let next_item: ListItem_t = QemuLookup::lookup(emulator, next_index);
        // println!("Item at {}: {:?}",next_index,next_item);
        if next_item.pvContainer != target {
            // the list is being modified, abort by setting the list empty
            eprintln!("Warning: attempted to read a list that is being modified");
            let mut read = read;
            read.uxNumberOfItems = 0;
            return (read, false);
        }
        // assert_eq!(next_item.pvContainer,target);
        let new_next_index = next_item.pxNext;
        // Each list item's pvOwner points at the TCB owning it; cache it too.
        let next_tcb: TCB_t = QemuLookup::lookup(emulator, next_item.pvOwner);
        // println!("TCB at {}: {:?}",next_item.pvOwner,next_tcb);
        systemstate.dumping_ground.insert(
            next_item.pvOwner,
            FreeRTOSStruct::TCB_struct(next_tcb.clone()),
        );
        systemstate
            .dumping_ground
            .insert(next_index, FreeRTOSStruct::List_Item_struct(next_item));
        next_index = new_next_index;
    }
    // Handle edge case where the end marker was not included yet
    if (target..target + listbytes).contains(&next_index) {
        let next_item: freertos::MiniListItem_t = QemuLookup::lookup(emulator, next_index);
        systemstate
            .dumping_ground
            .insert(next_index, FreeRTOSStruct::List_MiniItem_struct(next_item));
    }
    return (read, true);
}
/// Capture one full scheduler snapshot at a hook edge and append it to the
/// global `CURRENT_SYSTEMSTATE_VEC`.
///
/// `edge` is the (from, to) guest address pair that fired the hook, `event`
/// classifies it (API/ISR entry or exit, or run end). Kernel lists are only
/// walked when it is safe to do so (inside an API call, or when neither the
/// critical-nesting counter nor the scheduler-suspended flag is set);
/// otherwise the snapshot is marked `read_invalid`.
///
/// NOTE(review): accesses the `MEM_READ` and `CURRENT_SYSTEMSTATE_VEC`
/// `static mut`s without synchronization — presumably sound because hooks run
/// single-threaded (the crate's `singlecore` feature); confirm.
#[inline]
fn trigger_collection(
    emulator: &libafl_qemu::Qemu,
    edge: (GuestAddr, GuestAddr),
    event: CaptureEvent,
    h: &FreeRTOSSystemStateHelper,
) {
    let listbytes: GuestAddr =
        GuestAddr::try_from(std::mem::size_of::<freertos::List_t>()).unwrap();
    let mut systemstate = RawFreeRTOSSystemState::default();
    // Resolve the function name of the capture point from the hook edge:
    // entries are named after the edge target, exits after the edge source.
    match event {
        CaptureEvent::APIStart => {
            let s = h.api_fn_addrs.get(&edge.1).unwrap();
            systemstate.capture_point = (CaptureEvent::APIStart, s.to_string());
        }
        CaptureEvent::APIEnd => {
            let s = h.api_fn_addrs.get(&edge.0).unwrap();
            systemstate.capture_point = (CaptureEvent::APIEnd, s.to_string());
        }
        CaptureEvent::ISRStart => {
            let s = h.isr_fn_addrs.get(&edge.1).unwrap();
            systemstate.capture_point = (CaptureEvent::ISRStart, s.to_string());
        }
        CaptureEvent::ISREnd => {
            let s = h.isr_fn_addrs.get(&edge.0).unwrap();
            systemstate.capture_point = (CaptureEvent::ISREnd, s.to_string());
        }
        CaptureEvent::End => {
            systemstate.capture_point = (CaptureEvent::End, "".to_string());
        }
        CaptureEvent::Undefined => (),
    }
    if systemstate.capture_point.0 == CaptureEvent::Undefined {
        // println!("Not found: {:#x} {:#x}", edge.0.unwrap_or(0), edge.1.unwrap_or(0));
    }
    systemstate.edge = ((edge.0), (edge.1));
    systemstate.qemu_tick = get_icount(emulator);
    // A NULL pxCurrentTCB means the scheduler has not started yet; skip.
    let curr_tcb_addr: freertos::void_ptr = QemuLookup::lookup(emulator, h.tcb_addr);
    if curr_tcb_addr == 0 {
        return;
    };
    /*
    let mut queue_registry : Vec<QueueRegistryItem_t> = QemuLookup::lookup_slice(emulator, h.queue_registry_addrs, configQUEUE_REGISTRY_SIZE as usize);
    let queue_registry = queue_registry.into_iter().filter(|x| x.xHandle != 0).map(|x| {
        let queue_def: freertos::QueueDefinition = QemuLookup::lookup(emulator, x.xHandle);
        let queue_name: String = emu_lookup_string(emulator, x.pcQueueName, None);
        if queue_def.cRxLock == 0xFF && queue_def.cTxLock == 0xFF {
            let sending = read_freertos_list(&mut systemstate, emulator, queue_def.xTasksWaitingToSend);
            let recieving = read_freertos_list(&mut systemstate, emulator, queue_def.xTasksWaitingToSend);
        }
        (queue_def, queue_name)
    }
    ).collect::<Vec<_>>();
    dbg!(&queue_registry);
    */
    // println!("{:?}",std::str::from_utf8(&current_tcb.pcTaskName));
    let critical: void_ptr = QemuLookup::lookup(emulator, h.critical_addr);
    let suspended: void_ptr = QemuLookup::lookup(emulator, h.scheduler_lock_addr);
    let _running: void_ptr = QemuLookup::lookup(emulator, h.scheduler_running_addr);
    systemstate.current_tcb = QemuLookup::lookup(emulator, curr_tcb_addr);
    // During ISRs it is only safe to extract structs if they are not currently being modified
    if systemstate.capture_point.0 == CaptureEvent::APIStart
        || systemstate.capture_point.0 == CaptureEvent::APIEnd
        || (critical == 0 && suspended == 0)
    {
        // Extract delay list
        let mut target: GuestAddr = h.delay_queue;
        target = QemuLookup::lookup(emulator, target); // double indirection: symbol holds a pointer
        let _temp = read_freertos_list(&mut systemstate, emulator, target);
        systemstate.delay_list = _temp.0;
        systemstate.read_invalid |= !_temp.1;
        // Extract delay list overflow
        let mut target: GuestAddr = h.delay_queue_overflow;
        target = QemuLookup::lookup(emulator, target);
        let _temp = read_freertos_list(&mut systemstate, emulator, target);
        systemstate.delay_list_overflow = _temp.0;
        systemstate.read_invalid |= !_temp.1;
        // Extract suspended tasks (infinite wait), seems broken, always appreas to be modified
        // let mut target : GuestAddr = h.suspended_queue;
        // target = QemuLookup::lookup(emulator, target);
        // systemstate.suspended_list = read_freertos_list(&mut systemstate, emulator, target);
        // Extract priority lists
        for i in 0..NUM_PRIOS {
            // pxReadyTasksLists is an array of List_t, one per priority level.
            let target: GuestAddr = listbytes * GuestAddr::try_from(i).unwrap() + h.ready_queues;
            let _temp = read_freertos_list(&mut systemstate, emulator, target);
            systemstate.prio_ready_lists[i] = _temp.0;
            systemstate.read_invalid |= !_temp.1;
        }
    } else {
        systemstate.read_invalid = true;
    }
    // Drain the memory reads recorded since the previous capture point.
    systemstate.mem_reads = unsafe { std::mem::replace((&raw mut MEM_READ).as_mut().unwrap(), vec![])};
    unsafe {
        (&raw mut CURRENT_SYSTEMSTATE_VEC).as_mut().unwrap().push(systemstate);
    }
}
/// Raw info Dump from Qemu
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct RawFreeRTOSSystemState {
    qemu_tick: u64,                         // icount at the capture point
    current_tcb: TCB_t,                     // pxCurrentTCB at capture time
    prio_ready_lists: [freertos::List_t; NUM_PRIOS], // one ready list per priority
    delay_list: freertos::List_t,
    delay_list_overflow: freertos::List_t,
    // Every struct read out of guest memory, keyed by its guest address.
    dumping_ground: HashMap<u32, freertos::FreeRTOSStruct>,
    read_invalid: bool,                     // true if any list walk was unsafe/aborted
    input_counter: u32,
    edge: (GuestAddr, GuestAddr),           // hook edge that triggered the capture
    capture_point: (CaptureEvent, String),  // event kind + function name
    mem_reads: Vec<(u32, u8)>,              // (addr, byte) reads since last capture
}
/// List of system state dumps from EmulatorModules
static mut CURRENT_SYSTEMSTATE_VEC: Vec<RawFreeRTOSSystemState> = vec![];
/// A reduced version of freertos::TCB_t
/// (only the fields relevant for state hashing/comparison).
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct RefinedTCB {
    pub task_name: String,   // decoded, NUL-stripped pcTaskName
    pub priority: u32,
    pub base_priority: u32,
    mutexes_held: u32,
    notify_value: [u32; configTASK_NOTIFICATION_ARRAY_ENTRIES as usize],
    notify_state: [u8; configTASK_NOTIFICATION_ARRAY_ENTRIES as usize],
}
impl PartialEq for RefinedTCB {
    /// Equality over name and both priorities; notification state/value are
    /// only compared when the matching `do_hash_notify_*` features are on.
    /// NOTE(review): `mutexes_held` is ignored here but IS fed into the `Hash`
    /// impl (which in turn ignores `base_priority`), so equal values can hash
    /// differently — this breaks the usual Eq/Hash consistency contract.
    /// Changing either would alter state deduplication; flagging only.
    fn eq(&self, other: &Self) -> bool {
        let ret = self.task_name == other.task_name
            && self.priority == other.priority
            && self.base_priority == other.base_priority;
        #[cfg(feature = "do_hash_notify_state")]
        let ret = ret && self.notify_state == other.notify_state;
        #[cfg(feature = "do_hash_notify_value")]
        let ret = ret && self.notify_value == other.notify_value;
        ret
    }
}
impl Hash for RefinedTCB {
    /// Hash over name, priority and held mutexes (plus feature-gated notify
    /// fields). NOTE(review): this hashes `mutexes_held`, which `PartialEq`
    /// ignores, and skips `base_priority`, which `PartialEq` compares — equal
    /// values may hash differently. Flagged rather than fixed because the
    /// hash feeds the fuzzer's state-novelty detection.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.task_name.hash(state);
        self.priority.hash(state);
        self.mutexes_held.hash(state);
        #[cfg(feature = "do_hash_notify_state")]
        self.notify_state.hash(state);
        #[cfg(feature = "do_hash_notify_value")]
        self.notify_value.hash(state);
    }
}
impl RefinedTCB {
    /// Build a reduced TCB view from a raw guest TCB.
    ///
    /// The fixed 10-byte `pcTaskName` buffer is reinterpreted as `u8`,
    /// decoded as UTF-8 and stripped of NUL padding.
    ///
    /// # Panics
    /// Panics if the guest wrote non-UTF-8 bytes into the name buffer.
    pub fn from_tcb(input: &TCB_t) -> Self {
        // Safe per-byte i8 -> u8 reinterpretation (replaces the old
        // `mem::transmute` of the whole array).
        let raw_name: [u8; 10] = input.pcTaskName.map(|b| b as u8);
        let name: String = std::str::from_utf8(&raw_name)
            .expect("TCB name was not utf8")
            .chars()
            .filter(|x| *x != '\0')
            .collect::<String>();
        Self {
            task_name: name,
            priority: input.uxPriority,
            base_priority: input.uxBasePriority,
            mutexes_held: input.uxMutexesHeld,
            notify_value: input.ulNotifiedValue,
            notify_state: input.ucNotifyState,
        }
    }
    /// Owned-argument variant of [`Self::from_tcb`]; previously a verbatim
    /// duplicate of its body.
    pub fn from_tcb_owned(input: TCB_t) -> Self {
        Self::from_tcb(&input)
    }
}
/// Reduced information about a systems state, without any execution context
/// (the when/where lives in `FreeRTOSSystemStateContext`).
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct FreeRTOSSystemState {
    current_task: RefinedTCB,          // task running at the capture point
    ready_list_after: Vec<RefinedTCB>, // flattened ready lists
    delay_list_after: Vec<RefinedTCB>, // delayed/blocked tasks
    read_invalid: bool,                // capture was unsafe; lists may be empty
}
impl PartialEq for FreeRTOSSystemState {
    /// Field-wise equality over all four fields (manual because `RefinedTCB`
    /// has a custom, feature-dependent `PartialEq`).
    fn eq(&self, other: &Self) -> bool {
        self.current_task == other.current_task
            && self.ready_list_after == other.ready_list_after
            && self.delay_list_after == other.delay_list_after
            && self.read_invalid == other.read_invalid
    }
}
impl Hash for FreeRTOSSystemState {
    /// Hash over all four fields, mirroring `PartialEq` (modulo the known
    /// Eq/Hash asymmetry inside `RefinedTCB` itself).
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.current_task.hash(state);
        self.ready_list_after.hash(state);
        self.delay_list_after.hash(state);
        self.read_invalid.hash(state);
    }
}
impl FreeRTOSSystemState {
    /// Two-line summary of the scheduler lists: `+` line for ready tasks,
    /// `-` line for delayed tasks, names space-separated.
    pub fn print_lists(&self) -> String {
        let mut out = String::from("+");
        self.ready_list_after
            .iter()
            .for_each(|tcb| out.push_str(&format!(" {}", tcb.task_name)));
        out.push_str("\n-");
        self.delay_list_after
            .iter()
            .for_each(|tcb| out.push_str(&format!(" {}", tcb.task_name)));
        out
    }
    /// Hash of the whole snapshot via the custom `Hash` impl.
    pub fn get_hash(&self) -> u64 {
        let mut hasher = DefaultHasher::new();
        self.hash(&mut hasher);
        hasher.finish()
    }
}
impl fmt::Display for FreeRTOSSystemState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let ready = self
.ready_list_after
.iter()
.map(|x| x.task_name.clone())
.collect::<Vec<_>>()
.join(" ");
let delay = self
.delay_list_after
.iter()
.map(|x| x.task_name.clone())
.collect::<Vec<_>>()
.join(" ");
write!(
f,
"Valid: {} | Current: {} | Ready: {} | Delay: {}",
u32::from(!self.read_invalid),
self.current_task.task_name,
ready,
delay
)
}
}
/// Execution context captured alongside a system state: when it was taken
/// (icount tick), which hook fired, the triggering edge, and the memory reads
/// recorded since the previous capture.
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub(crate) struct FreeRTOSSystemStateContext {
    pub qemu_tick: u64,
    pub capture_point: (CaptureEvent, String),
    pub edge: (GuestAddr, GuestAddr),
    pub mem_reads: Vec<(u32, u8)>,
}
/// Post-processed trace of one execution, attached to the fuzzer state /
/// testcases as metadata.
#[derive(Debug, Default, Serialize, Deserialize, Clone)]
pub struct FreeRTOSTraceMetadata
{
    // Deduplicated states, keyed by their hash.
    trace_map: HashMap<u64, <FreeRTOSTraceMetadata as SystemTraceData>::State>,
    intervals: Vec<ExecInterval>,
    mem_reads: Vec<Vec<(u32, u8)>>, // per-interval (addr, byte) reads
    jobs: Vec<RTOSJob>,
    trace_length: usize,
    indices: Vec<usize>, // Hashed enumeration of States
    tcref: isize,        // refcount for corpus-metadata bookkeeping
    need_to_debug: bool,
}
impl FreeRTOSTraceMetadata
{
pub fn new(trace: Vec<<FreeRTOSTraceMetadata as SystemTraceData>::State>, intervals: Vec<ExecInterval>, mem_reads: Vec<Vec<(u32, u8)>>, jobs: Vec<RTOSJob>, need_to_debug: bool) -> Self {
let hashes : Vec<_> = trace
.iter()
.map(|x| compute_hash(&x) as usize)
.collect();
let trace_map = HashMap::from_iter(trace.into_iter().zip(hashes.iter()).map(|(x, y)| (*y as u64, x)));
Self {
trace_length: hashes.len(), // TODO make this configurable
trace_map: trace_map,
intervals: intervals,
mem_reads: mem_reads,
jobs: jobs,
indices: hashes,
tcref: 0,
need_to_debug: need_to_debug,
}
}
}
impl HasRefCnt for FreeRTOSTraceMetadata
{
    /// Reference count used by LibAFL's testcase-metadata bookkeeping.
    fn refcnt(&self) -> isize {
        self.tcref
    }
    fn refcnt_mut(&mut self) -> &mut isize {
        &mut self.tcref
    }
}
impl SystemTraceData for FreeRTOSTraceMetadata
{
    type State = FreeRTOSSystemState;
    /// States in original trace order, resolved through the dedup map.
    /// Hashes with no map entry are silently skipped (should not occur, since
    /// `new` builds both from the same trace).
    fn states(&self) -> Vec<&Self::State> {
        self.indices.iter().filter_map(|x| self.trace_map.get(&(*x as u64))).collect()
    }
    fn intervals(&self) -> &Vec<ExecInterval> {
        &self.intervals
    }
    fn jobs(&self) -> &Vec<RTOSJob> {
        &self.jobs
    }
    fn trace_length(&self) -> usize {
        self.trace_length
    }
    fn mem_reads(&self) -> &Vec<Vec<(u32, u8)>> {
        &self.mem_reads
    }
    /// Deduplicated states keyed by their hash.
    fn states_map(&self) -> &HashMap<u64, Self::State> {
        &self.trace_map
    }
    fn need_to_debug(&self) -> bool {
        self.need_to_debug
    }
}
// Register these types with libafl_bolts' SerdeAny machinery so they can be
// stored and (de)serialized as type-erased metadata entries.
libafl_bolts::impl_serdeany!(FreeRTOSTraceMetadata);
libafl_bolts::impl_serdeany!(RefinedTCB);
libafl_bolts::impl_serdeany!(FreeRTOSSystemState);
libafl_bolts::impl_serdeany!(FreeRTOSSystem);
/// Collect the set of distinct task names that appear as the currently
/// running task anywhere in `trace`.
pub(crate) fn get_task_names(trace: &Vec<FreeRTOSSystemState>) -> HashSet<String> {
    trace
        .iter()
        .map(|state| state.current_task.task_name.to_string())
        .collect()
}

View File

@ -0,0 +1,714 @@
use std::{cell::RefCell, collections::VecDeque, rc::Rc};
use freertos::USR_ISR_SYMBOLS;
use hashbrown::HashMap;
use crate::systemstate::{
target_os::{freertos::FreeRTOSStruct::*, *},
AtomicBasicBlock, CaptureEvent,
};
use super::{
bindings::*,
compute_hash, ExecInterval, FreeRTOSStruct, FreeRTOSSystemState,
FreeRTOSSystemStateContext, RawFreeRTOSSystemState, RefinedTCB,
};
//============================= Parsing helpers
/// Parse a List_t containing TCB_t into Vec<TCB_t> from cache. Consumes the elements from cache.
///
/// FreeRTOS lists are circular: `pxIndex` points at some item, and the items
/// are walked via `pxNext`. The item `pxIndex` points at is appended last so
/// the result starts at its successor, matching the original traversal order.
///
/// # Panics
/// Panics when the dump is missing a referenced entry or an entry has an
/// unexpected variant (corrupt/incomplete capture).
pub fn tcb_list_to_vec_cached(list: List_t, dump: &mut HashMap<u32, FreeRTOSStruct>) -> Vec<TCB_t> {
    let mut ret: Vec<TCB_t> = Vec::new();
    if list.uxNumberOfItems == 0 {
        return ret;
    }
    // Resolve an address to its ListItem, following a MiniListItem
    // indirection (the list's sentinel) if necessary.
    let resolve_list_item = |dump: &mut HashMap<u32, FreeRTOSStruct>, addr: u32| match dump
        .remove(&addr)
        .expect("List_t entry was not in Hashmap")
    {
        List_Item_struct(li) => li,
        List_MiniItem_struct(mli) => match dump
            .remove(&mli.pxNext)
            .expect("MiniListItem pointer invalid")
        {
            List_Item_struct(li) => li,
            _ => panic!("MiniListItem of a non empty List does not point to ListItem"),
        },
        _ => panic!("List_t entry was not a ListItem"),
    };
    // Take the TCB owned by a list item out of the dump.
    let take_tcb = |dump: &mut HashMap<u32, FreeRTOSStruct>, owner: u32| match dump
        .remove(&owner)
        .expect("ListItem Owner not in Hashmap")
    {
        TCB_struct(t) => t,
        _ => panic!("List content does not equal type"),
    };
    let last_list_item = resolve_list_item(dump, list.pxIndex);
    let mut next_index = last_list_item.pxNext;
    let last_tcb = take_tcb(dump, last_list_item.pvOwner);
    // Walk the remaining items in list order, starting after pxIndex.
    for _ in 0..list.uxNumberOfItems - 1 {
        let next_list_item = resolve_list_item(dump, next_index);
        ret.push(take_tcb(dump, next_list_item.pvOwner));
        next_index = next_list_item.pxNext;
    }
    ret.push(last_tcb);
    ret
}
//============================= State refinement
/// Drains a List of raw SystemStates to produce a refined trace
/// returns:
/// - a Vec of FreeRTOSSystemState
/// - a Vec of FreeRTOSSystemStateContext (qemu_tick, (capture_event, capture_name), edge, mem_reads)
pub(crate) fn refine_system_states(
    mut input: Vec<RawFreeRTOSSystemState>,
) -> (Vec<FreeRTOSSystemState>, Vec<FreeRTOSSystemStateContext>) {
    let mut ret = (Vec::<_>::new(), Vec::<_>::new());
    for mut i in input.drain(..) {
        let cur = RefinedTCB::from_tcb_owned(i.current_tcb);
        // Collect the per-priority ready lists into one Vec, highest priority first.
        let mut collector = Vec::<RefinedTCB>::new();
        for j in i.prio_ready_lists.into_iter().rev() {
            let mut tmp = tcb_list_to_vec_cached(j, &mut i.dumping_ground)
                .iter()
                .map(RefinedTCB::from_tcb)
                .collect();
            collector.append(&mut tmp);
        }
        #[cfg(feature = "observe_systemstate_unordered")]
        {
            // Respect the order of the first `LOOKAHEAD` tasks and sort the
            // rest by task name, making the state order-insensitive beyond
            // the immediate scheduling horizon.
            const LOOKAHEAD: usize = 2;
            if let Some(rest) = collector.get_mut(LOOKAHEAD..) {
                rest.sort_by(|a, b| a.task_name.cmp(&b.task_name));
            }
        }
        // Collect both delay lists (normal + tick-overflow) and sort by name.
        let mut delay_list: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(i.delay_list, &mut i.dumping_ground)
                .iter()
                .map(RefinedTCB::from_tcb)
                .collect();
        let mut delay_list_overflow: Vec<RefinedTCB> =
            tcb_list_to_vec_cached(i.delay_list_overflow, &mut i.dumping_ground)
                .iter()
                .map(RefinedTCB::from_tcb)
                .collect();
        delay_list.append(&mut delay_list_overflow);
        delay_list.sort_by(|a, b| a.task_name.cmp(&b.task_name));
        ret.0.push(FreeRTOSSystemState {
            current_task: cur,
            ready_list_after: collector,
            delay_list_after: delay_list,
            read_invalid: i.read_invalid,
        });
        ret.1.push(FreeRTOSSystemStateContext {
            qemu_tick: i.qemu_tick,
            capture_point: (i.capture_point.0, i.capture_point.1.to_string()),
            edge: i.edge,
            mem_reads: i.mem_reads,
        });
    }
    ret
}
/// Transform the states and metadata into a list of ExecIntervals, along with a HashMap of states, a list of HashSets marking memory reads and a bool indicating success
/// returns:
/// - a Vec of ExecIntervals
/// - a Vec of HashSets marking memory reads during these intervals
/// - a HashMap of ReducedFreeRTOSSystemStates by hash
/// - a bool indicating success
pub(crate) fn states2intervals(
    trace: Vec<FreeRTOSSystemState>,
    meta: Vec<FreeRTOSSystemStateContext>,
) -> (
    Vec<ExecInterval>,
    Vec<Vec<(u32, u8)>>,
    HashMap<u64, FreeRTOSSystemState>,
    bool,
) {
    if trace.len() == 0 {
        return (Vec::new(), Vec::new(), HashMap::new(), true);
    }
    // Stack of the nesting levels of currently active ISRs.
    let mut isr_stack: VecDeque<u8> = VecDeque::from([]); // 2+ = ISR, 1 = systemcall, 0 = APP. Trace starts with an ISREnd and executes the app
    // Last known level (0 = app, 1 = inside API call) per task name.
    let mut level_of_task: HashMap<&str, u8> = HashMap::new();
    let mut ret: Vec<ExecInterval> = vec![];
    // Memory reads attributed to each interval (taken from the capture that ends it).
    let mut reads: Vec<Vec<(u32, u8)>> = vec![];
    // (exit address of the capture starting interval i, entry address of the
    // capture ending it) — consumed later by `add_abb_info`.
    let mut edges: Vec<(u32, u32)> = vec![];
    let mut last_hash: u64 = compute_hash(&trace[0]);
    // Deduplicated states, keyed by hash.
    let mut table: HashMap<u64, FreeRTOSSystemState> = HashMap::new();
    table.insert(last_hash, trace[0].clone());
    for i in 0..trace.len() - 1 {
        let curr_name = trace[i].current_task().task_name().as_str();
        // let mut interval_name = curr_name; // Name of the interval, either the task name or the isr/api funtion name
        // Derive the execution level of the interval that starts at capture i
        // from the kind of capture event.
        let level = match meta[i].capture_point.0 {
            CaptureEvent::APIEnd => {
                // API end always exits towards the app
                if !level_of_task.contains_key(curr_name) {
                    level_of_task.insert(curr_name, 0);
                }
                *level_of_task.get_mut(curr_name).unwrap() = 0;
                0
            }
            CaptureEvent::APIStart => {
                // API start can only be called in the app
                if !level_of_task.contains_key(curr_name) {
                    // Should not happen, apps start from an ISR End. Some input exibited this behavior for unknown reasons
                    level_of_task.insert(curr_name, 0);
                }
                *level_of_task.get_mut(curr_name).unwrap() = 1;
                // interval_name = &meta[i].2;
                1
            }
            CaptureEvent::ISREnd => {
                // special case where the next block is an app start
                if !level_of_task.contains_key(curr_name) {
                    level_of_task.insert(curr_name, 0);
                }
                // nested isr, TODO: Test level > 2
                if isr_stack.len() > 1 {
                    // interval_name = ""; // We can't know which isr is running
                    isr_stack.pop_back().unwrap();
                    *isr_stack.back().unwrap()
                } else {
                    isr_stack.pop_back();
                    // possibly go back to an api call that is still running for this task
                    if level_of_task.get(curr_name).unwrap() == &1 {
                        // interval_name = ""; // We can't know which api is running
                    }
                    *level_of_task.get(curr_name).unwrap()
                }
            }
            CaptureEvent::ISRStart => {
                // special case for isrs which do not capture their end
                // if meta[i].2 == "ISR_0_Handler" {
                //     &2
                // } else {
                // regular case
                // interval_name = &meta[i].2;
                if isr_stack.len() > 0 {
                    // nested ISR: one level deeper than the interrupted ISR
                    let l = *isr_stack.back().unwrap();
                    isr_stack.push_back(l + 1);
                    l + 1
                } else {
                    isr_stack.push_back(2);
                    2
                }
                // }
            }
            _ => 100,
        };
        // if trace[i].2 == CaptureEvent::End {break;}
        let next_hash = compute_hash(&trace[i + 1]);
        if !table.contains_key(&next_hash) {
            table.insert(next_hash, trace[i + 1].clone());
        }
        ret.push(ExecInterval {
            start_tick: meta[i].qemu_tick,
            end_tick: meta[i + 1].qemu_tick,
            start_state: last_hash,
            end_state: next_hash,
            start_capture: meta[i].capture_point.clone(),
            end_capture: meta[i + 1].capture_point.clone(),
            level: level,
            abb: None,
        });
        reads.push(meta[i + 1].mem_reads.clone());
        last_hash = next_hash;
        edges.push((meta[i].edge.1, meta[i + 1].edge.0));
    }
    let t = add_abb_info(&mut ret, &table, &edges);
    (ret, reads, table, t)
}
/// Marks which abbs were executed at each interval
///
/// Reconstructs atomic basic blocks (ABBs) by matching block starts (API/ISR
/// entry and exit captures) against the return addresses at which an
/// interrupted block later resumes. Intervals belonging to the same ABB share
/// one `Rc<RefCell<AtomicBasicBlock>>` while under construction; the final
/// blocks are cloned into `trace[i].abb`. Returns `false` if the trace was
/// inconsistent (e.g. a continued block without a recorded start).
pub(crate) fn add_abb_info(
    trace: &mut Vec<ExecInterval>,
    state_table: &HashMap<u64, FreeRTOSSystemState>,
    edges: &Vec<(u32, u32)>,
) -> bool {
    let mut id_count = 0;
    let mut ret = true;
    let mut task_has_started: HashSet<&String> = HashSet::new();
    // One shared ABB handle per interval; intervals of the same block alias each other.
    let mut wip_abb_trace: Vec<Rc<RefCell<AtomicBasicBlock>>> = vec![];
    // let mut open_abb_at_this_task_or_level : HashMap<(u8,&str),usize> = HashMap::new();
    // Maps (resume address, task name or "" for ISRs) -> index of the interval
    // whose ABB is still open and will continue at that address.
    let mut open_abb_at_this_ret_addr_and_task: HashMap<(u32, &str), usize> = HashMap::new();
    for i in 0..trace.len() {
        let curr_name = state_table[&trace[i].start_state].current_task().task_name();
        // let last : Option<&usize> = last_abb_start_of_task.get(&curr_name);
        // let open_abb = open_abb_at_this_task_or_level.get(&(trace[i].level, if trace[i].level<2 {&curr_name} else {""})).to_owned(); // apps/apis are differentiated by task name, isrs by nested level
        let open_abb = open_abb_at_this_ret_addr_and_task
            .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
            .to_owned(); // apps/apis are differentiated by task name, isrs by nested level
        // println!("Edge {:x}-{:x}", edges[i].0.unwrap_or(0xffff), edges[i].1.unwrap_or(0xffff));
        match trace[i].start_capture.0 {
            // generic api abb start
            CaptureEvent::APIStart => {
                // assert_eq!(open_abb, None);
                // a new block may only start when no block is open at this address
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: Some(trace[i].start_capture.1.clone()),
                })));
                id_count += 1;
            }
            // generic isr abb start
            CaptureEvent::ISRStart => {
                // assert_eq!(open_abb, None);
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: Some(trace[i].start_capture.1.clone()),
                })));
                id_count += 1;
            }
            // generic app abb start
            CaptureEvent::APIEnd => {
                // assert_eq!(open_abb, None);
                ret &= open_abb.is_none();
                open_abb_at_this_ret_addr_and_task.insert(
                    (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                    i,
                );
                wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                    start: edges[i].0,
                    ends: HashSet::new(),
                    level: if trace[i].level < 2 {
                        trace[i].level
                    } else {
                        2
                    },
                    instance_id: id_count,
                    instance_name: if trace[i].level < 2 {
                        Some(curr_name.clone().clone())
                    } else {
                        None
                    },
                })));
                id_count += 1;
            }
            // generic continued blocks
            CaptureEvent::ISREnd => {
                // special case app abb start: the very first block of a task,
                // entered via the context-switch handler
                if trace[i].start_capture.1 == "xPortPendSVHandler"
                    && !task_has_started.contains(&curr_name)
                {
                    // assert_eq!(open_abb, None);
                    ret &= open_abb.is_none();
                    wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                        start: 0,
                        ends: HashSet::new(),
                        level: if trace[i].level < 2 {
                            trace[i].level
                        } else {
                            2
                        },
                        instance_id: id_count,
                        instance_name: Some(curr_name.clone().clone()),
                    })));
                    id_count += 1;
                    open_abb_at_this_ret_addr_and_task.insert(
                        (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                        i,
                    );
                    task_has_started.insert(&curr_name);
                } else {
                    // an ISR returned: continue the block it interrupted, if known
                    if let Some(last) = open_abb_at_this_ret_addr_and_task
                        .get(&(edges[i].0, if trace[i].level < 2 { &curr_name } else { "" }))
                    {
                        let last = last.clone(); // required to drop immutable reference
                        wip_abb_trace.push(wip_abb_trace[last].clone());
                        // if the abb is interrupted again, it will need to continue at edge[i].1
                        open_abb_at_this_ret_addr_and_task.remove(&(
                            edges[i].0,
                            if trace[i].level < 2 { &curr_name } else { "" },
                        ));
                        open_abb_at_this_ret_addr_and_task.insert(
                            (edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }),
                            last,
                        ); // order matters!
                    } else {
                        // panic!();
                        // println!("Continued block with no start {} {} {:?} {:?} {:x}-{:x} {} {}", curr_name, trace[i].start_tick, trace[i].start_capture, trace[i].end_capture, edges[i].0, edges[i].1, task_has_started.contains(curr_name),trace[i].level);
                        // println!("{:x?}", open_abb_at_this_ret_addr_and_task);
                        // inconsistent trace: fabricate a fresh block and flag failure
                        ret = false;
                        wip_abb_trace.push(Rc::new(RefCell::new(AtomicBasicBlock {
                            start: edges[i].1,
                            ends: HashSet::new(),
                            level: if trace[i].level < 2 {
                                trace[i].level
                            } else {
                                2
                            },
                            instance_id: id_count,
                            instance_name: if trace[i].level < 1 {
                                Some(curr_name.clone().clone())
                            } else {
                                None
                            },
                        })));
                        id_count += 1;
                    }
                }
            }
            _ => panic!("Undefined block start"),
        }
        // Close (or suspend) the current ABB depending on how the interval ends.
        match trace[i].end_capture.0 {
            // generic app abb end
            CaptureEvent::APIStart => {
                let _t = &wip_abb_trace[i];
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // generic api abb end
            CaptureEvent::APIEnd => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // generic isr abb end
            CaptureEvent::ISREnd => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // end anything
            CaptureEvent::End => {
                RefCell::borrow_mut(&*wip_abb_trace[i])
                    .ends
                    .insert(edges[i].1);
                open_abb_at_this_ret_addr_and_task
                    .remove(&(edges[i].1, if trace[i].level < 2 { &curr_name } else { "" }));
            }
            // an ISR interrupting this block does not end it
            CaptureEvent::ISRStart => (),
            _ => panic!("Undefined block end"),
        }
        // println!("{} {} {:x}-{:x} {:x}-{:x} {:?} {:?} {}",curr_name, trace[i].level, edges[i].0, edges[i].1, ((*wip_abb_trace[i])).borrow().start, ((*wip_abb_trace[i])).borrow().ends.iter().next().unwrap_or(&0xffff), trace[i].start_capture, trace[i].end_capture, trace[i].start_tick);
        // println!("{:x?}", open_abb_at_this_ret_addr_and_task);
    }
    // drop(open_abb_at_this_task_or_level);
    // Materialize the shared ABB handles into the intervals.
    for i in 0..trace.len() {
        trace[i].abb = Some((*wip_abb_trace[i]).borrow().clone());
    }
    return ret;
}
//============================================= Task release times
// Find all task release times, as (tick, task name) pairs.
// A "release" is the moment a job becomes ready to run. Several heuristics
// are combined: the initial scheduler start, tick/user ISRs that move tasks
// from the delay list to the ready list, and API calls that wake tasks.
pub(crate) fn get_releases(
    trace: &Vec<ExecInterval>,
    states: &HashMap<u64, FreeRTOSSystemState>,
) -> Vec<(u64, String)> {
    let mut ret = Vec::new();
    let mut initial_released = false;
    for (_n, i) in trace.iter().enumerate() {
        // The first release starts from xPortPendSVHandler
        if !initial_released
            && i.start_capture.0 == CaptureEvent::ISREnd
            && i.start_capture.1 == "xPortPendSVHandler"
        {
            let start_state = states.get(&i.start_state).expect("State not found");
            initial_released = true;
            // everything already on the ready lists counts as released now
            start_state.get_ready_lists().iter().for_each(|x| {
                ret.push((i.start_tick, x.task_name().clone()));
            });
            continue;
        }
        // A timed release is SysTickHandler isr block that moves a task from the delay list to the ready list.
        if i.start_capture.0 == CaptureEvent::ISRStart
            && (i.start_capture.1 == "xPortSysTickHandler"
                || USR_ISR_SYMBOLS.contains(&i.start_capture.1.as_str()))
        {
            // detect race-conditions, get start and end state from the nearest valid intervals
            if states
                .get(&i.start_state)
                .map(|x| x.read_invalid)
                .unwrap_or(true)
            {
                // scan backwards for the closest interval with a valid start state
                let mut start_index = None;
                for n in 1.._n {
                    if let Some(interval_start) = trace.get(_n - n) {
                        let start_state = states.get(&interval_start.start_state).unwrap();
                        if !start_state.read_invalid {
                            start_index = Some(_n - n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                // scan forwards for the closest interval with a valid end state
                let mut end_index = None;
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        let end_state = states.get(&interval_end.end_state).unwrap();
                        if !end_state.read_invalid {
                            end_index = Some(n);
                            break;
                        }
                    } else {
                        break;
                    }
                }
                // a task that is ready afterwards but was neither ready nor
                // running before must have been released by this ISR
                if let Some(Some(start_state)) =
                    start_index.map(|x| states.get(&trace[x].start_state))
                {
                    if let Some(Some(end_state)) =
                        end_index.map(|x| states.get(&trace[x].end_state))
                    {
                        end_state.ready_list_after.iter().for_each(|x| {
                            if x.task_name != end_state.current_task.task_name
                                && x.task_name != start_state.current_task.task_name
                                && !start_state
                                    .ready_list_after
                                    .iter()
                                    .any(|y| x.task_name == y.task_name)
                            {
                                ret.push((i.end_tick, x.task_name.clone()));
                            }
                        });
                    }
                }
            } else
            // canonical case, userspace -> isr -> userspace
            if i.end_capture.0 == CaptureEvent::ISREnd {
                let start_state = states.get(&i.start_state).expect("State not found");
                let end_state = states.get(&i.end_state).expect("State not found");
                end_state.ready_list_after.iter().for_each(|x| {
                    if x.task_name != end_state.current_task.task_name
                        && x.task_name != start_state.current_task.task_name
                        && !start_state
                            .ready_list_after
                            .iter()
                            .any(|y| x.task_name == y.task_name)
                    {
                        ret.push((i.end_tick, x.task_name.clone()));
                    }
                });
                // start_state.delay_list_after.iter().for_each(|x| {
                //     if !end_state.delay_list_after.iter().any(|y| x.task_name == y.task_name) {
                //         ret.push((i.end_tick, x.task_name.clone()));
                //     }
                // });
            } else if i.end_capture.0 == CaptureEvent::ISRStart {
                // Nested interrupts. Fast-forward to the end of the original interrupt, or the first valid state thereafter
                // TODO: this may cause the same release to be registered multiple times
                let mut isr_has_ended = false;
                let start_state = states.get(&i.start_state).expect("State not found");
                for n in (_n + 1)..trace.len() {
                    if let Some(interval_end) = trace.get(n) {
                        if interval_end.end_capture.1 == i.start_capture.1 || isr_has_ended {
                            let end_state = states.get(&interval_end.end_state).unwrap();
                            isr_has_ended = true;
                            if !end_state.read_invalid {
                                end_state.ready_list_after.iter().for_each(|x| {
                                    if x.task_name != end_state.current_task.task_name
                                        && x.task_name != start_state.current_task.task_name
                                        && !start_state
                                            .ready_list_after
                                            .iter()
                                            .any(|y| x.task_name == y.task_name)
                                    {
                                        ret.push((i.end_tick, x.task_name.clone()));
                                    }
                                });
                                break;
                            }
                        }
                    } else {
                        break;
                    }
                }
                // if let Some(interval_end) = trace.get(_n+2) {
                //     if interval_end.start_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.0 == CaptureEvent::ISREnd && interval_end.end_capture.1 == i.start_capture.1 {
                //         let start_state = states.get(&i.start_state).expect("State not found");
                //         let end_state = states.get(&interval_end.end_state).expect("State not found");
                //         end_state.ready_list_after.iter().for_each(|x| {
                //             if x.task_name != end_state.current_task.task_name && x.task_name != start_state.current_task.task_name && !start_state.ready_list_after.iter().any(|y| x.task_name == y.task_name) {
                //                 ret.push((i.end_tick, x.task_name.clone()));
                //             }
                //         });
                //     }
                // }
            }
        }
        // Release driven by an API call. This produces a lot of false positives, as a job may block multiple times per instance. Despite this, aperiodic jobs not be modeled otherwise. If we assume the first release is the real one, we can filter out the rest.
        if i.start_capture.0 == CaptureEvent::APIStart {
            let api_start_state = states.get(&i.start_state).expect("State not found");
            let api_end_state = {
                let mut end_index = _n;
                for n in (_n)..trace.len() {
                    if trace[n].end_capture.0 == CaptureEvent::APIEnd
                        || trace[n].end_capture.0 == CaptureEvent::End
                    {
                        end_index = n;
                        break;
                    } else if n > _n && trace[n].level == 0 {
                        // API Start -> ISR Start+End -> APP Continue
                        end_index = n - 1; // any return to a regular app block is a fair point of comparison for the ready list, because scheduling has been performed
                        break;
                    }
                }
                states
                    .get(&trace[end_index].end_state)
                    .expect("State not found")
            };
            api_end_state.ready_list_after.iter().for_each(|x| {
                if x.task_name != api_start_state.current_task.task_name
                    && !api_start_state
                        .ready_list_after
                        .iter()
                        .any(|y| x.task_name == y.task_name)
                {
                    ret.push((i.end_tick, x.task_name.clone()));
                    // eprintln!("Task {} released by API call at {:.1}ms", x.task_name, crate::time::clock::tick_to_time(i.end_tick).as_micros() as f32/1000.0);
                }
            });
        }
    }
    ret
}
/// Pair task release times with their corresponding response (completion)
/// times, producing (release_tick, response_tick, task_name) spans.
///
/// `rel` and `resp` are consumed in timestamp order via peekable iterators;
/// `ready` holds the pending release per task, `last_response` the previous
/// completion (used as a fallback release point for back-to-back jobs).
/// The returned bool flags pairings that looked inconsistent and may warrant
/// debugging.
pub(crate) fn get_release_response_pairs(
    rel: &Vec<(u64, String)>,
    resp: &Vec<(u64, String)>,
) -> (Vec<(u64, u64, String)>, bool) {
    let mut maybe_error = false;
    let mut ret = Vec::new();
    let mut ready: HashMap<&String, u64> = HashMap::new();
    let mut last_response: HashMap<&String, u64> = HashMap::new();
    let mut r = rel.iter().peekable();
    let mut d = resp.iter().peekable();
    loop {
        while let Some(peek_rel) = r.peek() {
            // Fill releases as soon as possible
            if !ready.contains_key(&peek_rel.1) {
                ready.insert(&peek_rel.1, peek_rel.0);
                r.next();
            } else {
                if let Some(peek_resp) = d.peek() {
                    if peek_resp.0 > peek_rel.0 {
                        // multiple releases before response
                        // It is unclear which release is real
                        // maybe_error = true;
                        // eprintln!("Task {} released multiple times before response ({:.1}ms and {:.1}ms)", peek_rel.1, crate::time::clock::tick_to_time(ready[&peek_rel.1]).as_micros()/1000, crate::time::clock::tick_to_time(peek_rel.0).as_micros()/1000);
                        // ready.insert(&peek_rel.1, peek_rel.0);
                        r.next();
                    } else {
                        // releases have overtaken responses, wait until the ready list clears up a bit
                        break;
                    }
                } else {
                    // no more responses
                    break;
                }
            }
        }
        if let Some(next_resp) = d.next() {
            if ready.contains_key(&next_resp.1) {
                if ready[&next_resp.1] >= next_resp.0 {
                    // pending release is not earlier than this response
                    if let Some(lr) = last_response.get(&next_resp.1) {
                        if u128::abs_diff(
                            crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                            crate::time::clock::tick_to_time(*lr).as_micros(),
                        ) > 500
                        {
                            // tolerate pending notifications for 500us
                            maybe_error = true;
                            // eprintln!("Task {} response at {:.1}ms before next release at {:.1}ms. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(ready[&next_resp.1]).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
                        }
                        // Sometimes a task is released immediately after a response. This might not be detected.
                        // Assume that the release occured with the last response
                        ret.push((*lr, next_resp.0, next_resp.1.clone()));
                        last_response.insert(&next_resp.1, next_resp.0);
                    } else {
                        maybe_error = true;
                        // eprintln!("Task {} released after response", next_resp.1);
                    }
                } else {
                    // regular case: release strictly before response
                    // assert!(peek_resp.0 >= ready[&peek_resp.1]);
                    last_response.insert(&next_resp.1, next_resp.0);
                    ret.push((ready[&next_resp.1], next_resp.0, next_resp.1.clone()));
                    ready.remove(&next_resp.1);
                }
            } else {
                // response without a recorded release
                if let Some(lr) = last_response.get(&next_resp.1) {
                    if u128::abs_diff(
                        crate::time::clock::tick_to_time(next_resp.0).as_micros(),
                        crate::time::clock::tick_to_time(*lr).as_micros(),
                    ) > 1000
                    { // tolerate pending notifications for 1ms
                        // maybe_error = true;
                        // eprintln!("Task {} response at {:.1}ms not found in ready list. Fallback to last response at {:.1}ms.", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0, crate::time::clock::tick_to_time(*lr).as_micros() as f32/1000.0);
                    }
                    // Sometimes a task is released immediately after a response (e.g. pending notification). This might not be detected.
                    // Assume that the release occured with the last response
                    ret.push((*lr, next_resp.0, next_resp.1.clone()));
                    last_response.insert(&next_resp.1, next_resp.0);
                } else {
                    maybe_error = true;
                    // eprintln!("Task {} response at {:.1}ms not found in ready list", next_resp.1, crate::time::clock::tick_to_time(next_resp.0).as_micros() as f32/1000.0);
                }
            }
        } else {
            // TODO: should remaining released tasks be counted as finished?
            return (ret, maybe_error);
        }
    }
}

View File

@ -0,0 +1,473 @@
use std::ops::Range;
use freertos::FreeRTOSTraceMetadata;
use hashbrown::HashMap;
use libafl::{
prelude::{ExitKind, ObserversTuple}, HasMetadata,
};
#[cfg(feature = "trace_reads")]
use libafl_qemu::{ReadExecHook, ReadExecNHook, ReadGenHook};
use libafl_qemu::{
modules::{EmulatorModule, EmulatorModuleTuple}, sys::TCGTemp, EmulatorModules, GuestAddr, Hook, InstructionHook, JmpExecHook, JmpGenHook, MemAccessInfo
};
use crate::{fuzzer::MAX_INPUT_SIZE, systemstate::{
helpers::{get_icount, in_any_range, read_rec_return_stackframe},
target_os::*,
CaptureEvent, RTOSJob,
}};
use super::{
bindings::{self, *}, post_processing::{get_release_response_pairs, get_releases, refine_system_states, states2intervals}, trigger_collection, CURRENT_SYSTEMSTATE_VEC
};
//============================= Qemu Helper
/// A QEMU helper that reads FreeRTOS-specific structs from the guest whenever
/// certain syscalls/ISRs occur, and also injects inputs.
/// All addresses are resolved from the target ELF's symbol tables in `new`.
#[derive(Debug)]
pub struct FreeRTOSSystemStateHelper {
    // Address range of the application code
    pub app_range: Range<GuestAddr>,
    // Entry addresses and full ranges of FreeRTOS API functions
    pub api_fn_addrs: HashMap<GuestAddr, String>,
    pub api_fn_ranges: Vec<(String, std::ops::Range<GuestAddr>)>,
    // Entry addresses and full ranges of interrupt service routines
    pub isr_fn_addrs: HashMap<GuestAddr, String>,
    pub isr_fn_ranges: Vec<(String, std::ops::Range<GuestAddr>)>,
    // Address range of the fuzz-input memory (FUZZ_INPUT symbol)
    pub tcb_addr: GuestAddr,
    pub ready_queues: GuestAddr,
    pub delay_queue: GuestAddr,
    pub delay_queue_overflow: GuestAddr,
    pub scheduler_lock_addr: GuestAddr,
    pub scheduler_running_addr: GuestAddr,
    pub critical_addr: GuestAddr,
    pub job_done_addrs: GuestAddr,
    pub queue_registry_addrs: GuestAddr,
}
impl FreeRTOSSystemStateHelper {
    /// Build the helper from the symbol/range tables extracted from the
    /// target binary.
    ///
    /// # Panics
    /// Panics with the offending name if any required symbol, range, or
    /// group is missing from the tables (previously a bare `unwrap`).
    #[must_use]
    pub fn new(
        target_symbols: &HashMap<&'static str, GuestAddr>,
        target_ranges: &HashMap<&'static str, Range<GuestAddr>>,
        target_groups: &HashMap<&'static str, HashMap<String, Range<GuestAddr>>>,
    ) -> Self {
        // Look up a single required symbol address, naming it on failure.
        let sym = |name: &'static str| -> GuestAddr {
            *target_symbols
                .get(name)
                .unwrap_or_else(|| panic!("Required symbol {name} not found"))
        };
        // Turn a named group of ranges into (name, range) pairs sorted by start address.
        let group = |name: &'static str| -> Vec<(String, Range<GuestAddr>)> {
            target_groups
                .get(name)
                .unwrap_or_else(|| panic!("Required group {name} not found"))
                .iter()
                .sorted_by_key(|x| x.1.start)
                .map(|(n, r)| (n.clone(), r.clone()))
                .collect()
        };
        let app_range = target_ranges
            .get("APP_CODE")
            .expect("Required range APP_CODE not found")
            .clone();
        let api_fn_ranges = group("API_FN");
        let api_fn_addrs = api_fn_ranges.iter().map(|(n, r)| (r.start, n.clone())).collect();
        let isr_fn_ranges = group("ISR_FN");
        let isr_fn_addrs = isr_fn_ranges.iter().map(|(n, r)| (r.start, n.clone())).collect();
        // The input memory spans MAX_INPUT_SIZE bytes starting at FUZZ_INPUT.
        let input_mem_start = sym("FUZZ_INPUT");
        let input_mem =
            input_mem_start..(input_mem_start + unsafe { MAX_INPUT_SIZE as GuestAddr });
        FreeRTOSSystemStateHelper {
            app_range,
            api_fn_addrs,
            api_fn_ranges,
            isr_fn_addrs,
            isr_fn_ranges,
            input_mem,
            tcb_addr: sym("pxCurrentTCB"),
            ready_queues: sym("pxReadyTasksLists"),
            delay_queue: sym("pxDelayedTaskList"),
            delay_queue_overflow: sym("pxOverflowDelayedTaskList"),
            scheduler_lock_addr: sym("uxSchedulerSuspended"),
            scheduler_running_addr: sym("xSchedulerRunning"),
            critical_addr: sym("uxCriticalNesting"),
            job_done_addrs: sym("trigger_job_done"),
            queue_registry_addrs: sym("xQueueRegistry"),
        }
    }
}
/// LibAFL emulator module: installs the capture hooks before the first run,
/// resets per-run buffers before each run, and post-processes the captured
/// raw states into `FreeRTOSTraceMetadata` after each run.
impl<S, I> EmulatorModule<I, S> for FreeRTOSSystemStateHelper
where
    S: Unpin + HasMetadata,
    I: Unpin,
{
    fn first_exec<ET>(&mut self, _qemu: Qemu, emulator_modules: &mut EmulatorModules<ET, I, S>, _state: &mut S)
    where
        ET: EmulatorModuleTuple<I, S>,
    {
        // Capture state at the first instruction of every known ISR.
        for wp in self.isr_fn_addrs.keys() {
            emulator_modules.instructions(*wp, InstructionHook::Function(exec_isr_hook::<ET, I, S>), false);
        }
        // Capture state at jumps into/out of API functions.
        emulator_modules.jmps(
            JmpGenHook::Function(gen_jmp_is_syscall::<ET, I, S>),
            JmpExecHook::Function(trace_jmp::<ET, I, S>),
        );
        // Record job completions at the target's trigger_job_done symbol.
        #[cfg(feature = "trace_job_response_times")]
        emulator_modules.instructions(
            self.job_done_addrs,
            InstructionHook::Function(job_done_hook::<ET, I, S>),
            false,
        );
        // Trace reads of the fuzz-input memory region.
        #[cfg(feature = "trace_reads")]
        emulator_modules.reads(
            ReadGenHook::Function(gen_read_is_input::<ET, I, S>),
            ReadExecHook::Empty,
            ReadExecHook::Empty,
            ReadExecHook::Empty,
            ReadExecHook::Empty,
            ReadExecNHook::Function(trace_reads::<ET, I, S>),
        );
        unsafe { INPUT_MEM = self.input_mem.clone() };
    }
    // TODO: refactor duplicate code
    // Reset the per-run capture buffers and drop stale metadata.
    fn pre_exec<ET>(
        &mut self,
        emulator: Qemu,
        _emulator_modules: &mut EmulatorModules<ET, I, S>,
        state: &mut S,
        _input: &I,
    ) where
        ET: EmulatorModuleTuple<I, S>,
    {
        unsafe {
            (&raw mut CURRENT_SYSTEMSTATE_VEC).as_mut().unwrap().clear();
            (&raw mut JOBS_DONE).as_mut().unwrap().clear();
        }
        if state.has_metadata::<FreeRTOSTraceMetadata>() {
            state.remove_metadata::<FreeRTOSTraceMetadata>();
        }
    }
    // Post-process the raw capture buffer into trace metadata:
    // capture final state -> trim pre-scheduler prefix -> refine states ->
    // build intervals/ABBs -> (optionally) reconstruct jobs -> attach metadata.
    fn post_exec<OT, ET>(
        &mut self,
        emulator: Qemu,
        emulator_modules: &mut EmulatorModules<ET, I, S>,
        _state: &mut S,
        _input: &I,
        _observers: &mut OT,
        _exit_kind: &mut ExitKind,
    ) where
        OT: ObserversTuple<I, S>,
        ET: EmulatorModuleTuple<I, S>,
    {
        let mut need_to_debug = false;
        let current_systemstate_vec = unsafe { (&raw mut CURRENT_SYSTEMSTATE_VEC).as_mut().unwrap() };
        if { current_systemstate_vec.len() } == 0 {
            eprintln!("No system states captured, aborting");
            return;
        }
        // Collect the final system state
        trigger_collection(&emulator, (0, 0), CaptureEvent::End, self);
        // Stamp the synthetic final capture with the current PC (register 15).
        let c = emulator.cpu_from_index(0).expect("CPU 0 not found");
        let pc = c.read_reg::<i32>(15).unwrap();
        let last = current_systemstate_vec.last_mut().unwrap();
        last.edge = (pc, 0);
        last.capture_point =(CaptureEvent::End, "Breakpoint".to_string());
        // Find the first ISREnd of vPortSVCHandler (start of the first task) and drop anything before
        unsafe {
            let mut index = 0;
            while index < current_systemstate_vec.len() {
                if CaptureEvent::ISREnd == current_systemstate_vec[index].capture_point.0
                    && CURRENT_SYSTEMSTATE_VEC[index].capture_point.1 == "xPortPendSVHandler"
                {
                    break;
                }
                index += 1;
            }
            drop(current_systemstate_vec.drain(..index));
            // NOTE(review): only the synthetic End capture remains -> nothing usable
            if current_systemstate_vec.len() == 1 {
                eprintln!("No system states captured, aborting");
                return;
            }
        }
        // Start refining the state trace
        let (refined_states, metadata) =
            refine_system_states(current_systemstate_vec.split_off(0));
        let (intervals, mem_reads, dumped_states, success) =
            states2intervals(refined_states.clone(), metadata);
        need_to_debug |= !success;
        #[cfg(not(feature = "trace_job_response_times"))]
        let jobs = Vec::new();
        // Reconstruct per-job spans: pair releases with responses, then gather
        // each job's intervals, group them by ABB, and sum execution times.
        #[cfg(feature = "trace_job_response_times")]
        let jobs = {
            let releases = get_releases(&intervals, &dumped_states);
            let responses = unsafe { std::mem::take((&raw mut JOBS_DONE).as_mut().unwrap()) };
            let (job_spans, do_report) = get_release_response_pairs(&releases, &responses);
            need_to_debug |= do_report;
            let jobs : Vec<RTOSJob> = job_spans
                .into_iter()
                .map(|x| {
                    // all intervals overlapping [release, response] of this task
                    let intervals_of_job_x = intervals
                        .iter()
                        .enumerate()
                        .filter(|y| {
                            y.1.start_tick <= x.1
                                && y.1.end_tick >= x.0
                                && x.2 == y.1.get_task_name_unchecked()
                        })
                        .map(|(idx, x)| (x, &mem_reads[idx]))
                        .collect::<Vec<_>>();
                    let (abbs, rest): (Vec<_>, Vec<_>) = intervals_of_job_x
                        .chunk_by(|a, b| {
                            a.0.abb
                                .as_ref()
                                .unwrap()
                                .instance_eq(b.0.abb.as_ref().unwrap())
                        })
                        .into_iter() // group by abb
                        .map(|intervals| {
                            (
                                intervals[0].0.abb.as_ref().unwrap().clone(),
                                (
                                    intervals.iter().fold(0, |sum, z| sum + z.0.get_exec_time()),
                                    intervals.iter().fold(Vec::new(), |mut sum, z| {
                                        sum.extend(z.1.iter());
                                        sum
                                    }),
                                ),
                            )
                        })
                        .unzip();
                    let (ticks_per_abb, mem_reads_per_abb): (Vec<_>, Vec<_>) = rest.into_iter().unzip();
                    RTOSJob {
                        name: x.2,
                        mem_reads: mem_reads_per_abb.into_iter().flatten().collect(), // TODO: add read values
                        release: x.0,
                        response: x.1,
                        exec_ticks: ticks_per_abb.iter().sum(),
                        ticks_per_abb: ticks_per_abb,
                        abbs: abbs,
                        hash_cache: 0,
                    }
                })
                .collect::<Vec<_>>();
            jobs
        };
        _state.add_metadata(FreeRTOSTraceMetadata::new(refined_states, intervals, mem_reads, jobs, need_to_debug));
    }
    // type ModuleAddressFilter = NopAddressFilter;
    // type ModulePageFilter = NopPageFilter;
    // fn address_filter(&self) -> &Self::ModuleAddressFilter {
    //     todo!()
    // }
    // fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter {
    //     todo!()
    // }
    // fn page_filter(&self) -> &Self::ModulePageFilter {
    //     todo!()
    // }
    // fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter {
    //     todo!()
    // }
}
//============================= Trace job response times
/// Completion events recorded by [`job_done_hook`]: (icount at completion, task name).
/// NOTE(review): mutable static, drained once per run via `std::mem::take`; assumes
/// single-threaded QEMU hook execution — confirm.
pub static mut JOBS_DONE: Vec<(u64, String)> = vec![];
/// Hook invoked when a job finishes: records the current task's name and the
/// current icount into [`JOBS_DONE`] for later release/response pairing.
pub fn job_done_hook<QT, I, S>(
    emulator: Qemu,
    hooks: &mut EmulatorModules<QT, I, S>,
    _state: Option<&mut S>,
    _pc: GuestAddr,
) where
    QT: EmulatorModuleTuple<I, S>,
{
    // Find the FreeRTOS helper that knows where the current-TCB pointer lives.
    let helper = hooks
        .modules()
        .match_first_type::<FreeRTOSSystemStateHelper>()
        .expect("QemuSystemHelper not found in helper tupel");
    // Resolve the currently running TCB; a null pointer means no task is active.
    let curr_tcb_addr: bindings::void_ptr = super::QemuLookup::lookup(&emulator, helper.tcb_addr);
    if curr_tcb_addr == 0 {
        return;
    }
    let current_tcb: TCB_t = super::QemuLookup::lookup(&emulator, curr_tcb_addr);
    // pcTaskName is a fixed [i8; 10]; reinterpret as bytes and strip NUL padding.
    let name_bytes = unsafe { std::mem::transmute::<[i8; 10], [u8; 10]>(current_tcb.pcTaskName) };
    let task_name: String = std::str::from_utf8(&name_bytes)
        .expect("TCB name was not utf8")
        .chars()
        .filter(|c| *c != '\0')
        .collect();
    unsafe {
        (&raw mut JOBS_DONE)
            .as_mut()
            .unwrap()
            .push((get_icount(&emulator), task_name));
    }
}
//============================= Trace interrupt service routines
/// Hook invoked on ISR entry: captures an `ISRStart` event with the edge
/// (interrupted site, ISR entry pc).
pub fn exec_isr_hook<QT, I, S>(
    emulator: Qemu,
    hooks: &mut EmulatorModules<QT, I, S>,
    _state: Option<&mut S>,
    pc: GuestAddr,
) where
    QT: EmulatorModuleTuple<I, S>,
{
    let helper = hooks
        .modules()
        .match_first_type::<FreeRTOSSystemStateHelper>()
        .expect("QemuSystemHelper not found in helper tupel");
    // The return address stored on the exception stack frame identifies the interrupted site.
    let isr_source = read_rec_return_stackframe(&emulator, 0xfffffffc);
    trigger_collection(&emulator, (isr_source, pc), CaptureEvent::ISRStart, helper);
}
//============================= Trace syscalls and returns
/// Generation-time filter classifying jump edges:
/// `Some(1)` = call from application code into an RTOS API function,
/// `Some(2)` = return out of an API function, `Some(3)` = return out of an ISR,
/// `None` = not a jump we want to trace.
pub fn gen_jmp_is_syscall<QT, I, S>(
    emulator: Qemu,
    hooks: &mut EmulatorModules<QT, I, S>,
    _state: Option<&mut S>,
    src: GuestAddr,
    dest: GuestAddr,
) -> Option<u64>
where
    QT: EmulatorModuleTuple<I, S>,
{
    let h = hooks
        .modules()
        .match_first_type::<FreeRTOSSystemStateHelper>()?;
    if h.app_range.contains(&src)
        && !h.app_range.contains(&dest)
        && in_any_range(&h.isr_fn_ranges, src).is_none()
    {
        // Jump leaves application code (and does not originate inside an ISR).
        if in_any_range(&h.api_fn_ranges, dest).is_some() {
            return Some(1);
            // TODO: trigger collection right here
            // otherwise there can be a race-condition, where LAST_API_CALL is set before the api starts, if the interrupt handler calls an api function, it will misidentify the callsite of that api call
        }
    } else if dest == 0 {
        // A zero destination marks a return edge; classify by where it returns from.
        if in_any_range(&h.api_fn_ranges, src).is_some() {
            return Some(2);
        }
        if in_any_range(&h.isr_fn_ranges, src).is_some() {
            return Some(3);
        }
    }
    None
}
/// Execution-time hook for jump edges pre-classified by [`gen_jmp_is_syscall`]
/// (`id`: 1 = API call, 2 = API return, 3 = ISR return).
pub fn trace_jmp<QT, I, S>(
    emulator: Qemu,
    hooks: &mut EmulatorModules<QT, I, S>,
    _state: Option<&mut S>,
    src: GuestAddr,
    mut dest: GuestAddr,
    id: u64,
) where
    QT: EmulatorModuleTuple<I, S>,
{
    let helper = hooks
        .modules()
        .match_first_type::<FreeRTOSSystemStateHelper>()
        .expect("QemuSystemHelper not found in helper tupel");
    match id {
        // API call: edge from the user-space callsite into the API function.
        1 => trigger_collection(&emulator, (src, dest), CaptureEvent::APIStart, helper),
        // API return: ignore returns into other APIs or ISRs — only the first
        // call depth of API calls from user space is accounted.
        2 => {
            if in_any_range(&helper.api_fn_ranges, dest).is_none()
                && in_any_range(&helper.isr_fn_ranges, dest).is_none()
            {
                let api_entry = in_any_range(&helper.api_fn_ranges, src).unwrap().start;
                trigger_collection(&emulator, (api_entry, dest), CaptureEvent::APIEnd, helper);
            }
        }
        // ISR return: the real destination sits in the exception stack frame.
        3 => {
            dest = read_rec_return_stackframe(&emulator, dest);
            let isr_entry = in_any_range(&helper.isr_fn_ranges, src).unwrap().start;
            trigger_collection(&emulator, (isr_entry, dest), CaptureEvent::ISREnd, helper);
        }
        _ => {}
    }
}
//============================= Read Hooks
#[allow(unused)]
/// Generation-time filter for memory reads: instrument (return `Some(1)`) only
/// reads issued from application code; everything else is skipped.
pub fn gen_read_is_input<QT, I, S>(
    emulator: Qemu,
    hooks: &mut EmulatorModules<QT, I, S>,
    _state: Option<&mut S>,
    pc: GuestAddr,
    _addr: *mut TCGTemp,
    _info: MemAccessInfo,
) -> Option<u64>
where
    QT: EmulatorModuleTuple<I, S>,
{
    hooks
        .modules()
        .match_first_type::<FreeRTOSSystemStateHelper>()
        .and_then(|helper| helper.app_range.contains(&pc).then_some(1))
}
/// Guest address range that holds the fuzz input; only reads inside it are recorded.
static mut INPUT_MEM: Range<GuestAddr> = 0..0;
/// (address, byte) pairs read from [`INPUT_MEM`] during the current execution.
/// NOTE(review): mutable static — assumes single-threaded hook execution; confirm.
pub static mut MEM_READ: Vec<(GuestAddr, u8)> = vec![];
#[allow(unused)]
/// Execution-time hook recording every byte read from the input memory region
/// into [`MEM_READ`].
pub fn trace_reads<QT, I, S>(
    emulator: Qemu,
    hooks: &mut EmulatorModules<QT, I, S>,
    _state: Option<&mut S>,
    _id: u64,
    _pc: GuestAddr,
    addr: GuestAddr,
    size: usize,
) where
    QT: EmulatorModuleTuple<I, S>,
{
    if size == 0 {
        return;
    }
    // Snapshot the bytes that were just read from guest memory.
    let mut bytes = vec![0u8; size];
    let input_range = unsafe { (&raw const INPUT_MEM).as_ref().unwrap() };
    unsafe {
        emulator.read_mem(addr, &mut bytes);
        // Keep only the bytes falling inside the designated input region.
        for (offset, &value) in bytes.iter().enumerate() {
            let byte_addr = addr.wrapping_add(offset as GuestAddr);
            if input_range.contains(&byte_addr) {
                (&raw mut MEM_READ).as_mut().unwrap().push((byte_addr, value));
            }
        }
    }
}

View File

@ -0,0 +1,176 @@
use std::collections::hash_map::DefaultHasher;
use std::fmt;
use hashbrown::HashSet;
use libafl_bolts::prelude::SerdeAny;
use libafl_bolts::HasRefCnt;
use libafl_qemu::Qemu;
use std::hash::Hasher;
use std::hash::Hash;
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use itertools::Itertools;
use std::fmt::Debug;
use super::helpers::abb_profile;
use super::ExecInterval;
use super::RTOSJob;
pub mod freertos;
//============================= Trait definitions
/// Bundle of associated types describing one target RTOS.
pub trait TargetSystem: Serialize + Sized + for<'a> Deserialize<'a> + Default + Debug + Clone + SerdeAny {
    /// One captured snapshot of the scheduler/task state.
    type State: SystemState<TCB = Self::TCB>;
    /// The OS's task control block representation.
    type TCB: TaskControlBlock;
    /// Per-execution trace data aggregated from captured states.
    type TraceData: SystemTraceData<State = Self::State>;
}
/// A captured scheduler state: the running task plus the ready and delay lists.
pub trait SystemState: Serialize + Sized + for<'a> Deserialize<'a> + Default + Debug + Hash + PartialEq + Clone + SerdeAny {
    type TCB: TaskControlBlock;
    /// The task that was running when this state was captured.
    fn current_task(&self) -> &Self::TCB;
    fn current_task_mut(&mut self) -> &mut Self::TCB;
    /// Tasks that are ready to run.
    fn get_ready_lists(&self) -> &Vec<Self::TCB>;
    /// Tasks that are blocked/delayed.
    fn get_delay_list(&self) -> &Vec<Self::TCB>;
    /// Human-readable dump of the task lists, for debugging.
    fn print_lists(&self) -> String;
}
pub trait SystemTraceData: Serialize + Sized + for<'a> Deserialize<'a> + Default + Debug + Clone + SerdeAny + HasRefCnt {
type State: SystemState;
fn states(&self) -> Vec<&Self::State>;
fn states_map(&self) -> &HashMap<u64, Self::State>;
fn intervals(&self) -> &Vec<ExecInterval>;
fn mem_reads(&self) -> &Vec<Vec<(u32, u8)>>;
fn jobs(&self) -> &Vec<RTOSJob>;
fn trace_length(&self) -> usize;
#[inline]
fn worst_jobs_per_task_by(&self, pred: &dyn Fn(&RTOSJob,&RTOSJob) -> bool) -> HashMap<String, RTOSJob> {
self.jobs().iter().fold(HashMap::new(), |mut acc, next| {
match acc.get_mut(&next.name) {
Some(old) => {
if pred(old,next) {
*old=next.clone();
}
},
Option::None => {
acc.insert(next.name.clone(), next.clone());
}
}
acc
})
}
#[inline]
fn worst_jobs_per_task_by_exec_time(&self) -> HashMap<String, RTOSJob> {
self.worst_jobs_per_task_by(&|old, x| x.exec_ticks > old.exec_ticks)
}
#[inline]
fn worst_jobs_per_task_by_response_time(&self) -> HashMap<String, RTOSJob> {
self.worst_jobs_per_task_by(&|old, x| x.response_time() > old.response_time())
}
#[inline]
/// Gives the response time of the worst job of the selected task, or 0 if the task is not found
fn wort_of_task(&self, select_task: &String) -> u64 {
self.worst_jobs_per_task_by_response_time().get(select_task).map_or(0, |job| job.response_time())
}
#[inline]
/// extract computation time spent in each task and abb
/// task_name -> (abb_addr -> (interval_count, exec_count, exec_time, woet))
fn select_abb_profile(
&self,
select_task: Option<String>,
) -> HashMap<String, HashMap<u32, (usize, usize, u64, u64)>> {
if let Some(select_task) = select_task.as_ref() {
// Task selected, only profile this task
let wjptybrt = self.worst_jobs_per_task_by_response_time();
if let Some(worst_instance) = wjptybrt.get(select_task)
{
let t: Vec<_> = self
.intervals()
.iter()
.filter(|x| {
x.start_tick < worst_instance.response && x.end_tick > worst_instance.release
})
.cloned()
.collect();
abb_profile(t)
} else {
HashMap::new()
}
} else {
// Profile all tasks
abb_profile(self.intervals().clone())
}
}
fn need_to_debug(&self) -> bool;
}
/// Abstraction over an OS task control block.
pub trait TaskControlBlock: Serialize + for<'a> Deserialize<'a> + Default + Debug + Hash + PartialEq + Clone + SerdeAny {
    /// The task's name.
    fn task_name(&self) -> &String;
    fn task_name_mut(&mut self) -> &mut String;
    // Define methods common to TCBs across different systems
}
//=============================
/// Read typed values out of guest memory.
pub trait QemuLookup {
    /// Read one `Self` from guest memory at `addr`.
    fn lookup(emu: &Qemu, addr: ::std::os::raw::c_uint) -> Self;
    /// Read `count` consecutive `Self` values starting at `addr`.
    fn lookup_slice(emu: &Qemu, addr: ::std::os::raw::c_uint, count: usize) -> Vec<Self> where Self: Sized {
        (0..count)
            .map(|i| Self::lookup(emu, addr + (i * std::mem::size_of::<Self>()) as u32))
            .collect()
    }
}
/// Implements [`QemuLookup`](crate::systemstate::target_os::QemuLookup) for a struct by
/// reading `size_of::<T>()` raw bytes from guest memory and transmuting them.
/// NOTE(review): assumes the guest memory layout matches the host struct layout — confirm per type.
#[macro_export]
macro_rules! impl_emu_lookup {
    ($struct_name:ident) => {
        impl $crate::systemstate::target_os::QemuLookup for $struct_name {
            fn lookup(emu: &Qemu, addr: ::std::os::raw::c_uint) -> $struct_name {
                // Buffer sized exactly to the struct; filled from guest memory below.
                let mut tmp : [u8; std::mem::size_of::<$struct_name>()] = [0u8; std::mem::size_of::<$struct_name>()];
                unsafe {
                    emu.read_mem(addr.into(), &mut tmp).unwrap();
                    std::mem::transmute::<[u8; std::mem::size_of::<$struct_name>()], $struct_name>(tmp)
                }
            }
        }
    };
}
/// Read a NUL-terminated byte string from guest memory at `addr`.
/// Bytes are interpreted one-to-one as chars (Latin-1 style), not decoded as UTF-8.
/// With `length = Some(max)` reading stops once `max` characters were collected;
/// note the check runs after a byte was pushed, so `Some(0)` still yields one char.
fn emu_lookup_string(emu: &Qemu, addr: ::std::os::raw::c_uint, length: Option<usize>) -> String {
    let mut out = String::new();
    let mut byte = [0u8; 1];
    let mut next_addr = addr;
    loop {
        unsafe {
            emu.read_mem(next_addr.into(), &mut byte).unwrap();
        }
        if byte[0] == 0 {
            break;
        }
        out.push(byte[0] as char);
        next_addr += 1;
        if length.map_or(false, |max| out.len() >= max) {
            break;
        }
    }
    out
}
/// Hash `obj` with the std `DefaultHasher` and return the 64-bit digest.
pub fn compute_hash<T>(obj: &T) -> u64
where
    T: Hash,
{
    let mut hasher = DefaultHasher::new();
    obj.hash(&mut hasher);
    hasher.finish()
}

View File

@ -0,0 +1,112 @@
use libafl::{events::EventFirer, prelude::{Feedback, ObserversTuple, StateInitializer}};
use libafl_bolts::Named;
use libafl_qemu::{modules::EmulatorModule, EmulatorModules};
use std::borrow::Cow;
use libafl::prelude::*;
use libafl_qemu::modules::*;
use libafl_qemu::*;
//============================================== Feedback
/// Example [`Feedback`] used to pin down the expected type bounds; never flags
/// an execution as interesting.
#[derive(Clone, Debug, Default)]
pub struct MinimalFeedback {
    /// The name reported through [`Named`].
    name: Cow<'static, str>,
}
impl<S> StateInitializer<S> for MinimalFeedback {}
impl<EM, I, OT, S> Feedback<EM, I, OT, S> for MinimalFeedback
where
    EM: EventFirer<S, I>,
    OT: ObserversTuple<I, S>,
{
    #[allow(clippy::wrong_self_convention)]
    // FIX: unused parameters are underscore-prefixed to silence warnings.
    fn is_interesting(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &I,
        _observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error> {
        // This feedback exists only for type-checking; nothing is ever interesting.
        Ok(false)
    }
}
impl Named for MinimalFeedback {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
//============================================== TestcaseScore
/// Example [`TestcaseScore`] assigning every testcase a constant score of zero.
pub struct MinimalTestcaseScore {}
impl<I, S> TestcaseScore<I, S> for MinimalTestcaseScore
where
    S: HasMetadata + HasCorpus<I>,
{
    // FIX: `0 as f64` replaced by the literal `0.0` (clippy::unnecessary_cast);
    // unused `entry` underscore-prefixed.
    fn compute(
        _state: &S,
        _entry: &mut Testcase<I>,
    ) -> Result<f64, Error> {
        // Constant score: all testcases are weighted equally.
        Ok(0.0)
    }
}
//============================================== EmulatorModule
/// Example [`EmulatorModule`] with empty hooks, documenting the module lifecycle.
#[derive(Debug)]
pub struct MinimalEmulatorModule {}
impl<I, S> EmulatorModule<I, S> for MinimalEmulatorModule {
    const HOOKS_DO_SIDE_EFFECTS: bool = true;
    /// Hook run **before** QEMU is initialized.
    /// This is always run when Emulator gets initialized, in any case.
    /// Install here hooks that should be alive for the whole execution of the VM, even before QEMU gets initialized.
    fn pre_qemu_init<ET>(&mut self, _emulator_hooks: &mut EmulatorModules<ET, I, S>, _qemu_params: &mut QemuParams)
    where
        ET: EmulatorModuleTuple<I, S>,
    {
    }
    /// Hook run **after** QEMU is initialized.
    /// This is always run when Emulator gets initialized, in any case.
    /// Install here hooks that should be alive for the whole execution of the VM, after QEMU gets initialized.
    // FIX: parameter names corrected to match their types — the first argument is the
    // `Qemu` handle, the second is the module tuple (they were swapped before).
    fn post_qemu_init<ET>(&mut self, _qemu: libafl_qemu::Qemu, _emulator_modules: &mut EmulatorModules<ET, I, S>)
    where
        ET: EmulatorModuleTuple<I, S>,
    {
    }
    /// Run once just before fuzzing starts.
    /// This call can be delayed to the point at which fuzzing is supposed to start.
    /// It is mostly used to avoid running hooks during VM initialization, either
    /// because it is useless or it would produce wrong results.
    fn first_exec<ET>(&mut self, _qemu: Qemu,
        _emulator_modules: &mut EmulatorModules<ET, I, S>,
        _state: &mut S)
    where
        ET: EmulatorModuleTuple<I, S>,
    {
    }
    /// # Safety
    ///
    /// This is getting executed in a signal handler.
    unsafe fn on_crash(&mut self) {}
    /// # Safety
    ///
    /// This is getting executed in a signal handler.
    unsafe fn on_timeout(&mut self) {}
}

View File

@ -0,0 +1,443 @@
use libafl::{
common::HasNamedMetadata, executors::ExitKind, observers::Observer, observers::ObserversTuple,
Error,
};
use libafl_bolts::Named;
use serde::{Deserialize, Serialize};
use std::{fs::OpenOptions, io::Write};
use core::{fmt::Debug, time::Duration};
use libafl::common::HasMetadata;
use libafl::corpus::testcase::Testcase;
use libafl::events::EventFirer;
use libafl::feedbacks::Feedback;
use libafl::state::MaybeHasClientPerfMonitor;
use libafl_bolts::tuples::MatchNameRef;
use libafl::SerdeAny;
use std::borrow::Cow;
use std::path::PathBuf;
use std::time::{SystemTime, UNIX_EPOCH};
use crate::systemstate::helpers::metadata_insert_or_update_get;
use crate::systemstate::target_os::TargetSystem;
use crate::systemstate::target_os::SystemTraceData;
use libafl::prelude::StateInitializer;
/// Wall-clock timestamp set at fuzzer start; used to timestamp icount histogram entries.
pub static mut FUZZ_START_TIMESTAMP: SystemTime = UNIX_EPOCH;
/// QEMU icount shift: one guest instruction counts as 2^SHIFT virtual nanoseconds.
pub const QEMU_ICOUNT_SHIFT: u32 = 5;
/// Guest instructions per virtual second / millisecond / microsecond.
pub const QEMU_ISNS_PER_SEC: u32 = u32::pow(10, 9) / u32::pow(2, QEMU_ICOUNT_SHIFT);
pub const QEMU_ISNS_PER_MSEC: u32 = QEMU_ISNS_PER_SEC / 1000;
pub const QEMU_ISNS_PER_USEC: f32 = QEMU_ISNS_PER_SEC as f32 / 1000000.0;
/// Virtual nanoseconds per guest instruction (2^QEMU_ICOUNT_SHIFT).
pub const _QEMU_NS_PER_ISN: u32 = 1 << QEMU_ICOUNT_SHIFT;
/// Nominal target system clock frequency (25 MHz).
pub const _TARGET_SYSCLK_FREQ: u32 = 25 * 1000 * 1000;
pub const _TARGET_MHZ_PER_MIPS: f32 = _TARGET_SYSCLK_FREQ as f32 / QEMU_ISNS_PER_SEC as f32;
pub const _TARGET_MIPS_PER_MHZ: f32 = QEMU_ISNS_PER_SEC as f32 / _TARGET_SYSCLK_FREQ as f32;
pub const _TARGET_SYSCLK_PER_QEMU_SEC: u32 =
    (_TARGET_SYSCLK_FREQ as f32 * _TARGET_MIPS_PER_MHZ) as u32;
pub const _QEMU_SYSCLK_PER_TARGET_SEC: u32 =
    (_TARGET_SYSCLK_FREQ as f32 * _TARGET_MHZ_PER_MIPS) as u32;
/// Convert an icount tick count into virtual time.
pub fn tick_to_time(ticks: u64) -> Duration {
    // FIX: removed redundant parentheses (unused_parens warning).
    Duration::from_nanos(ticks * _QEMU_NS_PER_ISN as u64)
}
/// Convert ticks to milliseconds, rounded to two decimal places.
pub fn tick_to_ms(ticks: u64) -> f32 {
    (tick_to_time(ticks).as_micros() as f32 / 10.0).round() / 100.0
}
/// Convert a virtual-time duration back into icount ticks (truncating).
pub fn time_to_tick(time: Duration) -> u64 {
    time.as_nanos() as u64 / _QEMU_NS_PER_ISN as u64
}
//========== Metadata
/// Testcase metadata recording the runtime of one execution
/// (presumably in icount ticks, matching [`QemuClockObserver`] — confirm).
#[derive(Debug, SerdeAny, Serialize, Deserialize)]
pub struct QemuIcountMetadata {
    // runtime of the execution this metadata was attached to
    runtime: u64,
}
/// Metadata for [`QemuClockIncreaseFeedback`]
#[derive(Debug, Serialize, Deserialize, SerdeAny)]
pub struct MaxIcountMetadata {
    /// Largest runtime (in ticks) observed so far.
    pub max_icount_seen: u64,
    /// Key under which this metadata is stored (matches the feedback's name).
    pub name: Cow<'static, str>,
}
// impl FeedbackState for MaxIcountMetadata
// {
// fn reset(&mut self) -> Result<(), Error> {
// self.max_icount_seen = 0;
// Ok(())
// }
// }
impl Named for MaxIcountMetadata {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl MaxIcountMetadata {
    /// Create new `MaxIcountMetadata` starting from a maximum of zero.
    #[must_use]
    pub fn new(name: &'static str) -> Self {
        Self {
            max_icount_seen: 0,
            name: Cow::from(name),
        }
    }
}
impl Default for MaxIcountMetadata {
    fn default() -> Self {
        Self::new("MaxClock")
    }
}
/// A piece of metadata tracking all icounts.
/// `.0`: history of (icount, ms-since-fuzz-start) pairs, drained in batches when dumped;
/// `.1`: the maximum icount seen so far together with its timestamp.
#[derive(Debug, Default, SerdeAny, Serialize, Deserialize)]
pub struct IcHist(pub Vec<(u64, u128)>, pub (u64, u128));
//========== Observer
/// A simple observer, just overlooking the runtime of the target.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct QemuClockObserver<SYS: TargetSystem> {
    name: Cow<'static, str>,
    /// Tick counter value at the start of the run (stays 0 when snapshot restore resets the counter).
    start_tick: u64,
    /// Tick counter value when the run finished (0 on abnormal exit).
    end_tick: u64,
    /// If set, report the worst response time of this task instead of the raw clock.
    select_task: Option<String>,
    phantom: std::marker::PhantomData<SYS>,
}
impl<SYS: TargetSystem> QemuClockObserver<SYS> {
    /// Creates a new [`QemuClockObserver`] with the given name.
    #[must_use]
    pub fn new(name: &'static str, select_task: &Option<String>) -> Self {
        Self {
            name: Cow::from(name),
            start_tick: 0,
            end_tick: 0,
            select_task: select_task.clone(),
            phantom: std::marker::PhantomData,
        }
    }
    /// Gets the runtime for the last execution of this target.
    /// NOTE(review): assumes `end_tick >= start_tick`; both are reset in `pre_exec`,
    /// so an underflow would indicate a missed `post_exec` — confirm.
    #[must_use]
    pub fn last_runtime(&self) -> u64 {
        self.end_tick - self.start_tick
    }
}
impl<I, S, SYS> Observer<I, S> for QemuClockObserver<SYS>
where
    S: HasMetadata,
    SYS: TargetSystem,
{
    /// Reset the tick window before each run.
    fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
        self.start_tick = 0;
        // Only remember the pre-run ticks if persistent mode is used
        #[cfg(not(feature = "snapshot_restore"))]
        unsafe {
            self.start_tick = emu::icount_get_raw();
            self.end_tick = self.start_tick;
        }
        Ok(())
    }
    /// Capture the end tick. On abnormal exits the window is zeroed so the run
    /// reports a runtime of 0 instead of garbage.
    // FIX: `exit_kind` was named `_exit_kind` despite being used below.
    fn post_exec(
        &mut self,
        state: &mut S,
        _input: &I,
        exit_kind: &ExitKind,
    ) -> Result<(), Error> {
        if exit_kind != &ExitKind::Ok {
            self.start_tick = 0;
            self.end_tick = 0;
            return Ok(());
        }
        // With response-time tracing: optionally report the selected task's
        // worst response time instead of the raw instruction counter.
        #[cfg(feature = "trace_job_response_times")]
        let icount = {
            if let Some(select) = self.select_task.as_ref() {
                let trace = state
                    .metadata::<SYS::TraceData>()
                    .expect("TraceData not found");
                trace.wort_of_task(select)
            } else {
                unsafe {libafl_qemu::sys::icount_get_raw()}
            }
        };
        #[cfg(not(feature = "trace_job_response_times"))]
        let icount = unsafe {libafl_qemu::sys::icount_get_raw()};
        self.end_tick = icount;
        Ok(())
    }
}
impl<SYS: TargetSystem> Named for QemuClockObserver<SYS> {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl<SYS: TargetSystem> Default for QemuClockObserver<SYS> {
    /// Default observer named "clock" with no task selection.
    fn default() -> Self {
        Self {
            // FIX: borrow the literal instead of allocating an owned String
            // (`Cow::from(String::from("clock"))` allocated needlessly).
            name: Cow::Borrowed("clock"),
            start_tick: 0,
            end_tick: 0,
            select_task: None,
            phantom: std::marker::PhantomData,
        }
    }
}
//========== Feedback
/// Nop feedback that annotates execution time in the new testcase, if any
/// for this Feedback, the testcase is never interesting (use with an OR).
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ClockTimeFeedback<SYS> {
    /// Runtime of the last execution; set in `is_interesting`, moved into the testcase in `append_metadata`.
    exec_time: Option<Duration>,
    /// If set, report the worst observed response time of this task instead of the raw clock.
    select_task: Option<String>,
    /// Name used to look up the matching [`QemuClockObserver`].
    name: Cow<'static, str>,
    /// If set, icount histograms and the worst-case trace are dumped below this path.
    dump_path: Option<PathBuf>,
    phantom: std::marker::PhantomData<SYS>,
}
impl<S, SYS> StateInitializer<S> for ClockTimeFeedback<SYS> where SYS: TargetSystem {}
impl<EM, I, OT, S, SYS> Feedback<EM, I, OT, S> for ClockTimeFeedback<SYS>
where
    S: MaybeHasClientPerfMonitor + HasMetadata,
    I: Default,
    EM: EventFirer<I, S>,
    OT: ObserversTuple<I, S>,
    SYS: TargetSystem,
{
    /// Never interesting; side effects only: records the runtime for `append_metadata`
    /// and, if `dump_path` is set, maintains the icount histogram / worst-case trace dump.
    #[allow(clippy::wrong_self_convention)]
    fn is_interesting(
        &mut self,
        state: &mut S,
        _manager: &mut EM,
        _input: &I,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where {
        // Pick the runtime metric: worst response time of the selected task (if any),
        // otherwise the raw clock delta from the matching observer.
        #[cfg(feature = "trace_job_response_times")]
        let icount = {
            if let Some(select) = self.select_task.as_ref() {
                let trace = state
                    .metadata::<SYS::TraceData>()
                    .expect("TraceData not found");
                trace.wort_of_task(select)
            } else {
                let observer = observers
                    .match_name::<QemuClockObserver<SYS>>(self.name())
                    .unwrap();
                observer.last_runtime()
            }
        };
        #[cfg(not(feature = "trace_job_response_times"))]
        let icount = {
            let observer = observers
                .match_name::<QemuClockObserver<SYS>>(self.name())
                .unwrap();
            observer.last_runtime()
        };
        self.exec_time = Some(tick_to_time(icount));
        // Dump the icounts to a file
        if let Some(td) = &self.dump_path {
            let metadata = state.metadata_map_mut();
            // Milliseconds since fuzzing started, used to timestamp the histogram entry.
            let timestamp = SystemTime::now()
                .duration_since(unsafe { FUZZ_START_TIMESTAMP })
                .unwrap()
                .as_millis();
            // Append to the history and keep the running maximum up to date.
            let hist = metadata_insert_or_update_get::<IcHist>(
                metadata,
                || IcHist(
                    vec![(icount, timestamp)],
                    (icount, timestamp),
                ),
                |hist| {
                    hist.0.push((icount, timestamp));
                    if hist.1 .0 < icount {
                        hist.1 = (icount, timestamp);
                    }
                },
            );
            // Flush the in-memory history to disk in batches of at least 100 entries.
            if hist.0.len() >= 100 {
                let mut file = OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .append(true)
                    .open(td)
                    .expect("Could not open timedump");
                let newv: Vec<(u64, u128)> = Vec::with_capacity(110);
                for i in std::mem::replace(&mut hist.0, newv).into_iter() {
                    writeln!(file, "{},{}", i.0, i.1).expect("Write to dump failed");
                }
            }
            // write out the worst case trace
            if hist.1 == (icount, timestamp) {
                let tracename = td.with_extension("icounttrace.ron");
                let trace = state
                    .metadata::<SYS::TraceData>()
                    .expect("TraceData not found");
                std::fs::write(
                    tracename,
                    ron::to_string(trace)
                        .expect("Error serializing hashmap"),
                )
                .expect("Can not dump to file");
            }
        }
        Ok(false)
    }
    /// Append to the testcase the generated metadata in case of a new corpus item
    #[inline]
    fn append_metadata(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _observers: &OT,
        testcase: &mut Testcase<I>,
    ) -> Result<(), Error> {
        // Move the recorded time into the testcase and reset for the next run.
        *testcase.exec_time_mut() = self.exec_time;
        self.exec_time = None;
        Ok(())
    }
    // /// Discard the stored metadata in case that the testcase is not added to the corpus
    // #[inline]
    // fn discard_metadata(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
    // self.exec_time = None;
    // Ok(())
    // }
}
impl<SYS> Named for ClockTimeFeedback<SYS> {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl<SYS: TargetSystem> ClockTimeFeedback<SYS> {
    /// Creates a new [`ClockTimeFeedback`], reading the value of a [`QemuClockObserver`] with the given `name`.
    // FIX: doc referenced the non-existent `ClockFeedback`; `name` is 'static so we
    // borrow it instead of allocating (`name.to_string()`); field-init shorthand used.
    #[must_use]
    pub fn new(name: &'static str, select_task: Option<String>, dump_path: Option<PathBuf>) -> Self {
        Self {
            exec_time: None,
            select_task,
            name: Cow::Borrowed(name),
            dump_path,
            phantom: std::marker::PhantomData,
        }
    }
    /// Creates a new [`ClockTimeFeedback`] bound to an existing observer's name.
    #[must_use]
    pub fn new_with_observer(observer: &QemuClockObserver<SYS>, select_task: &Option<String>, dump_path: Option<PathBuf>) -> Self {
        Self {
            exec_time: None,
            select_task: select_task.clone(),
            name: observer.name().clone(),
            dump_path,
            phantom: std::marker::PhantomData,
        }
    }
}
/// A [`Feedback`] rewarding increasing the execution cycles on Qemu.
#[derive(Debug)]
pub struct QemuClockIncreaseFeedback<SYS: TargetSystem> {
    /// Key for the [`MaxIcountMetadata`] entry in the state's named metadata.
    name: Cow<'static, str>,
    phantom: std::marker::PhantomData<SYS>,
}
impl<S,SYS: TargetSystem> StateInitializer<S> for QemuClockIncreaseFeedback<SYS> {}
impl<EM, I, OT, S, SYS: TargetSystem> Feedback<EM, I, OT, S> for QemuClockIncreaseFeedback<SYS>
where
S: HasNamedMetadata + MaybeHasClientPerfMonitor + Debug,
EM: EventFirer<I, S>,
OT: ObserversTuple<I, S>,
{
fn is_interesting(
&mut self,
state: &mut S,
_manager: &mut EM,
_input: &I,
_observers: &OT,
_exit_kind: &ExitKind,
) -> Result<bool, Error>
where {
let observer = _observers
.match_name::<QemuClockObserver<SYS>>("clock")
.expect("QemuClockObserver not found");
let clock_state = state
.named_metadata_map_mut()
.get_mut::<MaxIcountMetadata>(&self.name)
.unwrap();
if observer.last_runtime() > clock_state.max_icount_seen {
// println!("Clock improving {}",observer.last_runtime());
clock_state.max_icount_seen = observer.last_runtime();
return Ok(true);
}
Ok(false)
}
/// Append to the testcase the generated metadata in case of a new corpus item
#[inline]
fn append_metadata(
&mut self,
_state: &mut S,
_manager: &mut EM,
_observers: &OT,
_testcase: &mut Testcase<I>,
) -> Result<(), Error> {
// testcase.metadata_mut().insert(QemuIcountMetadata{runtime: self.last_runtime});
Ok(())
}
// /// Discard the stored metadata in case that the testcase is not added to the corpus
// #[inline]
// fn discard_metadata(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> {
// Ok(())
// }
}
impl<SYS: TargetSystem> Named for QemuClockIncreaseFeedback<SYS> {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl<SYS: TargetSystem> QemuClockIncreaseFeedback<SYS> {
    /// Creates a new [`QemuClockIncreaseFeedback`] keyed by `name`.
    // FIX: doc referenced the unrelated `HitFeedback`; `name` is 'static, so we
    // borrow it instead of allocating via `String::from`.
    #[must_use]
    pub fn new(name: &'static str) -> Self {
        Self {
            name: Cow::Borrowed(name),
            phantom: std::marker::PhantomData,
        }
    }
}
impl<SYS: TargetSystem> Default for QemuClockIncreaseFeedback<SYS> {
    fn default() -> Self {
        Self::new("MaxClock")
    }
}

View File

@ -0,0 +1,3 @@
/// Clock/icount observers and time-based feedbacks.
pub mod clock;
/// Snapshot-based QEMU state restoration between runs.
pub mod qemustate;
/// Worst-case-oriented schedulers, feedbacks and monitors.
pub mod worst;

View File

@ -0,0 +1,106 @@
use libafl_qemu::sys::CPUArchState;
use libafl_qemu::FastSnapshotPtr;
use libafl_qemu::modules::EmulatorModule;
use libafl_qemu::modules::EmulatorModuleTuple;
use libafl::executors::ExitKind;
use libafl_qemu::Qemu;
use libafl_qemu::QemuHooks;
use libafl_qemu::EmulatorModules;
use libafl::prelude::ObserversTuple;
// TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html
/// Restores the guest to its initial state before every execution, using either a
/// fast snapshot, a named QEMU snapshot, or raw per-CPU register states (by feature).
#[derive(Debug)]
pub struct QemuStateRestoreHelper {
    /// Whether an initial (non-fast) snapshot has been taken yet.
    #[allow(unused)]
    has_snapshot: bool,
    /// Per-CPU register states saved when no snapshot feature is enabled.
    #[allow(unused)]
    saved_cpu_states: Vec<CPUArchState>,
    /// Lazily created fast snapshot handle (feature `snapshot_fast`).
    fastsnap: Option<FastSnapshotPtr>
}
impl QemuStateRestoreHelper {
    /// Helper with no snapshot taken yet; snapshots are created lazily on first use.
    #[must_use]
    pub fn new() -> Self {
        Self {
            has_snapshot: false,
            saved_cpu_states: Vec::new(),
            fastsnap: None,
        }
    }
    /// Helper seeded with an optional pre-existing fast snapshot.
    #[allow(unused)]
    pub fn with_fast(fastsnap: Option<FastSnapshotPtr>) -> Self {
        Self {
            fastsnap,
            ..Self::new()
        }
    }
}
impl Default for QemuStateRestoreHelper {
    fn default() -> Self {
        Self::new()
    }
}
impl<I, S> EmulatorModule<I, S> for QemuStateRestoreHelper
{
    const HOOKS_DO_SIDE_EFFECTS: bool = true;
    // type ModuleAddressFilter = NopAddressFilter;
    // type ModulePageFilter = NopPageFilter;
    /// Nothing to do after a run: the post-execution state is left intact for inspection.
    fn post_exec<OT, ET>(
        &mut self,
        _qemu: Qemu,
        _emulator_modules: &mut EmulatorModules<ET, I, S>,
        _state: &mut S,
        _input: &I,
        _observers: &mut OT,
        _exit_kind: &mut ExitKind,
    ) where
        OT: ObserversTuple<I, S>,
        ET: EmulatorModuleTuple<I, S>,
    {
    }
    /// Restore the guest to the initial snapshot before each run,
    /// taking the snapshot lazily on the very first run.
    fn pre_exec<ET>(
        &mut self,
        qemu: Qemu,
        _emulator_modules: &mut EmulatorModules<ET, I, S>,
        _state: &mut S,
        _input: &I,
    ) where
        ET: EmulatorModuleTuple<I, S>,
    {
        // only restore in pre-exec, to preserve the post-execution state for inspection
        #[cfg(feature = "snapshot_restore")]
        {
            #[cfg(feature = "snapshot_fast")]
            match self.fastsnap {
                Some(s) => unsafe { qemu.restore_fast_snapshot(s) },
                None => {
                    self.fastsnap = Some(qemu.create_fast_snapshot(true));
                }
            }
            // BUGFIX: this branch referenced an undefined `emulator`; the
            // parameter is named `qemu`, so the slow-snapshot path did not compile.
            #[cfg(not(feature = "snapshot_fast"))]
            if !self.has_snapshot {
                qemu.save_snapshot("Start", true);
                self.has_snapshot = true;
            } else {
                qemu.load_snapshot("Start", true);
            }
        }
        // BUGFIX: same undefined `emulator` identifier in the CPU-state fallback path.
        #[cfg(not(feature = "snapshot_restore"))]
        if !self.has_snapshot {
            self.saved_cpu_states = (0..qemu.num_cpus())
                .map(|i| qemu.cpu_from_index(i).save_state())
                .collect();
            self.has_snapshot = true;
        } else {
            for (i, s) in self.saved_cpu_states.iter().enumerate() {
                qemu.cpu_from_index(i).restore_state(s);
            }
        }
    }
}

View File

@ -0,0 +1,269 @@
use core::{fmt::Debug, marker::PhantomData};
use std::{
borrow::Cow, ops::Sub, time::{Duration, Instant}
};
use serde::{Serialize, Deserialize};
use libafl::{
common::HasMetadata,
corpus::{Corpus, Testcase},
events::EventFirer,
executors::ExitKind,
feedbacks::{Feedback, MapIndexesMetadata},
observers::ObserversTuple,
prelude::{Monitor, SimplePrintingMonitor, StateInitializer},
schedulers::{MinimizerScheduler, ProbabilitySamplingScheduler, TestcaseScore},
state::{HasCorpus, MaybeHasClientPerfMonitor},
Error,
};
use libafl_bolts::{ClientId, HasLen, Named};
use crate::systemstate::target_os::TargetSystem;
use crate::time::clock::QemuClockObserver;
//=========================== Scheduler
// Scheduler types
/// Minimizer scheduler ranking entries by [`MaxTimeFavFactor`] (longer runtime favored).
pub type TimeMaximizerCorpusScheduler<S, I, O> =
    MinimizerScheduler<S, I, MaxTimeFavFactor, MapIndexesMetadata, O>;
/// Minimizer scheduler ranking entries by executions/hour times testcase length.
pub type LenTimeMaximizerCorpusScheduler<S, I, O> =
    MinimizerScheduler<S, I, MaxExecsLenFavFactor<S, I>, MapIndexesMetadata, O>;
/// Currently identical to [`TimeMaximizerCorpusScheduler`].
pub type TimeStateMaximizerCorpusScheduler<S, I, O> =
    MinimizerScheduler<S, I, MaxTimeFavFactor, MapIndexesMetadata, O>;
/// Scores a testcase as the negated execution time in nanoseconds, so that
/// slower (longer-running) testcases rank higher in a minimizer scheduler.
// FIX: the previous doc ("Multiply the testcase size with the execution time.
// This favors small and quick testcases.") described a different formula.
#[derive(Debug, Clone)]
pub struct MaxTimeFavFactor {}
impl<I, S> TestcaseScore<I, S> for MaxTimeFavFactor
where
    S: HasCorpus<I>,
{
    fn compute(
        _state: &S,
        entry: &mut Testcase<I>,
    ) -> Result<f64, Error> {
        // TODO maybe enforce entry.exec_time().is_some()
        let et = entry
            .exec_time()
            .expect("testcase.exec_time is needed for scheduler");
        let tns: i64 = et.as_nanos().try_into().expect("failed to convert time");
        // Negate before the float cast (same as the original parse of `-tns as f64`).
        Ok((-tns) as f64)
    }
}
// MaxExecsLenFavFactor
/// Scores a testcase by (executions per hour) × (testcase length).
/// NOTE(review): this rewards *fast-executing and longer* testcases — the previous
/// comment claimed the opposite ("small and quick"); confirm intended semantics.
#[derive(Debug, Clone)]
pub struct MaxExecsLenFavFactor<S, I> {
    phantom: PhantomData<(S, I)>,
}
impl<I, S> TestcaseScore<I, S> for MaxExecsLenFavFactor<S, I>
where
    S: HasCorpus<I> + HasMetadata,
    I: HasLen,
{
    fn compute(
        state: &S,
        entry: &mut Testcase<I>,
    ) -> Result<f64, Error> {
        // How many executions of this testcase would fit into one hour.
        let execs_per_hour = 3600.0
            / entry
                .exec_time()
                .expect("testcase.exec_time is needed for scheduler")
                .as_secs_f64();
        let execs_times_length_per_hour =
            execs_per_hour * entry.load_len(state.corpus()).unwrap() as f64;
        Ok(execs_times_length_per_hour)
    }
}
//===================================================================
/// A Feedback which rewards each increase in execution time
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExecTimeIncFeedback<SYS: TargetSystem> {
    name: Cow<'static, str>,
    /// Longest runtime observed so far.
    longest_time: u64,
    /// Whether the most recent execution set a new maximum (read in `append_metadata`).
    last_is_longest: bool,
    phantom: PhantomData<SYS>,
}
impl<S, SYS: TargetSystem> StateInitializer<S> for ExecTimeIncFeedback<SYS> {}
impl<EM, I, OT, S, SYS: TargetSystem> Feedback<EM, I, OT, S> for ExecTimeIncFeedback<SYS>
where
    S: HasCorpus<I> + MaybeHasClientPerfMonitor,
    EM: EventFirer<S, I>,
    OT: ObserversTuple<I, S>,
{
    /// Interesting iff this run's clock exceeded the longest runtime seen so far.
    fn is_interesting(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &I,
        observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error> {
        let observer = observers
            .match_name::<QemuClockObserver<SYS>>("clocktime")
            .expect("QemuClockObserver not found");
        if observer.last_runtime() > self.longest_time {
            self.longest_time = observer.last_runtime();
            self.last_is_longest = true;
            Ok(true)
        } else {
            self.last_is_longest = false;
            Ok(false)
        }
    }
    /// If the last run set a new maximum, mark the testcase so map-based minimizers keep it.
    fn append_metadata(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _observers: &OT,
        testcase: &mut Testcase<I>,
    ) -> Result<(), Error> {
        #[cfg(feature = "feed_afl")]
        if self.last_is_longest {
            // pretend that the longest input alone exercises some non-existing edge, to keep it relevant
            // FIX: bare `unwrap()` replaced by `expect` stating the invariant.
            let mim: &mut MapIndexesMetadata = testcase
                .metadata_map_mut()
                .get_mut()
                .expect("MapIndexesMetadata must be attached to new testcases when feed_afl is active");
            mim.list.push(usize::MAX);
        }
        Ok(())
    }
}
impl<SYS: TargetSystem> Named for ExecTimeIncFeedback<SYS> {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl<SYS: TargetSystem> ExecTimeIncFeedback<SYS> {
    /// Creates a new [`ExecTimeIncFeedback`].
    /// NOTE(review): the registered name stays "ExecTimeReachedFeedback" (historical);
    /// other components may look it up by this string, so it is not renamed here.
    // FIX: borrow the literal instead of allocating via `.to_string()`.
    #[must_use]
    pub fn new() -> Self {
        Self {
            name: Cow::Borrowed("ExecTimeReachedFeedback"),
            longest_time: 0,
            last_is_longest: false,
            phantom: PhantomData,
        }
    }
}
/// A feedback that considers every execution interesting, keeping all testcases.
// FIX: the previous doc claimed it "records a list of all execution times",
// which this feedback does not do; the `new` doc also named the wrong type.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AlwaysTrueFeedback {
    name: Cow<'static, str>,
}
impl<S> StateInitializer<S> for AlwaysTrueFeedback {}
impl<EM, I, OT, S> Feedback<EM, I, OT, S> for AlwaysTrueFeedback
where
    S: HasCorpus<I> + MaybeHasClientPerfMonitor,
    EM: EventFirer<S, I>,
    OT: ObserversTuple<I, S>,
{
    /// Unconditionally interesting.
    fn is_interesting(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &I,
        _observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error> {
        Ok(true)
    }
}
impl Named for AlwaysTrueFeedback {
    #[inline]
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}
impl AlwaysTrueFeedback {
    /// Creates a new [`AlwaysTrueFeedback`].
    // FIX: borrow the literal instead of allocating via `.to_string()`.
    #[must_use]
    pub fn new() -> Self {
        Self {
            name: Cow::Borrowed("AlwaysTrueFeedback"),
        }
    }
}
//=========================== Probability Mass Scheduler
// Probability Mass Scheduler
/// Probability-sampling scheduler weighted by [`TimeProbFactor`].
pub type TimeProbMassScheduler<S, I> = ProbabilitySamplingScheduler<TimeProbFactor<S, I>>;
/// Sampling weight: (execution time in microseconds)^2 — biases selection toward
/// slower testcases.
#[derive(Debug, Clone)]
pub struct TimeProbFactor<S, I> {
    phantom: PhantomData<(S, I)>,
}
impl<I, S> TestcaseScore<I, S> for TimeProbFactor<S, I>
where
    S: HasCorpus<I>,
{
    fn compute(
        _state: &S,
        entry: &mut Testcase<I>,
    ) -> Result<f64, Error> {
        let et = entry
            .exec_time()
            .expect("testcase.exec_time is needed for scheduler");
        let tns: i64 = et.as_nanos().try_into().expect("failed to convert time");
        Ok(((tns as f64) / 1000.0).powf(2.0)) //microseconds
    }
}
/// Monitor that prints with a limited rate.
///
/// Wraps a [`SimplePrintingMonitor`] and suppresses high-frequency
/// `Testcase`/`UserStats` events arriving within the rate window; all other
/// event kinds are always printed.
#[derive(Debug, Clone)]
pub struct RateLimitedMonitor {
// The monitor that actually prints.
inner: SimplePrintingMonitor,
// Timestamp of the last message that was let through.
last: Instant,
}
impl Monitor for RateLimitedMonitor {
    /// Forwards the event to the inner monitor unless it is a throttled kind
    /// (`Testcase`/`UserStats`) arriving within the rate window.
    #[inline]
    fn display(&mut self, client_stats_manager: &mut libafl::prelude::stats::ClientStatsManager, event_msg: &str, sender_id: ClientId) -> Result<(), libafl_bolts::Error> {
        const RATE: Duration = Duration::from_secs(5);
        let now = Instant::now();
        // Only these two high-frequency kinds are rate-limited.
        let throttled_kind = event_msg == "Testcase" || event_msg == "UserStats";
        let window_elapsed = now.duration_since(self.last) > RATE;
        if !throttled_kind || window_elapsed {
            self.inner.display(client_stats_manager, event_msg, sender_id)?;
            // Every printed message restarts the rate window.
            self.last = now;
        }
        Ok(())
    }
}
impl RateLimitedMonitor {
    /// Create a new [`RateLimitedMonitor`] whose first rate-limited event is
    /// printed immediately. (The previous doc referenced `NopMonitor`, a
    /// copy-paste leftover.)
    #[must_use]
    pub fn new() -> Self {
        Self {
            inner: SimplePrintingMonitor::new(),
            // Back-date `last` so the very first throttled event passes the
            // rate check. `checked_sub` avoids the potential panic of
            // `Instant - 2h` on platforms where `Instant`'s epoch is recent
            // (e.g. short system uptime); the fallback merely delays the
            // first throttled print by one rate window.
            last: Instant::now()
                .checked_sub(Duration::from_secs(7200))
                .unwrap_or_else(Instant::now),
        }
    }
}
impl Default for RateLimitedMonitor {
/// Equivalent to [`RateLimitedMonitor::new`].
fn default() -> Self {
Self::new()
}
}

5
fuzzers/FRET/tests/.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
dump
demo*
*.dot
*.time
*.case

4
fuzzers/FRET/tests/iterate.sh Executable file
View File

@ -0,0 +1,4 @@
#!/bin/sh
# Edit one corpus case, replay it through FRET (showmap mode), and render the
# recorded trace. Usage: iterate.sh <case-file> <kernel> <seed> [gantt-arg]
# NOTE(review): the role of each helper tool is inferred from its name and
# flags — confirm against the respective tools' documentation.
../../../../input_serde/target/debug/input_serde -i edit -c "$1" -f case > test.case
../target/debug/fret -k "$2" -c ../benchmark/target_symbols.csv -n ./dump/test -targ -s "$3" showmap -i ./test.case
../../../../state2gantt/driver.sh dump/test.trace.ron $4

View File

@ -0,0 +1,31 @@
#!/bin/sh
# Smoke tests for FRET: replay known cases and check the results reproduce.
# NOTE(review): this script uses `[[ ... ]]`, a bash extension; under a strict
# POSIX /bin/sh (e.g. dash) these comparisons fail — consider `#!/bin/bash`.
TEST_KERNEL=../benchmark/build/waters_seq_full.elf
TEST_SYMBOLS=../benchmark/target_symbols.csv
DEF_ARGS="-k $TEST_KERNEL -c $TEST_SYMBOLS -n ./dump/test"
# cargo build --no-default-features --features std,snapshot_restore,singlecore,feed_afl,observer_hitcounts
# Test basic fuzzing loop
# ../target/debug/fret $DEF_ARGS -tar fuzz -t 10 -s 123
# Test reproducibility: replaying the same case must yield the same time column.
rm -f ./dump/test.time
../target/debug/fret $DEF_ARGS -tr showmap -i ./waters.case.test
if [[ $(cut -d, -f1 ./dump/test.time) != $(cut -d, -f1 ./waters.time.test) ]]; then echo "Not reproducible!" && exit 1; else echo "Reproducible"; fi
# Test state dump: recorded system-state trace must match the stored example.
# cargo build --no-default-features --features std,snapshot_restore,singlecore,feed_afl,observer_hitcounts,systemstate
if [[ -n "$(diff -q demo.example.state.ron dump/demo.trace.ron)" ]]; then echo "State not reproducible!"; else echo "State Reproducible"; fi
# Test abb traces: recorded ABB trace must match the stored example.
# cargo build --no-default-features --features std,snapshot_restore,singlecore,feed_afl,observer_hitcounts,systemstate,trace_abbs
if [[ -n "$(diff -q demo.example.abb.ron dump/demo.trace.ron)" ]]; then echo "ABB not reproducible!"; else echo "ABB Reproducible"; fi
# ../target/debug/fret -k ../benchmark/build/minimal.elf -c ../benchmark/target_symbols.csv -n ./dump/minimal -tar fuzz -t 20 -s 123
# ../target/debug/fret -k ../benchmark/build/minimal.elf -c ../benchmark/target_symbols.csv -n ./dump/minimal_worst -tr showmap -i ./dump/minimal.case
# Test fuzzing using systemtraces
cargo build --no-default-features --features std,snapshot_restore,singlecore,config_stg
../target/debug/fret -k ../benchmark/build/waters_seq_full.elf -c ../benchmark/target_symbols.csv -n ./dump/waters -tar fuzz -t 10 -s 123

Binary file not shown.

32
fuzzers/FRET/tests/times.py Executable file
View File

@ -0,0 +1,32 @@
#!/usr/bin/env python3
"""Convert QEMU instruction counts (ISNS) to/from wall-clock time.

With a single argument the value is converted in both directions, plain and
shifted by the fixed interrupt offset. With several arguments, each value is
printed converted via the interrupt offset using a unit 1000x larger.
"""
import sys

if len(sys.argv) < 2:
    # Fixed: the usage line previously named the script "time.py".
    print("Usage: times.py <number>")
    sys.exit(1)

try:
    number = float(sys.argv[1])
except ValueError:
    print("The first argument must be a number.")
    sys.exit(1)

# icount shift configured in QEMU: one counter tick per 2**QEMU_SHIFT instructions.
QEMU_SHIFT = 5
ISNS_PER_US = 10**3 / (2**QEMU_SHIFT)
# Instruction-count offset before the first interrupt -- TODO confirm per target.
int_offset = 53430

if len(sys.argv) == 2:
    print("Time span")
    print("ISNS -> µs", f"{number / ISNS_PER_US:.2f} us")
    print("µs -> ISNS", f"{number * ISNS_PER_US:.2f}")
    print("Interrupt offset")
    print("ISNS -> µs", f"{((number + int_offset) / ISNS_PER_US):.2f} us")
    print("µs -> ISNS", f"{((number * ISNS_PER_US)-int_offset):.2f}")
else:
    # Several values: convert each one (note the extra factor of 1000).
    for i in range(1, len(sys.argv)):
        try:
            number = float(sys.argv[i])
        except ValueError:
            print(f"The argument {i} must be a number.")
            sys.exit(1)
        print(f"{((number + int_offset) / (ISNS_PER_US*1000)):.2f}")

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,11 @@
{"bytes":Left([175,101,239,153,39,25,250,35,208,208,167,208,191,35,23,225,24,190,143,201,201,201,201,201,235,201,247,203,225,55,35,185,91,71,235,86,156,212,175,201,18,201,201,201,201,201,201,194,236,201,201,201,201,201,201,201,225,251,131,91,91,71,235,225,251,131,92,91,7,235,127,255,255,225,201,201,201,201,54,201,187,201,201,201,201,201,201,201,201,200,201,201,201,201,201,239,239,239,239,201,201,201,239,239,240,15,239,239,255,239,201,201,201,201,201,54,201,201,201,201,201,201,201,201,201,201,201,0,255,175,101,239,153])
,
"isr_0_times":Right([
329095
]),
"isr_1_times":Right([
349851,506101,667851,860632,
3506320, 5068820, 6676010, 8894760
])
}

View File

@ -0,0 +1,9 @@
{"bytes":Left([241,241,241,15,241,56,241,133,237,250,159,177,24,253,127,7,7,7,7,7,15,241,56,241,133,237,250,159,177,24,253,127,241,241,241,241,241,241,241,241,241,241,242,127,0,241,241,241,237,225,211,247,254,0,133,255,255,133,237,250,176,171,152,250,93,255,255,255,237,2,237,127,251,255,241,215,241,241,211,241,241,241,241,241,241,241,241,253,241,241,250,159,241,242,241,241,242,127,0,0,127,255,0,0,0,0,241,128,71,221,241,241,69,243,58,211,247,2,0,127,234,255,133,237,250,241,241,241,241,242,127,0,241,241,241,237,225,211,247,254,0,127,215,241,241,211,241,241,241,241,241,241,241,241,253,241,241,241,128,71,241,241,241,15,241,56,241,133,237,250,159,177,24,253,127,133,237,241,241,241,253,241,241,241,24,253,128,241,241,2,2,2,2,2,2,2,0,0,0,241,128,71,221,241,241,69,243,79,146,0,0,241,128,71,221,241,241,69,12,80,146,237,127,255,255,241]),
"isr_0_times":Right([128,65535,522875,556908,603648,605758,
654632,675391,686821,686822,708351,724562,724562,735120,
766098,766098,766098,
829189,860723,878296,892400,
1080757,1112007,1143257,1174507,1205757,1237007,1268257,1299507,1330757,1362007,1393257
])}

View File

@ -260,6 +260,34 @@ pub trait Fuzzer<E, EM, I, S, ST> {
manager: &mut EM,
iters: u64,
) -> Result<CorpusId, Error>;
/// Fuzz repeatedly until at least the given `time` has elapsed.
/// Returns the index of the last fuzzed corpus item.
/// (Note: the deadline is only checked between iterations, so the loop may
/// overshoot `time` by up to one complete pass over all stages.)
///
/// If you use this fn in a restarting scenario to only run for a bounded time,
/// before exiting, make sure you call `event_mgr.on_restart(&mut state)?;`.
/// This way, the state will be available in the next, respawned, iteration.
fn fuzz_loop_for_duration(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
time: Duration
) -> Result<CorpusId, Error>;
/// Fuzz until the given `time` instant has passed.
/// Returns the index of the last fuzzed corpus item.
fn fuzz_loop_until(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
time: std::time::Instant
) -> Result<CorpusId, Error>;
}
/// The corpus this input should be added to
@ -1011,6 +1039,84 @@ where
Ok(ret.unwrap())
}
/// Fuzz repeatedly until at least `time` has elapsed.
/// Returns the index of the last fuzzed corpus item.
/// (Note: the deadline is only checked between iterations, so the loop may
/// overshoot `time` by up to one complete pass over all stages.)
///
/// If you use this fn in a restarting scenario,
/// before exiting, make sure you call `event_mgr.on_restart(&mut state)?;`.
/// This way, the state will be available in the next, respawned, iteration.
fn fuzz_loop_for_duration(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
time: Duration
) -> Result<CorpusId, Error> {
// A zero duration would make `ret` stay `None` and the final `unwrap` panic.
if time==Duration::ZERO {
return Err(Error::illegal_argument(
"Cannot fuzz for 0 duration!".to_string(),
));
}
let mut ret = None;
let monitor_timeout = STATS_TIMEOUT_DEFAULT;
let starttime = std::time::Instant::now();
while std::time::Instant::now().duration_since(starttime) < time {
ret = Some(self.fuzz_one(stages, executor, state, manager)?);
manager.maybe_report_progress(state, monitor_timeout)?;
}
// If we would assume the fuzzer loop will always exit after this, we could do this here:
// manager.on_restart(state)?;
// But as the state may grow to a few megabytes,
// for now we won't, and the user has to do it (unless we find a way to do this on `Drop`).
// NOTE(review): assumes at least one iteration ran for any nonzero `time` —
// confirm the first `duration_since` check cannot already exceed `time`.
Ok(ret.unwrap())
}
/// Fuzz until the wall-clock instant `time` has passed.
/// Returns the index of the last fuzzed corpus item.
/// (Note: the deadline is only checked between iterations, so the loop may
/// overshoot it by up to one complete pass over all stages.)
///
/// If you use this fn in a restarting scenario,
/// before exiting, make sure you call `event_mgr.on_restart(&mut state)?;`.
/// This way, the state will be available in the next, respawned, iteration.
fn fuzz_loop_until(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
time: std::time::Instant
) -> Result<CorpusId, Error> {
let mut ret = None;
let monitor_timeout = STATS_TIMEOUT_DEFAULT;
while std::time::Instant::now() < time {
ret = Some(self.fuzz_one(stages, executor, state, manager)?);
manager.maybe_report_progress(state, monitor_timeout)?;
}
// If we would assume the fuzzer loop will always exit after this, we could do this here:
// manager.on_restart(state)?;
// But as the state may grow to a few megabytes,
// for now we won't, and the user has to do it (unless we find a way to do this on `Drop`).
// NOTE(review): if the deadline was already in the past, no iteration runs and
// a placeholder `CorpusId(0)` is returned — callers must not assume it refers
// to a corpus entry that this call actually fuzzed.
if ret.is_none() {
eprintln!("Warning: fuzzing loop ended with no last element");
ret = Some(CorpusId(0));
}
Ok(ret.unwrap())
}
}
/// The builder for std fuzzer
@ -1217,6 +1323,28 @@ where
) -> Result<CorpusId, Error> {
unimplemented!("NopFuzzer cannot fuzz");
}
/// Unsupported: the no-op fuzzer cannot fuzz; always panics.
fn fuzz_loop_for_duration(
&mut self,
_stages: &mut ST,
_executor: &mut E,
_state: &mut S,
_manager: &mut EM,
_time: Duration
) -> Result<CorpusId, Error> {
unimplemented!("NopFuzzer cannot fuzz");
}
/// Unsupported: the no-op fuzzer cannot fuzz; always panics.
fn fuzz_loop_until(
&mut self,
_stages: &mut ST,
_executor: &mut E,
_state: &mut S,
_manager: &mut EM,
_time: std::time::Instant
) -> Result<CorpusId, Error> {
unimplemented!("NopFuzzer cannot fuzz");
}
}
#[cfg(all(test, feature = "std"))]

View File

@ -59,6 +59,15 @@ impl TopRatedsMetadata {
pub fn map(&self) -> &HashMap<usize, CorpusId> {
&self.map
}
/// Returns the number of *distinct* corpus entries currently considered
/// interesting (the map may contain the same [`CorpusId`] under several keys).
/// (Fixes typos "Retruns"/"inices" and replaces the manual insert loop with
/// an equivalent iterator collect.)
pub fn get_number(&self) -> usize {
    self.map.values().collect::<HashSet<_>>().len()
}
}
impl Default for TopRatedsMetadata {
@ -74,7 +83,8 @@ impl Default for TopRatedsMetadata {
#[derive(Debug, Clone)]
pub struct MinimizerScheduler<CS, F, I, M, S> {
base: CS,
skip_non_favored_prob: f64,
/// Probability to skip non-favored [`Testcase`]`s`
pub skip_non_favored_prob: f64,
remove_metadata: bool,
phantom: PhantomData<(F, I, M, S)>,
}
@ -284,8 +294,8 @@ where
old_meta.refcnt() <= 0
};
if must_remove && self.remove_metadata {
drop(old.metadata_map_mut().remove::<M>());
if must_remove {
// drop(old.metadata_map_mut().remove::<M>());
}
}
@ -315,6 +325,7 @@ where
.map
.insert(elem, id);
}
// println!("Number of interesting corpus elements: {}", state.metadata_map_mut().get::<TopRatedsMetadata>().unwrap().get_number());
Ok(())
}

View File

@ -73,6 +73,13 @@ where
}
}
/// Number of iterations performed by the mutational stage
pub static mut MUTATION_STAGE_ITER: usize = 0;
/// Number of retries performed by the mutational stage
pub static mut MUTATION_STAGE_RETRY: usize = 0;
/// Number of successful mutations performed by the mutational stage
pub static mut MUTATION_STAGE_SUCCESS: usize = 0;
/// A Mutational stage is the stage in a fuzzing run that mutates inputs.
/// Mutational stages will usually have a range of mutations that are
/// being applied to the input one by one, between executions.

View File

@ -81,6 +81,7 @@ const WRAPPER_HEADER: &str = r#"
#include "libafl/exit.h"
#include "libafl/jit.h"
#include "libafl/utils.h"
#include "libafl/interrupt_injection.h"
#include "libafl/hook.h"
@ -90,6 +91,7 @@ const WRAPPER_HEADER: &str = r#"
#include "libafl/hooks/tcg/edge.h"
#include "libafl/hooks/tcg/instruction.h"
#include "libafl/hooks/tcg/read_write.h"
#include "libafl/hooks/tcg/jmp.h"
#include "libafl/hooks/cpu_run.h"
#include "libafl/hooks/thread.h"
@ -173,6 +175,7 @@ pub fn generate(
.allowlist_function("vm_start")
.allowlist_function("qemu_main_loop")
.allowlist_function("qemu_cleanup")
.allowlist_function("icount_get_raw")
.blocklist_function("main_loop_wait") // bindgen issue #1313
.blocklist_type("siginfo_t")
.raw_line("use libc::siginfo_t;")

View File

@ -22,10 +22,10 @@ mod usermode;
#[cfg(feature = "usermode")]
pub use usermode::*;
// #[cfg(feature = "systemmode")]
// mod systemmode;
// #[cfg(feature = "systemmode")]
// pub use systemmode::*;
#[cfg(feature = "systemmode")]
mod systemmode;
#[cfg(feature = "systemmode")]
pub use systemmode::*;
/// Safe linking with of extern "C" functions.
///

View File

@ -0,0 +1,6 @@
use paste::paste;
use crate::extern_c_checked;
// Checked binding to QEMU's raw icount accessor (declared on the C side).
extern_c_checked!(
pub fn icount_get_raw() -> u64;
);

View File

@ -16,6 +16,17 @@ impl CallingConvention {
pub const Default: CallingConvention = CallingConvention::Aapcs;
}
use crate::sys::CPUStatePtr;
use crate::{Qemu};
unsafe extern "C" {
// Raw QEMU accessor; name suggests it reads the guest user-mode stack
// pointer without validity checks — TODO confirm against the C definition.
fn libafl_qemu_read_user_sp_unchecked(cpu: CPUStatePtr) -> i32;
}
/// Reads a guest user-mode register (presumably SP — see note above) from the
/// current CPU via QEMU.
///
/// # Panics
/// Panics if the emulator has no current CPU.
pub fn read_user_reg_unchecked(emu : &Qemu) -> i32
{
// SAFETY: relies on `cpu_ptr` pointing at a live QEMU `CPUState` for the
// current CPU — NOTE(review): confirm this invariant is upheld by `Qemu`.
unsafe {libafl_qemu_read_user_sp_unchecked(emu.current_cpu().unwrap().cpu_ptr)}.into()
}
/// Registers for the ARM instruction set.
#[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy, EnumIter)]
#[repr(i32)]

View File

@ -37,7 +37,10 @@ use crate::{
read_4_exec_hook_wrapper, read_gen_hook_wrapper, write_0_exec_hook_wrapper,
write_1_exec_hook_wrapper, write_2_exec_hook_wrapper, write_3_exec_hook_wrapper,
write_4_exec_hook_wrapper, write_gen_hook_wrapper,
},
}
};
use crate::{jmp_0_exec_hook_wrapper, jmp_gen_hook_wrapper,JmpExecHook, JmpGenHook, JmpHookId
};
/// Get a C-compatible function pointer from the input hook.
@ -117,6 +120,7 @@ struct EmulatorHookCollection<ET, I, S> {
read_hooks: Vec<Pin<Box<TcgHookState<5, ReadHookId>>>>,
write_hooks: Vec<Pin<Box<TcgHookState<5, WriteHookId>>>>,
cmp_hooks: Vec<Pin<Box<TcgHookState<4, CmpHookId>>>>,
jmp_hooks: Vec<Pin<Box<TcgHookState<1, JmpHookId>>>>,
cpu_run_hooks: Vec<Pin<Box<HookState<CpuRunHookId>>>>,
@ -144,6 +148,7 @@ impl<ET, I, S> Default for EmulatorHookCollection<ET, I, S> {
read_hooks: Vec::default(),
write_hooks: Vec::default(),
cmp_hooks: Vec::default(),
jmp_hooks: Vec::new(),
cpu_run_hooks: Vec::default(),
@ -845,6 +850,56 @@ where
id
}
}
/// Registers a pair of jump hooks (TB-generation-time and execution-time)
/// with QEMU and returns the id of the newly installed hook.
pub fn jmps(
&mut self,
generation_hook: JmpGenHook<ET, I, S>,
execution_hook: JmpExecHook<ET, I, S>,
) -> JmpHookId {
// SAFETY: the hook state is stored pinned and boxed in `jmp_hooks`, so the
// raw pointer handed to QEMU stays valid for the collection's lifetime.
unsafe {
let genh = get_raw_hook!(
generation_hook,
jmp_gen_hook_wrapper::<ET, I, S>,
unsafe extern "C" fn(&mut TcgHookState<1, JmpHookId>, src: GuestAddr, dest: GuestAddr) -> u64
);
let exec = get_raw_hook!(
execution_hook,
jmp_0_exec_hook_wrapper::<ET, I, S>,
unsafe extern "C" fn(&mut TcgHookState<1, JmpHookId>, src: GuestAddr, dest: GuestAddr, id: u64)
);
// The state is created with an invalid id; the real id is patched in below
// once QEMU has assigned one.
self.hook_collection.jmp_hooks.push(Box::pin(TcgHookState::new(
JmpHookId::invalid(),
hook_to_repr!(generation_hook),
HookRepr::Empty,
[
hook_to_repr!(execution_hook),
],
)));
// Obtain a raw pointer into the pinned box just pushed above.
let hook_state = &mut *ptr::from_mut::<TcgHookState<1, JmpHookId>>(
self
.hook_collection.jmp_hooks
.last_mut()
.unwrap()
.as_mut()
.get_unchecked_mut());
let id = self
.qemu_hooks
.add_jmp_hooks(&mut *hook_state,
genh,
exec
);
// Store the id QEMU assigned back into the hook state.
self.hook_collection.jmp_hooks
.last_mut()
.unwrap()
.as_mut()
.get_unchecked_mut()
.set_id(id);
id
}
}
}
#[cfg(feature = "usermode")]
@ -1172,6 +1227,14 @@ where
) -> NewThreadHookId {
self.hooks.thread_creation_closure(hook)
}
pub fn jmps(
&mut self,
generation_hook: JmpGenHook<ET, I, S>,
execution_hook: JmpExecHook<ET, I, S>,
) -> JmpHookId {
self.hooks.jmps(generation_hook, execution_hook)
}
}
impl<ET, I, S> EmulatorModules<ET, I, S>

View File

@ -927,6 +927,38 @@ create_exec_wrapper!(cmp, (id: u64, v0: u16, v1: u16), 1, 4, CmpHookId);
create_exec_wrapper!(cmp, (id: u64, v0: u32, v1: u32), 2, 4, CmpHookId);
create_exec_wrapper!(cmp, (id: u64, v0: u64, v1: u64), 3, 4, CmpHookId);
// Jmp hook wrappers
create_hook_types!(
JmpGen,
fn(
Qemu,
&mut EmulatorModules<ET, I, S>,
Option<&mut S>,
src: GuestAddr,
dest: GuestAddr
) -> Option<u64>,
Box<
dyn for<'a> FnMut(
Qemu,
&'a mut EmulatorModules<ET, I, S>,
Option<&'a mut S>,
GuestAddr,
GuestAddr,
) -> Option<u64>,
>,
extern "C" fn(libafl_qemu_opaque: *const (), src: GuestAddr, dest: GuestAddr) -> u64
);
create_hook_types!(
JmpExec,
fn(Qemu,&mut EmulatorModules<ET, I, S>, Option<&mut S>, src: GuestAddr, dest: GuestAddr, id: u64),
Box<dyn for<'a> FnMut(Qemu,&'a mut EmulatorModules<ET, I, S>, Option<&'a mut S>, GuestAddr, GuestAddr, u64)>,
unsafe extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr, id: u64)
);
create_hook_id!(Jmp, libafl_qemu_remove_jmp_hook, true);
create_gen_wrapper!(jmp, (src: GuestAddr, dest: GuestAddr), u64, 1, JmpHookId);
create_exec_wrapper!(jmp, (src: GuestAddr, dst: GuestAddr, id: u64), 0, 1, JmpHookId);
// static mut JMP_HOOKS: Vec<Pin<Box<HookState<1, JmpHookId>>>> = vec![];
// Crash hook wrappers
#[cfg(feature = "usermode")]
pub type CrashHookFn<ET, I, S> = fn(Qemu, &mut EmulatorModules<ET, I, S>, i32);
@ -1254,6 +1286,22 @@ impl QemuHooks {
NewThreadHookId(num)
}
}
/// Registers raw jump-hook callbacks with QEMU, passing `data` through as the
/// opaque first argument, and returns the assigned hook id.
pub fn add_jmp_hooks<T: Into<HookData>>(
&self,
data: T,
genh: Option<unsafe extern "C" fn(T, GuestAddr, GuestAddr) -> u64>,
exec: Option<unsafe extern "C" fn(T, GuestAddr, GuestAddr, u64)>,
) -> JmpHookId {
// SAFETY: `HookData` wraps a u64, and the transmutes only retype the opaque
// first parameter from `T` to that u64 representation; the callbacks are
// invoked by QEMU with the exact `data` value registered here.
unsafe {
let data: u64 = data.into().0;
let genh: Option<unsafe extern "C" fn(u64, GuestAddr, GuestAddr) -> u64> =
core::mem::transmute(genh);
let exec: Option<unsafe extern "C" fn(u64, GuestAddr, GuestAddr, u64)> = core::mem::transmute(exec);
let num = libafl_qemu_sys::libafl_add_jmp_hook(genh, exec, data);
JmpHookId(num)
}
}
}
#[cfg(feature = "usermode")]

View File

@ -154,7 +154,7 @@ pub struct MemAccessInfo {
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct CPU {
cpu_ptr: CPUStatePtr,
pub cpu_ptr: CPUStatePtr,
}
#[derive(Debug, Clone, PartialEq)]

View File

@ -20,6 +20,8 @@ use crate::{
QemuSnapshotCheckResult,
};
use crate::sys::libafl_start_int_timer;
pub(super) extern "C" fn qemu_cleanup_atexit() {
unsafe {
qemu_cleanup(EXIT_SUCCESS);
@ -221,6 +223,7 @@ impl Qemu {
#[expect(clippy::trivially_copy_pass_by_ref)]
pub(super) unsafe fn run_inner(&self) {
unsafe {
libafl_start_int_timer(); // prepare interrupt timers
vm_start();
qemu_main_loop();
}