feedback for aggregated traces

Alwin Berger 2024-05-06 16:00:11 +02:00
parent 0393f18a47
commit 88c5c8a19f
3 changed files with 45 additions and 10 deletions


@@ -5,7 +5,7 @@ authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenuk
 edition = "2021"
 [features]
-default = ["std", "snapshot_restore", "singlecore", "restarting", "feed_systemtrace", "fuzz_int" ]
+default = ["std", "snapshot_restore", "singlecore", "restarting", "feed_systemtrace", "fuzz_int", "no_hash_state" ]
 std = []
 snapshot_restore = []
 snapshot_fast = [ "snapshot_restore" ]
@@ -15,6 +15,7 @@ trace_abbs = []
 systemstate = []
 feed_systemgraph = [ "systemstate" ]
 feed_systemtrace = [ "systemstate" ]
+feed_stg = [ "systemstate", "trace_abbs" ]
 feed_longest = [ ]
 feed_afl = [ ]
 feed_genetic = [ ]
@@ -24,6 +25,7 @@ gensize_10 = [ ]
 gensize_100 = [ ]
 observer_hitcounts = []
 no_hash_state = []
+count_iterations = []
 run_until_saturation = []
 [profile.release]
@@ -37,7 +39,7 @@ libafl_bolts = { path = "../../libafl_bolts/" }
 libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"] }
 serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
 hashbrown = { version = "0.14.0", features = ["serde"] } # A faster hashmap, nostd compatible
-petgraph = { version="0.6.0", features = ["serde-1"] }
+petgraph = { version="0.6.4", features = ["serde-1"] }
 ron = "0.7" # write serialized data - including hashmaps
 rand = "0.5"
 clap = { version = "4.4.11", features = ["derive"] }
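The new `feed_stg` and `no_hash_state` rows behave like any other Cargo feature: they gate code at compile time. A minimal sketch of such gating, assuming nothing about this crate beyond the `feed_stg` feature name (the module and function below are stand-ins, not this repo's API):

```rust
// Hypothetical illustration of Cargo feature gating; only the feature
// name `feed_stg` is taken from the manifest above.
#[cfg(feature = "feed_stg")]
mod stg {
    pub fn describe() -> &'static str { "STG feedback enabled" }
}

#[cfg(not(feature = "feed_stg"))]
mod stg {
    pub fn describe() -> &'static str { "STG feedback disabled" }
}

fn main() {
    // Selected at compile time, e.g. by `cargo build --features feed_stg`.
    println!("{}", stg::describe());
}
```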


@@ -1,8 +1,8 @@
 import csv
 import os
-def_flags="--no-default-features --features std,snapshot_restore,singlecore,restarting,run_until_saturation"
+def_flags="--no-default-features --features std,snapshot_restore,singlecore,restarting,no_hash_state"
 remote="timedump_253048_1873f6_all/"
-RUNTIME=30
+RUNTIME=7600
 TARGET_REPS_A=2
 TARGET_REPS_B=2
 NUM_NODES=2
@@ -46,7 +46,13 @@ rule build_state:
     output:
         directory("bins/target_state")
     shell:
-        "cargo build --target-dir {output} {def_flags},feed_systemtrace"
+        "cargo build --target-dir {output} {def_flags},feed_systemtrace,trace_abbs"
+rule build_stg:
+    output:
+        directory("bins/target_stg")
+    shell:
+        "cargo build --target-dir {output} {def_flags},feed_stg"
 rule build_nohashstate:
     output:
@@ -156,10 +162,11 @@ rule run_bench:
         fuzz_len=line['input_size']
         bkp=line['return_function']
         script="""
+        export RUST_BACKTRACE=1
         mkdir -p $(dirname {output[0]})
         set +e
         echo $(pwd)/{input[1]}/debug/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -t -a -r -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num}
-        $(pwd)/{input[1]}/debug/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -t -a -r -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num}
+        $(pwd)/{input[1]}/debug/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -t -a -r -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num} > {output[1]} 2>&1
         echo $(pwd)/{input[1]}/debug/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num}
-        $(pwd)/{input[1]}/debug/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num}
+        $(pwd)/{input[1]}/debug/fret -n $(pwd)/timedump/{wildcards.fuzzer}/{wildcards.target}#{wildcards.num} -t -a -r -g -k {input[0]} -c ./target_symbols.csv fuzz -t {RUNTIME} -s {wildcards.num} > {output[1]} 2>&1
         exit 0
         """
         if wildcards.fuzzer.find('random') >= 0:
@@ -259,6 +266,10 @@ rule clusterfuzz:
         expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl','frafl','feedlongest'], target=['waters','watersv2'],num=MY_RANGE_B),
         expand("timedump/{fuzzer}/{target}.{num}", fuzzer=['afl_int','frafl_int','feedlongest_int'], target=['waters_int','watersv2_int'],num=MY_RANGE_B),
+rule all_new:
+    input:
+        expand("timedump/{fuzzer}/{target}#{num}.time", fuzzer=['feedgeneration100', 'frafl', 'state', 'stg'], target=['waters', 'watersv2'],num=range(0,3))
 rule all_bins:
     input:
         expand("bins/target_{target}{flag}",target=['random','afl','frafl','state','feedgeneration100'],flag=['','_int'])


@@ -86,10 +86,13 @@ impl PartialEq for STGNode {
 #[derive(Debug, Serialize, Deserialize, SerdeAny, Clone)]
 pub struct STGFeedbackState
 {
+    // aggregated traces as a graph
     pub graph: DiGraph<STGNode, ()>,
     index: HashMap<u64, NodeIndex>,
     entrypoint: NodeIndex,
     exit: NodeIndex,
+    // Metadata about aggregated traces; aggregated meaning the order of states has been removed
+    worst_observed_per_aggegated_path: HashMap<Vec<NodeIndex>,u64>
 }
 impl Default for STGFeedbackState {
@@ -112,7 +115,8 @@ impl Default for STGFeedbackState {
             graph,
             index,
             entrypoint,
-            exit
+            exit,
+            worst_observed_per_aggegated_path: HashMap::new(),
         }
     }
 }
@@ -142,6 +146,7 @@ pub struct StgFeedback
 }
 const INTEREST_EDGE : bool = true;
 const INTEREST_NODE : bool = true;
+const INTEREST_AGGREGATE : bool = true;
 fn set_observer_map(trace : &Vec<EdgeIndex>) {
     unsafe {
         for i in 0..MAX_STG_NUM {
@@ -238,6 +243,8 @@ where
 {
     let observer = observers.match_name::<QemuSystemStateObserver>("systemstate")
         .expect("QemuSystemStateObserver not found");
+    let clock_observer = observers.match_name::<QemuClockObserver>("clocktime")
+        .expect("QemuClockObserver not found");
     let feedbackstate = match state
         .named_metadata_map_mut()
         .get_mut::<STGFeedbackState>("stgfeedbackstate") {
@@ -251,14 +258,29 @@ where
     let abbs = trace_to_state_abb(&observer.last_run);
     // println!("{:?}",abbs);
-    let (trace, _, new_edge) = StgFeedback::update_stg(&observer.last_run, abbs, feedbackstate);
+    let (trace, _, mut interesting) = StgFeedback::update_stg(&observer.last_run, abbs, feedbackstate);
+    if INTEREST_AGGREGATE {
+        // aggregation by sorting; the order of states is not relevant
+        let mut tmp = trace.clone();
+        tmp.sort();
+        if let Some(x) = feedbackstate.worst_observed_per_aggegated_path.get_mut(&tmp) {
+            let t = clock_observer.last_runtime();
+            if t > *x {
+                *x = t;
+                interesting |= true;
+            }
+        } else {
+            feedbackstate.worst_observed_per_aggegated_path.insert(tmp, clock_observer.last_runtime());
+            interesting |= true;
+        }
+    }
     // let out = feedbackstate.graph.map(|i,x| x.pretty_print(), |_,_| "");
     // let outs = Dot::with_config(&out, &[Config::EdgeNoLabel]).to_string();
     // let outs = outs.replace(';',"\\n");
     // fs::write("./mystg.dot",outs).expect("Failed to write graph");
-    Ok(false)
+    Ok(interesting)
 }
 /// Append to the testcase the generated metadata in case of a new corpus item
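Condensed, the new feedback keeps one worst-case runtime per aggregated trace: a trace is reduced to a sorted list of node indices (discarding execution order), and an input is interesting when it reaches a previously unseen aggregate or sets a new worst-case time for a known one. A self-contained sketch of that rule, with plain `u32` ids standing in for petgraph's `NodeIndex` and a hypothetical function name:

```rust
use std::collections::HashMap;

// Sketch of the aggregated-trace rule above; the map mirrors
// `worst_observed_per_aggegated_path`, keyed by the sorted trace.
fn aggregate_is_interesting(
    worst_observed: &mut HashMap<Vec<u32>, u64>,
    trace: &[u32],
    runtime: u64,
) -> bool {
    // Aggregate by sorting: only the multiset of visited states matters.
    let mut key = trace.to_vec();
    key.sort();
    match worst_observed.get_mut(&key) {
        // Known aggregate: interesting only if it sets a new worst case.
        Some(t) if runtime > *t => {
            *t = runtime;
            true
        }
        Some(_) => false,
        // Unseen aggregate: record it and report it as interesting.
        None => {
            worst_observed.insert(key, runtime);
            true
        }
    }
}

fn main() {
    let mut worst = HashMap::new();
    assert!(aggregate_is_interesting(&mut worst, &[2, 1, 3], 100)); // new aggregate
    assert!(!aggregate_is_interesting(&mut worst, &[1, 2, 3], 90)); // same multiset, not worse
    assert!(aggregate_is_interesting(&mut worst, &[3, 2, 1], 120)); // new worst case
}
```

This matches the diff's behavior of returning `Ok(interesting)` instead of the previous unconditional `Ok(false)`: an execution can now be kept for the corpus purely because its aggregated path is new or newly worst-timed.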