remove obsolete scripts

This commit is contained in:
Alwin Berger 2025-08-18 08:58:34 +00:00
parent aaffb3606c
commit a7e00004b2
7 changed files with 1 additions and 361 deletions

View File

@ -1,26 +0,0 @@
# Qemu systemmode with launcher
This folder contains an example fuzzer for the qemu systemmode, using LLMP for fast multi-process fuzzing and crash detection.
## Build
To build this example, run
```bash
cargo build --release
cd example; sh build.sh; cd ..
```
This will build the fuzzer (src/fuzzer.rs) and a small example binary based on FreeRTOS, which can run under a qemu emulation target.
## Run
Since the instrumentation is based on snapshots, QEMU needs a virtual drive (even if it is unused...).
Create one and then run the fuzzer:
```bash
# create an image
qemu-img create -f qcow2 dummy.qcow2 32M
# run the fuzzer
KERNEL=./example/example.elf target/release/qemu_systemmode -icount shift=auto,align=off,sleep=off -machine mps2-an385 -monitor null -kernel ./example/example.elf -serial null -nographic -snapshot -drive if=none,format=qcow2,file=dummy.qcow2 -S
```
Currently the ``KERNEL`` variable is needed because the fuzzer does not parse QEMU's arguments to find the binary.

View File

@ -14,3 +14,4 @@ bins
eval*
test_*
bench_*
results_*

View File

@ -1,8 +0,0 @@
#!/usr/bin/env bash
# Summarize fuzzer logs: for every log named "<prefix>#<n>.log" under the
# given directory, print the last "run time ... corpus" status line and the
# last "interesting corpus elements" line.
#
#   $1 - directory to search
#   $2 - log file name prefix (files matching "${2}#*.log")
#
# All expansions are quoted so paths containing spaces survive, and the
# grep results are not mangled by word splitting.
find "$1" -type 'f' -iname "${2}#*.log" | while IFS="" read -r p || [ -n "$p" ]
do
    # Last progress line within the final 100 lines of the log.
    LINE=$(tail -n 100 "$p" | grep -io "run time: .* corpus: [0-9]*" | tail -n 1)
    echo "$p: $LINE"
    # Last "interesting corpus elements" report anywhere in the log.
    LINE=$(grep -i "interesting corpus elements" "$p" | tail -n 1)
    echo "$p: $LINE"
done

View File

@ -1 +0,0 @@
*.sqlite

View File

@ -1,11 +0,0 @@
# Manifest for the "number_cruncher" result-aggregation tool: it scans a
# tree of fuzzing ".time" result files and stores per-(case, tool)
# statistics curves in an SQLite database.
[package]
name = "number_cruncher"
version = "0.1.0"
edition = "2021"
[dependencies]
clap = { version = "4.5.28", features = ["derive"] } # CLI argument parsing (derive API)
itertools = "0.14.0" # chunk_by for grouping sorted results
rayon = "1.10.0" # parallel file parsing and statistics
regex = "1.11.1" # result-file name matching
rusqlite = "0.33.0" # SQLite output database

View File

@ -1,263 +0,0 @@
use clap::parser::ValueSource;
use clap::Parser;
use itertools::Group;
use itertools::Itertools;
use rayon::iter::ParallelBridge;
use rayon::prelude::*;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::io::{self, BufRead, BufReader};
use std::path::Path;
use std::path::PathBuf;
use rusqlite::{params, Connection, Result};
// Command-line arguments, parsed via clap's derive API.
// NOTE: the `///` doc comments below double as the generated CLI help
// text, so they are deliberately left unchanged.
#[derive(Parser)]
struct Config {
    /// Input
    // Directory tree scanned recursively for "*.time" result files.
    #[arg(short, long, value_name = "DIR")]
    input: PathBuf,
    /// Output
    // SQLite database file the aggregated statistics are written to.
    #[arg(short, long, value_name = "FILE", default_value = "out.sqlite")]
    output: PathBuf,
}
fn visit_dirs(
dir: &Path,
results: &mut Vec<(PathBuf, String, String, String)>,
) -> std::io::Result<()> {
if dir.is_dir() {
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
visit_dirs(&path, results)?;
} else if path.extension().and_then(|s| s.to_str()) == Some("time") {
if let Some(file_name) = path.file_name().and_then(|s| s.to_str()) {
let re = regex::Regex::new(r".*#[0-9]+\.time$").unwrap();
if re.is_match(file_name) {
if let Some(dir_name) = path
.parent()
.and_then(|p| p.file_name())
.and_then(|s| s.to_str())
{
{
let mut file_stem =
path.file_stem().unwrap().to_str().unwrap().split("#");
let case_name = file_stem.next().unwrap();
let case_number = file_stem.next().unwrap();
results.push((
path.clone(),
dir_name.to_string(),
case_name.to_string(),
case_number.to_string(),
));
}
}
}
}
}
}
}
Ok(())
}
/// Parse a `<value>,<timestamp>` CSV log and keep only the rising maxima
/// ("watermark" points), i.e. lines whose first column exceeds every
/// earlier first column.
///
/// Post-processing, applied only when more than one point was found:
/// * the first kept point's timestamp is forced to 0, and
/// * the final watermark is duplicated at the file's last timestamp, so
///   the curve extends to the end of the recorded run.
///
/// Lines without two comma-separated fields are skipped; fields that are
/// present but unparsable yield an `InvalidData` error instead of the
/// panic the original `unwrap()` produced on malformed user data.
fn maxpoints_of_file(file_path: &Path) -> io::Result<Vec<(usize, usize)>> {
    let file = File::open(file_path)?;
    let reader = BufReader::new(file);
    let mut results = Vec::new();
    let mut watermark = 0;
    let mut last_timestamp = 0;
    for line in reader.lines() {
        let line = line?;
        let mut parts = line.split(',');
        if let (Some(first_str), Some(second_str)) = (parts.next(), parts.next()) {
            // Malformed numbers are a data problem, not a program bug:
            // surface them as an I/O error rather than panicking.
            let first: usize = first_str.trim().parse().map_err(|e| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("bad value in {}: {}", file_path.display(), e),
                )
            })?;
            let second: usize = second_str.trim().parse().map_err(|e| {
                io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("bad timestamp in {}: {}", file_path.display(), e),
                )
            })?;
            if first > watermark {
                results.push((first, second));
                watermark = first;
            }
            last_timestamp = second;
        }
    }
    if results.len() > 1 {
        // Anchor the plot at t=0 and extend the last plateau to the end.
        results[0].1 = 0;
        results.push((results[results.len() - 1].0, last_timestamp));
    }
    Ok(results)
}
/// Re-sample a watermark curve onto the query timestamps in `samples`.
///
/// `points` is a list of `(value, timestamp)` pairs sorted by timestamp
/// (as produced by `maxpoints_of_file`); `samples` is a sorted list of
/// timestamps.  Each sample is paired with the value of the interval it
/// falls into; samples before the first interval are clamped to the first
/// value, and samples that were already passed (unsorted input) are
/// skipped with a warning on stderr.
///
/// Parameters are slices (`&[T]`) rather than `&Vec<T>`; existing callers
/// passing `&Vec<_>` still compile via deref coercion.
fn sample_maxpoints(points: &[(usize, usize)], samples: &[usize]) -> Vec<(usize, usize)> {
    let mut todo = samples.iter().peekable();
    let mut ret = Vec::new();
    for i in 0..points.len() {
        if todo.peek().is_none() {
            // All samples placed.
            break;
        }
        while let Some(&&peek) = todo.peek() {
            if peek >= points[i].1 && (i + 1 >= points.len() || peek < points[i + 1].1) {
                // Sample lies inside interval i (or past the last point).
                ret.push((points[i].0, peek));
                todo.next();
            } else if peek < points[i].1 {
                if i == 0 {
                    // Before the first interval: clamp to the first value.
                    ret.push((points[i].0, peek));
                    todo.next();
                } else {
                    // Interval already passed; only possible for unsorted input.
                    eprintln!("WARNING Skipped: {}", todo.next().unwrap());
                }
            } else {
                // Sample belongs to a later interval.
                break;
            }
        }
    }
    ret
}
// Adapted from the Rust Cookbook statistics recipes:
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
/// Arithmetic mean of `data`, or `None` for an empty slice.
fn mean(data: &[usize]) -> Option<f64> {
    if data.is_empty() {
        return None;
    }
    let total = data.iter().sum::<usize>() as f64;
    Some(total / data.len() as f64)
}
/// Median of `data` as `f64`, or `None` for an empty slice.
///
/// For an even number of elements the two central values are averaged;
/// the sum uses `checked_add` so a (pathological) `usize` overflow
/// yields `None` instead of panicking, as before.
fn median(data: &[usize]) -> Option<f64> {
    let mut data = data.to_vec();
    // sort_unstable: identical order for plain integers, but faster and
    // allocation-free compared to the stable sort used originally.
    data.sort_unstable();
    let size = data.len();
    if size == 0 {
        return None;
    }
    if size % 2 == 0 {
        let fst_med = data[(size / 2) - 1];
        let snd_med = data[size / 2];
        fst_med.checked_add(snd_med).map(|x| x as f64 / 2.0)
    } else {
        Some(data[size / 2] as f64)
    }
}
// Adapted from the Rust Cookbook statistics recipes:
// https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
/// Population standard deviation of `data`, or `None` for an empty slice.
fn std_deviation(data: &[usize]) -> Option<f64> {
    let count = data.len();
    if count == 0 {
        return None;
    }
    // `mean` only returns None for empty input, which is excluded above.
    let data_mean = mean(data)?;
    let variance = data
        .iter()
        .map(|&value| {
            let diff = data_mean - value as f64;
            diff * diff
        })
        .sum::<f64>()
        / count as f64;
    Some(variance.sqrt())
}
/// Entry point: scan the input tree for `*.time` result files, compute
/// per-timestamp statistics for every (case, tool) combination, and store
/// the aggregated curves in an SQLite database.
fn main() {
    let conf = Config::parse();
    let mut results = Vec::new();
    if let Err(e) = visit_dirs(&conf.input, &mut results) {
        eprintln!("Error reading directories: {}", e);
    }
    println!("Files: {:?}", results);
    let mut connection = Connection::open(conf.output).unwrap();
    // `combos` indexes all (case, tool) pairs; one data table per pair,
    // named `<case>$<tool>`, is created further down.
    connection.execute("DROP TABLE IF EXISTS combos", ()).unwrap();
    connection.execute("CREATE TABLE IF NOT EXISTS combos (casename TEXT, toolname TEXT, fullname TEXT PRIMARY KEY)", ()).unwrap();
    // Parse every result file in parallel into
    // (case, tool, run number, watermark points).
    let mut points: Vec<_> = results
        .par_iter()
        .map(|(path, fuzzer, case, n)| {
            (
                case,
                fuzzer,
                n.parse::<usize>().unwrap(),
                maxpoints_of_file(path).unwrap(),
            )
        })
        .collect();
    points.sort_by_key(|x| x.0); // by case for grouping
    for (case, casegroup) in &points.into_iter().chunk_by(|x| x.0) {
        let casegroup = casegroup.collect::<Vec<_>>();
        println!("Processing case {}: {}", case, casegroup.len());
        // Union of all timestamps seen by any run of this case; every
        // run's curve is re-sampled onto this common time axis.
        let mut timestamps = Vec::new();
        for (_, _, _, points) in &casegroup {
            timestamps.extend(points.iter().map(|(_, t)| *t));
        }
        timestamps.sort();
        timestamps.dedup();
        let mut maxpoints_per_tool = casegroup
            .par_iter()
            .map(|g| (g.0, g.1, g.2, sample_maxpoints(&g.3, &timestamps)))
            .collect::<Vec<_>>();
        maxpoints_per_tool.sort_by_key(|x| x.1); // by tool
        for (tool, toolgroup) in &maxpoints_per_tool.into_iter().chunk_by(|x| x.1) {
            let toolgroup = toolgroup.collect::<Vec<_>>();
            println!("Processing tool {}: {}", tool, toolgroup.len());
            // Runs may have been sampled to different lengths; only the
            // common prefix is statistically comparable across runs.
            let lowest_common_length = toolgroup
                .iter()
                .map(|(_, _, _, points)| points.len())
                .min()
                .unwrap();
            // For each shared sample index: (timestamp, min, max, median,
            // mean, stddev) over the runs' watermark values.
            let time_min_max_med_mean_sdiv : Vec<(usize,usize,usize,f64,f64,f64)> = (0..lowest_common_length)
                .into_par_iter()
                .map(|i| {
                    let slice = toolgroup.iter().map(|(_, _, _, p)| p[i].0).collect::<Vec<_>>();
                    assert_eq!(slice.len(), toolgroup.len());
                    (
                        // Timestamp taken from the first run; after
                        // sample_maxpoints all runs share the same axis.
                        toolgroup[0].3[i].1,
                        *slice.iter().min().unwrap_or(&0),
                        *slice.iter().max().unwrap_or(&0),
                        median(&slice).unwrap_or(0.0),
                        mean(&slice).unwrap_or(0.0),
                        std_deviation(&slice).unwrap_or(0.0),
                    )
                })
                .collect::<Vec<_>>();
            // Save to db
            // NOTE(review): table names are spliced with format!, so case
            // and tool names must remain valid SQL identifier material;
            // they come from local file/directory names, not untrusted
            // input — confirm if inputs can ever be attacker-controlled.
            connection.execute("INSERT INTO combos (casename, toolname, fullname) VALUES (?, ?, ?)", (case, tool, format!("{}${}",case, tool))).unwrap();
            connection.execute(&format!("DROP TABLE IF EXISTS {}${}", case, tool), ()).unwrap();
            connection.execute(&format!("CREATE TABLE IF NOT EXISTS {}${} (timestamp INTEGER PRIMARY KEY, min INTEGER, max INTEGER, median REAL, mean REAL, sdiv REAL)", case, tool), ()).unwrap();
            // Start a transaction
            let transaction = connection.transaction().unwrap();
            let mut stmt = transaction.prepare(&format!(
                "INSERT INTO {}${} (timestamp , min , max , median , mean , sdiv ) VALUES (?, ?, ?, ?, ?, ?)",
                case, tool
            )).unwrap();
            for (timestamp, min, max, median, mean, sdiv) in time_min_max_med_mean_sdiv {
                stmt.execute([(timestamp as i64).to_string(), (min as i64).to_string(), (max as i64).to_string(), median.to_string(), mean.to_string(), sdiv.to_string()]).unwrap();
            }
            // Statement must be dropped before the transaction can commit.
            drop(stmt);
            // Commit the transaction
            transaction.commit().unwrap();
        }
    }
}

View File

@ -1,52 +0,0 @@
#!/usr/bin/env bash
# A generic counting semaphore in bash
# Parameter is the lockfile and operation
# Setup:
# rm /tmp/test
# echo $num > /tmp/test
#
# Usage:
#   <script> <file> reset <n>  -- (re)initialize the counter file with n slots
#   <script> <file> lock       -- block until a slot is free, then take one
#   <script> <file> release    -- return a slot
#
# Mutual exclusion around the read-modify-write of the counter file is
# implemented with `mkdir <file>_lockdir`: mkdir is atomic and fails if
# the directory already exists, making it a crude mutex.
set -e
# "reset": validate the slot count, clear any stale lockdir, write counter.
if [[ $2 = "reset" ]]; then
if [[ ! "$3" -gt "0" ]]; then echo "Parameter 3: Needs to be a number"; exit;fi
rm -f $1
[[ -d "$1_lockdir" ]] && rmdir $1_lockdir
echo $3 > $1
exit 0
fi
# Argument sanity checks for the non-reset operations.
if [[ ! -f $1 ]]; then echo "Parameter 1: File Does not exist"; exit; fi
if [[ $2 != "lock" ]] && [[ $2 != "release" ]] && [[ $2 != "reset" ]]; then echo "Parameter 2: must be lock, release or reset"; exit; fi
if [[ $2 = "lock" ]]; then
SEM=''
# Spin until we manage to decrement a positive counter.
while [[ -z $SEM ]]; do
# Fast path: no free slot -> sleep without touching the mutex.
if (( $(cat $1 ) == 0 )); then sleep 1; wait; continue; fi
# Grab the mkdir mutex; failure means another process is mid-update.
if mkdir $1_lockdir > /dev/null 2>&1 ; then
VAL=$(cat $1)
if (( $VAL > 0 ))
then
# sed -i with the "w /dev/stdout" flag echoes the replaced line, so
# $SEM becomes non-empty exactly when the decrement actually happened.
SEM=$(sed -i "s@$VAL@$(( $VAL - 1))@w /dev/stdout" $1)
echo "Take $VAL -> $SEM"
else
# NOTE(review): the lockdir is NOT removed before this sleep, so a
# releaser cannot take the mutex while we wait here — looks like a
# potential deadlock when the count hits 0 after the mutex is taken;
# verify under contention.
sleep 1; wait
fi
else
sleep 0.5;
fi
done
# Release the mkdir mutex taken for the successful decrement.
rmdir $1_lockdir
else
# "release": increment the counter under the same mkdir mutex.
SEM=''
while [[ -z $SEM ]]; do
if mkdir $1_lockdir > /dev/null 2>&1 ; then
VAL=$(cat $1)
SEM=$(sed -i "s@$VAL@$(( $VAL + 1))@w /dev/stdout" $1)
echo "Give $VAL -> $(( $VAL + 1 ))"
else
sleep 0.1;
fi
done
rmdir $1_lockdir
fi
#SEM=''; while [[ -z SEM ]]; do VAL=$(cat /tmp/test); if (( $VAL > 0 )); then SEM=$(sed -i "s@$VAL@$(( $VAL - 1))@w /dev/stdout" /tmp/test); else sleep 1; wait; fi; done