ForkserverExecutor (#111)
* add Forkserver, Pipe, OutFile struct
* add forkserver executor struct, and shmem init
* close pipes in the destructor of Forkserver
* fill pre_exec to write out the inputs
* fix
* read_st, write_ctl
* more handshakes
* wrap Pipe in Arc, fill post_exec
* fix for the latest HasExecHooks trait
* use Dominik's pipe, remove Arc and temporarily pass a RawFd to setstdin while figuring out other solutions
* add libafl_tests, put a very simple vulnerable program
* fix
* added forkserver_simple (mostly copy-pasted from babyfuzzer)
* fix test
* handle crash in post_exec
* add README.md
* check exec time to see why it's so slow
* remove double invocation of is_interesting for the objective
* make forkserver_simple AFL-like and improve speed
* some debugging help
* do not evaluate feedback if solution
* speed up the things
* working input placement via stdin in Forkserver
* don't call panic! but return errors, rewrite some comments
* use AFLplusplus/afl-cc instead of AFL
* use .cur_input like AFL
* bring the test for forkserver back
* add better README.md message
* failing the initial handshake should return an error
* delete some commented-out code
* format
* format
* ForkserverExecutor needs std and is unix-only for now
* clippy
* OutFile error handling
* fmt
* clippy
* don't build libafl_tests on windows
* fix
* keep test in forkserver.rs simple
* add forkserver_test feature for libafl_tests
* format
* some doc

Co-authored-by: Andrea Fioraldi <andreafioraldi@gmail.com>
This commit is contained in:
Parent: 1089c93577
Commit: d4410c072a
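The commit message above describes the protocol the new `Forkserver` speaks with an AFL-instrumented target: the input is placed in a `.cur_input` file (delivered via stdin or an `@@` file argument), a 4-byte `write_ctl` asks the forkserver to fork once, and two 4-byte `read_st` replies deliver the child pid and its exit status. As a rough orientation before the full diff, here is a minimal sketch of one such round from the fuzzer side, using only the API this PR adds; the helper name `run_once` is made up for illustration and the error handling is deliberately crude.

```rust
use libafl::{
    executors::forkserver::{Forkserver, OutFile},
    Error,
};

// Illustrative helper (not part of the PR): drive one target execution
// through an already-started forkserver, the way run_target() does below.
fn run_once(forkserver: &mut Forkserver, out_file: &mut OutFile, input: &[u8]) -> Result<i32, Error> {
    // Place the input where the target expects it (.cur_input, served via stdin or "@@").
    out_file.write_buf(input);

    // A 4-byte write on the control pipe requests a fork.
    if forkserver.write_ctl(forkserver.last_run_timed_out())? != 4 {
        return Err(Error::Forkserver("short write on control pipe".to_string()));
    }

    // The status pipe answers with two 4-byte values: the child pid, then its wait status.
    let (_, _pid) = forkserver.read_st()?;
    let (_, status) = forkserver.read_st()?;
    Ok(status)
}
```

`ForkserverExecutor::run_target` in `libafl/src/executors/forkserver.rs` below does essentially this, plus `WIFSIGNALED` checking to map crashing children to `ExitKind::Crash`.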
@@ -11,12 +11,14 @@ members = [
     "libafl_cc",
     "libafl_targets",
     "libafl_frida",
+    "libafl_tests",
 ]
 default-members = [
     "libafl",
     "libafl_derive",
     "libafl_cc",
     "libafl_targets",
+    "libafl_tests",
 ]
 exclude = [
     "fuzzers",
fuzzers/forkserver_simple/Cargo.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
[package]
name = "forkserver_simple"
version = "0.1.0"
authors = ["tokatoka <tokazerkje@outlook.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[profile.dev]
panic = "abort"

[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3

[dependencies]
libafl = { path = "../../libafl/" }
libafl_tests = { path = "../../libafl_tests/", features = ["forkserver_test"] }
fuzzers/forkserver_simple/README.md (new file, 7 lines)
@@ -0,0 +1,7 @@
# Simple Forkserver Fuzzer

This is a simple fuzzer to test the ForkserverExecutor.
You can test it with the following procedure:
1. `cargo build --release`
2. `cp ./target/release/forkserver_simple .`
3. `taskset -c 1 ./forkserver_simple`
fuzzers/forkserver_simple/corpus/testfile (new file, 1 line)
@@ -0,0 +1 @@
aaa
fuzzers/forkserver_simple/src/main.rs (new file, 121 lines)
@@ -0,0 +1,121 @@
use std::path::PathBuf;

use libafl::{
    bolts::{
        current_nanos,
        rands::StdRand,
        shmem::{ShMem, ShMemProvider, StdShMemProvider},
        tuples::tuple_list,
    },
    corpus::{
        Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus,
        QueueCorpusScheduler,
    },
    events::SimpleEventManager,
    executors::forkserver::ForkserverExecutor,
    feedback_and, feedback_or,
    feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback, TimeFeedback},
    fuzzer::{Fuzzer, StdFuzzer},
    inputs::BytesInput,
    mutators::scheduled::{havoc_mutations, StdScheduledMutator},
    observers::{ConstMapObserver, HitcountsMapObserver, TimeObserver},
    stages::mutational::StdMutationalStage,
    state::{HasCorpus, StdState},
    stats::SimpleStats,
};

#[allow(clippy::similar_names)]
pub fn main() {
    let corpus_dirs = vec![PathBuf::from("./corpus")];

    const MAP_SIZE: usize = 65536;
    // Coverage map shared between observer and executor
    let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap();
    // Let the forkserver know the shm id
    shmem.write_to_env("__AFL_SHM_ID").unwrap();
    let mut shmem_map = shmem.map_mut();

    // Create an observation channel using the signals map
    let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new(
        "shared_mem",
        &mut shmem_map,
    ));

    // Create an observation channel to keep track of the execution time
    let time_observer = TimeObserver::new("time");

    // The state of the edges feedback.
    let feedback_state = MapFeedbackState::with_observer(&edges_observer);

    // The state of the edges feedback for crashes.
    let objective_state = MapFeedbackState::new("crash_edges", MAP_SIZE);

    // Feedback to rate the interestingness of an input
    // This one is composed of two Feedbacks in OR
    let feedback = feedback_or!(
        // New maximization map feedback linked to the edges observer and the feedback state
        MaxMapFeedback::new_tracking(&feedback_state, &edges_observer, true, false),
        // Time feedback, this one does not need a feedback state
        TimeFeedback::new_with_observer(&time_observer)
    );

    // A feedback to choose if an input is a solution or not
    // We want to do the same crash deduplication that AFL does
    let objective = feedback_and!(
        // Must be a crash
        CrashFeedback::new(),
        // Take it only if it triggers new coverage over crashes
        MaxMapFeedback::new(&objective_state, &edges_observer)
    );

    // Create a State from scratch
    let mut state = StdState::new(
        // RNG
        StdRand::with_seed(current_nanos()),
        // Corpus that will be evolved, we keep it in memory for performance
        InMemoryCorpus::<BytesInput>::new(),
        // Corpus in which we store solutions (crashes in this example),
        // on disk so the user can get them after stopping the fuzzer
        OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
        // States of the feedbacks.
        // They are the data related to the feedbacks that you want to persist in the State.
        tuple_list!(feedback_state, objective_state),
    );

    // The Stats trait defines how the fuzzer stats are reported to the user
    let stats = SimpleStats::new(|s| println!("{}", s));

    // The event manager handles the various events generated during the fuzzing loop,
    // such as the notification of the addition of a new item to the corpus
    let mut mgr = SimpleEventManager::new(stats);

    // A minimization+queue policy to get testcases from the corpus
    let scheduler = IndexesLenTimeMinimizerCorpusScheduler::new(QueueCorpusScheduler::new());

    // A fuzzer with feedbacks and a corpus scheduler
    let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);

    // Create the executor for the forkserver
    let mut executor = ForkserverExecutor::new(
        "../../libafl_tests/src/forkserver_test.o".to_string(),
        vec![],
        tuple_list!(edges_observer, time_observer),
    )
    .expect("Failed to create the Executor");

    // In case the corpus is empty (on first run), load initial inputs from disk
    if state.corpus().count() < 1 {
        state
            .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs)
            .unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &corpus_dirs));
        println!("We imported {} inputs from disk.", state.corpus().count());
    }

    // Setup a mutational stage with a basic bytes mutator
    let mutator = StdScheduledMutator::new(havoc_mutations());
    let mut stages = tuple_list!(StdMutationalStage::new(mutator));

    fuzzer
        .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
        .expect("Error in the fuzzing loop");
}
@@ -41,6 +41,16 @@ impl Pipe {
             self.write_end = None;
         }
     }
+
+    #[must_use]
+    pub fn read_end(&self) -> Option<RawFd> {
+        self.read_end
+    }
+
+    #[must_use]
+    pub fn write_end(&self) -> Option<RawFd> {
+        self.write_end
+    }
 }

 #[cfg(feature = "std")]
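These two accessors are what lets the new forkserver module (next file) hand the raw pipe fds over to the child process: in its `pre_exec` hook it `dup2`s the control pipe's read end onto fd 198 (`FORKSRV_FD`) and the status pipe's write end onto fd 199, the fd pair AFL-style forkservers expect. Below is a minimal sketch of that wiring, assuming the `Pipe` and `dup2` helpers from `libafl::bolts::os` and ignoring the fork/exec part; `wire_forkserver_fds` is an illustrative name, not part of the PR.

```rust
use libafl::bolts::os::{dup2, pipes::Pipe};

// Same constant the forkserver module below defines.
const FORKSRV_FD: i32 = 198;

// Illustrative only: create the two pipes and map their ends onto the fds
// an AFL-style forkserver expects (the real code does this in the child's pre_exec).
fn wire_forkserver_fds() -> (Pipe, Pipe) {
    let st_pipe = Pipe::new().unwrap(); // target -> fuzzer (status)
    let ctl_pipe = Pipe::new().unwrap(); // fuzzer -> target (control)

    // The target reads fork requests on fd 198 and reports status on fd 199.
    dup2(ctl_pipe.read_end().unwrap(), FORKSRV_FD).expect("dup2 ctl failed");
    dup2(st_pipe.write_end().unwrap(), FORKSRV_FD + 1).expect("dup2 st failed");

    (st_pipe, ctl_pipe)
}
```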
libafl/src/executors/forkserver.rs (new file, 454 lines)
@@ -0,0 +1,454 @@
//! Expose an `Executor` based on a `Forkserver` in order to execute AFL/AFL++ binaries

use core::marker::PhantomData;
use std::{
    fs::{File, OpenOptions},
    io::{self, prelude::*, SeekFrom},
    os::unix::{
        io::{AsRawFd, RawFd},
        process::CommandExt,
    },
    process::{Command, Stdio},
};

use crate::bolts::os::{dup2, pipes::Pipe};
use crate::{
    executors::{
        Executor, ExitKind, HasExecHooks, HasExecHooksTuple, HasObservers, HasObserversHooks,
    },
    inputs::{HasTargetBytes, Input},
    observers::ObserversTuple,
    Error,
};

const FORKSRV_FD: i32 = 198;

// Configure the target: setlimit, setsid, pipe_stdin (code borrowed from the Angora fuzzer)
pub trait ConfigTarget {
    fn setsid(&mut self) -> &mut Self;
    fn setlimit(&mut self, memlimit: u64) -> &mut Self;
    fn setstdin(&mut self, fd: RawFd, use_stdin: bool) -> &mut Self;
    fn setpipe(
        &mut self,
        st_read: RawFd,
        st_write: RawFd,
        ctl_read: RawFd,
        ctl_write: RawFd,
    ) -> &mut Self;
}

impl ConfigTarget for Command {
    fn setsid(&mut self) -> &mut Self {
        let func = move || {
            unsafe {
                libc::setsid();
            };
            Ok(())
        };
        unsafe { self.pre_exec(func) }
    }

    fn setpipe(
        &mut self,
        st_read: RawFd,
        st_write: RawFd,
        ctl_read: RawFd,
        ctl_write: RawFd,
    ) -> &mut Self {
        let func = move || {
            match dup2(ctl_read, FORKSRV_FD) {
                Ok(_) => (),
                Err(_) => {
                    return Err(io::Error::last_os_error());
                }
            }

            match dup2(st_write, FORKSRV_FD + 1) {
                Ok(_) => (),
                Err(_) => {
                    return Err(io::Error::last_os_error());
                }
            }
            unsafe {
                libc::close(st_read);
                libc::close(st_write);
                libc::close(ctl_read);
                libc::close(ctl_write);
            }
            Ok(())
        };
        unsafe { self.pre_exec(func) }
    }

    fn setstdin(&mut self, fd: RawFd, use_stdin: bool) -> &mut Self {
        if use_stdin {
            let func = move || {
                match dup2(fd, libc::STDIN_FILENO) {
                    Ok(_) => (),
                    Err(_) => {
                        return Err(io::Error::last_os_error());
                    }
                }
                Ok(())
            };
            unsafe { self.pre_exec(func) }
        } else {
            self
        }
    }

    fn setlimit(&mut self, memlimit: u64) -> &mut Self {
        if memlimit == 0 {
            return self;
        }
        let func = move || {
            let memlimit: libc::rlim_t = memlimit << 20;
            let r = libc::rlimit {
                rlim_cur: memlimit,
                rlim_max: memlimit,
            };
            let r0 = libc::rlimit {
                rlim_cur: 0,
                rlim_max: 0,
            };

            let mut ret = unsafe { libc::setrlimit(libc::RLIMIT_AS, &r) };
            if ret < 0 {
                return Err(io::Error::last_os_error());
            }
            ret = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &r0) };
            if ret < 0 {
                return Err(io::Error::last_os_error());
            }
            Ok(())
        };
        unsafe { self.pre_exec(func) }
    }
}

pub struct OutFile {
    file: File,
}

impl OutFile {
    pub fn new(file_name: &str) -> Result<Self, Error> {
        let f = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(file_name)?;
        Ok(Self { file: f })
    }

    #[must_use]
    pub fn as_raw_fd(&self) -> RawFd {
        self.file.as_raw_fd()
    }

    pub fn write_buf(&mut self, buf: &[u8]) {
        self.rewind();
        self.file.write_all(buf).unwrap();
        self.file.set_len(buf.len() as u64).unwrap();
        self.file.flush().unwrap();
        // Rewind again, otherwise the target will not read stdin from the beginning
        self.rewind();
    }

    pub fn rewind(&mut self) {
        self.file.seek(SeekFrom::Start(0)).unwrap();
    }
}

/// The [`Forkserver`] is a communication channel with a child process that forks on request of the fuzzer.
/// The communication happens via pipes.
pub struct Forkserver {
    st_pipe: Pipe,
    ctl_pipe: Pipe,
    child_pid: u32,
    status: i32,
    last_run_timed_out: i32,
}

impl Forkserver {
    pub fn new(
        target: String,
        args: Vec<String>,
        out_filefd: RawFd,
        use_stdin: bool,
        memlimit: u64,
    ) -> Result<Self, Error> {
        let mut st_pipe = Pipe::new().unwrap();
        let mut ctl_pipe = Pipe::new().unwrap();

        match Command::new(target)
            .args(args)
            .stdin(Stdio::null())
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .env("LD_BIND_LAZY", "1")
            .setlimit(memlimit)
            .setsid()
            .setstdin(out_filefd, use_stdin)
            .setpipe(
                st_pipe.read_end().unwrap(),
                st_pipe.write_end().unwrap(),
                ctl_pipe.read_end().unwrap(),
                ctl_pipe.write_end().unwrap(),
            )
            .spawn()
        {
            Ok(_) => {}
            Err(_) => {
                return Err(Error::Forkserver(
                    "Could not spawn a forkserver!".to_string(),
                ));
            }
        };

        // ctl_pipe.read_end and st_pipe.write_end are unnecessary for the parent, so we close them
        ctl_pipe.close_read_end();
        st_pipe.close_write_end();

        Ok(Self {
            st_pipe,
            ctl_pipe,
            child_pid: 0,
            status: 0,
            last_run_timed_out: 0,
        })
    }

    #[must_use]
    pub fn last_run_timed_out(&self) -> i32 {
        self.last_run_timed_out
    }

    #[must_use]
    pub fn status(&self) -> i32 {
        self.status
    }

    #[must_use]
    pub fn child_pid(&self) -> u32 {
        self.child_pid
    }

    pub fn read_st(&mut self) -> Result<(usize, i32), io::Error> {
        let mut buf: [u8; 4] = [0u8; 4];

        let rlen = self.st_pipe.read(&mut buf)?;
        let val: i32 = i32::from_ne_bytes(buf);

        Ok((rlen, val))
    }

    pub fn write_ctl(&mut self, val: i32) -> Result<usize, io::Error> {
        let slen = self.ctl_pipe.write(&val.to_ne_bytes())?;

        Ok(slen)
    }
}

/// This [`Executor`] can run binaries compiled for AFL/AFL++ that make use of a forkserver.
pub struct ForkserverExecutor<I, OT>
where
    I: Input + HasTargetBytes,
    OT: ObserversTuple,
{
    target: String,
    args: Vec<String>,
    out_file: OutFile,
    forkserver: Forkserver,
    observers: OT,
    phantom: PhantomData<I>,
}

impl<I, OT> ForkserverExecutor<I, OT>
where
    I: Input + HasTargetBytes,
    OT: ObserversTuple,
{
    pub fn new(target: String, arguments: Vec<String>, observers: OT) -> Result<Self, Error> {
        let mut args = Vec::<String>::new();
        let mut use_stdin = true;
        let out_filename = ".cur_input".to_string();

        for item in arguments {
            if item == "@@" && use_stdin {
                use_stdin = false;
                args.push(out_filename.clone());
            } else {
                args.push(item.to_string());
            }
        }

        let out_file = OutFile::new(&out_filename)?;

        let mut forkserver = Forkserver::new(
            target.clone(),
            args.clone(),
            out_file.as_raw_fd(),
            use_stdin,
            0,
        )?;

        let (rlen, _) = forkserver.read_st()?; // Initial handshake, read the 4-byte hello message from the forkserver.

        match rlen {
            4 => {
                println!("All right - fork server is up.");
            }
            _ => {
                return Err(Error::Forkserver(
                    "Failed to start a forkserver".to_string(),
                ))
            }
        }

        Ok(Self {
            target,
            args,
            out_file,
            forkserver,
            observers,
            phantom: PhantomData,
        })
    }

    pub fn target(&self) -> &String {
        &self.target
    }

    pub fn args(&self) -> &[String] {
        &self.args
    }

    pub fn forkserver(&self) -> &Forkserver {
        &self.forkserver
    }
}

impl<I, OT> Executor<I> for ForkserverExecutor<I, OT>
where
    I: Input + HasTargetBytes,
    OT: ObserversTuple,
{
    #[inline]
    fn run_target(&mut self, input: &I) -> Result<ExitKind, Error> {
        let mut exit_kind = ExitKind::Ok;

        // Write the testcase to the out file
        self.out_file.write_buf(&input.target_bytes().as_slice());

        let send_len = self
            .forkserver
            .write_ctl(self.forkserver().last_run_timed_out())?;
        if send_len != 4 {
            return Err(Error::Forkserver(
                "Unable to request new process from fork server (OOM?)".to_string(),
            ));
        }

        let (recv_len, pid) = self.forkserver.read_st()?;
        if recv_len != 4 {
            return Err(Error::Forkserver(
                "Unable to request new process from fork server (OOM?)".to_string(),
            ));
        }

        if pid <= 0 {
            return Err(Error::Forkserver(
                "Fork server is misbehaving (OOM?)".to_string(),
            ));
        }

        let (_, status) = self.forkserver.read_st()?;
        self.forkserver.status = status;

        if !libc::WIFSTOPPED(self.forkserver.status()) {
            self.forkserver.child_pid = 0;
        }

        if libc::WIFSIGNALED(self.forkserver.status()) {
            exit_kind = ExitKind::Crash;
        }

        Ok(exit_kind)
    }
}

impl<EM, I, OT, S, Z> HasExecHooks<EM, I, S, Z> for ForkserverExecutor<I, OT>
where
    I: Input + HasTargetBytes,
    OT: ObserversTuple,
{
}

impl<I, OT> HasObservers<OT> for ForkserverExecutor<I, OT>
where
    I: Input + HasTargetBytes,
    OT: ObserversTuple,
{
    #[inline]
    fn observers(&self) -> &OT {
        &self.observers
    }

    #[inline]
    fn observers_mut(&mut self) -> &mut OT {
        &mut self.observers
    }
}

impl<EM, I, OT, S, Z> HasObserversHooks<EM, I, OT, S, Z> for ForkserverExecutor<I, OT>
where
    I: Input + HasTargetBytes,
    OT: ObserversTuple + HasExecHooksTuple<EM, I, S, Z>,
{
}

#[cfg(test)]
mod tests {

    use crate::{
        bolts::{
            shmem::{ShMem, ShMemProvider, StdShMemProvider},
            tuples::tuple_list,
        },
        executors::ForkserverExecutor,
        inputs::NopInput,
        observers::{ConstMapObserver, HitcountsMapObserver},
        Error,
    };

    #[test]
    fn test_forkserver() {
        const MAP_SIZE: usize = 65536;
        let bin = "/usr/bin/echo".to_string();
        let args = vec![String::from("@@")];

        let mut shmem = StdShMemProvider::new()
            .unwrap()
            .new_map(MAP_SIZE as usize)
            .unwrap();
        shmem.write_to_env("__AFL_SHM_ID").unwrap();
        let mut shmem_map = shmem.map_mut();

        let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new(
            "shared_mem",
            &mut shmem_map,
        ));

        let executor =
            ForkserverExecutor::<NopInput, _>::new(bin, args, tuple_list!(edges_observer));
        // Since /usr/bin/echo is not an instrumented binary, the test just checks that the forkserver fails at the initial handshake
        let result = match executor {
            Ok(_) => true,
            Err(e) => match e {
                Error::Forkserver(s) => s == "Failed to start a forkserver",
                _ => false,
            },
        };
        assert!(result);
    }
}
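As the doc comment above says, `ForkserverExecutor` runs AFL/AFL++-instrumented binaries; whether the testcase reaches the target via stdin or via a file argument is decided purely by the argument list: an `@@` placeholder is swapped for `.cur_input` and disables stdin delivery, mirroring afl-fuzz. A hedged sketch of both constructions follows; the target paths are hypothetical, and a real setup would pass the coverage map and time observers as in `forkserver_simple/src/main.rs` above, not an empty observer tuple.

```rust
use libafl::{
    bolts::tuples::tuple_list,
    executors::forkserver::ForkserverExecutor,
    inputs::BytesInput,
    Error,
};

// Sketch only: the two input-delivery modes ForkserverExecutor::new() supports.
fn build_executors() -> Result<(), Error> {
    // No "@@" in the arguments: use_stdin stays true, so the target's stdin is
    // redirected to the .cur_input file that write_buf() fills before each run.
    let _stdin_executor = ForkserverExecutor::<BytesInput, _>::new(
        "./fuzz_target_stdin".to_string(), // hypothetical path
        vec![],
        tuple_list!(),
    )?;

    // "@@" present: it is replaced by ".cur_input" and passed as a file argument,
    // and stdin is left untouched.
    let _file_executor = ForkserverExecutor::<BytesInput, _>::new(
        "./fuzz_target_file".to_string(), // hypothetical path
        vec!["@@".to_string()],
        tuple_list!(),
    )?;

    Ok(())
}
```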
@@ -4,6 +4,12 @@ pub mod inprocess;
 pub use inprocess::InProcessExecutor;
 pub mod timeout;
 pub use timeout::TimeoutExecutor;

+#[cfg(all(feature = "std", unix))]
+pub mod forkserver;
+#[cfg(all(feature = "std", unix))]
+pub use forkserver::{Forkserver, ForkserverExecutor, OutFile};
+
 pub mod combined;
 pub use combined::CombinedExecutor;
@@ -240,7 +240,32 @@ where
         assert!(size <= map_state.history_map.len());
         assert!(size <= observer.map().len());

-        if self.indexes.is_none() && self.novelties.is_none() {
+        if self.novelties.is_some() {
+            for i in 0..size {
+                let history = map_state.history_map[i];
+                let item = observer.map()[i];
+
+                let reduced = R::reduce(history, item);
+                if history != reduced {
+                    map_state.history_map[i] = reduced;
+                    interesting = true;
+                    self.novelties.as_mut().unwrap().push(i);
+                }
+            }
+        } else {
+            for i in 0..size {
+                let history = map_state.history_map[i];
+                let item = observer.map()[i];
+
+                let reduced = R::reduce(history, item);
+                if history != reduced {
+                    map_state.history_map[i] = reduced;
+                    interesting = true;
+                }
+            }
+        }
+
+        /*if self.indexes.is_none() && self.novelties.is_none() {
             for i in 0..size {
                 let history = map_state.history_map[i];
                 let item = observer.map()[i];
@@ -293,13 +318,16 @@ where
                    self.novelties.as_mut().unwrap().push(i);
                }
            }
        }
        }*/

        if interesting {
            let mut filled = 0;
            for i in 0..size {
                if map_state.history_map[i] != initial {
                    filled += 1;
                    if self.indexes.is_some() {
                        self.indexes.as_mut().unwrap().push(i);
                    }
                }
            }
            manager.fire(
@@ -145,10 +145,11 @@ where
         let a = self
             .first
             .is_interesting(state, manager, input, observers, exit_kind)?;
-        let b = self
-            .second
-            .is_interesting(state, manager, input, observers, exit_kind)?;
-        Ok(a && b)
+        let b = a
+            && self
+                .second
+                .is_interesting(state, manager, input, observers, exit_kind)?;
+        Ok(b)
     }

     #[cfg(feature = "introspection")]
@@ -176,16 +177,17 @@ where
             feedback_stats,
             feedback_index,
         )?;
-        let b = self.second.is_interesting_with_perf(
-            state,
-            manager,
-            input,
-            observers,
-            &exit_kind,
-            feedback_stats,
-            feedback_index + 1,
-        )?;
-        Ok(a && b)
+        let b = a
+            && self.second.is_interesting_with_perf(
+                state,
+                manager,
+                input,
+                observers,
+                &exit_kind,
+                feedback_stats,
+                feedback_index + 1,
+            )?;
+        Ok(b)
     }

     #[inline]
@@ -268,10 +270,11 @@ where
         let a = self
             .first
             .is_interesting(state, manager, input, observers, exit_kind)?;
-        let b = self
-            .second
-            .is_interesting(state, manager, input, observers, exit_kind)?;
-        Ok(a || b)
+        let b = a
+            || self
+                .second
+                .is_interesting(state, manager, input, observers, exit_kind)?;
+        Ok(b)
     }

     #[cfg(feature = "introspection")]
@@ -299,16 +302,17 @@ where
             feedback_stats,
             feedback_index,
         )?;
-        let b = self.second.is_interesting_with_perf(
-            state,
-            manager,
-            input,
-            observers,
-            &exit_kind,
-            feedback_stats,
-            feedback_index + 1,
-        )?;
-        Ok(a || b)
+        let b = a
+            || self.second.is_interesting_with_perf(
+                state,
+                manager,
+                input,
+                observers,
+                &exit_kind,
+                feedback_stats,
+                feedback_index + 1,
+            )?;
+        Ok(b)
     }

     #[inline]
@@ -188,6 +188,12 @@ pub trait Fuzzer<E, EM, I, S, ST> {
     ) -> Result<Duration, Error>;
 }

+pub enum ExecuteInputResult {
+    None,
+    Interesting,
+    Solution,
+}
+
 /// Your default fuzzer instance, for everyday use.
 #[derive(Debug)]
 pub struct StdFuzzer<C, CS, F, I, OF, OT, S, SC>
@@ -336,35 +342,57 @@ where
         manager: &mut EM,
         input: I,
     ) -> Result<(bool, Option<usize>), Error> {
-        let (is_interesting, is_solution) = self.execute_input(state, executor, manager, &input)?;
+        let result = self.execute_input(state, executor, manager, &input)?;
         let observers = executor.observers();

-        if is_solution {
-            // If the input is a solution, add it to the respective corpus
-            let mut testcase = Testcase::new(input.clone());
-            self.objective_mut().append_metadata(state, &mut testcase)?;
-            state.solutions_mut().add(testcase)?;
-        } else {
-            self.objective_mut().discard_metadata(state, &input)?;
-        }
+        match result {
+            ExecuteInputResult::None => {
+                self.feedback_mut().discard_metadata(state, &input)?;
+                self.objective_mut().discard_metadata(state, &input)?;
+                Ok((false, None))
+            }
+            ExecuteInputResult::Interesting => {
+                // Not a solution
+                self.objective_mut().discard_metadata(state, &input)?;

-        let corpus_idx = self.add_if_interesting(state, &input, is_interesting)?;
-        if corpus_idx.is_some() {
-            let observers_buf = manager.serialize_observers(observers)?;
-            manager.fire(
-                state,
-                Event::NewTestcase {
-                    input,
-                    observers_buf,
-                    corpus_size: state.corpus().count(),
-                    client_config: "TODO".into(),
-                    time: current_time(),
-                    executions: *state.executions(),
-                },
-            )?;
-        }
+                // Add the input to the main corpus
+                let mut testcase = Testcase::new(input.clone());
+                self.feedback_mut().append_metadata(state, &mut testcase)?;
+                let idx = state.corpus_mut().add(testcase)?;
+                self.scheduler_mut().on_add(state, idx)?;

-        Ok((is_interesting, corpus_idx))
+                let observers_buf = manager.serialize_observers(observers)?;
+                manager.fire(
+                    state,
+                    Event::NewTestcase {
+                        input,
+                        observers_buf,
+                        corpus_size: state.corpus().count(),
+                        client_config: "TODO".into(),
+                        time: current_time(),
+                        executions: *state.executions(),
+                    },
+                )?;
+                Ok((true, Some(idx)))
+            }
+            ExecuteInputResult::Solution => {
+                // Not interesting
+                self.feedback_mut().discard_metadata(state, &input)?;
+
+                // The input is a solution, add it to the respective corpus
+                let mut testcase = Testcase::new(input);
+                self.objective_mut().append_metadata(state, &mut testcase)?;
+                state.solutions_mut().add(testcase)?;
+                manager.fire(
+                    state,
+                    Event::Objective {
+                        objective_size: state.solutions().count(),
+                    },
+                )?;
+
+                Ok((false, None))
+            }
+        }
     }
@@ -491,7 +519,7 @@ where
         executor: &mut E,
         event_mgr: &mut EM,
         input: &I,
-    ) -> Result<(bool, bool), Error>
+    ) -> Result<ExecuteInputResult, Error>
     where
         E: Executor<I>
             + HasObservers<OT>
@@ -523,6 +551,17 @@ where
         mark_feature_time!(state, PerfFeature::PostExecObservers);

         let observers = executor.observers();
+
+        start_timer!(state);
+        let is_solution = self
+            .objective_mut()
+            .is_interesting(state, event_mgr, &input, observers, &exit_kind)?;
+        mark_feature_time!(state, PerfFeature::GetObjectivesInterestingAll);
+
+        if is_solution {
+            return Ok(ExecuteInputResult::Solution);
+        }
+
         #[cfg(not(feature = "introspection"))]
         let is_interesting = self
             .feedback_mut()
@@ -555,13 +594,10 @@ where
             is_interesting
         };

-        start_timer!(state);
-        let is_solution = self
-            .objective_mut()
-            .is_interesting(state, event_mgr, &input, observers, &exit_kind)?;
-
-        mark_feature_time!(state, PerfFeature::GetObjectivesInterestingAll);
-
-        Ok((is_interesting, is_solution))
+        if is_interesting {
+            Ok(ExecuteInputResult::Interesting)
+        } else {
+            Ok(ExecuteInputResult::None)
+        }
     }
 }
@@ -71,6 +71,8 @@ pub enum Error {
     IllegalState(String),
     /// The argument passed to this method or function is not valid
     IllegalArgument(String),
+    /// Forkserver related Error
+    Forkserver(String),
     /// Shutting down, not really an error.
     ShuttingDown,
     /// Something else happened
@@ -94,6 +96,7 @@ impl fmt::Display for Error {
             Self::NotImplemented(s) => write!(f, "Not implemented: {0}", &s),
             Self::IllegalState(s) => write!(f, "Illegal state: {0}", &s),
             Self::IllegalArgument(s) => write!(f, "Illegal argument: {0}", &s),
+            Self::Forkserver(s) => write!(f, "Forkserver : {0}", &s),
             Self::ShuttingDown => write!(f, "Shutting down!"),
             Self::Unknown(s) => write!(f, "Unknown error: {0}", &s),
         }
@@ -48,8 +48,8 @@ where
         // Normal memset, see https://rust.godbolt.org/z/Trs5hv
         let initial = self.initial();
         let cnt = self.usable_count();
-        for i in self.map_mut()[0..cnt].iter_mut() {
-            *i = initial;
+        for x in self.map_mut()[0..cnt].iter_mut() {
+            *x = initial;
         }
         Ok(())
     }
@@ -460,7 +460,8 @@ where
         mgr: &mut EM,
         input: &I,
     ) -> Result<(), Error> {
-        for x in self.map_mut().iter_mut() {
+        let cnt = self.usable_count();
+        for x in self.map_mut()[0..cnt].iter_mut() {
             *x = COUNT_CLASS_LOOKUP[*x as usize];
         }
         self.base.post_exec(fuzzer, state, mgr, input)
@@ -243,14 +243,15 @@ where
         // Only print perf stats if the feature is enabled
         #[cfg(feature = "introspection")]
         {
-            // Print the client performance stats. Skip the Client 0 which is the broker
-            for (i, client) in self.client_stats.iter().skip(1).enumerate() {
-                let fmt = format!("Client {:03}: {}", i + 1, client.introspection_stats);
-                (self.print_fn)(fmt);
-            }
+            // Print the client performance stats.
+            let fmt = format!(
+                "Client {:03}: {}",
+                sender_id, self.client_stats[sender_id as usize].introspection_stats
+            );
+            (self.print_fn)(fmt);

             // Separate the spacing just a bit
-            (self.print_fn)("\n".to_string());
+            (self.print_fn)("".to_string());
         }
     }
 }
libafl_tests/Cargo.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
[package]
name = "libafl_tests"
version = "0.1.0"
authors = ["tokatoka <tokazerkje@outlook.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[features]
forkserver_test = []

[dependencies]
libafl_tests/build.rs (new file, 48 lines)
@@ -0,0 +1,48 @@
use std::env;
use std::path::Path;
use std::process::{exit, Command};

const AFL_URL: &str = "https://github.com/AFLplusplus/AFLplusplus";

fn main() {
    if cfg!(feature = "forkserver_test") {
        if cfg!(windows) {
            println!("cargo:warning=No support for windows yet.");
            exit(0);
        }

        let cwd = env::current_dir().unwrap().to_string_lossy().to_string();

        let afl = format!("{}/AFLplusplus", &cwd);
        let afl_gcc = format!("{}/AFLplusplus/afl-cc", &cwd);

        let afl_path = Path::new(&afl);
        let afl_gcc_path = Path::new(&afl_gcc);

        if !afl_path.is_dir() {
            println!("cargo:warning=AFL++ not found, downloading...");
            Command::new("git")
                .arg("clone")
                .arg(AFL_URL)
                .status()
                .unwrap();
        }

        if !afl_gcc_path.is_file() {
            Command::new("make")
                .arg("all")
                .current_dir(&afl_path)
                .status()
                .unwrap();
        }

        Command::new(afl_gcc_path)
            .args(&["src/forkserver_test.c", "-o"])
            .arg(&format!("{}/forkserver_test.o", "src"))
            .status()
            .unwrap();

        println!("cargo:rerun-if-changed=build.rs");
        println!("cargo:rerun-if-changed=src/");
    }
}
libafl_tests/src/forkserver_test.c (new file, 26 lines)
@@ -0,0 +1,26 @@
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {

  FILE *file = stdin;
  if (argc > 1) {
    file = fopen(argv[1], "rb");
  }

  char buf[16];
  char *p = fgets(buf, 16, file);
  buf[15] = 0;

  printf("input: %s\n", p);

  if (buf[0] == 'b') {
    if (buf[1] == 'a') {
      if (buf[2] == 'd') {
        abort();
      }
    }
  }

  return 0;
}
libafl_tests/src/lib.rs (new file, 1 line, empty)
@@ -0,0 +1 @@