Add TargetBytesConverter to allow Nautilus for ForkserverExecutor (#2630)

* Add TargetBytesConverter to allow Nautilus for ForkserverExecutor

* ci

* ci

* More

* fmt
This commit is contained in:
Dominik Maier 2024-10-24 14:10:26 +02:00 committed by GitHub
parent 261b6b5a52
commit e27ec269ce
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 519 additions and 43 deletions

View File

@ -272,6 +272,7 @@ jobs:
- ./fuzzers/structure_aware/baby_fuzzer_multi - ./fuzzers/structure_aware/baby_fuzzer_multi
- ./fuzzers/structure_aware/baby_fuzzer_custom_input - ./fuzzers/structure_aware/baby_fuzzer_custom_input
- ./fuzzers/structure_aware/baby_fuzzer_nautilus - ./fuzzers/structure_aware/baby_fuzzer_nautilus
- ./fuzzers/structure_aware/forkserver_simple_nautilus
# In-process # In-process
- ./fuzzers/fuzz_anything/cargo_fuzz - ./fuzzers/fuzz_anything/cargo_fuzz

1
.gitignore vendored
View File

@ -6,6 +6,7 @@ vendor
.DS_Store .DS_Store
.env .env
.vscode
*.test *.test
*.tmp *.tmp

View File

@ -76,13 +76,9 @@ pub fn main() {
) )
.unwrap(); .unwrap();
if state let _ = state.metadata_or_insert_with::<NautilusChunksMetadata>(|| {
.metadata_map() NautilusChunksMetadata::new("/tmp/".into())
.get::<NautilusChunksMetadata>() });
.is_none()
{
state.add_metadata(NautilusChunksMetadata::new("/tmp/".into()));
}
// The Monitor trait define how the fuzzer stats are reported to the user // The Monitor trait define how the fuzzer stats are reported to the user
let monitor = SimpleMonitor::new(|s| println!("{s}")); let monitor = SimpleMonitor::new(|s| println!("{s}"));
@ -139,9 +135,11 @@ pub fn main() {
*/ */
// Generate 8 initial inputs // Generate 8 initial inputs
if state.must_load_initial_inputs() {
state state
.generate_initial_inputs_forced(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) .generate_initial_inputs_forced(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
.expect("Failed to generate the initial corpus"); .expect("Failed to generate the initial corpus");
}
// Setup a mutational stage with a basic bytes mutator // Setup a mutational stage with a basic bytes mutator
let mutator = StdScheduledMutator::with_max_stack_pow( let mutator = StdScheduledMutator::with_max_stack_pow(

View File

@ -0,0 +1 @@
forkserver_simple

View File

@ -0,0 +1,24 @@
[package]
name = "forkserver_simple"
version = "0.13.2"
authors = ["tokatoka <tokazerkje@outlook.com>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[profile.dev]
panic = "abort"
[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
[dependencies]
clap = { version = "4.5.18", features = ["derive"] }
env_logger = "0.11.5"
libafl = { path = "../../../libafl", features = ["std", "derive"] }
libafl_bolts = { path = "../../../libafl_bolts" }
log = { version = "0.4.22", features = ["release_max_level_info"] }
nix = { version = "0.29.0", features = ["signal"] }

View File

@ -0,0 +1,13 @@
# Simple Forkserver Fuzzer
This is a simple example fuzzer to fuzz an executable instrumented by afl-cc.
## Usage
You can build this example by `cargo build --release`.
This downloads AFLplusplus/AFLplusplus and compiles the example harness program in src/program.c with afl-cc
## Run
After you build it you can run
`cp ./target/release/forkserver_simple .` to copy the fuzzer into this directory,
and you can run
`taskset -c 1 ./forkserver_simple ./target/release/program -g <grammar.json> -t 1000` to run the fuzzer (the `-g` flag points at the Nautilus grammar file).
`taskset` binds this process to a specific core to improve the throughput.

View File

@ -0,0 +1,59 @@
use std::{
env,
path::Path,
process::{exit, Command},
};
const AFL_URL: &str = "https://github.com/AFLplusplus/AFLplusplus";
/// Cargo build script: fetches and builds AFL++, then compiles the example
/// harness `src/program.c` with `afl-cc` so the target is instrumented.
///
/// Fix over the previous version: `Command::status()` only errors when the
/// child fails to *launch*; a non-zero exit code (failed clone, failed make,
/// failed compile) was silently ignored and the build proceeded with a broken
/// or missing target. We now assert `ExitStatus::success()` for every step.
fn main() {
    // AFL++'s build system has no Windows support; bail out gracefully.
    if cfg!(windows) {
        println!("cargo:warning=No support for windows yet.");
        exit(0);
    }

    // A `DEBUG` env var confuses AFL++'s Makefile, so drop it first.
    env::remove_var("DEBUG");

    let cwd = env::current_dir().unwrap().to_string_lossy().to_string();

    let afl = format!("{}/AFLplusplus", &cwd);
    let afl_cc = format!("{}/AFLplusplus/afl-cc", &cwd);

    let afl_path = Path::new(&afl);
    let afl_cc_path = Path::new(&afl_cc);

    // Clone AFL++ on the first build only.
    if !afl_path.is_dir() {
        println!("cargo:warning=AFL++ not found, downloading...");
        let status = Command::new("git")
            .arg("clone")
            .arg(AFL_URL)
            .status()
            .expect("could not launch `git clone`");
        assert!(status.success(), "`git clone` of AFL++ failed: {status}");
    }

    // Build afl-cc if it does not exist yet.
    if !afl_cc_path.is_file() {
        let mut afl_cc_make = Command::new("make");
        afl_cc_make.arg("all").current_dir(afl_path);
        // Forward a non-empty LLVM_CONFIG to AFL++'s build, if set.
        if let Ok(llvm_config) = env::var("LLVM_CONFIG") {
            if !llvm_config.is_empty() {
                afl_cc_make.env("LLVM_CONFIG", llvm_config);
            }
        }
        let status = afl_cc_make.status().expect("could not launch `make`");
        assert!(status.success(), "building AFL++ failed: {status}");
    }

    // Compile the harness with afl-cc so it receives AFL instrumentation.
    let mut compile_command = Command::new(afl_cc_path);
    compile_command
        .args(["src/program.c", "-o"])
        .arg(format!("{cwd}/target/release/program"));
    if let Ok(llvm_config) = env::var("LLVM_CONFIG") {
        if !llvm_config.is_empty() {
            compile_command.env("LLVM_CONFIG", llvm_config);
        }
    }
    let status = compile_command
        .status()
        .expect("could not launch afl-cc");
    assert!(
        status.success(),
        "compiling src/program.c with afl-cc failed: {status}"
    );

    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-changed=src/");
}

View File

@ -0,0 +1 @@
aaa

View File

@ -0,0 +1,227 @@
use core::time::Duration;
use std::path::PathBuf;
use clap::Parser;
use libafl::{
corpus::{InMemoryCorpus, OnDiskCorpus},
events::SimpleEventManager,
executors::{forkserver::ForkserverExecutor, HasObservers},
feedback_and_fast, feedback_or,
feedbacks::{
CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback, TimeFeedback,
},
fuzzer::{Fuzzer, StdFuzzer},
generators::{NautilusContext, NautilusGenerator},
inputs::{NautilusInput, NautilusTargetBytesConverter},
monitors::SimpleMonitor,
mutators::{
NautilusRandomMutator, NautilusRecursionMutator, NautilusSpliceMutator,
StdScheduledMutator, Tokens,
},
observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver},
schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler},
stages::mutational::StdMutationalStage,
state::StdState,
HasMetadata,
};
use libafl_bolts::{
current_nanos,
rands::StdRand,
shmem::{ShMem, ShMemProvider, UnixShMemProvider},
tuples::{tuple_list, Handled},
AsSliceMut, Truncate,
};
use nix::sys::signal::Signal;
/// The commandline args this fuzzer accepts
///
/// Fixes over the previous version: user-facing help-text typos
/// ("fuzz a executable" -> "an executable", "stderror" -> "stderr").
/// Plain `//` comments are used on fields so clap's help output
/// (driven by the explicit `help = ...` attributes) is unchanged.
#[derive(Debug, Parser)]
#[command(
    name = "forkserver_simple",
    about = "This is a simple example fuzzer to fuzz an executable instrumented by afl-cc, using a Nautilus grammar.",
    author = "tokatoka <tokazerkje@outlook.com>, dmnk <domenukk@gmail.com>"
)]
struct Opt {
    // Path to the AFL-instrumented target binary (positional, required).
    #[arg(
        help = "The instrumented binary we want to fuzz",
        name = "EXEC",
        required = true
    )]
    executable: String,

    // Per-execution timeout, in milliseconds.
    #[arg(
        help = "Timeout for each individual execution, in milliseconds",
        short = 't',
        long = "timeout",
        default_value = "1200"
    )]
    timeout: u64,

    // When set, the child's output is shown instead of being discarded.
    #[arg(
        help = "If not set, the child's stdout and stderr will be redirected to /dev/null",
        short = 'd',
        long = "debug-child",
        default_value = "false"
    )]
    debug_child: bool,

    // Remaining (possibly hyphen-prefixed) args are forwarded to the target.
    #[arg(
        help = "Arguments passed to the target",
        name = "arguments",
        num_args(1..),
        allow_hyphen_values = true,
    )]
    arguments: Vec<String>,

    // Signal used to kill the child on timeout (parsed by nix).
    #[arg(
        help = "Signal used to stop child",
        short = 's',
        long = "signal",
        value_parser = str::parse::<Signal>,
        default_value = "SIGKILL"
    )]
    signal: Signal,

    // Path to the Nautilus grammar file (`-g`).
    #[arg(help = "The nautilus grammar file", short)]
    grammar: PathBuf,
}
#[allow(clippy::similar_names)]
/// Fuzzer entry point: wires up a `ForkserverExecutor` that is fed
/// `NautilusInput`s, converted to raw bytes via `NautilusTargetBytesConverter`.
pub fn main() {
    env_logger::init();

    // Size of the AFL-style coverage map shared with the target.
    const MAP_SIZE: usize = 65536;

    let opt = Opt::parse();

    let mut shmem_provider = UnixShMemProvider::new().unwrap();

    // The coverage map shared between observer and executor
    let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap();
    // Let the forkserver know the shmem id via the AFL-conventional env var
    shmem.write_to_env("__AFL_SHM_ID").unwrap();
    let shmem_buf = shmem.as_slice_mut();

    // Create an observation channel using the coverage map.
    // NOTE(review): the observer borrows the raw shmem buffer; `shmem` must
    // outlive the executor, which it does here (both live to end of main).
    let edges_observer = unsafe {
        HitcountsMapObserver::new(StdMapObserver::new("shared_mem", shmem_buf)).track_indices()
    };

    // Create an observation channel to keep track of the execution time
    let time_observer = TimeObserver::new("time");

    // Parse the Nautilus grammar file (15 is the tree-depth/size limit
    // parameter — confirm against `NautilusContext::from_file` docs)
    let context = NautilusContext::from_file(15, opt.grammar);

    // Feedback to rate the interestingness of an input.
    // Composed of multiple feedbacks in OR.
    let mut feedback = feedback_or!(
        // New maximization map feedback linked to the edges observer and the feedback state
        MaxMapFeedback::new(&edges_observer),
        // Time feedback, this one does not need a feedback state
        TimeFeedback::new(&time_observer),
        // Nautilus context
        NautilusFeedback::new(&context),
    );

    // A feedback to choose if an input is a solution or not.
    // We want to do the same crash deduplication that AFL does.
    let mut objective = feedback_and_fast!(
        // Must be a crash
        CrashFeedback::new(),
        // Take it only if it triggers new coverage over crashes.
        // Uses `with_name` to create a different history from the `MaxMapFeedback` in `feedback` above
        MaxMapFeedback::with_name("mapfeedback_metadata_objective", &edges_observer)
    );

    // Create a State from scratch
    let mut state = StdState::new(
        // RNG
        StdRand::with_seed(current_nanos()),
        // Corpus that will be evolved, we keep it in memory for performance
        InMemoryCorpus::<NautilusInput>::new(),
        // Corpus in which we store solutions (crashes in this example),
        // on disk so the user can get them after stopping the fuzzer
        OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
        // States of the feedbacks.
        // The feedbacks can report the data that should persist in the State.
        &mut feedback,
        // Same for objective feedbacks
        &mut objective,
    )
    .unwrap();

    // Ensure the chunk metadata Nautilus needs exists (presumably backing the
    // splice mutators below, stored under /tmp — confirm upstream docs).
    let _ = state.metadata_or_insert_with::<NautilusChunksMetadata>(|| {
        NautilusChunksMetadata::new("/tmp/".into())
    });

    // The Monitor trait defines how the fuzzer stats are reported to the user
    let monitor = SimpleMonitor::new(|s| println!("{s}"));

    // The event manager handles the various events generated during the fuzzing loop,
    // such as the notification of the addition of a new item to the corpus
    let mut mgr = SimpleEventManager::new(monitor);

    // A minimization+queue policy to get testcases from the corpus
    let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new());

    // A fuzzer with feedbacks and a corpus scheduler
    let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);

    // If we should debug the child
    let debug_child = opt.debug_child;

    // Create the executor for the forkserver
    let args = opt.arguments;

    // Handle to look the edges observer back up after a potential map resize.
    let observer_ref = edges_observer.handle();

    let mut tokens = Tokens::new();
    let mut executor = ForkserverExecutor::builder()
        .program(opt.executable)
        .debug_child(debug_child)
        .shmem_provider(&mut shmem_provider)
        .autotokens(&mut tokens)
        .parse_afl_cmdline(args)
        .coverage_map_size(MAP_SIZE)
        .timeout(Duration::from_millis(opt.timeout))
        .kill_signal(opt.signal)
        // Convert NautilusInput derivation trees to raw bytes for the target.
        .target_bytes_converter(NautilusTargetBytesConverter::new(&context))
        .build(tuple_list!(time_observer, edges_observer))
        .unwrap();

    // If the forkserver reported its own map size, shrink the observer to fit.
    if let Some(dynamic_map_size) = executor.coverage_map_size() {
        executor.observers_mut()[&observer_ref]
            .as_mut()
            .truncate(dynamic_map_size);
    }

    let mut generator = NautilusGenerator::new(&context);

    // Generate 8 initial inputs, unless resuming from an earlier run.
    if state.must_load_initial_inputs() {
        state
            .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
            .expect("Failed to generate inputs");
    }

    // Persist the tokens the forkserver auto-detected (autotokens) in the state.
    state.add_metadata(tokens);

    // Setup a mutational stage with a basic bytes mutator
    let mutator = StdScheduledMutator::with_max_stack_pow(
        tuple_list!(
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRecursionMutator::new(&context),
            NautilusSpliceMutator::new(&context),
            NautilusSpliceMutator::new(&context),
            NautilusSpliceMutator::new(&context),
        ),
        2,
    );
    let mut stages = tuple_list!(StdMutationalStage::new(mutator));

    fuzzer
        .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
        .expect("Error in the fuzzing loop");
}

View File

@ -0,0 +1,35 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// The following line is needed for shared memory testcase fuzzing
__AFL_FUZZ_INIT();
// Crash (via abort) when the input is exactly the magic word "vuln";
// otherwise do nothing.
void vuln(char *buf) {
  if (!strcmp(buf, "vuln")) {
    abort();
  }
}
// Harness entry point. When built with afl-cc, the testcase arrives through
// AFL++'s shared-memory buffer; crashes on inputs starting with "bad" or
// equal to "vuln".
int main(int argc, char **argv) {
  // File-based input path (kept for non-shmem runs); defaults to stdin.
  FILE *file = stdin;
  if (argc > 1) { file = fopen(argv[1], "rb"); }

  // The following three lines are for normal (file-based) fuzzing; they are
  // disabled while shared-memory testcase fuzzing is used.
  /*
  char buf[16];
  char* p = fgets(buf, 16, file);
  buf[15] = 0;
  */

  // The following line is also needed for shared memory testcase fuzzing.
  // NOTE(review): __AFL_FUZZ_TESTCASE_BUF is an AFL++ macro; presumably it
  // points at the shmem testcase buffer — see AFL++ persistent-mode docs.
  unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF;

  printf("input: %s\n", buf);

  // Crash on any input beginning with "bad" ...
  if (buf[0] == 'b') {
    if (buf[1] == 'a') {
      if (buf[2] == 'd') { abort(); }
    }
  }
  // ... and on the exact input "vuln" (checked inside vuln()).
  vuln(buf);

  return 0;
}

View File

@ -27,6 +27,7 @@ rustc-args = ["--cfg", "docsrs"]
[features] [features]
default = [ default = [
"nautilus",
"std", "std",
"derive", "derive",
"llmp_compression", "llmp_compression",

View File

@ -42,7 +42,9 @@ use crate::observers::{
}; };
use crate::{ use crate::{
executors::{Executor, ExitKind, HasObservers}, executors::{Executor, ExitKind, HasObservers},
inputs::{HasTargetBytes, Input, UsesInput}, inputs::{
BytesInput, HasTargetBytes, Input, NopTargetBytesConverter, TargetBytesConverter, UsesInput,
},
mutators::Tokens, mutators::Tokens,
observers::{MapObserver, Observer, ObserversTuple}, observers::{MapObserver, Observer, ObserversTuple},
state::{HasExecutions, State, UsesState}, state::{HasExecutions, State, UsesState},
@ -70,9 +72,9 @@ const FS_OPT_SHDMEM_FUZZ: i32 = 0x01000000_u32 as i32;
const FS_NEW_OPT_SHDMEM_FUZZ: i32 = 2_u32 as i32; const FS_NEW_OPT_SHDMEM_FUZZ: i32 = 2_u32 as i32;
#[allow(clippy::cast_possible_wrap)] #[allow(clippy::cast_possible_wrap)]
const FS_NEW_OPT_AUTODICT: i32 = 0x00000800_u32 as i32; const FS_NEW_OPT_AUTODTCT: i32 = 0x00000800_u32 as i32;
#[allow(clippy::cast_possible_wrap)] #[allow(clippy::cast_possible_wrap)]
const FS_OPT_AUTODICT: i32 = 0x10000000_u32 as i32; const FS_OPT_AUTODTCT: i32 = 0x10000000_u32 as i32;
#[allow(clippy::cast_possible_wrap)] #[allow(clippy::cast_possible_wrap)]
const FS_ERROR_MAP_SIZE: i32 = 1_u32 as i32; const FS_ERROR_MAP_SIZE: i32 = 1_u32 as i32;
@ -584,13 +586,14 @@ impl Forkserver {
/// ///
/// Shared memory feature is also available, but you have to set things up in your code. /// Shared memory feature is also available, but you have to set things up in your code.
/// Please refer to AFL++'s docs. <https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md> /// Please refer to AFL++'s docs. <https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md>
pub struct ForkserverExecutor<OT, S, SP> pub struct ForkserverExecutor<TC, OT, S, SP>
where where
SP: ShMemProvider, SP: ShMemProvider,
{ {
target: OsString, target: OsString,
args: Vec<OsString>, args: Vec<OsString>,
input_file: InputFile, input_file: InputFile,
target_bytes_converter: TC,
uses_shmem_testcase: bool, uses_shmem_testcase: bool,
forkserver: Forkserver, forkserver: Forkserver,
observers: OT, observers: OT,
@ -605,8 +608,9 @@ where
crash_exitcode: Option<i8>, crash_exitcode: Option<i8>,
} }
impl<OT, S, SP> Debug for ForkserverExecutor<OT, S, SP> impl<TC, OT, S, SP> Debug for ForkserverExecutor<TC, OT, S, SP>
where where
TC: Debug,
OT: Debug, OT: Debug,
SP: ShMemProvider, SP: ShMemProvider,
{ {
@ -615,6 +619,7 @@ where
.field("target", &self.target) .field("target", &self.target)
.field("args", &self.args) .field("args", &self.args)
.field("input_file", &self.input_file) .field("input_file", &self.input_file)
.field("target_bytes_converter", &self.target_bytes_converter)
.field("uses_shmem_testcase", &self.uses_shmem_testcase) .field("uses_shmem_testcase", &self.uses_shmem_testcase)
.field("forkserver", &self.forkserver) .field("forkserver", &self.forkserver)
.field("observers", &self.observers) .field("observers", &self.observers)
@ -623,15 +628,17 @@ where
} }
} }
impl ForkserverExecutor<(), (), UnixShMemProvider> { impl ForkserverExecutor<(), (), (), UnixShMemProvider> {
/// Builder for `ForkserverExecutor` /// Builder for `ForkserverExecutor`
#[must_use] #[must_use]
pub fn builder() -> ForkserverExecutorBuilder<'static, UnixShMemProvider> { pub fn builder(
) -> ForkserverExecutorBuilder<'static, NopTargetBytesConverter<BytesInput>, UnixShMemProvider>
{
ForkserverExecutorBuilder::new() ForkserverExecutorBuilder::new()
} }
} }
impl<OT, S, SP> ForkserverExecutor<OT, S, SP> impl<TC, OT, S, SP> ForkserverExecutor<TC, OT, S, SP>
where where
OT: ObserversTuple<S::Input, S>, OT: ObserversTuple<S::Input, S>,
S: UsesInput, S: UsesInput,
@ -671,7 +678,7 @@ where
/// The builder for `ForkserverExecutor` /// The builder for `ForkserverExecutor`
#[derive(Debug)] #[derive(Debug)]
#[allow(clippy::struct_excessive_bools)] #[allow(clippy::struct_excessive_bools)]
pub struct ForkserverExecutorBuilder<'a, SP> { pub struct ForkserverExecutorBuilder<'a, TC, SP> {
program: Option<OsString>, program: Option<OsString>,
arguments: Vec<OsString>, arguments: Vec<OsString>,
envs: Vec<(OsString, OsString)>, envs: Vec<(OsString, OsString)>,
@ -691,9 +698,10 @@ pub struct ForkserverExecutorBuilder<'a, SP> {
#[cfg(feature = "regex")] #[cfg(feature = "regex")]
asan_obs: Option<Handle<AsanBacktraceObserver>>, asan_obs: Option<Handle<AsanBacktraceObserver>>,
crash_exitcode: Option<i8>, crash_exitcode: Option<i8>,
target_bytes_converter: TC,
} }
impl<'a, SP> ForkserverExecutorBuilder<'a, SP> impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP>
where where
SP: ShMemProvider, SP: ShMemProvider,
{ {
@ -703,11 +711,12 @@ where
/// in case no input file is specified. /// in case no input file is specified.
/// If `debug_child` is set, the child will print to `stdout`/`stderr`. /// If `debug_child` is set, the child will print to `stdout`/`stderr`.
#[allow(clippy::pedantic)] #[allow(clippy::pedantic)]
pub fn build<OT, S>(&mut self, observers: OT) -> Result<ForkserverExecutor<OT, S, SP>, Error> pub fn build<OT, S>(mut self, observers: OT) -> Result<ForkserverExecutor<TC, OT, S, SP>, Error>
where where
OT: ObserversTuple<S::Input, S>, OT: ObserversTuple<S::Input, S>,
S: UsesInput, S: UsesInput,
S::Input: Input + HasTargetBytes, S::Input: Input,
TC: TargetBytesConverter,
SP: ShMemProvider, SP: ShMemProvider,
{ {
let (forkserver, input_file, map) = self.build_helper()?; let (forkserver, input_file, map) = self.build_helper()?;
@ -758,16 +767,17 @@ where
.clone() .clone()
.unwrap_or(AsanBacktraceObserver::default().handle()), .unwrap_or(AsanBacktraceObserver::default().handle()),
crash_exitcode: self.crash_exitcode, crash_exitcode: self.crash_exitcode,
target_bytes_converter: self.target_bytes_converter,
}) })
} }
/// Builds `ForkserverExecutor` downsizing the coverage map to fit exaclty the AFL++ map size. /// Builds `ForkserverExecutor` downsizing the coverage map to fit exaclty the AFL++ map size.
#[allow(clippy::pedantic)] #[allow(clippy::pedantic)]
pub fn build_dynamic_map<A, MO, OT, S>( pub fn build_dynamic_map<A, MO, OT, S>(
&mut self, mut self,
mut map_observer: A, mut map_observer: A,
other_observers: OT, other_observers: OT,
) -> Result<ForkserverExecutor<(A, OT), S, SP>, Error> ) -> Result<ForkserverExecutor<TC, (A, OT), S, SP>, Error>
where where
MO: MapObserver + Truncate, // TODO maybe enforce Entry = u8 for the cov map MO: MapObserver + Truncate, // TODO maybe enforce Entry = u8 for the cov map
A: Observer<S::Input, S> + AsMut<MO>, A: Observer<S::Input, S> + AsMut<MO>,
@ -822,6 +832,7 @@ where
.clone() .clone()
.unwrap_or(AsanBacktraceObserver::default().handle()), .unwrap_or(AsanBacktraceObserver::default().handle()),
crash_exitcode: self.crash_exitcode, crash_exitcode: self.crash_exitcode,
target_bytes_converter: self.target_bytes_converter,
}) })
} }
@ -957,7 +968,7 @@ where
} }
} }
if status & FS_NEW_OPT_AUTODICT != 0 { if status & FS_NEW_OPT_AUTODTCT != 0 {
// Here unlike shmem input fuzzing, we are forced to read things // Here unlike shmem input fuzzing, we are forced to read things
// hence no self.autotokens.is_some() to check if we proceed // hence no self.autotokens.is_some() to check if we proceed
let autotokens_size = forkserver.read_st().map_err(|err| { let autotokens_size = forkserver.read_st().map_err(|err| {
@ -1010,14 +1021,14 @@ where
self.set_map_size(fsrv_map_size)?; self.set_map_size(fsrv_map_size)?;
} }
// Only with SHMEM or AUTODICT we can send send_status back or it breaks! // Only with SHMEM or AUTODTCT we can send send_status back or it breaks!
// If forkserver is responding, we then check if there's any option enabled. // If forkserver is responding, we then check if there's any option enabled.
// We'll send 4-bytes message back to the forkserver to tell which features to use // We'll send 4-bytes message back to the forkserver to tell which features to use
// The forkserver is listening to our response if either shmem fuzzing is enabled or auto dict is enabled // The forkserver is listening to our response if either shmem fuzzing is enabled or auto dict is enabled
// <https://github.com/AFLplusplus/AFLplusplus/blob/147654f8715d237fe45c1657c87b2fe36c4db22a/instrumentation/afl-compiler-rt.o.c#L1026> // <https://github.com/AFLplusplus/AFLplusplus/blob/147654f8715d237fe45c1657c87b2fe36c4db22a/instrumentation/afl-compiler-rt.o.c#L1026>
if status & FS_OPT_ENABLED == FS_OPT_ENABLED if status & FS_OPT_ENABLED == FS_OPT_ENABLED
&& (status & FS_OPT_SHDMEM_FUZZ == FS_OPT_SHDMEM_FUZZ && (status & FS_OPT_SHDMEM_FUZZ == FS_OPT_SHDMEM_FUZZ
|| status & FS_OPT_AUTODICT == FS_OPT_AUTODICT) || status & FS_OPT_AUTODTCT == FS_OPT_AUTODTCT)
{ {
let mut send_status = FS_OPT_ENABLED; let mut send_status = FS_OPT_ENABLED;
@ -1027,9 +1038,9 @@ where
self.uses_shmem_testcase = true; self.uses_shmem_testcase = true;
} }
if (status & FS_OPT_AUTODICT == FS_OPT_AUTODICT) && self.autotokens.is_some() { if (status & FS_OPT_AUTODTCT == FS_OPT_AUTODTCT) && self.autotokens.is_some() {
log::info!("Using AUTODICT feature"); log::info!("Using AUTODTCT feature");
send_status |= FS_OPT_AUTODICT; send_status |= FS_OPT_AUTODTCT;
} }
if send_status != FS_OPT_ENABLED { if send_status != FS_OPT_ENABLED {
@ -1042,7 +1053,7 @@ where
))); )));
} }
if (send_status & FS_OPT_AUTODICT) == FS_OPT_AUTODICT { if (send_status & FS_OPT_AUTODTCT) == FS_OPT_AUTODTCT {
let dict_size = forkserver.read_st().map_err(|err| { let dict_size = forkserver.read_st().map_err(|err| {
Error::illegal_state(format!("Reading from forkserver failed: {err:?}")) Error::illegal_state(format!("Reading from forkserver failed: {err:?}"))
})?; })?;
@ -1318,7 +1329,7 @@ where
} }
} }
impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> { impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, UnixShMemProvider> {
/// Creates a new `AFL`-style [`ForkserverExecutor`] with the given target, arguments and observers. /// Creates a new `AFL`-style [`ForkserverExecutor`] with the given target, arguments and observers.
/// This is the builder for `ForkserverExecutor` /// This is the builder for `ForkserverExecutor`
/// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. /// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given.
@ -1326,7 +1337,8 @@ impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
/// in case no input file is specified. /// in case no input file is specified.
/// If `debug_child` is set, the child will print to `stdout`/`stderr`. /// If `debug_child` is set, the child will print to `stdout`/`stderr`.
#[must_use] #[must_use]
pub fn new() -> ForkserverExecutorBuilder<'a, UnixShMemProvider> { pub fn new(
) -> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, UnixShMemProvider> {
ForkserverExecutorBuilder { ForkserverExecutorBuilder {
program: None, program: None,
arguments: vec![], arguments: vec![],
@ -1346,14 +1358,17 @@ impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
timeout: None, timeout: None,
asan_obs: None, asan_obs: None,
crash_exitcode: None, crash_exitcode: None,
target_bytes_converter: NopTargetBytesConverter::new(),
} }
} }
}
impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> {
/// Shmem provider for forkserver's shared memory testcase feature. /// Shmem provider for forkserver's shared memory testcase feature.
pub fn shmem_provider<SP: ShMemProvider>( pub fn shmem_provider<SP: ShMemProvider>(
self, self,
shmem_provider: &'a mut SP, shmem_provider: &'a mut SP,
) -> ForkserverExecutorBuilder<'a, SP> { ) -> ForkserverExecutorBuilder<'a, TC, SP> {
ForkserverExecutorBuilder { ForkserverExecutorBuilder {
// Set the new provider // Set the new provider
shmem_provider: Some(shmem_provider), shmem_provider: Some(shmem_provider),
@ -1375,22 +1390,57 @@ impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
timeout: self.timeout, timeout: self.timeout,
asan_obs: self.asan_obs, asan_obs: self.asan_obs,
crash_exitcode: self.crash_exitcode, crash_exitcode: self.crash_exitcode,
target_bytes_converter: self.target_bytes_converter,
} }
} }
} }
impl Default for ForkserverExecutorBuilder<'_, UnixShMemProvider> { impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> {
/// Shmem provider for forkserver's shared memory testcase feature.
pub fn target_bytes_converter<TC2: TargetBytesConverter>(
self,
target_bytes_converter: TC2,
) -> ForkserverExecutorBuilder<'a, TC2, SP> {
ForkserverExecutorBuilder {
// Set the new provider
shmem_provider: self.shmem_provider,
// Copy all other values from the old Builder
program: self.program,
arguments: self.arguments,
envs: self.envs,
debug_child: self.debug_child,
use_stdin: self.use_stdin,
uses_shmem_testcase: self.uses_shmem_testcase,
is_persistent: self.is_persistent,
is_deferred_frksrv: self.is_deferred_frksrv,
autotokens: self.autotokens,
input_filename: self.input_filename,
map_size: self.map_size,
max_input_size: self.max_input_size,
min_input_size: self.min_input_size,
kill_signal: self.kill_signal,
timeout: self.timeout,
asan_obs: self.asan_obs,
crash_exitcode: self.crash_exitcode,
target_bytes_converter: target_bytes_converter,
}
}
}
impl Default
for ForkserverExecutorBuilder<'_, NopTargetBytesConverter<BytesInput>, UnixShMemProvider>
{
fn default() -> Self { fn default() -> Self {
Self::new() Self::new()
} }
} }
impl<EM, OT, S, SP, Z> Executor<EM, Z> for ForkserverExecutor<OT, S, SP> impl<EM, TC, OT, S, SP, Z> Executor<EM, Z> for ForkserverExecutor<TC, OT, S, SP>
where where
OT: ObserversTuple<S::Input, S>, OT: ObserversTuple<S::Input, S>,
SP: ShMemProvider, SP: ShMemProvider,
S: State + HasExecutions, S: State + HasExecutions,
S::Input: HasTargetBytes, TC: TargetBytesConverter<Input = S::Input>,
EM: UsesState<State = S>, EM: UsesState<State = S>,
Z: UsesState<State = S>, Z: UsesState<State = S>,
{ {
@ -1408,7 +1458,7 @@ where
let last_run_timed_out = self.forkserver.last_run_timed_out_raw(); let last_run_timed_out = self.forkserver.last_run_timed_out_raw();
let mut input_bytes = input.target_bytes(); let mut input_bytes = self.target_bytes_converter.to_target_bytes(input);
let mut input_size = input_bytes.as_slice().len(); let mut input_size = input_bytes.as_slice().len();
if input_size > self.max_input_size { if input_size > self.max_input_size {
// Truncate like AFL++ does // Truncate like AFL++ does
@ -1497,7 +1547,7 @@ where
} }
} }
impl<OT, S, SP> UsesState for ForkserverExecutor<OT, S, SP> impl<TC, OT, S, SP> UsesState for ForkserverExecutor<TC, OT, S, SP>
where where
S: State, S: State,
SP: ShMemProvider, SP: ShMemProvider,
@ -1505,7 +1555,7 @@ where
type State = S; type State = S;
} }
impl<OT, S, SP> HasObservers for ForkserverExecutor<OT, S, SP> impl<TC, OT, S, SP> HasObservers for ForkserverExecutor<TC, OT, S, SP>
where where
OT: ObserversTuple<S::Input, S>, OT: ObserversTuple<S::Input, S>,
S: State, S: State,

View File

@ -91,9 +91,9 @@ pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug {
/// Convert between two input types with a state /// Convert between two input types with a state
pub trait InputConverter: Debug { pub trait InputConverter: Debug {
/// Source type /// Source type
type From: Input; type From;
/// Destination type /// Destination type
type To: Input; type To;
/// Convert the src type to the dest /// Convert the src type to the dest
fn convert(&mut self, input: Self::From) -> Result<Self::To, Error>; fn convert(&mut self, input: Self::From) -> Result<Self::To, Error>;
@ -342,3 +342,42 @@ where
(self.convert_cb)(input) (self.convert_cb)(input)
} }
} }
/// A converter that converts from `input` to target bytes
pub trait TargetBytesConverter {
    /// The input type this converter consumes
    type Input;

    /// Create target bytes from `input`.
    ///
    /// The result is an [`OwnedSlice`], so implementations may either borrow
    /// from `input` (like the nop converter) or return a freshly built,
    /// owned buffer (like the Nautilus converter).
    fn to_target_bytes<'a>(&mut self, input: &'a Self::Input) -> OwnedSlice<'a, u8>;
}
/// Pass-through converter: simply returns the target bytes that a
/// [`HasTargetBytes`] input already carries, without any transformation.
#[derive(Debug)]
pub struct NopTargetBytesConverter<I> {
    phantom: PhantomData<I>,
}

impl<I> Default for NopTargetBytesConverter<I> {
    fn default() -> Self {
        Self::new()
    }
}

impl<I> NopTargetBytesConverter<I> {
    /// Create a new [`NopTargetBytesConverter`]
    #[must_use]
    pub fn new() -> NopTargetBytesConverter<I> {
        NopTargetBytesConverter {
            phantom: PhantomData,
        }
    }
}

impl<I: HasTargetBytes> TargetBytesConverter for NopTargetBytesConverter<I> {
    type Input = I;

    fn to_target_bytes<'a>(&mut self, input: &'a Self::Input) -> OwnedSlice<'a, u8> {
        // Delegate straight to the input's own byte representation.
        input.target_bytes()
    }
}

View File

@ -5,9 +5,10 @@ use alloc::{rc::Rc, string::String, vec::Vec};
use core::cell::RefCell; use core::cell::RefCell;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use libafl_bolts::HasLen; use libafl_bolts::{ownedref::OwnedSlice, HasLen};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::TargetBytesConverter;
use crate::{ use crate::{
common::nautilus::grammartec::{ common::nautilus::grammartec::{
newtypes::NodeId, newtypes::NodeId,
@ -137,3 +138,28 @@ impl InputConverter for NautilusToBytesInputConverter<'_> {
Ok(BytesInput::new(bytes)) Ok(BytesInput::new(bytes))
} }
} }
/// A converter to convert a nautilus input to target bytes,
/// using the grammar in a [`NautilusContext`].
#[derive(Debug)]
pub struct NautilusTargetBytesConverter<'a> {
    /// The Nautilus Context used to unparse the input tree
    ctx: &'a NautilusContext,
}

impl<'a> NautilusTargetBytesConverter<'a> {
    /// Create a new [`NautilusTargetBytesConverter`]
    #[must_use]
    pub fn new(ctx: &'a NautilusContext) -> NautilusTargetBytesConverter<'a> {
        NautilusTargetBytesConverter { ctx }
    }
}

impl TargetBytesConverter for NautilusTargetBytesConverter<'_> {
    type Input = NautilusInput;

    fn to_target_bytes<'a>(&mut self, input: &'a Self::Input) -> OwnedSlice<'a, u8> {
        // Unparse the derivation tree into concrete bytes.
        // NOTE(review): allocates a fresh Vec per call; the returned slice is
        // owned, not borrowed from `input`.
        let mut bytes = Vec::new();
        input.unparse(self.ctx, &mut bytes);
        OwnedSlice::from(bytes)
    }
}
}