Make fuzzer hold testcase bytes converter (#3127)

* add

* fixing

* done

* fix

* a

* no std

* mm

* aaa

* fix

* mm

* please stop

* please stop

* please stop

* Just

* aaaaa

* aaa

* plz stop

* aaa

* mmm

* aaa

* AAAAAAAAAAAAAA

* a bit faster CI?

* win

* wtf are these garbages..?

* no cache?

* ???

* always

* ?/

* aa

* aa

* glib

* glib

* pixman

* AAA

* AA

* unicorn

* unicron

* nyx
Dongjia "toka" Zhang 2025-04-24 19:34:24 +02:00 committed by GitHub
parent 50e1239de9
commit 95f7c155bd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 310 additions and 338 deletions
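For context before the diff: this commit moves testcase-bytes conversion out of the executors and into the fuzzer itself. The sketch below is illustrative only, not code from the commit; the `build_fuzzer` helper and its generic parameters are invented, while the builder, filter, and converter calls are taken from the changed code further down (and assume the `std` feature for `BloomInputFilter`).

use libafl::{inputs::NopBytesConverter, BloomInputFilter, Error, StdFuzzer, StdFuzzerBuilder};

/// Hypothetical helper: construct a fuzzer the way this commit expects,
/// given already-built scheduler, feedback, and objective components.
fn build_fuzzer<CS, F, OF>(
    scheduler: CS,
    feedback: F,
    objective: OF,
) -> Result<StdFuzzer<CS, F, NopBytesConverter, BloomInputFilter, OF>, Error> {
    // Before this commit:
    // StdFuzzer::with_bloom_input_filter(scheduler, feedback, objective, 10_000_000, 0.001)
    StdFuzzerBuilder::new()
        .input_filter(BloomInputFilter::new(10_000_000, 0.001))
        .bytes_converter(NopBytesConverter::default())
        .build(scheduler, feedback, objective)
}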

View File

@ -270,7 +270,7 @@ jobs:
- ./fuzzers/binary_only/fuzzbench_qemu - ./fuzzers/binary_only/fuzzbench_qemu
- ./fuzzers/binary_only/intel_pt_baby_fuzzer - ./fuzzers/binary_only/intel_pt_baby_fuzzer
- ./fuzzers/binary_only/intel_pt_command_executor - ./fuzzers/binary_only/intel_pt_command_executor
- ./fuzzers/binary_only/tinyinst_simple # - ./fuzzers/binary_only/tinyinst_simple
# Forkserver # Forkserver
- ./fuzzers/forkserver/forkserver_simple - ./fuzzers/forkserver/forkserver_simple
@ -326,6 +326,8 @@ jobs:
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: ./.github/workflows/fuzzer-tester-prepare - uses: ./.github/workflows/fuzzer-tester-prepare
with:
fuzzer-name: ${{ matrix.fuzzer }}
- name: Build and run example fuzzers (Linux) - name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux' if: runner.os == 'Linux'
shell: bash shell: bash
@ -366,7 +368,7 @@ jobs:
- name: "Install dependencies" - name: "Install dependencies"
if: runner.os == 'Linux' if: runner.os == 'Linux'
shell: bash shell: bash
run: sudo apt-get update && sudo apt-get install gcc gcc-arm-linux-gnueabihf gcc-aarch64-linux-gnu run: sudo apt-get update && sudo apt-get install gcc gcc-arm-linux-gnueabihf gcc-aarch64-linux-gnu gcc-riscv64-linux-gnu
- name: Build and run example fuzzers (Linux) - name: Build and run example fuzzers (Linux)
if: runner.os == 'Linux' if: runner.os == 'Linux'
shell: bash shell: bash

View File

@ -1,5 +1,9 @@
name: Setup Rust Environment name: Setup Rust Environment
description: Sets up the Rust environment for the CI workflow description: Sets up the Rust environment for the CI workflow
inputs:
fuzzer-name:
description: 'The fuzzer name to run'
required: true
runs: runs:
using: composite using: composite
steps: steps:
@ -8,33 +12,33 @@ runs:
submodules: true submodules: true
fetch-depth: 0 fetch-depth: 0
- uses: ./.github/workflows/ubuntu-prepare - uses: ./.github/workflows/ubuntu-prepare
- uses: Swatinem/rust-cache@v2
with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" }
- name: Install fuzzers deps
shell: bash
run: sudo apt-get update && sudo apt-get install -y nasm nlohmann-json3-dev gcc-aarch64-linux-gnu g++-aarch64-linux-gnu gcc-mipsel-linux-gnu g++-mipsel-linux-gnu gcc-riscv64-linux-gnu gcc-powerpc-linux-gnu g++-powerpc-linux-gnu libc6-dev-i386-cross libc6-dev libc6-dev-i386 lib32gcc-11-dev lib32stdc++-11-dev libgtk-3-dev pax-utils python3-msgpack python3-jinja2
- name: enable multi-thread for `make` - name: enable multi-thread for `make`
shell: bash shell: bash
run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)" run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)"
- name: Add no_std toolchain - name: Add no_std toolchain
if: ${{ inputs.fuzzer-name == './fuzzers/fuzz_anything/baby_no_std' }}
shell: bash shell: bash
run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
- name: Add wasm target - name: Add nyx deps
if: ${{ inputs.fuzzer-name == './fuzzers/full_system/nyx_launcher' || inputs.fuzzer-name == './fuzzers/full_system/nyx_libxml2_standalone' || inputs.fuzzer-name == './fuzzers/full_system/nyx_libxml2_parallel' }}
shell: bash shell: bash
run: rustup target add wasm32-unknown-unknown run: sudo apt update && sudo apt install -y libgtk-3-dev pax-utils python3-msgpack python3-jinja2 libcapstone-dev
- name: install just - name: install just
uses: extractions/setup-just@v2 uses: extractions/setup-just@v2
with: with:
just-version: 1.39.0 just-version: '1.40.0'
- name: Add wasm target
if: ${{ inputs.fuzzer-name == './fuzzers/fuzz_anything/baby_fuzzer_wasm' }}
shell: bash
run: rustup target add wasm32-unknown-unknown
- name: install wasm-pack - name: install wasm-pack
if: ${{ inputs.fuzzer-name == './fuzzers/fuzz_anything/baby_fuzzer_wasm' }}
uses: baptiste0928/cargo-install@v3 uses: baptiste0928/cargo-install@v3
with: with:
crate: wasm-pack crate: wasm-pack
- name: install cxxbridge-cmd
uses: baptiste0928/cargo-install@v3
with:
crate: cxxbridge-cmd
- name: install chrome - name: install chrome
if: ${{ inputs.fuzzer-name == './fuzzers/fuzz_anything/baby_fuzzer_wasm' }}
uses: browser-actions/setup-chrome@v1 uses: browser-actions/setup-chrome@v1
with: with:
chrome-version: stable chrome-version: stable

View File

@ -47,7 +47,7 @@ runs:
- name: install just - name: install just
uses: extractions/setup-just@v2 uses: extractions/setup-just@v2
with: with:
just-version: 1.39.0 just-version: '1.40.0'
- name: Install cargo-binstall - name: Install cargo-binstall
shell: bash shell: bash
run: | run: |

View File

@ -16,7 +16,7 @@ runs:
- name: install just - name: install just
uses: extractions/setup-just@v2 uses: extractions/setup-just@v2
with: with:
just-version: 1.39.0 just-version: '1.40.0'
- uses: actions/checkout@v4 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true

View File

@ -7,13 +7,12 @@ runs:
shell: bash shell: bash
run: | run: |
sudo apt-get update sudo apt-get update
sudo apt-get install -y curl lsb-release wget software-properties-common gnupg shellcheck pax-utils \ sudo apt-get install -y curl lsb-release wget software-properties-common gnupg shellcheck pax-utils libsqlite3-dev libpixman-1-dev libc6-dev gcc g++ build-essential libglib2.0-dev
nasm libsqlite3-dev libc6-dev libgtk-3-dev gcc g++ libslirp-dev libz3-dev build-essential \
- uses: dtolnay/rust-toolchain@stable - uses: dtolnay/rust-toolchain@stable
- name: install just - name: install just
uses: extractions/setup-just@v2 uses: extractions/setup-just@v2
with: with:
just-version: 1.39.0 just-version: '1.40.0'
- name: Add stable clippy - name: Add stable clippy
shell: bash shell: bash
run: rustup toolchain install stable --component clippy --allow-downgrade run: rustup toolchain install stable --component clippy --allow-downgrade

View File

@ -18,4 +18,4 @@ runs:
- name: install just - name: install just
uses: extractions/setup-just@v2 uses: extractions/setup-just@v2
with: with:
just-version: 1.39.0 just-version: '1.40.0'

.gitignore (vendored): 3 changes
View File

@ -5,9 +5,6 @@ vendor
# cargo lockfiles except from binaries # cargo lockfiles except from binaries
**/Cargo.lock **/Cargo.lock
# !fuzzers/**/Cargo.lock
# !utils/**/Cargo.lock
.DS_Store .DS_Store
.env .env

View File

@ -14,15 +14,15 @@ use libafl::{
feedbacks::{CrashFeedback, MaxMapFeedback}, feedbacks::{CrashFeedback, MaxMapFeedback},
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
generators::RandPrintablesGenerator, generators::RandPrintablesGenerator,
inputs::HasTargetBytes, inputs::{HasTargetBytes, NopBytesConverter},
mutators::{havoc_mutations::havoc_mutations, scheduled::HavocScheduledMutator}, mutators::{havoc_mutations::havoc_mutations, scheduled::HavocScheduledMutator},
observers::StdMapObserver, observers::StdMapObserver,
schedulers::QueueScheduler, schedulers::QueueScheduler,
stages::{mutational::StdMutationalStage, AflStatsStage, CalibrationStage}, stages::{mutational::StdMutationalStage, AflStatsStage, CalibrationStage},
state::{HasCorpus, HasExecutions, StdState}, state::{HasCorpus, HasExecutions, StdState},
BloomInputFilter, StdFuzzerBuilder,
}; };
use libafl_bolts::{current_nanos, nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; use libafl_bolts::{current_nanos, nonzero, rands::StdRand, tuples::tuple_list, AsSlice};
/// Coverage map with explicit assignments due to the lack of instrumentation /// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16]; static mut SIGNALS: [u8; 16] = [0; 16];
static mut SIGNALS_PTR: *mut u8 = &raw mut SIGNALS as _; static mut SIGNALS_PTR: *mut u8 = &raw mut SIGNALS as _;
@ -138,8 +138,13 @@ pub fn main() {
#[cfg(not(feature = "bloom_input_filter"))] #[cfg(not(feature = "bloom_input_filter"))]
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
#[cfg(feature = "bloom_input_filter")] #[cfg(feature = "bloom_input_filter")]
let mut fuzzer = let filter = BloomInputFilter::new(10_000_000, 0.001);
StdFuzzer::with_bloom_input_filter(scheduler, feedback, objective, 10_000_000, 0.001); #[cfg(feature = "bloom_input_filter")]
let mut fuzzer = StdFuzzerBuilder::new()
.input_filter(filter)
.bytes_converter(NopBytesConverter::default())
.build(scheduler, feedback, objective)
.unwrap();
// Create the executor for an in-process function with just one observer // Create the executor for an in-process function with just one observer
let executor = CustomExecutor::new(&state); let executor = CustomExecutor::new(&state);

View File

@ -22,7 +22,7 @@ use libafl::{
CaptureTimeoutFeedback, ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback, CaptureTimeoutFeedback, ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback,
}, },
fuzzer::StdFuzzer, fuzzer::StdFuzzer,
inputs::{BytesInput, NopTargetBytesConverter}, inputs::BytesInput,
mutators::{havoc_mutations, tokens_mutations, AFLppRedQueen, HavocScheduledMutator, Tokens}, mutators::{havoc_mutations, tokens_mutations, AFLppRedQueen, HavocScheduledMutator, Tokens},
observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver},
schedulers::{ schedulers::{
@ -567,7 +567,7 @@ fn base_forkserver_builder<'a>(
opt: &'a Opt, opt: &'a Opt,
shmem_provider: &'a mut UnixShMemProvider, shmem_provider: &'a mut UnixShMemProvider,
fuzzer_dir: &Path, fuzzer_dir: &Path,
) -> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, UnixShMemProvider> { ) -> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
let mut executor = ForkserverExecutor::builder() let mut executor = ForkserverExecutor::builder()
.program(opt.executable.clone()) .program(opt.executable.clone())
.coverage_map_size(opt.map_size.unwrap_or(AFL_DEFAULT_MAP_SIZE)) .coverage_map_size(opt.map_size.unwrap_or(AFL_DEFAULT_MAP_SIZE))

View File

@ -10,9 +10,9 @@ use libafl::{
feedbacks::{ feedbacks::{
CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback, TimeFeedback, CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback, TimeFeedback,
}, },
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::Fuzzer,
generators::{NautilusContext, NautilusGenerator}, generators::{NautilusContext, NautilusGenerator},
inputs::{NautilusInput, NautilusTargetBytesConverter}, inputs::{NautilusBytesConverter, NautilusInput},
monitors::SimpleMonitor, monitors::SimpleMonitor,
mutators::{ mutators::{
HavocScheduledMutator, NautilusRandomMutator, NautilusRecursionMutator, HavocScheduledMutator, NautilusRandomMutator, NautilusRecursionMutator,
@ -22,7 +22,7 @@ use libafl::{
schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler},
stages::mutational::StdMutationalStage, stages::mutational::StdMutationalStage,
state::StdState, state::StdState,
HasMetadata, BloomInputFilter, HasMetadata, StdFuzzerBuilder,
}; };
use libafl_bolts::{ use libafl_bolts::{
current_nanos, current_nanos,
@ -166,7 +166,12 @@ pub fn main() {
let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new());
// A fuzzer with feedbacks and a corpus scheduler // A fuzzer with feedbacks and a corpus scheduler
let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); let converter = NautilusBytesConverter::new(&context);
let mut fuzzer = StdFuzzerBuilder::new()
.input_filter(BloomInputFilter::default())
.bytes_converter(converter)
.build(scheduler, feedback, objective)
.unwrap();
// If we should debug the child // If we should debug the child
let debug_child = opt.debug_child; let debug_child = opt.debug_child;
@ -186,7 +191,6 @@ pub fn main() {
.coverage_map_size(MAP_SIZE) .coverage_map_size(MAP_SIZE)
.timeout(Duration::from_millis(opt.timeout)) .timeout(Duration::from_millis(opt.timeout))
.kill_signal(opt.signal) .kill_signal(opt.signal)
.target_bytes_converter(NautilusTargetBytesConverter::new(&context))
.build(tuple_list!(time_observer, edges_observer)) .build(tuple_list!(time_observer, edges_observer))
.unwrap(); .unwrap();

View File

@ -15,7 +15,7 @@ use libafl::{
feedbacks::{CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback}, feedbacks::{CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback},
fuzzer::{Fuzzer, StdFuzzer}, fuzzer::{Fuzzer, StdFuzzer},
generators::{NautilusContext, NautilusGenerator}, generators::{NautilusContext, NautilusGenerator},
inputs::{NautilusInput, NautilusToBytesInputConverter}, inputs::{NautilusBytesConverter, NautilusInput},
monitors::SimpleMonitor, monitors::SimpleMonitor,
mutators::{ mutators::{
HavocScheduledMutator, NautilusRandomMutator, NautilusRecursionMutator, HavocScheduledMutator, NautilusRandomMutator, NautilusRecursionMutator,
@ -126,7 +126,7 @@ pub extern "C" fn libafl_main() {
.build_on_port( .build_on_port(
shmem_provider.clone(), shmem_provider.clone(),
port, port,
Some(NautilusToBytesInputConverter::new(&context)), Some(NautilusBytesConverter::new(&context)),
none_input_converter!(), none_input_converter!(),
) )
.unwrap() .unwrap()

View File

@ -366,7 +366,7 @@ where
"Not spawning broker (spawn_broker is false). Waiting for fuzzer children to exit..." "Not spawning broker (spawn_broker is false). Waiting for fuzzer children to exit..."
); );
unsafe { unsafe {
libc::waitpid(*handle, &mut status, 0); libc::waitpid(*handle, &raw mut status, 0);
if status != 0 { if status != 0 {
log::info!("Client with pid {handle} exited with status {status}"); log::info!("Client with pid {handle} exited with status {status}");
} }

View File

@ -18,7 +18,7 @@ use crate::{
Error, Error,
events::{Event, EventFirer, EventWithStats}, events::{Event, EventFirer, EventWithStats},
fuzzer::EvaluatorObservers, fuzzer::EvaluatorObservers,
inputs::{Input, InputConverter, NopInput, NopInputConverter}, inputs::{Input, InputConverter, NopInput},
state::{HasCurrentTestcase, HasSolutions, NopState}, state::{HasCurrentTestcase, HasSolutions, NopState},
}; };
@ -88,16 +88,7 @@ pub struct LlmpEventConverter<I, IC, ICB, S, SHM, SP> {
phantom: PhantomData<(I, S)>, phantom: PhantomData<(I, S)>,
} }
impl impl LlmpEventConverter<NopInput, (), (), NopState<NopInput>, NopShMem, NopShMemProvider> {
LlmpEventConverter<
NopInput,
NopInputConverter<NopInput>,
NopInputConverter<NopInput>,
NopState<NopInput>,
NopShMem,
NopShMemProvider,
>
{
/// Create a builder for [`LlmpEventConverter`] /// Create a builder for [`LlmpEventConverter`]
#[must_use] #[must_use]
pub fn builder() -> LlmpEventConverterBuilder { pub fn builder() -> LlmpEventConverterBuilder {

View File

@ -21,7 +21,6 @@ use libafl_bolts::{
AsSlice, AsSliceMut, InputLocation, TargetArgs, Truncate, AsSlice, AsSliceMut, InputLocation, TargetArgs, Truncate,
fs::{InputFile, get_unique_std_input_file}, fs::{InputFile, get_unique_std_input_file},
os::{dup2, pipes::Pipe}, os::{dup2, pipes::Pipe},
ownedref::OwnedSlice,
shmem::{ShMem, ShMemProvider, UnixShMem, UnixShMemProvider}, shmem::{ShMem, ShMemProvider, UnixShMem, UnixShMemProvider},
tuples::{Handle, Handled, MatchNameRef, Prepend, RefIndexable}, tuples::{Handle, Handled, MatchNameRef, Prepend, RefIndexable},
}; };
@ -42,9 +41,9 @@ use crate::observers::{
AsanBacktraceObserver, get_asan_runtime_flags, get_asan_runtime_flags_with_log_path, AsanBacktraceObserver, get_asan_runtime_flags, get_asan_runtime_flags_with_log_path,
}; };
use crate::{ use crate::{
Error, Error, HasBytesConverter,
executors::{Executor, ExitKind, HasObservers}, executors::{Executor, ExitKind, HasObservers},
inputs::{BytesInput, Input, NopTargetBytesConverter, TargetBytesConverter}, inputs::{Input, InputToBytes},
mutators::Tokens, mutators::Tokens,
observers::{MapObserver, Observer, ObserversTuple}, observers::{MapObserver, Observer, ObserversTuple},
state::HasExecutions, state::HasExecutions,
@ -223,7 +222,7 @@ impl ConfigTarget for Command {
#[cfg(target_os = "openbsd")] #[cfg(target_os = "openbsd")]
let ret = unsafe { libc::setrlimit(libc::RLIMIT_RSS, &r) }; let ret = unsafe { libc::setrlimit(libc::RLIMIT_RSS, &r) };
#[cfg(not(target_os = "openbsd"))] #[cfg(not(target_os = "openbsd"))]
let ret = unsafe { libc::setrlimit(libc::RLIMIT_AS, &r) }; let ret = unsafe { libc::setrlimit(libc::RLIMIT_AS, &raw const r) };
if ret < 0 { if ret < 0 {
return Err(io::Error::last_os_error()); return Err(io::Error::last_os_error());
} }
@ -240,7 +239,7 @@ impl ConfigTarget for Command {
rlim_cur: if enable { RLIM_INFINITY } else { 0 }, rlim_cur: if enable { RLIM_INFINITY } else { 0 },
rlim_max: if enable { RLIM_INFINITY } else { 0 }, rlim_max: if enable { RLIM_INFINITY } else { 0 },
}; };
let ret = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &r0) }; let ret = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &raw const r0) };
if ret < 0 { if ret < 0 {
return Err(io::Error::last_os_error()); return Err(io::Error::last_os_error());
} }
@ -576,11 +575,10 @@ impl Forkserver {
/// ///
/// Shared memory feature is also available, but you have to set things up in your code. /// Shared memory feature is also available, but you have to set things up in your code.
/// Please refer to AFL++'s docs. <https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md> /// Please refer to AFL++'s docs. <https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md>
pub struct ForkserverExecutor<I, OT, S, SHM, TC> { pub struct ForkserverExecutor<I, OT, S, SHM> {
target: OsString, target: OsString,
args: Vec<OsString>, args: Vec<OsString>,
input_file: InputFile, input_file: InputFile,
target_bytes_converter: TC,
uses_shmem_testcase: bool, uses_shmem_testcase: bool,
forkserver: Forkserver, forkserver: Forkserver,
observers: OT, observers: OT,
@ -595,9 +593,8 @@ pub struct ForkserverExecutor<I, OT, S, SHM, TC> {
crash_exitcode: Option<i8>, crash_exitcode: Option<i8>,
} }
impl<I, OT, S, SHM, TC> Debug for ForkserverExecutor<I, OT, S, SHM, TC> impl<I, OT, S, SHM> Debug for ForkserverExecutor<I, OT, S, SHM>
where where
TC: Debug,
OT: Debug, OT: Debug,
SHM: Debug, SHM: Debug,
{ {
@ -606,7 +603,6 @@ where
.field("target", &self.target) .field("target", &self.target)
.field("args", &self.args) .field("args", &self.args)
.field("input_file", &self.input_file) .field("input_file", &self.input_file)
.field("target_bytes_converter", &self.target_bytes_converter)
.field("uses_shmem_testcase", &self.uses_shmem_testcase) .field("uses_shmem_testcase", &self.uses_shmem_testcase)
.field("forkserver", &self.forkserver) .field("forkserver", &self.forkserver)
.field("observers", &self.observers) .field("observers", &self.observers)
@ -615,20 +611,17 @@ where
} }
} }
impl ForkserverExecutor<(), (), (), UnixShMem, ()> { impl ForkserverExecutor<(), (), UnixShMem, ()> {
/// Builder for `ForkserverExecutor` /// Builder for `ForkserverExecutor`
#[must_use] #[must_use]
pub fn builder() pub fn builder() -> ForkserverExecutorBuilder<'static, UnixShMemProvider> {
-> ForkserverExecutorBuilder<'static, NopTargetBytesConverter<BytesInput>, UnixShMemProvider>
{
ForkserverExecutorBuilder::new() ForkserverExecutorBuilder::new()
} }
} }
impl<I, OT, S, SHM, TC> ForkserverExecutor<I, OT, S, SHM, TC> impl<I, OT, S, SHM> ForkserverExecutor<I, OT, S, SHM>
where where
OT: ObserversTuple<I, S>, OT: ObserversTuple<I, S>,
TC: TargetBytesConverter<I>,
SHM: ShMem, SHM: ShMem,
{ {
/// The `target` binary that's going to run. /// The `target` binary that's going to run.
@ -663,7 +656,7 @@ where
/// Execute input and increase the execution counter. /// Execute input and increase the execution counter.
#[inline] #[inline]
fn execute_input(&mut self, state: &mut S, input: &I) -> Result<ExitKind, Error> fn execute_input(&mut self, state: &mut S, input: &[u8]) -> Result<ExitKind, Error>
where where
S: HasExecutions, S: HasExecutions,
{ {
@ -672,27 +665,7 @@ where
self.execute_input_uncounted(input) self.execute_input_uncounted(input)
} }
/// Execute input, but side-step the execution counter. fn map_input_to_shmem(&mut self, input: &[u8], input_size: usize) -> Result<(), Error> {
#[inline]
fn execute_input_uncounted(&mut self, input: &I) -> Result<ExitKind, Error> {
let mut exit_kind = ExitKind::Ok;
let last_run_timed_out = self.forkserver.last_run_timed_out_raw();
let mut input_bytes = self.target_bytes_converter.to_target_bytes(input);
let mut input_size = input_bytes.as_slice().len();
if input_size > self.max_input_size {
// Truncate like AFL++ does
input_size = self.max_input_size;
} else if input_size < self.min_input_size {
// Extend like AFL++ does
input_size = self.min_input_size;
let mut input_bytes_copy = Vec::with_capacity(input_size);
input_bytes_copy
.as_slice_mut()
.copy_from_slice(input_bytes.as_slice());
input_bytes = OwnedSlice::from(input_bytes_copy);
}
let input_size_in_bytes = input_size.to_ne_bytes(); let input_size_in_bytes = input_size.to_ne_bytes();
if self.uses_shmem_testcase { if self.uses_shmem_testcase {
debug_assert!( debug_assert!(
@ -706,10 +679,35 @@ where
map.as_slice_mut()[..SHMEM_FUZZ_HDR_SIZE] map.as_slice_mut()[..SHMEM_FUZZ_HDR_SIZE]
.copy_from_slice(&input_size_in_bytes[..SHMEM_FUZZ_HDR_SIZE]); .copy_from_slice(&input_size_in_bytes[..SHMEM_FUZZ_HDR_SIZE]);
map.as_slice_mut()[SHMEM_FUZZ_HDR_SIZE..(SHMEM_FUZZ_HDR_SIZE + input_size)] map.as_slice_mut()[SHMEM_FUZZ_HDR_SIZE..(SHMEM_FUZZ_HDR_SIZE + input_size)]
.copy_from_slice(&input_bytes.as_slice()[..input_size]); .copy_from_slice(&input[..input_size]);
} else { } else {
self.input_file self.input_file.write_buf(&input[..input_size])?;
.write_buf(&input_bytes.as_slice()[..input_size])?; }
Ok(())
}
/// Execute input, but side-step the execution counter.
#[inline]
fn execute_input_uncounted(&mut self, input: &[u8]) -> Result<ExitKind, Error> {
let mut exit_kind = ExitKind::Ok;
let last_run_timed_out = self.forkserver.last_run_timed_out_raw();
let mut input_size = input.len();
if input_size > self.max_input_size {
// Truncate like AFL++ does
input_size = self.max_input_size;
self.map_input_to_shmem(input, input_size)?;
} else if input_size < self.min_input_size {
// Extend like AFL++ does
input_size = self.min_input_size;
let mut input_bytes_copy = Vec::with_capacity(input_size);
input_bytes_copy
.as_slice_mut()
.copy_from_slice(input.as_slice());
self.map_input_to_shmem(&input_bytes_copy, input_size)?;
} else {
self.map_input_to_shmem(input, input_size)?;
} }
self.forkserver.set_last_run_timed_out(false); self.forkserver.set_last_run_timed_out(false);
@ -771,7 +769,7 @@ where
/// The builder for `ForkserverExecutor` /// The builder for `ForkserverExecutor`
#[derive(Debug)] #[derive(Debug)]
#[expect(clippy::struct_excessive_bools)] #[expect(clippy::struct_excessive_bools)]
pub struct ForkserverExecutorBuilder<'a, TC, SP> { pub struct ForkserverExecutorBuilder<'a, SP> {
program: Option<OsString>, program: Option<OsString>,
arguments: Vec<OsString>, arguments: Vec<OsString>,
envs: Vec<(OsString, OsString)>, envs: Vec<(OsString, OsString)>,
@ -790,10 +788,9 @@ pub struct ForkserverExecutorBuilder<'a, TC, SP> {
#[cfg(feature = "regex")] #[cfg(feature = "regex")]
asan_obs: Option<Handle<AsanBacktraceObserver>>, asan_obs: Option<Handle<AsanBacktraceObserver>>,
crash_exitcode: Option<i8>, crash_exitcode: Option<i8>,
target_bytes_converter: TC,
} }
impl<TC, SP> TargetArgs for ForkserverExecutorBuilder<'_, TC, SP> { impl<SP> TargetArgs for ForkserverExecutorBuilder<'_, SP> {
fn arguments_ref(&self) -> &Vec<OsString> { fn arguments_ref(&self) -> &Vec<OsString> {
&self.arguments &self.arguments
} }
@ -830,7 +827,7 @@ impl<TC, SP> TargetArgs for ForkserverExecutorBuilder<'_, TC, SP> {
} }
} }
impl<'a, TC, SHM, SP> ForkserverExecutorBuilder<'a, TC, SP> impl<'a, SHM, SP> ForkserverExecutorBuilder<'a, SP>
where where
SHM: ShMem, SHM: ShMem,
SP: ShMemProvider<ShMem = SHM>, SP: ShMemProvider<ShMem = SHM>,
@ -844,10 +841,9 @@ where
pub fn build<I, OT, S>( pub fn build<I, OT, S>(
mut self, mut self,
observers: OT, observers: OT,
) -> Result<ForkserverExecutor<I, OT, S, SHM, TC>, Error> ) -> Result<ForkserverExecutor<I, OT, S, SHM>, Error>
where where
OT: ObserversTuple<I, S>, OT: ObserversTuple<I, S>,
TC: TargetBytesConverter<I>,
{ {
let (forkserver, input_file, map) = self.build_helper()?; let (forkserver, input_file, map) = self.build_helper()?;
@ -898,17 +894,16 @@ where
.clone() .clone()
.unwrap_or(AsanBacktraceObserver::default().handle()), .unwrap_or(AsanBacktraceObserver::default().handle()),
crash_exitcode: self.crash_exitcode, crash_exitcode: self.crash_exitcode,
target_bytes_converter: self.target_bytes_converter,
}) })
} }
/// Builds `ForkserverExecutor` downsizing the coverage map to fit exactly the AFL++ map size. /// Builds `ForkserverExecutor` downsizing the coverage map to fit exactly the AFL++ map size.
#[expect(clippy::pedantic, clippy::type_complexity)] #[expect(clippy::pedantic)]
pub fn build_dynamic_map<A, MO, OT, I, S>( pub fn build_dynamic_map<A, MO, OT, I, S>(
mut self, mut self,
mut map_observer: A, mut map_observer: A,
other_observers: OT, other_observers: OT,
) -> Result<ForkserverExecutor<I, (A, OT), S, SHM, TC>, Error> ) -> Result<ForkserverExecutor<I, (A, OT), S, SHM>, Error>
where where
A: Observer<I, S> + AsMut<MO>, A: Observer<I, S> + AsMut<MO>,
I: Input, I: Input,
@ -962,7 +957,6 @@ where
.clone() .clone()
.unwrap_or(AsanBacktraceObserver::default().handle()), .unwrap_or(AsanBacktraceObserver::default().handle()),
crash_exitcode: self.crash_exitcode, crash_exitcode: self.crash_exitcode,
target_bytes_converter: self.target_bytes_converter,
}) })
} }
@ -1319,18 +1313,20 @@ where
/// Determine if the asan observer is present (always false if feature "regex" is disabled) /// Determine if the asan observer is present (always false if feature "regex" is disabled)
#[cfg(feature = "regex")] #[cfg(feature = "regex")]
#[must_use]
pub fn has_asan_obs(&self) -> bool { pub fn has_asan_obs(&self) -> bool {
self.asan_obs.is_some() self.asan_obs.is_some()
} }
/// Determine if the asan observer is present (always false if feature "regex" is disabled) /// Determine if the asan observer is present (always false if feature "regex" is disabled)
#[cfg(not(feature = "regex"))] #[cfg(not(feature = "regex"))]
#[must_use]
pub fn has_asan_obs(&self) -> bool { pub fn has_asan_obs(&self) -> bool {
false false
} }
} }
impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, UnixShMemProvider> { impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
/// Creates a new `AFL`-style [`ForkserverExecutor`] with the given target, arguments and observers. /// Creates a new `AFL`-style [`ForkserverExecutor`] with the given target, arguments and observers.
/// This is the builder for `ForkserverExecutor` /// This is the builder for `ForkserverExecutor`
/// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. /// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given.
@ -1338,8 +1334,7 @@ impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, Unix
/// in case no input file is specified. /// in case no input file is specified.
/// If `debug_child` is set, the child will print to `stdout`/`stderr`. /// If `debug_child` is set, the child will print to `stdout`/`stderr`.
#[must_use] #[must_use]
pub fn new() pub fn new() -> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
-> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, UnixShMemProvider> {
ForkserverExecutorBuilder { ForkserverExecutorBuilder {
program: None, program: None,
arguments: vec![], arguments: vec![],
@ -1359,17 +1354,16 @@ impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter<BytesInput>, Unix
#[cfg(feature = "regex")] #[cfg(feature = "regex")]
asan_obs: None, asan_obs: None,
crash_exitcode: None, crash_exitcode: None,
target_bytes_converter: NopTargetBytesConverter::new(),
} }
} }
} }
impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> { impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> {
/// Shmem provider for forkserver's shared memory testcase feature. /// Shmem provider for forkserver's shared memory testcase feature.
pub fn shmem_provider<SP>( pub fn shmem_provider<SP>(
self, self,
shmem_provider: &'a mut SP, shmem_provider: &'a mut SP,
) -> ForkserverExecutorBuilder<'a, TC, SP> { ) -> ForkserverExecutorBuilder<'a, SP> {
ForkserverExecutorBuilder { ForkserverExecutorBuilder {
// Set the new provider // Set the new provider
shmem_provider: Some(shmem_provider), shmem_provider: Some(shmem_provider),
@ -1391,71 +1385,39 @@ impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> {
#[cfg(feature = "regex")] #[cfg(feature = "regex")]
asan_obs: self.asan_obs, asan_obs: self.asan_obs,
crash_exitcode: self.crash_exitcode, crash_exitcode: self.crash_exitcode,
target_bytes_converter: self.target_bytes_converter,
} }
} }
} }
impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> { impl Default for ForkserverExecutorBuilder<'_, UnixShMemProvider> {
/// Shmem provider for forkserver's shared memory testcase feature.
pub fn target_bytes_converter<I, TC2: TargetBytesConverter<I>>(
self,
target_bytes_converter: TC2,
) -> ForkserverExecutorBuilder<'a, TC2, SP> {
ForkserverExecutorBuilder {
// Set the new provider
shmem_provider: self.shmem_provider,
// Copy all other values from the old Builder
program: self.program,
arguments: self.arguments,
envs: self.envs,
debug_child: self.debug_child,
uses_shmem_testcase: self.uses_shmem_testcase,
is_persistent: self.is_persistent,
is_deferred_frksrv: self.is_deferred_frksrv,
autotokens: self.autotokens,
input_location: InputLocation::StdIn,
map_size: self.map_size,
max_input_size: self.max_input_size,
min_input_size: self.min_input_size,
kill_signal: self.kill_signal,
timeout: self.timeout,
#[cfg(feature = "regex")]
asan_obs: self.asan_obs,
crash_exitcode: self.crash_exitcode,
target_bytes_converter,
}
}
}
impl Default
for ForkserverExecutorBuilder<'_, NopTargetBytesConverter<BytesInput>, UnixShMemProvider>
{
fn default() -> Self { fn default() -> Self {
Self::new() Self::new()
} }
} }
impl<EM, I, OT, S, SHM, TC, Z> Executor<EM, I, S, Z> for ForkserverExecutor<I, OT, S, SHM, TC> impl<EM, I, OT, S, SHM, Z> Executor<EM, I, S, Z> for ForkserverExecutor<I, OT, S, SHM>
where where
OT: ObserversTuple<I, S>, OT: ObserversTuple<I, S>,
S: HasExecutions, S: HasExecutions,
TC: TargetBytesConverter<I>,
SHM: ShMem, SHM: ShMem,
Z: HasBytesConverter,
Z::Converter: InputToBytes<I>,
{ {
#[inline] #[inline]
fn run_target( fn run_target(
&mut self, &mut self,
_fuzzer: &mut Z, fuzzer: &mut Z,
state: &mut S, state: &mut S,
_mgr: &mut EM, _mgr: &mut EM,
input: &I, input: &I,
) -> Result<ExitKind, Error> { ) -> Result<ExitKind, Error> {
self.execute_input(state, input) let converter = fuzzer.converter_mut();
let bytes = converter.to_bytes(input);
self.execute_input(state, bytes.as_slice())
} }
} }
impl<I, OT, S, SHM, TC> HasTimeout for ForkserverExecutor<I, OT, S, SHM, TC> { impl<I, OT, S, SHM> HasTimeout for ForkserverExecutor<I, OT, S, SHM> {
#[inline] #[inline]
fn timeout(&self) -> Duration { fn timeout(&self) -> Duration {
self.timeout.into() self.timeout.into()
@ -1467,7 +1429,7 @@ impl<I, OT, S, SHM, TC> HasTimeout for ForkserverExecutor<I, OT, S, SHM, TC> {
} }
} }
impl<I, OT, S, SHM, TC> HasObservers for ForkserverExecutor<I, OT, S, SHM, TC> impl<I, OT, S, SHM> HasObservers for ForkserverExecutor<I, OT, S, SHM>
where where
OT: ObserversTuple<I, S>, OT: ObserversTuple<I, S>,
{ {
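A minimal sketch of what the new bounds mean for executor authors. `MyExecutor` and `run_bytes` are invented for illustration; the `Z: HasBytesConverter` / `InputToBytes` bounds and the `converter_mut().to_bytes()` call mirror the `run_target` change above.

use libafl::{
    executors::{Executor, ExitKind},
    inputs::InputToBytes,
    Error, HasBytesConverter,
};
use libafl_bolts::AsSlice;

/// Hypothetical executor that only ever needs raw bytes.
#[derive(Debug)]
struct MyExecutor;

impl MyExecutor {
    fn run_bytes(&mut self, _bytes: &[u8]) -> ExitKind {
        // Hand the bytes to the target here (elided).
        ExitKind::Ok
    }
}

impl<EM, I, S, Z> Executor<EM, I, S, Z> for MyExecutor
where
    Z: HasBytesConverter,
    Z::Converter: InputToBytes<I>,
{
    fn run_target(
        &mut self,
        fuzzer: &mut Z,
        _state: &mut S,
        _mgr: &mut EM,
        input: &I,
    ) -> Result<ExitKind, Error> {
        // The fuzzer, not the executor, now holds the testcase bytes converter.
        let bytes = fuzzer.converter_mut().to_bytes(input);
        Ok(self.run_bytes(bytes.as_slice()))
    }
}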

View File

@ -190,7 +190,7 @@ impl TimerStruct {
let mut critical = CRITICAL_SECTION::default(); let mut critical = CRITICAL_SECTION::default();
unsafe { unsafe {
InitializeCriticalSection(&mut critical); InitializeCriticalSection(&raw mut critical);
} }
Self { Self {
milli_sec, milli_sec,
@ -258,7 +258,7 @@ impl TimerStruct {
// # Safety // # Safety
// Safe because the variables are all alive at this time and don't contain pointers. // Safe because the variables are all alive at this time and don't contain pointers.
unsafe { unsafe {
setitimer(ITIMER_REAL, &mut self.itimerval, core::ptr::null_mut()); setitimer(ITIMER_REAL, &raw mut self.itimerval, core::ptr::null_mut());
} }
} }
@ -289,7 +289,7 @@ impl TimerStruct {
LeaveCriticalSection(self.critical_mut()); LeaveCriticalSection(self.critical_mut());
compiler_fence(Ordering::SeqCst); compiler_fence(Ordering::SeqCst);
SetThreadpoolTimer(*self.ptp_timer(), Some(&ft), 0, None); SetThreadpoolTimer(*self.ptp_timer(), Some(&raw const ft), 0, None);
} }
} }
@ -317,7 +317,7 @@ impl TimerStruct {
// No user-provided values. // No user-provided values.
unsafe { unsafe {
let mut itimerval_zero: Itimerval = core::mem::zeroed(); let mut itimerval_zero: Itimerval = core::mem::zeroed();
setitimer(ITIMER_REAL, &mut itimerval_zero, core::ptr::null_mut()); setitimer(ITIMER_REAL, &raw mut itimerval_zero, core::ptr::null_mut());
} }
} }

View File

@ -199,7 +199,7 @@ where
This number 0x20000 could vary depending on the compilers optimization for future compression library changes. This number 0x20000 could vary depending on the compilers optimization for future compression library changes.
*/ */
let mut stack_reserved = 0x20000; let mut stack_reserved = 0x20000;
SetThreadStackGuarantee(&mut stack_reserved)?; SetThreadStackGuarantee(&raw mut stack_reserved)?;
} }
#[cfg(all(feature = "std", windows))] #[cfg(all(feature = "std", windows))]

View File

@ -139,7 +139,7 @@ where
} }
#[cfg(not(target_os = "linux"))] #[cfg(not(target_os = "linux"))]
{ {
setitimer(ITIMER_REAL, &mut self.itimerval, null_mut()); setitimer(ITIMER_REAL, &raw mut self.itimerval, null_mut());
} }
// log::trace!("{v:#?} {}", nix::errno::errno()); // log::trace!("{v:#?} {}", nix::errno::errno());

View File

@ -21,7 +21,7 @@ use crate::{
}, },
executors::{Executor, ExitKind, HasObservers}, executors::{Executor, ExitKind, HasObservers},
feedbacks::Feedback, feedbacks::Feedback,
inputs::Input, inputs::{Input, NopBytesConverter},
mark_feature_time, mark_feature_time,
observers::ObserversTuple, observers::ObserversTuple,
schedulers::Scheduler, schedulers::Scheduler,
@ -78,6 +78,16 @@ pub trait HasObjective {
fn set_share_objectives(&mut self, share_objectives: bool); fn set_share_objectives(&mut self, share_objectives: bool);
} }
/// Holds a converter that can turn an input into target bytes
pub trait HasBytesConverter {
/// The converter type
type Converter;
/// The bytes converter (immutable)
fn converter(&self) -> &Self::Converter;
/// The bytes converter (mutable)
fn converter_mut(&mut self) -> &mut Self::Converter;
}
/// Evaluates if an input is interesting using the feedback /// Evaluates if an input is interesting using the feedback
pub trait ExecutionProcessor<EM, I, OT, S> { pub trait ExecutionProcessor<EM, I, OT, S> {
/// Check the outcome of the execution, find if it is worth for corpus or objectives /// Check the outcome of the execution, find if it is worth for corpus or objectives
@ -294,16 +304,17 @@ impl ExecuteInputResult {
/// Your default fuzzer instance, for everyday use. /// Your default fuzzer instance, for everyday use.
#[derive(Debug)] #[derive(Debug)]
pub struct StdFuzzer<CS, F, IF, OF> { pub struct StdFuzzer<CS, F, IC, IF, OF> {
scheduler: CS, scheduler: CS,
feedback: F, feedback: F,
objective: OF, objective: OF,
bytes_converter: IC,
input_filter: IF, input_filter: IF,
// Handles whether to share objective testcases among nodes // Handles whether to share objective testcases among nodes
share_objectives: bool, share_objectives: bool,
} }
impl<CS, F, I, IF, OF, S> HasScheduler<I, S> for StdFuzzer<CS, F, IF, OF> impl<CS, F, I, IC, IF, OF, S> HasScheduler<I, S> for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
{ {
@ -318,7 +329,7 @@ where
} }
} }
impl<CS, F, IF, OF> HasFeedback for StdFuzzer<CS, F, IF, OF> { impl<CS, F, IC, IF, OF> HasFeedback for StdFuzzer<CS, F, IC, IF, OF> {
type Feedback = F; type Feedback = F;
fn feedback(&self) -> &Self::Feedback { fn feedback(&self) -> &Self::Feedback {
@ -330,7 +341,7 @@ impl<CS, F, IF, OF> HasFeedback for StdFuzzer<CS, F, IF, OF> {
} }
} }
impl<CS, F, IF, OF> HasObjective for StdFuzzer<CS, F, IF, OF> { impl<CS, F, IC, IF, OF> HasObjective for StdFuzzer<CS, F, IC, IF, OF> {
type Objective = OF; type Objective = OF;
fn objective(&self) -> &OF { fn objective(&self) -> &OF {
@ -350,7 +361,8 @@ impl<CS, F, IF, OF> HasObjective for StdFuzzer<CS, F, IF, OF> {
} }
} }
impl<CS, EM, F, I, IF, OF, OT, S> ExecutionProcessor<EM, I, OT, S> for StdFuzzer<CS, F, IF, OF> impl<CS, EM, F, I, IC, IF, OF, OT, S> ExecutionProcessor<EM, I, OT, S>
for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
EM: EventFirer<I, S>, EM: EventFirer<I, S>,
@ -546,7 +558,8 @@ where
} }
} }
impl<CS, E, EM, F, I, IF, OF, S> EvaluatorObservers<E, EM, I, S> for StdFuzzer<CS, F, IF, OF> impl<CS, E, EM, F, I, IC, IF, OF, S> EvaluatorObservers<E, EM, I, S>
for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>, E: HasObservers + Executor<EM, I, S, Self>,
@ -604,10 +617,19 @@ pub struct BloomInputFilter {
bloom: BloomFilter, bloom: BloomFilter,
} }
#[cfg(feature = "std")]
impl Default for BloomInputFilter {
fn default() -> Self {
let bloom = BloomFilter::with_false_pos(1e-4).expected_items(10_000_000);
Self { bloom }
}
}
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl BloomInputFilter { impl BloomInputFilter {
#[must_use] #[must_use]
fn new(items_count: usize, fp_p: f64) -> Self { /// Constructor
pub fn new(items_count: usize, fp_p: f64) -> Self {
let bloom = BloomFilter::with_false_pos(fp_p).expected_items(items_count); let bloom = BloomFilter::with_false_pos(fp_p).expected_items(items_count);
Self { bloom } Self { bloom }
} }
@ -621,7 +643,7 @@ impl<I: Hash> InputFilter<I> for BloomInputFilter {
} }
} }
impl<CS, E, EM, F, I, IF, OF, S> Evaluator<E, EM, I, S> for StdFuzzer<CS, F, IF, OF> impl<CS, E, EM, F, I, IC, IF, OF, S> Evaluator<E, EM, I, S> for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>, E: HasObservers + Executor<EM, I, S, Self>,
@ -777,7 +799,7 @@ where
} }
} }
impl<CS, E, EM, F, I, IF, OF, S> EventProcessor<E, EM, I, S> for StdFuzzer<CS, F, IF, OF> impl<CS, E, EM, F, I, IC, IF, OF, S> EventProcessor<E, EM, I, S> for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>, E: HasObservers + Executor<EM, I, S, Self>,
@ -859,7 +881,7 @@ where
} }
} }
impl<CS, E, EM, F, I, IF, OF, S, ST> Fuzzer<E, EM, I, S, ST> for StdFuzzer<CS, F, IF, OF> impl<CS, E, EM, F, I, IC, IF, OF, S, ST> Fuzzer<E, EM, I, S, ST> for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
E: HasObservers + Executor<EM, I, S, Self>, E: HasObservers + Executor<EM, I, S, Self>,
@ -991,45 +1013,96 @@ where
} }
} }
impl<CS, F, IF, OF> StdFuzzer<CS, F, IF, OF> { /// The builder for std fuzzer
/// Create a new [`StdFuzzer`] with standard behavior and the provided duplicate input execution filter. #[derive(Debug, Default)]
pub fn with_input_filter(scheduler: CS, feedback: F, objective: OF, input_filter: IF) -> Self { pub struct StdFuzzerBuilder<IC, IF> {
bytes_converter: Option<IC>,
input_filter: Option<IF>,
}
impl StdFuzzerBuilder<(), ()> {
/// Constructor
#[must_use]
pub fn new() -> Self {
Self {
input_filter: None,
bytes_converter: None,
}
}
}
impl<IF> StdFuzzerBuilder<(), IF> {
/// Set the bytes converter
pub fn bytes_converter<IC>(self, bytes_converter: IC) -> StdFuzzerBuilder<IC, IF> {
StdFuzzerBuilder {
bytes_converter: Some(bytes_converter),
input_filter: self.input_filter,
}
}
}
impl<IC> StdFuzzerBuilder<IC, ()> {
/// Set the input filter
pub fn input_filter<IF>(self, input_filter: IF) -> StdFuzzerBuilder<IC, IF> {
StdFuzzerBuilder {
bytes_converter: self.bytes_converter,
input_filter: Some(input_filter),
}
}
}
impl<IC, IF> StdFuzzerBuilder<IC, IF> {
/// Build the [`StdFuzzer`] from the configured converter and filter
pub fn build<CS, F, OF>(
self,
scheduler: CS,
feedback: F,
objective: OF,
) -> Result<StdFuzzer<CS, F, IC, IF, OF>, Error> {
let Some(bytes_converter) = self.bytes_converter else {
return Err(Error::illegal_argument("input converter not set"));
};
let Some(input_filter) = self.input_filter else {
return Err(Error::illegal_argument("input filter not set"));
};
Ok(StdFuzzer {
bytes_converter,
input_filter,
scheduler,
feedback,
objective,
share_objectives: false,
})
}
}
impl<CS, F, IC, IF, OF> HasBytesConverter for StdFuzzer<CS, F, IC, IF, OF> {
type Converter = IC;
fn converter(&self) -> &Self::Converter {
&self.bytes_converter
}
fn converter_mut(&mut self) -> &mut Self::Converter {
&mut self.bytes_converter
}
}
impl<CS, F, OF> StdFuzzer<CS, F, NopBytesConverter, NopInputFilter, OF> {
/// Create a new [`StdFuzzer`] with standard behavior and no duplicate input execution filtering.
pub fn new(scheduler: CS, feedback: F, objective: OF) -> Self {
Self { Self {
scheduler, scheduler,
feedback, feedback,
objective, objective,
input_filter, bytes_converter: NopBytesConverter::default(),
input_filter: NopInputFilter,
share_objectives: false, share_objectives: false,
} }
} }
} }
impl<CS, F, OF> StdFuzzer<CS, F, NopInputFilter, OF> {
/// Create a new [`StdFuzzer`] with standard behavior and no duplicate input execution filtering.
pub fn new(scheduler: CS, feedback: F, objective: OF) -> Self {
Self::with_input_filter(scheduler, feedback, objective, NopInputFilter)
}
}
#[cfg(feature = "std")] // hashing requires std
impl<CS, F, OF> StdFuzzer<CS, F, BloomInputFilter, OF> {
/// Create a new [`StdFuzzer`], which, with a certain certainty, executes each input only once.
///
/// This is achieved by hashing each input and using a bloom filter to differentiate inputs.
///
/// Use this implementation if hashing each input is very fast compared to executing potential duplicate inputs.
pub fn with_bloom_input_filter(
scheduler: CS,
feedback: F,
objective: OF,
items_count: usize,
fp_p: f64,
) -> Self {
let input_filter = BloomInputFilter::new(items_count, fp_p);
Self::with_input_filter(scheduler, feedback, objective, input_filter)
}
}
/// Structs with this trait will execute an input /// Structs with this trait will execute an input
pub trait ExecutesInput<E, EM, I, S> { pub trait ExecutesInput<E, EM, I, S> {
/// Runs the input and triggers observers and feedback /// Runs the input and triggers observers and feedback
@ -1042,7 +1115,7 @@ pub trait ExecutesInput<E, EM, I, S> {
) -> Result<ExitKind, Error>; ) -> Result<ExitKind, Error>;
} }
impl<CS, E, EM, F, I, IF, OF, S> ExecutesInput<E, EM, I, S> for StdFuzzer<CS, F, IF, OF> impl<CS, E, EM, F, I, IC, IF, OF, S> ExecutesInput<E, EM, I, S> for StdFuzzer<CS, F, IC, IF, OF>
where where
CS: Scheduler<I, S>, CS: Scheduler<I, S>,
E: Executor<EM, I, S, Self> + HasObservers, E: Executor<EM, I, S, Self> + HasObservers,
@ -1137,12 +1210,12 @@ mod tests {
use libafl_bolts::rands::StdRand; use libafl_bolts::rands::StdRand;
use super::{Evaluator, StdFuzzer};
use crate::{ use crate::{
corpus::InMemoryCorpus, corpus::InMemoryCorpus,
events::NopEventManager, events::NopEventManager,
executors::{ExitKind, InProcessExecutor}, executors::{ExitKind, InProcessExecutor},
inputs::BytesInput, fuzzer::{BloomInputFilter, Evaluator, StdFuzzerBuilder},
inputs::{BytesInput, NopBytesConverter},
schedulers::StdScheduler, schedulers::StdScheduler,
state::StdState, state::StdState,
}; };
@ -1151,7 +1224,12 @@ mod tests {
fn filtered_execution() { fn filtered_execution() {
let execution_count = RefCell::new(0); let execution_count = RefCell::new(0);
let scheduler = StdScheduler::new(); let scheduler = StdScheduler::new();
let mut fuzzer = StdFuzzer::with_bloom_input_filter(scheduler, (), (), 100, 1e-4); let bloom_filter = BloomInputFilter::default();
let mut fuzzer = StdFuzzerBuilder::new()
.input_filter(bloom_filter)
.bytes_converter(NopBytesConverter::default())
.build(scheduler, (), ())
.unwrap();
let mut state = StdState::new( let mut state = StdState::new(
StdRand::new(), StdRand::new(),
InMemoryCorpus::new(), InMemoryCorpus::new(),

View File

@ -39,7 +39,6 @@ use core::{
clone::Clone, clone::Clone,
fmt::Debug, fmt::Debug,
hash::Hash, hash::Hash,
marker::PhantomData,
ops::{DerefMut, RangeBounds}, ops::{DerefMut, RangeBounds},
}; };
#[cfg(feature = "std")] #[cfg(feature = "std")]
@ -116,6 +115,12 @@ pub trait InputConverter: Debug {
fn convert(&mut self, input: Self::From) -> Result<Self::To, Error>; fn convert(&mut self, input: Self::From) -> Result<Self::To, Error>;
} }
/// This trait can transform any input to bytes
pub trait InputToBytes<I>: Debug {
/// Transform to bytes
fn to_bytes<'a>(&mut self, input: &'a I) -> OwnedSlice<'a, u8>;
}
/// `None` type to satisfy the type inference in an `Option` /// `None` type to satisfy the type inference in an `Option`
#[macro_export] #[macro_export]
macro_rules! none_input_converter { macro_rules! none_input_converter {
@ -293,29 +298,16 @@ impl ResizableMutator<u8> for &mut Vec<u8> {
} }
} }
#[derive(Debug)] #[derive(Debug, Default)]
/// Basic `InputConverter` with just one type that is not converting /// Basic `NopBytesConverter` with just one type that is not converting
pub struct NopInputConverter<I> { pub struct NopBytesConverter {}
phantom: PhantomData<I>,
}
impl<I> Default for NopInputConverter<I> { impl<I> InputToBytes<I> for NopBytesConverter
fn default() -> Self {
Self {
phantom: PhantomData,
}
}
}
impl<I> InputConverter for NopInputConverter<I>
where where
I: Input, I: HasTargetBytes + Debug,
{ {
type From = I; fn to_bytes<'a>(&mut self, input: &'a I) -> OwnedSlice<'a, u8> {
type To = I; input.target_bytes()
fn convert(&mut self, input: Self::From) -> Result<Self::To, Error> {
Ok(input)
} }
} }
@ -363,40 +355,3 @@ where
(self.convert_cb)(input) (self.convert_cb)(input)
} }
} }
/// A converter that converts from `input` to target bytes
pub trait TargetBytesConverter<I> {
/// Create target bytes
fn to_target_bytes<'a>(&mut self, input: &'a I) -> OwnedSlice<'a, u8>;
}
/// Simply gets the target bytes out from a [`HasTargetBytes`] type.
#[derive(Debug)]
pub struct NopTargetBytesConverter<I> {
phantom: PhantomData<I>,
}
impl<I> NopTargetBytesConverter<I> {
/// Create a new [`NopTargetBytesConverter`]
#[must_use]
pub fn new() -> NopTargetBytesConverter<I> {
Self {
phantom: PhantomData,
}
}
}
impl<I> Default for NopTargetBytesConverter<I> {
fn default() -> Self {
Self::new()
}
}
impl<I> TargetBytesConverter<I> for NopTargetBytesConverter<I>
where
I: HasTargetBytes,
{
fn to_target_bytes<'a>(&mut self, input: &'a I) -> OwnedSlice<'a, u8> {
input.target_bytes()
}
}
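As a usage illustration of the new trait for inputs that are not simply `HasTargetBytes`: `PacketInput`, its fields, and `PacketBytesConverter` are invented for this sketch; the trait signature and the `OwnedSlice` usage follow the code above (compare `NautilusBytesConverter` in the next file).

use libafl::inputs::InputToBytes;
use libafl_bolts::ownedref::OwnedSlice;

/// Hypothetical structured input.
#[derive(Debug)]
struct PacketInput {
    header: u8,
    payload: Vec<u8>,
}

/// Hypothetical converter that flattens a `PacketInput` into the bytes the target expects.
#[derive(Debug, Default)]
struct PacketBytesConverter;

impl InputToBytes<PacketInput> for PacketBytesConverter {
    fn to_bytes<'a>(&mut self, input: &'a PacketInput) -> OwnedSlice<'a, u8> {
        let mut bytes = Vec::with_capacity(1 + input.payload.len());
        bytes.push(input.header);
        bytes.extend_from_slice(&input.payload);
        // Return an owned buffer wrapped in `OwnedSlice`.
        OwnedSlice::from(bytes)
    }
}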

View File

@ -8,16 +8,15 @@ use core::{
use libafl_bolts::{HasLen, ownedref::OwnedSlice}; use libafl_bolts::{HasLen, ownedref::OwnedSlice};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use super::TargetBytesConverter; use super::BytesInput;
use crate::{ use crate::{
Error,
common::nautilus::grammartec::{ common::nautilus::grammartec::{
newtypes::NodeId, newtypes::NodeId,
rule::RuleIdOrCustom, rule::RuleIdOrCustom,
tree::{Tree, TreeLike}, tree::{Tree, TreeLike},
}, },
generators::nautilus::NautilusContext, generators::nautilus::NautilusContext,
inputs::{BytesInput, Input, InputConverter}, inputs::{Input, InputConverter, InputToBytes},
}; };
/// An [`Input`] implementation for `Nautilus` grammar. /// An [`Input`] implementation for `Nautilus` grammar.
@ -97,49 +96,34 @@ impl Hash for NautilusInput {
} }
} }
/// `InputConverter` to convert from `NautilusInput` to `BytesInput` /// Convert from `NautilusInput` to `BytesInput`
#[derive(Debug)] #[derive(Debug)]
pub struct NautilusToBytesInputConverter<'a> { pub struct NautilusBytesConverter<'a> {
ctx: &'a NautilusContext, ctx: &'a NautilusContext,
} }
impl<'a> NautilusToBytesInputConverter<'a> { impl<'a> NautilusBytesConverter<'a> {
#[must_use] #[must_use]
/// Create a new `NautilusToBytesInputConverter` from a context /// Create a new `NautilusBytesConverter` from a context
pub fn new(ctx: &'a NautilusContext) -> Self { pub fn new(ctx: &'a NautilusContext) -> Self {
Self { ctx } Self { ctx }
} }
} }
impl InputConverter for NautilusToBytesInputConverter<'_> { impl InputConverter for NautilusBytesConverter<'_> {
type From = NautilusInput; type From = NautilusInput;
type To = BytesInput; type To = BytesInput;
fn convert(&mut self, input: Self::From) -> Result<Self::To, Error> { fn convert(&mut self, input: Self::From) -> Result<Self::To, libafl_bolts::Error> {
let mut bytes = vec![]; let mut bytes = vec![];
input.unparse(self.ctx, &mut bytes); input.unparse(self.ctx, &mut bytes);
Ok(BytesInput::new(bytes)) Ok(BytesInput::new(bytes))
} }
} }
/// A converter to convert a nautilus context to target bytes impl InputToBytes<NautilusInput> for NautilusBytesConverter<'_> {
#[derive(Debug)] fn to_bytes<'a>(&mut self, input: &'a NautilusInput) -> OwnedSlice<'a, u8> {
pub struct NautilusTargetBytesConverter<'a> { let mut bytes = vec![];
/// The Nautilus Context
ctx: &'a NautilusContext,
}
impl<'a> NautilusTargetBytesConverter<'a> {
/// Create a new [`NautilusTargetBytesConverter`]
#[must_use]
pub fn new(ctx: &'a NautilusContext) -> NautilusTargetBytesConverter<'a> {
NautilusTargetBytesConverter { ctx }
}
}
impl TargetBytesConverter<NautilusInput> for NautilusTargetBytesConverter<'_> {
fn to_target_bytes<'a>(&mut self, input: &'a NautilusInput) -> OwnedSlice<'a, u8> {
let mut bytes = Vec::new();
input.unparse(self.ctx, &mut bytes); input.unparse(self.ctx, &mut bytes);
OwnedSlice::from(bytes) OwnedSlice::from(bytes)
} }

View File

@ -93,7 +93,7 @@ impl From<i32> for MutationId {
/// ///
/// [`MutationResult::Skipped`] does not necessarily mean that the input changed, /// [`MutationResult::Skipped`] does not necessarily mean that the input changed,
/// just that the mutator did something. For slow targets, consider using /// just that the mutator did something. For slow targets, consider using
/// a filtered fuzzer (see [`crate::fuzzer::StdFuzzer::with_input_filter`]) /// a fuzzer with an input filter
/// or wrapping your mutator in a [`hash::MutationChecker`]. /// or wrapping your mutator in a [`hash::MutationChecker`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)] #[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MutationResult { pub enum MutationResult {

View File

@ -40,7 +40,6 @@ use crate::{
inputs::{Input, NopInput}, inputs::{Input, NopInput},
stages::StageId, stages::StageId,
}; };
/// The maximum size of a testcase /// The maximum size of a testcase
pub const DEFAULT_MAX_SIZE: usize = 1_048_576; pub const DEFAULT_MAX_SIZE: usize = 1_048_576;
@ -203,8 +202,8 @@ impl<I, S, Z> Debug for LoadConfig<'_, I, S, Z> {
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(bound = " #[serde(bound = "
C: serde::Serialize + for<'a> serde::Deserialize<'a>, C: serde::Serialize + for<'a> serde::Deserialize<'a>,
R: serde::Serialize + for<'a> serde::Deserialize<'a>,
SC: serde::Serialize + for<'a> serde::Deserialize<'a>, SC: serde::Serialize + for<'a> serde::Deserialize<'a>,
R: serde::Serialize + for<'a> serde::Deserialize<'a>
")] ")]
pub struct StdState<C, I, R, SC> { pub struct StdState<C, I, R, SC> {
/// RNG instance /// RNG instance
@ -1110,7 +1109,15 @@ where
{ {
self.generate_initial_internal(fuzzer, executor, generator, manager, num, false) self.generate_initial_internal(fuzzer, executor, generator, manager, num, false)
} }
}
impl<C, I, R, SC> StdState<C, I, R, SC>
where
C: Corpus<I>,
I: Input,
R: Rand,
SC: Corpus<I>,
{
/// Creates a new `State`, taking ownership of all of the individual components during fuzzing. /// Creates a new `State`, taking ownership of all of the individual components during fuzzing.
pub fn new<F, O>( pub fn new<F, O>(
rand: R, rand: R,

View File

@ -278,7 +278,7 @@ mod linux {
sched_setaffinity( sched_setaffinity(
0, // Defaults to current thread 0, // Defaults to current thread
size_of::<cpu_set_t>(), size_of::<cpu_set_t>(),
&set, &raw const set,
) )
}; };
@ -297,7 +297,7 @@ mod linux {
sched_getaffinity( sched_getaffinity(
0, // Defaults to current thread 0, // Defaults to current thread
size_of::<cpu_set_t>(), size_of::<cpu_set_t>(),
&mut set, &raw mut set,
) )
}; };
@ -452,7 +452,8 @@ mod windows {
let mut outga = GROUP_AFFINITY::default(); let mut outga = GROUP_AFFINITY::default();
let result = SetThreadGroupAffinity(GetCurrentThread(), &ga, Some(&mut outga)); let result =
SetThreadGroupAffinity(GetCurrentThread(), &raw const ga, Some(&raw mut outga));
if result.0 == 0 { if result.0 == 0 {
Err(Error::unknown("Failed to set_for_current")) Err(Error::unknown("Failed to set_for_current"))
} else { } else {

View File

@ -1127,7 +1127,7 @@ mod windows_logging {
h_stdout, h_stdout,
bytes.as_ptr() as *const _, bytes.as_ptr() as *const _,
bytes.len() as u32, bytes.len() as u32,
&mut bytes_written, &raw mut bytes_written,
ptr::null_mut(), ptr::null_mut(),
) )
}; };

View File

@ -2281,7 +2281,7 @@ impl CtrlHandler for LlmpShutdownSignalHandler {
fn handle(&mut self, ctrl_type: u32) -> bool { fn handle(&mut self, ctrl_type: u32) -> bool {
log::info!("LLMP: Received shutdown signal, ctrl_type {ctrl_type:?}"); log::info!("LLMP: Received shutdown signal, ctrl_type {ctrl_type:?}");
unsafe { unsafe {
ptr::write_volatile(&mut self.shutting_down, true); ptr::write_volatile(&raw mut self.shutting_down, true);
} }
true true
} }

View File

@ -1022,11 +1022,11 @@ fn write_minibsod<W: Write>(writer: &mut BufWriter<W>) -> Result<(), std::io::Er
r = unsafe { r = unsafe {
mach_vm_region_recurse( mach_vm_region_recurse(
task, task,
&mut addr, &raw mut addr,
&mut sz, &raw mut sz,
&mut reg, &raw mut reg,
pvminfo.as_mut_ptr() as vm_region_recurse_info_t, pvminfo.as_mut_ptr() as vm_region_recurse_info_t,
&mut _cnt, &raw mut _cnt,
) )
}; };
if r != libc::KERN_SUCCESS { if r != libc::KERN_SUCCESS {

View File

@ -58,7 +58,7 @@ impl ChildHandle {
pub fn status(&self) -> i32 { pub fn status(&self) -> i32 {
let mut status = -1; let mut status = -1;
unsafe { unsafe {
libc::waitpid(self.pid, &mut status, 0); libc::waitpid(self.pid, &raw mut status, 0);
} }
libc::WEXITSTATUS(status) libc::WEXITSTATUS(status)
} }

View File

@ -524,7 +524,7 @@ pub unsafe fn setup_signal_handler<T: 'static + SignalHandler>(
pub fn ucontext() -> Result<ucontext_t, Error> { pub fn ucontext() -> Result<ucontext_t, Error> {
let mut ucontext = unsafe { mem::zeroed() }; let mut ucontext = unsafe { mem::zeroed() };
if cfg!(not(any(target_os = "openbsd", target_os = "haiku"))) { if cfg!(not(any(target_os = "openbsd", target_os = "haiku"))) {
if unsafe { getcontext(&mut ucontext) } == 0 { if unsafe { getcontext(&raw mut ucontext) } == 0 {
Ok(ucontext) Ok(ucontext)
} else { } else {
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]

View File

@ -1159,7 +1159,7 @@ impl<T: Sized> OwnedMutPtr<T> {
pub fn as_ptr(&self) -> *const T { pub fn as_ptr(&self) -> *const T {
match self { match self {
OwnedMutPtr::Ptr(ptr) => *ptr, OwnedMutPtr::Ptr(ptr) => *ptr,
OwnedMutPtr::Owned(owned) => &**owned, OwnedMutPtr::Owned(owned) => &raw const **owned,
} }
} }
@ -1168,7 +1168,7 @@ impl<T: Sized> OwnedMutPtr<T> {
pub fn as_mut_ptr(&mut self) -> *mut T { pub fn as_mut_ptr(&mut self) -> *mut T {
match self { match self {
OwnedMutPtr::Ptr(ptr) => *ptr, OwnedMutPtr::Ptr(ptr) => *ptr,
OwnedMutPtr::Owned(owned) => &mut **owned, OwnedMutPtr::Owned(owned) => &raw mut **owned,
} }
} }
} }

View File

@ -1452,7 +1452,7 @@ pub mod unix_shmem {
let fd = i32::from(id); let fd = i32::from(id);
unsafe { unsafe {
let mut stat = core::mem::zeroed(); let mut stat = core::mem::zeroed();
if fstat(fd, &mut stat) == -1 { if fstat(fd, &raw mut stat) == -1 {
return Err(Error::unknown( return Err(Error::unknown(
"Failed to map the memfd mapping".to_string(), "Failed to map the memfd mapping".to_string(),
)); ));

View File

@ -51,7 +51,7 @@ impl StateShMemContent {
     /// Get a length that's safe to deref from this map, or error.
     pub fn buf_len_checked(&self, shmem_size: usize) -> Result<usize, Error> {
-        let buf_len = unsafe { read_volatile(&self.buf_len) };
+        let buf_len = unsafe { read_volatile(&raw const self.buf_len) };
         if size_of::<StateShMemContent>() + buf_len > shmem_size {
             Err(Error::illegal_state(format!(
                 "Stored buf_len is larger than the shared map! Shared data corrupted? Expected {shmem_size} bytes max, but got {} (buf_len {buf_len})",

View File

@ -544,7 +544,7 @@ impl AsanRuntime {
         // Write something to (hopefully) make sure the val isn't optimized out
         unsafe {
-            write_volatile(&mut stack_var, 0xfadbeef);
+            write_volatile(&raw mut stack_var, 0xfadbeef);
         }
         let range = Self::range_for_address(stack_address);
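The comment kept in this hunk states the intent: the volatile store stops the compiler from optimizing the local away, so its address can stand in for "somewhere on the current stack". A hedged, self-contained sketch of the same trick (the function name and return type are illustrative, not LibAFL's):

```rust
// Illustrative sketch: the volatile write keeps `stack_var` alive, so taking
// its address yields a pointer that is guaranteed to lie on the current stack.
fn address_on_current_stack() -> usize {
    let mut stack_var: u32 = 0;
    unsafe {
        core::ptr::write_volatile(&raw mut stack_var, 0xfad_beef);
    }
    (&raw const stack_var) as usize
}
```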

View File

@ -15,9 +15,9 @@ use frida_gum::{
 #[cfg(windows)]
 use libafl::executors::{hooks::inprocess::InProcessHooks, inprocess::HasInProcessHooks};
 use libafl::{
-    Error,
+    Error, HasBytesConverter,
     executors::{Executor, ExitKind, HasObservers, InProcessExecutor},
-    inputs::{Input, NopTargetBytesConverter, TargetBytesConverter},
+    inputs::{Input, InputToBytes},
     observers::ObserversTuple,
     state::{HasCurrentTestcase, HasExecutions, HasSolutions},
 };
@ -30,7 +30,7 @@ use crate::helper::{FridaInstrumentationHelper, FridaRuntimeTuple};
 use crate::windows_hooks::initialize;
 /// The [`FridaInProcessExecutor`] is an [`Executor`] that executes the target in the same process, usinig [`frida`](https://frida.re/) for binary-only instrumentation.
-pub struct FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, TC, Z> {
+pub struct FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, Z> {
     base: InProcessExecutor<'a, EM, H, I, OT, S, Z>,
     /// `thread_id` for the Stalker
     thread_id: Option<u32>,
@ -38,13 +38,11 @@ pub struct FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, TC, Z> {
     stalker: Stalker,
     /// User provided callback for instrumentation
     helper: Rc<RefCell<FridaInstrumentationHelper<'b, RT>>>,
-    target_bytes_converter: TC,
     followed: bool,
     _phantom: PhantomData<&'b u8>,
 }
-impl<EM, H, I, OT, RT, S, TC, Z> Debug
-    for FridaInProcessExecutor<'_, '_, EM, H, I, OT, RT, S, TC, Z>
+impl<EM, H, I, OT, RT, S, Z> Debug for FridaInProcessExecutor<'_, '_, EM, H, I, OT, RT, S, Z>
 where
     OT: Debug,
 {
@ -57,17 +55,16 @@ where
     }
 }
-impl<EM, H, I, OT, RT, S, TC, Z> Executor<EM, I, S, Z>
-    for FridaInProcessExecutor<'_, '_, EM, H, I, OT, RT, S, TC, Z>
+impl<EM, H, I, OT, RT, S, Z> Executor<EM, I, S, Z>
+    for FridaInProcessExecutor<'_, '_, EM, H, I, OT, RT, S, Z>
 where
     H: FnMut(&I) -> ExitKind,
     I: Input,
-    S: HasExecutions,
-    S: HasCurrentTestcase<I>,
-    S: HasSolutions<I>,
-    TC: TargetBytesConverter<I>,
+    S: HasExecutions + HasCurrentTestcase<I> + HasSolutions<I>,
     OT: ObserversTuple<I, S>,
     RT: FridaRuntimeTuple,
+    Z: HasBytesConverter,
+    Z::Converter: InputToBytes<I>,
 {
     /// Instruct the target about the input and run
     #[inline]
@ -78,7 +75,8 @@ where
         mgr: &mut EM,
         input: &I,
     ) -> Result<ExitKind, Error> {
-        let target_bytes = self.target_bytes_converter.to_target_bytes(input);
+        let converter = fuzzer.converter_mut();
+        let target_bytes = converter.to_bytes(input);
         self.helper.borrow_mut().pre_exec(target_bytes.as_slice())?;
         if self.helper.borrow_mut().stalker_enabled() {
             if !(self.followed) {
@ -125,8 +123,8 @@ where
     }
 }
-impl<EM, H, I, OT, RT, S, TC, Z> HasObservers
-    for FridaInProcessExecutor<'_, '_, EM, H, I, OT, RT, S, TC, Z>
+impl<EM, H, I, OT, RT, S, Z> HasObservers
+    for FridaInProcessExecutor<'_, '_, EM, H, I, OT, RT, S, Z>
 {
     type Observers = OT;
     #[inline]
@ -140,8 +138,7 @@ impl<EM, H, I, OT, RT, S, TC, Z> HasObservers
     }
 }
-impl<'a, 'b, EM, H, I, OT, RT, S, Z>
-    FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, NopTargetBytesConverter<I>, Z>
+impl<'a, 'b, EM, H, I, OT, RT, S, Z> FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, Z>
 where
     RT: FridaRuntimeTuple,
 {
@ -151,13 +148,7 @@ where
         base: InProcessExecutor<'a, EM, H, I, OT, S, Z>,
         helper: Rc<RefCell<FridaInstrumentationHelper<'b, RT>>>,
     ) -> Self {
-        FridaInProcessExecutor::with_target_bytes_converter(
-            gum,
-            base,
-            helper,
-            None,
-            NopTargetBytesConverter::new(),
-        )
+        FridaInProcessExecutor::with_target_bytes_converter(gum, base, helper, None)
     }
     /// Creates a new [`FridaInProcessExecutor`] tracking the given `thread_id`.
@ -167,17 +158,11 @@ where
         helper: Rc<RefCell<FridaInstrumentationHelper<'b, RT>>>,
         thread_id: u32,
     ) -> Self {
-        FridaInProcessExecutor::with_target_bytes_converter(
-            gum,
-            base,
-            helper,
-            Some(thread_id),
-            NopTargetBytesConverter::new(),
-        )
+        FridaInProcessExecutor::with_target_bytes_converter(gum, base, helper, Some(thread_id))
     }
 }
-impl<'a, 'b, EM, H, I, OT, RT, S, TC, Z> FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, TC, Z>
+impl<'a, 'b, EM, H, I, OT, RT, S, Z> FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, Z>
 where
     RT: FridaRuntimeTuple,
 {
@ -187,7 +172,6 @@ where
         base: InProcessExecutor<'a, EM, H, I, OT, S, Z>,
         helper: Rc<RefCell<FridaInstrumentationHelper<'b, RT>>>,
         thread_id: Option<u32>,
-        target_bytes_converter: TC,
     ) -> Self {
         let mut stalker = Stalker::new(gum);
         let ranges = helper.borrow_mut().ranges().clone();
@ -237,7 +221,6 @@ where
             thread_id,
             stalker,
             helper,
-            target_bytes_converter,
             followed: false,
             _phantom: PhantomData,
         }
@ -245,13 +228,12 @@ where
     }
 #[cfg(windows)]
-impl<'a, 'b, EM, H, I, OT, RT, S, TC, Z> HasInProcessHooks<I, S>
-    for FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, TC, Z>
+impl<'a, 'b, EM, H, I, OT, RT, S, Z> HasInProcessHooks<I, S>
+    for FridaInProcessExecutor<'a, 'b, EM, H, I, OT, RT, S, Z>
 where
     H: FnMut(&I) -> ExitKind,
     S: HasSolutions<I> + HasCurrentTestcase<I> + HasExecutions,
     I: Input,
-    TC: TargetBytesConverter<I>,
     OT: ObserversTuple<I, S>,
     RT: FridaRuntimeTuple,
 {
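This file carries the change named in the commit title: `FridaInProcessExecutor` drops its own `TargetBytesConverter` type parameter, and the fuzzer (`Z`) now owns the converter, exposed through `HasBytesConverter::converter_mut()` and consumed through `InputToBytes::to_bytes()`. A hedged sketch of that ownership pattern with simplified names and signatures (`MyFuzzer`, `IdentityConverter`, and the `Vec<u8>` return type are assumptions for this example; the real LibAFL traits differ in detail):

```rust
// Illustrative sketch of the "fuzzer holds the bytes converter" pattern.
// The types below are stand-ins, not the LibAFL API.
trait InputToBytes<I> {
    fn to_bytes(&mut self, input: &I) -> Vec<u8>;
}

trait HasBytesConverter {
    type Converter;
    fn converter_mut(&mut self) -> &mut Self::Converter;
}

struct MyFuzzer<C> {
    converter: C,
}

impl<C> HasBytesConverter for MyFuzzer<C> {
    type Converter = C;
    fn converter_mut(&mut self) -> &mut Self::Converter {
        &mut self.converter
    }
}

/// A trivial converter: the input already is a byte vector.
struct IdentityConverter;

impl InputToBytes<Vec<u8>> for IdentityConverter {
    fn to_bytes(&mut self, input: &Vec<u8>) -> Vec<u8> {
        input.clone()
    }
}

// An executor-like function no longer stores a converter; it borrows it from
// the fuzzer whenever it needs the target bytes, as in the hunk above.
fn run_target<I, Z>(fuzzer: &mut Z, input: &I) -> Vec<u8>
where
    Z: HasBytesConverter,
    Z::Converter: InputToBytes<I>,
{
    fuzzer.converter_mut().to_bytes(input)
}

fn main() {
    let mut fuzzer = MyFuzzer {
        converter: IdentityConverter,
    };
    let bytes = run_target(&mut fuzzer, &vec![1u8, 2, 3]);
    assert_eq!(bytes, vec![1, 2, 3]);
}
```

The design benefit is that constructors such as `new()` and `with_thread_id()` no longer need a `NopTargetBytesConverter` placeholder, and the converter is configured once on the fuzzer rather than per executor.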

View File

@ -903,7 +903,7 @@ fn linux_version() -> Result<(usize, usize, usize), ()> {
         domainname: [0; 65],
     };
-    if unsafe { libc::uname(&mut uname_data) } != 0 {
+    if unsafe { libc::uname(&raw mut uname_data) } != 0 {
         return Err(());
     }
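For context on this hunk, a hedged sketch of the call pattern, assuming the `libc` crate on a Linux target. The helper name and the shortcut to `mem::zeroed()` are illustrative; the real `linux_version` builds the `utsname` explicitly and, given its signature, presumably parses the release string into a `(major, minor, patch)` tuple:

```rust
// Illustrative sketch, not the LibAFL implementation: `libc::uname` fills the
// struct through a raw pointer, and the kernel release is read back as a C string.
fn kernel_release() -> Result<String, ()> {
    let mut uname_data: libc::utsname = unsafe { core::mem::zeroed() };
    if unsafe { libc::uname(&raw mut uname_data) } != 0 {
        return Err(());
    }
    let release = unsafe { std::ffi::CStr::from_ptr(uname_data.release.as_ptr()) };
    Ok(release.to_string_lossy().into_owned())
}
```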

View File

@ -24,7 +24,6 @@ use libafl_bolts::{
 use libafl_targets::LLVMCustomMutator;
 use crate::{CustomMutationStatus, options::LibfuzzerOptions};
 type TMinState =
     StdState<InMemoryCorpus<BytesInput>, BytesInput, RomuDuoJrRand, InMemoryCorpus<BytesInput>>;

View File

@ -31,12 +31,12 @@ if [[ "$OSTYPE" == "linux-gnu"* ]]; then
     "libafl_frida"
     "libafl_libfuzzer"
     "libafl_libfuzzer_runtime"
+    "libafl_intelpt"
+    "libafl_nyx"
     "libafl_qemu"
     "libafl_tinyinst"
     "libafl_qemu/libafl_qemu_build"
     "libafl_qemu/libafl_qemu_sys"
-    "libafl_nyx"
-    "libafl_intelpt"
 )
 fi
@ -70,6 +70,8 @@ for project in "${PROJECTS[@]}"; do
     fi
 done
+eval "$CLIPPY_CMD --workspace -- $RUSTC_FLAGS"
 echo "Clippy run completed for all specified projects."
 # Last run it on all