Autolaunch ShMemService, add MacOS fuzzers to CI, various fixes (#246)

* starting to fix macos linker bugs

* detailed error prints

* start shmem service manually

* not a doc comment

* Some fixes

* only send exit msg to shmemservice when start was successful

* incorporated shmem service into provider

* removed unused imports

* trying to fix fuzzers

* fixed build

* check if join_handle is_some

* more debug prints

* fixed shmem service autolaunch

* fixed macos linker

* ignoring broken libpng testcase on macos for now (see #252)

* fmt

* try to fix missing llvm_config (see #253)

* empty issue template added

* Manually look for llvm-config on MacOS

* fixing CI

* fixed docs

* ignoring libmozjpeg for CI, see #254
Dominik Maier 2021-08-09 01:10:29 +02:00 committed by GitHub
parent ac8bbdbd0a
commit bb21ab7a63
47 changed files with 419 additions and 221 deletions
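The central change, visible throughout the diff below, is that fuzzers no longer start the shared-memory service themselves: every `let _service = StdShMemService::start()...` call and its import are removed, and `ServedShMemProvider::new()` now launches the service internally where it is needed (MacOS, Android). A minimal sketch of the resulting call site, using only names that appear in this diff; whether it compiles against a given libafl version is an assumption, and the 1024-byte map size is just an example:

```rust
use libafl::bolts::shmem::{ShMem, ShMemProvider, StdShMemProvider};

fn main() {
    // No explicit StdShMemService::start() anymore; constructing the provider
    // is enough, and it autolaunches the backing service on platforms that need it.
    let mut provider = StdShMemProvider::new().expect("Failed to init shared memory");
    let mut map = provider.new_map(1024).expect("Failed to create a shared map");
    map.map_mut()[0] = 1;
    assert_eq!(map.map_mut()[0], 1);
}
```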

.github/ISSUE_TEMPLATE/empty.md (new file)

@ -0,0 +1,8 @@
---
name: Empty
about: A question or issue that doesn't fit the templates
title: ''
labels: ''
assignees: ''
---


@ -92,7 +92,7 @@ jobs:
- name: Install deps - name: Install deps
run: sudo apt-get install -y llvm llvm-dev clang nasm run: sudo apt-get install -y llvm llvm-dev clang nasm
- name: Build and run example fuzzers - name: Build and run example fuzzers
run: ./scripts/build_all_fuzzers.sh run: ./scripts/test_all_fuzzers.sh
nostd-build: nostd-build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
@ -157,12 +157,14 @@ jobs:
- uses: Swatinem/rust-cache@v1 - uses: Swatinem/rust-cache@v1
- name: Add nightly rustfmt and clippy - name: Add nightly rustfmt and clippy
run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
- name: YOLO remove ancient libpng for mozjpeg build
run: rm -rf /usr/local/lib/libpng.a
- name: Install deps - name: Install deps
run: brew install llvm libpng nasm run: brew install llvm libpng nasm coreutils && brew link --force llvm
- name: Increase map sizes - name: Increase map sizes
run: ./scripts/shmem_limits_macos.sh run: ./scripts/shmem_limits_macos.sh
- name: Build and run example fuzzers - name: Build and run example fuzzers
run: ./scripts/build_all_fuzzers.sh run: ./scripts/test_all_fuzzers.sh
# TODO: Figure out how to properly build stuff with clang # TODO: Figure out how to properly build stuff with clang
#- name: Add clang path to $PATH env #- name: Add clang path to $PATH env
# if: runner.os == 'Windows' # if: runner.os == 'Windows'


@ -105,6 +105,6 @@ RUN cargo build && cargo build --release
# Copy fuzzers over # Copy fuzzers over
COPY fuzzers fuzzers COPY fuzzers fuzzers
# RUN ./scripts/build_all_fuzzers.sh --no-fmt # RUN ./scripts/test_all_fuzzers.sh --no-fmt
ENTRYPOINT [ "/bin/bash" ] ENTRYPOINT [ "/bin/bash" ]

fuzzers/forkserver_simple/.gitignore (new file)

@ -0,0 +1 @@
forkserver_simple


@ -3,9 +3,9 @@ PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
PHONY: all PHONY: all
all: fuzzer all: $(FUZZER_NAME)
fuzzer: $(FUZZER_NAME):
cargo build --release cargo build --release
cp $(PROJECT_DIR)/target/release/$(FUZZER_NAME) . cp $(PROJECT_DIR)/target/release/$(FUZZER_NAME) .
@ -16,7 +16,8 @@ run: all
taskset -c 0 ./$(FUZZER_NAME) 2>/dev/null & taskset -c 0 ./$(FUZZER_NAME) 2>/dev/null &
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
timeout 10s taskset -c 0 ./$(FUZZER_NAME) 2>/dev/null & timeout 10s taskset -c 0 ./$(FUZZER_NAME) 2>/dev/null &
test: all test: all
timeout 60s taskset -c 0 ./$(FUZZER_NAME) 2>/dev/null & timeout 60s taskset -c 0 ./$(FUZZER_NAME) 2>/dev/null &


@ -3,7 +3,7 @@ use libafl::{
bolts::{ bolts::{
current_nanos, current_nanos,
rands::StdRand, rands::StdRand,
shmem::{ShMem, ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMem, ShMemProvider, StdShMemProvider},
tuples::tuple_list, tuples::tuple_list,
}, },
corpus::{ corpus::{
@ -30,8 +30,6 @@ pub fn main() {
const MAP_SIZE: usize = 65536; const MAP_SIZE: usize = 65536;
let _service = StdShMemService::start().unwrap();
//Coverage map shared between observer and executor //Coverage map shared between observer and executor
let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap(); let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap();
//let the forkserver know the shmid //let the forkserver know the shmid
@ -114,7 +112,12 @@ pub fn main() {
if state.corpus().count() < 1 { if state.corpus().count() < 1 {
state state
.load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs) .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs)
.unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &corpus_dirs)); .unwrap_or_else(|err| {
panic!(
"Failed to load initial corpus at {:?}: {:?}",
&corpus_dirs, err
)
});
println!("We imported {} inputs from disk.", state.corpus().count()); println!("We imported {} inputs from disk.", state.corpus().count());
} }


@ -1,3 +1,4 @@
libpng-* libpng-*
corpus_discovered corpus_discovered
libafl_frida libafl_frida
frida_libpng


@ -5,15 +5,15 @@ PHONY: all
all: libafl_frida libpng-harness.so all: libafl_frida libpng-harness.so
libpng-1.6.37: libpng-1.6.37.tar.xz:
wget https://deac-fra.dl.sourceforge.net/project/libpng/libpng16/1.6.37/libpng-1.6.37.tar.xz wget https://deac-fra.dl.sourceforge.net/project/libpng/libpng16/1.6.37/libpng-1.6.37.tar.xz
tar -xvf libpng-1.6.37.tar.xz
target/release/frida_libpng: src/* target/release/frida_libpng: src/*
# Build the frida libpng libfuzzer fuzzer # Build the frida libpng libfuzzer fuzzer
cargo build --release cargo build --release
libpng-1.6.37/.libs/libpng16.a: libpng-1.6.37 libpng-1.6.37/.libs/libpng16.a: libpng-1.6.37.tar.xz
tar -xvf libpng-1.6.37.tar.xz
cd libpng-1.6.37 && ./configure --enable-hardware-optimizations=yes --with-pic=yes cd libpng-1.6.37 && ./configure --enable-hardware-optimizations=yes --with-pic=yes
$(MAKE) -C libpng-1.6.37 $(MAKE) -C libpng-1.6.37
@ -32,8 +32,9 @@ run: all
./$(FUZZER_NAME) ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0 ./$(FUZZER_NAME) ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
# We allow exit code 124 too, which is sigterm # We allow exit code 124 too, which is sigterm
(timeout 3s ./libafl_frida ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0,1 || [ $$? -eq 124 ]) (timeout 3s ./libafl_frida ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0,1 || [ $$? -eq 124 ])
test: all test: all
timeout 60s ./$(FUZZER_NAME) ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0,1 (timeout 60s ./$(FUZZER_NAME) ./libpng-harness.so LLVMFuzzerTestOneInput ./libpng-harness.so --cores=0,1 || [ $$? -eq 124 ])


@ -3,16 +3,13 @@
use clap::{App, Arg}; use clap::{App, Arg};
#[cfg(all(cfg = "std", unix))]
use libafl::bolts::os::unix_shmem_server::ShMemService;
use libafl::{ use libafl::{
bolts::{ bolts::{
current_nanos, current_nanos,
launcher::Launcher, launcher::Launcher,
os::parse_core_bind_arg, os::parse_core_bind_arg,
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge}, tuples::{tuple_list, Merge},
}, },
corpus::{ corpus::{
@ -253,7 +250,7 @@ pub fn main() {
.unwrap_or("default launcher") .unwrap_or("default launcher")
.to_string(), .to_string(),
) { ) {
Ok(()) | Err(Error::ShuttingDown) => println!("Finished fuzzing. Good bye."), Ok(()) | Err(Error::ShuttingDown) => println!("\nFinished fuzzing. Good bye."),
Err(e) => panic!("Error during fuzzing: {:?}", e), Err(e) => panic!("Error during fuzzing: {:?}", e),
} }
} }
@ -294,7 +291,6 @@ unsafe fn fuzz(
// 'While the stats are state, they are usually used in the broker - which is likely never restarted // 'While the stats are state, they are usually used in the broker - which is likely never restarted
let stats = MultiStats::new(|s| println!("{}", s)); let stats = MultiStats::new(|s| println!("{}", s));
let _service = StdShMemService::start().expect("Failed to start ShMem service");
let shmem_provider = StdShMemProvider::new()?; let shmem_provider = StdShMemProvider::new()?;
let mut run_client = |state: Option<StdState<_, _, _, _, _>>, mut mgr| { let mut run_client = |state: Option<StdState<_, _, _, _, _>>, mut mgr| {


@ -1 +1,2 @@
libpng-* libpng-*
fuzzer


@ -0,0 +1,48 @@
FUZZER_NAME="fuzzer"
PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
PHONY: all
all: fuzzer
target/release/libafl_cxx: src/* src/bin/*
# Build the libpng libfuzzer library
cargo build --release
target/release/libafl_cc: target/release/libafl_cxx
fuzz.o: fuzz.c target/release/libafl_cc
target/release/libafl_cc -O3 -c $^ -o $@
fuzzer: target/release/libafl_cxx fuzz.o
# Build the fuzzer compiler
cargo build --release
# Build the harness
target/release/libafl_cxx \
fuzz.o \
-o $(FUZZER_NAME) \
-lm -lz
clean:
rm ./$(FUZZER_NAME) || true
rm fuzz.o || true
run: all
./$(FUZZER_NAME)
short_test: all
rm -rf libafl_unix_shmem_server || true
mkdir in || true
echo a > in/a
# Allow sigterm as exit code
(timeout 11s ./$(FUZZER_NAME) out in || [ $$? -eq 124 ])
rm -rf out
rm -rf in
test: all
mkdir in || true
echo a > in/a
(timeout 60s ./$(FUZZER_NAME) out in || [ $$? -eq 124 ])
rm -rf out
rm -rf in


@ -19,7 +19,7 @@ use libafl::{
current_nanos, current_time, current_nanos, current_time,
os::dup2, os::dup2,
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge}, tuples::{tuple_list, Merge},
}, },
corpus::{Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler}, corpus::{Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler},
@ -181,7 +181,6 @@ fn fuzz(
// We need a shared map to store our state before a crash. // We need a shared map to store our state before a crash.
// This way, we are able to continue fuzzing afterwards. // This way, we are able to continue fuzzing afterwards.
let _service = StdShMemService::start().expect("Failed to start ShMem service");
let mut shmem_provider = StdShMemProvider::new()?; let mut shmem_provider = StdShMemProvider::new()?;
let (state, mut mgr) = match SimpleRestartingEventManager::launch(stats, &mut shmem_provider) { let (state, mut mgr) = match SimpleRestartingEventManager::launch(stats, &mut shmem_provider) {


@ -19,7 +19,7 @@ use libafl::{
current_nanos, current_time, current_nanos, current_time,
os::dup2, os::dup2,
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge}, tuples::{tuple_list, Merge},
}, },
corpus::{Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler}, corpus::{Corpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, QueueCorpusScheduler},


@ -11,7 +11,7 @@ use libafl::{
launcher::Launcher, launcher::Launcher,
os::parse_core_bind_arg, os::parse_core_bind_arg,
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge}, tuples::{tuple_list, Merge},
}, },
corpus::{ corpus::{
@ -49,7 +49,7 @@ pub fn libafl_main() {
let yaml = load_yaml!("clap-config.yaml"); let yaml = load_yaml!("clap-config.yaml");
let matches = App::from(yaml).get_matches(); let matches = App::from(yaml).get_matches();
let cores = parse_core_bind_arg(&matches.value_of("cores").unwrap()) let cores = parse_core_bind_arg(matches.value_of("cores").unwrap())
.expect("No valid core count given!"); .expect("No valid core count given!");
let broker_port = matches let broker_port = matches
.value_of("broker_port") .value_of("broker_port")
@ -78,7 +78,6 @@ pub fn libafl_main() {
println!("Workdir: {:?}", workdir.to_string_lossy().to_string()); println!("Workdir: {:?}", workdir.to_string_lossy().to_string());
let _service = StdShMemService::start().expect("Failed to start ShMem service");
let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory");
let stats = MultiStats::new(|s| println!("{}", s)); let stats = MultiStats::new(|s| println!("{}", s));


@ -1,5 +1,6 @@
FUZZER_NAME="fuzzer_mozjpeg" FUZZER_NAME="fuzzer_mozjpeg"
PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UNAME := $(shell uname)
PHONY: all PHONY: all
@ -43,7 +44,13 @@ run: all
sleep 0.2 sleep 0.2
./$(FUZZER_NAME) >/dev/null 2>/dev/null & ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
ifeq ($(UNAME), Darwin)
short_test: libafl_cc
$(warning "Skipping build on MacOS as libpng in Github is ancient, see LibAFL GH issue #254")
else
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
timeout 11s ./$(FUZZER_NAME) & timeout 11s ./$(FUZZER_NAME) &
sleep 0.2 sleep 0.2
timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
@ -51,6 +58,8 @@ short_test: all
timeout 10s taskset -c 2 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 2 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
timeout 10s taskset -c 3 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 3 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
endif
test: all test: all
timeout 60s ./$(FUZZER_NAME) & timeout 60s ./$(FUZZER_NAME) &
sleep 0.2 sleep 0.2


@ -60,8 +60,16 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
// The restarting state will spawn the same process again as child, then restarted it each time it crashes. // The restarting state will spawn the same process again as child, then restarted it each time it crashes.
let (state, mut restarting_mgr) = let (state, mut restarting_mgr) =
setup_restarting_mgr_std(stats, broker_port, "default".into()) match setup_restarting_mgr_std(stats, broker_port, "default".into()) {
.expect("Failed to setup the restarter"); Ok(tuple) => tuple,
Err(Error::ShuttingDown) => {
println!("\nFinished fuzzing. Good bye.");
return Ok(());
}
Err(err) => {
panic!("Failed to setup the restarter: {:?}", err);
}
};
// Create an observation channel using the coverage map // Create an observation channel using the coverage map
let edges = unsafe { &mut EDGES_MAP[0..MAX_EDGES_NUM] }; let edges = unsafe { &mut EDGES_MAP[0..MAX_EDGES_NUM] };
@ -156,12 +164,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
// In case the corpus is empty (on first run), reset // In case the corpus is empty (on first run), reset
if state.corpus().count() < 1 { if state.corpus().count() < 1 {
state state
.load_initial_inputs( .load_initial_inputs(&mut fuzzer, &mut executor, &mut restarting_mgr, corpus_dirs)
&mut fuzzer,
&mut executor,
&mut restarting_mgr,
&corpus_dirs,
)
.unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &corpus_dirs)); .unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &corpus_dirs));
println!("We imported {} inputs from disk.", state.corpus().count()); println!("We imported {} inputs from disk.", state.corpus().count());
} }


@ -1,5 +1,6 @@
FUZZER_NAME="fuzzer_libpng" FUZZER_NAME="fuzzer_libpng"
PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UNAME := $(shell uname)
PHONY: all PHONY: all
@ -43,7 +44,13 @@ run: all
sleep 0.2 sleep 0.2
./$(FUZZER_NAME) >/dev/null 2>/dev/null & ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
ifeq ($(UNAME), Darwin)
short_test: libafl_cc
$(warning "The libpng linking step is currently broken on MacOS! See Issue #246")
else
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
timeout 11s ./$(FUZZER_NAME) & timeout 11s ./$(FUZZER_NAME) &
sleep 0.2 sleep 0.2
timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
@ -51,6 +58,8 @@ short_test: all
timeout 10s taskset -c 2 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 2 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
timeout 10s taskset -c 3 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 3 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
endif
test: all test: all
timeout 60s ./$(FUZZER_NAME) & timeout 60s ./$(FUZZER_NAME) &
sleep 0.2 sleep 0.2


@ -5,8 +5,11 @@ use core::time::Duration;
use std::{env, path::PathBuf}; use std::{env, path::PathBuf};
use libafl::{ use libafl::{
bolts::tuples::{tuple_list, Merge}, bolts::{
bolts::{current_nanos, rands::StdRand}, current_nanos,
rands::StdRand,
tuples::{tuple_list, Merge},
},
corpus::{ corpus::{
Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus, Corpus, InMemoryCorpus, IndexesLenTimeMinimizerCorpusScheduler, OnDiskCorpus,
PowerQueueCorpusScheduler, PowerQueueCorpusScheduler,
@ -167,12 +170,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
// In case the corpus is empty (on first run), reset // In case the corpus is empty (on first run), reset
if state.corpus().count() < 1 { if state.corpus().count() < 1 {
state state
.load_initial_inputs( .load_initial_inputs(&mut fuzzer, &mut executor, &mut restarting_mgr, corpus_dirs)
&mut fuzzer,
&mut executor,
&mut restarting_mgr,
&corpus_dirs,
)
.unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &corpus_dirs)); .unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &corpus_dirs));
println!("We imported {} inputs from disk.", state.corpus().count()); println!("We imported {} inputs from disk.", state.corpus().count());
} }


@ -1,5 +1,6 @@
FUZZER_NAME="fuzzer_libpng" FUZZER_NAME="fuzzer_libpng"
PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) PROJECT_DIR=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UNAME := $(shell uname)
PHONY: all PHONY: all
@ -41,8 +42,16 @@ clean:
run: all run: all
./$(FUZZER_NAME) --cores 0 & ./$(FUZZER_NAME) --cores 0 &
ifeq ($(UNAME), Darwin)
short_test: libafl_cc
$(warning "The libpng linking step is currently broken on MacOS! See Issue #246")
else
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
timeout 10s ./$(FUZZER_NAME) --cores 0 & timeout 10s ./$(FUZZER_NAME) --cores 0 &
endif
test: all test: all
timeout 60s ./$(FUZZER_NAME) --cores 0 & timeout 60s ./$(FUZZER_NAME) --cores 0 &


@ -13,7 +13,7 @@ use libafl::{
launcher::Launcher, launcher::Launcher,
os::parse_core_bind_arg, os::parse_core_bind_arg,
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge}, tuples::{tuple_list, Merge},
}, },
corpus::{ corpus::{
@ -46,7 +46,7 @@ pub fn libafl_main() {
let broker_port = 1337; let broker_port = 1337;
let cores = parse_core_bind_arg(&matches.value_of("cores").unwrap()) let cores = parse_core_bind_arg(matches.value_of("cores").unwrap())
.expect("No valid core count given!"); .expect("No valid core count given!");
println!( println!(
@ -54,7 +54,6 @@ pub fn libafl_main() {
env::current_dir().unwrap().to_string_lossy().to_string() env::current_dir().unwrap().to_string_lossy().to_string()
); );
let _service = StdShMemService::start().expect("Failed to start ShMem service");
let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory");
let stats = MultiStats::new(|s| println!("{}", s)); let stats = MultiStats::new(|s| println!("{}", s));


@ -27,6 +27,7 @@ run: all
./$(FUZZER_NAME) >/dev/null 2>/dev/null & ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
timeout 11s ./$(FUZZER_NAME) & timeout 11s ./$(FUZZER_NAME) &
sleep 0.2 sleep 0.2
timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &

Binary file not shown.


@ -1 +1,2 @@
libpng-* libpng-*
libfuzzer_stb_image


@ -27,6 +27,7 @@ run: all
./$(FUZZER_NAME) >/dev/null 2>/dev/null & ./$(FUZZER_NAME) >/dev/null 2>/dev/null &
short_test: all short_test: all
rm -rf libafl_unix_shmem_server || true
timeout 11s ./$(FUZZER_NAME) & timeout 11s ./$(FUZZER_NAME) &
sleep 0.2 sleep 0.2
timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null & timeout 10s taskset -c 0 ./$(FUZZER_NAME) >/dev/null 2>/dev/null &


@ -117,8 +117,6 @@ fn main() {
#[cfg(unix)] #[cfg(unix)]
fn main() { fn main() {
use libafl::bolts::shmem::StdShMemService;
/* The main node has a broker, and a few worker threads */ /* The main node has a broker, and a few worker threads */
let mode = std::env::args() let mode = std::env::args()
@ -139,9 +137,6 @@ fn main() {
match mode.as_str() { match mode.as_str() {
"broker" => { "broker" => {
// The shmem service is needed on some platforms like Android and MacOS
let _service = StdShMemService::start().unwrap();
let mut broker = llmp::LlmpBroker::new(StdShMemProvider::new().unwrap()).unwrap(); let mut broker = llmp::LlmpBroker::new(StdShMemProvider::new().unwrap()).unwrap();
broker.launch_tcp_listener_on(port).unwrap(); broker.launch_tcp_listener_on(port).unwrap();
broker.loop_forever(&mut broker_message_hook, Some(Duration::from_millis(5))) broker.loop_forever(&mut broker_message_hook, Some(Duration::from_millis(5)))


@ -11,8 +11,8 @@ use crate::bolts::current_nanos;
/// ///
/// This function is a wrapper around different ways to get a timestamp, fast /// This function is a wrapper around different ways to get a timestamp, fast
/// In this way, an experiment only has to /// In this way, an experiment only has to
/// change this implementation rather than every instead of [`cpu::read_time_counter`] /// change this implementation rather than every instead of `read_time_counter`.
/// It is using [`rdtsc`] on `x86_64` and `x86`. /// It is using `rdtsc` on `x86_64` and `x86`.
#[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
#[must_use] #[must_use]
pub fn read_time_counter() -> u64 { pub fn read_time_counter() -> u64 {


@ -57,7 +57,7 @@ where
configuration: String, configuration: String,
/// The 'main' function to run for each client forked. This probably shouldn't return /// The 'main' function to run for each client forked. This probably shouldn't return
run_client: LauncherClientFnRef<'a, I, OT, S, SP>, run_client: LauncherClientFnRef<'a, I, OT, S, SP>,
/// The broker port to use (or to attach to, in case [`Self::with_broker`] is `false`) /// The broker port to use (or to attach to, in case [`Self::spawn_broker`] is `false`)
#[builder(default = 1337_u16)] #[builder(default = 1337_u16)]
broker_port: u16, broker_port: u16,
/// The list of cores to run on /// The list of cores to run on
@ -111,6 +111,7 @@ where
println!("child spawned and bound to core {}", id); println!("child spawned and bound to core {}", id);
} }
ForkResult::Child => { ForkResult::Child => {
println!("{:?} PostFork", unsafe { libc::getpid() });
self.shmem_provider.post_fork(true)?; self.shmem_provider.post_fork(true)?;
#[cfg(feature = "std")] #[cfg(feature = "std")]


@ -2659,14 +2659,12 @@ mod tests {
Tag, Tag,
}; };
use crate::bolts::shmem::{ShMemProvider, StdShMemProvider, StdShMemService}; use crate::bolts::shmem::{ShMemProvider, StdShMemProvider};
#[test] #[test]
#[serial] #[serial]
pub fn llmp_connection() { pub fn llmp_connection() {
#[allow(unused_variables)] #[allow(unused_variables)]
let service = StdShMemService::start().unwrap();
let shmem_provider = StdShMemProvider::new().unwrap(); let shmem_provider = StdShMemProvider::new().unwrap();
let mut broker = match LlmpConnection::on_port(shmem_provider.clone(), 1337).unwrap() { let mut broker = match LlmpConnection::on_port(shmem_provider.clone(), 1337).unwrap() {
IsClient { client: _ } => panic!("Could not bind to port as broker"), IsClient { client: _ } => panic!("Could not bind to port as broker"),


@ -1,7 +1,7 @@
/*! /*!
On `Android`, we can only share maps between processes by serializing fds over sockets. On `Android`, we can only share maps between processes by serializing fds over sockets.
On `MacOS`, we cannot rely on reference counting for Maps. On `MacOS`, we cannot rely on reference counting for Maps.
Hence, the [`unix_shmem_server`] keeps track of existing maps, creates new maps for clients, Hence, the `unix_shmem_server` keeps track of existing maps, creates new maps for clients,
and forwards them over unix domain sockets. and forwards them over unix domain sockets.
*/ */
@ -13,8 +13,8 @@ use core::mem::ManuallyDrop;
use hashbrown::HashMap; use hashbrown::HashMap;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{ use std::{
borrow::BorrowMut,
cell::RefCell, cell::RefCell,
fs,
io::{Read, Write}, io::{Read, Write},
marker::PhantomData, marker::PhantomData,
rc::{Rc, Weak}, rc::{Rc, Weak},
@ -22,6 +22,9 @@ use std::{
thread::JoinHandle, thread::JoinHandle,
}; };
#[cfg(any(target_os = "macos", target_os = "ios"))]
use std::fs;
#[cfg(all(feature = "std", unix))] #[cfg(all(feature = "std", unix))]
use nix::poll::{poll, PollFd, PollFlags}; use nix::poll::{poll, PollFd, PollFlags};
@ -46,13 +49,19 @@ const UNIX_SERVER_NAME: &str = "./libafl_unix_shmem_server";
/// Hands out served shared maps, as used on Android. /// Hands out served shared maps, as used on Android.
#[derive(Debug)] #[derive(Debug)]
pub struct ServedShMemProvider<SP> { pub struct ServedShMemProvider<SP>
where
SP: ShMemProvider,
{
stream: UnixStream, stream: UnixStream,
inner: SP, inner: SP,
id: i32, id: i32,
/// A referencde to the [`ShMemService`] backing this provider.
/// It will be started only once for all processes and providers.
service: ShMemService<SP>,
} }
/// [`ShMem`] that got served from a [`AshmemService`] via domain sockets and can now be used in this program. /// [`ShMem`] that got served from a [`ShMemService`] via domain sockets and can now be used in this program.
/// It works around Android's lack of "proper" shared maps. /// It works around Android's lack of "proper" shared maps.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ServedShMem<SH> pub struct ServedShMem<SH>
@ -85,7 +94,10 @@ where
} }
} }
impl<SP> ServedShMemProvider<SP> { impl<SP> ServedShMemProvider<SP>
where
SP: ShMemProvider,
{
/// Send a request to the server, and wait for a response /// Send a request to the server, and wait for a response
#[allow(clippy::similar_names)] // id and fd #[allow(clippy::similar_names)] // id and fd
fn send_receive(&mut self, request: ServedShMemRequest) -> Result<(i32, i32), Error> { fn send_receive(&mut self, request: ServedShMemRequest) -> Result<(i32, i32), Error> {
@ -125,7 +137,9 @@ where
SP: ShMemProvider, SP: ShMemProvider,
{ {
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self::new().unwrap() let mut cloned = Self::new().unwrap();
cloned.service = self.service.clone();
cloned
} }
} }
@ -136,11 +150,16 @@ where
type Mem = ServedShMem<SP::Mem>; type Mem = ServedShMem<SP::Mem>;
/// Connect to the server and return a new [`ServedShMemProvider`] /// Connect to the server and return a new [`ServedShMemProvider`]
/// Will try to spawn a [`ShMemService`]. This will only work for the first try.
fn new() -> Result<Self, Error> { fn new() -> Result<Self, Error> {
// Needed for MacOS and Android to get sharedmaps working.
let service = ShMemService::<SP>::start();
let mut res = Self { let mut res = Self {
stream: UnixStream::connect_to_unix_addr(&UnixSocketAddr::new(UNIX_SERVER_NAME)?)?, stream: UnixStream::connect_to_unix_addr(&UnixSocketAddr::new(UNIX_SERVER_NAME)?)?,
inner: SP::new()?, inner: SP::new()?,
id: -1, id: -1,
service,
}; };
let (id, _) = res.send_receive(ServedShMemRequest::Hello(None))?; let (id, _) = res.send_receive(ServedShMemRequest::Hello(None))?;
res.id = id; res.id = id;
@ -175,6 +194,10 @@ where
fn post_fork(&mut self, is_child: bool) -> Result<(), Error> { fn post_fork(&mut self, is_child: bool) -> Result<(), Error> {
if is_child { if is_child {
// After fork, only the parent keeps the join handle.
if let ShMemService::Started { bg_thread, .. } = &mut self.service {
bg_thread.borrow_mut().lock().unwrap().join_handle = None;
}
// After fork, the child needs to reconnect as to not share the fds with the parent. // After fork, the child needs to reconnect as to not share the fds with the parent.
self.stream = self.stream =
UnixStream::connect_to_unix_addr(&UnixSocketAddr::new(UNIX_SERVER_NAME)?)?; UnixStream::connect_to_unix_addr(&UnixSocketAddr::new(UNIX_SERVER_NAME)?)?;
@ -212,6 +235,7 @@ pub enum ServedShMemRequest {
Exit, Exit,
} }
/// Client side communicating with the [`ShMemServer`]
#[derive(Debug)] #[derive(Debug)]
struct SharedShMemClient<SH> struct SharedShMemClient<SH>
where where
@ -233,17 +257,7 @@ where
} }
} }
/// The [`AshmemService`] is a service handing out [`ShMem`] pages via unix domain sockets. /// Response from Server to Client
/// It is mainly used and needed on Android.
#[derive(Debug)]
pub struct ShMemService<SP>
where
SP: ShMemProvider,
{
join_handle: Option<JoinHandle<Result<(), Error>>>,
phantom: PhantomData<*const SP>,
}
#[derive(Debug)] #[derive(Debug)]
enum ServedShMemResponse<SP> enum ServedShMemResponse<SP>
where where
@ -254,61 +268,42 @@ where
RefCount(u32), RefCount(u32),
} }
impl<SP> ShMemService<SP> /// Report the status of the [`ShMem`] background thread start status
where #[derive(Clone, Copy, Debug, PartialEq, Eq)]
SP: ShMemProvider, enum ShMemServiceStatus {
{ Starting,
/// Create a new [`ShMemService`], then listen and service incoming connections in a new thread. Started,
pub fn start() -> Result<Self, Error> { Failed,
println!("Starting ShMemService");
#[allow(clippy::mutex_atomic)]
let syncpair = Arc::new((Mutex::new(false), Condvar::new()));
let childsyncpair = Arc::clone(&syncpair);
let join_handle = thread::spawn(move || {
println!("Thread...");
let mut worker = match ServedShMemServiceWorker::<SP>::new() {
Ok(worker) => worker,
Err(e) => {
// Make sure the parent processes can continue
let (lock, cvar) = &*childsyncpair;
*lock.lock().unwrap() = true;
cvar.notify_one();
println!("Error creating ShMemService: {:?}", e);
return Err(e);
}
};
if let Err(e) = worker.listen(UNIX_SERVER_NAME, &childsyncpair) {
println!("Error spawning ShMemService: {:?}", e);
Err(e)
} else {
Ok(())
}
});
let (lock, cvar) = &*syncpair;
let mut started = lock.lock().unwrap();
while !*started {
started = cvar.wait(started).unwrap();
}
Ok(Self {
join_handle: Some(join_handle),
phantom: PhantomData,
})
}
} }
impl<SP> Drop for ShMemService<SP> /// The [`ShMemService`] is a service handing out [`ShMem`] pages via unix domain sockets.
/// It is mainly used and needed on Android.
#[derive(Debug, Clone)]
pub enum ShMemService<SP>
where where
SP: ShMemProvider, SP: ShMemProvider,
{ {
Started {
bg_thread: Arc<Mutex<ShMemServiceThread>>,
phantom: PhantomData<SP>,
},
Failed {
err_msg: String,
phantom: PhantomData<SP>,
},
}
/// Wrapper for the service background thread.
/// When this is dropped, the background thread will get killed and joined.
#[derive(Debug)]
pub struct ShMemServiceThread {
join_handle: Option<JoinHandle<Result<(), Error>>>,
}
impl Drop for ShMemServiceThread {
fn drop(&mut self) { fn drop(&mut self) {
let join_handle = self.join_handle.take(); if self.join_handle.is_some() {
// TODO: Guess we could use the `cvar` // Mutex here instead? println!("Stopping ShMemService");
if let Some(join_handle) = join_handle {
let mut stream = match UnixStream::connect_to_unix_addr( let mut stream = match UnixStream::connect_to_unix_addr(
&UnixSocketAddr::new(UNIX_SERVER_NAME).unwrap(), &UnixSocketAddr::new(UNIX_SERVER_NAME).unwrap(),
) { ) {
@ -325,10 +320,80 @@ where
stream stream
.write_all(&message) .write_all(&message)
.expect("Failed to send bye-message to ShMemService"); .expect("Failed to send bye-message to ShMemService");
join_handle self.join_handle
.take()
.unwrap()
.join() .join()
.expect("Failed to join ShMemService thread!") .expect("Failed to join ShMemService thread!")
.expect("Error in ShMemService thread!"); .expect("Error in ShMemService background thread!");
// try to remove the file from fs, and ignore errors.
#[cfg(any(target_os = "macos", target_os = "ios"))]
fs::remove_file(&UNIX_SERVER_NAME).unwrap();
}
}
}
impl<SP> ShMemService<SP>
where
SP: ShMemProvider,
{
/// Create a new [`ShMemService`], then listen and service incoming connections in a new thread.
/// Returns [`ShMemService::Failed`] on error.
#[must_use]
pub fn start() -> Self {
#[allow(clippy::mutex_atomic)]
let syncpair = Arc::new((Mutex::new(ShMemServiceStatus::Starting), Condvar::new()));
let childsyncpair = Arc::clone(&syncpair);
let join_handle = thread::spawn(move || {
let mut worker = match ServedShMemServiceWorker::<SP>::new() {
Ok(worker) => worker,
Err(e) => {
// Make sure the parent processes can continue
let (lock, cvar) = &*childsyncpair;
*lock.lock().unwrap() = ShMemServiceStatus::Failed;
cvar.notify_one();
println!("Error creating ShMemService: {:?}", e);
return Err(e);
}
};
if let Err(e) = worker.listen(UNIX_SERVER_NAME, &childsyncpair) {
println!("Error spawning ShMemService: {:?}", e);
Err(e)
} else {
Ok(())
}
});
let (lock, cvar) = &*syncpair;
let mut success = lock.lock().unwrap();
while *success == ShMemServiceStatus::Starting {
success = cvar.wait(success).unwrap();
}
match *success {
ShMemServiceStatus::Starting => panic!("Unreachable"),
ShMemServiceStatus::Started => {
println!("Started ShMem Service");
// We got a service
Self::Started {
bg_thread: Arc::new(Mutex::new(ShMemServiceThread {
join_handle: Some(join_handle),
})),
phantom: PhantomData,
}
}
ShMemServiceStatus::Failed => {
// We ignore errors as multiple threads may call start.
let err = join_handle.join();
let err = err.expect("Failed to join ShMemService thread!");
let err = err.expect_err("Expected service start to have failed, but it didn't?");
Self::Failed {
err_msg: format!("{}", err),
phantom: PhantomData,
}
}
} }
} }
} }
@ -455,7 +520,7 @@ where
match response { match response {
ServedShMemResponse::Mapping(mapping) => { ServedShMemResponse::Mapping(mapping) => {
let id = mapping.borrow().id(); let id = mapping.as_ref().borrow().id();
let server_fd: i32 = id.to_string().parse().unwrap(); let server_fd: i32 = id.to_string().parse().unwrap();
let client = self.clients.get_mut(&client_id).unwrap(); let client = self.clients.get_mut(&client_id).unwrap();
client client
@ -482,21 +547,18 @@ where
fn listen( fn listen(
&mut self, &mut self,
filename: &str, filename: &str,
syncpair: &Arc<(Mutex<bool>, Condvar)>, syncpair: &Arc<(Mutex<ShMemServiceStatus>, Condvar)>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let listener = if let Ok(listener) = let listener = match UnixListener::bind_unix_addr(&UnixSocketAddr::new(filename)?) {
UnixListener::bind_unix_addr(&UnixSocketAddr::new(filename)?) Ok(listener) => listener,
{ Err(err) => {
listener let (lock, cvar) = &**syncpair;
} else { *lock.lock().unwrap() = ShMemServiceStatus::Failed;
let (lock, cvar) = &**syncpair; cvar.notify_one();
*lock.lock().unwrap() = true;
cvar.notify_one();
println!("Error in ShMem Worker"); return Err(Error::Unknown(format!(
return Err(Error::Unknown( "The ShMem server appears to already be running. We are probably a client. Error: {:?}", err)));
"The server appears to already be running. We are probably a client".to_string(), }
));
}; };
let mut poll_fds: Vec<PollFd> = vec![PollFd::new( let mut poll_fds: Vec<PollFd> = vec![PollFd::new(
@ -505,7 +567,7 @@ where
)]; )];
let (lock, cvar) = &**syncpair; let (lock, cvar) = &**syncpair;
*lock.lock().unwrap() = true; *lock.lock().unwrap() = ShMemServiceStatus::Started;
cvar.notify_one(); cvar.notify_one();
loop { loop {
@ -570,14 +632,3 @@ where
} }
} }
} }
impl<SP> Drop for ServedShMemServiceWorker<SP>
where
SP: ShMemProvider,
{
fn drop(&mut self) {
// try to remove the file from fs, and ignore errors.
#[cfg(target_os = "macos")]
drop(fs::remove_file(&UNIX_SERVER_NAME));
}
}
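For completeness, a sketch of driving the reworked service type by hand (normally `ServedShMemProvider::new()` does this for you). The paths `libafl::bolts::os::unix_shmem_server::ShMemService` and `libafl::bolts::shmem::unix_shmem::MmapShMemProvider` are taken from imports and definitions in this diff; the exact feature gates and visibility are assumptions rather than guarantees:

```rust
// Sketch only: assumes the module paths below are reachable on a unix build
// of libafl with the "std" feature enabled.
#[cfg(unix)]
fn start_shmem_service_manually() {
    use libafl::bolts::os::unix_shmem_server::ShMemService;
    use libafl::bolts::shmem::unix_shmem::MmapShMemProvider;

    // start() no longer returns a Result: a failed start (for example because
    // a server is already listening and we are merely a client) is reported
    // through the Failed variant instead of an Err.
    match ShMemService::<MmapShMemProvider>::start() {
        ShMemService::Started { .. } => println!("ShMem service running in a background thread"),
        ShMemService::Failed { err_msg, .. } => println!("ShMem service not started: {}", err_msg),
    }
}

fn main() {
    #[cfg(unix)]
    start_shmem_service_manually();
}
```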


@ -300,32 +300,35 @@ impl<T: ShMemProvider> Drop for RcShMem<T> {
/// Useful if the `ShMemProvider` needs to keep local state. /// Useful if the `ShMemProvider` needs to keep local state.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
#[cfg(all(unix, feature = "std"))] #[cfg(all(unix, feature = "std"))]
pub struct RcShMemProvider<T: ShMemProvider> { pub struct RcShMemProvider<SP>
where
SP: ShMemProvider,
{
/// The wrapped [`ShMemProvider`]. /// The wrapped [`ShMemProvider`].
internal: Rc<RefCell<T>>, internal: Rc<RefCell<SP>>,
/// A pipe the child uses to communicate progress to the parent after fork. /// A pipe the child uses to communicate progress to the parent after fork.
/// This prevents a potential race condition when using the [`AshmemService`]. /// This prevents a potential race condition when using the [`ShMemService`].
#[cfg(unix)] #[cfg(unix)]
child_parent_pipe: Option<Pipe>, child_parent_pipe: Option<Pipe>,
#[cfg(unix)] #[cfg(unix)]
/// A pipe the parent uses to communicate progress to the child after fork. /// A pipe the parent uses to communicate progress to the child after fork.
/// This prevents a potential race condition when using the [`AshmemService`]. /// This prevents a potential race condition when using the [`ShMemService`].
parent_child_pipe: Option<Pipe>, parent_child_pipe: Option<Pipe>,
} }
#[cfg(all(unix, feature = "std"))] #[cfg(all(unix, feature = "std"))]
unsafe impl<T: ShMemProvider> Send for RcShMemProvider<T> {} unsafe impl<SP: ShMemProvider> Send for RcShMemProvider<SP> {}
#[cfg(all(unix, feature = "std"))] #[cfg(all(unix, feature = "std"))]
impl<T> ShMemProvider for RcShMemProvider<T> impl<SP> ShMemProvider for RcShMemProvider<SP>
where where
T: ShMemProvider + alloc::fmt::Debug, SP: ShMemProvider + alloc::fmt::Debug,
{ {
type Mem = RcShMem<T>; type Mem = RcShMem<SP>;
fn new() -> Result<Self, Error> { fn new() -> Result<Self, Error> {
Ok(Self { Ok(Self {
internal: Rc::new(RefCell::new(T::new()?)), internal: Rc::new(RefCell::new(SP::new()?)),
child_parent_pipe: None, child_parent_pipe: None,
parent_child_pipe: None, parent_child_pipe: None,
}) })
@ -387,9 +390,9 @@ where
} }
#[cfg(all(unix, feature = "std"))] #[cfg(all(unix, feature = "std"))]
impl<T> RcShMemProvider<T> impl<SP> RcShMemProvider<SP>
where where
T: ShMemProvider, SP: ShMemProvider,
{ {
/// "set" the "latch" /// "set" the "latch"
/// (we abuse `pipes` as `semaphores`, as they don't need an additional shared mem region.) /// (we abuse `pipes` as `semaphores`, as they don't need an additional shared mem region.)
@ -450,9 +453,9 @@ where
} }
#[cfg(all(unix, feature = "std"))] #[cfg(all(unix, feature = "std"))]
impl<T> Default for RcShMemProvider<T> impl<SP> Default for RcShMemProvider<SP>
where where
T: ShMemProvider + alloc::fmt::Debug, SP: ShMemProvider + alloc::fmt::Debug,
{ {
fn default() -> Self { fn default() -> Self {
Self::new().unwrap() Self::new().unwrap()
@ -462,7 +465,7 @@ where
/// A Unix sharedmem implementation. /// A Unix sharedmem implementation.
/// ///
/// On Android, this is partially reused to wrap [`unix_shmem::ashmem::AshmemShMem`], /// On Android, this is partially reused to wrap [`unix_shmem::ashmem::AshmemShMem`],
/// Although for an [`unix_shmem::ashmem::ServedShMemProvider`] using a unix domain socket /// Although for an [`ServedShMemProvider`] using a unix domain socket
/// Is needed on top. /// Is needed on top.
#[cfg(all(unix, feature = "std"))] #[cfg(all(unix, feature = "std"))]
pub mod unix_shmem { pub mod unix_shmem {
@ -672,7 +675,7 @@ pub mod unix_shmem {
} }
} }
/// Implement [`ShMemProvider`] for [`UnixShMemProvider`]. /// Implement [`ShMemProvider`] for [`MmapShMemProvider`].
#[cfg(unix)] #[cfg(unix)]
impl ShMemProvider for MmapShMemProvider { impl ShMemProvider for MmapShMemProvider {
type Mem = MmapShMem; type Mem = MmapShMem;
@ -1337,13 +1340,11 @@ impl<T: ShMem> std::io::Seek for ShMemCursor<T> {
mod tests { mod tests {
use serial_test::serial; use serial_test::serial;
use crate::bolts::shmem::{ShMem, ShMemProvider, StdShMemProvider, StdShMemService}; use crate::bolts::shmem::{ShMem, ShMemProvider, StdShMemProvider};
#[test] #[test]
#[serial] #[serial]
fn test_shmem_service() { fn test_shmem_service() {
#[allow(unused_variables)]
let service = StdShMemService::start().unwrap();
let mut provider = StdShMemProvider::new().unwrap(); let mut provider = StdShMemProvider::new().unwrap();
let mut map = provider.new_map(1024).unwrap(); let mut map = provider.new_map(1024).unwrap();
map.map_mut()[0] = 1; map.map_mut()[0] = 1;


@ -77,7 +77,7 @@ where
self.shmem.write_to_env(env_name) self.shmem.write_to_env(env_name)
} }
/// Create a [`StateRrestore`] from `env` variable name /// Create a [`StateRestorer`] from `env` variable name
pub fn from_env(shmem_provider: &mut SP, env_name: &str) -> Result<Self, Error> { pub fn from_env(shmem_provider: &mut SP, env_name: &str) -> Result<Self, Error> {
Ok(Self { Ok(Self {
shmem: shmem_provider.existing_from_env(env_name)?, shmem: shmem_provider.existing_from_env(env_name)?,
@ -242,7 +242,7 @@ mod tests {
use serial_test::serial; use serial_test::serial;
use crate::bolts::{ use crate::bolts::{
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
staterestore::StateRestorer, staterestore::StateRestorer,
}; };
@ -251,8 +251,6 @@ mod tests {
fn test_state_restore() { fn test_state_restore() {
const TESTMAP_SIZE: usize = 1024; const TESTMAP_SIZE: usize = 1024;
let _service = StdShMemService::start().unwrap();
let mut shmem_provider = StdShMemProvider::new().unwrap(); let mut shmem_provider = StdShMemProvider::new().unwrap();
let shmem = shmem_provider.new_map(TESTMAP_SIZE).unwrap(); let shmem = shmem_provider.new_map(TESTMAP_SIZE).unwrap();
let mut state_restorer = StateRestorer::<StdShMemProvider>::new(shmem); let mut state_restorer = StateRestorer::<StdShMemProvider>::new(shmem);


@ -394,8 +394,10 @@ where
/// Iterate over a tuple, executing the given `expr` for each element. /// Iterate over a tuple, executing the given `expr` for each element.
#[macro_export] #[macro_export]
#[allow(clippy::items_after_statements)]
macro_rules! tuple_for_each { macro_rules! tuple_for_each {
($fn_name:ident, $trait_name:path, $tuple_name:ident, $body:expr) => { ($fn_name:ident, $trait_name:path, $tuple_name:ident, $body:expr) => {
#[allow(clippy::items_after_statements)]
mod $fn_name { mod $fn_name {
pub trait ForEach { pub trait ForEach {
fn for_each(&self); fn for_each(&self);
@ -410,6 +412,7 @@ macro_rules! tuple_for_each {
Head: $trait_name, Head: $trait_name,
Tail: tuple_list::TupleList + ForEach, Tail: tuple_list::TupleList + ForEach,
{ {
#[allow(clippy::redundant_closure_call)]
fn for_each(&self) { fn for_each(&self) {
($body)(&self.0); ($body)(&self.0);
self.1.for_each(); self.1.for_each();
@ -428,6 +431,7 @@ macro_rules! tuple_for_each {
#[macro_export] #[macro_export]
macro_rules! tuple_for_each_mut { macro_rules! tuple_for_each_mut {
($fn_name:ident, $trait_name:path, $tuple_name:ident, $body:expr) => { ($fn_name:ident, $trait_name:path, $tuple_name:ident, $body:expr) => {
#[allow(clippy::items_after_statements)]
mod $fn_name { mod $fn_name {
pub trait ForEachMut { pub trait ForEachMut {
fn for_each_mut(&mut self); fn for_each_mut(&mut self);
@ -442,6 +446,7 @@ macro_rules! tuple_for_each_mut {
Head: $trait_name, Head: $trait_name,
Tail: tuple_list::TupleList + ForEachMut, Tail: tuple_list::TupleList + ForEachMut,
{ {
#[allow(clippy::redundant_closure_call)]
fn for_each_mut(&mut self) { fn for_each_mut(&mut self) {
($body)(&mut self.0); ($body)(&mut self.0);
self.1.for_each_mut(); self.1.for_each_mut();
@ -456,21 +461,21 @@ macro_rules! tuple_for_each_mut {
}; };
} }
/* #[cfg(test)]
#[cfg(feature = "std")]
#[test]
#[allow(clippy::items_after_statements)]
pub fn test_macros() { pub fn test_macros() {
let mut t = tuple_list!(1, "a");
let mut t = tuple_list!(1, "a"); tuple_for_each!(f1, std::fmt::Display, t, |x| {
println!("{}", x);
tuple_for_each!(f1, std::fmt::Display, t, |x| { });
println!("{}", x);
});
tuple_for_each_mut!(f2, std::fmt::Display, t, |x| {
println!("{}", x);
});
tuple_for_each_mut!(f2, std::fmt::Display, t, |x| {
println!("{}", x);
});
} }
*/
/* /*


@ -15,7 +15,7 @@ use std::net::{SocketAddr, ToSocketAddrs};
#[cfg(feature = "std")] #[cfg(feature = "std")]
use crate::bolts::{ use crate::bolts::{
llmp::{LlmpClient, LlmpConnection}, llmp::{LlmpClient, LlmpConnection},
shmem::{StdShMemProvider, StdShMemService}, shmem::StdShMemProvider,
staterestore::StateRestorer, staterestore::StateRestorer,
}; };
@ -662,7 +662,7 @@ pub enum ManagerKind {
Any, Any,
/// A client, getting messages from a local broker. /// A client, getting messages from a local broker.
Client { cpu_core: Option<CoreId> }, Client { cpu_core: Option<CoreId> },
/// A [`LlmpBroker`], forwarding the packets of local clients. /// A [`llmp::LlmpBroker`], forwarding the packets of local clients.
Broker, Broker,
} }
@ -689,8 +689,6 @@ where
OT: ObserversTuple<I, S> + serde::de::DeserializeOwned, OT: ObserversTuple<I, S> + serde::de::DeserializeOwned,
S: DeserializeOwned, S: DeserializeOwned,
{ {
let _service = StdShMemService::start().expect("Error starting ShMem Service");
RestartingMgr::builder() RestartingMgr::builder()
.shmem_provider(StdShMemProvider::new()?) .shmem_provider(StdShMemProvider::new()?)
.stats(Some(stats)) .stats(Some(stats))
@ -934,7 +932,7 @@ mod tests {
bolts::{ bolts::{
llmp::{LlmpClient, LlmpSharedMap}, llmp::{LlmpClient, LlmpSharedMap},
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
staterestore::StateRestorer, staterestore::StateRestorer,
tuples::tuple_list, tuples::tuple_list,
}, },
@ -952,8 +950,6 @@ mod tests {
#[test] #[test]
#[serial] #[serial]
fn test_mgr_state_restore() { fn test_mgr_state_restore() {
let _service = StdShMemService::start().unwrap();
let rand = StdRand::with_seed(0); let rand = StdRand::with_seed(0);
let mut corpus = InMemoryCorpus::<BytesInput>::new(); let mut corpus = InMemoryCorpus::<BytesInput>::new();


@ -215,7 +215,7 @@ where
/// this serializes the [`Event`] and commits it to the [`llmp`] page. /// this serializes the [`Event`] and commits it to the [`llmp`] page.
/// In this case, if you `fire` faster than the broker can consume /// In this case, if you `fire` faster than the broker can consume
/// (for example for each [`Input`], on multiple cores) /// (for example for each [`Input`], on multiple cores)
/// the [`llmp`] [`ShMem`] may fill up and the client will eventually OOM or [`panic`]. /// the [`llmp`] shared map may fill up and the client will eventually OOM or [`panic`].
/// This should not happen for a normal use-cases. /// This should not happen for a normal use-cases.
fn fire(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>; fn fire(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>;
@ -267,7 +267,7 @@ pub trait HasEventManagerId {
} }
/// [`EventManager`] is the main communications hub. /// [`EventManager`] is the main communications hub.
/// For the "normal" multi-processed mode, you may want to look into [`RestartingEventManager`] /// For the "normal" multi-processed mode, you may want to look into [`LlmpRestartingEventManager`]
pub trait EventManager<E, I, S, Z>: pub trait EventManager<E, I, S, Z>:
EventFirer<I, S> + EventProcessor<E, I, S, Z> + EventRestarter<S> + HasEventManagerId EventFirer<I, S> + EventProcessor<E, I, S, Z> + EventRestarter<S> + HasEventManagerId
where where


@ -209,7 +209,7 @@ where
} }
} }
/// Provides a `builder` which can be used to build a [`RestartingMgr`], which is a combination of a /// Provides a `builder` which can be used to build a [`SimpleRestartingEventManager`], which is a combination of a
/// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The
/// `restarter` will start a new process each time the child crashes or times out. /// `restarter` will start a new process each time the child crashes or times out.
#[cfg(feature = "std")] #[cfg(feature = "std")]


@ -618,7 +618,7 @@ mod tests {
use crate::{ use crate::{
bolts::{ bolts::{
shmem::{ShMem, ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMem, ShMemProvider, StdShMemProvider},
tuples::tuple_list, tuples::tuple_list,
}, },
executors::ForkserverExecutor, executors::ForkserverExecutor,
@ -633,8 +633,6 @@ mod tests {
let bin = "echo"; let bin = "echo";
let args = vec![String::from("@@")]; let args = vec![String::from("@@")];
let _service = StdShMemService::start().unwrap();
let mut shmem = StdShMemProvider::new() let mut shmem = StdShMemProvider::new()
.unwrap() .unwrap()
.new_map(MAP_SIZE as usize) .new_map(MAP_SIZE as usize)


@ -4,6 +4,7 @@ Welcome to `LibAFL`
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "RUSTC_IS_NIGHTLY", feature(specialization))] #![cfg_attr(feature = "RUSTC_IS_NIGHTLY", feature(specialization))]
#![deny(rustdoc::broken_intra_doc_links)]
#[macro_use] #[macro_use]
extern crate alloc; extern crate alloc;


@ -108,7 +108,7 @@ where
/// Get the `CmpMap` (mut) /// Get the `CmpMap` (mut)
fn map_mut(&mut self) -> &mut CM; fn map_mut(&mut self) -> &mut CM;
/// Add [`CmpValuesMetadata`] to the State including the logged values. /// Add [`struct@CmpValuesMetadata`] to the State including the logged values.
/// This routine does a basic loop filtering because loop index cmps are not interesting. /// This routine does a basic loop filtering because loop index cmps are not interesting.
fn add_cmpvalues_meta(&mut self, state: &mut S) fn add_cmpvalues_meta(&mut self, state: &mut S)
where where


@ -469,7 +469,7 @@ impl<T: ShMem> MessageFileWriter<ShMemCursor<T>> {
} }
impl MessageFileWriter<ShMemCursor<<StdShMemProvider as ShMemProvider>::Mem>> { impl MessageFileWriter<ShMemCursor<<StdShMemProvider as ShMemProvider>::Mem>> {
/// Creates a new `MessageFileWriter` by reading a [`StdShMem`] from the given environment variable. /// Creates a new `MessageFileWriter` by reading a [`ShMem`] from the given environment variable.
pub fn from_stdshmem_env_with_name(env_name: impl AsRef<str>) -> io::Result<Self> { pub fn from_stdshmem_env_with_name(env_name: impl AsRef<str>) -> io::Result<Self> {
Self::from_shmem( Self::from_shmem(
StdShMemProvider::new() StdShMemProvider::new()
@ -479,7 +479,7 @@ impl MessageFileWriter<ShMemCursor<<StdShMemProvider as ShMemProvider>::Mem>> {
) )
} }
/// Creates a new `MessageFileWriter` by reading a [`StdShMem`] using [`DEFAULT_ENV_NAME`]. /// Creates a new `MessageFileWriter` by reading a [`ShMem`] using [`DEFAULT_ENV_NAME`].
pub fn from_stdshmem_default_env() -> io::Result<Self> { pub fn from_stdshmem_default_env() -> io::Result<Self> {
Self::from_stdshmem_env_with_name(DEFAULT_ENV_NAME) Self::from_stdshmem_env_with_name(DEFAULT_ENV_NAME)
} }


@ -88,7 +88,7 @@ use crate::{
start_timer, Evaluator, start_timer, Evaluator,
}; };
#[cfg(feature = "introspection")] #[cfg(feature = "concolic_mutation")]
use crate::stats::PerfFeature; use crate::stats::PerfFeature;
#[cfg(feature = "concolic_mutation")] #[cfg(feature = "concolic_mutation")]
@ -341,7 +341,7 @@ fn generate_mutations(iter: impl Iterator<Item = (SymExprRef, SymExpr)>) -> Vec<
res res
} }
/// A mutational stage that uses Z3 to solve concolic constraints attached to the [`Testcase`] by the [`ConcolicTracingStage`]. /// A mutational stage that uses Z3 to solve concolic constraints attached to the [`crate::corpus::Testcase`] by the [`ConcolicTracingStage`].
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct SimpleConcolicMutationalStage<C, EM, I, S, Z> pub struct SimpleConcolicMutationalStage<C, EM, I, S, Z>
where where


@ -354,10 +354,10 @@ macro_rules! mark_feedback_time {
/// Client performance statistics /// Client performance statistics
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ClientPerfStats { pub struct ClientPerfStats {
/// Starting counter (in clock cycles from [`cpu::read_time_counter`]) /// Starting counter (in clock cycles from `read_time_counter`)
start_time: u64, start_time: u64,
/// Current counter in the fuzzer (in clock cycles from [`cpu::read_time_counter`] /// Current counter in the fuzzer (in clock cycles from `read_time_counter`
current_time: u64, current_time: u64,
/// Clock cycles spent in the scheduler /// Clock cycles spent in the scheduler
@ -399,22 +399,22 @@ pub enum PerfFeature {
/// Actual time spent executing the target /// Actual time spent executing the target
TargetExecution = 3, TargetExecution = 3,
/// Time spent in the [`pre_exec`](crate::executors::Executor::pre_exec) callback /// Time spent in `pre_exec`
PreExec = 4, PreExec = 4,
/// Time spent in the [`post_exec`](crate::executors::Executor::post_exec) callback /// Time spent in `post_exec`
PostExec = 5, PostExec = 5,
/// Time spent in the [`pre_exec_observers`](crate::executors::Executor::pre_exec_observers) callback /// Time spent in `observer` `pre_exec_all`
PreExecObservers = 6, PreExecObservers = 6,
/// Time spent in the [`post_exec_observers`](crate::executors::Executor::post_exec_observers) callback /// Time spent in `executor.observers_mut().post_exec_all`
PostExecObservers = 7, PostExecObservers = 7,
/// Time spent getting the feedback from [`is_interesting`] from all feedbacks /// Time spent getting the feedback from `is_interesting` from all feedbacks
GetFeedbackInterestingAll = 8, GetFeedbackInterestingAll = 8,
/// Time spent getting the feedback from [`is_interesting`] from all objectives /// Time spent getting the feedback from `is_interesting` from all objectives
GetObjectivesInterestingAll = 9, GetObjectivesInterestingAll = 9,
/// Used as a counter to know how many elements are in [`PerfFeature`]. Must be the /// Used as a counter to know how many elements are in [`PerfFeature`]. Must be the


@ -15,4 +15,7 @@ edition = "2018"
[build-dependencies] [build-dependencies]
cc = { version = "1.0", features = ["parallel"] } cc = { version = "1.0", features = ["parallel"] }
[target.'cfg(target_os = "macos")'.build-dependencies]
glob = "0.3"
[dependencies] [dependencies]


@ -1,5 +1,11 @@
use std::{env, fs::File, io::Write, path::Path, process::Command, str}; use std::{env, fs::File, io::Write, path::Path, process::Command, str};
#[cfg(target_os = "macos")]
use glob::glob;
#[cfg(target_os = "macos")]
use std::path::PathBuf;
fn dll_extension<'a>() -> &'a str { fn dll_extension<'a>() -> &'a str {
match env::var("CARGO_CFG_TARGET_OS").unwrap().as_str() { match env::var("CARGO_CFG_TARGET_OS").unwrap().as_str() {
"windwos" => "dll", "windwos" => "dll",
@ -8,6 +14,49 @@ fn dll_extension<'a>() -> &'a str {
} }
} }
/// Github Actions for `MacOS` seems to have troubles finding `llvm-config`.
/// Hence, we go look for it ourselves.
#[cfg(target_os = "macos")]
fn find_llvm_config_brew() -> Result<PathBuf, String> {
match Command::new("brew").arg("--cellar").output() {
Ok(output) => {
let brew_cellar_location = str::from_utf8(&output.stdout).unwrap_or_default().trim();
if brew_cellar_location.is_empty() {
return Err("Empty return from brew --cellar".to_string());
}
let cellar_glob = format!("{}/llvm/*/bin/llvm-config", brew_cellar_location);
let glob_results = glob(&cellar_glob).unwrap_or_else(|err| {
panic!("Could not read glob path {} ({})", &cellar_glob, err);
});
match glob_results.last() {
Some(path) => Ok(path.unwrap()),
None => Err(format!(
"No llvm-config found in brew cellar with pattern {}",
cellar_glob
)),
}
}
Err(err) => Err(format!("Could not execute brew --cellar: {:?}", err)),
}
}
fn find_llvm_config() -> String {
env::var("LLVM_CONFIG").unwrap_or_else(|_| {
// for Ghithub Actions, we check if we find llvm-config in brew.
#[cfg(target_os = "macos")]
match find_llvm_config_brew() {
Ok(llvm_dir) => llvm_dir.to_str().unwrap().to_string(),
Err(err) => {
println!("cargo:warning={}", err);
// falling back to system llvm-config
"llvm-config".to_string()
}
}
#[cfg(not(target_os = "macos"))]
"llvm-config".to_string()
})
}
fn main() { fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap(); let out_dir = env::var_os("OUT_DIR").unwrap();
let out_dir = Path::new(&out_dir); let out_dir = Path::new(&out_dir);
@ -16,7 +65,8 @@ fn main() {
let dest_path = Path::new(&out_dir).join("clang_constants.rs"); let dest_path = Path::new(&out_dir).join("clang_constants.rs");
let mut clang_constants_file = File::create(&dest_path).expect("Could not create file"); let mut clang_constants_file = File::create(&dest_path).expect("Could not create file");
let llvm_config = env::var("LLVM_CONFIG").unwrap_or_else(|_| "llvm-config".into()); let llvm_config = find_llvm_config();
if let Ok(output) = Command::new(&llvm_config).args(&["--bindir"]).output() { if let Ok(output) = Command::new(&llvm_config).args(&["--bindir"]).output() {
let llvm_bindir = Path::new( let llvm_bindir = Path::new(
str::from_utf8(&output.stdout) str::from_utf8(&output.stdout)
@ -28,9 +78,9 @@ fn main() {
&mut clang_constants_file, &mut clang_constants_file,
"// These constants are autogenerated by build.rs "// These constants are autogenerated by build.rs
pub const CLANG_PATH: &str = {:?}; pub const CLANG_PATH: &str = {:?};
pub const CLANGXX_PATH: &str = {:?}; pub const CLANGXX_PATH: &str = {:?};
", ",
llvm_bindir.join("clang"), llvm_bindir.join("clang"),
llvm_bindir.join("clang++") llvm_bindir.join("clang++")
) )


@ -106,7 +106,7 @@ impl CompilerWrapper for ClangWrapper {
"-m64" => self.bit_mode = 64, "-m64" => self.bit_mode = 64,
"-c" | "-S" | "-E" => linking = false, "-c" | "-S" | "-E" => linking = false,
"-shared" => linking = false, // TODO dynamic list? "-shared" => linking = false, // TODO dynamic list?
"-Wl,-z,defs" | "-Wl,--no-undefined" => continue, "-Wl,-z,defs" | "-Wl,--no-undefined" | "--no-undefined" => continue,
_ => (), _ => (),
}; };
new_args.push(arg.as_ref().to_string()); new_args.push(arg.as_ref().to_string());
@ -129,6 +129,12 @@ impl CompilerWrapper for ClangWrapper {
new_args.push("-lBcrypt".into()); new_args.push("-lBcrypt".into());
new_args.push("-lAdvapi32".into()); new_args.push("-lAdvapi32".into());
} }
// MacOS has odd linker behavior sometimes
#[cfg(any(target_os = "macos", target_os = "ios"))]
if linking {
new_args.push("-undefined".into());
new_args.push("dynamic_lookup".into());
}
self.base_args = new_args; self.base_args = new_args;
Ok(self) Ok(self)


@ -26,6 +26,7 @@ use libafl::{
stages::StdMutationalStage, stages::StdMutationalStage,
state::{HasCorpus, HasMetadata, StdState}, state::{HasCorpus, HasMetadata, StdState},
stats::MultiStats, stats::MultiStats,
Error,
}; };
use libafl_targets::{EDGES_MAP, MAX_EDGES_NUM}; use libafl_targets::{EDGES_MAP, MAX_EDGES_NUM};
@ -235,6 +236,10 @@ where
.remote_broker_addr(self.remote_broker_addr); .remote_broker_addr(self.remote_broker_addr);
#[cfg(unix)] #[cfg(unix)]
let launcher = launcher.stdout_file(Some("/dev/null")); let launcher = launcher.stdout_file(Some("/dev/null"));
launcher.build().launch().expect("Launcher failed"); match launcher.build().launch() {
Ok(()) => (),
Err(Error::ShuttingDown) => println!("\nFuzzing stopped by user. Good Bye."),
Err(err) => panic!("Fuzzingg failed {:?}", err),
}
} }
} }


@ -7,7 +7,7 @@ use libafl::{
current_nanos, current_nanos,
launcher::Launcher, launcher::Launcher,
rands::StdRand, rands::StdRand,
shmem::{ShMemProvider, StdShMemProvider, StdShMemService}, shmem::{ShMemProvider, StdShMemProvider},
tuples::{tuple_list, Merge}, tuples::{tuple_list, Merge},
}, },
corpus::{ corpus::{


@ -21,7 +21,7 @@ do
echo "[+] Skipping fmt and clippy for $fuzzer (--no-fmt specified)" echo "[+] Skipping fmt and clippy for $fuzzer (--no-fmt specified)"
fi fi
if [ -e ./Makefile ] && [ "$(uname)" == "Linux" ]; then if [ -e ./Makefile ]; then
echo "[*] Testing $fuzzer" echo "[*] Testing $fuzzer"
make short_test || exit 1 make short_test || exit 1
echo "[+] Done testing $fuzzer" echo "[+] Done testing $fuzzer"