Bridge grammartec from Nautilus to libafl (#342)

* nautilus dep

* nautilus generation

* fix mutator

* start new mutator for nautilus

* other mutators

* baby

* ci

* NautilusFeedback

* fix unparse

* ci

* ci

* ci

* ci

* nightly clippy

* ci

* fix

* ci

* ci

* update construct_automata

* fix

* ci

* clippy

* clippy

* nightly clippy

* more clippy

* minor clippy

Co-authored-by: Dominik Maier <domenukk@gmail.com>
Andrea Fioraldi 2021-11-06 02:21:53 +01:00 committed by GitHub
parent ea820a1694
commit b4e15fe9f3
49 changed files with 2046 additions and 155 deletions


@@ -19,7 +19,7 @@ jobs:
       - uses: actions-rs/toolchain@v1
         with:
           profile: minimal
-          toolchain: stable
+          toolchain: nightly
       - uses: Swatinem/rust-cache@v1
       - name: install mdbook
         run: cargo install mdbook
@@ -52,6 +52,8 @@ jobs:
         run: command -v llvm-config && clang -v
       - name: Install cargo-hack
         run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin
+      - name: Add nightly rustfmt and clippy
+        run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
       - uses: actions/checkout@v2
       - name: Run a normal build
         run: cargo build --verbose
@@ -61,7 +63,7 @@ jobs:
       # cargo-hack's --feature-powerset would be nice here but libafl has a too many knobs
       - name: Check each feature
         # Skipping python as it has to be built with the `maturin` tool
-        run: cargo hack check --feature-powerset --depth=2 --exclude-features=python,sancov_pcguard_edges,sancov_pcguard_edges_ptr --no-dev-deps
+        run: cargo hack check --feature-powerset --depth=2 --exclude-features=agpl,nautilus,python,sancov_pcguard_edges,sancov_pcguard_edges_ptr --no-dev-deps
       # pcguard edges and pcguard hitcounts are not compatible and we need to build them seperately
       - name: Check pcguard edges
         run: cargo check --features=sancov_pcguard_edges,sancov_pcguard_edges_ptr
@@ -74,7 +76,7 @@ jobs:
       - name: Build Docs
         run: cargo doc
       - name: Test Docs
-        run: cargo test --all-features --doc
+        run: cargo +nightly test --doc --all-features
       - name: Run clippy
         run: ./scripts/clippy.sh
   ubuntu-concolic:
@@ -153,6 +155,8 @@ jobs:
           profile: minimal
           toolchain: stable
       - uses: Swatinem/rust-cache@v1
+      - name: Add nightly rustfmt and clippy
+        run: rustup toolchain install nightly --component rustfmt --component clippy --allow-downgrade
       - name: Install deps
         run: brew install z3
       - uses: actions/checkout@v2


@@ -18,6 +18,7 @@ members = [
     "libafl_concolic/test/dump_constraints",
     "libafl_concolic/test/runtime_test",
     "utils/deexit",
+    "utils/gramatron/construct_automata",
 ]
 default-members = [
     "libafl",


@@ -104,3 +104,10 @@ for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
 be dual licensed as above, without any additional terms or conditions.
 </sub>
+
+<br>
+
+<sub>
+Dependencies under more restrictive licenses, such as GPL or AGPL, can be enabled
+using the respective feature in each crate when it is present, such as the
+'agpl' feature of the libafl crate.
+</sub>


@@ -113,21 +113,31 @@ pub fn main() {
     /*
     use libafl::generators::Generator;
     use std::collections::HashSet;
+    use std::collections::hash_map::DefaultHasher;
+    use std::hash::{Hash, Hasher};
+
+    fn calculate_hash<T: Hash>(t: &T) -> u64 {
+        let mut s = DefaultHasher::new();
+        t.hash(&mut s);
+        s.finish()
+    }
+
     let mut set = HashSet::new();
     let st = libafl::bolts::current_milliseconds();
     let mut b = vec![];
     let mut c = 0;
-    for _ in 0..100000000 {
+    for _ in 0..100000 {
         let i = generator.generate(&mut state).unwrap();
         i.unparse(&mut b);
-        set.insert(b.clone());
+        set.insert(calculate_hash(&b));
         c += b.len();
     }
     println!("{} / {}", c, libafl::bolts::current_milliseconds() - st);
-    println!("{} / 100000000", set.len());
+    println!("{} / 100000", set.len());
     return;
     */
     // Generate 8 initial inputs
     state
         .generate_initial_inputs_forced(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)


@@ -0,0 +1 @@
libpng-*


@@ -0,0 +1,22 @@
[package]
name = "baby_fuzzer"
version = "0.6.0"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2018"

[features]
default = ["std"]
std = []

[profile.dev]
panic = "abort"

[profile.release]
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
debug = true

[dependencies]
libafl = { path = "../../libafl/", features = ["default", "nautilus"] }


@@ -0,0 +1,8 @@
# Baby fuzzer

This is a minimalistic example of how to create a LibAFL-based fuzzer.
It runs on a single core until a crash occurs and then exits.

The tested program is a simple Rust function without any instrumentation.
For real fuzzing, you will want to add some sort of coverage or other feedback.
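The `grammar.json` this fuzzer loads (its diff is suppressed below for size) is a list of `[nonterminal, expansion]` rule pairs. As a hedged sketch, a tiny grammar of the same shape can also be built inline through `NautilusContext::new`, whose definition appears later in this commit; the rule names here are invented for illustration:

use libafl::generators::NautilusContext;

fn main() {
    // Each rule is a [nonterminal, expansion] pair; {NAME} in an expansion
    // references another nonterminal. The first rule's nonterminal becomes
    // the START symbol.
    let rules = vec![
        vec!["EXPR".to_string(), "{EXPR}+{EXPR}".to_string()],
        vec!["EXPR".to_string(), "({EXPR})".to_string()],
        vec!["EXPR".to_string(), "1".to_string()],
    ];
    // 15 is the maximum tree depth, matching this example's main.rs.
    let _context = NautilusContext::new(15, &rules);
}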

File diff suppressed because it is too large.


@@ -0,0 +1 @@
nightly


@@ -0,0 +1,163 @@
use std::path::PathBuf;
#[cfg(windows)]
use std::ptr::write_volatile;

use libafl::{
    bolts::{current_nanos, rands::StdRand, tuples::tuple_list},
    corpus::{InMemoryCorpus, OnDiskCorpus, QueueCorpusScheduler},
    events::SimpleEventManager,
    executors::{inprocess::InProcessExecutor, ExitKind},
    feedback_or,
    feedbacks::{
        CrashFeedback, MapFeedbackState, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback,
    },
    fuzzer::{Fuzzer, StdFuzzer},
    generators::{NautilusContext, NautilusGenerator},
    inputs::NautilusInput,
    mutators::{
        NautilusRandomMutator, NautilusRecursionMutator, NautilusSpliceMutator, StdScheduledMutator,
    },
    observers::StdMapObserver,
    stages::mutational::StdMutationalStage,
    state::{HasMetadata, StdState},
    stats::SimpleStats,
};

/// Coverage map with explicit assignments due to the lack of instrumentation
static mut SIGNALS: [u8; 16] = [0; 16];

/*
/// Assign a signal to the signals map
fn signals_set(idx: usize) {
    unsafe { SIGNALS[idx] = 1 };
}
*/

#[allow(clippy::similar_names)]
pub fn main() {
    let context = NautilusContext::from_file(15, "grammar.json");
    let mut bytes = vec![];

    // The closure that we want to fuzz
    let mut harness = |input: &NautilusInput| {
        input.unparse(&context, &mut bytes);
        unsafe {
            println!(">>> {}", std::str::from_utf8_unchecked(&bytes));
        }
        ExitKind::Ok
    };

    // Create an observation channel using the signals map
    let observer = StdMapObserver::new("signals", unsafe { &mut SIGNALS });

    // The state of the edges feedback.
    let feedback_state = MapFeedbackState::with_observer(&observer);

    // Feedback to rate the interestingness of an input
    let feedback = feedback_or!(
        MaxMapFeedback::new(&feedback_state, &observer),
        NautilusFeedback::new(&context)
    );

    // A feedback to choose if an input is a solution or not
    let objective = CrashFeedback::new();

    // create a State from scratch
    let mut state = StdState::new(
        // RNG
        StdRand::with_seed(current_nanos()),
        // Corpus that will be evolved, we keep it in memory for performance
        InMemoryCorpus::new(),
        // Corpus in which we store solutions (crashes in this example),
        // on disk so the user can get them after stopping the fuzzer
        OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(),
        // States of the feedbacks.
        // They are the data related to the feedbacks that you want to persist in the State.
        tuple_list!(feedback_state),
    );

    if state.metadata().get::<NautilusChunksMetadata>().is_none() {
        state.add_metadata(NautilusChunksMetadata::new("/tmp/".into()));
    }

    // The Stats trait defines how the fuzzer stats are reported to the user
    let stats = SimpleStats::new(|s| println!("{}", s));

    // The event manager handles the various events generated during the fuzzing loop
    // such as the notification of the addition of a new item to the corpus
    let mut mgr = SimpleEventManager::new(stats);

    // A queue policy to get testcases from the corpus
    let scheduler = QueueCorpusScheduler::new();

    // A fuzzer with feedbacks and a corpus scheduler
    let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);

    // Create the executor for an in-process function with just one observer
    let mut executor = InProcessExecutor::new(
        &mut harness,
        tuple_list!(observer),
        &mut fuzzer,
        &mut state,
        &mut mgr,
    )
    .expect("Failed to create the Executor");

    let mut generator = NautilusGenerator::new(&context);

    // Use this code to profile the generator performance
    /*
    use libafl::generators::Generator;
    use std::collections::hash_map::DefaultHasher;
    use std::collections::HashSet;
    use std::hash::{Hash, Hasher};

    fn calculate_hash<T: Hash>(t: &T) -> u64 {
        let mut s = DefaultHasher::new();
        t.hash(&mut s);
        s.finish()
    }

    let mut set = HashSet::new();
    let st = libafl::bolts::current_milliseconds();
    let mut b = vec![];
    let mut c = 0;
    for _ in 0..100000 {
        let i = generator.generate(&mut state).unwrap();
        i.unparse(&context, &mut b);
        set.insert(calculate_hash(&b));
        c += b.len();
    }
    println!("{} / {}", c, libafl::bolts::current_milliseconds() - st);
    println!("{} / 100000", set.len());
    return;
    */

    // Generate 8 initial inputs
    state
        .generate_initial_inputs_forced(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8)
        .expect("Failed to generate the initial corpus");

    // Setup a mutational stage with the Nautilus grammar mutators
    let mutator = StdScheduledMutator::with_max_iterations(
        tuple_list!(
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRandomMutator::new(&context),
            NautilusRecursionMutator::new(&context),
            NautilusSpliceMutator::new(&context),
            NautilusSpliceMutator::new(&context),
            NautilusSpliceMutator::new(&context),
        ),
        2,
    );
    let mut stages = tuple_list!(StdMutationalStage::new(mutator));

    fuzzer
        .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)
        .expect("Error in the fuzzing loop");
}


@@ -11,6 +11,26 @@ keywords = ["fuzzing", "testing", "security"]
 edition = "2021"
 build = "build.rs"
 
+[features]
+default = ["std", "anymap_debug", "derive", "llmp_compression", "rand_trait", "fork"]
+std = ["serde_json", "serde_json/std", "hostname", "core_affinity", "nix", "serde/std", "bincode", "wait-timeout", "regex", "build_id", "uuid"] # print, env, launcher ... support
+anymap_debug = ["serde_json"] # uses serde_json to Debug the anymap trait. Disable for smaller footprint.
+derive = ["libafl_derive"] # provide derive(SerdeAny) macro.
+fork = [] # uses the fork() syscall to spawn children, instead of launching a new command, if supported by the OS (has no effect on Windows, no_std).
+rand_trait = ["rand_core"] # If set, libafl's rand implementations will implement `rand::Rng`
+introspection = [] # Include performance statistics of the fuzzing pipeline
+concolic_mutation = ["z3"] # include a simple concolic mutator based on z3
+
+# features hiding dependencies licensed under GPL
+gpl = []
+# features hiding dependencies licensed under AGPL
+agpl = ["gpl", "nautilus"]
+nautilus = ["grammartec", "std", "serde_json/std"]
+
+# LLMP features
+llmp_bind_public = [] # If set, llmp will bind to 0.0.0.0, allowing cross-device communication. Binds to localhost by default.
+llmp_compression = ["miniz_oxide"] # llmp compression using GZip
+llmp_debug = ["backtrace"] # Enables debug output for LLMP
+llmp_small_maps = [] # reduces initial map size for llmp
+
 [build-dependencies]
 rustc_version = "0.4"
@@ -23,38 +43,6 @@ serde_json = "1.0.60"
 num_cpus = "1.0" # cpu count, for llmp example
 serial_test = "0.5"
 
-[[bench]]
-name = "rand_speeds"
-harness = false
-
-[[bench]]
-name = "hash_speeds"
-harness = false
-
-#[profile.release]
-#lto = true
-#opt-level = 3
-#debug = true
-
-[features]
-default = ["std", "anymap_debug", "derive", "llmp_compression", "rand_trait", "fork"]
-std = ["serde_json", "hostname", "core_affinity", "nix", "serde/std", "bincode", "wait-timeout", "regex", "build_id", "uuid"] # print, env, launcher ... support
-anymap_debug = ["serde_json"] # uses serde_json to Debug the anymap trait. Disable for smaller footprint.
-derive = ["libafl_derive"] # provide derive(SerdeAny) macro.
-fork = [] # uses the fork() syscall to spawn children, instead of launching a new command, if supported by the OS (has no effect on Windows, no_std).
-rand_trait = ["rand_core"] # If set, libafl's rand implementations will implement `rand::Rng`
-llmp_bind_public = [] # If set, llmp will bind to 0.0.0.0, allowing cross-device communication. Binds to localhost by default.
-llmp_compression = ["miniz_oxide"] # llmp compression using GZip
-llmp_debug = ["backtrace"] # Enables debug output for LLMP
-llmp_small_maps = [] # reduces initial map size for llmp
-introspection = [] # Include performance statistics of the fuzzing pipeline
-concolic_mutation = ["z3"] # include a simple concolic mutator based on z3
-
-[[example]]
-name = "llmp_test"
-path = "./examples/llmp_test/main.rs"
-required-features = ["std"]
-
 [dependencies]
 tuple_list = { version = "0.1.3" }
 hashbrown = { version = "0.11", features = ["serde", "ahash-compile-time-rng"], default-features=false } # A faster hashmap, nostd compatible
@@ -87,6 +75,9 @@ wait-timeout = { version = "0.2", optional = true } # used by CommandExecutor to
 z3 = { version = "0.11", features = ["static-link-z3"], optional = true } # for concolic mutation
 
+# AGPL
+grammartec = { git = "https://github.com/andreafioraldi/nautilus", optional = true }
+
 [target.'cfg(target_os = "android")'.dependencies]
 backtrace = { version = "0.3", optional = true, default-features = false, features = ["std", "libbacktrace"] } # for llmp_debug
@@ -105,3 +96,21 @@ uuid = { version = "0.8", features = ["v4"] }
 
 [target.'cfg(windows)'.build-dependencies]
 windows = "0.18.0"
+
+[[bench]]
+name = "rand_speeds"
+harness = false
+
+[[bench]]
+name = "hash_speeds"
+harness = false
+
+#[profile.release]
+#lto = true
+#opt-level = 3
+#debug = true
+
+[[example]]
+name = "llmp_test"
+path = "./examples/llmp_test/main.rs"
+required-features = ["std"]
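The `grammartec` dependency stays optional; the `nautilus` feature switches it on, and the code side of the gate is the `cfg` pattern repeated in the module hunks further below. A sketch of that pattern:

// Gating used in feedbacks/, generators/, inputs/ and mutators/:
// the module and its re-exports exist only when the feature is enabled.
#[cfg(feature = "nautilus")]
pub mod nautilus;
#[cfg(feature = "nautilus")]
pub use nautilus::*;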


@@ -81,7 +81,7 @@ where
     /// Then, clients launched by this [`Launcher`] can connect to the original `broker`.
     #[builder(default = true)]
     spawn_broker: bool,
-    #[builder(default = PhantomData)]
+    #[builder(setter(skip), default = PhantomData)]
     phantom_data: PhantomData<(&'a I, &'a OT, &'a S, &'a SP)>,
 }


@@ -425,12 +425,16 @@ unsafe fn _llmp_page_init<SHM: ShMem>(shmem: &mut SHM, sender: u32, allow_reinit
     let page = shmem2page_mut(shmem);
     #[cfg(all(feature = "llmp_debug", feature = "std"))]
     dbg!("_llmp_page_init: page {}", *page);
-    if (*page).magic == PAGE_INITIALIZED_MAGIC && !allow_reinit {
-        panic!(
+
+    if !allow_reinit {
+        assert!(
+            (*page).magic != PAGE_INITIALIZED_MAGIC,
             "Tried to initialize page {:?} twice (for shmem {:?})",
-            page, shmem
+            page,
+            shmem
         );
-    };
+    }
+
     (*page).magic = PAGE_INITIALIZED_MAGIC;
     (*page).sender = sender;
     ptr::write_volatile(ptr::addr_of_mut!((*page).current_msg_id), 0);
@@ -898,18 +902,20 @@ where
         let map = self.out_maps.last_mut().unwrap();
         let page = map.page_mut();
         let last_msg = self.last_msg_sent;
-        if (*page).size_used + EOP_MSG_SIZE > (*page).size_total {
-            panic!("PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, size_total {:?}", page,
-                ptr::addr_of!((*page).size_used), ptr::addr_of!((*page).size_total));
-        }
+        assert!((*page).size_used + EOP_MSG_SIZE <= (*page).size_total,
+            "PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, size_total {:?}", page,
+            ptr::addr_of!((*page).size_used), ptr::addr_of!((*page).size_total));
+
         let mut ret: *mut LlmpMsg = if last_msg.is_null() {
             (*page).messages.as_mut_ptr()
         } else {
             llmp_next_msg_ptr_checked(map, last_msg, EOP_MSG_SIZE)?
         };
-        if (*ret).tag == LLMP_TAG_UNINITIALIZED {
-            panic!("Did not call send() on last message!");
-        }
+        assert!(
+            (*ret).tag != LLMP_TAG_UNINITIALIZED,
+            "Did not call send() on last message!"
+        );
+
         (*ret).buf_len = size_of::<LlmpPayloadSharedMapInfo>() as u64;
 
         // We don't need to pad the EOP message: it'll always be the last in this page.
@@ -932,9 +938,10 @@ where
         let page = map.page_mut();
         let last_msg = self.last_msg_sent;
-        if self.has_unsent_message {
-            panic!("Called alloc without callind send inbetween");
-        }
+        assert!(
+            !self.has_unsent_message,
+            "Called alloc without calling send inbetween"
+        );
 
         #[cfg(all(feature = "llmp_debug", feature = "std"))]
         println!(
@@ -1009,12 +1016,12 @@ where
     unsafe fn send(&mut self, msg: *mut LlmpMsg, overwrite_client_id: bool) -> Result<(), Error> {
         // dbg!("Sending msg {:?}", msg);
-        if self.last_msg_sent == msg {
-            panic!("Message sent twice!");
-        }
-        if (*msg).tag == LLMP_TAG_UNSET {
-            panic!("No tag set on message with id {}", (*msg).message_id);
-        }
+        assert!(self.last_msg_sent != msg, "Message sent twice!");
+        assert!(
+            (*msg).tag != LLMP_TAG_UNSET,
+            "No tag set on message with id {}",
+            (*msg).message_id
+        );
         // A client gets the sender id assigned to by the broker during the initial handshake.
         if overwrite_client_id {
             (*msg).sender = self.id;
@@ -1369,14 +1376,14 @@ where
             #[cfg(feature = "std")]
             println!("Received end of page, allocating next");
             // Handle end of page
-            if (*msg).buf_len < size_of::<LlmpPayloadSharedMapInfo>() as u64 {
-                panic!(
-                    "Illegal message length for EOP (is {}/{}, expected {})",
-                    (*msg).buf_len,
-                    (*msg).buf_len_padded,
-                    size_of::<LlmpPayloadSharedMapInfo>()
-                );
-            }
+            assert!(
+                (*msg).buf_len >= size_of::<LlmpPayloadSharedMapInfo>() as u64,
+                "Illegal message length for EOP (is {}/{}, expected {})",
+                (*msg).buf_len,
+                (*msg).buf_len_padded,
+                size_of::<LlmpPayloadSharedMapInfo>()
+            );
 
             #[allow(clippy::cast_ptr_alignment)]
             let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo;
@@ -1427,9 +1434,11 @@ where
         let page = self.current_recv_map.page_mut();
         let last_msg = self.last_msg_recvd;
         if !last_msg.is_null() {
-            if (*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg) {
-                panic!("BUG: full page passed to await_message_blocking or reset failed");
-            }
+            assert!(
+                !((*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg)),
+                "BUG: full page passed to await_message_blocking or reset failed"
+            );
+
             current_msg_id = (*last_msg).message_id;
         }
         loop {
@@ -1564,9 +1573,11 @@ where
             shmem: existing_map,
         };
         unsafe {
-            if (*ret.page()).magic != PAGE_INITIALIZED_MAGIC {
-                panic!("Map was not priviously initialized at {:?}", &ret.shmem);
-            }
+            assert!(
+                (*ret.page()).magic == PAGE_INITIALIZED_MAGIC,
+                "Map was not priviously initialized at {:?}",
+                &ret.shmem
+            );
+
             #[cfg(all(feature = "llmp_debug", feature = "std"))]
             dbg!("PAGE: {}", *ret.page());
         }
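The llmp changes above, and most hunks below, are one mechanical refactor: nightly clippy's `manual_assert` lint turns `if cond { panic!(...) }` into an `assert!` with the condition inverted. A minimal standalone sketch of the transformation:

fn check(len: usize, cap: usize) {
    // Before (clippy::manual_assert fires on this shape):
    // if len > cap {
    //     panic!("len {} exceeds cap {}", len, cap);
    // }

    // After: same panic message, condition inverted.
    assert!(len <= cap, "len {} exceeds cap {}", len, cap);
}

fn main() {
    check(3, 8);
}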


@@ -82,7 +82,7 @@ where
 {
     fn id(&self) -> ShMemId {
         let client_id = self.inner.id();
-        ShMemId::from_string(&format!("{}:{}", self.server_fd, client_id.to_string()))
+        ShMemId::from_string(&format!("{}:{}", self.server_fd, client_id))
     }
 
     fn len(&self) -> usize {


@@ -194,13 +194,12 @@ pub unsafe fn setup_signal_handler<T: 'static + Handler>(handler: &mut T) -> Res
     if SIGNAL_STACK_PTR.is_null() {
         SIGNAL_STACK_PTR = malloc(SIGNAL_STACK_SIZE);
-        if SIGNAL_STACK_PTR.is_null() {
-            // Rust always panics on OOM, so we will, too.
-            panic!(
-                "Failed to allocate signal stack with {} bytes!",
-                SIGNAL_STACK_SIZE
-            );
-        }
+        // Rust always panics on OOM, so we will, too.
+        assert!(
+            !SIGNAL_STACK_PTR.is_null(),
+            "Failed to allocate signal stack with {} bytes!",
+            SIGNAL_STACK_SIZE
+        );
     }
 
     let mut ss: stack_t = mem::zeroed();
     ss.ss_size = SIGNAL_STACK_SIZE;


@@ -142,9 +142,7 @@ macro_rules! create_serde_registry_for_trait {
             where
                 T: $trait_name + Serialize + serde::de::DeserializeOwned,
             {
-                if self.finalized {
-                    panic!("Registry is already finalized!");
-                }
+                assert!(!self.finalized, "Registry is already finalized!");
 
                 let deserializers = self.deserializers.get_or_insert_with(HashMap::default);
                 deserializers.insert(unpack_type_id(TypeId::of::<T>()), |de| {


@@ -700,16 +700,18 @@ pub mod unix_shmem {
         impl Drop for MmapShMem {
             fn drop(&mut self) {
                 unsafe {
-                    if self.map.is_null() {
-                        panic!("Map should never be null for MmapShMem (on Drop)");
-                    }
+                    assert!(
+                        !self.map.is_null(),
+                        "Map should never be null for MmapShMem (on Drop)"
+                    );
+
                     munmap(self.map as *mut _, self.map_size);
                     self.map = ptr::null_mut();
 
-                    if self.shm_fd == -1 {
-                        panic!("FD should never be -1 for MmapShMem (on Drop)");
-                    }
+                    assert!(
+                        self.shm_fd != -1,
+                        "FD should never be -1 for MmapShMem (on Drop)"
+                    );
 
                     // None in case we didn't [`shm_open`] this ourselves, but someone sent us the FD.
                     if let Some(filename_path) = self.filename_path {


@@ -467,9 +467,11 @@ where
         let mut events = vec![];
         let self_id = self.llmp.sender.id;
         while let Some((client_id, tag, _flags, msg)) = self.llmp.recv_buf_with_flags()? {
-            if tag == _LLMP_TAG_EVENT_TO_BROKER {
-                panic!("EVENT_TO_BROKER parcel should not have arrived in the client!");
-            }
+            assert!(
+                tag != _LLMP_TAG_EVENT_TO_BROKER,
+                "EVENT_TO_BROKER parcel should not have arrived in the client!"
+            );
+
             if client_id == self_id {
                 continue;
             }
@@ -729,8 +731,8 @@ where
     /// The type of manager to build
     #[builder(default = ManagerKind::Any)]
     kind: ManagerKind,
-    #[builder(setter(skip), default = PhantomData {})]
-    _phantom: PhantomData<(I, OT, S)>,
+    #[builder(setter(skip), default = PhantomData)]
+    phantom_data: PhantomData<(I, OT, S)>,
 }
 
 #[cfg(feature = "std")]
@@ -856,6 +858,7 @@ where
         if !staterestorer.has_content() {
             #[cfg(unix)]
+            #[allow(clippy::manual_assert)]
             if child_status == 137 {
                 // Out of Memory, see https://tldp.org/LDP/abs/html/exitcodes.html
                 // and https://github.com/AFLplusplus/LibAFL/issues/32 for discussion.


@@ -368,6 +368,7 @@ where
         if !staterestorer.has_content() {
             #[cfg(unix)]
+            #[allow(clippy::manual_assert)]
             if child_status == 137 {
                 // Out of Memory, see https://tldp.org/LDP/abs/html/exitcodes.html
                 // and https://github.com/AFLplusplus/LibAFL/issues/32 for discussion.


@@ -254,9 +254,8 @@ where
             .match_name_mut::<MapFeedbackState<T>>(&self.name)
             .unwrap();
-        if size > map_state.history_map.len() {
-            panic!("The size of the associated map observer cannot exceed the size of the history map of the feedback. If you are running multiple instances of slightly different fuzzers (e.g. one with ASan and another without) synchronized using LLMP please check the `configuration` field of the LLMP manager.");
-        }
+        assert!(size <= map_state.history_map.len(), "The size of the associated map observer cannot exceed the size of the history map of the feedback. If you are running multiple instances of slightly different fuzzers (e.g. one with ASan and another without) synchronized using LLMP please check the `configuration` field of the LLMP manager.");
 
         assert!(size <= observer.len());
 
         if self.novelties.is_some() {


@@ -9,6 +9,11 @@ pub mod concolic;
 #[cfg(feature = "std")]
 pub use concolic::ConcolicFeedback;
 
+#[cfg(feature = "nautilus")]
+pub mod nautilus;
+#[cfg(feature = "nautilus")]
+pub use nautilus::*;
+
 use alloc::string::{String, ToString};
 use serde::{Deserialize, Serialize};


@@ -0,0 +1,89 @@
use grammartec::{chunkstore::ChunkStore, context::Context};
use serde::{Deserialize, Serialize};
use std::fs::create_dir_all;

use crate::{
    bolts::tuples::Named,
    corpus::Testcase,
    events::EventFirer,
    executors::ExitKind,
    feedbacks::Feedback,
    generators::NautilusContext,
    inputs::NautilusInput,
    observers::ObserversTuple,
    state::{HasClientPerfStats, HasMetadata},
    Error,
};

#[derive(Serialize, Deserialize)]
pub struct NautilusChunksMetadata {
    pub cks: ChunkStore,
}

crate::impl_serdeany!(NautilusChunksMetadata);

impl NautilusChunksMetadata {
    #[must_use]
    pub fn new(work_dir: String) -> Self {
        create_dir_all(format!("{}/outputs/chunks", &work_dir))
            .expect("Could not create folder in workdir");
        Self {
            cks: ChunkStore::new(work_dir),
        }
    }
}

pub struct NautilusFeedback<'a> {
    ctx: &'a Context,
}

impl<'a> NautilusFeedback<'a> {
    #[must_use]
    pub fn new(context: &'a NautilusContext) -> Self {
        Self { ctx: &context.ctx }
    }
}

impl<'a> Named for NautilusFeedback<'a> {
    fn name(&self) -> &str {
        "NautilusFeedback"
    }
}

impl<'a, S> Feedback<NautilusInput, S> for NautilusFeedback<'a>
where
    S: HasMetadata + HasClientPerfStats,
{
    fn is_interesting<EM, OT>(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        _input: &NautilusInput,
        _observers: &OT,
        _exit_kind: &ExitKind,
    ) -> Result<bool, Error>
    where
        EM: EventFirer<NautilusInput, S>,
        OT: ObserversTuple<NautilusInput, S>,
    {
        Ok(false)
    }

    fn append_metadata(
        &mut self,
        state: &mut S,
        testcase: &mut Testcase<NautilusInput>,
    ) -> Result<(), Error> {
        let input = testcase.load_input()?.clone();
        let meta = state
            .metadata_mut()
            .get_mut::<NautilusChunksMetadata>()
            .expect("NautilusChunksMetadata not in the state");
        meta.cks.add_tree(input.tree, self.ctx);
        Ok(())
    }

    fn discard_metadata(&mut self, _state: &mut S, _input: &NautilusInput) -> Result<(), Error> {
        Ok(())
    }
}
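Note that `append_metadata` expects the chunk store to already be in the state; the baby_fuzzer above seeds it once before fuzzing starts (the `/tmp/` workdir is simply what that example picks):

// Seed the chunk store so NautilusFeedback::append_metadata can add every
// saved testcase's tree to it, which in turn feeds NautilusSpliceMutator.
if state.metadata().get::<NautilusChunksMetadata>().is_none() {
    state.add_metadata(NautilusChunksMetadata::new("/tmp/".into()));
}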


@@ -13,6 +13,11 @@ use crate::{
 pub mod gramatron;
 pub use gramatron::*;
 
+#[cfg(feature = "nautilus")]
+pub mod nautilus;
+#[cfg(feature = "nautilus")]
+pub use nautilus::*;
+
 /// The maximum size of dummy bytes generated by _dummy generator methods
 const DUMMY_BYTES_MAX: usize = 64;


@@ -0,0 +1,75 @@
use alloc::{string::String, vec::Vec};
use std::{fs, io::BufReader, path::Path};

use crate::{generators::Generator, inputs::nautilus::NautilusInput, Error};

use grammartec::context::Context;
pub use grammartec::newtypes::NTermID;

pub struct NautilusContext {
    pub ctx: Context,
}

impl NautilusContext {
    /// Returns a new [`NautilusContext`]
    #[must_use]
    pub fn new(tree_depth: usize, rules: &[Vec<String>]) -> Self {
        assert!(!rules.is_empty());
        assert!(!rules[0].is_empty());
        let mut ctx = Context::new();
        for rule in rules {
            ctx.add_rule(&rule[0], rule[1].as_bytes());
        }
        let root = "{".to_string() + &rules[0][0] + "}";
        ctx.add_rule("START", root.as_bytes());
        ctx.initialize(tree_depth);
        Self { ctx }
    }

    #[must_use]
    pub fn from_file<P: AsRef<Path>>(tree_depth: usize, grammar_file: P) -> Self {
        let file = fs::File::open(grammar_file).expect("Cannot open grammar file");
        let reader = BufReader::new(file);
        let rules: Vec<Vec<String>> =
            serde_json::from_reader(reader).expect("Cannot parse grammar file");
        Self::new(tree_depth, &rules)
    }
}

#[derive(Clone)]
/// Generates random inputs from a grammar
pub struct NautilusGenerator<'a> {
    pub ctx: &'a Context,
}

impl<'a, S> Generator<NautilusInput, S> for NautilusGenerator<'a> {
    fn generate(&mut self, _state: &mut S) -> Result<NautilusInput, Error> {
        let nonterm = self.nonterminal("START");
        let len = self.ctx.get_random_len_for_nt(&nonterm);
        let mut input = NautilusInput::empty();
        self.generate_from_nonterminal(&mut input, nonterm, len);
        Ok(input)
    }

    fn generate_dummy(&self, _state: &mut S) -> NautilusInput {
        NautilusInput::empty()
    }
}

impl<'a> NautilusGenerator<'a> {
    /// Returns a new [`NautilusGenerator`]
    #[must_use]
    pub fn new(context: &'a NautilusContext) -> Self {
        Self { ctx: &context.ctx }
    }

    // TODO create from a python grammar

    #[must_use]
    pub fn nonterminal(&self, name: &str) -> NTermID {
        self.ctx.nt_id(name)
    }

    pub fn generate_from_nonterminal(&self, input: &mut NautilusInput, start: NTermID, len: usize) {
        input.tree_mut().generate_from_nt(start, len, self.ctx);
    }
}
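Put together with `NautilusContext`, standalone generation is a short round trip. A hedged sketch using only the API shown in this file (the toy grammar is invented; the generator ignores its state argument, so a unit value suffices):

use libafl::generators::{Generator, NautilusContext, NautilusGenerator};

fn main() {
    let rules = vec![
        vec!["EXPR".to_string(), "{EXPR}+{EXPR}".to_string()],
        vec!["EXPR".to_string(), "1".to_string()],
    ];
    let context = NautilusContext::new(15, &rules);
    let mut generator = NautilusGenerator::new(&context);

    // generate() derives a random tree from START, bounded by the tree depth.
    let mut state = ();
    let input = generator.generate(&mut state).unwrap();

    // unparse() serializes the tree back into concrete bytes.
    let mut bytes = vec![];
    input.unparse(&context, &mut bytes);
    println!("{}", String::from_utf8_lossy(&bytes));
}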


@@ -9,6 +9,11 @@ pub use encoded::*;
 pub mod gramatron;
 pub use gramatron::*;
 
+#[cfg(feature = "nautilus")]
+pub mod nautilus;
+#[cfg(feature = "nautilus")]
+pub use nautilus::*;
+
 use alloc::{
     string::{String, ToString},
     vec::Vec,


@@ -0,0 +1,80 @@
//use ahash::AHasher;
//use core::hash::Hasher;
use alloc::{rc::Rc, string::String};
use core::{cell::RefCell, convert::From};

use serde::{Deserialize, Serialize};

use crate::{bolts::HasLen, generators::nautilus::NautilusContext, inputs::Input};

use grammartec::{
    newtypes::NodeID,
    tree::{Tree, TreeLike},
};

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct NautilusInput {
    /// The input representation as Tree
    pub tree: Tree,
}

impl Input for NautilusInput {
    /// Generate a name for this input
    #[must_use]
    fn generate_name(&self, idx: usize) -> String {
        /*let mut hasher = AHasher::new_with_keys(0, 0);
        for term in &self.terms {
            hasher.write(term.symbol.as_bytes());
        }
        format!("{:016x}", hasher.finish())*/
        format!("id:{}", idx)
    }
}

/// Rc Ref-cell from Input
impl From<NautilusInput> for Rc<RefCell<NautilusInput>> {
    fn from(input: NautilusInput) -> Self {
        Rc::new(RefCell::new(input))
    }
}

impl HasLen for NautilusInput {
    #[inline]
    fn len(&self) -> usize {
        self.tree.size()
    }
}

impl NautilusInput {
    /// Creates a new input using the given tree
    #[must_use]
    pub fn new(tree: Tree) -> Self {
        Self { tree }
    }

    #[must_use]
    pub fn empty() -> Self {
        Self {
            tree: Tree {
                rules: vec![],
                sizes: vec![],
                paren: vec![],
            },
        }
    }

    pub fn unparse(&self, context: &NautilusContext, bytes: &mut Vec<u8>) {
        bytes.clear();
        self.tree.unparse(NodeID::from(0), &context.ctx, bytes);
    }

    #[must_use]
    pub fn tree(&self) -> &Tree {
        &self.tree
    }

    #[must_use]
    pub fn tree_mut(&mut self) -> &mut Tree {
        &mut self.tree
    }
}
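Note the asymmetry this type carries: `len()` counts tree nodes, while `unparse` produces the serialized bytes, which can be longer or shorter. A fragment, assuming a `tree` and `context` built as in the generator sketch above:

let input = NautilusInput::new(tree);
assert_eq!(input.len(), input.tree().size()); // length is the tree size

let mut bytes = Vec::new();
input.unparse(&context, &mut bytes); // clears `bytes`, then writes from the root node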


@@ -67,7 +67,7 @@ where
 }
 
 #[derive(Serialize, Deserialize)]
-struct GramatronIdxMapMetadata {
+pub struct GramatronIdxMapMetadata {
     pub map: HashMap<usize, Vec<usize>>,
 }


@@ -13,6 +13,11 @@ pub use mopt_mutator::*;
 pub mod gramatron;
 pub use gramatron::*;
 
+#[cfg(feature = "nautilus")]
+pub mod nautilus;
+#[cfg(feature = "nautilus")]
+pub use nautilus::*;
+
 use crate::{
     bolts::tuples::{HasConstLen, Named},
     inputs::Input,


@@ -0,0 +1,192 @@
use core::marker::PhantomData;

use crate::{
    bolts::tuples::Named,
    corpus::Corpus,
    feedbacks::NautilusChunksMetadata,
    generators::nautilus::NautilusContext,
    inputs::nautilus::NautilusInput,
    mutators::{MutationResult, Mutator},
    state::{HasCorpus, HasMetadata},
    Error,
};

use grammartec::mutator::Mutator as BackingMutator;
use grammartec::{
    context::Context,
    tree::{Tree, TreeMutation},
};

pub struct NautilusRandomMutator<'a> {
    ctx: &'a Context,
    mutator: BackingMutator,
}

impl<'a, S> Mutator<NautilusInput, S> for NautilusRandomMutator<'a> {
    fn mutate(
        &mut self,
        _state: &mut S,
        input: &mut NautilusInput,
        _stage_idx: i32,
    ) -> Result<MutationResult, Error> {
        // TODO get rid of tmp
        let mut tmp = vec![];
        self.mutator
            .mut_random::<_, ()>(
                &input.tree,
                self.ctx,
                &mut |t: &TreeMutation, _ctx: &Context| {
                    tmp.extend_from_slice(t.prefix);
                    tmp.extend_from_slice(t.repl);
                    tmp.extend_from_slice(t.postfix);
                    Ok(())
                },
            )
            .unwrap();
        if tmp.is_empty() {
            Ok(MutationResult::Skipped)
        } else {
            input.tree = Tree::from_rule_vec(tmp, self.ctx);
            Ok(MutationResult::Mutated)
        }
    }
}

impl<'a> Named for NautilusRandomMutator<'a> {
    fn name(&self) -> &str {
        "NautilusRandomMutator"
    }
}

impl<'a> NautilusRandomMutator<'a> {
    /// Creates a new [`NautilusRandomMutator`].
    #[must_use]
    pub fn new(context: &'a NautilusContext) -> Self {
        let mutator = BackingMutator::new(&context.ctx);
        Self {
            ctx: &context.ctx,
            mutator,
        }
    }
}

// TODO calculate recursions only for new items in corpus
pub struct NautilusRecursionMutator<'a> {
    ctx: &'a Context,
    mutator: BackingMutator,
}

impl<'a, S> Mutator<NautilusInput, S> for NautilusRecursionMutator<'a> {
    fn mutate(
        &mut self,
        _state: &mut S,
        input: &mut NautilusInput,
        _stage_idx: i32,
    ) -> Result<MutationResult, Error> {
        // TODO don't calc recursions here
        if let Some(ref mut recursions) = input.tree.calc_recursions(self.ctx) {
            // TODO get rid of tmp
            let mut tmp = vec![];
            self.mutator
                .mut_random_recursion::<_, ()>(
                    &input.tree,
                    recursions,
                    self.ctx,
                    &mut |t: &TreeMutation, _ctx: &Context| {
                        tmp.extend_from_slice(t.prefix);
                        tmp.extend_from_slice(t.repl);
                        tmp.extend_from_slice(t.postfix);
                        Ok(())
                    },
                )
                .unwrap();
            if !tmp.is_empty() {
                input.tree = Tree::from_rule_vec(tmp, self.ctx);
                return Ok(MutationResult::Mutated);
            }
        }
        Ok(MutationResult::Skipped)
    }
}

impl<'a> Named for NautilusRecursionMutator<'a> {
    fn name(&self) -> &str {
        "NautilusRecursionMutator"
    }
}

impl<'a> NautilusRecursionMutator<'a> {
    /// Creates a new [`NautilusRecursionMutator`].
    #[must_use]
    pub fn new(context: &'a NautilusContext) -> Self {
        let mutator = BackingMutator::new(&context.ctx);
        Self {
            ctx: &context.ctx,
            mutator,
        }
    }
}

pub struct NautilusSpliceMutator<'a, C> {
    ctx: &'a Context,
    mutator: BackingMutator,
    phantom: PhantomData<C>,
}

impl<'a, S, C> Mutator<NautilusInput, S> for NautilusSpliceMutator<'a, C>
where
    C: Corpus<NautilusInput>,
    S: HasCorpus<C, NautilusInput> + HasMetadata,
{
    fn mutate(
        &mut self,
        state: &mut S,
        input: &mut NautilusInput,
        _stage_idx: i32,
    ) -> Result<MutationResult, Error> {
        let meta = state
            .metadata()
            .get::<NautilusChunksMetadata>()
            .expect("NautilusChunksMetadata not in the state");
        // TODO get rid of tmp
        let mut tmp = vec![];
        self.mutator
            .mut_splice::<_, ()>(
                &input.tree,
                self.ctx,
                &meta.cks,
                &mut |t: &TreeMutation, _ctx: &Context| {
                    tmp.extend_from_slice(t.prefix);
                    tmp.extend_from_slice(t.repl);
                    tmp.extend_from_slice(t.postfix);
                    Ok(())
                },
            )
            .unwrap();
        if tmp.is_empty() {
            Ok(MutationResult::Skipped)
        } else {
            input.tree = Tree::from_rule_vec(tmp, self.ctx);
            Ok(MutationResult::Mutated)
        }
    }
}

impl<'a, C> Named for NautilusSpliceMutator<'a, C> {
    fn name(&self) -> &str {
        "NautilusSpliceMutator"
    }
}

impl<'a, C> NautilusSpliceMutator<'a, C> {
    /// Creates a new [`NautilusSpliceMutator`].
    #[must_use]
    pub fn new(context: &'a NautilusContext) -> Self {
        let mutator = BackingMutator::new(&context.ctx);
        Self {
            ctx: &context.ctx,
            mutator,
            phantom: PhantomData,
        }
    }
}
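All three mutators rebuild the input's tree from the `prefix`/`repl`/`postfix` rule slices that grammartec's callback hands back. In a fuzzer they are stacked, with repetition weighting the schedule, as the baby_fuzzer above does; a smaller sketch of the shape:

// Two random mutations for every recursion mutation and splice,
// scheduled up to 2 times per input.
let mutator = StdScheduledMutator::with_max_iterations(
    tuple_list!(
        NautilusRandomMutator::new(&context),
        NautilusRandomMutator::new(&context),
        NautilusRecursionMutator::new(&context),
        NautilusSpliceMutator::new(&context),
    ),
    2,
);
let mut stages = tuple_list!(StdMutationalStage::new(mutator));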


@@ -21,9 +21,11 @@ mod clone {
     /// Checks out the repository into the given directory with the given URL and commit hash.
     /// Any errors will trigger a panic.
     pub fn clone_symcc_at_version(path: &Path, url: &str, commit: &str) {
-        if which("git").is_err() {
-            panic!("ERROR: unable to find git. Git is required to download SymCC.");
-        }
+        assert!(
+            which("git").is_ok(),
+            "ERROR: unable to find git. Git is required to download SymCC."
+        );
+
         let mut cmd = Command::new("git");
         cmd.arg("clone").arg(url).arg(&path);
         let output = cmd.output().expect("failed to execute git clone");


@@ -57,9 +57,12 @@ pub(crate) struct AllocationMetadata {
 impl Allocator {
     pub fn new(options: FridaOptions) -> Self {
         let ret = unsafe { sysconf(_SC_PAGESIZE) };
-        if ret < 0 {
-            panic!("Failed to read pagesize {:?}", io::Error::last_os_error());
-        }
+        assert!(
+            ret >= 0,
+            "Failed to read pagesize {:?}",
+            io::Error::last_os_error()
+        );
+
         #[allow(clippy::cast_sign_loss)]
         let page_size = ret as usize;
         // probe to find a usable shadow bit:
@@ -200,9 +203,11 @@ impl Allocator {
             size
         };
         if size > self.options.asan_max_allocation() {
+            #[allow(clippy::manual_assert)]
             if self.options.asan_max_allocation_panics() {
-                panic!("Allocation is too large: 0x{:x}", size);
+                panic!("ASAN: Allocation is too large: 0x{:x}", size);
             }
+
             return std::ptr::null_mut();
         }
 
         let rounded_up_size = self.round_up_to_page(size) + 2 * self.page_size;


@@ -534,8 +534,9 @@ impl AsanErrors {
             }
         };
 
+        #[allow(clippy::manual_assert)]
         if !self.options.asan_continue_after_error() {
-            panic!("Crashing target!");
+            panic!("ASAN: Crashing target!");
         }
     }
 }


@@ -109,23 +109,21 @@ impl FridaOptions {
                     "drcov" => {
                         options.enable_drcov = value.parse().unwrap();
                         #[cfg(not(target_arch = "aarch64"))]
-                        if options.enable_drcov {
-                            panic!(
-                                "DrCov is not currently supported on targets other than aarch64"
-                            );
-                        }
+                        assert!(
+                            !options.enable_drcov,
+                            "DrCov is not currently supported on targets other than aarch64"
+                        );
                     }
                     "cmplog" => {
                         options.enable_cmplog = value.parse().unwrap();
                         #[cfg(not(target_arch = "aarch64"))]
-                        if options.enable_cmplog {
-                            panic!(
-                                "cmplog is not currently supported on targets other than aarch64"
-                            );
-                        }
+                        assert!(
+                            !options.enable_cmplog,
+                            "cmplog is not currently supported on targets other than aarch64"
+                        );
 
-                        if !cfg!(feature = "cmplog") && options.enable_cmplog {
-                            panic!("cmplog feature is disabled!");
+                        if options.enable_cmplog {
+                            assert!(cfg!(feature = "cmplog"), "cmplog feature is disabled!");
                         }
                     }
                     "cmplog-cores" => {


@@ -333,6 +333,7 @@ where
     let state = inprocess_get_state::<S>().unwrap();
     let mut res = SyscallHookResult::new(None);
     for hook in unsafe { &SYSCALL_HOOKS } {
+        #[allow(clippy::type_complexity)]
         let func: fn(
             &mut QT,
             &mut S,
@@ -585,6 +586,7 @@ where
     }
 
     #[allow(clippy::unused_self)]
+    #[allow(clippy::type_complexity)]
     pub fn hook_syscalls(
         &self,
         hook: fn(


@@ -85,9 +85,11 @@ where
         let mut out_dir = self.output_dir.clone();
         if fs::create_dir(&out_dir).is_err() {
             println!("Out dir at {:?} already exists.", &out_dir);
-            if !out_dir.is_dir() {
-                panic!("Out dir at {:?} is not a valid directory!", &out_dir);
-            }
+            assert!(
+                out_dir.is_dir(),
+                "Out dir at {:?} is not a valid directory!",
+                &out_dir
+            );
         }
 
         let mut crashes = out_dir.clone();
         crashes.push("crashes");


@@ -84,9 +84,11 @@ where
         let mut out_dir = self.output_dir.clone();
         if fs::create_dir(&out_dir).is_err() {
             println!("Out dir at {:?} already exists.", &out_dir);
-            if !out_dir.is_dir() {
-                panic!("Out dir at {:?} is not a valid directory!", &out_dir);
-            }
+            assert!(
+                out_dir.is_dir(),
+                "Out dir at {:?} is not a valid directory!",
+                &out_dir
+            );
         }
 
         let mut crashes = out_dir.clone();
         crashes.push("crashes");


@@ -1,4 +1,4 @@
-//! (Libfuzzer)[https://www.llvm.org/docs/LibFuzzer.html]-style runtime wrapper for `LibAFL`.
+//! [`Libfuzzer`](https://www.llvm.org/docs/LibFuzzer.html)-style runtime wrapper for `LibAFL`.
 //! This makes `LibAFL` interoperable with harnesses written for other fuzzers like `Libfuzzer` and [`AFLplusplus`](aflplus.plus).
 //! We will interact with a C++ target, so use external c functionality


@@ -85,8 +85,7 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32
             *start = MAX_EDGES_NUM as u32;
             start = start.offset(1);
             MAX_EDGES_NUM = MAX_EDGES_NUM.wrapping_add(1);
-            if MAX_EDGES_NUM > EDGES_MAP.len() {
-                panic!("The number of edges reported by SanitizerCoverage exceed the size of the edges map ({}). Use the LIBAFL_EDGES_MAP_SIZE env to increase it at compile time.", EDGES_MAP.len());
-            }
+            assert!((MAX_EDGES_NUM <= EDGES_MAP.len()), "The number of edges reported by SanitizerCoverage exceed the size of the edges map ({}). Use the LIBAFL_EDGES_MAP_SIZE env to increase it at compile time.", EDGES_MAP.len());
         }
     }


@@ -8,7 +8,7 @@ if [ "$1" != "--no-clean" ]; then
    echo "[+] Cleaning up previous builds..."
    cargo clean -p libafl
 fi
-RUST_BACKTRACE=full cargo clippy --all --all-features --tests -- \
+RUST_BACKTRACE=full cargo +nightly clippy --all --all-features --tests -- -Z macro-backtrace \
     -D clippy::pedantic \
     -W clippy::similar_names \
     -A clippy::type_repetition_in_bounds \


@@ -9,3 +9,6 @@ In the `deexit` folder, you'll find a ldpreloadable library, that changes calls
 When a target exits, it quits, and LibAFL will not be able to catch this or recover.
 Abort, on the other hand, raises an error LibAFL's inprocess executor will be able to catch, thanks to its signal handlers.
+
+## Gramatron: gramatron grammars and preprocessing utils
+See https://github.com/HexHive/Gramatron


@@ -1,7 +1,7 @@
 [package]
 name = "construct_automata"
 version = "0.1.0"
-edition = "2018"
+edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html


@@ -50,7 +50,7 @@ fn tokenize(rule: &str) -> (String, Vec<String>, bool) {
     let ss = cap.get(3).map_or(vec![], |m| {
         m.as_str()
             .split_whitespace()
-            .map(|x| x.to_owned())
+            .map(ToOwned::to_owned)
             .collect()
     });
     if terminal == "\\n" {
@@ -66,7 +66,7 @@ fn prepare_transitions(
     state_stacks: &mut Stacks,
     state_count: &mut usize,
     worklist: &mut VecDeque<Element>,
-    element: Element,
+    element: &Element,
     stack_limit: usize,
 ) {
     if element.items.is_empty() {
@@ -88,7 +88,7 @@ fn prepare_transitions(
         let mut state_stack = state_stacks
             .q
             .get(&state)
-            .map_or(VecDeque::new(), |x| x.clone());
+            .map_or(VecDeque::new(), Clone::clone);
         if !state_stack.is_empty() {
             state_stack.pop_front();
         }
@@ -109,7 +109,7 @@ fn prepare_transitions(
         // Check if a recursive transition state being created, if so make a backward
         // edge and don't add anything to the worklist
-        for (key, val) in state_stacks.s.iter() {
+        for (key, val) in &state_stacks.s {
             if state_stack_sorted == *val {
                 transition.dest = *key;
                 // i += 1;
@@ -123,11 +123,9 @@ fn prepare_transitions(
         // If the generated state has a stack size > stack_limit then that state is abandoned
         // and not added to the FSA or the worklist for further expansion
-        if stack_limit > 0 {
-            if transition.stack.len() > stack_limit {
-                // TODO add to unexpanded_rules
-                continue;
-            }
+        if stack_limit > 0 && transition.stack.len() > stack_limit {
+            // TODO add to unexpanded_rules
+            continue;
         }
 
         // Create transitions for the non-recursive relations and add to the worklist
@@ -153,11 +151,11 @@ fn get_states(pda: &[Transition]) -> (HashSet<usize>, HashSet<usize>, HashSet<us
         source.insert(transition.source);
         dest.insert(transition.dest);
     }
-    let all = source.union(&dest).map(|x| *x).collect();
+    let all = source.union(&dest).copied().collect();
     (
         all,
-        dest.difference(&source).map(|x| *x).collect(),
-        source.difference(&dest).map(|x| *x).collect(),
+        dest.difference(&source).copied().collect(),
+        source.difference(&dest).copied().collect(),
     )
 }
@@ -182,24 +180,24 @@ fn postprocess(pda: &[Transition], stack_limit: usize) -> Automaton {
         //let mut culled_pda_unique = HashSet::new();
 
         for final_state in &finals {
-            pda.iter().for_each(|transition| {
+            for transition in pda.iter() {
                 if transition.dest == *final_state && transition.stack.len() > 0 {
                     blocklist.insert(transition.dest);
                 } else {
                     culled_pda.push(transition);
                     //culled_pda_unique.insert(transition);
                 }
-            });
+            }
         }
 
         // println!("culled_pda size: {} pda size: {}", culled_pda.len(), pda.len());
 
-        let culled_finals: HashSet<usize> = finals.difference(&blocklist).map(|x| *x).collect();
+        let culled_finals: HashSet<usize> = finals.difference(&blocklist).copied().collect();
         assert!(culled_finals.len() == 1);
 
-        culled_pda.iter().for_each(|transition| {
+        for transition in &culled_pda {
             if blocklist.contains(&transition.dest) {
-                return;
+                continue;
             }
             num_transition += 1;
             let state = transition.source;
@@ -218,7 +216,7 @@ fn postprocess(pda: &[Transition], stack_limit: usize) -> Automaton {
                     culled_pda.len()
                 );
             }
-        });
+        }
 
         /*
         culled_pda_unique.iter().for_each(|transition| {
@@ -235,13 +233,13 @@ fn postprocess(pda: &[Transition], stack_limit: usize) -> Automaton {
         */
 
         Automaton {
-            init_state: initial.iter().next().cloned().unwrap(),
-            final_state: culled_finals.iter().next().cloned().unwrap(),
+            init_state: initial.iter().next().copied().unwrap(),
+            final_state: culled_finals.iter().next().copied().unwrap(),
             pda: memoized,
         }
     } else {
        // Running FSA construction in exact approximation mode and postprocessing it like so
-        pda.iter().for_each(|transition| {
+        for transition in pda.iter() {
            num_transition += 1;
            let state = transition.source;
            if state >= memoized.len() {
@@ -259,11 +257,11 @@ fn postprocess(pda: &[Transition], stack_limit: usize) -> Automaton {
                    pda.len()
                );
            }
-        });
+        }
 
        Automaton {
-            init_state: initial.iter().next().cloned().unwrap(),
-            final_state: finals.iter().next().cloned().unwrap(),
+            init_state: initial.iter().next().copied().unwrap(),
+            final_state: finals.iter().next().copied().unwrap(),
            pda: memoized,
        }
    }
@@ -298,7 +296,7 @@ fn main() {
             &mut state_stacks,
             &mut state_count,
             &mut worklist,
-            element,
+            &element,
             stack_limit,
         );
     }
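Most of the construct_automata changes are nightly-clippy appeasements: `.map(|x| *x)` on iterators of `Copy` values becomes `.copied()`, closures that just call a method become method references, and `for_each` closures become plain `for` loops so that `continue` replaces the surprising `return`. A standalone sketch of the first pattern:

use std::collections::HashSet;

fn main() {
    let source = HashSet::from([1usize, 2]);
    let dest = HashSet::from([2usize, 3]);

    // Before: source.union(&dest).map(|x| *x).collect()
    // After: `.copied()` dereferences each &usize without a closure.
    let all: HashSet<usize> = source.union(&dest).copied().collect();
    assert_eq!(all.len(), 3);
}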