From 6b5181250c40f2ccf691ece34e1837aadd5a0765 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Thu, 23 Dec 2021 17:13:18 +0100 Subject: [PATCH 01/25] Drcov remodelling (#415) * drcov remodelling * fmt * fix Co-authored-by: tokatoka --- libafl_frida/src/executor.rs | 2 +- libafl_frida/src/helper.rs | 10 +++-- libafl_targets/src/drcov.rs | 71 +++++++++++++++++++----------------- 3 files changed, 45 insertions(+), 38 deletions(-) diff --git a/libafl_frida/src/executor.rs b/libafl_frida/src/executor.rs index 8a2eb15b20..ab6b7448cb 100644 --- a/libafl_frida/src/executor.rs +++ b/libafl_frida/src/executor.rs @@ -76,7 +76,7 @@ where libc::raise(libc::SIGABRT); } } - self.helper.post_exec(input); + self.helper.post_exec(input)?; res } } diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 480ab63064..83c222695d 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -2,7 +2,7 @@ use ahash::AHasher; use std::hash::Hasher; use libafl::inputs::{HasTargetBytes, Input}; - +use libafl::Error; use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; #[cfg(target_arch = "aarch64")] @@ -69,7 +69,7 @@ pub trait FridaHelper<'a> { fn pre_exec(&mut self, input: &I); /// Called after execution of an input - fn post_exec(&mut self, input: &I); + fn post_exec(&mut self, input: &I) -> Result<(), Error>; /// Returns `true` if stalker is enabled fn stalker_enabled(&self) -> bool; @@ -124,13 +124,14 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { } } - fn post_exec(&mut self, input: &I) { + fn post_exec(&mut self, input: &I) -> Result<(), Error> { if self.options.drcov_enabled() { let mut hasher = AHasher::new_with_keys(0, 0); hasher.write(input.target_bytes().as_slice()); let filename = format!("./coverage/{:016x}.drcov", hasher.finish(),); - DrCovWriter::new(&filename, &self.ranges, &mut self.drcov_basic_blocks).write(); + DrCovWriter::new(&self.ranges).write(&filename, &self.drcov_basic_blocks)?; + self.drcov_basic_blocks.clear(); } #[cfg(unix)] @@ -145,6 +146,7 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { .poison(slice.as_ptr() as usize, slice.len()); self.asan_runtime.reset_allocations(); } + Ok(()) } fn stalker_enabled(&self) -> bool { diff --git a/libafl_targets/src/drcov.rs b/libafl_targets/src/drcov.rs index 4703e38dc3..9176a1bc2b 100644 --- a/libafl_targets/src/drcov.rs +++ b/libafl_targets/src/drcov.rs @@ -2,26 +2,22 @@ //! writing basic-block trace files to be read by coverage analysis tools, such as [Lighthouse](https://github.com/gaasedelen/lighthouse), //! [bncov](https://github.com/ForAllSecure/bncov), [dragondance](https://github.com/0ffffffffh/dragondance), etc. +use libafl::Error; use rangemap::RangeMap; use std::{ fs::File, io::{BufWriter, Write}, + path::Path, }; /// A basic block struct -#[derive(Clone, Copy)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct DrCovBasicBlock { - start: usize, - end: usize, -} - -/// A writer for `DrCov` files -pub struct DrCovWriter<'a> { - writer: BufWriter, - module_mapping: &'a RangeMap, - basic_blocks: &'a mut Vec, + pub start: usize, + pub end: usize, } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] #[repr(C)] struct DrCovBasicBlockEntry { start: u32, @@ -29,46 +25,54 @@ struct DrCovBasicBlockEntry { mod_id: u16, } +/// A writer for `DrCov` files +pub struct DrCovWriter<'a> { + module_mapping: &'a RangeMap, +} + impl DrCovBasicBlock { /// Create a new [`DrCovBasicBlock`] with the given `start` and `end` addresses. 
#[must_use] pub fn new(start: usize, end: usize) -> Self { Self { start, end } } + + /// Create a new [`DrCovBasicBlock`] with a given `start` address and a block size. + #[must_use] + pub fn new_with_size(start: usize, size: usize) -> Self { + Self::new(start, start + size) + } } + impl<'a> DrCovWriter<'a> { /// Create a new [`DrCovWriter`] - pub fn new( - path: &str, - module_mapping: &'a RangeMap, - basic_blocks: &'a mut Vec, - ) -> Self { - Self { - writer: BufWriter::new( - File::create(path).expect("Unable to create file for coverage data"), - ), - module_mapping, - basic_blocks, - } + #[must_use] + pub fn new(module_mapping: &'a RangeMap) -> Self { + Self { module_mapping } } - /// Write the `DrCov` file. - pub fn write(&mut self) { - self.writer + /// Write the list of basic blocks to a `DrCov` file. + pub fn write
<P>
(&mut self, path: P, basic_blocks: &[DrCovBasicBlock]) -> Result<(), Error> + where + P: AsRef, + { + let mut writer = BufWriter::new(File::create(path)?); + + writer .write_all(b"DRCOV VERSION: 2\nDRCOV FLAVOR: libafl\n") .unwrap(); let modules: Vec<(&std::ops::Range, &(u16, String))> = self.module_mapping.iter().collect(); - self.writer + writer .write_all(format!("Module Table: version 2, count {}\n", modules.len()).as_bytes()) .unwrap(); - self.writer + writer .write_all(b"Columns: id, base, end, entry, checksum, timestamp, path\n") .unwrap(); for module in modules { let (range, (id, path)) = module; - self.writer + writer .write_all( format!( "{:03}, 0x{:x}, 0x{:x}, 0x00000000, 0x00000000, 0x00000000, {}\n", @@ -78,23 +82,24 @@ impl<'a> DrCovWriter<'a> { ) .unwrap(); } - self.writer - .write_all(format!("BB Table: {} bbs\n", self.basic_blocks.len()).as_bytes()) + writer + .write_all(format!("BB Table: {} bbs\n", basic_blocks.len()).as_bytes()) .unwrap(); - for block in self.basic_blocks.drain(0..) { + for block in basic_blocks { let (range, (id, _)) = self.module_mapping.get_key_value(&block.start).unwrap(); let basic_block = DrCovBasicBlockEntry { start: (block.start - range.start) as u32, size: (block.end - block.start) as u16, mod_id: *id, }; - self.writer + writer .write_all(unsafe { std::slice::from_raw_parts(&basic_block as *const _ as *const u8, 8) }) .unwrap(); } - self.writer.flush().unwrap(); + writer.flush()?; + Ok(()) } } From 9cd0d2228cf382df7c6d2134b69b2483f9fc3468 Mon Sep 17 00:00:00 2001 From: tokatoka Date: Fri, 24 Dec 2021 15:45:08 +0900 Subject: [PATCH 02/25] drcov runtime --- libafl_frida/src/drcov_rt.rs | 38 ++++++++++++++++++++++++++++++++++++ libafl_frida/src/helper.rs | 29 ++++++++++----------------- libafl_frida/src/lib.rs | 2 ++ 3 files changed, 50 insertions(+), 19 deletions(-) create mode 100644 libafl_frida/src/drcov_rt.rs diff --git a/libafl_frida/src/drcov_rt.rs b/libafl_frida/src/drcov_rt.rs new file mode 100644 index 0000000000..c08c18d36c --- /dev/null +++ b/libafl_frida/src/drcov_rt.rs @@ -0,0 +1,38 @@ +use std::hash::Hasher; +use ahash::AHasher; +use libafl::inputs::{HasTargetBytes, Input}; +use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; +use libafl::Error; +use rangemap::RangeMap; + + +pub struct DrCovRuntime { + pub drcov_basic_blocks: Vec, + ranges: RangeMap, +} + +impl DrCovRuntime { + pub fn new() -> Self { + Self { + drcov_basic_blocks: vec![], + ranges: RangeMap::new(), + } + } + + pub fn pre_exec(&mut self, _input: &I) -> Result<(), Error> { + Ok(()) + } + + pub fn post_exec(&mut self, input: &I) -> Result<(), Error> { + let mut hasher = AHasher::new_with_keys(0, 0); + hasher.write(input.target_bytes().as_slice()); + + let filename = format!("./coverage/{:016x}.drcov", hasher.finish(),); + DrCovWriter::new(&self.ranges).write(&filename, &self.drcov_basic_blocks)?; + self.drcov_basic_blocks.clear(); + + Ok(()) + } + + +} \ No newline at end of file diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 83c222695d..1d4f583a85 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -1,9 +1,6 @@ -use ahash::AHasher; -use std::hash::Hasher; - use libafl::inputs::{HasTargetBytes, Input}; use libafl::Error; -use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; +use libafl_targets::drcov::{DrCovBasicBlock}; #[cfg(target_arch = "aarch64")] use capstone::{ @@ -46,6 +43,8 @@ use crate::{asan::asan_rt::AsanRuntime, FridaOptions}; #[cfg(windows)] use crate::FridaOptions; +use 
crate::drcov_rt::DrCovRuntime; + use crate::coverage_rt::CoverageRuntime; #[cfg(feature = "cmplog")] @@ -66,7 +65,7 @@ pub trait FridaHelper<'a> { fn register_thread(&mut self); /// Called prior to execution of an input - fn pre_exec(&mut self, input: &I); + fn pre_exec(&mut self, input: &I) -> Result<(), Error>; /// Called after execution of an input fn post_exec(&mut self, input: &I) -> Result<(), Error>; @@ -93,10 +92,10 @@ pub struct FridaInstrumentationHelper<'a> { asan_runtime: AsanRuntime, #[cfg(feature = "cmplog")] cmplog_runtime: CmpLogRuntime, + drcov_runtime: DrCovRuntime, ranges: RangeMap, module_map: ModuleMap, options: &'a FridaOptions, - drcov_basic_blocks: Vec, } impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { @@ -114,7 +113,7 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { fn pre_exec(&mut self, _input: &I) {} #[cfg(unix)] - fn pre_exec(&mut self, input: &I) { + fn pre_exec(&mut self, input: &I) -> Result<(), Error>{ let target_bytes = input.target_bytes(); let slice = target_bytes.as_slice(); //println!("target_bytes: {:#x}: {:02x?}", slice.as_ptr() as usize, slice); @@ -122,18 +121,11 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { self.asan_runtime .unpoison(slice.as_ptr() as usize, slice.len()); } + Ok(()) } fn post_exec(&mut self, input: &I) -> Result<(), Error> { - if self.options.drcov_enabled() { - let mut hasher = AHasher::new_with_keys(0, 0); - hasher.write(input.target_bytes().as_slice()); - - let filename = format!("./coverage/{:016x}.drcov", hasher.finish(),); - DrCovWriter::new(&self.ranges).write(&filename, &self.drcov_basic_blocks)?; - self.drcov_basic_blocks.clear(); - } - + self.drcov_runtime.post_exec(input)?; #[cfg(unix)] if self.options.asan_enabled() { if self.options.asan_detect_leaks() { @@ -246,10 +238,10 @@ impl<'a> FridaInstrumentationHelper<'a> { asan_runtime: AsanRuntime::new(options.clone()), #[cfg(feature = "cmplog")] cmplog_runtime: CmpLogRuntime::new(), + drcov_runtime: DrCovRuntime::new(), ranges: RangeMap::new(), module_map: ModuleMap::new_from_names(modules_to_instrument), options, - drcov_basic_blocks: vec![], }; if helper.options().stalker_enabled() { @@ -309,8 +301,7 @@ impl<'a> FridaInstrumentationHelper<'a> { helper.asan_runtime.real_address_for_stalked(pc(&context)); //let (range, (id, name)) = helper.ranges.get_key_value(&real_address).unwrap(); //println!("{}:0x{:016x}", name, real_address - range.start); - helper - .drcov_basic_blocks + helper.drcov_runtime.drcov_basic_blocks .push(DrCovBasicBlock::new(real_address, real_address + 4)); }); } diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index 4c1d8f805c..70915cbc95 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -19,6 +19,8 @@ pub mod cmplog_rt; /// The `LibAFL` firda helper pub mod helper; +pub mod drcov_rt; + /// The frida executor pub mod executor; From e6434d2ec2b8383a254e90b5cd86a26d467bc995 Mon Sep 17 00:00:00 2001 From: tokatoka Date: Fri, 24 Dec 2021 15:46:27 +0900 Subject: [PATCH 03/25] fmt --- libafl_frida/src/drcov_rt.rs | 9 +++------ libafl_frida/src/helper.rs | 8 +++++--- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/libafl_frida/src/drcov_rt.rs b/libafl_frida/src/drcov_rt.rs index c08c18d36c..d2ef88b2f3 100644 --- a/libafl_frida/src/drcov_rt.rs +++ b/libafl_frida/src/drcov_rt.rs @@ -1,10 +1,9 @@ -use std::hash::Hasher; use ahash::AHasher; use libafl::inputs::{HasTargetBytes, Input}; -use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; use libafl::Error; 
+use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; use rangemap::RangeMap; - +use std::hash::Hasher; pub struct DrCovRuntime { pub drcov_basic_blocks: Vec, @@ -33,6 +32,4 @@ impl DrCovRuntime { Ok(()) } - - -} \ No newline at end of file +} diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 1d4f583a85..2b035adc6f 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -1,6 +1,6 @@ use libafl::inputs::{HasTargetBytes, Input}; use libafl::Error; -use libafl_targets::drcov::{DrCovBasicBlock}; +use libafl_targets::drcov::DrCovBasicBlock; #[cfg(target_arch = "aarch64")] use capstone::{ @@ -113,7 +113,7 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { fn pre_exec(&mut self, _input: &I) {} #[cfg(unix)] - fn pre_exec(&mut self, input: &I) -> Result<(), Error>{ + fn pre_exec(&mut self, input: &I) -> Result<(), Error> { let target_bytes = input.target_bytes(); let slice = target_bytes.as_slice(); //println!("target_bytes: {:#x}: {:02x?}", slice.as_ptr() as usize, slice); @@ -301,7 +301,9 @@ impl<'a> FridaInstrumentationHelper<'a> { helper.asan_runtime.real_address_for_stalked(pc(&context)); //let (range, (id, name)) = helper.ranges.get_key_value(&real_address).unwrap(); //println!("{}:0x{:016x}", name, real_address - range.start); - helper.drcov_runtime.drcov_basic_blocks + helper + .drcov_runtime + .drcov_basic_blocks .push(DrCovBasicBlock::new(real_address, real_address + 4)); }); } From 97c169fe63142593dc974f6683e693a5ad51a889 Mon Sep 17 00:00:00 2001 From: tokatoka Date: Fri, 24 Dec 2021 16:34:53 +0900 Subject: [PATCH 04/25] init ranges later --- libafl_frida/src/drcov_rt.rs | 6 ++++++ libafl_frida/src/helper.rs | 2 ++ 2 files changed, 8 insertions(+) diff --git a/libafl_frida/src/drcov_rt.rs b/libafl_frida/src/drcov_rt.rs index d2ef88b2f3..66bdf2271c 100644 --- a/libafl_frida/src/drcov_rt.rs +++ b/libafl_frida/src/drcov_rt.rs @@ -11,6 +11,7 @@ pub struct DrCovRuntime { } impl DrCovRuntime { + #[must_use] pub fn new() -> Self { Self { drcov_basic_blocks: vec![], @@ -18,6 +19,11 @@ impl DrCovRuntime { } } + pub fn init(&mut self, ranges: &RangeMap) { + self.ranges = ranges.clone(); + } + + #[allow(clippy::unused_self)] pub fn pre_exec(&mut self, _input: &I) -> Result<(), Error> { Ok(()) } diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 2b035adc6f..bd45710d65 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -377,6 +377,8 @@ impl<'a> FridaInstrumentationHelper<'a> { if helper.options().asan_enabled() || helper.options().drcov_enabled() { helper.asan_runtime.init(gum, modules_to_instrument); } + + helper.drcov_runtime.init(&helper.ranges); #[cfg(feature = "cmplog")] if helper.options.cmplog_enabled() { helper.cmplog_runtime.init(); From 11ae49b7cd4cc5f4948b936e0e32e49435bf12ee Mon Sep 17 00:00:00 2001 From: s1341 Date: Sun, 26 Dec 2021 10:44:25 +0200 Subject: [PATCH 05/25] Implement max total allocation size for frida asan --- libafl_frida/src/alloc.rs | 9 ++++++++- libafl_frida/src/lib.rs | 12 ++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index 733a92a70d..22912881fa 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -28,6 +28,7 @@ pub struct Allocator { shadow_pages: RangeSet, allocation_queue: HashMap>, largest_allocation: usize, + total_allocation_size: usize, base_mapping_addr: usize, current_mapping_addr: usize, } @@ -70,7 +71,7 @@ impl Allocator { let mut shadow_bit = 
0; #[cfg(all(target_arch = "aarch64", target_os = "android"))] - for try_shadow_bit in &[46usize, 36usize] { + for try_shadow_bit in &[44usize, 36usize] { let addr: usize = 1 << try_shadow_bit; if unsafe { mmap( @@ -147,6 +148,7 @@ impl Allocator { shadow_pages: RangeSet::new(), allocation_queue: HashMap::new(), largest_allocation: 0, + total_allocation_size: 0, base_mapping_addr: addr + addr + addr, current_mapping_addr: addr + addr + addr, } @@ -205,6 +207,11 @@ impl Allocator { } let rounded_up_size = self.round_up_to_page(size) + 2 * self.page_size; + if self.total_allocation_size + rounded_up_size > self.options.asan_max_total_allocation() { + return std::ptr::null_mut(); + } + self.total_allocation_size += rounded_up_size; + let metadata = if let Some(mut metadata) = self.find_smallest_fit(rounded_up_size) { //println!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size); metadata.is_malloc_zero = is_malloc_zero; diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index 4c1d8f805c..50ca01c210 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -37,6 +37,7 @@ pub struct FridaOptions { enable_asan_continue_after_error: bool, enable_asan_allocation_backtraces: bool, asan_max_allocation: usize, + asan_max_total_allocation: usize, asan_max_allocation_panics: bool, enable_coverage: bool, enable_drcov: bool, @@ -79,6 +80,9 @@ impl FridaOptions { "asan-max-allocation" => { options.asan_max_allocation = value.parse().unwrap(); } + "asan-max-total-allocation" => { + options.asan_max_total_allocation = value.parse().unwrap(); + } "asan-max-allocation-panics" => { options.asan_max_allocation_panics = value.parse().unwrap(); } @@ -208,6 +212,13 @@ impl FridaOptions { self.asan_max_allocation } + /// The maximum total allocation size that the ASAN allocator should allocate + #[must_use] + #[inline] + pub fn asan_max_total_allocation(&self) -> usize { + self.asan_max_total_allocation + } + /// Should we panic if the max ASAN allocation size is exceeded #[must_use] #[inline] @@ -252,6 +263,7 @@ impl Default for FridaOptions { enable_asan_continue_after_error: false, enable_asan_allocation_backtraces: true, asan_max_allocation: 1 << 30, + asan_max_total_allocation: 1 << 32, asan_max_allocation_panics: false, enable_coverage: true, enable_drcov: false, From 2e92a34494295f0ffe0d5b2eaf5b991448423579 Mon Sep 17 00:00:00 2001 From: s1341 Date: Sun, 26 Dec 2021 11:17:27 +0200 Subject: [PATCH 06/25] Reset total allocations on reset --- libafl_frida/src/alloc.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index 22912881fa..f9c4ae2bb9 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -352,6 +352,8 @@ impl Allocator { for allocation in tmp_allocations { self.allocations.insert(allocation.address, allocation); } + + self.total_allocation_size = 0; } pub fn get_usable_size(&self, ptr: *mut c_void) -> usize { From eeac0f4f068208c1416e5ed12db33a7c10798dee Mon Sep 17 00:00:00 2001 From: s1341 Date: Tue, 28 Dec 2021 11:00:44 +0200 Subject: [PATCH 07/25] Fix strncmp hook to only check the length of the string (#434) --- libafl_frida/src/asan/hook_funcs.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/libafl_frida/src/asan/hook_funcs.rs b/libafl_frida/src/asan/hook_funcs.rs index 5f6cbc4b62..9bd2516bc3 100644 --- a/libafl_frida/src/asan/hook_funcs.rs +++ b/libafl_frida/src/asan/hook_funcs.rs @@ -768,8 
+768,9 @@ impl AsanRuntime { pub fn hook_strncmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { extern "C" { fn strncmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; + fn strnlen(s: *const c_char, n: usize) -> usize; } - if !(self.shadow_check_func().unwrap())(s1 as *const c_void, n) { + if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strnlen(s1, n) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncmp".to_string(), self.real_address_for_stalked(AsanRuntime::pc()), @@ -778,7 +779,7 @@ impl AsanRuntime { Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, n) { + if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strnlen(s2, n) }) { AsanErrors::get_mut().report_error(AsanError::BadFuncArgRead(( "strncmp".to_string(), self.real_address_for_stalked(AsanRuntime::pc()), From e47c3be3fdf5af16c3020f70efbf8e144a1aee91 Mon Sep 17 00:00:00 2001 From: Evan Richter Date: Tue, 28 Dec 2021 18:30:14 -0600 Subject: [PATCH 08/25] [libafl_qemu] fix build.rs (#435) I noticed qemu was only building on one core, so I debugged the jobs environment variable. Evidently cargo passes `CARGO_BUILD_JOBS` is passed to build.rs scripts as `NUM_JOBS`. Other env vars for build.rs can be found [here](https://web.mit.edu/rust-lang_v1.25/arch/amd64_ubuntu1404/share/doc/rust/html/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts) --- libafl_qemu/build.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libafl_qemu/build.rs b/libafl_qemu/build.rs index 0dae22b92e..efb906b8f1 100644 --- a/libafl_qemu/build.rs +++ b/libafl_qemu/build.rs @@ -47,7 +47,7 @@ fn main() { } else if cfg!(feature = "aarch64") { "aarch64".to_string() } else if cfg!(feature = "i386") { - "i368".to_string() + "i386".to_string() } else { env::var("CPU_TARGET").unwrap_or_else(|_| { println!( @@ -57,7 +57,7 @@ fn main() { }) }; - let jobs = env::var("CARGO_BUILD_JOBS"); + let jobs = env::var("NUM_JOBS"); let cross_cc = env::var("CROSS_CC").unwrap_or_else(|_| { println!("cargo:warning=CROSS_CC is not set, default to cc (things can go wrong if the selected cpu target ({}) is not the host arch ({}))", cpu_target, env::consts::ARCH); From b5153cc525e74298d69e3e22d27242cff1125304 Mon Sep 17 00:00:00 2001 From: s1341 Date: Wed, 29 Dec 2021 19:47:33 +0200 Subject: [PATCH 09/25] Frida various fixes (#436) * Make drcov post_exec dependent on whether drcov is enabled * Fix find_smallest_fit algorithm * Fix missing ? 
* fix warnings * fix * todo for non-linux/android shadow, clippy * typo * removed unsupposted eq * cleanup, docu * libafl::Error * fixed import Co-authored-by: tokatoka Co-authored-by: Dominik Maier --- fuzzers/frida_libpng/src/fuzzer.rs | 3 ++- libafl/src/bolts/launcher.rs | 8 +++----- libafl_frida/src/alloc.rs | 22 ++++++++++++---------- libafl_frida/src/asan/asan_rt.rs | 13 +++++-------- libafl_frida/src/asan/errors.rs | 6 ++---- libafl_frida/src/drcov_rt.rs | 22 ++++++++++++++++++++-- libafl_frida/src/executor.rs | 2 +- libafl_frida/src/helper.rs | 4 +++- 8 files changed, 48 insertions(+), 32 deletions(-) diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index 376a35cae4..cbccf1b39c 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -110,6 +110,7 @@ struct Opt { )] output: PathBuf, + /* #[structopt( parse(try_from_str = timeout_from_millis_str), short, @@ -129,7 +130,7 @@ struct Opt { multiple = true )] tokens: Vec, - + */ #[structopt( long, help = "The configuration this fuzzer runs with, for multiprocessing", diff --git a/libafl/src/bolts/launcher.rs b/libafl/src/bolts/launcher.rs index b8f8d9eda1..8e37c40635 100644 --- a/libafl/src/bolts/launcher.rs +++ b/libafl/src/bolts/launcher.rs @@ -112,11 +112,9 @@ where println!("spawning on cores: {:?}", self.cores); #[cfg(feature = "std")] - let stdout_file = if let Some(filename) = self.stdout_file { - Some(File::create(filename).unwrap()) - } else { - None - }; + let stdout_file = self + .stdout_file + .map(|filename| File::create(filename).unwrap()); // Spawn clients let mut index = 0_u64; diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index f9c4ae2bb9..6badfdea44 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -10,7 +10,7 @@ use backtrace::Backtrace; use libc::{sysconf, _SC_PAGESIZE}; use rangemap::RangeSet; use serde::{Deserialize, Serialize}; -use std::{ffi::c_void, io}; +use std::{collections::BTreeMap, ffi::c_void, io}; use crate::{ asan::errors::{AsanError, AsanErrors}, @@ -26,7 +26,7 @@ pub struct Allocator { pre_allocated_shadow: bool, allocations: HashMap, shadow_pages: RangeSet, - allocation_queue: HashMap>, + allocation_queue: BTreeMap>, largest_allocation: usize, total_allocation_size: usize, base_mapping_addr: usize, @@ -118,8 +118,13 @@ impl Allocator { shadow_bit = try_shadow_bit; } } - assert!(shadow_bit != 0); + #[cfg(not(any( + target_os = "linux", + all(target_arch = "aarch64", target_os = "android") + )))] + todo!("Shadow region not yet supported for this platform!"); + assert!(shadow_bit != 0); // attempt to pre-map the entire shadow-memory space let addr: usize = 1 << shadow_bit; @@ -146,7 +151,7 @@ impl Allocator { shadow_bit, allocations: HashMap::new(), shadow_pages: RangeSet::new(), - allocation_queue: HashMap::new(), + allocation_queue: BTreeMap::new(), largest_allocation: 0, total_allocation_size: 0, base_mapping_addr: addr + addr + addr, @@ -173,15 +178,12 @@ impl Allocator { } fn find_smallest_fit(&mut self, size: usize) -> Option { - let mut current_size = size; - while current_size <= self.largest_allocation { - if self.allocation_queue.contains_key(¤t_size) { - if let Some(metadata) = self.allocation_queue.entry(current_size).or_default().pop() - { + for (current_size, list) in &mut self.allocation_queue { + if *current_size >= size { + if let Some(metadata) = list.pop() { return Some(metadata); } } - current_size *= 2; } None } diff --git a/libafl_frida/src/asan/asan_rt.rs 
b/libafl_frida/src/asan/asan_rt.rs index 799c079a21..566b12748f 100644 --- a/libafl_frida/src/asan/asan_rt.rs +++ b/libafl_frida/src/asan/asan_rt.rs @@ -6,13 +6,10 @@ even if the target would not have crashed under normal conditions. this helps finding mem errors early. */ -use frida_gum::NativePointer; -use frida_gum::{ModuleDetails, RangeDetails}; -use hashbrown::HashMap; - -use nix::sys::mman::{mmap, mprotect, MapFlags, ProtFlags}; - use backtrace::Backtrace; +use frida_gum::{ModuleDetails, NativePointer, RangeDetails}; +use hashbrown::HashMap; +use nix::sys::mman::{mmap, MapFlags, ProtFlags}; use crate::helper::FridaInstrumentationHelper; @@ -182,10 +179,9 @@ impl AsanRuntime { } self.hook_functions(_gum); - + /* unsafe { let mem = self.allocator.alloc(0xac + 2, 8); - unsafe { mprotect( (self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, 0x1000, @@ -256,6 +252,7 @@ impl AsanRuntime { } // assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4)); } + */ } /// Reset all allocations so that they can be reused for new allocation requests. diff --git a/libafl_frida/src/asan/errors.rs b/libafl_frida/src/asan/errors.rs index bbd42e7528..f0e641764c 100644 --- a/libafl_frida/src/asan/errors.rs +++ b/libafl_frida/src/asan/errors.rs @@ -1,10 +1,11 @@ +#[cfg(target_arch = "x86_64")] +use crate::asan::asan_rt::ASAN_SAVE_REGISTER_NAMES; use backtrace::Backtrace; use capstone::{arch::BuildsCapstone, Capstone}; use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity}; #[cfg(target_arch = "aarch64")] use frida_gum::interceptor::Interceptor; use frida_gum::ModuleDetails; - use libafl::{ bolts::{ownedref::OwnedPtr, tuples::Named}, corpus::Testcase, @@ -20,9 +21,6 @@ use serde::{Deserialize, Serialize}; use std::io::Write; use termcolor::{Color, ColorSpec, WriteColor}; -#[cfg(target_arch = "x86_64")] -use crate::asan::asan_rt::ASAN_SAVE_REGISTER_NAMES; - use crate::{alloc::AllocationMetadata, asan::asan_rt::ASAN_SAVE_REGISTER_COUNT, FridaOptions}; #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/libafl_frida/src/drcov_rt.rs b/libafl_frida/src/drcov_rt.rs index 66bdf2271c..c30ba95c9b 100644 --- a/libafl_frida/src/drcov_rt.rs +++ b/libafl_frida/src/drcov_rt.rs @@ -1,16 +1,24 @@ +//! 
Generates `DrCov` traces use ahash::AHasher; -use libafl::inputs::{HasTargetBytes, Input}; -use libafl::Error; +use libafl::{ + inputs::{HasTargetBytes, Input}, + Error, +}; use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; use rangemap::RangeMap; use std::hash::Hasher; +/// Generates `DrCov` traces +#[derive(Clone, Debug)] pub struct DrCovRuntime { + /// The basic blocks of this execution pub drcov_basic_blocks: Vec, + /// The memory ragnes of this target ranges: RangeMap, } impl DrCovRuntime { + /// Creates a new [`DrCovRuntime`] #[must_use] pub fn new() -> Self { Self { @@ -19,15 +27,19 @@ impl DrCovRuntime { } } + /// initializes this runtime wiith the given `ranges` pub fn init(&mut self, ranges: &RangeMap) { self.ranges = ranges.clone(); } + /// Called before execution, does nothing #[allow(clippy::unused_self)] pub fn pre_exec(&mut self, _input: &I) -> Result<(), Error> { Ok(()) } + /// Called after execution, writes the trace to a unique `DrCov` file for this trace + /// into `./coverage/.drcov` pub fn post_exec(&mut self, input: &I) -> Result<(), Error> { let mut hasher = AHasher::new_with_keys(0, 0); hasher.write(input.target_bytes().as_slice()); @@ -39,3 +51,9 @@ impl DrCovRuntime { Ok(()) } } + +impl Default for DrCovRuntime { + fn default() -> Self { + Self::new() + } +} diff --git a/libafl_frida/src/executor.rs b/libafl_frida/src/executor.rs index ab6b7448cb..c71c5d82d2 100644 --- a/libafl_frida/src/executor.rs +++ b/libafl_frida/src/executor.rs @@ -55,7 +55,7 @@ where mgr: &mut EM, input: &I, ) -> Result { - self.helper.pre_exec(input); + self.helper.pre_exec(input)?; if self.helper.stalker_enabled() { if self.followed { self.stalker.activate(NativePointer(core::ptr::null_mut())); diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index bd45710d65..680fced438 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -125,7 +125,9 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { } fn post_exec(&mut self, input: &I) -> Result<(), Error> { - self.drcov_runtime.post_exec(input)?; + if self.options().enable_drcov { + self.drcov_runtime.post_exec(input)?; + } #[cfg(unix)] if self.options.asan_enabled() { if self.options.asan_detect_leaks() { From b537539b54045a19f8bfd8b6db479e8584467e4d Mon Sep 17 00:00:00 2001 From: Dongjia Zhang Date: Fri, 31 Dec 2021 00:33:23 +0900 Subject: [PATCH 10/25] Use MiMalloc for in-process fuzzers (#439) * MiMalloc * docu * other fuzzers * mention asan --- docs/src/core_concepts/executor.md | 41 +++++++++++++++++ .../forkserver_and_inprocessforkserver.md | 44 ------------------- fuzzers/frida_libpng/Cargo.toml | 1 + fuzzers/frida_libpng/src/fuzzer.rs | 3 ++ fuzzers/fuzzbench/Cargo.toml | 1 + fuzzers/fuzzbench/src/lib.rs | 3 ++ fuzzers/generic_inmemory/Cargo.toml | 1 + fuzzers/generic_inmemory/src/lib.rs | 3 ++ fuzzers/libfuzzer_libmozjpeg/Cargo.toml | 1 + fuzzers/libfuzzer_libmozjpeg/src/lib.rs | 3 ++ fuzzers/libfuzzer_libpng/Cargo.toml | 1 + fuzzers/libfuzzer_libpng/src/lib.rs | 3 ++ fuzzers/libfuzzer_libpng_ctx/Cargo.toml | 1 + fuzzers/libfuzzer_libpng_ctx/src/lib.rs | 3 ++ fuzzers/libfuzzer_libpng_launcher/Cargo.toml | 1 + fuzzers/libfuzzer_libpng_launcher/src/lib.rs | 3 ++ fuzzers/libfuzzer_reachability/Cargo.toml | 1 + fuzzers/libfuzzer_reachability/src/lib.rs | 3 ++ fuzzers/libfuzzer_stb_image/Cargo.toml | 1 + fuzzers/libfuzzer_stb_image/src/main.rs | 3 ++ .../fuzzer/Cargo.toml | 1 + .../fuzzer/src/main.rs | 3 ++ fuzzers/libfuzzer_stb_image_sugar/Cargo.toml | 1 + 
fuzzers/libfuzzer_stb_image_sugar/src/main.rs | 3 ++ 24 files changed, 85 insertions(+), 44 deletions(-) delete mode 100644 docs/src/core_concepts/forkserver_and_inprocessforkserver.md diff --git a/docs/src/core_concepts/executor.md b/docs/src/core_concepts/executor.md index ae461e43ba..11934f056c 100644 --- a/docs/src/core_concepts/executor.md +++ b/docs/src/core_concepts/executor.md @@ -14,3 +14,44 @@ In Rust, we bind this concept to the [`Executor`](https://docs.rs/libafl/0/libaf By default, we implement some commonly used Executors such as [`InProcessExecutor`](https://docs.rs/libafl/0/libafl/executors/inprocess/struct.InProcessExecutor.html) is which the target is a harness function providing in-process crash detection. Another Executor is the [`ForkserverExecutor`](https://docs.rs/libafl/0/libafl/executors/forkserver/struct.ForkserverExecutor.html) that implements an AFL-like mechanism to spawn child processes to fuzz. A common pattern when creating an Executor is wrapping an existing one, for instance [`TimeoutExecutor`](https://docs.rs/libafl/0.6.1/libafl/executors/timeout/struct.TimeoutExecutor.html) wraps an executor and install a timeout callback before calling the original run function of the wrapped executor. + +## InProcessExecutor +Let's begin with the base case; `InProcessExecutor`. +This executor uses [_SanitizerCoverage_](https://clang.llvm.org/docs/SanitizerCoverage.html) as its backend, as you can find the related code in `libafl_targets/src/sancov_pcguards`. Here we allocate a map called `EDGES_MAP` and then our compiler wrapper compiles the harness to write the coverage into this map. +When you want to execute the harness as fast as possible, you will most probably want to use this `InprocessExecutor`. + One thing to note here is, when your harness is likely to have heap corruption bugs, you want to use another allocator so that corrupted heap does not affect the fuzzer itself. (For example, we adopt MiMalloc in some of our fuzzers.). Alternatively you can compile your harness with address sanitizer to make sure you can catch these heap bugs. + +## ForkserverExecutor +Next, we'll take a look at the `ForkserverExecutor`. In this case, it is `afl-cc` (from AFLplusplus/AFLplusplus) that compiles the harness code, and therefore, we can't use `EDGES_MAP` anymore. Hopefully, we have [_a way_](https://github.com/AFLplusplus/AFLplusplus/blob/2e15661f184c77ac1fbb6f868c894e946cbb7f17/instrumentation/afl-compiler-rt.o.c#L270) to tell the forkserver which map to record the coverage. +As you can see from the forkserver example, +```rust,ignore +//Coverage map shared between observer and executor +let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap(); +//let the forkserver know the shmid +shmem.write_to_env("__AFL_SHM_ID").unwrap(); +let mut shmem_map = shmem.map_mut(); +``` +Here we make a shared memory region; `shmem`, and write this to environmental variable `__AFL_SHM_ID`. Then the instrumented binary, or the forkserver, finds this shared memory region (from the aforementioned env var) to record its coverage. On your fuzzer side, you can pass this shmem map to your `Observer` to obtain coverage feedbacks combined with any `Feedback`. + +Another feature of the `ForkserverExecutor` to mention is the shared memory testcases. In normal cases, the mutated input is passed between the forkserver and the instrumented binary via `.cur_input` file. You can improve your forkserver fuzzer's performance by passing the input with shared memory. 
+See AFL++'s [_documentation_](https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md#5-shared-memory-fuzzing) or the fuzzer example in `forkserver_simple/src/program.c` for reference. +It is very simple, when you call `ForkserverExecutor::new()` with `use_shmem_testcase` true, the `ForkserverExecutor` sets things up and your harness can just fetch the input from `__AFL_FUZZ_TESTCASE_BUF` + +## InprocessForkExecutor +Finally, we'll talk about the `InProcessForkExecutor`. +`InProcessForkExecutor` has only one difference from `InprocessExecutor`; It forks before running the harness and that's it. +But why do we want to do so? well, under some circumstances, you may find your harness pretty unstable or your harness wreaks havoc on the global states. In this case, you want to fork it before executing the harness runs in the child process so that it doesn't break things. +However, we have to take care of the shared memory, it's the child process that runs the harness code and writes the coverage to the map. +We have to make the map shared between the parent process and the child process, so we'll use shared memory again. You should compile your harness with `pointer_maps` (for `libafl_targes`) features enabled, this way, we can have a pointer; `EDGES_MAP_PTR` that can point to any coverage map. +On your fuzzer side, you can allocate a shared memory region and make the `EDGES_MAP_PTR` point to your shared memory. +```rust,ignore +let mut shmem; +unsafe{ + shmem = StdShMemProvider::new().unwrap().new_map(MAX_EDGES_NUM).unwrap(); +} +let shmem_map = shmem.map_mut(); +unsafe{ + EDGES_PTR = shmem_map.as_ptr(); +} +``` +Again, you can pass this shmem map to your `Observer` and `Feedback` to obtain coverage feedbacks. diff --git a/docs/src/core_concepts/forkserver_and_inprocessforkserver.md b/docs/src/core_concepts/forkserver_and_inprocessforkserver.md deleted file mode 100644 index 8ead0b5c1a..0000000000 --- a/docs/src/core_concepts/forkserver_and_inprocessforkserver.md +++ /dev/null @@ -1,44 +0,0 @@ -# ForkserverExecutor and InprocessForkExecutor - -## Introduction -We have `ForkserverExecutor` and `InprocessForkExecutor` in libafl crate. -On this page, we'll quickly explain how they work and see how they compare to normal `InProcessExecutor` - -## InprocessExecutor -Let's begin with the base case; `InProcessExecutor`. -This executor uses [_SanitizerCoverage_](https://clang.llvm.org/docs/SanitizerCoverage.html) as its backend, as you can find the related code in `libafl_targets/src/sancov_pcguards`. Here we allocate a map called `EDGES_MAP` and then our compiler wrapper compiles the harness to write the coverage into this map. - -## ForkserverExecutor -Next, we'll look at the `ForkserverExecutor`. In this case, it is `afl-cc` (from AFLplusplus/AFLplusplus) that compiles the harness code, and therefore, we can't use `EDGES_MAP` anymore. Hopefully, we have [_a way_](https://github.com/AFLplusplus/AFLplusplus/blob/2e15661f184c77ac1fbb6f868c894e946cbb7f17/instrumentation/afl-compiler-rt.o.c#L270) to tell the forkserver which map to record the coverage. 
-As you can see from the forkserver example, -```rust,ignore -//Coverage map shared between observer and executor -let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap(); -//let the forkserver know the shmid -shmem.write_to_env("__AFL_SHM_ID").unwrap(); -let mut shmem_map = shmem.map_mut(); -``` -Here we make a shared memory region; `shmem`, and write this to environmental variable `__AFL_SHM_ID`. Then the instrumented binary, or the forkserver, finds this shared memory region (from the aforementioned env var) to record its coverage. On your fuzzer side, you can pass this shmem map to your `Observer` to obtain coverage feedbacks combined with any `Feedback`. - -Another feature of the `ForkserverExecutor` to mention is the shared memory testcases. In normal cases, the mutated input is passed between the forkserver and the instrumented binary via `.cur_input` file. You can improve your forkserver fuzzer's performance by passing the input with shared memory. -See AFL++'s [_documentation_](https://github.com/AFLplusplus/AFLplusplus/blob/stable/instrumentation/README.persistent_mode.md#5-shared-memory-fuzzing) or the fuzzer example in `forkserver_simple/src/program.c` for reference. -It is very simple, when you call `ForkserverExecutor::new()` with `use_shmem_testcase` true, the `ForkserverExecutor` sets things up and your harness can just fetch the input from `__AFL_FUZZ_TESTCASE_BUF` - -## InprocessForkExecutor -Finally, we'll talk about the `InProcessForkExecutor`. -`InProcessForkExecutor` has only one difference from `InprocessExecutor`; It forks before running the harness and that's it. -But why do we want to do so? well, under some circumstances, you may find your harness pretty unstable or your harness wreaks havoc on the global states. In this case, you want to fork it before executing the harness runs in the child process so that it doesn't break things. -However, we have to take care of the shared memory, it's the child process that runs the harness code and writes the coverage to the map. -We have to make the map shared between the parent process and the child process, so we'll use shared memory again. You should compile your harness with `pointer_maps` (for `libafl_targes`) features enabled, this way, we can have a pointer; `EDGES_MAP_PTR` that can point to any coverage map. -On your fuzzer side, you can allocate a shared memory region and make the `EDGES_MAP_PTR` point to your shared memory. -```rust,ignore -let mut shmem; -unsafe{ - shmem = StdShMemProvider::new().unwrap().new_map(MAX_EDGES_NUM).unwrap(); -} -let shmem_map = shmem.map_mut(); -unsafe{ - EDGES_PTR = shmem_map.as_ptr(); -} -``` -Again, you can pass this shmem map to your `Observer` and `Feedback` to obtain coverage feedbacks. diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml index 80b49f3bca..bbd3ce266e 100644 --- a/fuzzers/frida_libpng/Cargo.toml +++ b/fuzzers/frida_libpng/Cargo.toml @@ -40,6 +40,7 @@ num-traits = "0.2.14" rangemap = "0.1.10" structopt = "0.3.25" serde = "1.0" +mimalloc = { version = "*", default-features = false } backtrace = "0.3" color-backtrace = "0.5" diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index cbccf1b39c..ccf78c69b1 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for libpng. 
+use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use frida_gum::Gum; use std::{ diff --git a/fuzzers/fuzzbench/Cargo.toml b/fuzzers/fuzzbench/Cargo.toml index e728c5d9c4..e3410e1fcf 100644 --- a/fuzzers/fuzzbench/Cargo.toml +++ b/fuzzers/fuzzbench/Cargo.toml @@ -26,6 +26,7 @@ libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_h libafl_cc = { path = "../../libafl_cc/" } clap = { version = "3.0.0-rc.4", features = ["default"] } nix = "0.23.0" +mimalloc = { version = "*", default-features = false } [lib] name = "fuzzbench" diff --git a/fuzzers/fuzzbench/src/lib.rs b/fuzzers/fuzzbench/src/lib.rs index f010df48d7..ec71be1b1e 100644 --- a/fuzzers/fuzzbench/src/lib.rs +++ b/fuzzers/fuzzbench/src/lib.rs @@ -1,4 +1,7 @@ //! A singlethreaded libfuzzer-like fuzzer that can auto-restart. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use clap::{App, Arg}; use core::{cell::RefCell, time::Duration}; diff --git a/fuzzers/generic_inmemory/Cargo.toml b/fuzzers/generic_inmemory/Cargo.toml index c080a13135..e842a56836 100644 --- a/fuzzers/generic_inmemory/Cargo.toml +++ b/fuzzers/generic_inmemory/Cargo.toml @@ -25,6 +25,7 @@ libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_h # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } structopt = "0.3.25" +mimalloc = { version = "*", default-features = false } [lib] name = "generic_inmemory" diff --git a/fuzzers/generic_inmemory/src/lib.rs b/fuzzers/generic_inmemory/src/lib.rs index cbc8d7c4f2..60d62ab56a 100644 --- a/fuzzers/generic_inmemory/src/lib.rs +++ b/fuzzers/generic_inmemory/src/lib.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The `launcher` will spawn new processes for each cpu core. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use core::time::Duration; use std::{env, net::SocketAddr, path::PathBuf}; diff --git a/fuzzers/libfuzzer_libmozjpeg/Cargo.toml b/fuzzers/libfuzzer_libmozjpeg/Cargo.toml index 544fa8f7b8..94d8f48eba 100644 --- a/fuzzers/libfuzzer_libmozjpeg/Cargo.toml +++ b/fuzzers/libfuzzer_libmozjpeg/Cargo.toml @@ -19,6 +19,7 @@ libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_value_profile", "libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } +mimalloc = { version = "*", default-features = false } [build-dependencies] cc = { version = "1.0", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_libmozjpeg/src/lib.rs b/fuzzers/libfuzzer_libmozjpeg/src/lib.rs index 9c7e0dfe96..ddcbc5907a 100644 --- a/fuzzers/libfuzzer_libmozjpeg/src/lib.rs +++ b/fuzzers/libfuzzer_libmozjpeg/src/lib.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for libmozjpeg. 
+use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use std::{env, path::PathBuf}; diff --git a/fuzzers/libfuzzer_libpng/Cargo.toml b/fuzzers/libfuzzer_libpng/Cargo.toml index dd2867b7c7..7246771b71 100644 --- a/fuzzers/libfuzzer_libpng/Cargo.toml +++ b/fuzzers/libfuzzer_libpng/Cargo.toml @@ -25,6 +25,7 @@ libafl = { path = "../../libafl/", features = ["default"] } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } +mimalloc = { version = "*", default-features = false } [lib] name = "libfuzzer_libpng" diff --git a/fuzzers/libfuzzer_libpng/src/lib.rs b/fuzzers/libfuzzer_libpng/src/lib.rs index b1367d442f..dc3810f15f 100644 --- a/fuzzers/libfuzzer_libpng/src/lib.rs +++ b/fuzzers/libfuzzer_libpng/src/lib.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for libpng. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use core::time::Duration; use std::{env, path::PathBuf}; diff --git a/fuzzers/libfuzzer_libpng_ctx/Cargo.toml b/fuzzers/libfuzzer_libpng_ctx/Cargo.toml index b589d6762b..452edd9560 100644 --- a/fuzzers/libfuzzer_libpng_ctx/Cargo.toml +++ b/fuzzers/libfuzzer_libpng_ctx/Cargo.toml @@ -25,6 +25,7 @@ libafl_targets = { path = "../../libafl_targets/", features = ["libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } structopt = "0.3.25" +mimalloc = { version = "*", default-features = false } [lib] name = "libfuzzer_libpng" diff --git a/fuzzers/libfuzzer_libpng_ctx/src/lib.rs b/fuzzers/libfuzzer_libpng_ctx/src/lib.rs index f87417466c..414b89dcc8 100644 --- a/fuzzers/libfuzzer_libpng_ctx/src/lib.rs +++ b/fuzzers/libfuzzer_libpng_ctx/src/lib.rs @@ -2,6 +2,9 @@ //! The example harness is built for libpng. //! In this example, you will see the use of the `launcher` feature. //! The `launcher` will spawn new processes for each cpu core. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use core::time::Duration; use std::{env, net::SocketAddr, path::PathBuf}; diff --git a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml index b225e962c5..7c70283460 100644 --- a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml +++ b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml @@ -25,6 +25,7 @@ libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_h # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } structopt = "0.3.25" +mimalloc = { version = "*", default-features = false } [lib] name = "libfuzzer_libpng" diff --git a/fuzzers/libfuzzer_libpng_launcher/src/lib.rs b/fuzzers/libfuzzer_libpng_launcher/src/lib.rs index 5d63d2c4e4..77daf9c526 100644 --- a/fuzzers/libfuzzer_libpng_launcher/src/lib.rs +++ b/fuzzers/libfuzzer_libpng_launcher/src/lib.rs @@ -2,6 +2,9 @@ //! The example harness is built for libpng. //! In this example, you will see the use of the `launcher` feature. //! The `launcher` will spawn new processes for each cpu core. 
+use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use core::time::Duration; use std::{env, net::SocketAddr, path::PathBuf}; diff --git a/fuzzers/libfuzzer_reachability/Cargo.toml b/fuzzers/libfuzzer_reachability/Cargo.toml index 9a36f60c8f..129c503011 100644 --- a/fuzzers/libfuzzer_reachability/Cargo.toml +++ b/fuzzers/libfuzzer_reachability/Cargo.toml @@ -24,6 +24,7 @@ libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } +mimalloc = { version = "*", default-features = false } [lib] name = "libfuzzer_libpng" diff --git a/fuzzers/libfuzzer_reachability/src/lib.rs b/fuzzers/libfuzzer_reachability/src/lib.rs index eed219ebd9..4d27fa8e92 100644 --- a/fuzzers/libfuzzer_reachability/src/lib.rs +++ b/fuzzers/libfuzzer_reachability/src/lib.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for libpng. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use std::{env, path::PathBuf}; diff --git a/fuzzers/libfuzzer_stb_image/Cargo.toml b/fuzzers/libfuzzer_stb_image/Cargo.toml index ff8639623c..980ab304ba 100644 --- a/fuzzers/libfuzzer_stb_image/Cargo.toml +++ b/fuzzers/libfuzzer_stb_image/Cargo.toml @@ -18,6 +18,7 @@ debug = true [dependencies] libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer"] } +mimalloc = { version = "*", default-features = false } [build-dependencies] cc = { version = "1.0", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_stb_image/src/main.rs b/fuzzers/libfuzzer_stb_image/src/main.rs index c89e26f21e..810251735e 100644 --- a/fuzzers/libfuzzer_stb_image/src/main.rs +++ b/fuzzers/libfuzzer_stb_image/src/main.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for `stb_image`. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use std::{env, path::PathBuf}; diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml index 5c47a68919..95adad50f2 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml +++ b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml @@ -19,6 +19,7 @@ debug = true libafl = { path = "../../../libafl/", features = ["concolic_mutation"] } libafl_targets = { path = "../../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer"] } structopt = "0.3.21" +mimalloc = { version = "*", default-features = false } [build-dependencies] cc = { version = "1.0", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs index 2cb9cdb8c5..dc1d642e66 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs +++ b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for `stb_image`. 
+use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use std::{env, path::PathBuf}; diff --git a/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml b/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml index 2aaf2052a5..6ac790e013 100644 --- a/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml +++ b/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml @@ -19,6 +19,7 @@ debug = true libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer"] } libafl_sugar = { path = "../../libafl_sugar/" } +mimalloc = { version = "*", default-features = false } [build-dependencies] cc = { version = "1.0", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_stb_image_sugar/src/main.rs b/fuzzers/libfuzzer_stb_image_sugar/src/main.rs index 2b377bf680..37d2c1e2b7 100644 --- a/fuzzers/libfuzzer_stb_image_sugar/src/main.rs +++ b/fuzzers/libfuzzer_stb_image_sugar/src/main.rs @@ -1,5 +1,8 @@ //! A libfuzzer-like fuzzer with llmp-multithreading support and restarts //! The example harness is built for `stb_image`. +use mimalloc::MiMalloc; +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; use std::{env, path::PathBuf}; From d669b063f4f4a3ee3504a9ffeca2aa63f2e4b792 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Thu, 30 Dec 2021 18:38:28 +0100 Subject: [PATCH 11/25] clippy --- libafl_frida/src/asan/asan_rt.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/libafl_frida/src/asan/asan_rt.rs b/libafl_frida/src/asan/asan_rt.rs index 566b12748f..59cd2bb97c 100644 --- a/libafl_frida/src/asan/asan_rt.rs +++ b/libafl_frida/src/asan/asan_rt.rs @@ -182,24 +182,24 @@ impl AsanRuntime { /* unsafe { let mem = self.allocator.alloc(0xac + 2, 8); - mprotect( - (self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, - 0x1000, - ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC, - ) - }; + mprotect( + (self.shadow_check_func.unwrap() as usize & 0xffffffffffff000) as *mut c_void, + 0x1000, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE | ProtFlags::PROT_EXEC, + ) + .unwrap(); println!("Test0"); /* 0x555555916ce9 je libafl_frida::asan_rt::AsanRuntime::init+14852 0x555555916cef mov rdi, r15 <0x555558392338> */ assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + 0) as *const c_void, + (mem as usize) as *const c_void, 0x00 )); println!("Test1"); assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + 0) as *const c_void, + (mem as usize) as *const c_void, 0xac )); println!("Test2"); From cb3662da54e0e0da2987e492ed08191e5da0c582 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Sat, 1 Jan 2022 19:51:27 +0100 Subject: [PATCH 12/25] Enable errors for missing docs, add documentation (#440) * documentation, warnings * fixed docs * docs * no_std * test * windows * nautilus docs * more fixes * more docs * nits * windows clippy * docs, windows * nits --- libafl/src/bolts/launcher.rs | 4 +- libafl/src/bolts/llmp.rs | 18 ++-- libafl/src/bolts/minibsod.rs | 11 +- libafl/src/bolts/mod.rs | 3 + libafl/src/bolts/os/mod.rs | 5 + libafl/src/bolts/os/pipes.rs | 8 ++ libafl/src/bolts/os/unix_shmem_server.rs | 16 ++- libafl/src/bolts/os/unix_signals.rs | 2 +- libafl/src/bolts/os/windows_exceptions.rs | 7 +- libafl/src/bolts/rands.rs | 7 +- libafl/src/bolts/serdeany.rs | 28 ++++- libafl/src/bolts/shmem.rs | 68 +++++++----- libafl/src/bolts/staterestore.rs | 8 +- libafl/src/corpus/minimizer.rs | 6 +- libafl/src/corpus/mod.rs | 1 + 
libafl/src/corpus/ondisk.rs | 2 +- libafl/src/corpus/powersched.rs | 3 + libafl/src/corpus/queue.rs | 1 + libafl/src/corpus/testcase.rs | 12 +++ libafl/src/events/llmp.rs | 101 ++++++++---------- libafl/src/events/mod.rs | 16 ++- libafl/src/events/simple.rs | 1 + libafl/src/executors/combined.rs | 1 + libafl/src/executors/command.rs | 6 ++ libafl/src/executors/forkserver.rs | 54 ++++++++-- libafl/src/executors/inprocess.rs | 53 ++++++--- libafl/src/executors/shadow.rs | 6 ++ libafl/src/executors/timeout.rs | 28 ++--- libafl/src/executors/with_observers.rs | 2 + libafl/src/feedbacks/concolic.rs | 7 ++ libafl/src/feedbacks/map.rs | 64 ++++------- libafl/src/feedbacks/mod.rs | 31 +++++- libafl/src/feedbacks/nautilus.rs | 24 +++++ libafl/src/fuzzer/mod.rs | 5 + libafl/src/generators/gramatron.rs | 9 ++ libafl/src/generators/nautilus.rs | 25 ++++- libafl/src/inputs/encoded.rs | 19 ++++ libafl/src/inputs/gramatron.rs | 11 ++ libafl/src/inputs/mod.rs | 2 +- libafl/src/inputs/nautilus.rs | 8 ++ libafl/src/lib.rs | 43 +++++++- libafl/src/monitors/mod.rs | 25 +++-- libafl/src/monitors/multi.rs | 6 +- libafl/src/mutators/encoded_mutations.rs | 20 ++-- libafl/src/mutators/gramatron.rs | 15 ++- libafl/src/mutators/mopt_mutator.rs | 22 +++- libafl/src/mutators/mutations.rs | 44 ++++---- libafl/src/mutators/nautilus.rs | 24 ++++- libafl/src/mutators/scheduled.rs | 2 +- libafl/src/mutators/token_mutations.rs | 8 +- libafl/src/observers/cmp.rs | 16 ++- libafl/src/observers/concolic/mod.rs | 1 + libafl/src/observers/concolic/observer.rs | 1 + .../concolic/serialization_format.rs | 15 +-- libafl/src/observers/map.rs | 58 +++++----- libafl/src/stages/calibrate.rs | 25 ++++- libafl/src/stages/mod.rs | 5 + libafl/src/stages/power.rs | 3 + libafl/src/stages/push/mutational.rs | 6 +- libafl/src/stages/sync.rs | 6 +- libafl/src/stages/tracing.rs | 1 + libafl/src/state/mod.rs | 3 + 62 files changed, 723 insertions(+), 309 deletions(-) diff --git a/libafl/src/bolts/launcher.rs b/libafl/src/bolts/launcher.rs index 8e37c40635..d1995357e6 100644 --- a/libafl/src/bolts/launcher.rs +++ b/libafl/src/bolts/launcher.rs @@ -44,7 +44,7 @@ const _AFL_LAUNCHER_CLIENT: &str = "AFL_LAUNCHER_CLIENT"; /// Provides a Launcher, which can be used to launch a fuzzing run on a specified list of cores #[cfg(feature = "std")] #[derive(TypedBuilder)] -#[allow(clippy::type_complexity)] +#[allow(clippy::type_complexity, missing_debug_implementations)] pub struct Launcher<'a, CF, I, MT, OT, S, SP> where CF: FnOnce(Option, LlmpRestartingEventManager, usize) -> Result<(), Error>, @@ -90,7 +90,7 @@ impl<'a, CF, I, MT, OT, S, SP> Launcher<'a, CF, I, MT, OT, S, SP> where CF: FnOnce(Option, LlmpRestartingEventManager, usize) -> Result<(), Error>, I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, MT: Monitor + Clone, SP: ShMemProvider + 'static, S: DeserializeOwned, diff --git a/libafl/src/bolts/llmp.rs b/libafl/src/bolts/llmp.rs index 321a4af028..8be287c193 100644 --- a/libafl/src/bolts/llmp.rs +++ b/libafl/src/bolts/llmp.rs @@ -192,7 +192,7 @@ pub enum TcpRequest { } impl TryFrom<&Vec> for TcpRequest { - type Error = crate::Error; + type Error = Error; fn try_from(bytes: &Vec) -> Result { Ok(postcard::from_bytes(bytes)?) @@ -213,7 +213,7 @@ pub struct TcpRemoteNewMessage { } impl TryFrom<&Vec> for TcpRemoteNewMessage { - type Error = crate::Error; + type Error = Error; fn try_from(bytes: &Vec) -> Result { Ok(postcard::from_bytes(bytes)?) 
@@ -249,7 +249,7 @@ pub enum TcpResponse { } impl TryFrom<&Vec> for TcpResponse { - type Error = crate::Error; + type Error = Error; fn try_from(bytes: &Vec) -> Result { Ok(postcard::from_bytes(bytes)?) @@ -258,6 +258,7 @@ impl TryFrom<&Vec> for TcpResponse { /// Abstraction for listeners #[cfg(feature = "std")] +#[derive(Debug)] pub enum Listener { /// Listener listening on `tcp`. Tcp(TcpListener), @@ -265,6 +266,7 @@ pub enum Listener { /// A listener stream abstraction #[cfg(feature = "std")] +#[derive(Debug)] pub enum ListenerStream { /// Listener listening on `tcp`. Tcp(TcpStream, SocketAddr), @@ -389,11 +391,11 @@ fn recv_tcp_msg(stream: &mut TcpStream) -> Result, Error> { stream.read_timeout().unwrap_or(None) ); - let mut size_bytes = [0u8; 4]; + let mut size_bytes = [0_u8; 4]; stream.read_exact(&mut size_bytes)?; let size = u32::from_be_bytes(size_bytes); let mut bytes = vec![]; - bytes.resize(size as usize, 0u8); + bytes.resize(size as usize, 0_u8); #[cfg(feature = "llmp_debug")] println!("LLMP TCP: Receiving payload of size {}", size); @@ -556,8 +558,7 @@ impl LlmpMsg { let map_size = map.shmem.map().len(); let buf_ptr = self.buf.as_ptr(); if buf_ptr > (map.page_mut() as *const u8).add(size_of::()) - && buf_ptr - <= (map.page_mut() as *const u8).add(map_size - size_of::() as usize) + && buf_ptr <= (map.page_mut() as *const u8).add(map_size - size_of::()) { // The message header is in the page. Continue with checking the body. let len = self.buf_len_padded as usize + size_of::(); @@ -1185,7 +1186,7 @@ where // Doing this step by step will catch underflows in debug builds :) (*page).size_used -= old_len_padded as usize; - (*page).size_used += buf_len_padded as usize; + (*page).size_used += buf_len_padded; (*_llmp_next_msg_ptr(msg)).tag = LLMP_TAG_UNSET; @@ -1691,6 +1692,7 @@ where /// A signal handler for the [`LlmpBroker`]. #[cfg(unix)] +#[derive(Debug, Clone)] pub struct LlmpBrokerSignalHandler { shutting_down: bool, } diff --git a/libafl/src/bolts/minibsod.rs b/libafl/src/bolts/minibsod.rs index f59944fcf9..76abf9bef6 100644 --- a/libafl/src/bolts/minibsod.rs +++ b/libafl/src/bolts/minibsod.rs @@ -108,14 +108,14 @@ pub fn dump_registers( writer, "x{:02}: 0x{:016x} ", reg, mcontext.__ss.__x[reg as usize] - ); + )?; if reg % 4 == 3 { - writeln!(writer); + writeln!(writer)?; } } - write!(writer, "fp: 0x{:016x} ", mcontext.__ss.__fp); - write!(writer, "lr: 0x{:016x} ", mcontext.__ss.__lr); - write!(writer, "pc: 0x{:016x} ", mcontext.__ss.__pc); + write!(writer, "fp: 0x{:016x} ", mcontext.__ss.__fp)?; + write!(writer, "lr: 0x{:016x} ", mcontext.__ss.__lr)?; + write!(writer, "pc: 0x{:016x} ", mcontext.__ss.__pc)?; Ok(()) } @@ -269,6 +269,7 @@ fn write_crash( /// Generates a mini-BSOD given a signal and context. 
#[cfg(unix)] +#[allow(clippy::non_ascii_literal)] pub fn generate_minibsod( writer: &mut BufWriter, signal: Signal, diff --git a/libafl/src/bolts/mod.rs b/libafl/src/bolts/mod.rs index 286e6b1065..b896d2d6c7 100644 --- a/libafl/src/bolts/mod.rs +++ b/libafl/src/bolts/mod.rs @@ -41,8 +41,11 @@ pub trait HasLen { } } +/// Has a ref count pub trait HasRefCnt { + /// The ref count fn refcnt(&self) -> isize; + /// The ref count, mutable fn refcnt_mut(&mut self) -> &mut isize; } diff --git a/libafl/src/bolts/os/mod.rs b/libafl/src/bolts/os/mod.rs index 9d40c72255..71394ed988 100644 --- a/libafl/src/bolts/os/mod.rs +++ b/libafl/src/bolts/os/mod.rs @@ -25,6 +25,7 @@ pub mod pipes; use std::ffi::CString; #[cfg(all(windows, feature = "std"))] +#[allow(missing_docs)] pub mod windows_exceptions; #[cfg(unix)] @@ -32,7 +33,9 @@ use libc::pid_t; /// Child Process Handle #[cfg(unix)] +#[derive(Debug)] pub struct ChildHandle { + /// The process id pub pid: pid_t, } @@ -51,6 +54,7 @@ impl ChildHandle { /// The `ForkResult` (result of a fork) #[cfg(unix)] +#[derive(Debug)] pub enum ForkResult { /// The fork finished, we are the parent process. /// The child has the handle `ChildHandle`. @@ -103,6 +107,7 @@ pub fn dup2(fd: i32, device: i32) -> Result<(), Error> { /// Core ID #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct CoreId { + /// The id of this core pub id: usize, } diff --git a/libafl/src/bolts/os/pipes.rs b/libafl/src/bolts/os/pipes.rs index 528823a369..68dd69e73f 100644 --- a/libafl/src/bolts/os/pipes.rs +++ b/libafl/src/bolts/os/pipes.rs @@ -11,15 +11,19 @@ use std::{ #[cfg(not(feature = "std"))] type RawFd = i32; +/// A unix pipe wrapper for `LibAFL` #[cfg(feature = "std")] #[derive(Debug, Clone)] pub struct Pipe { + /// The read end of the pipe read_end: Option, + /// The write end of the pipe write_end: Option, } #[cfg(feature = "std")] impl Pipe { + /// Create a new `Unix` pipe pub fn new() -> Result { let (read_end, write_end) = pipe()?; Ok(Self { @@ -28,6 +32,7 @@ impl Pipe { }) } + /// Close the read end of a pipe pub fn close_read_end(&mut self) { if let Some(read_end) = self.read_end { let _ = close(read_end); @@ -35,6 +40,7 @@ impl Pipe { } } + /// Close the write end of a pipe pub fn close_write_end(&mut self) { if let Some(write_end) = self.write_end { let _ = close(write_end); @@ -42,11 +48,13 @@ impl Pipe { } } + /// The read end #[must_use] pub fn read_end(&self) -> Option { self.read_end } + /// The write end #[must_use] pub fn write_end(&self) -> Option { self.write_end diff --git a/libafl/src/bolts/os/unix_shmem_server.rs b/libafl/src/bolts/os/unix_shmem_server.rs index 8301809a5d..8096559aa5 100644 --- a/libafl/src/bolts/os/unix_shmem_server.rs +++ b/libafl/src/bolts/os/unix_shmem_server.rs @@ -118,7 +118,7 @@ where .write_all(&message) .expect("Failed to send message"); - let mut shm_slice = [0u8; 20]; + let mut shm_slice = [0_u8; 20]; let mut fd_buf = [-1; 1]; self.stream .recv_fds(&mut shm_slice, &mut fd_buf) @@ -172,7 +172,7 @@ where res.id = id; Ok(res) } - fn new_map(&mut self, map_size: usize) -> Result { + fn new_map(&mut self, map_size: usize) -> Result { let (server_fd, client_fd) = self.send_receive(ServedShMemRequest::NewMap(map_size))?; Ok(ServedShMem { @@ -302,12 +302,18 @@ pub enum ShMemService where SP: ShMemProvider, { + /// A started service Started { + /// The background thread bg_thread: Arc>, + /// The pantom data phantom: PhantomData, }, + /// A failed service Failed { + /// The error message err_msg: String, + /// The pantom data phantom: 
PhantomData, }, } @@ -541,7 +547,7 @@ where let client = self.clients.get_mut(&client_id).unwrap(); let maps = client.maps.entry(map_id).or_default(); if maps.is_empty() { - Ok(ServedShMemResponse::RefCount(0u32)) + Ok(ServedShMemResponse::RefCount(0_u32)) } else { Ok(ServedShMemResponse::RefCount( Rc::strong_count(&maps.pop().unwrap()) as u32, @@ -563,11 +569,11 @@ where let client = self.clients.get_mut(&client_id).unwrap(); // Always receive one be u32 of size, then the command. - let mut size_bytes = [0u8; 4]; + let mut size_bytes = [0_u8; 4]; client.stream.read_exact(&mut size_bytes)?; let size = u32::from_be_bytes(size_bytes); let mut bytes = vec![]; - bytes.resize(size as usize, 0u8); + bytes.resize(size as usize, 0_u8); client .stream .read_exact(&mut bytes) diff --git a/libafl/src/bolts/os/unix_signals.rs b/libafl/src/bolts/os/unix_signals.rs index b487f571f2..e8f9d56957 100644 --- a/libafl/src/bolts/os/unix_signals.rs +++ b/libafl/src/bolts/os/unix_signals.rs @@ -74,7 +74,7 @@ extern "C" { } /// All signals on this system, as `enum`. -#[derive(IntoPrimitive, TryFromPrimitive, Clone, Copy)] +#[derive(Debug, IntoPrimitive, TryFromPrimitive, Clone, Copy)] #[repr(i32)] pub enum Signal { /// `SIGABRT` signal id diff --git a/libafl/src/bolts/os/windows_exceptions.rs b/libafl/src/bolts/os/windows_exceptions.rs index 2d2ac7abb5..a970e0153a 100644 --- a/libafl/src/bolts/os/windows_exceptions.rs +++ b/libafl/src/bolts/os/windows_exceptions.rs @@ -83,7 +83,7 @@ pub const STATUS_ASSERTION_FAILURE: u32 = 0xC0000420; pub const STATUS_SXS_EARLY_DEACTIVATION: u32 = 0xC015000F; pub const STATUS_SXS_INVALID_DEACTIVATION: u32 = 0xC0150010; -#[derive(TryFromPrimitive, Clone, Copy)] +#[derive(Debug, TryFromPrimitive, Clone, Copy)] #[repr(u32)] pub enum ExceptionCode { // From https://docs.microsoft.com/en-us/windows/win32/debug/getexceptioncode @@ -210,7 +210,7 @@ impl Display for ExceptionCode { ExceptionCode::HeapCorruption => write!(f, "STATUS_HEAP_CORRUPTION")?, ExceptionCode::StackBufferOverrun => write!(f, "STATUS_STACK_BUFFER_OVERRUN")?, ExceptionCode::InvalidCRuntimeParameter => { - write!(f, "STATUS_INVALID_CRUNTIME_PARAMETER")? + write!(f, "STATUS_INVALID_CRUNTIME_PARAMETER")?; } ExceptionCode::AssertionFailure => write!(f, "STATUS_ASSERTION_FAILURE")?, ExceptionCode::SXSEarlyDeactivation => write!(f, "STATUS_SXS_EARLY_DEACTIVATION")?, @@ -325,8 +325,7 @@ unsafe extern "system" fn handle_exception(exception_pointers: *mut EXCEPTION_PO .ExceptionCode; let exception_code = ExceptionCode::try_from(code.0).unwrap(); // println!("Received {}", exception_code); - let ret = internal_handle_exception(exception_code, exception_pointers); - ret + internal_handle_exception(exception_code, exception_pointers) } type NativeSignalHandlerType = unsafe extern "C" fn(i32); diff --git a/libafl/src/bolts/rands.rs b/libafl/src/bolts/rands.rs index 9ecbe9917b..5e45a3b48d 100644 --- a/libafl/src/bolts/rands.rs +++ b/libafl/src/bolts/rands.rs @@ -1,3 +1,4 @@ +//! The random number generators of `LibAFL` use core::{debug_assert, fmt::Debug}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use xxhash_rust::xxh3::xxh3_64_with_seed; @@ -83,7 +84,7 @@ macro_rules! default_rand { /// A default RNG will usually produce a nondeterministic stream of random numbers. /// As we do not have any way to get random seeds for `no_std`, they have to be reproducible there. /// Use [`$rand::with_seed`] to generate a reproducible RNG. 
- impl core::default::Default for $rand { + impl Default for $rand { #[cfg(feature = "std")] fn default() -> Self { Self::new() @@ -295,7 +296,7 @@ impl Rand for RomuTrioRand { let xp = self.x_state; let yp = self.y_state; let zp = self.z_state; - self.x_state = 15241094284759029579u64.wrapping_mul(zp); + self.x_state = 15241094284759029579_u64.wrapping_mul(zp); self.y_state = yp.wrapping_sub(xp).rotate_left(12); self.z_state = zp.wrapping_sub(yp).rotate_left(44); xp @@ -332,7 +333,7 @@ impl Rand for RomuDuoJrRand { #[allow(clippy::unreadable_literal)] fn next(&mut self) -> u64 { let xp = self.x_state; - self.x_state = 15241094284759029579u64.wrapping_mul(self.y_state); + self.x_state = 15241094284759029579_u64.wrapping_mul(self.y_state); self.y_state = self.y_state.wrapping_sub(xp).rotate_left(27); xp } diff --git a/libafl/src/bolts/serdeany.rs b/libafl/src/bolts/serdeany.rs index 674f8682f0..3a43bd2b49 100644 --- a/libafl/src/bolts/serdeany.rs +++ b/libafl/src/bolts/serdeany.rs @@ -1,6 +1,6 @@ //! Poor-rust-man's downcasts for stuff we send over the wire (or shared maps) -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde::{de::DeserializeSeed, Deserialize, Deserializer, Serialize, Serializer}; use alloc::boxed::Box; use core::any::{Any, TypeId}; @@ -40,6 +40,7 @@ pub trait SerdeAny: Any + erased_serde::Serialize { } /// Wrap a type for serialization +#[allow(missing_debug_implementations)] pub struct Wrap<'a, T: ?Sized>(pub &'a T); impl<'a, T> Serialize for Wrap<'a, T> where @@ -59,6 +60,7 @@ pub type DeserializeCallback = fn(&mut dyn erased_serde::Deserializer) -> Result, erased_serde::Error>; /// Callback struct for deserialization of a [`SerdeAny`] type. +#[allow(missing_debug_implementations)] pub struct DeserializeCallbackSeed where B: ?Sized, @@ -67,7 +69,7 @@ where pub cb: DeserializeCallback, } -impl<'de, B> serde::de::DeserializeSeed<'de> for DeserializeCallbackSeed +impl<'de, B> DeserializeSeed<'de> for DeserializeCallbackSeed where B: ?Sized, { @@ -75,7 +77,7 @@ where fn deserialize(self, deserializer: D) -> Result where - D: serde::de::Deserializer<'de>, + D: Deserializer<'de>, { let mut erased = ::erase(deserializer); (self.cb)(&mut erased).map_err(serde::de::Error::custom) @@ -105,7 +107,9 @@ macro_rules! create_serde_registry_for_trait { use $crate::Error; /// Visitor object used internally for the [`SerdeAny`] registry. + #[derive(Debug)] pub struct BoxDynVisitor {} + #[allow(unused_qualifications)] impl<'de> serde::de::Visitor<'de> for BoxDynVisitor { type Value = Box; @@ -132,11 +136,13 @@ macro_rules! create_serde_registry_for_trait { } } + #[allow(unused_qualifications)] struct Registry { deserializers: Option>>, finalized: bool, } + #[allow(unused_qualifications)] impl Registry { pub fn register(&mut self) where @@ -162,8 +168,10 @@ macro_rules! create_serde_registry_for_trait { /// This shugar must be used to register all the structs which /// have trait objects that can be serialized and deserialized in the program + #[derive(Debug)] pub struct RegistryBuilder {} + #[allow(unused_qualifications)] impl RegistryBuilder { /// Register a given struct type for trait object (de)serialization pub fn register() @@ -214,6 +222,7 @@ macro_rules! create_serde_registry_for_trait { } } + #[allow(unused_qualifications)] impl SerdeAnyMap { /// Get an element from the map. #[must_use] @@ -309,11 +318,13 @@ macro_rules! create_serde_registry_for_trait { } /// A serializable [`HashMap`] wrapper for [`SerdeAny`] types, addressable by name. 
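// Illustrative sketch (not part of this patch) of how a user-defined metadata
// type is hooked into this registry: derive the serde traits and invoke the
// exported `impl_serdeany!` macro for it, mirroring what the crate does for
// types like `IsFavoredMetadata`. `MyMetadata` is a made-up example type.
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
pub struct MyMetadata {
    pub hit_count: u64,
}

// From outside the crate this is the exported macro; inside the crate the
// equivalent call is `crate::impl_serdeany!(MyMetadata);`.
libafl::impl_serdeany!(MyMetadata);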
+ #[allow(unused_qualifications, missing_debug_implementations)] #[derive(Serialize, Deserialize)] pub struct NamedSerdeAnyMap { map: HashMap>>, } + #[allow(unused_qualifications)] impl NamedSerdeAnyMap { /// Get an element by name #[must_use] @@ -332,6 +343,7 @@ macro_rules! create_serde_registry_for_trait { /// Get an element of a given type contained in this map by [`TypeId`]. #[must_use] + #[allow(unused_qualifications)] #[inline] pub fn by_typeid(&self, name: &str, typeid: &TypeId) -> Option<&dyn $trait_name> { match self.map.get(&unpack_type_id(*typeid)) { @@ -375,6 +387,7 @@ macro_rules! create_serde_registry_for_trait { /// Get all elements of a type contained in this map. #[must_use] + #[allow(unused_qualifications)] #[inline] pub fn get_all( &self, @@ -398,6 +411,7 @@ macro_rules! create_serde_registry_for_trait { /// Get all elements of a given type contained in this map by [`TypeId`]. #[must_use] + #[allow(unused_qualifications)] #[inline] pub fn all_by_typeid( &self, @@ -417,6 +431,7 @@ macro_rules! create_serde_registry_for_trait { /// Get all elements contained in this map, as mut. #[inline] + #[allow(unused_qualifications)] pub fn get_all_mut( &mut self, ) -> Option< @@ -440,6 +455,7 @@ macro_rules! create_serde_registry_for_trait { /// Get all [`TypeId`]`s` contained in this map, as mut. #[inline] + #[allow(unused_qualifications)] pub fn all_by_typeid_mut( &mut self, typeid: &TypeId, @@ -458,6 +474,7 @@ macro_rules! create_serde_registry_for_trait { /// Get all [`TypeId`]`s` contained in this map. #[inline] + #[allow(unused_qualifications)] pub fn all_typeids( &self, ) -> core::iter::Map< @@ -469,6 +486,7 @@ macro_rules! create_serde_registry_for_trait { /// Run `func` for each element in this map. #[inline] + #[allow(unused_qualifications)] pub fn for_each( &self, func: fn(&TypeId, &Box) -> Result<(), Error>, @@ -497,6 +515,7 @@ macro_rules! create_serde_registry_for_trait { /// Insert an element into this map. #[inline] + #[allow(unused_qualifications)] pub fn insert(&mut self, val: Box, name: &str) { let id = unpack_type_id((*val).type_id()); if !self.map.contains_key(&id) { @@ -560,6 +579,7 @@ macro_rules! create_serde_registry_for_trait { } } + #[allow(unused_qualifications)] impl<'a> Serialize for dyn $trait_name { fn serialize(&self, se: S) -> Result where @@ -575,6 +595,7 @@ macro_rules! create_serde_registry_for_trait { } } + #[allow(unused_qualifications)] impl<'de> Deserialize<'de> for Box { fn deserialize(deserializer: D) -> Result, D::Error> where @@ -618,6 +639,7 @@ macro_rules! impl_serdeany { }; } +/// Implement [`SerdeAny`] for a type #[cfg(not(feature = "std"))] #[macro_export] macro_rules! impl_serdeany { diff --git a/libafl/src/bolts/shmem.rs b/libafl/src/bolts/shmem.rs index f74acb3258..51f9d3befc 100644 --- a/libafl/src/bolts/shmem.rs +++ b/libafl/src/bolts/shmem.rs @@ -1,43 +1,60 @@ //! A generic sharememory region to be used by any functions (queues or feedbacks // too.) 
+#[cfg(all(unix, feature = "std"))] +use crate::bolts::os::pipes::Pipe; +use crate::Error; use alloc::{rc::Rc, string::ToString}; use core::{ cell::RefCell, fmt::{self, Debug, Display}, mem::ManuallyDrop, }; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "std")] +use std::env; +#[cfg(all(unix, feature = "std"))] +use std::io::Read; +#[cfg(feature = "std")] +use std::io::Write; + #[cfg(all(feature = "std", unix, not(target_os = "android")))] pub use unix_shmem::{MmapShMem, MmapShMemProvider}; #[cfg(all(feature = "std", unix))] pub use unix_shmem::{UnixShMem, UnixShMemProvider}; -use crate::Error; - #[cfg(all(feature = "std", unix))] pub use crate::bolts::os::unix_shmem_server::{ServedShMemProvider, ShMemService}; #[cfg(all(windows, feature = "std"))] pub use win32_shmem::{Win32ShMem, Win32ShMemProvider}; +/// The standard sharedmem provider #[cfg(all(windows, feature = "std"))] pub type StdShMemProvider = Win32ShMemProvider; +/// The standard sharedmem type #[cfg(all(windows, feature = "std"))] pub type StdShMem = Win32ShMem; +/// The standard sharedmem provider #[cfg(all(target_os = "android", feature = "std"))] pub type StdShMemProvider = RcShMemProvider>; +/// The standard sharedmem type #[cfg(all(target_os = "android", feature = "std"))] pub type StdShMem = RcShMem>; +/// The standard sharedmem service #[cfg(all(target_os = "android", feature = "std"))] pub type StdShMemService = ShMemService; +/// The standard sharedmem provider #[cfg(all(feature = "std", target_vendor = "apple"))] pub type StdShMemProvider = RcShMemProvider>; +/// The standard sharedmem type #[cfg(all(feature = "std", target_vendor = "apple"))] pub type StdShMem = RcShMem>; #[cfg(all(feature = "std", target_vendor = "apple"))] +/// The standard sharedmem service pub type StdShMemService = ShMemService; /// The default [`ShMemProvider`] for this os. @@ -55,21 +72,13 @@ pub type StdShMemProvider = UnixShMemProvider; ))] pub type StdShMem = UnixShMem; +/// The standard sharedmem service #[cfg(any( not(any(target_os = "android", target_vendor = "apple")), not(feature = "std") ))] pub type StdShMemService = DummyShMemService; -use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use std::env; - -#[cfg(all(unix, feature = "std"))] -use crate::bolts::os::pipes::Pipe; -#[cfg(all(unix, feature = "std"))] -use std::io::{Read, Write}; - /// Description of a shared map. /// May be used to restore the map by id. 
#[derive(Copy, Clone, Debug, Serialize, Deserialize)] @@ -262,7 +271,7 @@ pub struct RcShMem { impl ShMem for RcShMem where - T: ShMemProvider + alloc::fmt::Debug, + T: ShMemProvider + Debug, { fn id(&self) -> ShMemId { self.internal.id() @@ -314,7 +323,7 @@ where #[cfg(all(unix, feature = "std"))] impl ShMemProvider for RcShMemProvider where - SP: ShMemProvider + alloc::fmt::Debug, + SP: ShMemProvider + Debug, { type Mem = RcShMem; @@ -391,7 +400,7 @@ where fn pipe_set(pipe: &mut Option) -> Result<(), Error> { match pipe { Some(pipe) => { - let ok = [0u8; 4]; + let ok = [0_u8; 4]; pipe.write_all(&ok)?; Ok(()) } @@ -405,7 +414,7 @@ where fn pipe_await(pipe: &mut Option) -> Result<(), Error> { match pipe { Some(pipe) => { - let ok = [0u8; 4]; + let ok = [0_u8; 4]; let mut ret = ok; pipe.read_exact(&mut ret)?; if ret == ok { @@ -447,7 +456,7 @@ where #[cfg(all(unix, feature = "std"))] impl Default for RcShMemProvider where - SP: ShMemProvider + alloc::fmt::Debug, + SP: ShMemProvider + Debug, { fn default() -> Self { Self::new().unwrap() @@ -489,7 +498,7 @@ pub mod unix_shmem { c_int, c_long, c_uchar, c_uint, c_ulong, c_ushort, close, ftruncate, mmap, munmap, perror, shm_open, shm_unlink, shmat, shmctl, shmget, }; - use std::{io::Write, process, ptr::null_mut}; + use std::{io::Write, process}; use crate::{ bolts::shmem::{ShMem, ShMemId, ShMemProvider}, @@ -549,6 +558,7 @@ pub mod unix_shmem { } impl MmapShMem { + /// Create a new [`MmapShMem`] pub fn new(map_size: usize, shmem_ctr: usize) -> Result { unsafe { let mut filename_path = [0_u8; MAX_MMAP_FILENAME_LEN]; @@ -585,7 +595,7 @@ pub mod unix_shmem { /* map the shared memory segment to the address space of the process */ let map = mmap( - null_mut(), + ptr::null_mut(), map_size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED, @@ -618,7 +628,7 @@ pub mod unix_shmem { /* map the shared memory segment to the address space of the process */ let map = mmap( - null_mut(), + ptr::null_mut(), map_size, libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED, @@ -766,7 +776,7 @@ pub mod unix_shmem { let id_int: i32 = id.into(); let map = shmat(id_int, ptr::null(), 0) as *mut c_uchar; - if map.is_null() || map == null_mut::().wrapping_sub(1) { + if map.is_null() || map == ptr::null_mut::().wrapping_sub(1) { return Err(Error::Unknown( "Failed to map the shared mapping".to_string(), )); @@ -842,7 +852,7 @@ pub mod unix_shmem { /// Module containing `ashmem` shared memory support, commonly used on Android. 
#[cfg(all(unix, feature = "std"))] pub mod ashmem { - use core::slice; + use core::{ptr, slice}; use libc::{ c_uint, c_ulong, c_void, close, ioctl, mmap, open, MAP_SHARED, O_RDWR, PROT_READ, PROT_WRITE, @@ -909,6 +919,7 @@ pub mod unix_shmem { //return Err(Error::Unknown("Failed to set the ashmem mapping's name".to_string())); //}; + #[allow(trivial_numeric_casts)] if ioctl(fd, ASHMEM_SET_SIZE as _, map_size) != 0 { close(fd); return Err(Error::Unknown( @@ -917,7 +928,7 @@ pub mod unix_shmem { }; let map = mmap( - std::ptr::null_mut(), + ptr::null_mut(), map_size, PROT_READ | PROT_WRITE, MAP_SHARED, @@ -943,7 +954,7 @@ pub mod unix_shmem { pub fn from_id_and_size(id: ShMemId, map_size: usize) -> Result { unsafe { let fd: i32 = id.to_string().parse().unwrap(); - #[allow(clippy::cast_sign_loss)] + #[allow(trivial_numeric_casts, clippy::cast_sign_loss)] if ioctl(fd, ASHMEM_GET_SIZE as _) as u32 as usize != map_size { return Err(Error::Unknown( "The mapping's size differs from the requested size".to_string(), @@ -951,7 +962,7 @@ pub mod unix_shmem { }; let map = mmap( - std::ptr::null_mut(), + ptr::null_mut(), map_size, PROT_READ | PROT_WRITE, MAP_SHARED, @@ -996,10 +1007,12 @@ pub mod unix_shmem { /// [`Drop`] implementation for [`AshmemShMem`], which cleans up the mapping. #[cfg(unix)] impl Drop for AshmemShMem { + #[allow(trivial_numeric_casts)] fn drop(&mut self) { unsafe { let fd: i32 = self.id.to_string().parse().unwrap(); + #[allow(trivial_numeric_casts)] #[allow(clippy::cast_sign_loss)] let length = ioctl(fd, ASHMEM_GET_SIZE as _) as u32; @@ -1049,6 +1062,7 @@ pub mod unix_shmem { } } +/// Then `win32` implementation for shared memory. #[cfg(all(feature = "std", windows))] pub mod win32_shmem { @@ -1219,8 +1233,9 @@ impl DummyShMemService { } } -#[cfg(feature = "std")] /// A cursor around [`ShMem`] that immitates [`std::io::Cursor`]. Notably, this implements [`Write`] for [`ShMem`] in std environments. +#[cfg(feature = "std")] +#[derive(Debug)] pub struct ShMemCursor { inner: T, pos: usize, @@ -1228,6 +1243,7 @@ pub struct ShMemCursor { #[cfg(feature = "std")] impl ShMemCursor { + /// Create a new [`ShMemCursor`] around [`ShMem`] pub fn new(shmem: T) -> Self { Self { inner: shmem, @@ -1242,7 +1258,7 @@ impl ShMemCursor { } #[cfg(feature = "std")] -impl std::io::Write for ShMemCursor { +impl Write for ShMemCursor { fn write(&mut self, buf: &[u8]) -> std::io::Result { match self.empty_slice_mut().write(buf) { Ok(w) => { diff --git a/libafl/src/bolts/staterestore.rs b/libafl/src/bolts/staterestore.rs index b006603447..f6b12ad0ba 100644 --- a/libafl/src/bolts/staterestore.rs +++ b/libafl/src/bolts/staterestore.rs @@ -1,5 +1,5 @@ -/// Stores and restores state when a client needs to relaunch. -/// Uses a [`ShMem`] up to a threshold, then write to disk. +//! Stores and restores state when a client needs to relaunch. +//! Uses a [`ShMem`] up to a threshold, then write to disk. 
use ahash::AHasher; use core::{hash::Hasher, marker::PhantomData, mem::size_of, ptr, slice}; use serde::{de::DeserializeOwned, Serialize}; @@ -204,7 +204,7 @@ where S: DeserializeOwned, { if !self.has_content() { - return Ok(Option::None); + return Ok(None); } let state_shmem_content = self.content(); let bytes = unsafe { @@ -216,7 +216,7 @@ where let mut state = bytes; let mut file_content; if state_shmem_content.buf_len == 0 { - return Ok(Option::None); + return Ok(None); } else if state_shmem_content.is_disk { let filename: String = postcard::from_bytes(bytes)?; let tmpfile = temp_dir().join(&filename); diff --git a/libafl/src/corpus/minimizer.rs b/libafl/src/corpus/minimizer.rs index e12580de38..e6b661fad2 100644 --- a/libafl/src/corpus/minimizer.rs +++ b/libafl/src/corpus/minimizer.rs @@ -18,13 +18,13 @@ use serde::{Deserialize, Serialize}; pub const DEFAULT_SKIP_NON_FAVORED_PROB: u64 = 95; /// A testcase metadata saying if a testcase is favored -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct IsFavoredMetadata {} crate::impl_serdeany!(IsFavoredMetadata); /// A state metadata holding a map of favoreds testcases for each map entry -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct TopRatedsMetadata { /// map index -> corpus index pub map: HashMap, @@ -59,6 +59,7 @@ where /// Multiply the testcase size with the execution time. /// This favors small and quick testcases. +#[derive(Debug, Clone)] pub struct LenTimeMulFavFactor where I: Input + HasLen, @@ -79,6 +80,7 @@ where /// The [`MinimizerCorpusScheduler`] employs a genetic algorithm to compute a subset of the /// corpus that exercise all the requested features (e.g. all the coverage seen so far) /// prioritizing [`Testcase`]`s` using [`FavFactor`] +#[derive(Debug, Clone)] pub struct MinimizerCorpusScheduler where CS: CorpusScheduler, diff --git a/libafl/src/corpus/mod.rs b/libafl/src/corpus/mod.rs index 28f4c7b0a2..77a0b30d35 100644 --- a/libafl/src/corpus/mod.rs +++ b/libafl/src/corpus/mod.rs @@ -107,6 +107,7 @@ where } /// Feed the fuzzer simpply with a random testcase on request +#[derive(Debug, Clone)] pub struct RandCorpusScheduler where S: HasCorpus + HasRand, diff --git a/libafl/src/corpus/ondisk.rs b/libafl/src/corpus/ondisk.rs index f4b373e298..c6a155c054 100644 --- a/libafl/src/corpus/ondisk.rs +++ b/libafl/src/corpus/ondisk.rs @@ -30,7 +30,7 @@ pub enum OnDiskMetadataFormat { /// A corpus able to store testcases to disk, and load them from disk, when they are being used. 
#[cfg(feature = "std")] -#[derive(Serialize)] +#[derive(Debug, Serialize)] pub struct OnDiskMetadata<'a> { metadata: &'a SerdeAnyMap, exec_time: &'a Option, diff --git a/libafl/src/corpus/powersched.rs b/libafl/src/corpus/powersched.rs index 4d8044d606..987ff55460 100644 --- a/libafl/src/corpus/powersched.rs +++ b/libafl/src/corpus/powersched.rs @@ -11,6 +11,8 @@ use crate::{ Error, }; +/// A corpus scheduler using power schedules +#[derive(Clone, Debug)] pub struct PowerQueueCorpusScheduler where S: HasCorpus + HasMetadata, @@ -96,6 +98,7 @@ where C: Corpus, I: Input, { + /// Create a new [`PowerQueueCorpusScheduler`] #[must_use] pub fn new() -> Self { Self { diff --git a/libafl/src/corpus/queue.rs b/libafl/src/corpus/queue.rs index 7c8b5114dc..d434d1ab93 100644 --- a/libafl/src/corpus/queue.rs +++ b/libafl/src/corpus/queue.rs @@ -11,6 +11,7 @@ use crate::{ }; /// Walk the corpus in a queue-like fashion +#[derive(Debug, Clone)] pub struct QueueCorpusScheduler where S: HasCorpus, diff --git a/libafl/src/corpus/testcase.rs b/libafl/src/corpus/testcase.rs index c71504d1f7..2a2594c744 100644 --- a/libafl/src/corpus/testcase.rs +++ b/libafl/src/corpus/testcase.rs @@ -133,6 +133,7 @@ where &mut self.exec_time } + /// Sets the execution time of the current testcase #[inline] pub fn set_exec_time(&mut self, time: Duration) { self.exec_time = Some(time); @@ -260,6 +261,7 @@ pub struct PowerScheduleTestcaseMetaData { } impl PowerScheduleTestcaseMetaData { + /// Create new [`struct@PowerScheduleTestcaseMetaData`] #[must_use] pub fn new(depth: u64) -> Self { Self { @@ -271,47 +273,57 @@ impl PowerScheduleTestcaseMetaData { } } + /// Get the bitmap size #[must_use] pub fn bitmap_size(&self) -> u64 { self.bitmap_size } + /// Set the bitmap size pub fn set_bitmap_size(&mut self, val: u64) { self.bitmap_size = val; } + /// Get the fuzz level #[must_use] pub fn fuzz_level(&self) -> u64 { self.fuzz_level } + /// Set the fuzz level pub fn set_fuzz_level(&mut self, val: u64) { self.fuzz_level = val; } + /// Get the handicap #[must_use] pub fn handicap(&self) -> u64 { self.handicap } + /// Set the handicap pub fn set_handicap(&mut self, val: u64) { self.handicap = val; } + /// Get the depth #[must_use] pub fn depth(&self) -> u64 { self.depth } + /// Set the depth pub fn set_depth(&mut self, val: u64) { self.depth = val; } + /// Get the `n_fuzz_entry` #[must_use] pub fn n_fuzz_entry(&self) -> usize { self.n_fuzz_entry } + /// Set the `n_fuzz_entry` pub fn set_n_fuzz_entry(&mut self, val: usize) { self.n_fuzz_entry = val; } diff --git a/libafl/src/events/llmp.rs b/libafl/src/events/llmp.rs index f988d7de92..5485a38c57 100644 --- a/libafl/src/events/llmp.rs +++ b/libafl/src/events/llmp.rs @@ -1,32 +1,24 @@ //! 
LLMP-backed event manager for scalable multi-processed fuzzing -use alloc::string::ToString; -use core::{marker::PhantomData, time::Duration}; - -#[cfg(feature = "std")] -use core::sync::atomic::{compiler_fence, Ordering}; -#[cfg(feature = "std")] -use core_affinity::CoreId; -#[cfg(feature = "std")] -use serde::{de::DeserializeOwned, Serialize}; -#[cfg(feature = "std")] -use std::net::{SocketAddr, ToSocketAddrs}; - -#[cfg(feature = "std")] +#[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] +use crate::bolts::os::startable_self; +#[cfg(all(feature = "std", feature = "fork", unix))] +use crate::bolts::os::{fork, ForkResult}; +#[cfg(feature = "llmp_compression")] use crate::bolts::{ - llmp::{LlmpClient, LlmpConnection}, - shmem::StdShMemProvider, - staterestore::StateRestorer, + compress::GzipCompressor, + llmp::{LLMP_FLAG_COMPRESSED, LLMP_FLAG_INITIALIZED}, }; - +#[cfg(feature = "std")] +use crate::bolts::{llmp::LlmpConnection, shmem::StdShMemProvider, staterestore::StateRestorer}; use crate::{ bolts::{ - llmp::{self, Flags, LlmpClientDescription, Tag}, + llmp::{self, Flags, LlmpClient, LlmpClientDescription, Tag}, shmem::ShMemProvider, }, events::{ BrokerEventResult, Event, EventConfig, EventFirer, EventManager, EventManagerId, - EventProcessor, EventRestarter, HasEventManagerId, + EventProcessor, EventRestarter, HasEventManagerId, ProgressReporter, }, executors::{Executor, HasObservers}, fuzzer::{EvaluatorObservers, ExecutionProcessor}, @@ -35,38 +27,35 @@ use crate::{ observers::ObserversTuple, Error, }; - -#[cfg(feature = "llmp_compression")] -use crate::bolts::{ - compress::GzipCompressor, - llmp::{LLMP_FLAG_COMPRESSED, LLMP_FLAG_INITIALIZED}, -}; - -#[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] -use crate::bolts::os::startable_self; - -#[cfg(all(feature = "std", feature = "fork", unix))] -use crate::bolts::os::{fork, ForkResult}; - +use alloc::string::ToString; +#[cfg(feature = "std")] +use core::sync::atomic::{compiler_fence, Ordering}; +use core::{marker::PhantomData, time::Duration}; +#[cfg(feature = "std")] +use core_affinity::CoreId; +use serde::de::DeserializeOwned; +#[cfg(feature = "std")] +use serde::Serialize; +#[cfg(feature = "std")] +use std::net::{SocketAddr, ToSocketAddrs}; #[cfg(feature = "std")] use typed_builder::TypedBuilder; -use super::ProgressReporter; - /// Forward this to the client -const _LLMP_TAG_EVENT_TO_CLIENT: llmp::Tag = 0x2C11E471; +const _LLMP_TAG_EVENT_TO_CLIENT: Tag = 0x2C11E471; /// Only handle this in the broker -const _LLMP_TAG_EVENT_TO_BROKER: llmp::Tag = 0x2B80438; +const _LLMP_TAG_EVENT_TO_BROKER: Tag = 0x2B80438; /// Handle in both /// -const LLMP_TAG_EVENT_TO_BOTH: llmp::Tag = 0x2B0741; -const _LLMP_TAG_RESTART: llmp::Tag = 0x8357A87; -const _LLMP_TAG_NO_RESTART: llmp::Tag = 0x57A7EE71; +const LLMP_TAG_EVENT_TO_BOTH: Tag = 0x2B0741; +const _LLMP_TAG_RESTART: Tag = 0x8357A87; +const _LLMP_TAG_NO_RESTART: Tag = 0x57A7EE71; /// The minimum buffer size at which to compress LLMP IPC messages. 
#[cfg(feature = "llmp_compression")] const COMPRESS_THRESHOLD: usize = 1024; +/// An LLMP-backed event manager for scalable multi-processed fuzzing #[derive(Debug)] pub struct LlmpEventBroker where @@ -112,6 +101,7 @@ where }) } + /// Connect to an llmp broker on the givien address #[cfg(feature = "std")] pub fn connect_b2b(&mut self, addr: A) -> Result<(), Error> where @@ -262,7 +252,7 @@ where SP: ShMemProvider + 'static, //CE: CustomEvent, { - llmp: llmp::LlmpClient, + llmp: LlmpClient, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, configuration: EventConfig, @@ -288,7 +278,7 @@ where SP: ShMemProvider + 'static, { /// Create a manager from a raw llmp client - pub fn new(llmp: llmp::LlmpClient, configuration: EventConfig) -> Result { + pub fn new(llmp: LlmpClient, configuration: EventConfig) -> Result { Ok(Self { llmp, #[cfg(feature = "llmp_compression")] @@ -369,7 +359,7 @@ where event: Event, ) -> Result<(), Error> where - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, E: Executor + HasObservers, Z: ExecutionProcessor + EvaluatorObservers, { @@ -470,7 +460,7 @@ where SP: ShMemProvider, E: Executor + HasObservers, I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, Z: ExecutionProcessor + EvaluatorObservers, //CE: CustomEvent, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { @@ -512,7 +502,7 @@ impl EventManager for LlmpEventManager + HasObservers, I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, SP: ShMemProvider, Z: ExecutionProcessor + EvaluatorObservers, //CE: CustomEvent, { @@ -521,7 +511,7 @@ where impl ProgressReporter for LlmpEventManager where I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, SP: ShMemProvider, { } @@ -529,7 +519,7 @@ where impl HasEventManagerId for LlmpEventManager where I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, SP: ShMemProvider, { /// Gets the id assigned to this staterestorer. @@ -615,7 +605,7 @@ where E: Executor, I, S, Z> + HasObservers, I: Input, Z: ExecutionProcessor + EvaluatorObservers, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, SP: ShMemProvider + 'static, //CE: CustomEvent, { @@ -631,7 +621,7 @@ where I: Input, S: Serialize, Z: ExecutionProcessor + EvaluatorObservers, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, SP: ShMemProvider + 'static, //CE: CustomEvent, { @@ -641,7 +631,7 @@ where impl HasEventManagerId for LlmpRestartingEventManager where I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, S: Serialize, SP: ShMemProvider + 'static, { @@ -660,7 +650,7 @@ const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT"; impl LlmpRestartingEventManager where I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, SP: ShMemProvider + 'static, //CE: CustomEvent, { @@ -690,7 +680,10 @@ pub enum ManagerKind { /// Any kind will do Any, /// A client, getting messages from a local broker. - Client { cpu_core: Option }, + Client { + /// The cpu core id of this client + cpu_core: Option, + }, /// A [`llmp::LlmpBroker`], forwarding the packets of local clients. 
Broker, } @@ -715,7 +708,7 @@ where I: Input, S: DeserializeOwned, MT: Monitor + Clone, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, S: DeserializeOwned, { RestartingMgr::builder() @@ -736,7 +729,7 @@ where pub struct RestartingMgr where I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, S: DeserializeOwned, SP: ShMemProvider + 'static, MT: Monitor, @@ -768,7 +761,7 @@ where impl RestartingMgr where I: Input, - OT: ObserversTuple + serde::de::DeserializeOwned, + OT: ObserversTuple + DeserializeOwned, S: DeserializeOwned, SP: ShMemProvider, MT: Monitor + Clone, diff --git a/libafl/src/events/mod.rs b/libafl/src/events/mod.rs index 135f56e491..be9bdf0361 100644 --- a/libafl/src/events/mod.rs +++ b/libafl/src/events/mod.rs @@ -72,17 +72,23 @@ pub enum BrokerEventResult { /// Distinguish a fuzzer by its config #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum EventConfig { + /// Always assume unique setups for fuzzer configs AlwaysUnique, + /// Create a fuzzer config from a name hash FromName { + /// The name hash name_hash: u64, }, + /// Create a fuzzer config from a build-time [`Uuid`] #[cfg(feature = "std")] BuildID { + /// The build-time [`Uuid`] id: Uuid, }, } impl EventConfig { + /// Create a new [`EventConfig`] from a name hash #[must_use] pub fn from_name(name: &str) -> Self { let mut hasher = AHasher::new_with_keys(0, 0); @@ -92,6 +98,7 @@ impl EventConfig { } } + /// Create a new [`EventConfig`] from a build-time [`Uuid`] #[cfg(feature = "std")] #[must_use] pub fn from_build_id() -> Self { @@ -100,6 +107,7 @@ impl EventConfig { } } + /// Match if the currenti [`EventConfig`] matches another given config #[must_use] pub fn match_with(&self, other: &EventConfig) -> bool { match self { @@ -207,6 +215,7 @@ where /// Current performance statistics introspection_monitor: Box, + /// phantomm data phantom: PhantomData, }, /// A new objective was found @@ -313,7 +322,7 @@ where /// Serialize all observers for this type and manager fn serialize_observers(&mut self, observers: &OT) -> Result, Error> where - OT: ObserversTuple + serde::Serialize, + OT: ObserversTuple + Serialize, { Ok(postcard::to_allocvec(observers)?) } @@ -387,6 +396,7 @@ where } } +/// Restartable trait pub trait EventRestarter { /// For restarting event managers, implement a way to forward state to their next peers. #[inline] @@ -413,7 +423,9 @@ pub trait EventProcessor { Ok(postcard::from_bytes(observers_buf)?) } } - +/// The id of this [`EventManager`]. +/// For multi processed [`EventManager`]s, +/// each connected client sholud have a unique ids. pub trait HasEventManagerId { /// The id of this manager. For Multiprocessed [`EventManager`]s, /// each client sholud have a unique ids. diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index 080117b1b9..31f3d04f6c 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -231,6 +231,7 @@ where /// `restarter` will start a new process each time the child crashes or times out. 
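// Illustrative sketch (not part of this patch) of the `EventConfig` helpers
// documented above: configs derived from the same name hash to the same value,
// which `match_with` can use to tell compatible fuzzer instances apart.
// The name "default" is an arbitrary example.
use libafl::events::EventConfig;

fn event_config_example() {
    let a = EventConfig::from_name("default");
    let b = EventConfig::from_name("default");
    // Both configs carry the same name hash, so they should match.
    assert!(a.match_with(&b));
}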
#[cfg(feature = "std")] #[allow(clippy::default_trait_access)] +#[derive(Debug, Clone)] pub struct SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> where C: Corpus, diff --git a/libafl/src/executors/combined.rs b/libafl/src/executors/combined.rs index 49ebb294dc..9108ea6f90 100644 --- a/libafl/src/executors/combined.rs +++ b/libafl/src/executors/combined.rs @@ -8,6 +8,7 @@ use crate::{ }; /// A [`CombinedExecutor`] wraps a primary executor, forwarding its methods, and a secondary one +#[allow(missing_debug_implementations)] pub struct CombinedExecutor { primary: A, secondary: B, diff --git a/libafl/src/executors/command.rs b/libafl/src/executors/command.rs index e7373f47ae..c0d630da32 100644 --- a/libafl/src/executors/command.rs +++ b/libafl/src/executors/command.rs @@ -1,3 +1,4 @@ +//! The command executor executes a sub program for each run use core::marker::PhantomData; #[cfg(feature = "std")] @@ -14,13 +15,16 @@ use std::time::Duration; /// A `CommandExecutor` is a wrapper around [`std::process::Command`] to execute a target as a child process. /// Construct a `CommandExecutor` by implementing [`CommandConfigurator`] for a type of your choice and calling [`CommandConfigurator::into_executor`] on it. +#[allow(missing_debug_implementations)] pub struct CommandExecutor { inner: T, + /// [`crate::observers::Observer`]s for this executor observers: OT, phantom: PhantomData<(EM, I, S, Z)>, } impl CommandExecutor { + /// Accesses the inner value pub fn inner(&mut self) -> &mut T { &mut self.inner } @@ -119,6 +123,7 @@ where /// ``` #[cfg(all(feature = "std", unix))] pub trait CommandConfigurator: Sized { + /// Spawns a new process with the given configuration. fn spawn_child( &mut self, fuzzer: &mut Z, @@ -127,6 +132,7 @@ pub trait CommandConfigurator: Sized { input: &I, ) -> Result; + /// Create an `Executor` from this `CommandConfigurator`. fn into_executor(self, observers: OT) -> CommandExecutor where OT: ObserversTuple, diff --git a/libafl/src/executors/forkserver.rs b/libafl/src/executors/forkserver.rs index bdcadc89bd..6ed6017554 100644 --- a/libafl/src/executors/forkserver.rs +++ b/libafl/src/executors/forkserver.rs @@ -33,17 +33,21 @@ use nix::{ const FORKSRV_FD: i32 = 198; #[allow(clippy::cast_possible_wrap)] -const FS_OPT_ENABLED: i32 = 0x80000001u32 as i32; +const FS_OPT_ENABLED: i32 = 0x80000001_u32 as i32; #[allow(clippy::cast_possible_wrap)] -const FS_OPT_SHDMEM_FUZZ: i32 = 0x01000000u32 as i32; +const FS_OPT_SHDMEM_FUZZ: i32 = 0x01000000_u32 as i32; const SHMEM_FUZZ_HDR_SIZE: usize = 4; const MAX_FILE: usize = 1024 * 1024; -// Configure the target. 
setlimit, setsid, pipe_stdin, I borrowed the code from Angora fuzzer +/// Configure the target, `limit`, `setsid`, `pipe_stdin`, the code was borrowed from the [`Angora`](https://github.com/AngoraFuzzer/Angora) fuzzer pub trait ConfigTarget { + /// Sets the sid fn setsid(&mut self) -> &mut Self; + /// Sets a mem limit fn setlimit(&mut self, memlimit: u64) -> &mut Self; + /// Sets the stdin fn setstdin(&mut self, fd: RawFd, use_stdin: bool) -> &mut Self; + /// Sets the AFL forkserver pipes fn setpipe( &mut self, st_read: RawFd, @@ -113,6 +117,7 @@ impl ConfigTarget for Command { } } + #[allow(trivial_numeric_casts)] fn setlimit(&mut self, memlimit: u64) -> &mut Self { if memlimit == 0 { return self; @@ -145,11 +150,15 @@ impl ConfigTarget for Command { } } +/// The [`OutFile`] to write to +#[allow(missing_debug_implementations)] pub struct OutFile { + /// The file file: File, } impl OutFile { + /// Creates a new [`OutFile`] pub fn new(file_name: &str) -> Result { let f = OpenOptions::new() .read(true) @@ -159,11 +168,13 @@ impl OutFile { Ok(Self { file: f }) } + /// Gets the file as raw file descriptor #[must_use] pub fn as_raw_fd(&self) -> RawFd { self.file.as_raw_fd() } + /// Writes the given buffer to the file pub fn write_buf(&mut self, buf: &[u8]) { self.rewind(); self.file.write_all(buf).unwrap(); @@ -173,6 +184,7 @@ impl OutFile { self.rewind(); } + /// Rewinds the file to the beginning pub fn rewind(&mut self) { self.file.seek(SeekFrom::Start(0)).unwrap(); } @@ -180,6 +192,7 @@ impl OutFile { /// The [`Forkserver`] is communication channel with a child process that forks on request of the fuzzer. /// The communication happens via pipe. +#[derive(Debug)] pub struct Forkserver { st_pipe: Pipe, ctl_pipe: Pipe, @@ -189,6 +202,7 @@ pub struct Forkserver { } impl Forkserver { + /// Create a new [`Forkserver`] pub fn new( target: String, args: Vec, @@ -245,35 +259,42 @@ impl Forkserver { }) } + /// If the last run timed out #[must_use] pub fn last_run_timed_out(&self) -> i32 { self.last_run_timed_out } + /// Sets if the last run timed out pub fn set_last_run_timed_out(&mut self, last_run_timed_out: i32) { self.last_run_timed_out = last_run_timed_out; } + /// The status #[must_use] pub fn status(&self) -> i32 { self.status } + /// Sets the status pub fn set_status(&mut self, status: i32) { self.status = status; } + /// The child pid #[must_use] pub fn child_pid(&self) -> Pid { self.child_pid } + /// Set the child pid pub fn set_child_pid(&mut self, child_pid: Pid) { self.child_pid = child_pid; } + /// Read from the st pipe pub fn read_st(&mut self) -> Result<(usize, i32), Error> { - let mut buf: [u8; 4] = [0u8; 4]; + let mut buf: [u8; 4] = [0_u8; 4]; let rlen = self.st_pipe.read(&mut buf)?; let val: i32 = i32::from_ne_bytes(buf); @@ -281,14 +302,16 @@ impl Forkserver { Ok((rlen, val)) } + /// Write to the ctl pipe pub fn write_ctl(&mut self, val: i32) -> Result { let slen = self.ctl_pipe.write(&val.to_ne_bytes())?; Ok(slen) } + /// Read a message from the child process. 
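// Illustrative sketch (not part of this patch) of the wire format used by
// `write_ctl` and `read_st` above: each message is a single i32, exchanged as
// 4 bytes in native endianness over the control/status pipes.
fn forkserver_word_roundtrip() {
    let status: i32 = 0x0000_00ff;
    // What `write_ctl` puts on the control pipe.
    let raw: [u8; 4] = status.to_ne_bytes();
    // What `read_st` reconstructs after reading 4 bytes from the status pipe.
    let decoded = i32::from_ne_bytes(raw);
    assert_eq!(status, decoded);
}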
pub fn read_st_timed(&mut self, timeout: &TimeSpec) -> Result, Error> { - let mut buf: [u8; 4] = [0u8; 4]; + let mut buf: [u8; 4] = [0_u8; 4]; let st_read = match self.st_pipe.read_end() { Some(fd) => fd, None => { @@ -324,27 +347,36 @@ impl Forkserver { } } +/// A struct that has a forkserver pub trait HasForkserver { + /// The forkserver fn forkserver(&self) -> &Forkserver; + /// The forkserver, mutable fn forkserver_mut(&mut self) -> &mut Forkserver; + /// The file the forkserver is reading from fn out_file(&self) -> &OutFile; + /// The file the forkserver is reading from, mutable fn out_file_mut(&mut self) -> &mut OutFile; + /// The map of the fuzzer fn map(&self) -> &Option; + /// The map of the fuzzer, mutable fn map_mut(&mut self) -> &mut Option; } /// The timeout forkserver executor that wraps around the standard forkserver executor and sets a timeout before each run. +#[allow(missing_debug_implementations)] pub struct TimeoutForkserverExecutor { executor: E, timeout: TimeSpec, } impl TimeoutForkserverExecutor { + /// Create a new [`TimeoutForkserverExecutor`] pub fn new(executor: E, exec_tmout: Duration) -> Result { let milli_sec = exec_tmout.as_millis() as i64; let timeout = TimeSpec::milliseconds(milli_sec); @@ -450,6 +482,7 @@ where /// This [`Executor`] can run binaries compiled for AFL/AFL++ that make use of a forkserver. /// Shared memory feature is also available, but you have to set things up in your code. /// Please refer to AFL++'s docs. +#[allow(missing_debug_implementations)] pub struct ForkserverExecutor where I: Input + HasTargetBytes, @@ -469,6 +502,7 @@ where I: Input + HasTargetBytes, OT: ObserversTuple, { + /// Creates a new [`ForkserverExecutor`] with the given target, arguments and observers. pub fn new( target: String, arguments: &[String], @@ -478,6 +512,7 @@ where Self::with_debug(target, arguments, use_shmem_testcase, observers, false) } + /// Creates a new [`ForkserverExecutor`] with the given target, arguments and observers, with debug mode pub fn with_debug( target: String, arguments: &[String], @@ -557,18 +592,22 @@ where }) } + /// The `target` binary that's going to run. pub fn target(&self) -> &String { &self.target } + /// The `args` used for the binary. pub fn args(&self) -> &[String] { &self.args } + /// The [`Forkserver`] instance. pub fn forkserver(&self) -> &Forkserver { &self.forkserver } + /// The [`OutFile`] used by this [`Executor`]. pub fn out_file(&self) -> &OutFile { &self.out_file } @@ -737,10 +776,7 @@ mod tests { let bin = "echo"; let args = vec![String::from("@@")]; - let mut shmem = StdShMemProvider::new() - .unwrap() - .new_map(MAP_SIZE as usize) - .unwrap(); + let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap(); shmem.write_to_env("__AFL_SHM_ID").unwrap(); let shmem_map = shmem.map_mut(); diff --git a/libafl/src/executors/inprocess.rs b/libafl/src/executors/inprocess.rs index 136f85d56f..a58c827078 100644 --- a/libafl/src/executors/inprocess.rs +++ b/libafl/src/executors/inprocess.rs @@ -159,17 +159,20 @@ where self.harness_fn } + /// The inprocess handlers #[inline] pub fn handlers(&self) -> &InProcessHandlers { &self.handlers } + /// The inprocess handlers, mut #[inline] pub fn handlers_mut(&mut self) -> &mut InProcessHandlers { &mut self.handlers } } +/// The inmem executor's handlers. #[derive(Debug)] pub struct InProcessHandlers { /// On crash C function pointer @@ -179,32 +182,33 @@ pub struct InProcessHandlers { } impl InProcessHandlers { + /// Call before running a target. 
pub fn pre_run_target( &self, - executor: &E, - fuzzer: &mut Z, - state: &mut S, - mgr: &mut EM, - input: &I, + _executor: &E, + _fuzzer: &mut Z, + _state: &mut S, + _mgr: &mut EM, + _input: &I, ) { #[cfg(unix)] unsafe { let data = &mut GLOBAL_STATE; write_volatile( &mut data.current_input_ptr, - input as *const _ as *const c_void, + _input as *const _ as *const c_void, ); write_volatile( &mut data.executor_ptr, - executor as *const _ as *const c_void, + _executor as *const _ as *const c_void, ); data.crash_handler = self.crash_handler; data.timeout_handler = self.timeout_handler; // Direct raw pointers access /aliasing is pretty undefined behavior. // Since the state and event may have moved in memory, refresh them right before the signal may happen - write_volatile(&mut data.state_ptr, state as *mut _ as *mut c_void); - write_volatile(&mut data.event_mgr_ptr, mgr as *mut _ as *mut c_void); - write_volatile(&mut data.fuzzer_ptr, fuzzer as *mut _ as *mut c_void); + write_volatile(&mut data.state_ptr, _state as *mut _ as *mut c_void); + write_volatile(&mut data.event_mgr_ptr, _mgr as *mut _ as *mut c_void); + write_volatile(&mut data.fuzzer_ptr, _fuzzer as *mut _ as *mut c_void); compiler_fence(Ordering::SeqCst); } #[cfg(all(windows, feature = "std"))] @@ -212,23 +216,24 @@ impl InProcessHandlers { let data = &mut GLOBAL_STATE; write_volatile( &mut data.current_input_ptr, - input as *const _ as *const c_void, + _input as *const _ as *const c_void, ); write_volatile( &mut data.executor_ptr, - executor as *const _ as *const c_void, + _executor as *const _ as *const c_void, ); data.crash_handler = self.crash_handler; data.timeout_handler = self.timeout_handler; // Direct raw pointers access /aliasing is pretty undefined behavior. // Since the state and event may have moved in memory, refresh them right before the signal may happen - write_volatile(&mut data.state_ptr, state as *mut _ as *mut c_void); - write_volatile(&mut data.event_mgr_ptr, mgr as *mut _ as *mut c_void); - write_volatile(&mut data.fuzzer_ptr, fuzzer as *mut _ as *mut c_void); + write_volatile(&mut data.state_ptr, _state as *mut _ as *mut c_void); + write_volatile(&mut data.event_mgr_ptr, _mgr as *mut _ as *mut c_void); + write_volatile(&mut data.fuzzer_ptr, _fuzzer as *mut _ as *mut c_void); compiler_fence(Ordering::SeqCst); } } + /// Call after running a target. #[allow(clippy::unused_self)] pub fn post_run_target(&self) { #[cfg(unix)] @@ -243,6 +248,7 @@ impl InProcessHandlers { } } + /// Create new [`InProcessHandlers`]. pub fn new() -> Result where I: Input, @@ -311,6 +317,7 @@ impl InProcessHandlers { }) } + /// Replace the handlers with `nop` handlers, deactivating the handlers #[must_use] pub fn nop() -> Self { Self { @@ -320,6 +327,9 @@ impl InProcessHandlers { } } +/// The global state of the in-process harness. 
+#[derive(Debug)] +#[allow(missing_docs)] pub struct InProcessExecutorHandlerData { pub state_ptr: *mut c_void, pub event_mgr_ptr: *mut c_void, @@ -367,21 +377,25 @@ pub static mut GLOBAL_STATE: InProcessExecutorHandlerData = InProcessExecutorHan timeout_input_ptr: ptr::null_mut(), }; +/// Get the inprocess [`crate::state::State`] #[must_use] pub fn inprocess_get_state<'a, S>() -> Option<&'a mut S> { unsafe { (GLOBAL_STATE.state_ptr as *mut S).as_mut() } } +/// Get the [`crate::events::EventManager`] #[must_use] pub fn inprocess_get_event_manager<'a, EM>() -> Option<&'a mut EM> { unsafe { (GLOBAL_STATE.event_mgr_ptr as *mut EM).as_mut() } } +/// Gets the inprocess [`crate::fuzzer::Fuzzer`] #[must_use] pub fn inprocess_get_fuzzer<'a, F>() -> Option<&'a mut F> { unsafe { (GLOBAL_STATE.fuzzer_ptr as *mut F).as_mut() } } +/// Gets the inprocess [`Executor`] #[must_use] pub fn inprocess_get_executor<'a, E>() -> Option<&'a mut E> { unsafe { (GLOBAL_STATE.executor_ptr as *mut E).as_mut() } @@ -697,7 +711,7 @@ mod windows_exception_handler { impl Handler for InProcessExecutorHandlerData { #[allow(clippy::not_unsafe_ptr_arg_deref)] - fn handle(&mut self, code: ExceptionCode, exception_pointers: *mut EXCEPTION_POINTERS) { + fn handle(&mut self, _code: ExceptionCode, exception_pointers: *mut EXCEPTION_POINTERS) { unsafe { let data = &mut GLOBAL_STATE; if !data.crash_handler.is_null() { @@ -908,7 +922,7 @@ mod windows_exception_handler { let interesting = fuzzer .objective_mut() - .is_interesting(state, event_mgr, &input, observers, &ExitKind::Crash) + .is_interesting(state, event_mgr, input, observers, &ExitKind::Crash) .expect("In crash handler objective failure."); if interesting { @@ -945,8 +959,10 @@ mod windows_exception_handler { } } +/// The struct has [`InProcessHandlers`]. #[cfg(windows)] pub trait HasInProcessHandlers { + /// Get the in-process handlers. fn inprocess_handlers(&self) -> &InProcessHandlers; } @@ -964,7 +980,9 @@ where } } +/// [`InProcessForkExecutor`] is an executor that forks the current process before each execution. 
#[cfg(all(feature = "std", unix))] +#[allow(missing_debug_implementations)] pub struct InProcessForkExecutor<'a, H, I, OT, S, SP> where H: FnMut(&I) -> ExitKind, @@ -1033,6 +1051,7 @@ where OT: ObserversTuple, SP: ShMemProvider, { + /// Creates a new [`InProcessForkExecutor`] pub fn new( harness_fn: &'a mut H, observers: OT, diff --git a/libafl/src/executors/shadow.rs b/libafl/src/executors/shadow.rs index 3a6c0866f8..06210ec4bb 100644 --- a/libafl/src/executors/shadow.rs +++ b/libafl/src/executors/shadow.rs @@ -10,9 +10,13 @@ use crate::{ }; /// A [`ShadowExecutor`] wraps an executor and a set of shadow observers +#[allow(missing_debug_implementations)] pub struct ShadowExecutor { + /// The wrapped executor executor: E, + /// The shadow observers shadow_observers: SOT, + /// phantom data phantom: PhantomData<(I, S)>, } @@ -29,11 +33,13 @@ where } } + /// The shadow observers are not considered by the feedbacks and the manager, mutable #[inline] pub fn shadow_observers(&self) -> &SOT { &self.shadow_observers } + /// The shadow observers are not considered by the feedbacks and the manager, mutable #[inline] pub fn shadow_observers_mut(&mut self) -> &mut SOT { &mut self.shadow_observers diff --git a/libafl/src/executors/timeout.rs b/libafl/src/executors/timeout.rs index b8be61b9e1..612bed56df 100644 --- a/libafl/src/executors/timeout.rs +++ b/libafl/src/executors/timeout.rs @@ -24,15 +24,12 @@ use windows::Win32::{ System::Threading::{ CloseThreadpoolTimer, CreateThreadpoolTimer, EnterCriticalSection, InitializeCriticalSection, LeaveCriticalSection, SetThreadpoolTimer, RTL_CRITICAL_SECTION, - TP_CALLBACK_ENVIRON_V3, TP_TIMER, + TP_CALLBACK_ENVIRON_V3, TP_CALLBACK_INSTANCE, TP_TIMER, }, }; #[cfg(all(windows, feature = "std"))] -use core::{ - ffi::c_void, - ptr::{write, write_volatile}, -}; +use core::{ffi::c_void, ptr::write_volatile}; #[cfg(windows)] use core::sync::atomic::{compiler_fence, Ordering}; @@ -77,6 +74,7 @@ pub(crate) unsafe fn windows_delete_timer_queue(tp_timer: *mut TP_TIMER) { } /// The timeout excutor is a wrapper that sets a timeout before each run +#[allow(missing_debug_implementations)] pub struct TimeoutExecutor { executor: E, #[cfg(unix)] @@ -92,14 +90,14 @@ pub struct TimeoutExecutor { #[cfg(windows)] #[allow(non_camel_case_types)] type PTP_TIMER_CALLBACK = unsafe extern "system" fn( - param0: *mut windows::Win32::System::Threading::TP_CALLBACK_INSTANCE, + param0: *mut TP_CALLBACK_INSTANCE, param1: *mut c_void, - param2: *mut windows::Win32::System::Threading::TP_TIMER, + param2: *mut TP_TIMER, ); #[cfg(unix)] impl TimeoutExecutor { - /// Create a new `TimeoutExecutor`, wrapping the given `executor` and checking for timeouts. + /// Create a new [`TimeoutExecutor`], wrapping the given `executor` and checking for timeouts. /// This should usually be used for `InProcess` fuzzing. pub fn new(executor: E, exec_tmout: Duration) -> Self { let milli_sec = exec_tmout.as_millis(); @@ -124,6 +122,7 @@ impl TimeoutExecutor { #[cfg(windows)] impl TimeoutExecutor { + /// Create a new [`TimeoutExecutor`], wrapping the given `executor` and checking for timeouts. 
pub fn new(executor: E, exec_tmout: Duration) -> Self { let milli_sec = exec_tmout.as_millis() as i64; let timeout_handler: PTP_TIMER_CALLBACK = @@ -149,6 +148,7 @@ impl TimeoutExecutor { } } + /// Set the timeout for this executor #[cfg(unix)] pub fn set_timeout(&mut self, exec_tmout: Duration) { let milli_sec = exec_tmout.as_millis(); @@ -167,6 +167,7 @@ impl TimeoutExecutor { self.itimerval = itimerval; } + /// Set the timeout for this executor #[cfg(windows)] pub fn set_timeout(&mut self, exec_tmout: Duration) { self.milli_sec = exec_tmout.as_millis() as i64; @@ -177,6 +178,7 @@ impl TimeoutExecutor { &mut self.executor } + /// Reset the timeout for this executor #[cfg(windows)] pub fn windows_reset_timeout(&self) -> Result<(), Error> { unsafe { @@ -192,6 +194,7 @@ where E: Executor + HasInProcessHandlers, I: Input, { + #[allow(clippy::cast_sign_loss)] fn run_target( &mut self, fuzzer: &mut Z, @@ -210,10 +213,11 @@ where &mut data.timeout_input_ptr, &mut data.current_input_ptr as *mut _ as *mut c_void, ); - let tm: i64 = -1 * self.milli_sec * 10 * 1000; - let mut ft = FILETIME::default(); - ft.dwLowDateTime = (tm & 0xffffffff) as u32; - ft.dwHighDateTime = (tm >> 32) as u32; + let tm: i64 = -self.milli_sec * 10 * 1000; + let ft = FILETIME { + dwLowDateTime: (tm & 0xffffffff) as u32, + dwHighDateTime: (tm >> 32) as u32, + }; compiler_fence(Ordering::SeqCst); EnterCriticalSection(&mut self.critical); diff --git a/libafl/src/executors/with_observers.rs b/libafl/src/executors/with_observers.rs index e1c746f3bb..7fbe684f5d 100644 --- a/libafl/src/executors/with_observers.rs +++ b/libafl/src/executors/with_observers.rs @@ -1,8 +1,10 @@ +//! A wrapper for any [`Executor`] to make it implement [`HasObservers`] using a given [`ObserversTuple`]. use crate::{inputs::Input, observers::ObserversTuple, Error}; use super::{Executor, ExitKind, HasObservers}; /// A wrapper for any [`Executor`] to make it implement [`HasObservers`] using a given [`ObserversTuple`]. +#[allow(missing_debug_implementations)] pub struct WithObservers { executor: E, observers: OT, diff --git a/libafl/src/feedbacks/concolic.rs b/libafl/src/feedbacks/concolic.rs index 434d940ea6..52401c71b0 100644 --- a/libafl/src/feedbacks/concolic.rs +++ b/libafl/src/feedbacks/concolic.rs @@ -1,3 +1,8 @@ +//! Concoliic feedback for comcolic fuzzing. +//! It is used to attach concolic tracing metadata to the testcase. +//! This feedback should be used in combination with another feedback as this feedback always considers testcases +//! to be not interesting. +//! Requires a [`ConcolicObserver`] to observe the concolic trace. use crate::{ bolts::tuples::Named, corpus::Testcase, @@ -17,12 +22,14 @@ use crate::{ /// This feedback should be used in combination with another feedback as this feedback always considers testcases /// to be not interesting. /// Requires a [`ConcolicObserver`] to observe the concolic trace. 
+#[derive(Debug)] pub struct ConcolicFeedback { name: String, metadata: Option, } impl ConcolicFeedback { + /// Creates a concolic feedback from an observer #[allow(unused)] #[must_use] pub fn from_observer(observer: &ConcolicObserver) -> Self { diff --git a/libafl/src/feedbacks/map.rs b/libafl/src/feedbacks/map.rs index ca13ce5967..f95a6ded44 100644 --- a/libafl/src/feedbacks/map.rs +++ b/libafl/src/feedbacks/map.rs @@ -41,7 +41,7 @@ pub type MaxMapOneOrFilledFeedback = /// A `Reducer` function is used to aggregate values for the novelty search pub trait Reducer: Serialize + serde::de::DeserializeOwned + 'static where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Reduce two values to one value, with the current [`Reducer`]. fn reduce(first: T, second: T) -> T; @@ -53,13 +53,7 @@ pub struct OrReducer {} impl Reducer for OrReducer where - T: PrimInt - + Default - + Copy - + 'static - + serde::Serialize - + serde::de::DeserializeOwned - + PartialOrd, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + PartialOrd, { #[inline] fn reduce(history: T, new: T) -> T { @@ -73,13 +67,7 @@ pub struct AndReducer {} impl Reducer for AndReducer where - T: PrimInt - + Default - + Copy - + 'static - + serde::Serialize - + serde::de::DeserializeOwned - + PartialOrd, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + PartialOrd, { #[inline] fn reduce(history: T, new: T) -> T { @@ -93,13 +81,7 @@ pub struct MaxReducer {} impl Reducer for MaxReducer where - T: PrimInt - + Default - + Copy - + 'static - + serde::Serialize - + serde::de::DeserializeOwned - + PartialOrd, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + PartialOrd, { #[inline] fn reduce(first: T, second: T) -> T { @@ -117,13 +99,7 @@ pub struct MinReducer {} impl Reducer for MinReducer where - T: PrimInt - + Default - + Copy - + 'static - + serde::Serialize - + serde::de::DeserializeOwned - + PartialOrd, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + PartialOrd, { #[inline] fn reduce(first: T, second: T) -> T { @@ -138,7 +114,7 @@ where /// A `IsNovel` function is used to discriminate if a reduced value is considered novel. pub trait IsNovel: Serialize + serde::de::DeserializeOwned + 'static where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// If a new value in the [`MapFeedback`] was found, /// this filter can decide if the result is considered novel or not. 
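Taken together, the `Reducer` and `IsNovel` traits above amount to a per-index fold over the coverage map: reduce the freshly observed value against the stored history, and flag the index as novel when the reduced value differs. A small std-only sketch of that loop with a max reducer and a "different is novel" check (illustrative names, not the crate's `MapFeedback` code):

    // Max-reduce a fresh coverage map into a history map, collecting novel indexes.
    fn max_reduce_novelties(history: &mut [u8], observed: &[u8]) -> Vec<usize> {
        let mut novelties = Vec::new();
        for (idx, (&new, old)) in observed.iter().zip(history.iter_mut()).enumerate() {
            let reduced = (*old).max(new); // the reducer step
            if reduced != *old {           // the novelty check
                *old = reduced;            // persist the new history value
                novelties.push(idx);
            }
        }
        novelties
    }

    fn main() {
        let mut history = vec![0u8; 8];
        let observed = [0, 3, 0, 1, 0, 0, 0, 2];
        assert_eq!(max_reduce_novelties(&mut history, &observed), vec![1, 3, 7]);
        // A second run with the same map yields no novelties.
        assert!(max_reduce_novelties(&mut history, &observed).is_empty());
    }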
@@ -151,7 +127,7 @@ pub struct AllIsNovel {} impl IsNovel for AllIsNovel where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn is_novel(_old: T, _new: T) -> bool { @@ -178,7 +154,7 @@ fn saturating_next_power_of_two(n: T) -> T { pub struct DifferentIsNovel {} impl IsNovel for DifferentIsNovel where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn is_novel(old: T, new: T) -> bool { @@ -191,7 +167,7 @@ where pub struct NextPow2IsNovel {} impl IsNovel for NextPow2IsNovel where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn is_novel(old: T, new: T) -> bool { @@ -211,7 +187,7 @@ where pub struct OneOrFilledIsNovel {} impl IsNovel for OneOrFilledIsNovel where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn is_novel(old: T, new: T) -> bool { @@ -220,7 +196,7 @@ where } /// A testcase metadata holding a list of indexes of a map -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct MapIndexesMetadata { /// The list of indexes. pub list: Vec, @@ -256,7 +232,7 @@ impl MapIndexesMetadata { } /// A testcase metadata holding a list of indexes of a map -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct MapNoveltiesMetadata { /// A `list` of novelties. 
pub list: Vec, @@ -284,7 +260,7 @@ impl MapNoveltiesMetadata { #[serde(bound = "T: serde::de::DeserializeOwned")] pub struct MapFeedbackState where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Contains information about untouched entries pub history_map: Vec, @@ -294,7 +270,7 @@ where impl FeedbackState for MapFeedbackState where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { fn reset(&mut self) -> Result<(), Error> { self.history_map @@ -306,7 +282,7 @@ where impl Named for MapFeedbackState where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn name(&self) -> &str { @@ -316,7 +292,7 @@ where impl MapFeedbackState where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Create new `MapFeedbackState` #[must_use] @@ -355,7 +331,7 @@ where #[serde(bound = "T: serde::de::DeserializeOwned")] pub struct MapFeedback where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, R: Reducer, O: MapObserver, N: IsNovel, @@ -376,7 +352,7 @@ where impl Feedback for MapFeedback where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, R: Reducer, O: MapObserver, N: IsNovel, @@ -485,7 +461,7 @@ where impl Named for MapFeedback where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, R: Reducer, N: IsNovel, O: MapObserver, @@ -504,7 +480,7 @@ where + Default + Copy + 'static - + serde::Serialize + + Serialize + serde::de::DeserializeOwned + PartialOrd + Debug, diff --git a/libafl/src/feedbacks/mod.rs b/libafl/src/feedbacks/mod.rs index 81ec585158..4923f687da 100644 --- a/libafl/src/feedbacks/mod.rs +++ b/libafl/src/feedbacks/mod.rs @@ -51,6 +51,8 @@ where EM: EventFirer, OT: ObserversTuple; + /// Returns if the result of a run is interesting and the value input should be stored in a corpus. + /// It also keeps track of introspection stats. 
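Further down, this module introduces eager and fast variants of the `AND`/`OR` feedback combinators. The practical difference is short-circuiting: the eager logic always consults both feedbacks, so both can update their internal state, while the fast logic stops at the first interesting result. A dependency-free sketch of that distinction (the `CountingFeedback` type is invented purely for illustration):

    struct CountingFeedback {
        interesting: bool,
        calls: usize,
    }

    impl CountingFeedback {
        fn is_interesting(&mut self) -> bool {
            self.calls += 1;
            self.interesting
        }
    }

    // Eager OR: both feedbacks run, even if the first one already hit.
    fn eager_or(a: &mut CountingFeedback, b: &mut CountingFeedback) -> bool {
        let first = a.is_interesting();
        let second = b.is_interesting();
        first || second
    }

    // Fast OR: the second feedback is skipped once the first one hits.
    fn fast_or(a: &mut CountingFeedback, b: &mut CountingFeedback) -> bool {
        a.is_interesting() || b.is_interesting()
    }

    fn main() {
        let mut a = CountingFeedback { interesting: true, calls: 0 };
        let mut b = CountingFeedback { interesting: false, calls: 0 };
        assert!(eager_or(&mut a, &mut b));
        assert_eq!(b.calls, 1); // eager: the second feedback still ran

        let mut c = CountingFeedback { interesting: true, calls: 0 };
        let mut d = CountingFeedback { interesting: false, calls: 0 };
        assert!(fast_or(&mut c, &mut d));
        assert_eq!(d.calls, 0); // fast: the second feedback was skipped
    }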
#[cfg(feature = "introspection")] #[allow(clippy::too_many_arguments)] fn is_interesting_introspection( @@ -101,7 +103,7 @@ where /// [`FeedbackState`] is the data associated with a [`Feedback`] that must persist as part /// of the fuzzer State -pub trait FeedbackState: Named + serde::Serialize + serde::de::DeserializeOwned { +pub trait FeedbackState: Named + Serialize + serde::de::DeserializeOwned { /// Reset the internal state fn reset(&mut self) -> Result<(), Error> { Ok(()) @@ -109,7 +111,8 @@ pub trait FeedbackState: Named + serde::Serialize + serde::de::DeserializeOwned } /// A haskell-style tuple of feedback states -pub trait FeedbackStatesTuple: MatchName + serde::Serialize + serde::de::DeserializeOwned { +pub trait FeedbackStatesTuple: MatchName + Serialize + serde::de::DeserializeOwned { + /// Resets all the feedback states of the tuple fn reset_all(&mut self) -> Result<(), Error>; } @@ -130,6 +133,8 @@ where } } +/// A cobined feedback consisting of ultiple [`Feedback`]s +#[allow(missing_debug_implementations)] pub struct CombinedFeedback where A: Feedback, @@ -138,7 +143,9 @@ where I: Input, S: HasClientPerfMonitor, { + /// First [`Feedback`] pub first: A, + /// Second [`Feedback`] pub second: B, name: String, phantom: PhantomData<(I, S, FL)>, @@ -165,6 +172,7 @@ where I: Input, S: HasClientPerfMonitor, { + /// Create a new combined feedback pub fn new(first: A, second: B) -> Self { let name = format!("{} ({},{})", FL::name(), first.name(), second.name()); Self { @@ -244,6 +252,7 @@ where } } +/// Logical combination of two feedbacks pub trait FeedbackLogic: 'static where A: Feedback, @@ -251,8 +260,10 @@ where I: Input, S: HasClientPerfMonitor, { + /// The name of this cobination fn name() -> &'static str; + /// If the feedback pair is interesting fn is_pair_interesting( first: &mut A, second: &mut B, @@ -266,6 +277,7 @@ where EM: EventFirer, OT: ObserversTuple; + /// If this pair is interesting (with introspection features enabled) #[cfg(feature = "introspection")] #[allow(clippy::too_many_arguments)] fn is_pair_interesting_introspection( @@ -282,9 +294,20 @@ where OT: ObserversTuple; } +/// Eager `OR` combination of two feedbacks +#[derive(Debug, Clone)] pub struct LogicEagerOr {} + +/// Fast `OR` combination of two feedbacks +#[derive(Debug, Clone)] pub struct LogicFastOr {} + +/// Eager `AND` combination of two feedbacks +#[derive(Debug, Clone)] pub struct LogicEagerAnd {} + +/// Fast `AND` combination of two feedbacks +#[derive(Debug, Clone)] pub struct LogicFastAnd {} impl FeedbackLogic for LogicEagerOr @@ -521,7 +544,8 @@ pub type EagerOrFeedback = CombinedFeedback = CombinedFeedback; -/// Compose feedbacks with an OR operation +/// Compose feedbacks with an `NOT` operation +#[derive(Clone, Debug)] pub struct NotFeedback where A: Feedback, @@ -631,6 +655,7 @@ macro_rules! feedback_or { }; } +/// Combines multiple feedbacks with an `OR` operation, not executing feedbacks after the first positive result #[macro_export] macro_rules! feedback_or_fast { ( $last:expr ) => { $last }; diff --git a/libafl/src/feedbacks/nautilus.rs b/libafl/src/feedbacks/nautilus.rs index e51e2cd5b3..3ec3608bf4 100644 --- a/libafl/src/feedbacks/nautilus.rs +++ b/libafl/src/feedbacks/nautilus.rs @@ -1,5 +1,8 @@ +//! 
Nautilus grammar mutator, see +use core::fmt::Debug; use grammartec::{chunkstore::ChunkStore, context::Context}; use serde::{Deserialize, Serialize}; +use serde_json; use std::fs::create_dir_all; use crate::{ @@ -15,14 +18,27 @@ use crate::{ Error, }; +/// Metadata for Nautilus grammar mutator chunks #[derive(Serialize, Deserialize)] pub struct NautilusChunksMetadata { + /// the chunk store pub cks: ChunkStore, } +impl Debug for NautilusChunksMetadata { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "NautilusChunksMetadata {{ {} }}", + serde_json::to_string_pretty(self).unwrap(), + ) + } +} + crate::impl_serdeany!(NautilusChunksMetadata); impl NautilusChunksMetadata { + /// Creates a new [`NautilusChunksMetadata`] #[must_use] pub fn new(work_dir: String) -> Self { create_dir_all(format!("{}/outputs/chunks", &work_dir)) @@ -33,11 +49,19 @@ impl NautilusChunksMetadata { } } +/// A nautilus feedback for grammar fuzzing pub struct NautilusFeedback<'a> { ctx: &'a Context, } +impl Debug for NautilusFeedback<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "NautilusFeedback {{}}") + } +} + impl<'a> NautilusFeedback<'a> { + /// Create a new [`NautilusFeedback`] #[must_use] pub fn new(context: &'a NautilusContext) -> Self { Self { ctx: &context.ctx } diff --git a/libafl/src/fuzzer/mod.rs b/libafl/src/fuzzer/mod.rs index 608f166475..e8d3d4476a 100644 --- a/libafl/src/fuzzer/mod.rs +++ b/libafl/src/fuzzer/mod.rs @@ -220,10 +220,14 @@ where } } +/// The corpus this input should be added to #[derive(Debug, PartialEq)] pub enum ExecuteInputResult { + /// No special input None, + /// This input should be stored ini the corpus Corpus, + /// This input leads to a solution Solution, } @@ -612,6 +616,7 @@ where } } +/// Structs with this trait will execute an [`Input`] pub trait ExecutesInput where I: Input, diff --git a/libafl/src/generators/gramatron.rs b/libafl/src/generators/gramatron.rs index 9ac6402886..c14cfbec30 100644 --- a/libafl/src/generators/gramatron.rs +++ b/libafl/src/generators/gramatron.rs @@ -1,3 +1,4 @@ +//! Gramamtron generator use alloc::{string::String, vec::Vec}; use core::marker::PhantomData; use serde::{Deserialize, Serialize}; @@ -10,16 +11,23 @@ use crate::{ Error, }; +/// A trigger #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct Trigger { + /// the destination pub dest: usize, + /// the term pub term: String, } +/// The [`Automaton`] #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct Automaton { + /// final state pub final_state: usize, + /// init state pub init_state: usize, + /// pda of [`Trigger`]s pub pda: Vec>, } @@ -64,6 +72,7 @@ where } } + /// Append the generated terminals pub fn append_generated_terminals(&self, input: &mut GramatronInput, state: &mut S) -> usize { let mut counter = 0; let final_state = self.automaton.final_state; diff --git a/libafl/src/generators/nautilus.rs b/libafl/src/generators/nautilus.rs index 51be4da97c..e96255ea70 100644 --- a/libafl/src/generators/nautilus.rs +++ b/libafl/src/generators/nautilus.rs @@ -1,15 +1,24 @@ +//! 
Generators for the [`Nautilus`](https://github.com/RUB-SysSec/nautilus) grammar fuzzer +use crate::{generators::Generator, inputs::nautilus::NautilusInput, Error}; use alloc::{string::String, vec::Vec}; +use core::fmt::Debug; +use grammartec::context::Context; use std::{fs, io::BufReader, path::Path}; -use crate::{generators::Generator, inputs::nautilus::NautilusInput, Error}; - -use grammartec::context::Context; pub use grammartec::newtypes::NTermID; +/// The nautilus context for a generator pub struct NautilusContext { + /// The nautilus context for a generator pub ctx: Context, } +impl Debug for NautilusContext { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "NautilusContext {{}}",) + } +} + impl NautilusContext { /// Returns a new [`NautilusGenerator`] #[must_use] @@ -26,6 +35,7 @@ impl NautilusContext { Self { ctx } } + /// Create a new [`NautilusContext`] from a file #[must_use] pub fn from_file>(tree_depth: usize, grammar_file: P) -> Self { let file = fs::File::open(grammar_file).expect("Cannot open grammar file"); @@ -39,9 +49,16 @@ impl NautilusContext { #[derive(Clone)] /// Generates random inputs from a grammar pub struct NautilusGenerator<'a> { + /// The nautilus context of the grammar pub ctx: &'a Context, } +impl Debug for NautilusGenerator<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "NautilusGenerator {{}}",) + } +} + impl<'a, S> Generator for NautilusGenerator<'a> { fn generate(&mut self, _state: &mut S) -> Result { let nonterm = self.nonterminal("START"); @@ -63,12 +80,14 @@ impl<'a> NautilusGenerator<'a> { Self { ctx: &context.ctx } } + /// Gets the nonterminal from this input // TODO create from a python grammar #[must_use] pub fn nonterminal(&self, name: &str) -> NTermID { self.ctx.nt_id(name) } + /// Generates a [`NautilusInput`] from a nonterminal pub fn generate_from_nonterminal(&self, input: &mut NautilusInput, start: NTermID, len: usize) { input.tree_mut().generate_from_nt(start, len, self.ctx); } diff --git a/libafl/src/inputs/encoded.rs b/libafl/src/inputs/encoded.rs index 95c141bc5c..1a0f14de34 100644 --- a/libafl/src/inputs/encoded.rs +++ b/libafl/src/inputs/encoded.rs @@ -15,25 +15,35 @@ use serde::{Deserialize, Serialize}; use crate::{bolts::HasLen, inputs::Input, Error}; +/// Trait to encode bytes to an [`EncodedInput`] using the given [`Tokenizer`] pub trait InputEncoder where T: Tokenizer, { + /// Encode bytes to an [`EncodedInput`] using the given [`Tokenizer`] fn encode(&mut self, bytes: &[u8], tokenizer: &mut T) -> Result; } +/// Trait to decode encoded input to bytes pub trait InputDecoder { + /// Decode encoded input to bytes fn decode(&self, input: &EncodedInput, bytes: &mut Vec) -> Result<(), Error>; } +/// Tokenizer is a trait that can tokenize bytes into a ][`Vec`] of tokens pub trait Tokenizer { + /// Tokanize the given bytes fn tokenize(&self, bytes: &[u8]) -> Result, Error>; } +/// A token input encoder/decoder #[derive(Clone, Debug)] pub struct TokenInputEncoderDecoder { + /// The table of tokens token_table: HashMap, + /// The table of ids id_table: HashMap, + /// The next id next_id: u32, } @@ -72,6 +82,7 @@ impl InputDecoder for TokenInputEncoderDecoder { } impl TokenInputEncoderDecoder { + /// Creates a new [`TokenInputEncoderDecoder`] #[must_use] pub fn new() -> Self { Self { @@ -88,15 +99,21 @@ impl Default for TokenInputEncoderDecoder { } } +/// A native tokenizer struct #[cfg(feature = "std")] +#[derive(Clone, Debug)] pub struct NaiveTokenizer { + /// 
Ident regex ident_re: Regex, + /// Comement regex comment_re: Regex, + /// String regex string_re: Regex, } #[cfg(feature = "std")] impl NaiveTokenizer { + /// Creates a new [`NaiveTokenizer`] #[must_use] pub fn new(ident_re: Regex, comment_re: Regex, string_re: Regex) -> Self { Self { @@ -221,11 +238,13 @@ impl EncodedInput { Self { codes } } + /// The codes of this encoded input #[must_use] pub fn codes(&self) -> &[u32] { &self.codes } + /// The codes of this encoded input, mutable #[must_use] pub fn codes_mut(&mut self) -> &mut Vec { &mut self.codes diff --git a/libafl/src/inputs/gramatron.rs b/libafl/src/inputs/gramatron.rs index f5b0f39fc2..98deed3a84 100644 --- a/libafl/src/inputs/gramatron.rs +++ b/libafl/src/inputs/gramatron.rs @@ -1,3 +1,4 @@ +//! The gramatron grammar fuzzer use ahash::AHasher; use core::hash::Hasher; @@ -7,14 +8,19 @@ use serde::{Deserialize, Serialize}; use crate::{bolts::HasLen, inputs::Input, Error}; +/// A terminal for gramatron grammar fuzzing #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] pub struct Terminal { + /// The state pub state: usize, + /// The trigger index pub trigger_idx: usize, + /// The symbol pub symbol: String, } impl Terminal { + /// Creates a new [`Terminal`] #[must_use] pub fn new(state: usize, trigger_idx: usize, symbol: String) -> Self { Self { @@ -25,6 +31,7 @@ impl Terminal { } } +/// An input for gramatron grammar fuzzing #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq)] pub struct GramatronInput { /// The input representation as list of terminals @@ -64,16 +71,19 @@ impl GramatronInput { Self { terms } } + /// The terminals of this input #[must_use] pub fn terminals(&self) -> &[Terminal] { &self.terms } + /// The terminals of this input, mutable #[must_use] pub fn terminals_mut(&mut self) -> &mut Vec { &mut self.terms } + /// Create a bytes representation of this input pub fn unparse(&self, bytes: &mut Vec) { bytes.clear(); for term in &self.terms { @@ -81,6 +91,7 @@ impl GramatronInput { } } + /// crop the value to the given length pub fn crop(&self, from: usize, to: usize) -> Result { if from < to && to <= self.terms.len() { let mut terms = vec![]; diff --git a/libafl/src/inputs/mod.rs b/libafl/src/inputs/mod.rs index f9a38c2837..eebf4d0406 100644 --- a/libafl/src/inputs/mod.rs +++ b/libafl/src/inputs/mod.rs @@ -28,7 +28,7 @@ use crate::bolts::fs::write_file_atomic; use crate::{bolts::ownedref::OwnedSlice, Error}; /// An input for the target -pub trait Input: Clone + serde::Serialize + serde::de::DeserializeOwned + Debug { +pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug { #[cfg(feature = "std")] /// Write this input to the file fn to_file
(&self, path: P) -> Result<(), Error> diff --git a/libafl/src/inputs/nautilus.rs b/libafl/src/inputs/nautilus.rs index 883afa0338..f49b6dc656 100644 --- a/libafl/src/inputs/nautilus.rs +++ b/libafl/src/inputs/nautilus.rs @@ -1,3 +1,6 @@ +//! Input for the [`Nautilus`](https://github.com/RUB-SysSec/nautilus) grammar fuzzer methods +//! + //use ahash::AHasher; //use core::hash::Hasher; @@ -12,6 +15,7 @@ use grammartec::{ tree::{Tree, TreeLike}, }; +/// An [`Input`] implementation for `Nautilus` grammar. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct NautilusInput { /// The input representation as Tree @@ -52,6 +56,7 @@ impl NautilusInput { Self { tree } } + /// Create an empty [`Input`] #[must_use] pub fn empty() -> Self { Self { @@ -63,16 +68,19 @@ impl NautilusInput { } } + /// Generate a `Nautilus` input from the given bytes pub fn unparse(&self, context: &NautilusContext, bytes: &mut Vec) { bytes.clear(); self.tree.unparse(NodeID::from(0), &context.ctx, bytes); } + /// Get the tree representation of this input #[must_use] pub fn tree(&self) -> &Tree { &self.tree } + /// Get the tree representation of this input, as a mutable reference #[must_use] pub fn tree_mut(&mut self) -> &mut Tree { &mut self.tree diff --git a/libafl/src/lib.rs b/libafl/src/lib.rs index 339582d886..15d516e819 100644 --- a/libafl/src/lib.rs +++ b/libafl/src/lib.rs @@ -5,14 +5,53 @@ Welcome to `LibAFL` #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(feature = "RUSTC_IS_NIGHTLY", feature(min_specialization))] #![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::unreadable_literal +)] +#![deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +)] +#![deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] #[macro_use] extern crate alloc; #[macro_use] extern crate static_assertions; #[cfg(feature = "std")] -extern crate ctor; -#[cfg(feature = "std")] pub use ctor::ctor; // Re-export derive(SerdeAny) diff --git a/libafl/src/monitors/mod.rs b/libafl/src/monitors/mod.rs index f3bf219571..043c2b791f 100644 --- a/libafl/src/monitors/mod.rs +++ b/libafl/src/monitors/mod.rs @@ -7,7 +7,7 @@ use alloc::{ string::{String, ToString}, vec::Vec, }; -use core::{fmt, time, time::Duration}; +use core::{fmt, time::Duration}; use hashbrown::HashMap; use serde::{Deserialize, Serialize}; @@ -18,8 +18,11 @@ const CLIENT_STATS_TIME_WINDOW_SECS: u64 = 5; // 5 seconds /// User-defined stat types #[derive(Serialize, Deserialize, Debug, Clone)] pub enum UserStats { + /// A numerical value Number(u64), + /// A `String` String(String), + /// A ratio of two values Ratio(u64, u64), } @@ -52,7 +55,7 @@ pub struct ClientStats { /// The last reported executions for this client pub last_window_executions: u64, /// The last time we got this information - pub last_window_time: time::Duration, + pub 
last_window_time: Duration, /// The last executions per sec pub last_execs_per_sec: f32, /// User-defined monitor @@ -66,7 +69,7 @@ pub struct ClientStats { impl ClientStats { /// We got a new information about executions for this client, insert them. - pub fn update_executions(&mut self, executions: u64, cur_time: time::Duration) { + pub fn update_executions(&mut self, executions: u64, cur_time: Duration) { let diff = cur_time .checked_sub(self.last_window_time) .map_or(0, |d| d.as_secs()); @@ -95,7 +98,7 @@ impl ClientStats { /// Get the calculated executions per second for this client #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)] - pub fn execs_per_sec(&mut self, cur_time: time::Duration) -> u64 { + pub fn execs_per_sec(&mut self, cur_time: Duration) -> u64 { if self.executions == 0 { return 0; } @@ -149,7 +152,7 @@ pub trait Monitor { fn client_stats(&self) -> &[ClientStats]; /// creation time - fn start_time(&mut self) -> time::Duration; + fn start_time(&mut self) -> Duration; /// show the monitor to the user fn display(&mut self, event_msg: String, sender_id: u32); @@ -218,6 +221,7 @@ pub trait Monitor { /// Monitor that print exactly nothing. /// Not good for debuging, very good for speed. +#[derive(Debug)] pub struct NopMonitor { start_time: Duration, client_stats: Vec, @@ -235,7 +239,7 @@ impl Monitor for NopMonitor { } /// Time this fuzzing run stated - fn start_time(&mut self) -> time::Duration { + fn start_time(&mut self) -> Duration { self.start_time } @@ -285,7 +289,7 @@ where } /// Time this fuzzing run stated - fn start_time(&mut self) -> time::Duration { + fn start_time(&mut self) -> Duration { self.start_time } @@ -338,7 +342,7 @@ where } /// Creates the monitor with a given `start_time`. - pub fn with_time(print_fn: F, start_time: time::Duration) -> Self { + pub fn with_time(print_fn: F, start_time: Duration) -> Self { Self { print_fn, start_time, @@ -347,6 +351,7 @@ where } } +/// Start the timer #[macro_export] macro_rules! start_timer { ($state:expr) => {{ @@ -356,6 +361,7 @@ macro_rules! start_timer { }}; } +/// Mark the elapsed time for the given feature #[macro_export] macro_rules! mark_feature_time { ($state:expr, $feature:expr) => {{ @@ -367,6 +373,7 @@ macro_rules! mark_feature_time { }}; } +/// Mark the elapsed time for the given feature #[macro_export] macro_rules! mark_feedback_time { ($state:expr) => {{ @@ -708,7 +715,7 @@ impl ClientPerfMonitor { self.stages .iter() .enumerate() - .filter(move |(stage_index, _)| used[*stage_index as usize]) + .filter(move |(stage_index, _)| used[*stage_index]) } /// A map of all `feedbacks` diff --git a/libafl/src/monitors/multi.rs b/libafl/src/monitors/multi.rs index 5f4dbab134..1b71c7e87b 100644 --- a/libafl/src/monitors/multi.rs +++ b/libafl/src/monitors/multi.rs @@ -1,7 +1,7 @@ //! Monitor to disply both cumulative and per-client monitor use alloc::{string::String, vec::Vec}; -use core::{time, time::Duration}; +use core::time::Duration; #[cfg(feature = "introspection")] use alloc::string::ToString; @@ -37,7 +37,7 @@ where } /// Time this fuzzing run stated - fn start_time(&mut self) -> time::Duration { + fn start_time(&mut self) -> Duration { self.start_time } @@ -104,7 +104,7 @@ where } /// Creates the monitor with a given `start_time`. 
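The `update_executions`/`execs_per_sec` pair above estimates throughput over a fixed window rather than from the raw lifetime totals, so short stalls and bursts do not dominate the reported rate. A simplified, std-only sketch of the same windowing idea (the 5-second window mirrors `CLIENT_STATS_TIME_WINDOW_SECS`; struct and field names are illustrative):

    use std::time::Duration;

    const WINDOW_SECS: u64 = 5;

    struct ExecWindow {
        executions: u64,
        last_window_executions: u64,
        last_window_time: Duration,
        last_execs_per_sec: f32,
    }

    impl ExecWindow {
        fn update(&mut self, executions: u64, cur_time: Duration) {
            let elapsed = cur_time
                .checked_sub(self.last_window_time)
                .map_or(0, |d| d.as_secs());
            self.executions = executions;
            // Only roll the window forward once enough time has passed.
            if elapsed > WINDOW_SECS {
                let window_execs = executions - self.last_window_executions;
                self.last_execs_per_sec = window_execs as f32 / elapsed as f32;
                self.last_window_time = cur_time;
                self.last_window_executions = executions;
            }
        }
    }

    fn main() {
        let mut w = ExecWindow {
            executions: 0,
            last_window_executions: 0,
            last_window_time: Duration::from_secs(0),
            last_execs_per_sec: 0.0,
        };
        w.update(1_000, Duration::from_secs(2));   // still inside the window
        w.update(12_000, Duration::from_secs(10)); // window rolls over
        assert!((w.last_execs_per_sec - 1_200.0).abs() < f32::EPSILON);
    }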
- pub fn with_time(print_fn: F, start_time: time::Duration) -> Self { + pub fn with_time(print_fn: F, start_time: Duration) -> Self { Self { print_fn, start_time, diff --git a/libafl/src/mutators/encoded_mutations.rs b/libafl/src/mutators/encoded_mutations.rs index 24674781a8..6e8d8c678a 100644 --- a/libafl/src/mutators/encoded_mutations.rs +++ b/libafl/src/mutators/encoded_mutations.rs @@ -1,3 +1,5 @@ +//! Mutations for [`EncodedInput`]s +//! use alloc::vec::Vec; use core::{ cmp::{max, min}, @@ -20,7 +22,7 @@ use crate::{ }; /// Set a code in the input as a random value -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedRandMutator where S: HasRand, @@ -75,7 +77,7 @@ where } /// Increment a random code in the input -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedIncMutator where S: HasRand, @@ -130,7 +132,7 @@ where } /// Decrement a random code in the input -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedDecMutator where S: HasRand, @@ -185,7 +187,7 @@ where } /// Adds or subtracts a random value up to `ARITH_MAX` to a random place in the codes [`Vec`]. -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedAddMutator where S: HasRand, @@ -244,7 +246,7 @@ where } /// Codes delete mutation for encoded inputs -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedDeleteMutator where S: HasRand, @@ -302,7 +304,7 @@ where } /// Insert mutation for encoded inputs -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedInsertCopyMutator where S: HasRand + HasMaxSize, @@ -382,7 +384,7 @@ where } /// Codes copy mutation for encoded inputs -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedCopyMutator where S: HasRand, @@ -442,7 +444,7 @@ where } /// Crossover insert mutation for encoded inputs -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedCrossoverInsertMutator where C: Corpus, @@ -537,7 +539,7 @@ where } /// Crossover replace mutation for encoded inputs -#[derive(Default)] +#[derive(Debug, Default)] pub struct EncodedCrossoverReplaceMutator where C: Corpus, diff --git a/libafl/src/mutators/gramatron.rs b/libafl/src/mutators/gramatron.rs index f0243f337c..b1ec7e8f8a 100644 --- a/libafl/src/mutators/gramatron.rs +++ b/libafl/src/mutators/gramatron.rs @@ -1,3 +1,5 @@ +//! Gramatron is the rewritten gramatron fuzzer in rust. +//! See the original gramatron repo [`Gramatron`](https://github.com/HexHive/Gramatron) for more details. use alloc::vec::Vec; use core::{cmp::max, marker::PhantomData}; use hashbrown::HashMap; @@ -13,6 +15,8 @@ use crate::{ Error, }; +/// A random mutator for grammar fuzzing +#[derive(Debug)] pub struct GramatronRandomMutator<'a, R, S> where S: HasRand + HasMetadata, @@ -66,7 +70,9 @@ where } } -#[derive(Serialize, Deserialize)] +/// The metadata used for `gramatron` +#[derive(Debug, Serialize, Deserialize)] +#[allow(missing_docs)] pub struct GramatronIdxMapMetadata { pub map: HashMap>, } @@ -74,6 +80,7 @@ pub struct GramatronIdxMapMetadata { crate::impl_serdeany!(GramatronIdxMapMetadata); impl GramatronIdxMapMetadata { + /// Creates a new [`struct@GramatronIdxMapMetadata`]. #[must_use] pub fn new(input: &GramatronInput) -> Self { let mut map = HashMap::default(); @@ -85,7 +92,8 @@ impl GramatronIdxMapMetadata { } } -#[derive(Default)] +/// A [`Mutator`] that mutates a [`GramatronInput`] by splicing inputs together. 
+#[derive(Default, Debug)] pub struct GramatronSpliceMutator where C: Corpus, @@ -173,7 +181,8 @@ where } } -#[derive(Default)] +/// A mutator that uses Gramatron for grammar fuzzing and mutation. +#[derive(Default, Debug)] pub struct GramatronRecursionMutator where S: HasRand + HasMetadata, diff --git a/libafl/src/mutators/mopt_mutator.rs b/libafl/src/mutators/mopt_mutator.rs index 62b9a5ed20..d2a49b59bc 100644 --- a/libafl/src/mutators/mopt_mutator.rs +++ b/libafl/src/mutators/mopt_mutator.rs @@ -30,9 +30,13 @@ pub struct MOpt { pub finds_until_last_swarm: usize, /// These w_* and g_* values are the coefficients for updating variables according to the PSO algorithms pub w_init: f64, + /// These w_* and g_* values are the coefficients for updating variables according to the PSO algorithms pub w_end: f64, + /// These w_* and g_* values are the coefficients for updating variables according to the PSO algorithms pub w_now: f64, + /// These w_* and g_* values are the coefficients for updating variables according to the PSO algorithms pub g_now: f64, + /// These w_* and g_* values are the coefficients for updating variables according to the PSO algorithms pub g_max: f64, /// The number of mutation operators pub operator_num: usize, @@ -48,11 +52,15 @@ pub struct MOpt { pub core_time: usize, /// The swarm identifier that we are currently using in the pilot fuzzing mode pub swarm_now: usize, - /// These are the parameters for the PSO algorithm + /// A parameter for the PSO algorithm x_now: Vec>, + /// A parameter for the PSO algorithm l_best: Vec>, + /// A parameter for the PSO algorithm eff_best: Vec>, + /// A parameter for the PSO algorithm g_best: Vec, + /// A parameter for the PSO algorithm v_now: Vec>, /// The probability that we want to use to choose the mutation operator. probability_now: Vec>, @@ -84,7 +92,7 @@ pub struct MOpt { crate::impl_serdeany!(MOpt); -impl fmt::Debug for MOpt { +impl Debug for MOpt { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MOpt") .field("\ntotal_finds", &self.total_finds) @@ -129,6 +137,7 @@ impl fmt::Debug for MOpt { const PERIOD_PILOT_COEF: f64 = 5000.0; impl MOpt { + /// Creates a new [`struct@MOpt`] instance. pub fn new(operator_num: usize, swarm_num: usize) -> Result { let mut mopt = Self { rand: StdRand::with_seed(0), @@ -169,6 +178,7 @@ impl MOpt { Ok(mopt) } + /// initialize pso #[allow(clippy::cast_precision_loss)] pub fn pso_initialize(&mut self) -> Result<(), Error> { if self.g_now > self.g_max { @@ -229,7 +239,7 @@ impl MOpt { Ok(()) } - /// Update the PSO algorithm parameters + /// Update the `PSO` algorithm parameters /// See #[allow(clippy::cast_precision_loss)] pub fn pso_update(&mut self) -> Result<(), Error> { @@ -339,12 +349,17 @@ impl MOpt { const V_MAX: f64 = 1.0; const V_MIN: f64 = 0.05; +/// The `MOpt` mode to use #[derive(Serialize, Deserialize, Clone, Copy, Debug)] pub enum MOptMode { + /// Pilot fuzzing mode Pilotfuzzing, + /// Core fuzzing mode Corefuzzing, } +/// This is the main struct of `MOpt`, an `AFL` mutator. +/// See the original `MOpt` implementation in pub struct StdMOptMutator where C: Corpus, @@ -526,6 +541,7 @@ where S: HasRand + HasMetadata + HasCorpus + HasSolutions, SC: Corpus, { + /// Create a new [`StdMOptMutator`]. 
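`pso_update` adjusts the per-operator selection probabilities with a particle-swarm step: each swarm's position is pulled toward its local best and the global best, then the probabilities are renormalized. A heavily simplified, dependency-free sketch of such a step (the inertia weight, the fixed r1/r2 factors, and the clamping bounds are illustrative assumptions, not MOpt's actual parameters):

    // One PSO step over a vector of operator-selection probabilities.
    fn pso_step(
        x_now: &mut [f64], // current position (per-operator probability)
        v_now: &mut [f64], // current velocity
        l_best: &[f64],    // best position found by this swarm
        g_best: &[f64],    // best position found across all swarms
        w: f64,            // inertia weight
    ) {
        const V_MIN: f64 = 0.05;
        const V_MAX: f64 = 1.0;
        for i in 0..x_now.len() {
            v_now[i] = w * (v_now[i]
                + 0.5 * (l_best[i] - x_now[i])
                + 0.5 * (g_best[i] - x_now[i]));
            x_now[i] = (x_now[i] + v_now[i]).clamp(V_MIN, V_MAX);
        }
        // Renormalize so the selection probabilities sum to 1 again.
        let sum: f64 = x_now.iter().sum();
        for x in x_now.iter_mut() {
            *x /= sum;
        }
    }

    fn main() {
        let mut x = vec![0.25; 4];
        let mut v = vec![0.1; 4];
        pso_step(&mut x, &mut v, &[0.4, 0.2, 0.2, 0.2], &[0.5, 0.1, 0.2, 0.2], 0.7);
        assert!((x.iter().sum::<f64>() - 1.0).abs() < 1e-9);
    }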
pub fn new(state: &mut S, mutations: MT, swarm_num: usize) -> Result { state.add_metadata::(MOpt::new(mutations.len(), swarm_num)?); Ok(Self { diff --git a/libafl/src/mutators/mutations.rs b/libafl/src/mutators/mutations.rs index 903cdcfd22..6c021c78bd 100644 --- a/libafl/src/mutators/mutations.rs +++ b/libafl/src/mutators/mutations.rs @@ -60,10 +60,13 @@ pub fn buffer_set(data: &mut [T], from: usize, len: usize, val: T) { /// The max value that will be added or subtracted during add mutations pub const ARITH_MAX: u64 = 35; +/// Interesting 8-bit values from AFL pub const INTERESTING_8: [i8; 9] = [-128, -1, 0, 1, 16, 32, 64, 100, 127]; +/// Interesting 16-bit values from AFL pub const INTERESTING_16: [i16; 19] = [ -128, -1, 0, 1, 16, 32, 64, 100, 127, -32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767, ]; +/// Interesting 32-bit values from AFL pub const INTERESTING_32: [i32; 27] = [ -128, -1, @@ -95,7 +98,7 @@ pub const INTERESTING_32: [i32; 27] = [ ]; /// Bitflip mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BitFlipMutator where I: Input + HasBytesVec, @@ -155,7 +158,7 @@ where } /// Byteflip mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct ByteFlipMutator where I: Input + HasBytesVec, @@ -213,7 +216,7 @@ where } /// Byte increment mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct ByteIncMutator where I: Input + HasBytesVec, @@ -272,7 +275,7 @@ where } /// Byte decrement mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct ByteDecMutator where I: Input + HasBytesVec, @@ -331,7 +334,7 @@ where } /// Byte negate mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct ByteNegMutator where I: Input + HasBytesVec, @@ -390,7 +393,7 @@ where } /// Byte random mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct ByteRandMutator where I: Input + HasBytesVec, @@ -453,7 +456,7 @@ where macro_rules! add_mutator_impl { ($name: ident, $size: ty) => { /// Adds or subtracts a random value up to `ARITH_MAX` to a [`<$size>`] at a random place in the [`Vec`], in random byte order. - #[derive(Default)] + #[derive(Default, Debug)] pub struct $name where I: Input + HasBytesVec, @@ -463,6 +466,7 @@ macro_rules! add_mutator_impl { phantom: PhantomData<(I, R, S)>, } + #[allow(trivial_numeric_casts)] impl Mutator for $name where I: Input + HasBytesVec, @@ -539,7 +543,7 @@ add_mutator_impl!(QwordAddMutator, u64); macro_rules! 
interesting_mutator_impl { ($name: ident, $size: ty, $interesting: ident) => { /// Inserts an interesting value at a random place in the input vector - #[derive(Default)] + #[derive(Default, Debug)] pub struct $name where I: Input + HasBytesVec, @@ -612,7 +616,7 @@ interesting_mutator_impl!(WordInterestingMutator, u16, INTERESTING_16); interesting_mutator_impl!(DwordInterestingMutator, u32, INTERESTING_32); /// Bytes delete mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesDeleteMutator where I: Input + HasBytesVec, @@ -674,7 +678,7 @@ where } /// Bytes expand mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesExpandMutator where I: Input + HasBytesVec, @@ -743,7 +747,7 @@ where } /// Bytes insert mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesInsertMutator where I: Input + HasBytesVec, @@ -818,7 +822,7 @@ where } /// Bytes random insert mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesRandInsertMutator where I: Input + HasBytesVec, @@ -890,7 +894,7 @@ where } /// Bytes set mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesSetMutator where I: Input + HasBytesVec, @@ -954,7 +958,7 @@ where } /// Bytes random set mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesRandSetMutator where I: Input + HasBytesVec, @@ -1018,7 +1022,7 @@ where } /// Bytes copy mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Default, Debug)] pub struct BytesCopyMutator where I: Input + HasBytesVec, @@ -1082,7 +1086,7 @@ where } /// Bytes insert and self copy mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Debug, Default)] pub struct BytesInsertCopyMutator where I: Input + HasBytesVec, @@ -1166,7 +1170,7 @@ where } /// Bytes swap mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Debug, Default)] pub struct BytesSwapMutator where I: Input + HasBytesVec, @@ -1232,7 +1236,7 @@ where } /// Crossover insert mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Debug, Default)] pub struct CrossoverInsertMutator where C: Corpus, @@ -1331,7 +1335,7 @@ where } /// Crossover replace mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Debug, Default)] pub struct CrossoverReplaceMutator where C: Corpus, @@ -1438,7 +1442,7 @@ fn locate_diffs(this: &[u8], other: &[u8]) -> (i64, i64) { } /// Splice mutation for inputs with a bytes vector -#[derive(Default)] +#[derive(Debug, Default)] pub struct SpliceMutator where C: Corpus, diff --git a/libafl/src/mutators/nautilus.rs b/libafl/src/mutators/nautilus.rs index b1440bacad..7703c636da 100644 --- a/libafl/src/mutators/nautilus.rs +++ b/libafl/src/mutators/nautilus.rs @@ -1,4 +1,4 @@ -use core::marker::PhantomData; +//! Mutators for the `Nautilus` grammmar fuzzer use crate::{ bolts::tuples::Named, @@ -11,17 +11,25 @@ use crate::{ Error, }; +use core::{fmt::Debug, marker::PhantomData}; use grammartec::mutator::Mutator as BackingMutator; use grammartec::{ context::Context, tree::{Tree, TreeMutation}, }; +/// The randomic mutator for `Nautilus` grammar. 
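The `interesting_mutator_impl!` macro above generates mutators that overwrite a random spot in the byte buffer with one of the well-known boundary values, in either byte order. A self-contained sketch of the 16-bit case (the shortened constant list and the tiny xorshift RNG are stand-ins to keep the example dependency-free):

    const INTERESTING_16: [i16; 4] = [-128, -1, 0, 32767]; // abbreviated list

    fn xorshift(state: &mut u64) -> u64 {
        *state ^= *state << 13;
        *state ^= *state >> 7;
        *state ^= *state << 17;
        *state
    }

    // Overwrite two bytes at a random offset with an interesting value,
    // in little- or big-endian order.
    fn interesting_word_mutation(bytes: &mut [u8], rng: &mut u64) {
        if bytes.len() < 2 {
            return;
        }
        let idx = (xorshift(rng) as usize) % (bytes.len() - 1);
        let val = INTERESTING_16[(xorshift(rng) as usize) % INTERESTING_16.len()];
        let new_bytes = if xorshift(rng) & 1 == 0 {
            val.to_le_bytes()
        } else {
            val.to_be_bytes()
        };
        bytes[idx..idx + 2].copy_from_slice(&new_bytes);
    }

    fn main() {
        let mut data = vec![0u8; 16];
        let mut rng = 0x1234_5678_9abc_def0_u64;
        interesting_word_mutation(&mut data, &mut rng);
        println!("{:?}", data);
    }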
pub struct NautilusRandomMutator<'a> { ctx: &'a Context, mutator: BackingMutator, } +impl Debug for NautilusRandomMutator<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "NautilusRandomMutator {{}}") + } +} + impl<'a, S> Mutator for NautilusRandomMutator<'a> { fn mutate( &mut self, @@ -70,12 +78,19 @@ impl<'a> NautilusRandomMutator<'a> { } } +/// The `Nautilus` recursion mutator // TODO calculate reucursions only for new items in corpus pub struct NautilusRecursionMutator<'a> { ctx: &'a Context, mutator: BackingMutator, } +impl Debug for NautilusRecursionMutator<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "NautilusRecursionMutator {{}}") + } +} + impl<'a, S> Mutator for NautilusRecursionMutator<'a> { fn mutate( &mut self, @@ -127,12 +142,19 @@ impl<'a> NautilusRecursionMutator<'a> { } } +/// The splicing mutator for `Nautilus` that can splice inputs together pub struct NautilusSpliceMutator<'a, C> { ctx: &'a Context, mutator: BackingMutator, phantom: PhantomData, } +impl Debug for NautilusSpliceMutator<'_, ()> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "NautilusSpliceMutator {{}}") + } +} + impl<'a, S, C> Mutator for NautilusSpliceMutator<'a, C> where C: Corpus, diff --git a/libafl/src/mutators/scheduled.rs b/libafl/src/mutators/scheduled.rs index 98b116a665..9a23a6a3a0 100644 --- a/libafl/src/mutators/scheduled.rs +++ b/libafl/src/mutators/scheduled.rs @@ -24,7 +24,7 @@ pub use crate::mutators::mutations::*; pub use crate::mutators::token_mutations::*; /// The metadata placed in a [`crate::corpus::Testcase`] by a [`LoggerScheduledMutator`]. -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct LogMutationMetadata { /// A list of logs pub list: Vec, diff --git a/libafl/src/mutators/token_mutations.rs b/libafl/src/mutators/token_mutations.rs index 1ed1ccd397..94435ffb61 100644 --- a/libafl/src/mutators/token_mutations.rs +++ b/libafl/src/mutators/token_mutations.rs @@ -23,7 +23,7 @@ use crate::{ }; /// A state metadata holding a list of tokens -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct Tokens { token_vec: Vec>, } @@ -126,7 +126,7 @@ impl Tokens { } /// Inserts a random token at a random position in the `Input`. -#[derive(Default)] +#[derive(Debug, Default)] pub struct TokenInsert where I: Input + HasBytesVec, @@ -212,7 +212,7 @@ where /// A `TokenReplace` [`Mutator`] replaces a random part of the input with one of a range of tokens. /// From AFL terms, this is called as `Dictionary` mutation (which doesn't really make sense ;) ). -#[derive(Default)] +#[derive(Debug, Default)] pub struct TokenReplace where I: Input + HasBytesVec, @@ -294,7 +294,7 @@ where /// A `I2SRandReplace` [`Mutator`] replaces a random matching input-2-state comparison operand with the other. /// it needs a valid [`CmpValuesMetadata`] in the state. 
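Put differently, the mutator above takes a comparison the target performed against (parts of) the input and patches the input so that the other operand shows up instead. A dependency-free sketch of that input-to-state substitution using a naive substring search (the real mutator works from the logged `CmpValuesMetadata` and a random position rather than scanning):

    // Replace the first occurrence of one comparison operand with the other.
    fn i2s_replace(input: &mut [u8], observed: (&[u8], &[u8])) -> bool {
        let (needle, replacement) = observed;
        if needle.is_empty() || needle.len() != replacement.len() {
            return false;
        }
        if let Some(pos) = input.windows(needle.len()).position(|w| w == needle) {
            input[pos..pos + needle.len()].copy_from_slice(replacement);
            return true;
        }
        false
    }

    fn main() {
        let mut input = b"user=guest".to_vec();
        // The target compared "guest" against "admin" somewhere during execution.
        assert!(i2s_replace(&mut input, (b"guest", b"admin")));
        assert_eq!(input, b"user=admin".to_vec());
    }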
-#[derive(Default)] +#[derive(Debug, Default)] pub struct I2SRandReplace where I: Input + HasBytesVec, diff --git a/libafl/src/observers/cmp.rs b/libafl/src/observers/cmp.rs index 79b2025588..024c9d38d3 100644 --- a/libafl/src/observers/cmp.rs +++ b/libafl/src/observers/cmp.rs @@ -14,16 +14,23 @@ use crate::{ Error, }; +/// Compare values collected during a run #[derive(Debug, Serialize, Deserialize)] pub enum CmpValues { + /// Two u8 values U8((u8, u8)), + /// Two u16 values U16((u16, u16)), + /// Two u32 values U32((u32, u32)), + /// Two u64 values U64((u64, u64)), + /// Two vecs of u8 values/byte Bytes((Vec, Vec)), } impl CmpValues { + /// Returns if the values are numericals #[must_use] pub fn is_numeric(&self) -> bool { matches!( @@ -32,6 +39,7 @@ impl CmpValues { ) } + /// Converts the value to a u64 tuple #[must_use] pub fn to_u64_tuple(&self) -> Option<(u64, u64)> { match self { @@ -45,7 +53,7 @@ impl CmpValues { } /// A state metadata holding a list of values logged from comparisons -#[derive(Default, Serialize, Deserialize)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct CmpValuesMetadata { /// A `list` of values. #[serde(skip)] @@ -81,13 +89,13 @@ pub trait CmpMap { self.len() == 0 } - // Get the number of executions for a cmp + /// Get the number of executions for a cmp fn executions_for(&self, idx: usize) -> usize; - // Get the number of logged executions for a cmp + /// Get the number of logged executions for a cmp fn usable_executions_for(&self, idx: usize) -> usize; - // Get the logged values for a cmp + /// Get the logged values for a cmp fn values_of(&self, idx: usize, execution: usize) -> CmpValues; /// Reset the state diff --git a/libafl/src/observers/concolic/mod.rs b/libafl/src/observers/concolic/mod.rs index 187697a988..c73000779f 100644 --- a/libafl/src/observers/concolic/mod.rs +++ b/libafl/src/observers/concolic/mod.rs @@ -52,6 +52,7 @@ impl From for Location { /// The messages in the format are a perfect mirror of the methods that are called on the runtime during execution. #[cfg(feature = "std")] #[derive(Serialize, Deserialize, Debug, PartialEq)] +#[allow(missing_docs)] pub enum SymExpr { InputByte { offset: usize, diff --git a/libafl/src/observers/concolic/observer.rs b/libafl/src/observers/concolic/observer.rs index 2fdd373a8f..9f77365bfd 100644 --- a/libafl/src/observers/concolic/observer.rs +++ b/libafl/src/observers/concolic/observer.rs @@ -18,6 +18,7 @@ pub struct ConcolicObserver<'map> { impl<'map, I, S> Observer for ConcolicObserver<'map> {} impl<'map> ConcolicObserver<'map> { + /// Create the concolic observer metadata for this run #[must_use] pub fn create_metadata_from_current_map(&self) -> ConcolicMetadata { let reader = MessageFileReader::from_length_prefixed_buffer(self.map) diff --git a/libafl/src/observers/concolic/serialization_format.rs b/libafl/src/observers/concolic/serialization_format.rs index 58aeffab50..32c69dc21e 100644 --- a/libafl/src/observers/concolic/serialization_format.rs +++ b/libafl/src/observers/concolic/serialization_format.rs @@ -56,9 +56,10 @@ fn serialization_options() -> DefaultOptions { } /// A `MessageFileReader` reads a stream of [`SymExpr`] and their corresponding [`SymExprRef`]s from any [`Read`]. 
+#[allow(missing_debug_implementations)] pub struct MessageFileReader { reader: R, - deserializer_config: bincode::DefaultOptions, + deserializer_config: DefaultOptions, current_id: usize, } @@ -78,7 +79,7 @@ impl MessageFileReader { /// Finally, the returned tuple contains the message itself as a [`SymExpr`] and the [`SymExprRef`] associated /// with this message. /// The `SymExprRef` may be used by following messages to refer back to this message. - pub fn next_message(&mut self) -> Option> { + pub fn next_message(&mut self) -> Option> { match self.deserializer_config.deserialize_from(&mut self.reader) { Ok(mut message) => { let message_id = self.transform_message(&mut message); @@ -203,6 +204,7 @@ impl MessageFileReader { /// A `MessageFileWriter` writes a stream of [`SymExpr`] to any [`Write`]. For each written expression, it returns /// a [`SymExprRef`] which should be used to refer back to it. +#[allow(missing_debug_implementations)] pub struct MessageFileWriter { id_counter: usize, writer: W, @@ -215,7 +217,7 @@ impl MessageFileWriter { pub fn from_writer(mut writer: W) -> io::Result { let writer_start_position = writer.stream_position()?; // write dummy trace length - writer.write_all(&0u64.to_le_bytes())?; + writer.write_all(&0_u64.to_le_bytes())?; Ok(Self { id_counter: 1, writer, @@ -227,7 +229,7 @@ impl MessageFileWriter { fn write_trace_size(&mut self) -> io::Result<()> { // calculate size of trace let end_pos = self.writer.stream_position()?; - let trace_header_len = 0u64.to_le_bytes().len() as u64; + let trace_header_len = 0_u64.to_le_bytes().len() as u64; assert!(end_pos > self.writer_start_position + trace_header_len); let trace_length = end_pos - self.writer_start_position - trace_header_len; @@ -253,7 +255,7 @@ impl MessageFileWriter { /// Writes a message to the stream and returns the [`SymExprRef`] that should be used to refer back to this message. /// May error when the underlying `Write` errors or when there is a serialization error. #[allow(clippy::too_many_lines)] - pub fn write_message(&mut self, mut message: SymExpr) -> bincode::Result { + pub fn write_message(&mut self, mut message: SymExpr) -> Result { let current_id = self.id_counter; match &mut message { SymExpr::InputByte { .. } @@ -442,7 +444,7 @@ impl<'buffer> MessageFileReader> { /// trace length (as generated by the [`MessageFileWriter`]). /// See also [`MessageFileReader::from_buffer`]. pub fn from_length_prefixed_buffer(mut buffer: &'buffer [u8]) -> io::Result { - let mut len_buf = 0u64.to_le_bytes(); + let mut len_buf = 0_u64.to_le_bytes(); buffer.read_exact(&mut len_buf)?; let buffer_len = u64::from_le_bytes(len_buf); assert!(usize::try_from(buffer_len).is_ok()); @@ -484,5 +486,6 @@ impl MessageFileWriter::Mem>> { } } +/// A writer that will write messages to a shared memory buffer. 
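The writer reserves a placeholder length at the start of the stream, writes the trace, then seeks back and patches in the real length, so a reader of the shared buffer knows exactly how many bytes belong to the trace. A std-only sketch of that reserve-then-backpatch pattern using an in-memory cursor (not the actual `MessageFileWriter` code):

    use std::io::{Cursor, Seek, SeekFrom, Write};

    fn write_length_prefixed(payload: &[u8]) -> std::io::Result<Vec<u8>> {
        let mut writer = Cursor::new(Vec::new());
        let start = writer.stream_position()?;
        writer.write_all(&0_u64.to_le_bytes())?; // dummy trace length
        writer.write_all(payload)?;
        let end = writer.stream_position()?;
        let header_len = 0_u64.to_le_bytes().len() as u64;
        let trace_len = end - start - header_len;
        writer.seek(SeekFrom::Start(start))?;
        writer.write_all(&trace_len.to_le_bytes())?; // backpatch the real length
        writer.seek(SeekFrom::Start(end))?;
        Ok(writer.into_inner())
    }

    fn main() -> std::io::Result<()> {
        let buf = write_length_prefixed(b"trace bytes")?;
        let mut len_bytes = [0u8; 8];
        len_bytes.copy_from_slice(&buf[..8]);
        assert_eq!(u64::from_le_bytes(len_bytes) as usize, b"trace bytes".len());
        Ok(())
    }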
pub type StdShMemMessageFileWriter = MessageFileWriter::Mem>>; diff --git a/libafl/src/observers/map.rs b/libafl/src/observers/map.rs index 8e26c4be95..d203d41252 100644 --- a/libafl/src/observers/map.rs +++ b/libafl/src/observers/map.rs @@ -25,7 +25,7 @@ use crate::{ }; /// A [`MapObserver`] observes the static map, as oftentimes used for afl-like coverage information -pub trait MapObserver: HasLen + Named + serde::Serialize + serde::de::DeserializeOwned +pub trait MapObserver: HasLen + Named + Serialize + serde::de::DeserializeOwned where T: PrimInt + Default + Copy + Debug, { @@ -35,12 +35,14 @@ where /// Get the map (mutable) if the observer can be represented with a slice fn map_mut(&mut self) -> Option<&mut [T]>; + /// Get the value at `idx` fn get(&self, idx: usize) -> &T { &self .map() .expect("Cannot get a map that cannot be represented as slice")[idx] } + /// Get the value at `idx` (mutable) fn get_mut(&mut self, idx: usize) -> &mut T { &mut self .map_mut() @@ -109,7 +111,7 @@ where #[allow(clippy::unsafe_derive_deserialize)] pub struct StdMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { map: OwnedSliceMut<'a, T>, initial: T, @@ -118,7 +120,7 @@ where impl<'a, I, S, T> Observer for StdMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] @@ -129,7 +131,7 @@ where impl<'a, T> Named for StdMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn name(&self) -> &str { @@ -139,7 +141,7 @@ where impl<'a, T> HasLen for StdMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn len(&self) -> usize { @@ -149,7 +151,7 @@ where impl<'a, T> MapObserver for StdMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, { #[inline] fn map(&self) -> Option<&[T]> { @@ -179,7 +181,7 @@ where impl<'a, T> StdMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Creates a new [`MapObserver`] #[must_use] @@ -224,7 +226,7 @@ where #[allow(clippy::unsafe_derive_deserialize)] pub struct ConstMapObserver<'a, T, const N: usize> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { map: OwnedSliceMut<'a, T>, initial: T, @@ -233,7 +235,7 @@ where impl<'a, I, S, T, const N: usize> Observer for ConstMapObserver<'a, T, N> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] @@ -244,7 +246,7 @@ where impl<'a, T, const N: usize> Named for ConstMapObserver<'a, T, N> where - T: 
PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn name(&self) -> &str { @@ -254,7 +256,7 @@ where impl<'a, T, const N: usize> HasLen for ConstMapObserver<'a, T, N> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn len(&self) -> usize { @@ -264,7 +266,7 @@ where impl<'a, T, const N: usize> MapObserver for ConstMapObserver<'a, T, N> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, { #[inline] fn usable_count(&self) -> usize { @@ -299,7 +301,7 @@ where impl<'a, T, const N: usize> ConstMapObserver<'a, T, N> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Creates a new [`MapObserver`] #[must_use] @@ -345,7 +347,7 @@ where #[allow(clippy::unsafe_derive_deserialize)] pub struct VariableMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { map: OwnedSliceMut<'a, T>, size: OwnedRefMut<'a, usize>, @@ -355,7 +357,7 @@ where impl<'a, I, S, T> Observer for VariableMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] @@ -366,7 +368,7 @@ where impl<'a, T> Named for VariableMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn name(&self) -> &str { @@ -376,7 +378,7 @@ where impl<'a, T> HasLen for VariableMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn len(&self) -> usize { @@ -386,7 +388,7 @@ where impl<'a, T> MapObserver for VariableMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, { #[inline] fn map(&self) -> Option<&[T]> { @@ -421,7 +423,7 @@ where impl<'a, T> VariableMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Creates a new [`MapObserver`] pub fn new(name: &'static str, map: &'a mut [T], size: &'a mut usize) -> Self { @@ -459,7 +461,7 @@ where #[serde(bound = "M: serde::de::DeserializeOwned")] pub struct HitcountsMapObserver where - M: serde::Serialize + serde::de::DeserializeOwned, + M: Serialize + serde::de::DeserializeOwned, { base: M, } @@ -500,7 +502,7 @@ where impl Named for HitcountsMapObserver where - M: Named + serde::Serialize + serde::de::DeserializeOwned, + M: Named + Serialize + serde::de::DeserializeOwned, { #[inline] fn name(&self) -> &str { @@ -555,7 +557,7 @@ where 
impl HitcountsMapObserver where - M: serde::Serialize + serde::de::DeserializeOwned, + M: Serialize + serde::de::DeserializeOwned, { /// Creates a new [`MapObserver`] pub fn new(base: M) -> Self { @@ -569,7 +571,7 @@ where #[allow(clippy::unsafe_derive_deserialize)] pub struct MultiMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { maps: Vec>, intervals: IntervalTree, @@ -580,7 +582,7 @@ where impl<'a, I, S, T> Observer for MultiMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] @@ -591,7 +593,7 @@ where impl<'a, T> Named for MultiMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn name(&self) -> &str { @@ -601,7 +603,7 @@ where impl<'a, T> HasLen for MultiMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { #[inline] fn len(&self) -> usize { @@ -611,7 +613,7 @@ where impl<'a, T> MapObserver for MultiMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, { #[inline] fn map(&self) -> Option<&[T]> { @@ -693,7 +695,7 @@ where impl<'a, T> MultiMapObserver<'a, T> where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { /// Creates a new [`MultiMapObserver`] #[must_use] diff --git a/libafl/src/stages/calibrate.rs b/libafl/src/stages/calibrate.rs index d43a999760..824b7e0576 100644 --- a/libafl/src/stages/calibrate.rs +++ b/libafl/src/stages/calibrate.rs @@ -21,10 +21,11 @@ use core::{fmt::Debug, marker::PhantomData, time::Duration}; use num_traits::PrimInt; use serde::{Deserialize, Serialize}; +/// The calibration stage will measure the average exec time and the target's stability for this input. 
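As the comment says, calibration re-runs the same input and compares the resulting coverage maps; entries whose value changes between runs are counted as unstable and lower the reported stability. A toy sketch of that comparison:

    // Count map entries that differ between two runs of the same input.
    fn unstable_entries(first: &[u8], second: &[u8]) -> usize {
        first.iter().zip(second.iter()).filter(|(a, b)| a != b).count()
    }

    fn main() {
        let run_a = [1u8, 0, 3, 0, 7, 0];
        let run_b = [1u8, 0, 4, 0, 7, 1];
        let unstable = unstable_entries(&run_a, &run_b);
        assert_eq!(unstable, 2);
        // 4 of 6 entries agree, so this toy map is about 66.7% stable.
        let stability = 100.0 * (run_a.len() - unstable) as f64 / run_a.len() as f64;
        assert!((stability - 66.666).abs() < 0.01);
    }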
#[derive(Clone, Debug)] pub struct CalibrationStage where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, C: Corpus, E: Executor + HasObservers, EM: EventFirer, @@ -47,7 +48,7 @@ const CAL_STAGE_MAX: usize = 16; impl Stage for CalibrationStage where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, C: Corpus, E: Executor + HasObservers, EM: EventFirer, @@ -110,7 +111,7 @@ where let mut i = 1; let mut has_errors = false; let mut unstable_entries: usize = 0; - let map_len: usize = map_first.len() as usize; + let map_len: usize = map_first.len(); while i < iter { let input = state .corpus() @@ -208,8 +209,10 @@ where } } +/// The n fuzz size pub const N_FUZZ_SIZE: usize = 1 << 21; +/// The metadata used for power schedules #[derive(Serialize, Deserialize, Clone, Debug)] pub struct PowerScheduleMetadata { /// Measured exec time during calibration @@ -228,6 +231,7 @@ pub struct PowerScheduleMetadata { /// The metadata for runs in the calibration stage. impl PowerScheduleMetadata { + /// Creates a new [`struct@PowerScheduleMetadata`] #[must_use] pub fn new() -> Self { Self { @@ -240,56 +244,68 @@ impl PowerScheduleMetadata { } } + /// The measured exec time during calibration #[must_use] pub fn exec_time(&self) -> Duration { self.exec_time } + /// Set the measured exec pub fn set_exec_time(&mut self, time: Duration) { self.exec_time = time; } + /// The cycles #[must_use] pub fn cycles(&self) -> u64 { self.cycles } + /// Sets the cycles pub fn set_cycles(&mut self, val: u64) { self.cycles = val; } + /// The bitmap size #[must_use] pub fn bitmap_size(&self) -> u64 { self.bitmap_size } + /// Sets the bitmap size pub fn set_bitmap_size(&mut self, val: u64) { self.bitmap_size = val; } + /// The number of filled map entries #[must_use] pub fn bitmap_entries(&self) -> u64 { self.bitmap_entries } + /// Sets the number of filled map entries pub fn set_bitmap_entries(&mut self, val: u64) { self.bitmap_entries = val; } + /// The amount of queue cycles #[must_use] pub fn queue_cycles(&self) -> u64 { self.queue_cycles } + /// Sets the amount of queue cycles pub fn set_queue_cycles(&mut self, val: u64) { self.queue_cycles = val; } + /// Gets the `n_fuzz`. #[must_use] pub fn n_fuzz(&self) -> &[u32] { &self.n_fuzz } + /// Sets the `n_fuzz`. #[must_use] pub fn n_fuzz_mut(&mut self) -> &mut [u32] { &mut self.n_fuzz @@ -300,7 +316,7 @@ crate::impl_serdeany!(PowerScheduleMetadata); impl CalibrationStage where - T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, C: Corpus, E: Executor + HasObservers, EM: EventFirer, @@ -311,6 +327,7 @@ where S: HasCorpus + HasMetadata, Z: Evaluator, { + /// Create a new [`CalibrationStage`]. 
pub fn new(state: &mut S, map_observer_name: &O) -> Self { state.add_metadata::(PowerScheduleMetadata::new()); Self { diff --git a/libafl/src/stages/mod.rs b/libafl/src/stages/mod.rs index c6a4218f14..cd60b1553c 100644 --- a/libafl/src/stages/mod.rs +++ b/libafl/src/stages/mod.rs @@ -110,6 +110,8 @@ where } } +/// A [`Stage`] that will call a closure +#[derive(Debug)] pub struct ClosureStage where CB: FnMut(&mut Z, &mut E, &mut S, &mut EM, usize) -> Result<(), Error>, @@ -134,10 +136,12 @@ where } } +/// A stage that takes a closure impl ClosureStage where CB: FnMut(&mut Z, &mut E, &mut S, &mut EM, usize) -> Result<(), Error>, { + /// Create a new [`ClosureStage`] #[must_use] pub fn new(closure: CB) -> Self { Self { @@ -159,6 +163,7 @@ where /// Allows us to use a [`push::PushStage`] as a normal [`Stage`] #[allow(clippy::type_complexity)] +#[derive(Debug)] pub struct PushStageAdapter where C: Corpus, diff --git a/libafl/src/stages/power.rs b/libafl/src/stages/power.rs index b59b556242..eb88f33287 100644 --- a/libafl/src/stages/power.rs +++ b/libafl/src/stages/power.rs @@ -16,6 +16,8 @@ use crate::{ Error, }; +/// The power schedule to use +#[allow(missing_docs)] #[derive(Clone, Debug, PartialEq)] pub enum PowerSchedule { EXPLORE, @@ -193,6 +195,7 @@ where S: HasClientPerfMonitor + HasCorpus + HasMetadata, Z: Evaluator, { + /// Creates a new [`PowerMutationalStage`] pub fn new(mutator: M, strat: PowerSchedule, map_observer_name: &O) -> Self { Self { map_observer_name: map_observer_name.name().to_string(), diff --git a/libafl/src/stages/push/mutational.rs b/libafl/src/stages/push/mutational.rs index d4eb9bd07e..fabe4966a3 100644 --- a/libafl/src/stages/push/mutational.rs +++ b/libafl/src/stages/push/mutational.rs @@ -23,6 +23,7 @@ use crate::monitors::PerfFeature; use super::{PushStage, PushStageHelper, PushStageSharedState}; +/// The default maximum number of mutations to perform per input. pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: u64 = 128; /// A Mutational push stage is the stage in a fuzzing run that mutates inputs. 
/// Mutational push stages will usually have a range of mutations that are @@ -75,6 +76,7 @@ where Ok(1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS) as usize) } + /// Sets the current corpus index pub fn set_current_corpus_idx(&mut self, current_corpus_idx: usize) { self.current_corpus_idx = Some(current_corpus_idx); } @@ -150,7 +152,7 @@ where start_timer!(state); self.mutator - .mutate(state, &mut input, self.stage_idx as i32) + .mutate(state, &mut input, self.stage_idx) .unwrap(); mark_feature_time!(state, PerfFeature::Mutate); @@ -176,7 +178,7 @@ where start_timer!(state); self.mutator - .post_exec(state, self.stage_idx as i32, Some(self.testcases_done))?; + .post_exec(state, self.stage_idx, Some(self.testcases_done))?; mark_feature_time!(state, PerfFeature::MutatePostExec); self.testcases_done += 1; diff --git a/libafl/src/stages/sync.rs b/libafl/src/stages/sync.rs index b3d29feef8..488bb74f33 100644 --- a/libafl/src/stages/sync.rs +++ b/libafl/src/stages/sync.rs @@ -19,14 +19,17 @@ use crate::{ Error, }; -#[derive(Serialize, Deserialize)] +/// Metadata used to store information about disk sync time +#[derive(Serialize, Deserialize, Debug)] pub struct SyncFromDiskMetadata { + /// The last time the sync was done pub last_time: SystemTime, } crate::impl_serdeany!(SyncFromDiskMetadata); impl SyncFromDiskMetadata { + /// Create a new [`struct@SyncFromDiskMetadata`] #[must_use] pub fn new(last_time: SystemTime) -> Self { Self { last_time } @@ -34,6 +37,7 @@ impl SyncFromDiskMetadata { } /// A stage that loads testcases from disk to sync with other fuzzers such as AFL++ +#[derive(Debug)] pub struct SyncFromDiskStage where C: Corpus, diff --git a/libafl/src/stages/tracing.rs b/libafl/src/stages/tracing.rs index 41198ef1bd..fd45c05278 100644 --- a/libafl/src/stages/tracing.rs +++ b/libafl/src/stages/tracing.rs @@ -98,6 +98,7 @@ where } } + /// Gets the underlying tracer executor pub fn executor(&self) -> &TE { &self.tracer_executor } diff --git a/libafl/src/state/mod.rs b/libafl/src/state/mod.rs index 0f31d8ab9d..8a84f1c048 100644 --- a/libafl/src/state/mod.rs +++ b/libafl/src/state/mod.rs @@ -26,6 +26,9 @@ use crate::{ /// The maximum size of a testcase pub const DEFAULT_MAX_SIZE: usize = 1_048_576; +/// The [`State`] of the fuzzer +/// Contains all important information about the current run +/// Will be used to restart the fuzzing process at any time. pub trait State: Serialize + DeserializeOwned {} /// Trait for elements offering a corpus From 9f7638666890956d124b0c75f50d572877a9c03b Mon Sep 17 00:00:00 2001 From: Evan Richter Date: Sat, 1 Jan 2022 18:03:35 -0600 Subject: [PATCH 13/25] [libafl_qemu] prevent unneeded build.rs runs (#441) `libqasan/libqasan.so` never exists during a normal `cargo build` because the .so is built in the target_dir, not in the source directory. This was triggering cargo to rerun the build script every time a user of this library made an incremental change to their code. Pointing `rerun-if-changed` to a directory will make cargo rerun build.rs if any file in that directory changes.
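For illustration, a stripped-down build.rs relying on this cargo behavior could look roughly like the sketch below (a minimal sketch only, not the actual LibAFL QEMU build script, which additionally checks for tools and drives the QEMU/libqasan build):

    fn main() {
        // Watch the whole libqasan source directory: cargo reruns this build
        // script only when a file inside it actually changes, instead of
        // keying off a libqasan.so that never exists in the source tree.
        println!("cargo:rerun-if-changed=libqasan");

        // ... build libqasan / QEMU into the target directory here ...
    }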
--- libafl_qemu/build.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/libafl_qemu/build.rs b/libafl_qemu/build.rs index efb906b8f1..48f3baad49 100644 --- a/libafl_qemu/build.rs +++ b/libafl_qemu/build.rs @@ -76,13 +76,8 @@ fn main() { let qasan_dir = Path::new("libqasan"); let qasan_dir = fs::canonicalize(&qasan_dir).unwrap(); let src_dir = Path::new("src"); - //let cwd = env::current_dir().unwrap().to_string_lossy().to_string(); - println!("cargo:rerun-if-changed={}/libqasan.so", qasan_dir.display()); - println!( - "cargo:rerun-if-changed={}/libqasan.so", - target_dir.display() - ); + println!("cargo:rerun-if-changed=libqasan"); build_dep_check(&["git", "make"]); From efc804fe7d2ce816ba3e19cd286d05a5bd0f2c98 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Sun, 2 Jan 2022 17:52:44 +0100 Subject: [PATCH 14/25] Updated dependencies (#443) * updated dependencies * updated info in toml * Windows fixes * fixed immport * u32 -> i32 * ignore i32 overflows in constants * removed unused double allow --- fuzzers/frida_libpng/Cargo.toml | 4 +- fuzzers/fuzzbench/Cargo.toml | 2 +- fuzzers/fuzzbench_qemu/Cargo.toml | 2 +- libafl/Cargo.toml | 14 ++-- libafl/src/bolts/os/mod.rs | 3 +- libafl/src/bolts/os/windows_exceptions.rs | 96 +++++++++++------------ libafl/src/bolts/shmem.rs | 21 ++++- libafl_frida/Cargo.toml | 8 +- libafl_qemu/Cargo.toml | 6 +- libafl_sugar/Cargo.toml | 2 +- libafl_targets/Cargo.toml | 2 +- 11 files changed, 88 insertions(+), 72 deletions(-) diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml index bbd3ce266e..43f56f2b79 100644 --- a/fuzzers/frida_libpng/Cargo.toml +++ b/fuzzers/frida_libpng/Cargo.toml @@ -35,9 +35,9 @@ libafl_frida = { path = "../../libafl_frida", features = ["cmplog"] } libafl_targets = { path = "../../libafl_targets", features = ["sancov_cmplog"] } lazy_static = "1.4.0" libc = "0.2" -libloading = "0.7.0" +libloading = "0.7" num-traits = "0.2.14" -rangemap = "0.1.10" +rangemap = "0.1" structopt = "0.3.25" serde = "1.0" mimalloc = { version = "*", default-features = false } diff --git a/fuzzers/fuzzbench/Cargo.toml b/fuzzers/fuzzbench/Cargo.toml index e3410e1fcf..45bb98dd5b 100644 --- a/fuzzers/fuzzbench/Cargo.toml +++ b/fuzzers/fuzzbench/Cargo.toml @@ -25,7 +25,7 @@ libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_h # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } clap = { version = "3.0.0-rc.4", features = ["default"] } -nix = "0.23.0" +nix = "0.23" mimalloc = { version = "*", default-features = false } [lib] diff --git a/fuzzers/fuzzbench_qemu/Cargo.toml b/fuzzers/fuzzbench_qemu/Cargo.toml index 6182b85cec..b8b5240324 100644 --- a/fuzzers/fuzzbench_qemu/Cargo.toml +++ b/fuzzers/fuzzbench_qemu/Cargo.toml @@ -15,4 +15,4 @@ debug = true libafl = { path = "../../libafl/" } libafl_qemu = { path = "../../libafl_qemu/", features = ["x86_64"] } clap = { version = "3.0.0-rc.4", features = ["default"] } -nix = "0.23.0" +nix = "0.23" diff --git a/libafl/Cargo.toml b/libafl/Cargo.toml index a643145650..34a4520ff3 100644 --- a/libafl/Cargo.toml +++ b/libafl/Cargo.toml @@ -39,7 +39,7 @@ criterion = "0.3" # Benchmarking ahash = "0.7" # another hash fxhash = "0.2.1" # yet another hash xxhash-rust = { version = "0.8.2", features = ["xxh3"] } # xxh3 hashing for rust -serde_json = "1.0.60" +serde_json = "1.0" num_cpus = "1.0" # cpu count, for llmp example serial_test = "0.5" @@ -54,18 +54,18 @@ postcard = { version = "0.7", features = ["alloc"] } # no_std 
compatible serde s bincode = {version = "1.3", optional = true } static_assertions = "1.1.0" ctor = "0.1.20" -num_enum = { version = "0.5.1", default-features = false } +num_enum = { version = "0.5.4", default-features = false } typed-builder = "0.9.1" # Implement the builder pattern at compiletime ahash = { version = "0.7", default-features=false, features=["compile-time-rng"] } # The hash function already used in hashbrown intervaltree = { version = "0.2.7", default-features = false, features = ["serde"] } libafl_derive = { version = "0.7.0", optional = true, path = "../libafl_derive" } serde_json = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } # an easy way to debug print SerdeAnyMap -miniz_oxide = { version = "0.4.4", optional = true} +miniz_oxide = { version = "0.5", optional = true} core_affinity = { version = "0.5", git = "https://github.com/s1341/core_affinity_rs", rev = "6648a7a", optional = true } hostname = { version = "^0.3", optional = true } # Is there really no gethostname in the stdlib? -rand_core = { version = "0.5.1", optional = true } # This dependency allows us to export our RomuRand as rand::Rng. -nix = { version = "0.23.0", optional = true } +rand_core = { version = "0.5.1", optional = true } # This dependency allows us to export our RomuRand as rand::Rng. We cannot update to the latest version because it breaks compatibility to microsoft lain. +nix = { version = "0.23", optional = true } regex = { version = "1", optional = true } build_id = { version = "0.2.1", git = "https://github.com/domenukk/build_id", rev = "6a61943", optional = true } uuid = { version = "0.8.2", optional = true, features = ["serde", "v4"] } @@ -86,10 +86,10 @@ regex = "1.4.5" backtrace = "0.3" [target.'cfg(windows)'.dependencies] -windows = { version = "0.28.0", features = ["std", "Win32_Foundation", "Win32_System_Threading", "Win32_System_Diagnostics_Debug", "Win32_System_Kernel", "Win32_System_Memory", "Win32_Security"] } +windows = { version = "0.29.0", features = ["std", "Win32_Foundation", "Win32_System_Threading", "Win32_System_Diagnostics_Debug", "Win32_System_Kernel", "Win32_System_Memory", "Win32_Security"] } [target.'cfg(windows)'.build-dependencies] -windows = "0.28.0" +windows = "0.29.0" [[bench]] name = "rand_speeds" diff --git a/libafl/src/bolts/os/mod.rs b/libafl/src/bolts/os/mod.rs index 71394ed988..068865cbae 100644 --- a/libafl/src/bolts/os/mod.rs +++ b/libafl/src/bolts/os/mod.rs @@ -24,8 +24,9 @@ pub mod pipes; #[cfg(all(unix, feature = "std"))] use std::ffi::CString; +// Allow a few extra features we need for the whole module #[cfg(all(windows, feature = "std"))] -#[allow(missing_docs)] +#[allow(missing_docs, overflowing_literals)] pub mod windows_exceptions; #[cfg(unix)] diff --git a/libafl/src/bolts/os/windows_exceptions.rs b/libafl/src/bolts/os/windows_exceptions.rs index a970e0153a..e50152bc21 100644 --- a/libafl/src/bolts/os/windows_exceptions.rs +++ b/libafl/src/bolts/os/windows_exceptions.rs @@ -36,55 +36,55 @@ pub const SIGABRT: i32 = 22; pub const SIGABRT2: i32 = 22; // From https://github.com/wine-mirror/wine/blob/master/include/winnt.h#L611 -pub const STATUS_WAIT_0: u32 = 0x00000000; -pub const STATUS_ABANDONED_WAIT_0: u32 = 0x00000080; -pub const STATUS_USER_APC: u32 = 0x000000C0; -pub const STATUS_TIMEOUT: u32 = 0x00000102; -pub const STATUS_PENDING: u32 = 0x00000103; -pub const STATUS_SEGMENT_NOTIFICATION: u32 = 0x40000005; -pub const STATUS_FATAL_APP_EXIT: u32 = 0x40000015; -pub const STATUS_GUARD_PAGE_VIOLATION: u32 = 
0x80000001; -pub const STATUS_DATATYPE_MISALIGNMENT: u32 = 0x80000002; -pub const STATUS_BREAKPOINT: u32 = 0x80000003; -pub const STATUS_SINGLE_STEP: u32 = 0x80000004; -pub const STATUS_LONGJUMP: u32 = 0x80000026; -pub const STATUS_UNWIND_CONSOLIDATE: u32 = 0x80000029; -pub const STATUS_ACCESS_VIOLATION: u32 = 0xC0000005; -pub const STATUS_IN_PAGE_ERROR: u32 = 0xC0000006; -pub const STATUS_INVALID_HANDLE: u32 = 0xC0000008; -pub const STATUS_NO_MEMORY: u32 = 0xC0000017; -pub const STATUS_ILLEGAL_INSTRUCTION: u32 = 0xC000001D; -pub const STATUS_NONCONTINUABLE_EXCEPTION: u32 = 0xC0000025; -pub const STATUS_INVALID_DISPOSITION: u32 = 0xC0000026; -pub const STATUS_ARRAY_BOUNDS_EXCEEDED: u32 = 0xC000008C; -pub const STATUS_FLOAT_DENORMAL_OPERAND: u32 = 0xC000008D; -pub const STATUS_FLOAT_DIVIDE_BY_ZERO: u32 = 0xC000008E; -pub const STATUS_FLOAT_INEXACT_RESULT: u32 = 0xC000008F; -pub const STATUS_FLOAT_INVALID_OPERATION: u32 = 0xC0000090; -pub const STATUS_FLOAT_OVERFLOW: u32 = 0xC0000091; -pub const STATUS_FLOAT_STACK_CHECK: u32 = 0xC0000092; -pub const STATUS_FLOAT_UNDERFLOW: u32 = 0xC0000093; -pub const STATUS_INTEGER_DIVIDE_BY_ZERO: u32 = 0xC0000094; -pub const STATUS_INTEGER_OVERFLOW: u32 = 0xC0000095; -pub const STATUS_PRIVILEGED_INSTRUCTION: u32 = 0xC0000096; -pub const STATUS_STACK_OVERFLOW: u32 = 0xC00000FD; -pub const STATUS_DLL_NOT_FOUND: u32 = 0xC0000135; -pub const STATUS_ORDINAL_NOT_FOUND: u32 = 0xC0000138; -pub const STATUS_ENTRYPOINT_NOT_FOUND: u32 = 0xC0000139; -pub const STATUS_CONTROL_C_EXIT: u32 = 0xC000013A; -pub const STATUS_DLL_INIT_FAILED: u32 = 0xC0000142; -pub const STATUS_FLOAT_MULTIPLE_FAULTS: u32 = 0xC00002B4; -pub const STATUS_FLOAT_MULTIPLE_TRAPS: u32 = 0xC00002B5; -pub const STATUS_REG_NAT_CONSUMPTION: u32 = 0xC00002C9; -pub const STATUS_HEAP_CORRUPTION: u32 = 0xC0000374; -pub const STATUS_STACK_BUFFER_OVERRUN: u32 = 0xC0000409; -pub const STATUS_INVALID_CRUNTIME_PARAMETER: u32 = 0xC0000417; -pub const STATUS_ASSERTION_FAILURE: u32 = 0xC0000420; -pub const STATUS_SXS_EARLY_DEACTIVATION: u32 = 0xC015000F; -pub const STATUS_SXS_INVALID_DEACTIVATION: u32 = 0xC0150010; +pub const STATUS_WAIT_0: i32 = 0x00000000; +pub const STATUS_ABANDONED_WAIT_0: i32 = 0x00000080; +pub const STATUS_USER_APC: i32 = 0x000000C0; +pub const STATUS_TIMEOUT: i32 = 0x00000102; +pub const STATUS_PENDING: i32 = 0x00000103; +pub const STATUS_SEGMENT_NOTIFICATION: i32 = 0x40000005; +pub const STATUS_FATAL_APP_EXIT: i32 = 0x40000015; +pub const STATUS_GUARD_PAGE_VIOLATION: i32 = 0x80000001; +pub const STATUS_DATATYPE_MISALIGNMENT: i32 = 0x80000002; +pub const STATUS_BREAKPOINT: i32 = 0x80000003; +pub const STATUS_SINGLE_STEP: i32 = 0x80000004; +pub const STATUS_LONGJUMP: i32 = 0x80000026; +pub const STATUS_UNWIND_CONSOLIDATE: i32 = 0x80000029; +pub const STATUS_ACCESS_VIOLATION: i32 = 0xC0000005; +pub const STATUS_IN_PAGE_ERROR: i32 = 0xC0000006; +pub const STATUS_INVALID_HANDLE: i32 = 0xC0000008; +pub const STATUS_NO_MEMORY: i32 = 0xC0000017; +pub const STATUS_ILLEGAL_INSTRUCTION: i32 = 0xC000001D; +pub const STATUS_NONCONTINUABLE_EXCEPTION: i32 = 0xC0000025; +pub const STATUS_INVALID_DISPOSITION: i32 = 0xC0000026; +pub const STATUS_ARRAY_BOUNDS_EXCEEDED: i32 = 0xC000008C; +pub const STATUS_FLOAT_DENORMAL_OPERAND: i32 = 0xC000008D; +pub const STATUS_FLOAT_DIVIDE_BY_ZERO: i32 = 0xC000008E; +pub const STATUS_FLOAT_INEXACT_RESULT: i32 = 0xC000008F; +pub const STATUS_FLOAT_INVALID_OPERATION: i32 = 0xC0000090; +pub const STATUS_FLOAT_OVERFLOW: i32 = 0xC0000091; +pub const STATUS_FLOAT_STACK_CHECK: 
i32 = 0xC0000092; +pub const STATUS_FLOAT_UNDERFLOW: i32 = 0xC0000093; +pub const STATUS_INTEGER_DIVIDE_BY_ZERO: i32 = 0xC0000094; +pub const STATUS_INTEGER_OVERFLOW: i32 = 0xC0000095; +pub const STATUS_PRIVILEGED_INSTRUCTION: i32 = 0xC0000096; +pub const STATUS_STACK_OVERFLOW: i32 = 0xC00000FD; +pub const STATUS_DLL_NOT_FOUND: i32 = 0xC0000135; +pub const STATUS_ORDINAL_NOT_FOUND: i32 = 0xC0000138; +pub const STATUS_ENTRYPOINT_NOT_FOUND: i32 = 0xC0000139; +pub const STATUS_CONTROL_C_EXIT: i32 = 0xC000013A; +pub const STATUS_DLL_INIT_FAILED: i32 = 0xC0000142; +pub const STATUS_FLOAT_MULTIPLE_FAULTS: i32 = 0xC00002B4; +pub const STATUS_FLOAT_MULTIPLE_TRAPS: i32 = 0xC00002B5; +pub const STATUS_REG_NAT_CONSUMPTION: i32 = 0xC00002C9; +pub const STATUS_HEAP_CORRUPTION: i32 = 0xC0000374; +pub const STATUS_STACK_BUFFER_OVERRUN: i32 = 0xC0000409; +pub const STATUS_INVALID_CRUNTIME_PARAMETER: i32 = 0xC0000417; +pub const STATUS_ASSERTION_FAILURE: i32 = 0xC0000420; +pub const STATUS_SXS_EARLY_DEACTIVATION: i32 = 0xC015000F; +pub const STATUS_SXS_INVALID_DEACTIVATION: i32 = 0xC0150010; #[derive(Debug, TryFromPrimitive, Clone, Copy)] -#[repr(u32)] +#[repr(i32)] pub enum ExceptionCode { // From https://docs.microsoft.com/en-us/windows/win32/debug/getexceptioncode AccessViolation = STATUS_ACCESS_VIOLATION, @@ -157,7 +157,7 @@ pub static CRASH_EXCEPTIONS: &[ExceptionCode] = &[ impl PartialEq for ExceptionCode { fn eq(&self, other: &Self) -> bool { - *self as u32 == *other as u32 + *self as i32 == *other as i32 } } diff --git a/libafl/src/bolts/shmem.rs b/libafl/src/bolts/shmem.rs index 51f9d3befc..83f9c7048f 100644 --- a/libafl/src/bolts/shmem.rs +++ b/libafl/src/bolts/shmem.rs @@ -1071,7 +1071,11 @@ pub mod win32_shmem { Error, }; - use core::{ffi::c_void, ptr, slice}; + use core::{ + ffi::c_void, + fmt::{self, Debug, Formatter}, + ptr, slice, + }; use std::convert::TryInto; use uuid::Uuid; @@ -1086,7 +1090,7 @@ pub mod win32_shmem { }; /// The default Sharedmap impl for windows using shmctl & shmget - #[derive(Clone, Debug)] + #[derive(Clone)] pub struct Win32ShMem { id: ShMemId, handle: HANDLE, @@ -1094,6 +1098,17 @@ pub mod win32_shmem { map_size: usize, } + impl Debug for Win32ShMem { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("Win32ShMem") + .field("id", &self.id) + .field("handle", &self.handle.0) + .field("map", &self.map) + .field("map_size", &self.map_size) + .finish() + } + } + impl Win32ShMem { fn new_map(map_size: usize) -> Result { unsafe { @@ -1137,7 +1152,7 @@ pub mod win32_shmem { let map_str_bytes = id.id; // Unlike MapViewOfFile this one needs u32 let handle = OpenFileMappingA( - FILE_MAP_ALL_ACCESS.0, + FILE_MAP_ALL_ACCESS, BOOL(0), PSTR(&map_str_bytes as *const u8 as *mut u8), ); diff --git a/libafl_frida/Cargo.toml b/libafl_frida/Cargo.toml index a862bdbe05..3aef9c2849 100644 --- a/libafl_frida/Cargo.toml +++ b/libafl_frida/Cargo.toml @@ -21,16 +21,16 @@ cc = { version = "1.0", features = ["parallel"] } [dependencies] libafl = { path = "../libafl", version = "0.7.0", features = ["std", "libafl_derive"] } libafl_targets = { path = "../libafl_targets", version = "0.7.0", features = ["std", "sancov_cmplog"] } -nix = "0.23.0" +nix = "0.23" libc = "0.2" hashbrown = "0.11" -libloading = "0.7.0" -rangemap = "0.1.10" +libloading = "0.7" +rangemap = "0.1" frida-gum-sys = { version = "0.3", features = [ "auto-download", "event-sink", "invocation-listener"] } frida-gum = { version = "0.6.1", features = [ "auto-download", "event-sink", "invocation-listener"] } 
core_affinity = { version = "0.5", git = "https://github.com/s1341/core_affinity_rs", rev = "6648a7a" } regex = "1.4" -dynasmrt = "1.0.1" +dynasmrt = "1.2" capstone = "0.10.0" color-backtrace ={ version = "0.5", features = [ "resolve-modules" ] } termcolor = "1.1.2" diff --git a/libafl_qemu/Cargo.toml b/libafl_qemu/Cargo.toml index 135df1cfaf..ce838ff875 100644 --- a/libafl_qemu/Cargo.toml +++ b/libafl_qemu/Cargo.toml @@ -26,9 +26,9 @@ clippy = [] # special feature for clippy, don't use in normal projects§ libafl = { path = "../libafl", version = "0.7.0" } libafl_targets = { path = "../libafl_targets", version = "0.7.0" } serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib -hashbrown = { version = "0.9", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible +hashbrown = { version = "0.11", features = ["serde", "ahash-compile-time-rng"] } # A faster hashmap, nostd compatible num-traits = "0.2" -num_enum = "0.5.1" +num_enum = "0.5.4" goblin = "0.4.2" libc = "0.2" strum = "0.21" @@ -40,7 +40,7 @@ pyo3 = { version = "0.15", optional = true } [build-dependencies] cc = { version = "1.0" } which = "4.1" -pyo3-build-config = { version = "0.14.5", optional = true } +pyo3-build-config = { version = "0.15", optional = true } [lib] name = "libafl_qemu" diff --git a/libafl_sugar/Cargo.toml b/libafl_sugar/Cargo.toml index cece221df0..4c0f4fc65c 100644 --- a/libafl_sugar/Cargo.toml +++ b/libafl_sugar/Cargo.toml @@ -23,7 +23,7 @@ arm = ["libafl_qemu/arm"] # build qemu for arm aarch64 = ["libafl_qemu/aarch64"] # build qemu for aarch64 [build-dependencies] -pyo3-build-config = { version = "0.14.5", optional = true } +pyo3-build-config = { version = "0.15", optional = true } [dependencies] libafl = { path = "../libafl", version = "0.7.0" } diff --git a/libafl_targets/Cargo.toml b/libafl_targets/Cargo.toml index 6168b13539..01f635388e 100644 --- a/libafl_targets/Cargo.toml +++ b/libafl_targets/Cargo.toml @@ -27,7 +27,7 @@ clippy = [] # Ignore compiler warnings during clippy cc = { version = "1.0", features = ["parallel"] } [dependencies] -rangemap = "0.1.10" +rangemap = "0.1" libafl = { path = "../libafl", version = "0.7.0", features = [] } serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib # serde-big-array = "0.3.2" From af3d321213973189dfbef864d4035974326d095e Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Mon, 3 Jan 2022 00:47:17 +0100 Subject: [PATCH 15/25] Derive debug for all structs in LibAFL (#442) * documentation, warnings * fixed docs * docs * no_std * test * windows * nautilus docs * more fixes * more docs * nits * windows clippy * docs, windows * nits * debug all the things * derive debug for all core library components * Docu for libafl_targets * nits * reordered generics * add docs to frida, debug * nits * fixes * more docu for frida, nits * more docu * more docu * Sugar docs * debug for qemu * more debug * import debug * fmt * debug * anyap_debug feature no longer needed * tidy up unused fn * indicate if we left out values for struct debug * implement Debug for sugar * debug allthethings * ci --- docs/src/design/metadata.md | 2 +- fuzzers/forkserver_simple/src/main.rs | 4 +- fuzzers/frida_libpng/src/fuzzer.rs | 24 ------- fuzzers/libfuzzer_libpng_ctx/Cargo.toml | 2 +- fuzzers/libfuzzer_libpng_launcher/Cargo.toml | 2 +- fuzzers/tutorial/src/metadata.rs | 2 +- libafl/Cargo.toml | 5 +- libafl/src/bolts/launcher.rs | 22 ++++++ libafl/src/bolts/serdeany.rs | 22 +++--- 
libafl/src/executors/combined.rs | 8 ++- libafl/src/executors/command.rs | 29 +++++--- libafl/src/executors/forkserver.rs | 37 +++++++--- libafl/src/executors/inprocess.rs | 39 +++++++++- libafl/src/executors/mod.rs | 6 +- libafl/src/executors/shadow.rs | 21 ++++-- libafl/src/executors/timeout.rs | 39 +++++++++- libafl/src/executors/with_observers.rs | 19 +++-- libafl/src/feedbacks/map.rs | 8 +-- libafl/src/feedbacks/mod.rs | 34 ++++++--- libafl/src/mutators/mopt_mutator.rs | 2 +- libafl/src/observers/cmp.rs | 4 +- .../concolic/serialization_format.rs | 25 ++++++- libafl/src/observers/map.rs | 2 +- libafl/src/observers/mod.rs | 6 +- libafl/src/stages/tracing.rs | 4 +- libafl_cc/build.rs | 4 ++ libafl_cc/src/clang.rs | 10 ++- libafl_cc/src/lib.rs | 43 +++++++++++ libafl_derive/src/lib.rs | 47 +++++++++++- libafl_frida/src/alloc.rs | 47 ++++++++++-- libafl_frida/src/asan/asan_rt.rs | 28 +++++++- libafl_frida/src/asan/errors.rs | 3 +- libafl_frida/src/asan/hook_funcs.rs | 6 ++ libafl_frida/src/asan/mod.rs | 2 + libafl_frida/src/cmplog_rt.rs | 44 +++++++++--- libafl_frida/src/coverage_rt.rs | 9 +++ libafl_frida/src/drcov_rt.rs | 2 +- libafl_frida/src/executor.rs | 26 +++++-- libafl_frida/src/helper.rs | 72 ++++++++++--------- libafl_frida/src/lib.rs | 43 +++++++++++ libafl_qemu/src/asan.rs | 1 + libafl_qemu/src/cmplog.rs | 3 +- libafl_qemu/src/edges.rs | 3 +- libafl_qemu/src/emu.rs | 1 + libafl_qemu/src/executor.rs | 24 ++++++- libafl_qemu/src/helper.rs | 8 ++- libafl_qemu/src/snapshot.rs | 2 + libafl_sugar/src/forkserver.rs | 15 +++- libafl_sugar/src/inmemory.rs | 42 ++++++++++- libafl_sugar/src/lib.rs | 47 ++++++++++++ libafl_sugar/src/qemu.rs | 42 ++++++++++- libafl_targets/src/cmplog.rs | 19 ++++- libafl_targets/src/coverage.rs | 6 +- libafl_targets/src/drcov.rs | 3 + libafl_targets/src/lib.rs | 45 ++++++++++++ libafl_targets/src/sancov_8bit.rs | 2 + libafl_targets/src/sancov_cmp.rs | 10 +++ libafl_targets/src/sancov_pcguard.rs | 2 +- 58 files changed, 848 insertions(+), 181 deletions(-) diff --git a/docs/src/design/metadata.md b/docs/src/design/metadata.md index cbc84e05d1..aa3ffaa094 100644 --- a/docs/src/design/metadata.md +++ b/docs/src/design/metadata.md @@ -11,7 +11,7 @@ extern crate serde; use libafl::SerdeAny; use serde::{Serialize, Deserialize}; -#[derive(Serialize, Deserialize, SerdeAny)] +#[derive(Debug, Serialize, Deserialize, SerdeAny)] pub struct MyMetadata { //... 
} diff --git a/fuzzers/forkserver_simple/src/main.rs b/fuzzers/forkserver_simple/src/main.rs index 6f0dbb6f04..130a5ec9ea 100644 --- a/fuzzers/forkserver_simple/src/main.rs +++ b/fuzzers/forkserver_simple/src/main.rs @@ -65,12 +65,12 @@ pub fn main() { let mut shmem = StdShMemProvider::new().unwrap().new_map(MAP_SIZE).unwrap(); //let the forkserver know the shmid shmem.write_to_env("__AFL_SHM_ID").unwrap(); - let mut shmem_map = shmem.map_mut(); + let shmem_map = shmem.map_mut(); // Create an observation channel using the signals map let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new( "shared_mem", - &mut shmem_map, + shmem_map, )); // Create an observation channel to keep track of the execution time diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index ccf78c69b1..af7bbe0334 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -54,9 +54,6 @@ use libafl_targets::cmplog::{CmpLogObserver, CMPLOG_MAP}; #[cfg(unix)] use libafl_frida::asan::errors::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS}; -fn timeout_from_millis_str(time: &str) -> Result { - Ok(Duration::from_millis(time.parse()?)) -} #[derive(Debug, StructOpt)] #[structopt( @@ -113,27 +110,6 @@ struct Opt { )] output: PathBuf, - /* - #[structopt( - parse(try_from_str = timeout_from_millis_str), - short, - long, - help = "Set the exeucution timeout in milliseconds, default is 1000", - name = "TIMEOUT", - default_value = "1000" - )] - timeout: Duration, - - #[structopt( - parse(from_os_str), - short = "x", - long, - help = "Feed the fuzzer with an user-specified list of tokens (often called \"dictionary\"", - name = "TOKENS", - multiple = true - )] - tokens: Vec, - */ #[structopt( long, help = "The configuration this fuzzer runs with, for multiprocessing", diff --git a/fuzzers/libfuzzer_libpng_ctx/Cargo.toml b/fuzzers/libfuzzer_libpng_ctx/Cargo.toml index 452edd9560..ab8ad5a6f0 100644 --- a/fuzzers/libfuzzer_libpng_ctx/Cargo.toml +++ b/fuzzers/libfuzzer_libpng_ctx/Cargo.toml @@ -20,7 +20,7 @@ which = { version = "4.0.2" } num_cpus = "1.0" [dependencies] -libafl = { path = "../../libafl/", features = ["std", "anymap_debug", "derive", "llmp_compression", "introspection"] } +libafl = { path = "../../libafl/", features = ["std", "derive", "llmp_compression", "introspection"] } libafl_targets = { path = "../../libafl_targets/", features = ["libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } diff --git a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml index 7c70283460..f003e64ce9 100644 --- a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml +++ b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml @@ -20,7 +20,7 @@ which = { version = "4.0.2" } num_cpus = "1.0" [dependencies] -libafl = { path = "../../libafl/", features = ["std", "anymap_debug", "derive", "llmp_compression", "introspection"] } +libafl = { path = "../../libafl/", features = ["std", "derive", "llmp_compression", "introspection"] } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } diff --git a/fuzzers/tutorial/src/metadata.rs b/fuzzers/tutorial/src/metadata.rs index a31de01645..4ec380751c 100644 --- a/fuzzers/tutorial/src/metadata.rs +++ b/fuzzers/tutorial/src/metadata.rs @@ -13,7 +13,7 @@ use crate::input::PacketData; use serde::{Deserialize, Serialize}; 
-#[derive(SerdeAny, Serialize, Deserialize)] +#[derive(Debug, SerdeAny, Serialize, Deserialize)] pub struct PacketLenMetadata { pub length: u64, } diff --git a/libafl/Cargo.toml b/libafl/Cargo.toml index 34a4520ff3..4be578f646 100644 --- a/libafl/Cargo.toml +++ b/libafl/Cargo.toml @@ -12,9 +12,8 @@ edition = "2021" build = "build.rs" [features] -default = ["std", "anymap_debug", "derive", "llmp_compression", "rand_trait", "fork"] +default = ["std", "derive", "llmp_compression", "rand_trait", "fork"] std = ["serde_json", "serde_json/std", "hostname", "core_affinity", "nix", "serde/std", "bincode", "wait-timeout", "regex", "build_id", "uuid"] # print, env, launcher ... support -anymap_debug = ["serde_json"] # uses serde_json to Debug the anymap trait. Disable for smaller footprint. derive = ["libafl_derive"] # provide derive(SerdeAny) macro. fork = [] # uses the fork() syscall to spawn children, instead of launching a new command, if supported by the OS (has no effect on Windows, no_std). rand_trait = ["rand_core"] # If set, libafl's rand implementations will implement `rand::Rng` @@ -60,7 +59,7 @@ ahash = { version = "0.7", default-features=false, features=["compile-time-rng"] intervaltree = { version = "0.2.7", default-features = false, features = ["serde"] } libafl_derive = { version = "0.7.0", optional = true, path = "../libafl_derive" } -serde_json = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } # an easy way to debug print SerdeAnyMap +serde_json = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } miniz_oxide = { version = "0.5", optional = true} core_affinity = { version = "0.5", git = "https://github.com/s1341/core_affinity_rs", rev = "6648a7a", optional = true } hostname = { version = "^0.3", optional = true } # Is there really no gethostname in the stdlib? 
diff --git a/libafl/src/bolts/launcher.rs b/libafl/src/bolts/launcher.rs index d1995357e6..33e08b6449 100644 --- a/libafl/src/bolts/launcher.rs +++ b/libafl/src/bolts/launcher.rs @@ -24,6 +24,7 @@ use crate::{ Error, }; +use core::fmt::{self, Debug, Formatter}; #[cfg(feature = "std")] use core::marker::PhantomData; #[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] @@ -85,6 +86,27 @@ where phantom_data: PhantomData<(&'a I, &'a OT, &'a S, &'a SP)>, } +impl<'a, CF, I, MT, OT, S, SP> Debug for Launcher<'_, CF, I, MT, OT, S, SP> +where + CF: FnOnce(Option, LlmpRestartingEventManager, usize) -> Result<(), Error>, + I: Input, + OT: ObserversTuple + DeserializeOwned, + MT: Monitor + Clone, + SP: ShMemProvider + 'static, + S: DeserializeOwned, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("Launcher") + .field("configuration", &self.configuration) + .field("broker_port", &self.broker_port) + .field("core", &self.cores) + .field("spawn_broker", &self.spawn_broker) + .field("remote_broker_addr", &self.remote_broker_addr) + .field("stdout_file", &self.stdout_file) + .finish_non_exhaustive() + } +} + #[cfg(feature = "std")] impl<'a, CF, I, MT, OT, S, SP> Launcher<'a, CF, I, MT, OT, S, SP> where diff --git a/libafl/src/bolts/serdeany.rs b/libafl/src/bolts/serdeany.rs index 3a43bd2b49..ed6afd3825 100644 --- a/libafl/src/bolts/serdeany.rs +++ b/libafl/src/bolts/serdeany.rs @@ -3,7 +3,10 @@ use serde::{de::DeserializeSeed, Deserialize, Deserializer, Serialize, Serializer}; use alloc::boxed::Box; -use core::any::{Any, TypeId}; +use core::{ + any::{Any, TypeId}, + fmt::Debug, +}; // yolo @@ -30,7 +33,7 @@ pub fn unpack_type_id(id: TypeId) -> u64 { } /// A (de)serializable Any trait -pub trait SerdeAny: Any + erased_serde::Serialize { +pub trait SerdeAny: Any + erased_serde::Serialize + Debug { /// returns this as Any trait fn as_any(&self) -> &dyn Any; /// returns this as mutable Any trait @@ -40,11 +43,11 @@ pub trait SerdeAny: Any + erased_serde::Serialize { } /// Wrap a type for serialization -#[allow(missing_debug_implementations)] -pub struct Wrap<'a, T: ?Sized>(pub &'a T); +#[derive(Debug)] +pub struct Wrap<'a, T: ?Sized + Debug>(pub &'a T); impl<'a, T> Serialize for Wrap<'a, T> where - T: ?Sized + erased_serde::Serialize + 'a, + T: ?Sized + erased_serde::Serialize + 'a + Debug, { /// Serialize the type fn serialize(&self, serializer: S) -> Result @@ -191,9 +194,9 @@ macro_rules! create_serde_registry_for_trait { } } - #[derive(Serialize, Deserialize)] /// A (de)serializable anymap containing (de)serializable trait objects registered /// in the registry + #[derive(Debug, Serialize, Deserialize)] pub struct SerdeAnyMap { map: HashMap>, } @@ -207,6 +210,7 @@ macro_rules! create_serde_registry_for_trait { } } + /* #[cfg(feature = "anymap_debug")] impl fmt::Debug for SerdeAnyMap { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { @@ -220,7 +224,7 @@ macro_rules! create_serde_registry_for_trait { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "SerdeAnymap with {} elements", self.len()) } - } + }*/ #[allow(unused_qualifications)] impl SerdeAnyMap { @@ -318,8 +322,8 @@ macro_rules! create_serde_registry_for_trait { } /// A serializable [`HashMap`] wrapper for [`SerdeAny`] types, addressable by name. 
- #[allow(unused_qualifications, missing_debug_implementations)] - #[derive(Serialize, Deserialize)] + #[allow(unused_qualifications)] + #[derive(Debug, Serialize, Deserialize)] pub struct NamedSerdeAnyMap { map: HashMap>>, } diff --git a/libafl/src/executors/combined.rs b/libafl/src/executors/combined.rs index 9108ea6f90..8a091832c5 100644 --- a/libafl/src/executors/combined.rs +++ b/libafl/src/executors/combined.rs @@ -6,15 +6,16 @@ use crate::{ observers::ObserversTuple, Error, }; +use core::fmt::Debug; /// A [`CombinedExecutor`] wraps a primary executor, forwarding its methods, and a secondary one -#[allow(missing_debug_implementations)] -pub struct CombinedExecutor { +#[derive(Debug)] +pub struct CombinedExecutor { primary: A, secondary: B, } -impl CombinedExecutor { +impl CombinedExecutor { /// Create a new `CombinedExecutor`, wrapping the given `executor`s. pub fn new(primary: A, secondary: B) -> Self where @@ -56,6 +57,7 @@ where impl HasObservers for CombinedExecutor where A: HasObservers, + B: Debug, OT: ObserversTuple, { #[inline] diff --git a/libafl/src/executors/command.rs b/libafl/src/executors/command.rs index c0d630da32..24c8525422 100644 --- a/libafl/src/executors/command.rs +++ b/libafl/src/executors/command.rs @@ -1,5 +1,8 @@ //! The command executor executes a sub program for each run -use core::marker::PhantomData; +use core::{ + fmt::{self, Debug, Formatter}, + marker::PhantomData, +}; #[cfg(feature = "std")] use std::process::Child; @@ -15,15 +18,23 @@ use std::time::Duration; /// A `CommandExecutor` is a wrapper around [`std::process::Command`] to execute a target as a child process. /// Construct a `CommandExecutor` by implementing [`CommandConfigurator`] for a type of your choice and calling [`CommandConfigurator::into_executor`] on it. -#[allow(missing_debug_implementations)] -pub struct CommandExecutor { +pub struct CommandExecutor { inner: T, /// [`crate::observers::Observer`]s for this executor observers: OT, phantom: PhantomData<(EM, I, S, Z)>, } -impl CommandExecutor { +impl Debug for CommandExecutor { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("CommandExecutor") + .field("inner", &self.inner) + .field("observers", &self.observers) + .finish() + } +} + +impl CommandExecutor { /// Accesses the inner value pub fn inner(&mut self) -> &mut T { &mut self.inner @@ -32,7 +43,7 @@ impl CommandExecutor { // this only works on unix because of the reliance on checking the process signal for detecting OOM #[cfg(all(feature = "std", unix))] -impl Executor for CommandExecutor +impl Executor for CommandExecutor where I: Input, T: CommandConfigurator, @@ -72,7 +83,8 @@ where } #[cfg(all(feature = "std", unix))] -impl HasObservers for CommandExecutor +impl HasObservers + for CommandExecutor where I: Input, OT: ObserversTuple, @@ -94,6 +106,7 @@ where /// ``` /// use std::{io::Write, process::{Stdio, Command, Child}}; /// use libafl::{Error, inputs::{Input, HasTargetBytes}, executors::{Executor, command::CommandConfigurator}}; +/// #[derive(Debug)] /// struct MyExecutor; /// /// impl CommandConfigurator for MyExecutor { @@ -122,7 +135,7 @@ where /// } /// ``` #[cfg(all(feature = "std", unix))] -pub trait CommandConfigurator: Sized { +pub trait CommandConfigurator: Sized + Debug { /// Spawns a new process with the given configuration. fn spawn_child( &mut self, @@ -133,7 +146,7 @@ pub trait CommandConfigurator: Sized { ) -> Result; /// Create an `Executor` from this `CommandConfigurator`. 
- fn into_executor(self, observers: OT) -> CommandExecutor + fn into_executor(self, observers: OT) -> CommandExecutor where OT: ObserversTuple, { diff --git a/libafl/src/executors/forkserver.rs b/libafl/src/executors/forkserver.rs index 6ed6017554..f80d8eac8c 100644 --- a/libafl/src/executors/forkserver.rs +++ b/libafl/src/executors/forkserver.rs @@ -1,6 +1,10 @@ //! Expose an `Executor` based on a `Forkserver` in order to execute AFL/AFL++ binaries -use core::{marker::PhantomData, time::Duration}; +use core::{ + fmt::{self, Debug, Formatter}, + marker::PhantomData, + time::Duration, +}; use std::{ fs::{File, OpenOptions}, io::{self, prelude::*, ErrorKind, SeekFrom}, @@ -150,8 +154,9 @@ impl ConfigTarget for Command { } } -/// The [`OutFile`] to write to -#[allow(missing_debug_implementations)] +/// The [`OutFile`] to write input to. +/// The target/forkserver will read from this file. +#[derive(Debug)] pub struct OutFile { /// The file file: File, @@ -369,13 +374,13 @@ pub trait HasForkserver { } /// The timeout forkserver executor that wraps around the standard forkserver executor and sets a timeout before each run. -#[allow(missing_debug_implementations)] -pub struct TimeoutForkserverExecutor { +#[derive(Debug)] +pub struct TimeoutForkserverExecutor { executor: E, timeout: TimeSpec, } -impl TimeoutForkserverExecutor { +impl TimeoutForkserverExecutor { /// Create a new [`TimeoutForkserverExecutor`] pub fn new(executor: E, exec_tmout: Duration) -> Result { let milli_sec = exec_tmout.as_millis() as i64; @@ -384,7 +389,7 @@ impl TimeoutForkserverExecutor { } } -impl Executor for TimeoutForkserverExecutor +impl Executor for TimeoutForkserverExecutor where I: Input + HasTargetBytes, E: Executor + HasForkserver, @@ -482,7 +487,6 @@ where /// This [`Executor`] can run binaries compiled for AFL/AFL++ that make use of a forkserver. /// Shared memory feature is also available, but you have to set things up in your code. /// Please refer to AFL++'s docs. -#[allow(missing_debug_implementations)] pub struct ForkserverExecutor where I: Input + HasTargetBytes, @@ -497,6 +501,23 @@ where phantom: PhantomData<(I, S)>, } +impl Debug for ForkserverExecutor +where + I: Input + HasTargetBytes, + OT: ObserversTuple, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("ForkserverExecutor") + .field("target", &self.target) + .field("args", &self.args) + .field("out_file", &self.out_file) + .field("forkserver", &self.forkserver) + .field("observers", &self.observers) + .field("map", &self.map) + .finish() + } +} + impl ForkserverExecutor where I: Input + HasTargetBytes, diff --git a/libafl/src/executors/inprocess.rs b/libafl/src/executors/inprocess.rs index a58c827078..ac34079631 100644 --- a/libafl/src/executors/inprocess.rs +++ b/libafl/src/executors/inprocess.rs @@ -3,7 +3,12 @@ //! //! Needs the `fork` feature flag. -use core::{ffi::c_void, marker::PhantomData, ptr}; +use core::{ + ffi::c_void, + fmt::{self, Debug, Formatter}, + marker::PhantomData, + ptr, +}; #[cfg(any(unix, all(windows, feature = "std")))] use core::{ @@ -42,7 +47,6 @@ use crate::{ /// The inmem executor simply calls a target function, then returns afterwards. 
#[allow(dead_code)] -#[derive(Debug)] pub struct InProcessExecutor<'a, H, I, OT, S> where H: FnMut(&I) -> ExitKind, @@ -58,6 +62,20 @@ where phantom: PhantomData<(I, S)>, } +impl<'a, H, I, OT, S> Debug for InProcessExecutor<'a, H, I, OT, S> +where + H: FnMut(&I) -> ExitKind, + I: Input, + OT: ObserversTuple, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("InProcessExecutor") + .field("harness_fn", &"") + .field("observers", &self.observers) + .finish_non_exhaustive() + } +} + impl<'a, EM, H, I, OT, S, Z> Executor for InProcessExecutor<'a, H, I, OT, S> where H: FnMut(&I) -> ExitKind, @@ -982,7 +1000,6 @@ where /// [`InProcessForkExecutor`] is an executor that forks the current process before each execution. #[cfg(all(feature = "std", unix))] -#[allow(missing_debug_implementations)] pub struct InProcessForkExecutor<'a, H, I, OT, S, SP> where H: FnMut(&I) -> ExitKind, @@ -996,6 +1013,22 @@ where phantom: PhantomData<(I, S)>, } +#[cfg(all(feature = "std", unix))] +impl<'a, H, I, OT, S, SP> Debug for InProcessForkExecutor<'a, H, I, OT, S, SP> +where + H: FnMut(&I) -> ExitKind, + I: Input, + OT: ObserversTuple, + SP: ShMemProvider, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("InProcessForkExecutor") + .field("observers", &self.observers) + .field("shmem_provider", &self.shmem_provider) + .finish() + } +} + #[cfg(all(feature = "std", unix))] impl<'a, EM, H, I, OT, S, Z, SP> Executor for InProcessForkExecutor<'a, H, I, OT, S, SP> diff --git a/libafl/src/executors/mod.rs b/libafl/src/executors/mod.rs index 90c4688d65..bf2b977789 100644 --- a/libafl/src/executors/mod.rs +++ b/libafl/src/executors/mod.rs @@ -37,6 +37,7 @@ use crate::{ Error, }; +use core::fmt::Debug; use serde::{Deserialize, Serialize}; /// How an execution finished. @@ -57,7 +58,7 @@ pub enum ExitKind { crate::impl_serdeany!(ExitKind); /// Holds a tuple of Observers -pub trait HasObservers +pub trait HasObservers: Debug where OT: ObserversTuple, { @@ -69,7 +70,7 @@ where } /// An executor takes the given inputs, and runs the harness/target. -pub trait Executor +pub trait Executor: Debug where I: Input, { @@ -97,6 +98,7 @@ where /// A simple executor that does nothing. /// If intput len is 0, `run_target` will return Err +#[derive(Debug)] struct NopExecutor {} impl Executor for NopExecutor diff --git a/libafl/src/executors/shadow.rs b/libafl/src/executors/shadow.rs index 06210ec4bb..fcff96d608 100644 --- a/libafl/src/executors/shadow.rs +++ b/libafl/src/executors/shadow.rs @@ -1,6 +1,9 @@ //! 
A `ShadowExecutor` wraps an executor to have shadow observer that will not be considered by the feedbacks and the manager -use core::marker::PhantomData; +use core::{ + fmt::{self, Debug, Formatter}, + marker::PhantomData, +}; use crate::{ executors::{Executor, ExitKind, HasObservers}, @@ -10,8 +13,7 @@ use crate::{ }; /// A [`ShadowExecutor`] wraps an executor and a set of shadow observers -#[allow(missing_debug_implementations)] -pub struct ShadowExecutor { +pub struct ShadowExecutor { /// The wrapped executor executor: E, /// The shadow observers @@ -20,7 +22,16 @@ pub struct ShadowExecutor { phantom: PhantomData<(I, S)>, } -impl ShadowExecutor +impl Debug for ShadowExecutor { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("ShadowExecutor") + .field("executor", &self.executor) + .field("shadow_observers", &self.shadow_observers) + .finish() + } +} + +impl ShadowExecutor where SOT: ObserversTuple, { @@ -65,6 +76,8 @@ where impl HasObservers for ShadowExecutor where + I: Debug, + S: Debug, E: HasObservers, OT: ObserversTuple, SOT: ObserversTuple, diff --git a/libafl/src/executors/timeout.rs b/libafl/src/executors/timeout.rs index 612bed56df..93747bce79 100644 --- a/libafl/src/executors/timeout.rs +++ b/libafl/src/executors/timeout.rs @@ -1,7 +1,10 @@ //! A `TimeoutExecutor` sets a timeout before each target run #[cfg(any(windows, unix))] -use core::time::Duration; +use core::{ + fmt::{self, Debug, Formatter}, + time::Duration, +}; use crate::{ executors::{Executor, ExitKind, HasObservers}, @@ -41,8 +44,23 @@ struct Timeval { pub tv_usec: i64, } +#[cfg(unix)] +impl Debug for Timeval { + #[allow(clippy::cast_sign_loss)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "Timeval {{ tv_sec: {:?}, tv_usec: {:?} (tv: {:?}) }}", + self.tv_sec, + self.tv_usec, + Duration::new(self.tv_sec as _, (self.tv_usec * 1000) as _) + ) + } +} + #[repr(C)] #[cfg(unix)] +#[derive(Debug)] struct Itimerval { pub it_interval: Timeval, pub it_value: Timeval, @@ -74,7 +92,6 @@ pub(crate) unsafe fn windows_delete_timer_queue(tp_timer: *mut TP_TIMER) { } /// The timeout excutor is a wrapper that sets a timeout before each run -#[allow(missing_debug_implementations)] pub struct TimeoutExecutor { executor: E, #[cfg(unix)] @@ -87,6 +104,24 @@ pub struct TimeoutExecutor { critical: RTL_CRITICAL_SECTION, } +impl Debug for TimeoutExecutor { + #[cfg(windows)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("TimeoutExecutor") + .field("executor", &self.executor) + .field("milli_sec", &self.milli_sec) + .finish_non_exhaustive() + } + + #[cfg(unix)] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("TimeoutExecutor") + .field("executor", &self.executor) + .field("itimerval", &self.itimerval) + .finish() + } +} + #[cfg(windows)] #[allow(non_camel_case_types)] type PTP_TIMER_CALLBACK = unsafe extern "system" fn( diff --git a/libafl/src/executors/with_observers.rs b/libafl/src/executors/with_observers.rs index 7fbe684f5d..426e7f1de0 100644 --- a/libafl/src/executors/with_observers.rs +++ b/libafl/src/executors/with_observers.rs @@ -1,11 +1,17 @@ //! A wrapper for any [`Executor`] to make it implement [`HasObservers`] using a given [`ObserversTuple`]. 
-use crate::{inputs::Input, observers::ObserversTuple, Error}; -use super::{Executor, ExitKind, HasObservers}; +use core::fmt::Debug; + +use crate::{ + executors::{Executor, ExitKind, HasObservers}, + inputs::Input, + observers::ObserversTuple, + Error, +}; /// A wrapper for any [`Executor`] to make it implement [`HasObservers`] using a given [`ObserversTuple`]. -#[allow(missing_debug_implementations)] -pub struct WithObservers { +#[derive(Debug)] +pub struct WithObservers { executor: E, observers: OT, } @@ -14,6 +20,7 @@ impl Executor for WithObservers where I: Input, E: Executor, + OT: Debug, { fn run_target( &mut self, @@ -26,7 +33,7 @@ where } } -impl HasObservers for WithObservers +impl HasObservers for WithObservers where I: Input, OT: ObserversTuple, @@ -40,7 +47,7 @@ where } } -impl WithObservers { +impl WithObservers { /// Wraps the given [`Executor`] with the given [`ObserversTuple`] to implement [`HasObservers`]. /// /// If the executor already implements [`HasObservers`], then the original implementation will be overshadowed by diff --git a/libafl/src/feedbacks/map.rs b/libafl/src/feedbacks/map.rs index f95a6ded44..2fd6e24c5a 100644 --- a/libafl/src/feedbacks/map.rs +++ b/libafl/src/feedbacks/map.rs @@ -39,7 +39,7 @@ pub type MaxMapOneOrFilledFeedback = MapFeedback; /// A `Reducer` function is used to aggregate values for the novelty search -pub trait Reducer: Serialize + serde::de::DeserializeOwned + 'static +pub trait Reducer: Serialize + serde::de::DeserializeOwned + 'static + Debug where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { @@ -112,7 +112,7 @@ where } /// A `IsNovel` function is used to discriminate if a reduced value is considered novel. -pub trait IsNovel: Serialize + serde::de::DeserializeOwned + 'static +pub trait IsNovel: Serialize + serde::de::DeserializeOwned + 'static + Debug where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, { @@ -270,7 +270,7 @@ where impl FeedbackState for MapFeedbackState where - T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, + T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, { fn reset(&mut self) -> Result<(), Error> { self.history_map @@ -357,7 +357,7 @@ where O: MapObserver, N: IsNovel, I: Input, - S: HasFeedbackStates + HasClientPerfMonitor, + S: HasFeedbackStates + HasClientPerfMonitor + Debug, FT: FeedbackStatesTuple, { fn is_interesting( diff --git a/libafl/src/feedbacks/mod.rs b/libafl/src/feedbacks/mod.rs index 4923f687da..1647e5efd5 100644 --- a/libafl/src/feedbacks/mod.rs +++ b/libafl/src/feedbacks/mod.rs @@ -28,12 +28,16 @@ use crate::{ Error, }; -use core::{marker::PhantomData, time::Duration}; +use core::{ + fmt::{self, Debug, Formatter}, + marker::PhantomData, + time::Duration, +}; /// Feedbacks evaluate the observers. /// Basically, they reduce the information provided by an observer to a value, /// indicating the "interestingness" of the last run. 
-pub trait Feedback: Named +pub trait Feedback: Named + Debug where I: Input, S: HasClientPerfMonitor, @@ -103,7 +107,7 @@ where /// [`FeedbackState`] is the data associated with a [`Feedback`] that must persist as part /// of the fuzzer State -pub trait FeedbackState: Named + Serialize + serde::de::DeserializeOwned { +pub trait FeedbackState: Named + Serialize + serde::de::DeserializeOwned + Debug { /// Reset the internal state fn reset(&mut self) -> Result<(), Error> { Ok(()) @@ -111,7 +115,7 @@ pub trait FeedbackState: Named + Serialize + serde::de::DeserializeOwned { } /// A haskell-style tuple of feedback states -pub trait FeedbackStatesTuple: MatchName + Serialize + serde::de::DeserializeOwned { +pub trait FeedbackStatesTuple: MatchName + Serialize + serde::de::DeserializeOwned + Debug { /// Resets all the feedback states of the tuple fn reset_all(&mut self) -> Result<(), Error>; } @@ -134,7 +138,7 @@ where } /// A cobined feedback consisting of ultiple [`Feedback`]s -#[allow(missing_debug_implementations)] +#[derive(Debug)] pub struct CombinedFeedback where A: Feedback, @@ -190,7 +194,7 @@ where B: Feedback, FL: FeedbackLogic, I: Input, - S: HasClientPerfMonitor, + S: HasClientPerfMonitor + Debug, { fn is_interesting( &mut self, @@ -253,7 +257,7 @@ where } /// Logical combination of two feedbacks -pub trait FeedbackLogic: 'static +pub trait FeedbackLogic: 'static + Debug where A: Feedback, B: Feedback, @@ -545,7 +549,7 @@ pub type EagerOrFeedback = CombinedFeedback = CombinedFeedback; /// Compose feedbacks with an `NOT` operation -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct NotFeedback where A: Feedback, @@ -559,6 +563,20 @@ where phantom: PhantomData<(I, S)>, } +impl Debug for NotFeedback +where + A: Feedback, + I: Input, + S: HasClientPerfMonitor, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("NotFeedback") + .field("name", &self.name) + .field("first", &self.first) + .finish() + } +} + impl Feedback for NotFeedback where A: Feedback, diff --git a/libafl/src/mutators/mopt_mutator.rs b/libafl/src/mutators/mopt_mutator.rs index d2a49b59bc..c7bbc9ea17 100644 --- a/libafl/src/mutators/mopt_mutator.rs +++ b/libafl/src/mutators/mopt_mutator.rs @@ -130,7 +130,7 @@ impl Debug for MOpt { .field("\n\ncore_operator_cycles", &self.core_operator_cycles) .field("\n\ncore_operator_cycles_v2", &self.core_operator_cycles_v2) .field("\n\ncore_operator_cycles_v3", &self.core_operator_cycles_v3) - .finish() + .finish_non_exhaustive() } } diff --git a/libafl/src/observers/cmp.rs b/libafl/src/observers/cmp.rs index 024c9d38d3..5bc5be7a19 100644 --- a/libafl/src/observers/cmp.rs +++ b/libafl/src/observers/cmp.rs @@ -4,7 +4,7 @@ use alloc::{ string::{String, ToString}, vec::Vec, }; - +use core::fmt::Debug; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ @@ -79,7 +79,7 @@ impl CmpValuesMetadata { } /// A [`CmpMap`] traces comparisons during the current execution -pub trait CmpMap { +pub trait CmpMap: Debug { /// Get the number of cmps fn len(&self) -> usize; diff --git a/libafl/src/observers/concolic/serialization_format.rs b/libafl/src/observers/concolic/serialization_format.rs index 32c69dc21e..82e50d6be2 100644 --- a/libafl/src/observers/concolic/serialization_format.rs +++ b/libafl/src/observers/concolic/serialization_format.rs @@ -43,7 +43,10 @@ #![cfg(feature = "std")] -use std::io::{self, Cursor, Read, Seek, SeekFrom, Write}; +use std::{ + fmt::{self, Debug, Formatter}, + io::{self, Cursor, Read, Seek, SeekFrom, Write}, +}; use 
bincode::{DefaultOptions, Options}; @@ -56,13 +59,18 @@ fn serialization_options() -> DefaultOptions { } /// A `MessageFileReader` reads a stream of [`SymExpr`] and their corresponding [`SymExprRef`]s from any [`Read`]. -#[allow(missing_debug_implementations)] pub struct MessageFileReader { reader: R, deserializer_config: DefaultOptions, current_id: usize, } +impl Debug for MessageFileReader { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "MessageFileReader {{ current_id: {} }}", self.current_id) + } +} + impl MessageFileReader { /// Construct from the given reader. pub fn from_reader(reader: R) -> Self { @@ -204,7 +212,6 @@ impl MessageFileReader { /// A `MessageFileWriter` writes a stream of [`SymExpr`] to any [`Write`]. For each written expression, it returns /// a [`SymExprRef`] which should be used to refer back to it. -#[allow(missing_debug_implementations)] pub struct MessageFileWriter { id_counter: usize, writer: W, @@ -212,6 +219,18 @@ pub struct MessageFileWriter { serialization_options: DefaultOptions, } +impl Debug for MessageFileWriter +where + W: Write, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("MessageFileWriter") + .field("id_counter", &self.id_counter) + .field("writer_start_position", &self.writer_start_position) + .finish_non_exhaustive() + } +} + impl MessageFileWriter { /// Create a `MessageFileWriter` from the given [`Write`]. pub fn from_writer(mut writer: W) -> io::Result { diff --git a/libafl/src/observers/map.rs b/libafl/src/observers/map.rs index d203d41252..5819abd9b0 100644 --- a/libafl/src/observers/map.rs +++ b/libafl/src/observers/map.rs @@ -25,7 +25,7 @@ use crate::{ }; /// A [`MapObserver`] observes the static map, as oftentimes used for afl-like coverage information -pub trait MapObserver: HasLen + Named + Serialize + serde::de::DeserializeOwned +pub trait MapObserver: HasLen + Named + Serialize + serde::de::DeserializeOwned + Debug where T: PrimInt + Default + Copy + Debug, { diff --git a/libafl/src/observers/mod.rs b/libafl/src/observers/mod.rs index 1703ea42f6..f1f32a8d13 100644 --- a/libafl/src/observers/mod.rs +++ b/libafl/src/observers/mod.rs @@ -9,7 +9,7 @@ pub use cmp::*; pub mod concolic; use alloc::string::{String, ToString}; -use core::time::Duration; +use core::{fmt::Debug, time::Duration}; use serde::{Deserialize, Serialize}; use crate::{ @@ -22,7 +22,7 @@ use crate::{ /// Observers observe different information about the target. /// They can then be used by various sorts of feedback. -pub trait Observer: Named { +pub trait Observer: Named + Debug { /// The testcase finished execution, calculate any changes. /// Reserved for future use. #[inline] @@ -44,7 +44,7 @@ pub trait Observer: Named { } /// A haskell-style tuple of observers -pub trait ObserversTuple: MatchName { +pub trait ObserversTuple: MatchName + Debug { /// This is called right before the next execution. fn pre_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error>; diff --git a/libafl/src/stages/tracing.rs b/libafl/src/stages/tracing.rs index fd45c05278..166c8b5b39 100644 --- a/libafl/src/stages/tracing.rs +++ b/libafl/src/stages/tracing.rs @@ -1,6 +1,6 @@ //! The tracing stage can trace the target and enrich a testcase with metadata, for example for `CmpLog`. 
-use core::marker::PhantomData; +use core::{fmt::Debug, marker::PhantomData}; use crate::{ corpus::Corpus, @@ -119,7 +119,7 @@ where E: Executor + HasObservers, OT: ObserversTuple, SOT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus + Debug, { #[inline] fn perform( diff --git a/libafl_cc/build.rs b/libafl_cc/build.rs index 20ff3e79ef..dd52f6452f 100644 --- a/libafl_cc/build.rs +++ b/libafl_cc/build.rs @@ -104,7 +104,9 @@ fn main() { &mut clang_constants_file, "// These constants are autogenerated by build.rs + /// The path to the `clang` executable pub const CLANG_PATH: &str = {:?}; + /// The path to the `clang++` executable pub const CLANGXX_PATH: &str = {:?}; /// The size of the edges map @@ -165,7 +167,9 @@ fn main() { &mut clang_constants_file, "// These constants are autogenerated by build.rs +/// The path to the `clang` executable pub const CLANG_PATH: &str = \"clang\"; +/// The path to the `clang++` executable pub const CLANGXX_PATH: &str = \"clang++\"; " ) diff --git a/libafl_cc/src/clang.rs b/libafl_cc/src/clang.rs index 8cf15f1271..3931bff8dc 100644 --- a/libafl_cc/src/clang.rs +++ b/libafl_cc/src/clang.rs @@ -22,14 +22,19 @@ fn dll_extension<'a>() -> &'a str { include!(concat!(env!("OUT_DIR"), "/clang_constants.rs")); +/// The supported LLVM passes #[allow(clippy::upper_case_acronyms)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum LLVMPasses { //CmpLogIns, + /// The CmpLog pass CmpLogRtn, + /// The AFL coverage pass AFLCoverage, } impl LLVMPasses { + /// Gets the path of the LLVM pass #[must_use] pub fn path(&self) -> PathBuf { match self { @@ -43,6 +48,7 @@ impl LLVMPasses { /// Wrap Clang #[allow(clippy::struct_excessive_bools)] +#[derive(Debug)] pub struct ClangWrapper { is_silent: bool, optimize: bool, @@ -269,11 +275,13 @@ impl ClangWrapper { } } + /// Sets the wrapped `cc` compiler pub fn wrapped_cc(&mut self, cc: String) -> &'_ mut Self { self.wrapped_cc = cc; self } + /// Sets the wrapped `cxx` compiler pub fn wrapped_cxx(&mut self, cxx: String) -> &'_ mut Self { self.wrapped_cxx = cxx; self @@ -291,7 +299,7 @@ impl ClangWrapper { self } - // Add LLVM pass + /// Add LLVM pass pub fn add_pass(&mut self, pass: LLVMPasses) -> &'_ mut Self { self.passes.push(pass); self diff --git a/libafl_cc/src/lib.rs b/libafl_cc/src/lib.rs index f79d94aa89..c78300248f 100644 --- a/libafl_cc/src/lib.rs +++ b/libafl_cc/src/lib.rs @@ -1,5 +1,48 @@ //! 
Compiler Wrapper from `LibAFL` +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::unreadable_literal +)] +#![deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +)] +#![deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] + use std::{convert::Into, path::Path, process::Command, string::String, vec::Vec}; pub mod clang; diff --git a/libafl_derive/src/lib.rs b/libafl_derive/src/lib.rs index 066ad9b9e1..638cb0e092 100644 --- a/libafl_derive/src/lib.rs +++ b/libafl_derive/src/lib.rs @@ -1,8 +1,53 @@ -extern crate proc_macro; +//! Derives for `LibAFL` + +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::unreadable_literal +)] +#![deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +)] +#![deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] + use proc_macro::TokenStream; use quote::quote; use syn::{parse_macro_input, DeriveInput}; +/// Derive macro to implement `SerdeAny`, to use a type in a `SerdeAnyMap` #[proc_macro_derive(SerdeAny)] pub fn libafl_serdeany_derive(input: TokenStream) -> TokenStream { let name = parse_macro_input!(input as DeriveInput).ident; diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index 6badfdea44..1499e5852e 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -6,17 +6,28 @@ use nix::{ }; use backtrace::Backtrace; -#[cfg(unix)] +#[cfg(any( + target_os = "linux", + all(target_arch = "aarch64", target_os = "android") +))] use libc::{sysconf, _SC_PAGESIZE}; use rangemap::RangeSet; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, ffi::c_void, io}; +#[cfg(any( + target_os = "linux", + all(target_arch = "aarch64", target_os = "android") +))] +use std::io; +use std::{collections::BTreeMap, ffi::c_void}; use crate::{ asan::errors::{AsanError, AsanErrors}, FridaOptions, }; +/// An allocator wrapper with binary-only address sanitization +#[derive(Debug)] +#[allow(missing_docs)] pub struct Allocator { #[allow(dead_code)] options: FridaOptions, @@ -44,7 +55,9 @@ macro_rules! 
map_to_shadow { }; } +/// Metadata for an allocation #[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[allow(missing_docs)] pub struct AllocationMetadata { pub address: usize, pub size: usize, @@ -56,6 +69,21 @@ pub struct AllocationMetadata { } impl Allocator { + /// Creates a new [`Allocator`] (not supported on this platform!) + #[cfg(not(any( + target_os = "linux", + all(target_arch = "aarch64", target_os = "android") + )))] + #[must_use] + pub fn new(_: FridaOptions) -> Self { + todo!("Shadow region not yet supported for this platform!"); + } + + /// Creates a new [`Allocator`] + #[cfg(any( + target_os = "linux", + all(target_arch = "aarch64", target_os = "android") + ))] #[must_use] pub fn new(options: FridaOptions) -> Self { let ret = unsafe { sysconf(_SC_PAGESIZE) }; @@ -118,11 +146,6 @@ impl Allocator { shadow_bit = try_shadow_bit; } } - #[cfg(not(any( - target_os = "linux", - all(target_arch = "aarch64", target_os = "android") - )))] - todo!("Shadow region not yet supported for this platform!"); assert!(shadow_bit != 0); // attempt to pre-map the entire shadow-memory space @@ -188,6 +211,7 @@ impl Allocator { None } + /// Allocate a new allocation of the given size. #[must_use] #[allow(clippy::missing_safety_doc)] pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void { @@ -272,6 +296,7 @@ impl Allocator { address } + /// Releases the allocation at the given address. #[allow(clippy::missing_safety_doc)] pub unsafe fn release(&mut self, ptr: *mut c_void) { let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) { @@ -302,6 +327,7 @@ impl Allocator { Self::poison(shadow_mapping_start, metadata.size); } + /// Finds the metadata for the allocation at the given address. pub fn find_metadata( &mut self, ptr: usize, @@ -328,6 +354,7 @@ impl Allocator { closest } + /// Resets the allocator contents pub fn reset(&mut self) { let mut tmp_allocations = Vec::new(); for (address, mut allocation) in self.allocations.drain() { @@ -358,6 +385,7 @@ impl Allocator { self.total_allocation_size = 0; } + /// Gets the usable size of the allocation, by allocated pointer pub fn get_usable_size(&self, ptr: *mut c_void) -> usize { match self.allocations.get(&(ptr as usize)) { Some(metadata) => metadata.size, @@ -388,6 +416,7 @@ impl Allocator { } } + /// Poisonn an area in memory pub fn poison(start: usize, size: usize) { // println!("poisoning {:x} for {:x}", start, size / 8 + 1); unsafe { @@ -448,17 +477,21 @@ impl Allocator { (shadow_mapping_start, (end - start) / 8) } + /// Maps the address to a shadow address + #[inline] #[must_use] pub fn map_to_shadow(&self, start: usize) -> usize { map_to_shadow!(self, start) } + /// Checks if the currennt address is one of ours #[inline] pub fn is_managed(&self, ptr: *mut c_void) -> bool { //self.allocations.contains_key(&(ptr as usize)) self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr } + /// Checks if any of the allocations has not been freed pub fn check_for_leaks(&self) { for metadata in self.allocations.values() { if !metadata.freed { diff --git a/libafl_frida/src/asan/asan_rt.rs b/libafl_frida/src/asan/asan_rt.rs index 59cd2bb97c..1fbc0fc3e2 100644 --- a/libafl_frida/src/asan/asan_rt.rs +++ b/libafl_frida/src/asan/asan_rt.rs @@ -7,6 +7,7 @@ this helps finding mem errors early. 
*/ use backtrace::Backtrace; +use core::fmt::{self, Debug, Formatter}; use frida_gum::{ModuleDetails, NativePointer, RangeDetails}; use hashbrown::HashMap; use nix::sys::mman::{mmap, MapFlags, ProtFlags}; @@ -69,10 +70,12 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; #[cfg(not(target_vendor = "apple"))] const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS; -// sixteen general purpose registers are put in this order, rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi, r8-r15, plus instrumented rip, accessed memory addr and true rip +/// The count of registers that need to be saved by the asan runtime +/// sixteen general purpose registers are put in this order, rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi, r8-r15, plus instrumented rip, accessed memory addr and true rip #[cfg(target_arch = "x86_64")] pub const ASAN_SAVE_REGISTER_COUNT: usize = 19; +/// The registers that need to be saved by the asan runtime, as names #[cfg(target_arch = "x86_64")] pub const ASAN_SAVE_REGISTER_NAMES: [&str; ASAN_SAVE_REGISTER_COUNT] = [ "rax", @@ -96,6 +99,7 @@ pub const ASAN_SAVE_REGISTER_NAMES: [&str; ASAN_SAVE_REGISTER_COUNT] = [ "actual rip", ]; +/// The count of registers that need to be saved by the asan runtime #[cfg(target_arch = "aarch64")] pub const ASAN_SAVE_REGISTER_COUNT: usize = 32; @@ -128,6 +132,17 @@ pub struct AsanRuntime { shadow_check_func: Option bool>, } +impl Debug for AsanRuntime { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("AsanRuntime") + .field("stalked_addresses", &self.stalked_addresses) + .field("options", &self.options) + .field("module_map", &"") + .field("suppressed_addresses", &self.suppressed_addresses) + .finish_non_exhaustive() + } +} + impl AsanRuntime { /// Create a new `AsanRuntime` #[must_use] @@ -261,15 +276,18 @@ impl AsanRuntime { self.allocator.reset(); } + /// Gets the allocator #[must_use] pub fn allocator(&self) -> &Allocator { &self.allocator } + /// Gets the allocator, mut pub fn allocator_mut(&mut self) -> &mut Allocator { &mut self.allocator } + /// The function that checks the shadow byte #[must_use] pub fn shadow_check_func(&self) -> &Option bool> { &self.shadow_check_func @@ -332,7 +350,7 @@ impl AsanRuntime { .map_shadow_for_region(tls_start, tls_end, true); println!( "registering thread with stack {:x}:{:x} and tls {:x}:{:x}", - stack_start as usize, stack_end as usize, tls_start as usize, tls_end as usize + stack_start, stack_end, tls_start, tls_end ); } @@ -416,6 +434,7 @@ impl AsanRuntime { (start, end) } + /// Gets the current instruction pointer #[cfg(target_arch = "aarch64")] #[must_use] #[inline] @@ -423,6 +442,7 @@ impl AsanRuntime { Interceptor::current_invocation().cpu_context().pc() as usize } + /// Gets the current instruction pointer #[cfg(target_arch = "x86_64")] #[must_use] #[inline] @@ -447,7 +467,7 @@ impl AsanRuntime { unsafe extern "C" fn []($($param: $param_type),*) -> $return_type { let mut invocation = Interceptor::current_invocation(); let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); - let real_address = this.real_address_for_stalked(invocation.return_addr() as usize); + let real_address = this.real_address_for_stalked(invocation.return_addr()); if !this.suppressed_addresses.contains(&real_address) && this.module_map.as_ref().unwrap().find(real_address as u64).is_some() { this.[]($($param),*) } else { @@ -2118,6 +2138,7 @@ impl AsanRuntime { Err(()) } + /// Checks if the current instruction is interesting for address sanitization. 
#[cfg(all(target_arch = "x86_64", unix))] #[inline] #[allow(clippy::unused_self)] @@ -2182,6 +2203,7 @@ impl AsanRuntime { Err(()) } + /// Emits a asan shadow byte check. #[inline] #[allow(clippy::too_many_lines)] #[allow(clippy::too_many_arguments)] diff --git a/libafl_frida/src/asan/errors.rs b/libafl_frida/src/asan/errors.rs index f0e641764c..b44bb2fba3 100644 --- a/libafl_frida/src/asan/errors.rs +++ b/libafl_frida/src/asan/errors.rs @@ -1,3 +1,4 @@ +//! Errors that can be caught by the `libafl_frida` address sanitizer. #[cfg(target_arch = "x86_64")] use crate::asan::asan_rt::ASAN_SAVE_REGISTER_NAMES; use backtrace::Backtrace; @@ -548,7 +549,7 @@ impl AsanErrors { pub static mut ASAN_ERRORS: Option = None; /// An observer for frida address sanitizer `AsanError`s for a frida executor run -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] #[allow(clippy::unsafe_derive_deserialize)] pub struct AsanErrorsObserver { errors: OwnedPtr>, diff --git a/libafl_frida/src/asan/hook_funcs.rs b/libafl_frida/src/asan/hook_funcs.rs index 9bd2516bc3..80cd03ba63 100644 --- a/libafl_frida/src/asan/hook_funcs.rs +++ b/libafl_frida/src/asan/hook_funcs.rs @@ -1,3 +1,4 @@ +//! The allocator hooks for address sanitizer. use crate::{ alloc::Allocator, asan::{ @@ -1013,6 +1014,7 @@ impl AsanRuntime { unsafe { atoi(s) } } + /// Hooks `atol` #[inline] pub fn hook_atol(&mut self, s: *const c_char) -> i32 { extern "C" { @@ -1031,6 +1033,7 @@ impl AsanRuntime { unsafe { atol(s) } } + /// Hooks `atoll` #[inline] pub fn hook_atoll(&mut self, s: *const c_char) -> i64 { extern "C" { @@ -1049,6 +1052,7 @@ impl AsanRuntime { unsafe { atoll(s) } } + /// Hooks `wcslen` #[inline] pub fn hook_wcslen(&mut self, s: *const wchar_t) -> usize { extern "C" { @@ -1067,6 +1071,7 @@ impl AsanRuntime { size } + /// Hooks `wcscpy` #[inline] pub fn hook_wcscpy(&mut self, dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t { extern "C" { @@ -1098,6 +1103,7 @@ impl AsanRuntime { unsafe { wcscpy(dest, src) } } + /// Hooks `wcscmp` #[inline] pub fn hook_wcscmp(&mut self, s1: *const wchar_t, s2: *const wchar_t) -> i32 { extern "C" { diff --git a/libafl_frida/src/asan/mod.rs b/libafl_frida/src/asan/mod.rs index acf7989985..040461d325 100644 --- a/libafl_frida/src/asan/mod.rs +++ b/libafl_frida/src/asan/mod.rs @@ -1,3 +1,5 @@ +//! Address sanitization using [`frida`](https://frida.re/) pub mod asan_rt; pub mod errors; +#[allow(missing_docs)] pub mod hook_funcs; diff --git a/libafl_frida/src/cmplog_rt.rs b/libafl_frida/src/cmplog_rt.rs index 5a37774f85..835dc40e83 100644 --- a/libafl_frida/src/cmplog_rt.rs +++ b/libafl_frida/src/cmplog_rt.rs @@ -1,9 +1,15 @@ +//! Functionality for [`frida`](https://frida.re)-based binary-only `CmpLog`. +//! With it, a fuzzer can collect feedback about each compare that happenned in the target +//! This allows the fuzzer to potentially solve the compares, if a compare value is directly +//! related to the input. +//! Read the [`RedQueen`](https://www.ndss-symposium.org/ndss-paper/redqueen-fuzzing-with-input-to-state-correspondence/) paper for the general concepts. 
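For the general idea behind this runtime, consider a rough, self-contained sketch of RedQueen-style input-to-state replacement: if one operand of an observed comparison occurs verbatim in the input, substitute the other operand and re-run. The function name and the byte-matching strategy below are illustrative assumptions, not the mutator `LibAFL` actually ships:

    /// If one operand of an observed comparison appears verbatim in the input,
    /// replace it with the other operand, hoping to satisfy the compare.
    fn input_to_state_replace(input: &mut Vec<u8>, lhs: u64, rhs: u64) -> bool {
        let needle = lhs.to_le_bytes();
        let patch = rhs.to_le_bytes();
        if input.len() < needle.len() {
            return false;
        }
        for i in 0..=(input.len() - needle.len()) {
            if input[i..i + needle.len()] == needle {
                input[i..i + needle.len()].copy_from_slice(&patch);
                return true;
            }
        }
        false
    }

    fn main() {
        // Suppose the target executed `if value == 0x4141414141414141 { ... }`
        // and the compare was logged with operands (input_value, constant).
        let mut input = b"prefix\x00\x00\x00\x00\x00\x00\x00\x00suffix".to_vec();
        let observed_input_operand = u64::from_le_bytes([0; 8]);
        let wanted = 0x4141414141414141_u64;
        if input_to_state_replace(&mut input, observed_input_operand, wanted) {
            println!("mutated input: {:x?}", input);
        }
    }

In the real setup the operand pairs would come from the `CmpLog` map populated by the instrumentation above rather than from constants.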
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi}; +use libafl_targets; use libafl_targets::CMPLOG_MAP_W; use std::ffi::c_void; -extern crate libafl_targets; extern "C" { + /// Tracks cmplog instructions pub fn __libafl_targets_cmplog_instructions(k: u64, shape: u8, arg1: u64, arg2: u64); } @@ -16,8 +22,13 @@ use frida_gum::{ #[cfg(all(feature = "cmplog", target_arch = "aarch64"))] use crate::helper::FridaInstrumentationHelper; +#[cfg(all(feature = "cmplog", target_arch = "aarch64"))] +/// Speciial CmpLog Cases for `aarch64` +#[derive(Debug)] pub enum SpecialCmpLogCase { + /// Test bit and branch if zero Tbz, + /// Test bit and branch if not zero Tbnz, } @@ -27,21 +38,31 @@ use capstone::{ Capstone, Insn, }; +/// The type of an operand loggged during `CmpLog` +#[derive(Debug)] +#[cfg(all(feature = "cmplog", target_arch = "aarch64"))] +pub enum CmplogOperandType { + /// A Register + Regid(capstone::RegId), + /// An immediate value + Imm(u64), + /// A constant immediate value + Cimm(u64), + /// A memory operand + Mem(capstone::RegId, capstone::RegId, i32, u32), +} + +/// `Frida`-based binary-only innstrumentation that logs compares to the fuzzer +/// `LibAFL` can use this knowledge for powerful mutations. +#[derive(Debug)] pub struct CmpLogRuntime { ops_save_register_and_blr_to_populate: Option>, ops_handle_tbz_masking: Option>, ops_handle_tbnz_masking: Option>, } -#[cfg(all(feature = "cmplog", target_arch = "aarch64"))] -pub enum CmplogOperandType { - Regid(capstone::RegId), - Imm(u64), - Cimm(u64), - Mem(capstone::RegId, capstone::RegId, i32, u32), -} - impl CmpLogRuntime { + /// Create a new [`CmpLogRuntime`] #[must_use] pub fn new() -> CmpLogRuntime { Self { @@ -179,6 +200,9 @@ impl CmpLogRuntime { .into_boxed_slice(), ); } + + /// Initialize this `CmpLog` runtime. + /// This will generate the instrumentation blobs for the current arch. pub fn init(&mut self) { self.generate_instrumentation_blobs(); } @@ -204,9 +228,9 @@ impl CmpLogRuntime { self.ops_handle_tbnz_masking.as_ref().unwrap() } + /// Emit the instrumentation code which is responsible for opernads value extraction and cmplog map population #[cfg(all(feature = "cmplog", target_arch = "aarch64"))] #[inline] - /// Emit the instrumentation code which is responsible for opernads value extraction and cmplog map population pub fn emit_comparison_handling( &self, _address: u64, diff --git a/libafl_frida/src/coverage_rt.rs b/libafl_frida/src/coverage_rt.rs index a3634a4a14..7df6790752 100644 --- a/libafl_frida/src/coverage_rt.rs +++ b/libafl_frida/src/coverage_rt.rs @@ -1,3 +1,4 @@ +//! Functionality regarding binary-only coverage collection. 
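The `maybe_log` blob this runtime generates writes AFL-style edge coverage from inside the stalked code. Conceptually (and only conceptually; the real blob is emitted as machine code and differs in detail), the per-basic-block update resembles this sketch:

    const MAP_SIZE: usize = 64 * 1024;

    struct Coverage {
        map: [u8; MAP_SIZE],
        previous_pc: u64,
    }

    impl Coverage {
        fn new() -> Self {
            Self { map: [0; MAP_SIZE], previous_pc: 0 }
        }

        /// AFL-style edge hit: hash the (previous, current) block pair into
        /// the map and bump an 8-bit counter (wrapping, like AFL hitcounts).
        fn hit(&mut self, block_address: u64) {
            let idx = ((block_address ^ self.previous_pc) as usize) % MAP_SIZE;
            self.map[idx] = self.map[idx].wrapping_add(1);
            // Shift so that the edges A->B and B->A land in different cells.
            self.previous_pc = block_address >> 1;
        }
    }

    fn main() {
        let mut cov = Coverage::new();
        for addr in [0x1000_u64, 0x1040, 0x1000, 0x1040] {
            cov.hit(addr);
        }
        println!("non-zero cells: {}", cov.map.iter().filter(|&&c| c != 0).count());
    }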
use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi}; use std::ffi::c_void; @@ -11,6 +12,8 @@ use frida_gum::{instruction_writer::InstructionWriter, stalker::StalkerOutput}; /// (Default) map size for frida coverage reporting pub const MAP_SIZE: usize = 64 * 1024; +/// Frida binary-only coverage +#[derive(Debug)] pub struct CoverageRuntime { map: [u8; MAP_SIZE], previous_pc: u64, @@ -25,6 +28,7 @@ impl Default for CoverageRuntime { } impl CoverageRuntime { + /// Create a new coverage runtime #[must_use] pub fn new() -> Self { Self { @@ -35,13 +39,17 @@ impl CoverageRuntime { } } + /// Initialize the coverage runtime pub fn init(&mut self) { self.generate_maybe_log_blob(); } + /// Retrieve the coverage map pointer pub fn map_ptr_mut(&mut self) -> *mut u8 { self.map.as_mut_ptr() } + + /// Retrieve the `maybe_log` code blob, that will write coverage into the map #[must_use] pub fn blob_maybe_log(&self) -> &[u8] { self.blob_maybe_log.as_ref().unwrap() @@ -116,6 +124,7 @@ impl CoverageRuntime { self.blob_maybe_log = Some(ops_vec[..ops_vec.len() - 8].to_vec().into_boxed_slice()); } + /// Emits coverage mapping into the current basic block. #[inline] pub fn emit_coverage_mapping(&mut self, address: u64, output: &StalkerOutput) { let writer = output.writer(); diff --git a/libafl_frida/src/drcov_rt.rs b/libafl_frida/src/drcov_rt.rs index c30ba95c9b..cbf08d6089 100644 --- a/libafl_frida/src/drcov_rt.rs +++ b/libafl_frida/src/drcov_rt.rs @@ -9,7 +9,7 @@ use rangemap::RangeMap; use std::hash::Hasher; /// Generates `DrCov` traces -#[derive(Clone, Debug)] +#[derive(Debug, Clone)] pub struct DrCovRuntime { /// The basic blocks of this execution pub drcov_basic_blocks: Vec, diff --git a/libafl_frida/src/executor.rs b/libafl_frida/src/executor.rs index c71c5d82d2..41e04d63f7 100644 --- a/libafl_frida/src/executor.rs +++ b/libafl_frida/src/executor.rs @@ -1,13 +1,11 @@ use crate::helper::FridaHelper; -use std::{ffi::c_void, marker::PhantomData}; - +use core::fmt::{self, Debug, Formatter}; use frida_gum::{ stalker::{NoneEventSink, Stalker}, - Gum, NativePointer, + Gum, MemoryRange, NativePointer, }; - -use frida_gum::MemoryRange; +use std::{ffi::c_void, marker::PhantomData}; use libafl::{ executors::{Executor, ExitKind, HasObservers, InProcessExecutor}, @@ -22,6 +20,7 @@ use crate::asan::errors::ASAN_ERRORS; #[cfg(windows)] use libafl::executors::inprocess::{HasInProcessHandlers, InProcessHandlers}; +/// The [`FridaInProcessExecutor`] is an [`Executor`] that executes the target in the same process, usinig [`frida`](https://frida.re/) for binary-only instrumentation. 
pub struct FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT, S> where FH: FridaHelper<'b>, @@ -38,6 +37,22 @@ where _phantom: PhantomData<&'b u8>, } +impl<'a, 'b, 'c, FH, H, I, OT, S> Debug for FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT, S> +where + FH: FridaHelper<'b>, + H: FnMut(&I) -> ExitKind, + I: Input + HasTargetBytes, + OT: ObserversTuple, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("FridaInProcessExecutor") + .field("base", &self.base) + .field("helper", &self.helper) + .field("followed", &self.followed) + .finish_non_exhaustive() + } +} + impl<'a, 'b, 'c, EM, FH, H, I, OT, S, Z> Executor for FridaInProcessExecutor<'a, 'b, 'c, FH, H, I, OT, S> where @@ -107,6 +122,7 @@ where I: Input + HasTargetBytes, OT: ObserversTuple, { + /// Creates a new [`FridaInProcessExecutor`] pub fn new(gum: &'a Gum, base: InProcessExecutor<'a, H, I, OT, S>, helper: &'c mut FH) -> Self { let mut stalker = Stalker::new(gum); // Include the current module (the fuzzer) in stalked ranges. We clone the ranges so that diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 680fced438..98d1387022 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -2,53 +2,39 @@ use libafl::inputs::{HasTargetBytes, Input}; use libafl::Error; use libafl_targets::drcov::DrCovBasicBlock; +#[cfg(feature = "cmplog")] +use crate::cmplog_rt::CmpLogRuntime; +#[cfg(windows)] +use crate::FridaOptions; +#[cfg(unix)] +use crate::{asan::asan_rt::AsanRuntime, FridaOptions}; +use crate::{coverage_rt::CoverageRuntime, drcov_rt::DrCovRuntime}; #[cfg(target_arch = "aarch64")] use capstone::{ arch::{self, arm64::Arm64OperandType, ArchOperand::Arm64Operand, BuildsCapstone}, Capstone, Insn, }; - #[cfg(all(target_arch = "x86_64", unix))] use capstone::{ arch::{self, BuildsCapstone}, Capstone, RegId, }; - -#[cfg(target_arch = "aarch64")] -use num_traits::cast::FromPrimitive; - +use core::fmt::{self, Debug, Formatter}; #[cfg(target_arch = "aarch64")] use frida_gum::instruction_writer::Aarch64Register; - #[cfg(target_arch = "x86_64")] use frida_gum::instruction_writer::X86Register; - -use frida_gum::{ - instruction_writer::InstructionWriter, stalker::Transformer, ModuleDetails, ModuleMap, -}; - #[cfg(unix)] use frida_gum::CpuContext; - -use frida_gum::{Gum, Module, PageProtection}; - -use rangemap::RangeMap; - +use frida_gum::{ + instruction_writer::InstructionWriter, stalker::Transformer, Gum, Module, ModuleDetails, + ModuleMap, PageProtection, +}; #[cfg(unix)] use nix::sys::mman::{mmap, MapFlags, ProtFlags}; - -#[cfg(unix)] -use crate::{asan::asan_rt::AsanRuntime, FridaOptions}; - -#[cfg(windows)] -use crate::FridaOptions; - -use crate::drcov_rt::DrCovRuntime; - -use crate::coverage_rt::CoverageRuntime; - -#[cfg(feature = "cmplog")] -use crate::cmplog_rt::CmpLogRuntime; +#[cfg(target_arch = "aarch64")] +use num_traits::cast::FromPrimitive; +use rangemap::RangeMap; #[cfg(any(target_vendor = "apple"))] const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; @@ -56,7 +42,7 @@ const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS; /// An helper that feeds `FridaInProcessExecutor` with user-supplied instrumentation -pub trait FridaHelper<'a> { +pub trait FridaHelper<'a>: Debug { /// Access to the stalker `Transformer` fn transformer(&self) -> &Transformer<'a>; @@ -76,8 +62,10 @@ pub trait FridaHelper<'a> { /// pointer to the frida coverage map fn map_ptr_mut(&mut self) -> *mut u8; + /// Returns the mapped ranges of the target 
fn ranges(&self) -> &RangeMap; + /// Returns the mapped ranges of the target, mutable fn ranges_mut(&mut self) -> &mut RangeMap; } @@ -98,6 +86,23 @@ pub struct FridaInstrumentationHelper<'a> { options: &'a FridaOptions, } +impl Debug for FridaInstrumentationHelper<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut dbg_me = f.debug_struct("FridaInstrumentationHelper"); + dbg_me + .field("coverage_rt", &self.coverage_rt) + .field("capstone", &self.capstone) + .field("asan_runtime", &self.asan_runtime) + .field("drcov_runtime", &self.drcov_runtime) + .field("ranges", &self.ranges) + .field("module_map", &"") + .field("options", &self.options); + #[cfg(feature = "cmplog")] + dbg_me.field("cmplog_runtime", &self.cmplog_runtime); + dbg_me.finish() + } +} + impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> { fn transformer(&self) -> &Transformer<'a> { self.transformer.as_ref().unwrap() @@ -166,7 +171,7 @@ pub fn get_module_size(module_name: &str) -> usize { let mut code_size = 0; let code_size_ref = &mut code_size; Module::enumerate_ranges(module_name, PageProtection::ReadExecute, move |details| { - *code_size_ref = details.memory_range().size() as usize; + *code_size_ref = details.memory_range().size(); true }); @@ -467,8 +472,9 @@ impl<'a> FridaInstrumentationHelper<'a> { Aarch64Register::from_u32(regint as u32).unwrap() } - // frida registers: https://docs.rs/frida-gum/0.4.0/frida_gum/instruction_writer/enum.X86Register.html - // capstone registers: https://docs.rs/capstone-sys/0.14.0/capstone_sys/x86_reg/index.html + /// The writer registers + /// frida registers: + /// capstone registers: #[cfg(all(target_arch = "x86_64", unix))] #[must_use] #[inline] diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index 58fed4ff98..e8f01c5254 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -3,6 +3,49 @@ The frida executor is a binary-only mode for `LibAFL`. It can report coverage and, on supported architecutres, even reports memory access errors. 
*/ +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::unreadable_literal +)] +#![deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +)] +#![deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] + /// The frida-asan allocator #[cfg(unix)] pub mod alloc; diff --git a/libafl_qemu/src/asan.rs b/libafl_qemu/src/asan.rs index a77e07c52f..8e91798d5c 100644 --- a/libafl_qemu/src/asan.rs +++ b/libafl_qemu/src/asan.rs @@ -162,6 +162,7 @@ pub fn init_with_asan(args: &mut Vec, env: &mut [(String, String)]) -> E Emulator::new(args, env) } +#[derive(Debug)] // TODO intrumentation filter pub struct QemuAsanHelper { enabled: bool, diff --git a/libafl_qemu/src/cmplog.rs b/libafl_qemu/src/cmplog.rs index e3c1f11d72..c238380a3f 100644 --- a/libafl_qemu/src/cmplog.rs +++ b/libafl_qemu/src/cmplog.rs @@ -11,7 +11,7 @@ use crate::{ helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter}, }; -#[derive(Default, Serialize, Deserialize)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct QemuCmpsMapMetadata { pub map: HashMap, pub current_id: u64, @@ -29,6 +29,7 @@ impl QemuCmpsMapMetadata { libafl::impl_serdeany!(QemuCmpsMapMetadata); +#[derive(Debug)] pub struct QemuCmpLogHelper { filter: QemuInstrumentationFilter, } diff --git a/libafl_qemu/src/edges.rs b/libafl_qemu/src/edges.rs index c38a59f283..7da9b452f8 100644 --- a/libafl_qemu/src/edges.rs +++ b/libafl_qemu/src/edges.rs @@ -10,7 +10,7 @@ use crate::{ helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter}, }; -#[derive(Default, Serialize, Deserialize)] +#[derive(Debug, Default, Serialize, Deserialize)] pub struct QemuEdgesMapMetadata { pub map: HashMap<(u64, u64), u64>, pub current_id: u64, @@ -28,6 +28,7 @@ impl QemuEdgesMapMetadata { libafl::impl_serdeany!(QemuEdgesMapMetadata); +#[derive(Debug)] pub struct QemuEdgeCoverageHelper { filter: QemuInstrumentationFilter, } diff --git a/libafl_qemu/src/emu.rs b/libafl_qemu/src/emu.rs index 81ba5e842d..b4485bbc3f 100644 --- a/libafl_qemu/src/emu.rs +++ b/libafl_qemu/src/emu.rs @@ -293,6 +293,7 @@ impl Drop for GuestMaps { static mut EMULATOR_IS_INITIALIZED: bool = false; +#[derive(Debug)] pub struct Emulator { _private: (), } diff --git a/libafl_qemu/src/executor.rs b/libafl_qemu/src/executor.rs index 1c0abbc95c..9bdd0f89db 100644 --- a/libafl_qemu/src/executor.rs +++ b/libafl_qemu/src/executor.rs @@ -1,4 +1,10 @@ -use core::{ffi::c_void, mem::transmute, ptr}; +//! 
A `QEMU`-based executor for binary-only instrumentation in `LibAFL` +use core::{ + ffi::c_void, + fmt::{self, Debug, Formatter}, + mem::transmute, + ptr, +}; use libafl::{ corpus::Corpus, @@ -445,6 +451,22 @@ where inner: InProcessExecutor<'a, H, I, OT, S>, } +impl<'a, H, I, OT, QT, S> Debug for QemuExecutor<'a, H, I, OT, QT, S> +where + H: FnMut(&I) -> ExitKind, + I: Input, + OT: ObserversTuple, + QT: QemuHelperTuple, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("QemuExecutor") + .field("helpers", &self.helpers) + .field("emulator", &self.emulator) + .field("inner", &self.inner) + .finish() + } +} + impl<'a, H, I, OT, QT, S> QemuExecutor<'a, H, I, OT, QT, S> where H: FnMut(&I) -> ExitKind, diff --git a/libafl_qemu/src/helper.rs b/libafl_qemu/src/helper.rs index d8faee25ba..d4b383409d 100644 --- a/libafl_qemu/src/helper.rs +++ b/libafl_qemu/src/helper.rs @@ -1,12 +1,13 @@ +use core::{fmt::Debug, ops::Range}; use libafl::{ bolts::tuples::MatchFirstType, executors::ExitKind, inputs::Input, observers::ObserversTuple, }; -use std::ops::Range; use crate::{emu::Emulator, executor::QemuExecutor}; +/// A helper for `libafl_qemu`. // TODO remove 'static when specialization will be stable -pub trait QemuHelper: 'static +pub trait QemuHelper: 'static + Debug where I: Input, { @@ -23,7 +24,7 @@ where fn post_exec(&mut self, _emulator: &Emulator, _input: &I) {} } -pub trait QemuHelperTuple: MatchFirstType +pub trait QemuHelperTuple: MatchFirstType + Debug where I: Input, { @@ -82,6 +83,7 @@ where } } +#[derive(Debug)] pub enum QemuInstrumentationFilter { AllowList(Vec>), DenyList(Vec>), diff --git a/libafl_qemu/src/snapshot.rs b/libafl_qemu/src/snapshot.rs index 2948ae2e66..761a92d1ae 100644 --- a/libafl_qemu/src/snapshot.rs +++ b/libafl_qemu/src/snapshot.rs @@ -10,12 +10,14 @@ use crate::{ pub const SNAPSHOT_PAGE_SIZE: usize = 4096; +#[derive(Debug)] pub struct SnapshotPageInfo { pub addr: u64, pub dirty: bool, pub data: [u8; SNAPSHOT_PAGE_SIZE], } +#[derive(Debug)] // TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html pub struct QemuSnapshotHelper { pub access_cache: [u64; 4], diff --git a/libafl_sugar/src/forkserver.rs b/libafl_sugar/src/forkserver.rs index 3abbf1dc8e..fa220f3475 100644 --- a/libafl_sugar/src/forkserver.rs +++ b/libafl_sugar/src/forkserver.rs @@ -1,6 +1,7 @@ -use typed_builder::TypedBuilder; - +//! An `afl`-style forkserver fuzzer. +//! Use this if your target has complex state that needs to be reset. use std::{fs, net::SocketAddr, path::PathBuf, time::Duration}; +use typed_builder::TypedBuilder; use libafl::{ bolts::{ @@ -32,9 +33,11 @@ use libafl::{ use crate::{CORPUS_CACHE_SIZE, DEFAULT_TIMEOUT_SECS}; +/// The default coverage map size to use for forkserver targets pub const DEFAULT_MAP_SIZE: usize = 65536; -#[derive(TypedBuilder)] +/// Creates a Forkserver-based fuzzer. +#[derive(Debug, TypedBuilder)] pub struct ForkserverBytesCoverageSugar<'a, const MAP_SIZE: usize> { /// Laucher configuration (default is random) #[builder(default = None, setter(strip_option))] @@ -74,6 +77,7 @@ pub struct ForkserverBytesCoverageSugar<'a, const MAP_SIZE: usize> { #[allow(clippy::similar_names)] impl<'a, const MAP_SIZE: usize> ForkserverBytesCoverageSugar<'a, MAP_SIZE> { + /// Runs the fuzzer. 
#[allow(clippy::too_many_lines, clippy::similar_names)] pub fn run(&mut self) { let conf = match self.configuration.as_ref() { @@ -250,6 +254,7 @@ impl<'a, const MAP_SIZE: usize> ForkserverBytesCoverageSugar<'a, MAP_SIZE> { } } +/// The python bindings for this sugar #[cfg(feature = "python")] pub mod pybind { use crate::forkserver; @@ -257,6 +262,7 @@ pub mod pybind { use pyo3::prelude::*; use std::path::PathBuf; + /// Python bindings for the `LibAFL` forkserver sugar #[pyclass(unsendable)] struct ForkserverBytesCoverageSugar { input_dirs: Vec, @@ -267,6 +273,7 @@ pub mod pybind { #[pymethods] impl ForkserverBytesCoverageSugar { + /// Create a new [`ForkserverBytesCoverageSugar`] #[new] fn new( input_dirs: Vec, @@ -282,6 +289,7 @@ pub mod pybind { } } + /// Run the fuzzer #[allow(clippy::needless_pass_by_value)] pub fn run(&self, program: String, arguments: Vec) { forkserver::ForkserverBytesCoverageSugar::<{ forkserver::DEFAULT_MAP_SIZE }>::builder() @@ -296,6 +304,7 @@ pub mod pybind { } } + /// Register the module pub fn register(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::()?; Ok(()) diff --git a/libafl_sugar/src/inmemory.rs b/libafl_sugar/src/inmemory.rs index 041129524e..fd0f6f140d 100644 --- a/libafl_sugar/src/inmemory.rs +++ b/libafl_sugar/src/inmemory.rs @@ -1,6 +1,9 @@ -use typed_builder::TypedBuilder; +//! In-Memory fuzzing made easy. +//! Use this sugar for scaling `libfuzzer`-style fuzzers. +use core::fmt::{self, Debug, Formatter}; use std::{fs, net::SocketAddr, path::PathBuf, time::Duration}; +use typed_builder::TypedBuilder; use libafl::{ bolts::{ @@ -35,6 +38,8 @@ use libafl_targets::{CmpLogObserver, CMPLOG_MAP, EDGES_MAP, MAX_EDGES_NUM}; use crate::{CORPUS_CACHE_SIZE, DEFAULT_TIMEOUT_SECS}; +/// In-Memory fuzzing made easy. +/// Use this sugar for scaling `libfuzzer`-style fuzzers. #[derive(TypedBuilder)] pub struct InMemoryBytesCoverageSugar<'a, H> where @@ -56,6 +61,7 @@ where /// Flag if use CmpLog #[builder(default = false)] use_cmplog: bool, + /// The port used for communication between this fuzzer node and other fuzzer nodes #[builder(default = 1337_u16)] broker_port: u16, /// The list of cores to run on @@ -69,11 +75,39 @@ where harness: Option, } +impl Debug for InMemoryBytesCoverageSugar<'_, H> +where + H: FnMut(&[u8]), +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("InMemoryBytesCoverageSugar") + .field("configuration", &self.configuration) + .field("timeout", &self.timeout) + .field("input_dirs", &self.input_dirs) + .field("output_dir", &self.output_dir) + .field("tokens_file", &self.tokens_file) + .field("use_cmplog", &self.use_cmplog) + .field("broker_port", &self.broker_port) + .field("cores", &self.cores) + .field("remote_broker_addr", &self.remote_broker_addr) + .field( + "harness", + if self.harness.is_some() { + &"" + } else { + &"None" + }, + ) + .finish() + } +} + #[allow(clippy::similar_names)] impl<'a, H> InMemoryBytesCoverageSugar<'a, H> where H: FnMut(&[u8]), { + /// Run the fuzzer #[allow(clippy::too_many_lines, clippy::similar_names)] pub fn run(&mut self) { let conf = match self.configuration.as_ref() { @@ -270,6 +304,7 @@ where } } +/// Python bindings for this sugar #[cfg(feature = "python")] pub mod pybind { use crate::inmemory; @@ -278,6 +313,8 @@ pub mod pybind { use pyo3::types::PyBytes; use std::path::PathBuf; + /// In-Memory fuzzing made easy. + /// Use this sugar for scaling `libfuzzer`-style fuzzers. 
#[pyclass(unsendable)] struct InMemoryBytesCoverageSugar { input_dirs: Vec, @@ -288,6 +325,7 @@ pub mod pybind { #[pymethods] impl InMemoryBytesCoverageSugar { + /// Create a new [`InMemoryBytesCoverageSugar`] #[new] fn new( input_dirs: Vec, @@ -303,6 +341,7 @@ pub mod pybind { } } + /// Run the fuzzer #[allow(clippy::needless_pass_by_value)] pub fn run(&self, harness: PyObject) { inmemory::InMemoryBytesCoverageSugar::builder() @@ -323,6 +362,7 @@ pub mod pybind { } } + /// Register the module pub fn register(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::()?; Ok(()) diff --git a/libafl_sugar/src/lib.rs b/libafl_sugar/src/lib.rs index 6e16c960b5..1af670774f 100644 --- a/libafl_sugar/src/lib.rs +++ b/libafl_sugar/src/lib.rs @@ -1,5 +1,48 @@ //! Sugar API to simplify the life of the naive user of `LibAFL` +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::unreadable_literal +)] +#![deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +)] +#![deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] + pub mod inmemory; pub use inmemory::InMemoryBytesCoverageSugar; @@ -13,12 +56,16 @@ pub mod forkserver; #[cfg(target_family = "unix")] pub use forkserver::ForkserverBytesCoverageSugar; +/// Default timeout for a run pub const DEFAULT_TIMEOUT_SECS: u64 = 1200; +/// Default cache size for the corpus in memory. +/// Anything else will be on disk. pub const CORPUS_CACHE_SIZE: usize = 4096; #[cfg(feature = "python")] use pyo3::prelude::*; +/// The sugar python module #[cfg(feature = "python")] #[pymodule] #[pyo3(name = "libafl_sugar")] diff --git a/libafl_sugar/src/qemu.rs b/libafl_sugar/src/qemu.rs index ca1eea390b..ed1ff9fe42 100644 --- a/libafl_sugar/src/qemu.rs +++ b/libafl_sugar/src/qemu.rs @@ -1,6 +1,8 @@ -use typed_builder::TypedBuilder; - +//! In-memory fuzzer with `QEMU`-based binary-only instrumentation +//! +use core::fmt::{self, Debug, Formatter}; use std::{fs, net::SocketAddr, path::PathBuf, time::Duration}; +use typed_builder::TypedBuilder; use libafl::{ bolts::{ @@ -36,6 +38,8 @@ use libafl_targets::CmpLogObserver; use crate::{CORPUS_CACHE_SIZE, DEFAULT_TIMEOUT_SECS}; +/// Sugar to create a `libfuzzer`-style fuzzer that uses +/// `QEMU`-based binary-only instrumentation #[derive(TypedBuilder)] pub struct QemuBytesCoverageSugar<'a, H> where @@ -57,6 +61,8 @@ where /// Flag if use CmpLog #[builder(default = false)] use_cmplog: bool, + /// The port the fuzzing nodes communicate over + /// This will spawn a server on this port, and connect to other brokers using this port. 
#[builder(default = 1337_u16)] broker_port: u16, /// The list of cores to run on @@ -70,10 +76,38 @@ where harness: Option, } +impl<'a, H> Debug for QemuBytesCoverageSugar<'a, H> +where + H: FnMut(&[u8]), +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("QemuBytesCoverageSugar") + .field("configuration", &self.configuration) + .field("timeout", &self.timeout) + .field("input_dirs", &self.input_dirs) + .field("output_dir", &self.output_dir) + .field("tokens_file", &self.tokens_file) + .field("use_cmplog", &self.use_cmplog) + .field("broker_port", &self.broker_port) + .field("cores", &self.cores) + .field("remote_broker_addr", &self.remote_broker_addr) + .field( + "harness", + if self.harness.is_some() { + &"" + } else { + &"None" + }, + ) + .finish() + } +} + impl<'a, H> QemuBytesCoverageSugar<'a, H> where H: FnMut(&[u8]), { + /// Run the fuzzer #[allow(clippy::too_many_lines, clippy::similar_names)] pub fn run(&mut self, emulator: &Emulator) { let conf = match self.configuration.as_ref() { @@ -330,6 +364,7 @@ where } } +/// python bindings for this sugar #[cfg(feature = "python")] pub mod pybind { use crate::qemu; @@ -349,6 +384,7 @@ pub mod pybind { #[pymethods] impl QemuBytesCoverageSugar { + /// Create a new [`QemuBytesCoverageSugar`] #[new] fn new( input_dirs: Vec, @@ -364,6 +400,7 @@ pub mod pybind { } } + /// Run the fuzzer #[allow(clippy::needless_pass_by_value)] pub fn run(&self, emulator: &Emulator, harness: PyObject) { qemu::QemuBytesCoverageSugar::builder() @@ -384,6 +421,7 @@ pub mod pybind { } } + /// Register this class pub fn register(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::()?; Ok(()) diff --git a/libafl_targets/src/cmplog.rs b/libafl_targets/src/cmplog.rs index 65d4d4ec40..8108af6b35 100644 --- a/libafl_targets/src/cmplog.rs +++ b/libafl_targets/src/cmplog.rs @@ -1,5 +1,8 @@ //! `CmpLog` logs and reports back values touched during fuzzing. //! The values will then be used in subsequent mutations. +//! + +use core::fmt::{self, Debug, Formatter}; use libafl::{ bolts::{ownedref::OwnedRefMut, tuples::Named}, @@ -16,6 +19,7 @@ pub const CMPLOG_MAP_SIZE: usize = CMPLOG_MAP_W * CMPLOG_MAP_H; /// The size of a logged routine argument in bytes pub const CMPLOG_RTN_LEN: usize = 32; +/// The hight of a cmplog routine map pub const CMPLOG_MAP_RTN_H: usize = (CMPLOG_MAP_H * core::mem::size_of::()) / core::mem::size_of::(); @@ -26,6 +30,7 @@ pub const CMPLOG_KIND_RTN: u8 = 1; // void __libafl_targets_cmplog_instructions(uintptr_t k, uint8_t shape, uint64_t arg1, uint64_t arg2) extern "C" { + /// Logs an instruction for feedback during fuzzing pub fn __libafl_targets_cmplog_instructions(k: usize, shape: u8, arg1: u64, arg2: u64); } @@ -48,6 +53,7 @@ pub struct CmpLogInstruction(u64, u64); #[derive(Default, Debug, Clone, Copy)] pub struct CmpLogRoutine([u8; CMPLOG_RTN_LEN], [u8; CMPLOG_RTN_LEN]); +/// Union of cmplog operands and routines #[repr(C)] #[derive(Clone, Copy)] pub union CmpLogVals { @@ -55,9 +61,15 @@ pub union CmpLogVals { routines: [[CmpLogRoutine; CMPLOG_MAP_RTN_H]; CMPLOG_MAP_W], } +impl Debug for CmpLogVals { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("CmpLogVals").finish_non_exhaustive() + } +} + /// A struct containing the `CmpLog` metadata for a `LibAFL` run. 
#[repr(C)] -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub struct CmpLogMap { headers: [CmpLogHeader; CMPLOG_MAP_W], vals: CmpLogVals, @@ -109,8 +121,8 @@ impl CmpMap for CmpLogMap { self.vals.operands[idx][execution].1 as u32, )), 8 => CmpValues::U64(( - self.vals.operands[idx][execution].0 as u64, - self.vals.operands[idx][execution].1 as u64, + self.vals.operands[idx][execution].0, + self.vals.operands[idx][execution].1, )), other => panic!("Invalid CmpLog shape {}", other), } @@ -155,6 +167,7 @@ pub static mut libafl_cmplog_enabled: u8 = 0; pub use libafl_cmplog_enabled as CMPLOG_ENABLED; /// A [`CmpObserver`] observer for `CmpLog` +#[derive(Debug)] pub struct CmpLogObserver<'a> { map: OwnedRefMut<'a, CmpLogMap>, size: Option>, diff --git a/libafl_targets/src/coverage.rs b/libafl_targets/src/coverage.rs index bfffdc9519..49a8215fb5 100644 --- a/libafl_targets/src/coverage.rs +++ b/libafl_targets/src/coverage.rs @@ -12,22 +12,26 @@ pub use __afl_area_ptr_local as EDGES_MAP; pub static mut MAX_EDGES_NUM: usize = 0; extern "C" { + /// The area pointer points to the edges map. pub static mut __afl_area_ptr: *mut u8; } pub use __afl_area_ptr as EDGES_MAP_PTR; +/// The size of the map for edges. #[no_mangle] pub static mut __afl_map_size: usize = EDGES_MAP_SIZE; pub use __afl_map_size as EDGES_MAP_PTR_SIZE; +/// Gets the edges map from the `EDGES_MAP_PTR` raw pointer. #[must_use] pub fn edges_map_from_ptr<'a>() -> &'a mut [u8] { unsafe { - assert!(!EDGES_MAP_PTR.is_null()); + debug_assert!(!EDGES_MAP_PTR.is_null()); from_raw_parts_mut(EDGES_MAP_PTR, EDGES_MAP_PTR_SIZE) } } +/// Gets the current maximum number of edges tracked. #[must_use] pub fn edges_max_num() -> usize { unsafe { diff --git a/libafl_targets/src/drcov.rs b/libafl_targets/src/drcov.rs index 9176a1bc2b..ae480c6877 100644 --- a/libafl_targets/src/drcov.rs +++ b/libafl_targets/src/drcov.rs @@ -13,7 +13,9 @@ use std::{ /// A basic block struct #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct DrCovBasicBlock { + /// Start of this basic block pub start: usize, + /// End of this basic block pub end: usize, } @@ -26,6 +28,7 @@ struct DrCovBasicBlockEntry { } /// A writer for `DrCov` files +#[derive(Debug)] pub struct DrCovWriter<'a> { module_mapping: &'a RangeMap, } diff --git a/libafl_targets/src/lib.rs b/libafl_targets/src/lib.rs index 1376ac9c8a..387aad7324 100644 --- a/libafl_targets/src/lib.rs +++ b/libafl_targets/src/lib.rs @@ -1,5 +1,50 @@ //! `libafl_targets` contains runtime code, injected in the target itself during compilation. +//! +//! 
+#![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::unreadable_literal +)] +#![deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +)] +#![deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true +)] +#[allow(unused_imports)] #[macro_use] extern crate alloc; diff --git a/libafl_targets/src/sancov_8bit.rs b/libafl_targets/src/sancov_8bit.rs index 6a7fb63ecb..1bf9714780 100644 --- a/libafl_targets/src/sancov_8bit.rs +++ b/libafl_targets/src/sancov_8bit.rs @@ -2,6 +2,8 @@ use alloc::vec::Vec; use core::slice::from_raw_parts_mut; +/// A [`Vec`] of `8-bit-counters` maps for multiple modules. +/// They are initialized by calling [`__sanitizer_cov_8bit_counters_init`]( pub static mut COUNTERS_MAPS: Vec<&'static mut [u8]> = Vec::new(); /// Initialize the sancov `8-bit-counters` - usually called by `llvm`. diff --git a/libafl_targets/src/sancov_cmp.rs b/libafl_targets/src/sancov_cmp.rs index 9f78b643e2..fbb60a983c 100644 --- a/libafl_targets/src/sancov_cmp.rs +++ b/libafl_targets/src/sancov_cmp.rs @@ -1,15 +1,25 @@ +//! Sanitizer Coverage comparison functions extern "C" { + /// Trace an 8 bit `cmp` pub fn __sanitizer_cov_trace_cmp1(v0: u8, v1: u8); + /// Trace a 16 bit `cmp` pub fn __sanitizer_cov_trace_cmp2(v0: u16, v1: u16); + /// Trace a 32 bit `cmp` pub fn __sanitizer_cov_trace_cmp4(v0: u32, v1: u32); + /// Trace a 64 bit `cmp` pub fn __sanitizer_cov_trace_cmp8(v0: u64, v1: u64); + /// Trace an 8 bit constant `cmp` pub fn __sanitizer_cov_trace_const_cmp1(v0: u8, v1: u8); + /// Trace a 16 bit constant `cmp` pub fn __sanitizer_cov_trace_const_cmp2(v0: u16, v1: u16); + /// Trace a 32 bit constant `cmp` pub fn __sanitizer_cov_trace_const_cmp4(v0: u32, v1: u32); + /// Trace a 64 bit constant `cmp` pub fn __sanitizer_cov_trace_const_cmp8(v0: u64, v1: u64); + /// Trace a switch statement pub fn __sanitizer_cov_trace_switch(val: u64, cases: *const u64); } diff --git a/libafl_targets/src/sancov_pcguard.rs b/libafl_targets/src/sancov_pcguard.rs index 05daa71c75..11236ae5e8 100644 --- a/libafl_targets/src/sancov_pcguard.rs +++ b/libafl_targets/src/sancov_pcguard.rs @@ -39,7 +39,7 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) { } #[cfg(feature = "sancov_pcguard_hitcounts")] { - let val = (*EDGES_MAP.get_unchecked(pos) as u8).wrapping_add(1); + let val = (*EDGES_MAP.get_unchecked(pos)).wrapping_add(1); *EDGES_MAP.get_unchecked_mut(pos) = val; } } From b9acac46d92764fb24a40c7ba2bf2ff81e8bc7c6 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Mon, 3 Jan 2022 00:47:31 +0100 Subject: [PATCH 16/25] Cpu atomics for LLMP (#438) * atomic read for unmap * send and recv * switching to Atomics * atomics * bring back compiler_fence (maybe needed for signals?) 
* only acquire mem if new msg is available * unused compiler fence * caching for msg ids to not have to read atomics as much * fix build * speed++ * only in a spinloop for the second try * cleanup logs * docu, error log --- libafl/src/bolts/llmp.rs | 145 +++++++++++++++++++++++++-------------- 1 file changed, 92 insertions(+), 53 deletions(-) diff --git a/libafl/src/bolts/llmp.rs b/libafl/src/bolts/llmp.rs index 8be287c193..de7b75a189 100644 --- a/libafl/src/bolts/llmp.rs +++ b/libafl/src/bolts/llmp.rs @@ -63,9 +63,10 @@ use alloc::{string::String, vec::Vec}; use core::{ cmp::max, fmt::Debug, + hint, mem::size_of, ptr, slice, - sync::atomic::{compiler_fence, Ordering}, + sync::atomic::{fence, AtomicU16, AtomicU64, Ordering}, time::Duration, }; use serde::{Deserialize, Serialize}; @@ -281,7 +282,7 @@ impl Listener { Listener::Tcp(inner) => match inner.accept() { Ok(res) => ListenerStream::Tcp(res.0, res.1), Err(err) => { - dbg!("Ignoring failed accept", err); + println!("Ignoring failed accept: {:?}", err); ListenerStream::Empty() } }, @@ -422,11 +423,11 @@ fn new_map_size(max_alloc: usize) -> usize { /// `llmp_page->messages` unsafe fn _llmp_page_init(shmem: &mut SHM, sender: u32, allow_reinit: bool) { #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!("_llmp_page_init: shmem {}", &shmem); + println!("_llmp_page_init: shmem {:?}", &shmem); let map_size = shmem.len(); let page = shmem2page_mut(shmem); #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!("_llmp_page_init: page {}", *page); + println!("_llmp_page_init: page {:?}", &(*page)); if !allow_reinit { assert!( @@ -439,15 +440,15 @@ unsafe fn _llmp_page_init(shmem: &mut SHM, sender: u32, allow_reinit (*page).magic = PAGE_INITIALIZED_MAGIC; (*page).sender = sender; - ptr::write_volatile(ptr::addr_of_mut!((*page).current_msg_id), 0); + (*page).current_msg_id.store(0, Ordering::Relaxed); (*page).max_alloc_size = 0; // Don't forget to subtract our own header size (*page).size_total = map_size - LLMP_PAGE_HEADER_LEN; (*page).size_used = 0; (*(*page).messages.as_mut_ptr()).message_id = 0; (*(*page).messages.as_mut_ptr()).tag = LLMP_TAG_UNSET; - ptr::write_volatile(ptr::addr_of_mut!((*page).safe_to_unmap), 0); - ptr::write_volatile(ptr::addr_of_mut!((*page).sender_dead), 0); + (*page).safe_to_unmap.store(0, Ordering::Relaxed); + (*page).sender_dead.store(0, Ordering::Relaxed); assert!((*page).size_total != 0); } @@ -598,7 +599,7 @@ where match tcp_bind(port) { Ok(listener) => { // We got the port. We are the broker! 
:) - dbg!("We're the broker"); + println!("We're the broker"); let mut broker = LlmpBroker::new(shmem_provider)?; let _listener_thread = broker.launch_listener(Listener::Tcp(listener))?; @@ -670,7 +671,7 @@ where } /// Contents of the share mem pages, used by llmp internally -#[derive(Copy, Clone, Debug)] +#[derive(Debug)] #[repr(C)] pub struct LlmpPage { /// to check if this page got initialized properly @@ -680,11 +681,11 @@ pub struct LlmpPage { /// Set to != 1 by the receiver, once it got mapped /// It's not safe for the sender to unmap this page before /// (The os may have tidied up the memory when the receiver starts to map) - pub safe_to_unmap: u16, + pub safe_to_unmap: AtomicU16, /// Not used at the moment (would indicate that the sender is no longer there) - pub sender_dead: u16, + pub sender_dead: AtomicU16, /// The current message ID - pub current_msg_id: u64, + pub current_msg_id: AtomicU64, /// How much space is available on this page in bytes pub size_total: usize, /// How much space is used on this page in bytes @@ -816,6 +817,7 @@ where if self.safe_to_unmap() { return; } + hint::spin_loop(); // We log that we're looping -> see when we're blocking. #[cfg(feature = "std")] { @@ -831,9 +833,11 @@ where pub fn safe_to_unmap(&self) -> bool { let current_out_map = self.out_maps.last().unwrap(); unsafe { - compiler_fence(Ordering::SeqCst); // println!("Reading safe_to_unmap from {:?}", current_out_map.page() as *const _); - ptr::read_volatile(ptr::addr_of!((*current_out_map.page()).safe_to_unmap)) != 0 + (*current_out_map.page()) + .safe_to_unmap + .load(Ordering::Relaxed) + != 0 } } @@ -841,8 +845,9 @@ where /// # Safety /// If this method is called, the page may be unmapped before it is read by any receiver. pub unsafe fn mark_safe_to_unmap(&mut self) { - // No need to do this volatile, as we should be the same thread in this scenario. - (*self.out_maps.last_mut().unwrap().page_mut()).safe_to_unmap = 1; + (*self.out_maps.last_mut().unwrap().page_mut()) + .safe_to_unmap + .store(1, Ordering::Relaxed); } /// Reattach to a vacant `out_map`. @@ -877,7 +882,7 @@ where // Exclude the current page by splitting of the last element for this iter let mut unmap_until_excl = 0; for map in self.out_maps.split_last_mut().unwrap().1 { - if (*map.page_mut()).safe_to_unmap == 0 { + if (*map.page()).safe_to_unmap.load(Ordering::Relaxed) == 0 { // The broker didn't read this page yet, no more pages to unmap. break; } @@ -960,7 +965,7 @@ where #[cfg(all(feature = "llmp_debug", feature = "std"))] dbg!( page, - *page, + &(*page), (*page).size_used, buf_len_padded, EOP_MSG_SIZE, @@ -984,7 +989,7 @@ where * with 0... */ (*ret).message_id = if last_msg.is_null() { 1 - } else if (*page).current_msg_id == (*last_msg).message_id { + } else if (*page).current_msg_id.load(Ordering::Relaxed) == (*last_msg).message_id { (*last_msg).message_id + 1 } else { /* Oops, wrong usage! 
*/ @@ -1034,10 +1039,14 @@ where msg ))); } - (*msg).message_id = (*page).current_msg_id + 1; - compiler_fence(Ordering::SeqCst); - ptr::write_volatile(ptr::addr_of_mut!((*page).current_msg_id), (*msg).message_id); - compiler_fence(Ordering::SeqCst); + + (*msg).message_id = (*page).current_msg_id.load(Ordering::Relaxed) + 1; + + // Make sure all things have been written to the page, and commit the message to the page + (*page) + .current_msg_id + .store((*msg).message_id, Ordering::Release); + self.last_msg_sent = msg; self.has_unsent_message = false; Ok(()) @@ -1076,9 +1085,9 @@ where #[cfg(all(feature = "llmp_debug", feature = "std"))] println!("got new map at: {:?}", new_map); - ptr::write_volatile( - ptr::addr_of_mut!((*new_map).current_msg_id), - (*old_map).current_msg_id, + (*new_map).current_msg_id.store( + (*old_map).current_msg_id.load(Ordering::Relaxed), + Ordering::Relaxed, ); #[cfg(all(feature = "llmp_debug", feature = "std"))] @@ -1283,6 +1292,8 @@ where pub shmem_provider: SP, /// current page. After EOP, this gets replaced with the new one pub current_recv_map: LlmpSharedMap, + /// Caches the highest msg id we've seen so far + highest_msg_id: u64, } /// Receiving end of an llmp channel @@ -1328,6 +1339,7 @@ where current_recv_map, last_msg_recvd, shmem_provider, + highest_msg_id: 0, }) } @@ -1336,10 +1348,19 @@ where #[inline(never)] unsafe fn recv(&mut self) -> Result, Error> { /* DBG("recv %p %p\n", page, last_msg); */ - compiler_fence(Ordering::SeqCst); let mut page = self.current_recv_map.page_mut(); let last_msg = self.last_msg_recvd; - let current_msg_id = ptr::read_volatile(ptr::addr_of!((*page).current_msg_id)); + + let (current_msg_id, loaded) = + if !last_msg.is_null() && self.highest_msg_id > (*last_msg).message_id { + // read the msg_id from cache + (self.highest_msg_id, false) + } else { + // read the msg_id from shared map + let current_msg_id = (*page).current_msg_id.load(Ordering::Relaxed); + self.highest_msg_id = current_msg_id; + (current_msg_id, true) + }; // Read the message from the page let ret = if current_msg_id == 0 { @@ -1347,11 +1368,16 @@ where None } else if last_msg.is_null() { /* We never read a message from this queue. Return first. */ + fence(Ordering::Acquire); Some((*page).messages.as_mut_ptr()) } else if (*last_msg).message_id == current_msg_id { /* Oops! No new message! */ None } else { + if loaded { + // we read a higher id from this page, fetch. + fence(Ordering::Acquire); + } // We don't know how big the msg wants to be, assert at least the header has space. Some(llmp_next_msg_ptr_checked( &mut self.current_recv_map, @@ -1360,14 +1386,18 @@ where )?) }; - // Let's see what we go here. + // Let's see what we got. if let Some(msg) = ret { if !(*msg).in_map(&mut self.current_recv_map) { return Err(Error::IllegalState("Unexpected message in map (out of map bounds) - bugy client or tampered shared map detedted!".into())); } // Handle special, LLMP internal, messages. match (*msg).tag { - LLMP_TAG_UNSET => panic!("BUG: Read unallocated msg"), + LLMP_TAG_UNSET => panic!( + "BUG: Read unallocated msg (tag was {:x} - msg header: {:?}", + LLMP_TAG_UNSET, + &(*msg) + ), LLMP_TAG_EXITING => { // The other side is done. assert_eq!((*msg).buf_len, 0); @@ -1394,9 +1424,10 @@ where // Set last msg we received to null (as the map may no longer exist) self.last_msg_recvd = ptr::null(); + self.highest_msg_id = 0; // Mark the old page save to unmap, in case we didn't so earlier. 
- ptr::write_volatile(ptr::addr_of_mut!((*page).safe_to_unmap), 1); + (*page).safe_to_unmap.store(1, Ordering::Relaxed); // Map the new page. The old one should be unmapped by Drop self.current_recv_map = @@ -1406,7 +1437,7 @@ where )?); page = self.current_recv_map.page_mut(); // Mark the new page save to unmap also (it's mapped by us, the broker now) - ptr::write_volatile(ptr::addr_of_mut!((*page).safe_to_unmap), 1); + (*page).safe_to_unmap.store(1, Ordering::Relaxed); #[cfg(all(feature = "llmp_debug", feature = "std"))] println!( @@ -1443,13 +1474,13 @@ where current_msg_id = (*last_msg).message_id; } loop { - compiler_fence(Ordering::SeqCst); - if ptr::read_volatile(ptr::addr_of!((*page).current_msg_id)) != current_msg_id { + if (*page).current_msg_id.load(Ordering::Relaxed) != current_msg_id { return match self.recv()? { Some(msg) => Ok(msg), None => panic!("BUG: blocking llmp message should never be NULL"), }; } + hint::spin_loop(); } } @@ -1562,7 +1593,7 @@ where //let bt = Backtrace::new(); //#[cfg(not(debug_assertions))] //let bt = ""; - dbg!( + println!( "LLMP_DEBUG: Using existing map {} with size {}", existing_map.id(), existing_map.len(), @@ -1580,7 +1611,7 @@ where &ret.shmem ); #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!("PAGE: {}", *ret.page()); + println!("PAGE: {:?}", &(*ret.page())); } ret } @@ -1589,7 +1620,7 @@ where /// This indicates, that the page may safely be unmapped by the sender. pub fn mark_safe_to_unmap(&mut self) { unsafe { - ptr::write_volatile(ptr::addr_of_mut!((*self.page_mut()).safe_to_unmap), 1); + (*self.page_mut()).safe_to_unmap.store(1, Ordering::Relaxed); } } @@ -1767,6 +1798,7 @@ where current_recv_map: client_page, last_msg_recvd: ptr::null_mut(), shmem_provider: self.shmem_provider.clone(), + highest_msg_id: 0, }); } @@ -1858,7 +1890,6 @@ where where F: FnMut(ClientId, Tag, Flags, &[u8]) -> Result, { - compiler_fence(Ordering::SeqCst); for i in 0..self.llmp_clients.len() { unsafe { self.handle_new_msgs(i as u32, on_new_msg)?; @@ -1898,7 +1929,6 @@ where } while !self.is_shutting_down() { - compiler_fence(Ordering::SeqCst); self.once(on_new_msg) .expect("An error occurred when brokering. Exiting."); @@ -2010,7 +2040,7 @@ where .expect("Failed to map local page in broker 2 broker thread!"); #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!("B2B: Starting proxy loop :)"); + println!("B2B: Starting proxy loop :)"); loop { // first, forward all data we have. @@ -2019,13 +2049,16 @@ where .expect("Error reading from local page!") { if client_id == b2b_client_id { - dbg!("Ignored message we probably sent earlier (same id)", tag); + println!( + "Ignored message we probably sent earlier (same id), TAG: {:x}", + tag + ); continue; } #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!( - "Fowarding message via broker2broker connection", + println!( + "Fowarding message ({} bytes) via broker2broker connection", payload.len() ); // We got a new message! Forward... @@ -2053,8 +2086,8 @@ where ); #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!( - "Fowarding incoming message from broker2broker connection", + println!( + "Fowarding incoming message ({} bytes) from broker2broker connection", msg.payload.len() ); @@ -2065,7 +2098,7 @@ where .expect("B2B: Error forwarding message. Exiting."); } else { #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!("Received no input, timeout or closed. Looping back up :)"); + println!("Received no input, timeout or closed. 
Looping back up :)"); } } }); @@ -2075,7 +2108,7 @@ where }); #[cfg(all(feature = "llmp_debug", feature = "std"))] - dbg!("B2B: returning from loop. Success: {}", ret.is_ok()); + println!("B2B: returning from loop. Success: {}", ret.is_ok()); ret } @@ -2186,14 +2219,18 @@ where loop { match listener.accept() { ListenerStream::Tcp(mut stream, addr) => { - dbg!("New connection", addr, stream.peer_addr().unwrap()); + eprintln!( + "New connection: {:?}/{:?}", + addr, + stream.peer_addr().unwrap() + ); // Send initial information, without anyone asking. // This makes it a tiny bit easier to map the broker map for new Clients. match send_tcp_msg(&mut stream, &broker_hello) { Ok(()) => {} Err(e) => { - dbg!("Error sending initial hello: {:?}", e); + eprintln!("Error sending initial hello: {:?}", e); continue; } } @@ -2201,14 +2238,14 @@ where let buf = match recv_tcp_msg(&mut stream) { Ok(buf) => buf, Err(e) => { - dbg!("Error receving from tcp", e); + eprintln!("Error receving from tcp: {:?}", e); continue; } }; let req = match (&buf).try_into() { Ok(req) => req, Err(e) => { - dbg!("Could not deserialize tcp message", e); + eprintln!("Could not deserialize tcp message: {:?}", e); continue; } }; @@ -2290,6 +2327,7 @@ where current_recv_map: new_page, last_msg_recvd: ptr::null_mut(), shmem_provider: self.shmem_provider.clone(), + highest_msg_id: 0, }); } Err(e) => { @@ -2469,6 +2507,7 @@ where current_recv_map: initial_broker_map, last_msg_recvd: ptr::null_mut(), shmem_provider, + highest_msg_id: 0, }, }) } @@ -2576,7 +2615,7 @@ where match TcpStream::connect((_LLMP_CONNECT_ADDR, port)) { Ok(stream) => break stream, Err(_) => { - dbg!("Connection Refused.. Retrying"); + println!("Connection Refused.. Retrying"); } } } @@ -2670,7 +2709,7 @@ mod tests { .unwrap(); let tag: Tag = 0x1337; - let arr: [u8; 1] = [1u8]; + let arr: [u8; 1] = [1_u8]; // Send stuff client.send_buf(tag, &arr).unwrap(); From 9f6872ac68ac5e3d266d3098d43595b19615b242 Mon Sep 17 00:00:00 2001 From: Evan Richter Date: Mon, 3 Jan 2022 03:41:29 -0600 Subject: [PATCH 17/25] [libafl_qemu] fix i386 Regs values (#444) The `Regs` enum was defined out of order, leading to incorrect results from `emu.read_reg`. 
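Concretely (an illustrative sketch, not part of the patch itself): with the old ordering, `Regs::Ebx` was 1, but QEMU's i386 register file stores ECX at index 1, so asking for EBX silently returned ECX. A minimal standalone demonstration of the index mix-up, using a mock register file laid out in QEMU's order:

fn main() {
    // QEMU's i386 general-purpose register order (see target/i386/cpu.h).
    let qemu_regs = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"];
    let old_ebx = 1; // value of Regs::Ebx before this patch
    let new_ebx = 3; // value of Regs::Ebx after this patch
    assert_eq!(qemu_regs[old_ebx], "ecx"); // the bug: a read of EBX yields ECX
    assert_eq!(qemu_regs[new_ebx], "ebx"); // correct after the reordering
    println!("old index -> {}, fixed index -> {}", qemu_regs[old_ebx], qemu_regs[new_ebx]);
}
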
I found the correct ordering defined here: https://github.com/AFLplusplus/qemu-libafl-bridge/blob/master/target/i386/cpu.h#L46-L54 --- libafl_qemu/src/i386.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/libafl_qemu/src/i386.rs b/libafl_qemu/src/i386.rs index c25e1fac64..5f3cea5d4a 100644 --- a/libafl_qemu/src/i386.rs +++ b/libafl_qemu/src/i386.rs @@ -10,13 +10,13 @@ pub use syscall_numbers::x86::*; #[repr(i32)] pub enum Regs { Eax = 0, - Ebx = 1, - Ecx = 2, - Edx = 3, - Esi = 4, - Edi = 5, - Ebp = 6, - Esp = 7, + Ecx = 1, + Edx = 2, + Ebx = 3, + Esp = 4, + Ebp = 5, + Esi = 6, + Edi = 7, Eip = 8, Eflags = 9, } From 1608294d0baa70adcb70ebae80155331c015965c Mon Sep 17 00:00:00 2001 From: s1341 Date: Mon, 3 Jan 2022 11:41:52 +0200 Subject: [PATCH 18/25] Various fixes related to frida mode (#445) * Fix lint errors * Fix incorrect address for unfreed allocations when reseting * Use hash for edge ids * Fmt --- libafl_frida/src/alloc.rs | 6 ++++-- libafl_frida/src/asan/asan_rt.rs | 4 +++- libafl_frida/src/asan/errors.rs | 9 ++------- libafl_frida/src/coverage_rt.rs | 24 ++++++++++++++---------- libafl_frida/src/helper.rs | 6 ++++-- 5 files changed, 27 insertions(+), 22 deletions(-) diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index 1499e5852e..e97d6fe87c 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -292,13 +292,14 @@ impl Allocator { self.allocations .insert(metadata.address + self.page_size, metadata); - // println!("serving address: {:?}, size: {:x}", address, size); + //println!("serving address: {:?}, size: {:x}", address, size); address } /// Releases the allocation at the given address. #[allow(clippy::missing_safety_doc)] pub unsafe fn release(&mut self, ptr: *mut c_void) { + //println!("freeing address: {:?}", ptr); let mut metadata = if let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) { metadata } else { @@ -379,7 +380,8 @@ impl Allocator { } for allocation in tmp_allocations { - self.allocations.insert(allocation.address, allocation); + self.allocations + .insert(allocation.address + self.page_size, allocation); } self.total_allocation_size = 0; diff --git a/libafl_frida/src/asan/asan_rt.rs b/libafl_frida/src/asan/asan_rt.rs index 1fbc0fc3e2..c840107506 100644 --- a/libafl_frida/src/asan/asan_rt.rs +++ b/libafl_frida/src/asan/asan_rt.rs @@ -1084,7 +1084,7 @@ impl AsanRuntime { { index_reg -= capstone::arch::arm64::Arm64Reg::ARM64_REG_S0 as u16; } - fault_address += self.regs[index_reg as usize] as usize; + fault_address += self.regs[index_reg as usize]; } let backtrace = Backtrace::new(); @@ -2086,6 +2086,7 @@ impl AsanRuntime { self.blob_check_mem_64bytes.as_ref().unwrap() } + /// Determine if the instruction is 'interesting' for the purposes of ASAN #[cfg(target_arch = "aarch64")] #[inline] pub fn asan_is_interesting_instruction( @@ -2359,6 +2360,7 @@ impl AsanRuntime { writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, redzone_size); } + /// Emit a shadow memory check into the instruction stream #[cfg(target_arch = "aarch64")] #[inline] pub fn emit_shadow_check( diff --git a/libafl_frida/src/asan/errors.rs b/libafl_frida/src/asan/errors.rs index b44bb2fba3..80335efaaa 100644 --- a/libafl_frida/src/asan/errors.rs +++ b/libafl_frida/src/asan/errors.rs @@ -195,12 +195,7 @@ impl AsanErrors { .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) .unwrap(); } - write!( - output, - "x{:02}: 0x{:016x} ", - reg, error.registers[reg as usize] - ) - .unwrap(); + write!(output, 
"x{:02}: 0x{:016x} ", reg, error.registers[reg]).unwrap(); output.reset().unwrap(); if reg % 4 == 3 { writeln!(output).unwrap(); @@ -459,7 +454,7 @@ impl AsanErrors { .set_color(ColorSpec::new().set_fg(Some(Color::Yellow))) .unwrap(); } - write!(output, "x{:02}: 0x{:016x} ", reg, registers[reg as usize]).unwrap(); + write!(output, "x{:02}: 0x{:016x} ", reg, registers[reg]).unwrap(); output.reset().unwrap(); if reg % 4 == 3 { writeln!(output).unwrap(); diff --git a/libafl_frida/src/coverage_rt.rs b/libafl_frida/src/coverage_rt.rs index 7df6790752..a2bf1c7305 100644 --- a/libafl_frida/src/coverage_rt.rs +++ b/libafl_frida/src/coverage_rt.rs @@ -127,6 +127,15 @@ impl CoverageRuntime { /// Emits coverage mapping into the current basic block. #[inline] pub fn emit_coverage_mapping(&mut self, address: u64, output: &StalkerOutput) { + let tmp = (address >> 32) + ((address & 0xffffffff) << 32); + let bitflip = 0x1cad21f72c81017c ^ 0xdb979082e96dd4de; + let mut h64 = tmp ^ bitflip; + h64 = h64.rotate_left(49) & h64.rotate_left(24); + h64 *= 0x9FB21C651E98DF25; + h64 ^= (h64 >> 35) + 8; + h64 *= 0x9FB21C651E98DF25; + h64 ^= h64 >> 28; + let writer = output.writer(); #[allow(clippy::cast_possible_wrap)] // gum redzone size is u32, we need an offset as i32. let redzone_size = i64::from(frida_gum_sys::GUM_RED_ZONE_SIZE); @@ -153,10 +162,7 @@ impl CoverageRuntime { { writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, -(redzone_size)); writer.put_push_reg(X86Register::Rdi); - writer.put_mov_reg_address( - X86Register::Rdi, - ((address >> 4) ^ (address << 8)) & (MAP_SIZE - 1) as u64, - ); + writer.put_mov_reg_address(X86Register::Rdi, h64 & (MAP_SIZE as u64 - 1)); writer.put_call_address(self.current_log_impl); writer.put_pop_reg(X86Register::Rdi); writer.put_lea_reg_reg_offset(X86Register::Rsp, X86Register::Rsp, redzone_size); @@ -167,19 +173,17 @@ impl CoverageRuntime { Aarch64Register::Lr, Aarch64Register::X0, Aarch64Register::Sp, - -(16 + redzone_size) as i64, + -(16 + redzone_size), IndexMode::PreAdjust, ); - writer.put_ldr_reg_u64( - Aarch64Register::X0, - ((address >> 4) ^ (address << 8)) & (MAP_SIZE - 1) as u64, - ); + writer.put_ldr_reg_u64(Aarch64Register::X0, h64 & (MAP_SIZE as u64 - 1)); + writer.put_bl_imm(self.current_log_impl); writer.put_ldp_reg_reg_reg_offset( Aarch64Register::Lr, Aarch64Register::X0, Aarch64Register::Sp, - 16 + redzone_size as i64, + 16 + redzone_size, IndexMode::PostAdjust, ); } diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 98d1387022..8f85457c37 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -285,7 +285,7 @@ impl<'a> FridaInstrumentationHelper<'a> { for instruction in basic_block { let instr = instruction.instr(); let address = instr.address(); - // println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); + //println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); //println!( //"address: {:x} contains: {:?}", @@ -297,7 +297,7 @@ impl<'a> FridaInstrumentationHelper<'a> { if helper.ranges.contains_key(&(address as usize)) { if first { first = false; - // println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); + //println!("block @ {:x} transformed to {:x}", address, output.writer().pc()); if helper.options().coverage_enabled() { helper.coverage_rt.emit_coverage_mapping(address, &output); } @@ -399,6 +399,7 @@ impl<'a> FridaInstrumentationHelper<'a> { self.options } + /// Determine the width of the specified instruction #[cfg(target_arch = 
"aarch64")] #[inline] pub fn instruction_width(instr: &Insn, operands: &Vec) -> u32 { @@ -465,6 +466,7 @@ impl<'a> FridaInstrumentationHelper<'a> { 8 * num_registers } + /// Convert from a capstone register id to a frida InstructionWriter register index #[cfg(target_arch = "aarch64")] #[inline] pub fn writer_register(reg: capstone::RegId) -> Aarch64Register { From 2de729a77972169a9055da8c5838e1f21be2c80c Mon Sep 17 00:00:00 2001 From: Yerkebulan Tulibergenov Date: Mon, 3 Jan 2022 15:14:46 -0800 Subject: [PATCH 19/25] Fix a typo in TODO.md (#450) --- TODO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/TODO.md b/TODO.md index 663f85b96a..9d2d0b3690 100644 --- a/TODO.md +++ b/TODO.md @@ -2,7 +2,7 @@ - [ ] Objective-Specific Corpuses (named per objective) - [ ] Good documentation -- [ ] More informative outpus, deeper introspection (monitor, what mutation did x, etc.) +- [ ] More informative outputs, deeper introspection (monitor, what mutation did x, etc.) - [ ] Timeout handling for llmp clients (no ping for n seconds -> treat as disconnected) - [ ] Heap for signal handling (bumpallo or llmp directly?) - [x] Frida support for Windows From 674005fa61b6ed35cd140681293ed6f9a1c1097b Mon Sep 17 00:00:00 2001 From: Dongjia Zhang Date: Tue, 4 Jan 2022 08:20:29 +0900 Subject: [PATCH 20/25] Reorder type parameters in the correct order (#449) * alphabetical order * revert * revert * fix --- libafl/src/events/simple.rs | 4 ++-- libafl/src/executors/inprocess.rs | 2 +- libafl/src/executors/with_observers.rs | 4 ++-- libafl/src/feedbacks/mod.rs | 16 ++++++++-------- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index 31f3d04f6c..c4197aa196 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -282,7 +282,7 @@ where } #[cfg(feature = "std")] -impl<'a, C, E, I, S, SC, SP, MT, Z> EventProcessor +impl<'a, C, E, I, MT, S, SC, SP, Z> EventProcessor for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> where C: Corpus, @@ -297,7 +297,7 @@ where } #[cfg(feature = "std")] -impl<'a, C, E, I, S, SC, SP, MT, Z> EventManager +impl<'a, C, E, I, MT, S, SC, SP, Z> EventManager for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> where C: Corpus, diff --git a/libafl/src/executors/inprocess.rs b/libafl/src/executors/inprocess.rs index ac34079631..03c5531dce 100644 --- a/libafl/src/executors/inprocess.rs +++ b/libafl/src/executors/inprocess.rs @@ -1030,7 +1030,7 @@ where } #[cfg(all(feature = "std", unix))] -impl<'a, EM, H, I, OT, S, Z, SP> Executor +impl<'a, EM, H, I, OT, S, SP, Z> Executor for InProcessForkExecutor<'a, H, I, OT, S, SP> where H: FnMut(&I) -> ExitKind, diff --git a/libafl/src/executors/with_observers.rs b/libafl/src/executors/with_observers.rs index 426e7f1de0..da4b95f847 100644 --- a/libafl/src/executors/with_observers.rs +++ b/libafl/src/executors/with_observers.rs @@ -16,7 +16,7 @@ pub struct WithObservers { observers: OT, } -impl Executor for WithObservers +impl Executor for WithObservers where I: Input, E: Executor, @@ -33,7 +33,7 @@ where } } -impl HasObservers for WithObservers +impl HasObservers for WithObservers where I: Input, OT: ObserversTuple, diff --git a/libafl/src/feedbacks/mod.rs b/libafl/src/feedbacks/mod.rs index 1647e5efd5..1c463dffd4 100644 --- a/libafl/src/feedbacks/mod.rs +++ b/libafl/src/feedbacks/mod.rs @@ -139,7 +139,7 @@ where /// A cobined feedback consisting of ultiple [`Feedback`]s #[derive(Debug)] -pub struct CombinedFeedback +pub struct 
CombinedFeedback where A: Feedback, B: Feedback, @@ -155,7 +155,7 @@ where phantom: PhantomData<(I, S, FL)>, } -impl Named for CombinedFeedback +impl Named for CombinedFeedback where A: Feedback, B: Feedback, @@ -168,7 +168,7 @@ where } } -impl CombinedFeedback +impl CombinedFeedback where A: Feedback, B: Feedback, @@ -188,7 +188,7 @@ where } } -impl Feedback for CombinedFeedback +impl Feedback for CombinedFeedback where A: Feedback, B: Feedback, @@ -532,21 +532,21 @@ where /// Combine two feedbacks with an eager AND operation, /// will call all feedbacks functions even if not necessery to conclude the result -pub type EagerAndFeedback = CombinedFeedback; +pub type EagerAndFeedback = CombinedFeedback; /// Combine two feedbacks with an fast AND operation, /// might skip calling feedbacks functions if not necessery to conclude the result -pub type FastAndFeedback = CombinedFeedback; +pub type FastAndFeedback = CombinedFeedback; /// Combine two feedbacks with an eager OR operation, /// will call all feedbacks functions even if not necessery to conclude the result -pub type EagerOrFeedback = CombinedFeedback; +pub type EagerOrFeedback = CombinedFeedback; /// Combine two feedbacks with an fast OR operation, /// might skip calling feedbacks functions if not necessery to conclude the result /// This means any feedback that is not first might be skipped, use caution when using with /// `TimeFeedback` -pub type FastOrFeedback = CombinedFeedback; +pub type FastOrFeedback = CombinedFeedback; /// Compose feedbacks with an `NOT` operation #[derive(Clone)] From a1a6d5f478bd76e808d947b6d98763b3a53574c5 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 4 Jan 2022 16:20:52 +0100 Subject: [PATCH 21/25] =?UTF-8?q?Disable=20pita=20=F0=9F=A5=99=20compiler?= =?UTF-8?q?=20in=20debug=20mode=20(#454)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- libafl/src/lib.rs | 51 ++++++++++++++++++++++++--------------- libafl_cc/src/lib.rs | 51 ++++++++++++++++++++++++--------------- libafl_derive/src/lib.rs | 51 ++++++++++++++++++++++++--------------- libafl_frida/src/lib.rs | 51 ++++++++++++++++++++++++--------------- libafl_sugar/src/lib.rs | 51 ++++++++++++++++++++++++--------------- libafl_targets/src/lib.rs | 51 ++++++++++++++++++++++++--------------- 6 files changed, 192 insertions(+), 114 deletions(-) diff --git a/libafl/src/lib.rs b/libafl/src/lib.rs index 15d516e819..358b3c8482 100644 --- a/libafl/src/lib.rs +++ b/libafl/src/lib.rs @@ -18,7 +18,7 @@ Welcome to `LibAFL` clippy::module_name_repetitions, clippy::unreadable_literal )] -#![deny( +#![cfg_attr(debug_assertions, warn( missing_debug_implementations, missing_docs, //trivial_casts, @@ -27,24 +27,37 @@ Welcome to `LibAFL` unused_import_braces, unused_qualifications, //unused_results -)] -#![deny( - bad_style, - const_err, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true +))] +#![cfg_attr(not(debug_assertions), deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + not(debug_assertions), + deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, 
+ overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) )] #[macro_use] diff --git a/libafl_cc/src/lib.rs b/libafl_cc/src/lib.rs index c78300248f..2a15b8ca04 100644 --- a/libafl_cc/src/lib.rs +++ b/libafl_cc/src/lib.rs @@ -14,7 +14,7 @@ clippy::module_name_repetitions, clippy::unreadable_literal )] -#![deny( +#![cfg_attr(debug_assertions, warn( missing_debug_implementations, missing_docs, //trivial_casts, @@ -23,24 +23,37 @@ unused_import_braces, unused_qualifications, //unused_results -)] -#![deny( - bad_style, - const_err, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true +))] +#![cfg_attr(not(debug_assertions), deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + not(debug_assertions), + deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) )] use std::{convert::Into, path::Path, process::Command, string::String, vec::Vec}; diff --git a/libafl_derive/src/lib.rs b/libafl_derive/src/lib.rs index 638cb0e092..758fcecdcf 100644 --- a/libafl_derive/src/lib.rs +++ b/libafl_derive/src/lib.rs @@ -14,7 +14,7 @@ clippy::module_name_repetitions, clippy::unreadable_literal )] -#![deny( +#![cfg_attr(debug_assertions, warn( missing_debug_implementations, missing_docs, //trivial_casts, @@ -23,24 +23,37 @@ unused_import_braces, unused_qualifications, //unused_results -)] -#![deny( - bad_style, - const_err, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true +))] +#![cfg_attr(not(debug_assertions), deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + not(debug_assertions), + deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) )] use proc_macro::TokenStream; diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index e8f01c5254..721e33a83c 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -17,7 +17,7 @@ It can report coverage and, on supported architecutres, even reports memory acce clippy::module_name_repetitions, clippy::unreadable_literal )] -#![deny( +#![cfg_attr(debug_assertions, warn( missing_debug_implementations, missing_docs, //trivial_casts, @@ -26,24 +26,37 @@ It can 
report coverage and, on supported architecutres, even reports memory acce unused_import_braces, unused_qualifications, //unused_results -)] -#![deny( - bad_style, - const_err, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true +))] +#![cfg_attr(not(debug_assertions), deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + not(debug_assertions), + deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) )] /// The frida-asan allocator diff --git a/libafl_sugar/src/lib.rs b/libafl_sugar/src/lib.rs index 1af670774f..0390415e41 100644 --- a/libafl_sugar/src/lib.rs +++ b/libafl_sugar/src/lib.rs @@ -14,7 +14,7 @@ clippy::module_name_repetitions, clippy::unreadable_literal )] -#![deny( +#![cfg_attr(debug_assertions, warn( missing_debug_implementations, missing_docs, //trivial_casts, @@ -23,24 +23,37 @@ unused_import_braces, unused_qualifications, //unused_results -)] -#![deny( - bad_style, - const_err, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true +))] +#![cfg_attr(not(debug_assertions), deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + not(debug_assertions), + deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) )] pub mod inmemory; diff --git a/libafl_targets/src/lib.rs b/libafl_targets/src/lib.rs index 387aad7324..e07a81ef17 100644 --- a/libafl_targets/src/lib.rs +++ b/libafl_targets/src/lib.rs @@ -15,7 +15,7 @@ clippy::module_name_repetitions, clippy::unreadable_literal )] -#![deny( +#![cfg_attr(debug_assertions, warn( missing_debug_implementations, missing_docs, //trivial_casts, @@ -24,24 +24,37 @@ unused_import_braces, unused_qualifications, //unused_results -)] -#![deny( - bad_style, - const_err, - dead_code, - improper_ctypes, - non_shorthand_field_patterns, - no_mangle_generic_items, - overflowing_literals, - path_statements, - patterns_in_fns_without_body, - private_in_public, - unconditional_recursion, - unused, - unused_allocation, - unused_comparisons, - unused_parens, - while_true +))] +#![cfg_attr(not(debug_assertions), deny( + missing_debug_implementations, + missing_docs, + //trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + 
not(debug_assertions), + deny( + bad_style, + const_err, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + private_in_public, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) )] #[allow(unused_imports)] From 6d9763c51f006dd5b2a72a50baf96cc5d757087a Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 4 Jan 2022 23:53:12 +0100 Subject: [PATCH 22/25] Move to clap 3.0 (#447) * move to clap 3.0 * fix cargo.toml * update symcc to use clap3 --- fuzzers/forkserver_simple/Cargo.toml | 2 +- fuzzers/frida_libpng/Cargo.toml | 2 +- fuzzers/frida_libpng/src/fuzzer.rs | 31 +++++++++---------- fuzzers/fuzzbench/Cargo.toml | 2 +- fuzzers/fuzzbench_qemu/Cargo.toml | 2 +- fuzzers/generic_inmemory/Cargo.toml | 2 +- fuzzers/generic_inmemory/src/lib.rs | 28 ++++++++--------- fuzzers/libafl_atheris/Cargo.toml | 2 +- fuzzers/libfuzzer_libpng_ctx/Cargo.toml | 2 +- fuzzers/libfuzzer_libpng_ctx/src/lib.rs | 24 +++++++------- fuzzers/libfuzzer_libpng_launcher/Cargo.toml | 2 +- fuzzers/libfuzzer_libpng_launcher/src/lib.rs | 24 +++++++------- .../fuzzer/Cargo.toml | 2 +- .../fuzzer/src/main.rs | 9 +++--- libafl/src/lib.rs | 3 ++ libafl_concolic/symcc_runtime/symcc | 2 +- .../test/dump_constraints/Cargo.toml | 2 +- .../test/dump_constraints/src/main.rs | 21 ++++++------- utils/gramatron/construct_automata/Cargo.toml | 2 +- .../construct_automata/src/clap-config.yaml | 22 ------------- .../gramatron/construct_automata/src/main.rs | 12 +++---- 21 files changed, 88 insertions(+), 110 deletions(-) delete mode 100644 utils/gramatron/construct_automata/src/clap-config.yaml diff --git a/fuzzers/forkserver_simple/Cargo.toml b/fuzzers/forkserver_simple/Cargo.toml index be189280b8..3840b058da 100644 --- a/fuzzers/forkserver_simple/Cargo.toml +++ b/fuzzers/forkserver_simple/Cargo.toml @@ -17,4 +17,4 @@ opt-level = 3 [dependencies] libafl = { path = "../../libafl/" } -clap = { version = "3.0.0-rc.4", features = ["default"] } \ No newline at end of file +clap = { version = "3.0", features = ["default"] } \ No newline at end of file diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml index 43f56f2b79..829c5434c4 100644 --- a/fuzzers/frida_libpng/Cargo.toml +++ b/fuzzers/frida_libpng/Cargo.toml @@ -38,7 +38,7 @@ libc = "0.2" libloading = "0.7" num-traits = "0.2.14" rangemap = "0.1" -structopt = "0.3.25" +clap = { version = "3.0", features = ["derive"] } serde = "1.0" mimalloc = { version = "*", default-features = false } diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index af7bbe0334..3b3bda0984 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -4,14 +4,13 @@ use mimalloc::MiMalloc; #[global_allocator] static GLOBAL: MiMalloc = MiMalloc; +use clap::{self, StructOpt}; use frida_gum::Gum; use std::{ env, net::SocketAddr, path::{Path, PathBuf}, - time::Duration, }; -use structopt::StructOpt; use libafl::{ bolts::{ @@ -56,7 +55,7 @@ use libafl_targets::cmplog::{CmpLogObserver, CMPLOG_MAP}; use libafl_frida::asan::errors::{AsanErrorsFeedback, AsanErrorsObserver, ASAN_ERRORS}; #[derive(Debug, StructOpt)] -#[structopt( +#[clap( name = "libafl_frida", version = "0.1.0", about = "A frida-based binary-only libfuzzer-style fuzzer for with llmp-multithreading support", @@ -64,7 +63,7 @@ use libafl_frida::asan::errors::{AsanErrorsFeedback, AsanErrorsObserver, 
ASAN_ER Dongjia Zhang , Andrea Fioraldi , Dominik Maier " )] struct Opt { - #[structopt( + #[clap( short, long, parse(try_from_str = Cores::from_cmdline), @@ -73,8 +72,8 @@ struct Opt { )] cores: Cores, - #[structopt( - short = "p", + #[clap( + short = 'p', long, help = "Choose the broker TCP port, default is 1337", name = "PORT", @@ -82,16 +81,16 @@ struct Opt { )] broker_port: u16, - #[structopt( + #[clap( parse(try_from_str), - short = "a", + short = 'a', long, help = "Specify a remote broker", name = "REMOTE" )] remote_broker_addr: Option, - #[structopt( + #[clap( parse(try_from_str), short, long, @@ -100,7 +99,7 @@ struct Opt { )] input: Vec, - #[structopt( + #[clap( short, long, parse(try_from_str), @@ -110,7 +109,7 @@ struct Opt { )] output: PathBuf, - #[structopt( + #[clap( long, help = "The configuration this fuzzer runs with, for multiprocessing", name = "CONF", @@ -118,19 +117,19 @@ struct Opt { )] configuration: String, - #[structopt( + #[clap( long, help = "The file to redirect stdout input to (/dev/null if unset)" )] stdout_file: Option, - #[structopt(help = "The harness")] + #[clap(help = "The harness")] harness: String, - #[structopt(help = "The symbol name to look up and hook")] + #[clap(help = "The symbol name to look up and hook")] symbol: String, - #[structopt(help = "The modules to instrument, separated by colons")] + #[clap(help = "The modules to instrument, separated by colons")] modules_to_instrument: String, } @@ -140,7 +139,7 @@ pub fn main() { // Needed only on no_std //RegistryBuilder::register::(); - let opt = Opt::from_args(); + let opt = Opt::parse(); color_backtrace::install(); println!( diff --git a/fuzzers/fuzzbench/Cargo.toml b/fuzzers/fuzzbench/Cargo.toml index 45bb98dd5b..0cb876ce3e 100644 --- a/fuzzers/fuzzbench/Cargo.toml +++ b/fuzzers/fuzzbench/Cargo.toml @@ -24,7 +24,7 @@ libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "sancov_cmplog", "libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "3.0.0-rc.4", features = ["default"] } +clap = { version = "3.0", features = ["default"] } nix = "0.23" mimalloc = { version = "*", default-features = false } diff --git a/fuzzers/fuzzbench_qemu/Cargo.toml b/fuzzers/fuzzbench_qemu/Cargo.toml index b8b5240324..4a7ab455e5 100644 --- a/fuzzers/fuzzbench_qemu/Cargo.toml +++ b/fuzzers/fuzzbench_qemu/Cargo.toml @@ -14,5 +14,5 @@ debug = true [dependencies] libafl = { path = "../../libafl/" } libafl_qemu = { path = "../../libafl_qemu/", features = ["x86_64"] } -clap = { version = "3.0.0-rc.4", features = ["default"] } +clap = { version = "3.0", features = ["default"] } nix = "0.23" diff --git a/fuzzers/generic_inmemory/Cargo.toml b/fuzzers/generic_inmemory/Cargo.toml index e842a56836..6cd0a235f3 100644 --- a/fuzzers/generic_inmemory/Cargo.toml +++ b/fuzzers/generic_inmemory/Cargo.toml @@ -24,7 +24,7 @@ libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "sancov_cmplog", "libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } -structopt = "0.3.25" +clap = { version = "3.0", features = ["derive"] } mimalloc = { version = "*", default-features = false } [lib] diff --git a/fuzzers/generic_inmemory/src/lib.rs b/fuzzers/generic_inmemory/src/lib.rs index 60d62ab56a..ac614be4a4 100644 --- a/fuzzers/generic_inmemory/src/lib.rs +++ b/fuzzers/generic_inmemory/src/lib.rs @@ -4,9 
+4,9 @@ use mimalloc::MiMalloc; #[global_allocator] static GLOBAL: MiMalloc = MiMalloc; +use clap::{self, StructOpt}; use core::time::Duration; use std::{env, net::SocketAddr, path::PathBuf}; -use structopt::StructOpt; use libafl::{ bolts::{ @@ -50,13 +50,13 @@ fn timeout_from_millis_str(time: &str) -> Result { } #[derive(Debug, StructOpt)] -#[structopt( +#[clap( name = "generic_inmemory", about = "A generic libfuzzer-like fuzzer with llmp-multithreading support", author = "Andrea Fioraldi , Dominik Maier " )] struct Opt { - #[structopt( + #[clap( short, long, parse(try_from_str = Cores::from_cmdline), @@ -65,24 +65,24 @@ struct Opt { )] cores: Cores, - #[structopt( - short = "p", + #[clap( + short = 'p', long, help = "Choose the broker TCP port, default is 1337", name = "PORT" )] broker_port: u16, - #[structopt( + #[clap( parse(try_from_str), - short = "a", + short = 'a', long, help = "Specify a remote broker", name = "REMOTE" )] remote_broker_addr: Option, - #[structopt( + #[clap( parse(try_from_str), short, long, @@ -91,7 +91,7 @@ struct Opt { )] input: Vec, - #[structopt( + #[clap( short, long, parse(try_from_str), @@ -101,7 +101,7 @@ struct Opt { )] output: PathBuf, - #[structopt( + #[clap( parse(try_from_str = timeout_from_millis_str), short, long, @@ -111,13 +111,13 @@ struct Opt { )] timeout: Duration, - #[structopt( + #[clap( parse(from_os_str), - short = "x", + short = 'x', long, help = "Feed the fuzzer with an user-specified list of tokens (often called \"dictionary\"", name = "TOKENS", - multiple = true + multiple_occurrences = true )] tokens: Vec, } @@ -131,7 +131,7 @@ pub fn libafl_main() { let workdir = env::current_dir().unwrap(); - let opt = Opt::from_args(); + let opt = Opt::parse(); let cores = opt.cores; let broker_port = opt.broker_port; diff --git a/fuzzers/libafl_atheris/Cargo.toml b/fuzzers/libafl_atheris/Cargo.toml index 6a6f269e25..0c30a9a79e 100644 --- a/fuzzers/libafl_atheris/Cargo.toml +++ b/fuzzers/libafl_atheris/Cargo.toml @@ -22,7 +22,7 @@ num_cpus = "1.0" [dependencies] libafl = { path = "../../libafl/" } libafl_targets = { path = "../../libafl_targets/", features = ["pointer_maps", "sancov_cmplog", "libfuzzer"] } -clap = { version = "3.0.0-beta.4", features = ["default", "yaml"] } +clap = { version = "3.0", features = ["default"] } [lib] name = "afl_atheris" diff --git a/fuzzers/libfuzzer_libpng_ctx/Cargo.toml b/fuzzers/libfuzzer_libpng_ctx/Cargo.toml index ab8ad5a6f0..fe20c9ccda 100644 --- a/fuzzers/libfuzzer_libpng_ctx/Cargo.toml +++ b/fuzzers/libfuzzer_libpng_ctx/Cargo.toml @@ -24,7 +24,7 @@ libafl = { path = "../../libafl/", features = ["std", "derive", "llmp_compressio libafl_targets = { path = "../../libafl_targets/", features = ["libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } -structopt = "0.3.25" +clap = { version = "3.0", features = ["derive"] } mimalloc = { version = "*", default-features = false } [lib] diff --git a/fuzzers/libfuzzer_libpng_ctx/src/lib.rs b/fuzzers/libfuzzer_libpng_ctx/src/lib.rs index 414b89dcc8..a9119fe1b5 100644 --- a/fuzzers/libfuzzer_libpng_ctx/src/lib.rs +++ b/fuzzers/libfuzzer_libpng_ctx/src/lib.rs @@ -6,9 +6,9 @@ use mimalloc::MiMalloc; #[global_allocator] static GLOBAL: MiMalloc = MiMalloc; +use clap::{self, StructOpt}; use core::time::Duration; use std::{env, net::SocketAddr, path::PathBuf}; -use structopt::StructOpt; use libafl::{ bolts::{ @@ -47,13 +47,13 @@ fn timeout_from_millis_str(time: &str) -> Result { } #[derive(Debug, StructOpt)] -#[structopt( +#[clap( name = 
"libfuzzer_libpng_ctx", about = "A clone of libfuzzer using LibAFL for a libpng harness", author = "Andrea Fioraldi , Dominik Maier " )] struct Opt { - #[structopt( + #[clap( short, long, parse(try_from_str = Cores::from_cmdline), @@ -62,8 +62,8 @@ struct Opt { )] cores: Cores, - #[structopt( - short = "p", + #[clap( + short = 'p', long, help = "Choose the broker TCP port, default is 1337", name = "PORT", @@ -71,16 +71,16 @@ struct Opt { )] broker_port: u16, - #[structopt( + #[clap( parse(try_from_str), - short = "a", + short = 'a', long, help = "Specify a remote broker", name = "REMOTE" )] remote_broker_addr: Option, - #[structopt( + #[clap( parse(try_from_str), short, long, @@ -89,7 +89,7 @@ struct Opt { )] input: Vec, - #[structopt( + #[clap( short, long, parse(try_from_str), @@ -99,7 +99,7 @@ struct Opt { )] output: PathBuf, - #[structopt( + #[clap( short, long, parse(try_from_str = timeout_from_millis_str), @@ -110,7 +110,7 @@ struct Opt { timeout: Duration, /* // The tokens are hardcoded in this example. - #[structopt( + #[clap( parse(from_os_str), short = "x", long, @@ -127,7 +127,7 @@ pub fn libafl_main() { // Registry the metadata types used in this fuzzer // Needed only on no_std //RegistryBuilder::register::(); - let opt = Opt::from_args(); + let opt = Opt::parse(); let broker_port = opt.broker_port; diff --git a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml index f003e64ce9..6b23637ec5 100644 --- a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml +++ b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml @@ -24,7 +24,7 @@ libafl = { path = "../../libafl/", features = ["std", "derive", "llmp_compressio libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } # TODO Include it only when building cc libafl_cc = { path = "../../libafl_cc/" } -structopt = "0.3.25" +clap = { version = "3.0", features = ["derive"] } mimalloc = { version = "*", default-features = false } [lib] diff --git a/fuzzers/libfuzzer_libpng_launcher/src/lib.rs b/fuzzers/libfuzzer_libpng_launcher/src/lib.rs index 77daf9c526..3732a17a5e 100644 --- a/fuzzers/libfuzzer_libpng_launcher/src/lib.rs +++ b/fuzzers/libfuzzer_libpng_launcher/src/lib.rs @@ -6,9 +6,9 @@ use mimalloc::MiMalloc; #[global_allocator] static GLOBAL: MiMalloc = MiMalloc; +use clap::{self, StructOpt}; use core::time::Duration; use std::{env, net::SocketAddr, path::PathBuf}; -use structopt::StructOpt; use libafl::{ bolts::{ @@ -47,13 +47,13 @@ fn timeout_from_millis_str(time: &str) -> Result { /// The commandline args this fuzzer accepts #[derive(Debug, StructOpt)] -#[structopt( +#[clap( name = "libfuzzer_libpng_launcher", about = "A libfuzzer-like fuzzer for libpng with llmp-multithreading support and a launcher", author = "Andrea Fioraldi , Dominik Maier " )] struct Opt { - #[structopt( + #[clap( short, long, parse(try_from_str = Cores::from_cmdline), @@ -62,8 +62,8 @@ struct Opt { )] cores: Cores, - #[structopt( - short = "p", + #[clap( + short = 'p', long, help = "Choose the broker TCP port, default is 1337", name = "PORT", @@ -71,16 +71,16 @@ struct Opt { )] broker_port: u16, - #[structopt( + #[clap( parse(try_from_str), - short = "a", + short = 'a', long, help = "Specify a remote broker", name = "REMOTE" )] remote_broker_addr: Option, - #[structopt( + #[clap( parse(try_from_str), short, long, @@ -89,7 +89,7 @@ struct Opt { )] input: Vec, - #[structopt( + #[clap( short, long, parse(try_from_str), @@ -99,7 +99,7 @@ struct Opt { )] output: PathBuf, - #[structopt( + 
#[clap( parse(try_from_str = timeout_from_millis_str), short, long, @@ -110,7 +110,7 @@ struct Opt { timeout: Duration, /* /// This fuzzer has hard-coded tokens - #[structopt( + #[clap( parse(from_os_str), short = "x", long, @@ -128,7 +128,7 @@ pub fn libafl_main() { // Registry the metadata types used in this fuzzer // Needed only on no_std //RegistryBuilder::register::(); - let opt = Opt::from_args(); + let opt = Opt::parse(); let broker_port = opt.broker_port; let cores = opt.cores; diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml index 95adad50f2..d1b1ad8e7a 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml +++ b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml @@ -18,7 +18,7 @@ debug = true [dependencies] libafl = { path = "../../../libafl/", features = ["concolic_mutation"] } libafl_targets = { path = "../../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer"] } -structopt = "0.3.21" +clap = { version = "3.0", features = ["derive"]} mimalloc = { version = "*", default-features = false } [build-dependencies] diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs index dc1d642e66..9e8317770a 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs +++ b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs @@ -4,6 +4,7 @@ use mimalloc::MiMalloc; #[global_allocator] static GLOBAL: MiMalloc = MiMalloc; +use clap::{self, StructOpt}; use std::{env, path::PathBuf}; use libafl::{ @@ -25,6 +26,7 @@ use libafl::{ feedbacks::{CrashFeedback, MapFeedbackState, MaxMapFeedback, TimeFeedback}, fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes, Input}, + monitors::MultiMonitor, mutators::{ scheduled::{havoc_mutations, StdScheduledMutator}, token_mutations::I2SRandReplace, @@ -41,7 +43,6 @@ use libafl::{ StdMutationalStage, TracingStage, }, state::{HasCorpus, StdState}, - monitors::MultiMonitor, Error, }; @@ -50,12 +51,10 @@ use libafl_targets::{ MAX_EDGES_NUM, }; -use structopt::StructOpt; - #[derive(Debug, StructOpt)] struct Opt { /// This node should do concolic tracing + solving instead of traditional fuzzing - #[structopt(short, long)] + #[clap(short, long)] concolic: bool, } @@ -64,7 +63,7 @@ pub fn main() { // Needed only on no_std //RegistryBuilder::register::(); - let opt = Opt::from_args(); + let opt = Opt::parse(); println!( "Workdir: {:?}", diff --git a/libafl/src/lib.rs b/libafl/src/lib.rs index 358b3c8482..8717b14d92 100644 --- a/libafl/src/lib.rs +++ b/libafl/src/lib.rs @@ -237,6 +237,9 @@ impl From for Error { } } +#[cfg(feature = "std")] +impl std::error::Error for Error {} + // TODO: no_std test #[cfg(feature = "std")] #[cfg(test)] diff --git a/libafl_concolic/symcc_runtime/symcc b/libafl_concolic/symcc_runtime/symcc index 45cde0269a..3133c0b37d 160000 --- a/libafl_concolic/symcc_runtime/symcc +++ b/libafl_concolic/symcc_runtime/symcc @@ -1 +1 @@ -Subproject commit 45cde0269ae22aef4cca2e1fb98c3b24f7bb2984 +Subproject commit 3133c0b37d3c498db9addf2331378c7c9cadbf10 diff --git a/libafl_concolic/test/dump_constraints/Cargo.toml b/libafl_concolic/test/dump_constraints/Cargo.toml index d3b7ed985e..876a7c24bc 100644 --- a/libafl_concolic/test/dump_constraints/Cargo.toml +++ b/libafl_concolic/test/dump_constraints/Cargo.toml @@ -8,4 +8,4 @@ authors = ["Julius Hohnerlein "] [dependencies] libafl = {path = "../../../libafl"} -structopt = "0.3.21" +clap = { 
version = "3.0", features = ["derive"] } diff --git a/libafl_concolic/test/dump_constraints/src/main.rs b/libafl_concolic/test/dump_constraints/src/main.rs index 7963c8b1b5..987d915873 100644 --- a/libafl_concolic/test/dump_constraints/src/main.rs +++ b/libafl_concolic/test/dump_constraints/src/main.rs @@ -2,6 +2,7 @@ //! It achieves this by running an instrumented target program with the necessary environment variables set. //! When the program has finished executing, it dumps the traced constraints to a file. +use clap::{self, StructOpt}; use std::{ ffi::OsString, fs::File, @@ -11,8 +12,6 @@ use std::{ string::ToString, }; -use structopt::StructOpt; - use libafl::{ bolts::shmem::{ShMem, ShMemProvider, StdShMemProvider}, observers::concolic::{ @@ -22,44 +21,44 @@ use libafl::{ }; #[derive(Debug, StructOpt)] -#[structopt( +#[clap( name = "dump_constraints", about = "Dump tool for concolic constraints." )] struct Opt { /// Outputs plain text instead of binary - #[structopt(short, long)] + #[clap(short, long)] plain_text: bool, /// Outputs coverage information to the given file - #[structopt(short, long)] + #[clap(short, long)] coverage_file: Option, /// Symbolizes only the given input file offsets. - #[structopt(short, long)] + #[clap(short, long)] symbolize_offsets: Option>, /// Concretize all floating point operations. - #[structopt(long)] + #[clap(long)] no_float: bool, /// Prune expressions from high-frequency code locations. - #[structopt(long)] + #[clap(long)] prune: bool, /// Trace file path, "trace" by default. - #[structopt(parse(from_os_str), short, long)] + #[clap(parse(from_os_str), short, long)] output: Option, /// Target program and arguments - #[structopt(last = true)] + #[clap(last = true)] program: Vec, } fn main() { const COVERAGE_MAP_SIZE: usize = 65536; - let opt = Opt::from_args(); + let opt = Opt::parse(); let mut shmemprovider = StdShMemProvider::default(); let concolic_shmem = shmemprovider diff --git a/utils/gramatron/construct_automata/Cargo.toml b/utils/gramatron/construct_automata/Cargo.toml index 3b54060e72..fccd5cf076 100644 --- a/utils/gramatron/construct_automata/Cargo.toml +++ b/utils/gramatron/construct_automata/Cargo.toml @@ -11,4 +11,4 @@ regex = "1" postcard = "0.7" lazy_static = "1.4.0" libafl = { path = "../../../libafl" } -structopt = "0.3.25" +clap = { version = "3.0", features = ["derive"] } diff --git a/utils/gramatron/construct_automata/src/clap-config.yaml b/utils/gramatron/construct_automata/src/clap-config.yaml deleted file mode 100644 index 55dc8c0d70..0000000000 --- a/utils/gramatron/construct_automata/src/clap-config.yaml +++ /dev/null @@ -1,22 +0,0 @@ -name: construct_automata -version: "0.1.0" -author: "Andrea Fioraldi " -about: Generate a serialized Automaton using a json GNF grammar -args: - - grammar: - short: g - long: grammar-file - value_name: GRAMMAR - required: true - takes_value: true - - output: - short: o - long: output - value_name: OUTPUT - required: true - takes_value: true - - limit: - short: l - long: limit - value_name: LIMIT - takes_value: true diff --git a/utils/gramatron/construct_automata/src/main.rs b/utils/gramatron/construct_automata/src/main.rs index 985d14fbe2..7744a045d0 100644 --- a/utils/gramatron/construct_automata/src/main.rs +++ b/utils/gramatron/construct_automata/src/main.rs @@ -1,3 +1,4 @@ +use clap::{self, StructOpt}; use lazy_static::lazy_static; use regex::Regex; use serde_json::Value; @@ -9,18 +10,17 @@ use std::{ path::PathBuf, rc::Rc, }; -use structopt::StructOpt; use 
libafl::generators::gramatron::{Automaton, Trigger}; #[derive(Debug, StructOpt)] -#[structopt( +#[clap( name = "construct_automata", about = "Generate a serialized Automaton using a json GNF grammar", author = "Andrea Fioraldi " )] struct Opt { - #[structopt( + #[clap( parse(try_from_str), short, long = "grammar-file", @@ -29,7 +29,7 @@ struct Opt { )] grammar: PathBuf, - #[structopt( + #[clap( parse(try_from_str), short, long, @@ -39,7 +39,7 @@ struct Opt { )] limit: usize, - #[structopt( + #[clap( parse(try_from_str), short, long, @@ -305,7 +305,7 @@ fn postprocess(pda: &[Transition], stack_limit: usize) -> Automaton { } fn main() { - let opt = Opt::from_args(); + let opt = Opt::parse(); let grammar_file = opt.grammar; let output_file = opt.output; From 30eb1508deb874cbf91cfb0a0a644f4569a8f268 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 5 Jan 2022 01:15:23 +0100 Subject: [PATCH 23/25] Add OwnedSlice::RefRaw to keep track of raw pointers (#448) * add OwnedSlice::RefRaw to keep track of raw pointers * clippy * fmt * new from ownedref * clippy * OwnedSliceInner * fix,From * as_slice() * fmt * fix doc * OwnedSliceMut * fixes * clippy * fix * ownedmut -> owned * to owned * to_owned -> clone * removed comment Co-authored-by: tokatoka --- fuzzers/libafl_atheris/src/lib.rs | 9 +- fuzzers/libfuzzer_libpng_ctx/src/lib.rs | 5 +- fuzzers/tutorial/src/input.rs | 2 +- libafl/src/bolts/ownedref.rs | 241 ++++++++++++++++++++---- libafl/src/inputs/bytes.rs | 2 +- libafl/src/inputs/mod.rs | 2 +- libafl/src/observers/map.rs | 45 +++-- libafl_targets/src/coverage.rs | 15 +- 8 files changed, 259 insertions(+), 62 deletions(-) diff --git a/fuzzers/libafl_atheris/src/lib.rs b/fuzzers/libafl_atheris/src/lib.rs index d2e8219f0e..1ebd696a2f 100644 --- a/fuzzers/libafl_atheris/src/lib.rs +++ b/fuzzers/libafl_atheris/src/lib.rs @@ -216,8 +216,13 @@ pub fn LLVMFuzzerRunDriver( let mut run_client = |state: Option>, mut mgr, _core_id| { // Create an observation channel using the coverage map - let edges = unsafe { slice::from_raw_parts_mut(EDGES_MAP_PTR, MAX_EDGES_NUM) }; - let edges_observer = HitcountsMapObserver::new(StdMapObserver::new("edges", edges)); + let edges_observer = unsafe { + HitcountsMapObserver::new(StdMapObserver::new_from_ptr( + "edges", + EDGES_MAP_PTR, + MAX_EDGES_NUM, + )) + }; // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); diff --git a/fuzzers/libfuzzer_libpng_ctx/src/lib.rs b/fuzzers/libfuzzer_libpng_ctx/src/lib.rs index a9119fe1b5..315bca5d12 100644 --- a/fuzzers/libfuzzer_libpng_ctx/src/lib.rs +++ b/fuzzers/libfuzzer_libpng_ctx/src/lib.rs @@ -144,8 +144,9 @@ pub fn libafl_main() { let mut run_client = |state: Option>, mut restarting_mgr, _core_id| { // Create an observation channel using the coverage map - let edges = edges_map_from_ptr(); - let edges_observer = HitcountsMapObserver::new(StdMapObserver::new("edges", edges)); + let edges = unsafe { edges_map_from_ptr() }; + let edges_observer = + HitcountsMapObserver::new(StdMapObserver::new_from_ownedref("edges", edges)); // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); diff --git a/fuzzers/tutorial/src/input.rs b/fuzzers/tutorial/src/input.rs index 599c9b5e94..bc3b216a37 100644 --- a/fuzzers/tutorial/src/input.rs +++ b/fuzzers/tutorial/src/input.rs @@ -61,7 +61,7 @@ impl HasTargetBytes for PacketData { fn target_bytes(&self) -> OwnedSlice { let mut serialized_data = 
Vec::with_capacity(self.serialized_size()); self.binary_serialize::<_, LittleEndian>(&mut serialized_data); - OwnedSlice::Owned(serialized_data) + OwnedSlice::from(serialized_data) } } diff --git a/libafl/src/bolts/ownedref.rs b/libafl/src/bolts/ownedref.rs index 52d292f397..025632d910 100644 --- a/libafl/src/bolts/ownedref.rs +++ b/libafl/src/bolts/ownedref.rs @@ -2,7 +2,7 @@ // The serialization is towards owned, allowing to serialize pointers without troubles. use alloc::{boxed::Box, vec::Vec}; -use core::{clone::Clone, fmt::Debug}; +use core::{clone::Clone, fmt::Debug, slice}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Trait to convert into an Owned type @@ -166,26 +166,31 @@ where /// Wrap a slice and convert to a Vec on serialize #[derive(Clone, Debug)] -pub enum OwnedSlice<'a, T: 'a + Sized> { +enum OwnedSliceInner<'a, T: 'a + Sized> { + /// A ref to a raw slice and length + RefRaw(*const T, usize), /// A ref to a slice Ref(&'a [T]), /// A ref to an owned [`Vec`] Owned(Vec), } -impl<'a, T: 'a + Sized + Serialize> Serialize for OwnedSlice<'a, T> { +impl<'a, T: 'a + Sized + Serialize> Serialize for OwnedSliceInner<'a, T> { fn serialize(&self, se: S) -> Result where S: Serializer, { match self { - OwnedSlice::Ref(r) => r.serialize(se), - OwnedSlice::Owned(b) => b.serialize(se), + OwnedSliceInner::RefRaw(rr, len) => unsafe { + slice::from_raw_parts(*rr, *len).serialize(se) + }, + OwnedSliceInner::Ref(r) => r.serialize(se), + OwnedSliceInner::Owned(b) => b.serialize(se), } } } -impl<'de, 'a, T: 'a + Sized> Deserialize<'de> for OwnedSlice<'a, T> +impl<'de, 'a, T: 'a + Sized> Deserialize<'de> for OwnedSliceInner<'a, T> where Vec: Deserialize<'de>, { @@ -193,7 +198,79 @@ where where D: Deserializer<'de>, { - Deserialize::deserialize(deserializer).map(OwnedSlice::Owned) + Deserialize::deserialize(deserializer).map(OwnedSliceInner::Owned) + } +} + +/// Wrap a slice and convert to a Vec on serialize +/// We use a hidden inner enum so the public API can be safe, +/// unless the user uses the unsafe [`OwnedSlice::from_raw_parts`] +#[allow(clippy::unsafe_derive_deserialize)] +#[derive(Debug, Serialize, Deserialize)] +pub struct OwnedSlice<'a, T: 'a + Sized> { + inner: OwnedSliceInner<'a, T>, +} + +impl<'a, T: 'a + Clone> Clone for OwnedSlice<'a, T> { + fn clone(&self) -> Self { + Self { + inner: OwnedSliceInner::Owned(self.as_slice().to_vec()), + } + } +} + +impl<'a, T> OwnedSlice<'a, T> { + /// Create a new [`OwnedSlice`] from a raw pointer and length + /// + /// # Safety + /// + /// The pointer must be valid and point to a map of the size `size_of() * len` + /// The contents will be dereferenced in subsequent operations. 
+ #[must_use] + pub unsafe fn from_raw_parts(ptr: *const T, len: usize) -> Self { + Self { + inner: OwnedSliceInner::RefRaw(ptr, len), + } + } +} + +/// Create a new [`OwnedSlice`] from a vector +impl<'a, T> From> for OwnedSlice<'a, T> { + fn from(vec: Vec) -> Self { + Self { + inner: OwnedSliceInner::Owned(vec), + } + } +} + +/// Create a new [`OwnedSlice`] from a vector reference +impl<'a, T> From<&'a Vec> for OwnedSlice<'a, T> { + fn from(vec: &'a Vec) -> Self { + Self { + inner: OwnedSliceInner::Ref(vec), + } + } +} + +/// Create a new [`OwnedSlice`] from a reference to a slice +impl<'a, T> From<&'a [T]> for OwnedSlice<'a, T> { + fn from(r: &'a [T]) -> Self { + Self { + inner: OwnedSliceInner::Ref(r), + } + } +} + +/// Create a new [`OwnedSlice`] from a [`OwnedSliceMut`] +impl<'a, T> From> for OwnedSlice<'a, T> { + fn from(mut_slice: OwnedSliceMut<'a, T>) -> Self { + Self { + inner: match mut_slice.inner { + OwnedSliceMutInner::RefRaw(ptr, len) => OwnedSliceInner::RefRaw(ptr as _, len), + OwnedSliceMutInner::Ref(r) => OwnedSliceInner::Ref(r as _), + OwnedSliceMutInner::Owned(v) => OwnedSliceInner::Owned(v), + }, + } } } @@ -201,9 +278,10 @@ impl<'a, T: Sized> OwnedSlice<'a, T> { /// Get the [`OwnedSlice`] as slice. #[must_use] pub fn as_slice(&self) -> &[T] { - match self { - OwnedSlice::Ref(r) => r, - OwnedSlice::Owned(v) => v.as_slice(), + match &self.inner { + OwnedSliceInner::Ref(r) => r, + OwnedSliceInner::RefRaw(rr, len) => unsafe { slice::from_raw_parts(*rr, *len) }, + OwnedSliceInner::Owned(v) => v.as_slice(), } } } @@ -214,43 +292,57 @@ where { #[must_use] fn is_owned(&self) -> bool { - match self { - OwnedSlice::Ref(_) => false, - OwnedSlice::Owned(_) => true, + match self.inner { + OwnedSliceInner::RefRaw(_, _) | OwnedSliceInner::Ref(_) => false, + OwnedSliceInner::Owned(_) => true, } } #[must_use] fn into_owned(self) -> Self { - match self { - OwnedSlice::Ref(r) => OwnedSlice::Owned(r.to_vec()), - OwnedSlice::Owned(v) => OwnedSlice::Owned(v), + match self.inner { + OwnedSliceInner::RefRaw(rr, len) => Self { + inner: OwnedSliceInner::Owned(unsafe { slice::from_raw_parts(rr, len).to_vec() }), + }, + OwnedSliceInner::Ref(r) => Self { + inner: OwnedSliceInner::Owned(r.to_vec()), + }, + OwnedSliceInner::Owned(v) => Self { + inner: OwnedSliceInner::Owned(v), + }, } } } /// Wrap a mutable slice and convert to a Vec on serialize +/// We use a hidden inner enum so the public API can be safe, +/// unless the user uses the unsafe [`OwnedSliceMut::from_raw_parts_mut`] #[derive(Debug)] -pub enum OwnedSliceMut<'a, T: 'a + Sized> { +pub enum OwnedSliceMutInner<'a, T: 'a + Sized> { + /// A raw ptr to a memory location and a length + RefRaw(*mut T, usize), /// A ptr to a mutable slice of the type Ref(&'a mut [T]), /// An owned [`Vec`] of the type Owned(Vec), } -impl<'a, T: 'a + Sized + Serialize> Serialize for OwnedSliceMut<'a, T> { +impl<'a, T: 'a + Sized + Serialize> Serialize for OwnedSliceMutInner<'a, T> { fn serialize(&self, se: S) -> Result where S: Serializer, { match self { - OwnedSliceMut::Ref(r) => r.serialize(se), - OwnedSliceMut::Owned(b) => b.serialize(se), + OwnedSliceMutInner::RefRaw(rr, len) => { + unsafe { slice::from_raw_parts_mut(*rr, *len) }.serialize(se) + } + OwnedSliceMutInner::Ref(r) => r.serialize(se), + OwnedSliceMutInner::Owned(b) => b.serialize(se), } } } -impl<'de, 'a, T: 'a + Sized> Deserialize<'de> for OwnedSliceMut<'a, T> +impl<'de, 'a, T: 'a + Sized> Deserialize<'de> for OwnedSliceMutInner<'a, T> where Vec: Deserialize<'de>, { @@ -258,7 +350,35 @@ where 
where D: Deserializer<'de>, { - Deserialize::deserialize(deserializer).map(OwnedSliceMut::Owned) + Deserialize::deserialize(deserializer).map(OwnedSliceMutInner::Owned) + } +} + +/// Wrap a mutable slice and convert to a Vec on serialize +#[allow(clippy::unsafe_derive_deserialize)] +#[derive(Debug, Serialize, Deserialize)] +pub struct OwnedSliceMut<'a, T: 'a + Sized> { + inner: OwnedSliceMutInner<'a, T>, +} + +impl<'a, T: 'a + Sized> OwnedSliceMut<'a, T> { + /// Create a new [`OwnedSliceMut`] from a raw pointer and length + /// + /// # Safety + /// + /// The pointer must be valid and point to a map of the size `size_of() * len` + /// The contents will be dereferenced in subsequent operations. + #[must_use] + pub unsafe fn from_raw_parts_mut(ptr: *mut T, len: usize) -> OwnedSliceMut<'a, T> { + if ptr.is_null() || len == 0 { + Self { + inner: OwnedSliceMutInner::Owned(Vec::new()), + } + } else { + Self { + inner: OwnedSliceMutInner::RefRaw(ptr, len), + } + } } } @@ -266,18 +386,20 @@ impl<'a, T: Sized> OwnedSliceMut<'a, T> { /// Get the value as slice #[must_use] pub fn as_slice(&self) -> &[T] { - match self { - OwnedSliceMut::Ref(r) => r, - OwnedSliceMut::Owned(v) => v.as_slice(), + match &self.inner { + OwnedSliceMutInner::RefRaw(rr, len) => unsafe { slice::from_raw_parts(*rr, *len) }, + OwnedSliceMutInner::Ref(r) => r, + OwnedSliceMutInner::Owned(v) => v.as_slice(), } } /// Get the value as mut slice #[must_use] pub fn as_mut_slice(&mut self) -> &mut [T] { - match self { - OwnedSliceMut::Ref(r) => r, - OwnedSliceMut::Owned(v) => v.as_mut_slice(), + match &mut self.inner { + OwnedSliceMutInner::RefRaw(rr, len) => unsafe { slice::from_raw_parts_mut(*rr, *len) }, + OwnedSliceMutInner::Ref(r) => r, + OwnedSliceMutInner::Owned(v) => v.as_mut_slice(), } } } @@ -288,17 +410,68 @@ where { #[must_use] fn is_owned(&self) -> bool { - match self { - OwnedSliceMut::Ref(_) => false, - OwnedSliceMut::Owned(_) => true, + match self.inner { + OwnedSliceMutInner::RefRaw(_, _) | OwnedSliceMutInner::Ref(_) => false, + OwnedSliceMutInner::Owned(_) => true, } } #[must_use] fn into_owned(self) -> Self { - match self { - OwnedSliceMut::Ref(r) => OwnedSliceMut::Owned(r.to_vec()), - OwnedSliceMut::Owned(v) => OwnedSliceMut::Owned(v), + let vec = match self.inner { + OwnedSliceMutInner::RefRaw(rr, len) => unsafe { + slice::from_raw_parts_mut(rr, len).to_vec() + }, + OwnedSliceMutInner::Ref(r) => r.to_vec(), + OwnedSliceMutInner::Owned(v) => v, + }; + Self { + inner: OwnedSliceMutInner::Owned(vec), + } + } +} + +impl<'a, T: 'a + Clone> Clone for OwnedSliceMut<'a, T> { + fn clone(&self) -> Self { + Self { + inner: OwnedSliceMutInner::Owned(self.as_slice().to_vec()), + } + } +} + +/// Create a new [`OwnedSliceMut`] from a vector +impl<'a, T> From> for OwnedSliceMut<'a, T> { + fn from(vec: Vec) -> Self { + Self { + inner: OwnedSliceMutInner::Owned(vec), + } + } +} + +/// Create a new [`OwnedSliceMut`] from a vector reference +impl<'a, T> From<&'a mut Vec> for OwnedSliceMut<'a, T> { + fn from(vec: &'a mut Vec) -> Self { + Self { + inner: OwnedSliceMutInner::Ref(vec), + } + } +} + +/// Create a new [`OwnedSliceMut`] from a reference to ref to a slice +impl<'a, T> From<&'a mut [T]> for OwnedSliceMut<'a, T> { + fn from(r: &'a mut [T]) -> Self { + Self { + inner: OwnedSliceMutInner::Ref(r), + } + } +} + +/// Create a new [`OwnedSliceMut`] from a reference to ref to a slice +#[allow(clippy::mut_mut)] // This makes use in some iterators easier +impl<'a, T> From<&'a mut &'a mut [T]> for OwnedSliceMut<'a, T> { + fn from(r: &'a 
mut &'a mut [T]) -> Self { + Self { + inner: OwnedSliceMutInner::Ref(r), } } } diff --git a/libafl/src/inputs/bytes.rs b/libafl/src/inputs/bytes.rs index 5ecea7ef2f..67ec943eff 100644 --- a/libafl/src/inputs/bytes.rs +++ b/libafl/src/inputs/bytes.rs @@ -75,7 +75,7 @@ impl HasBytesVec for BytesInput { impl HasTargetBytes for BytesInput { #[inline] fn target_bytes(&self) -> OwnedSlice { - OwnedSlice::Ref(&self.bytes) + OwnedSlice::from(&self.bytes) } } diff --git a/libafl/src/inputs/mod.rs b/libafl/src/inputs/mod.rs index eebf4d0406..50b5baafa2 100644 --- a/libafl/src/inputs/mod.rs +++ b/libafl/src/inputs/mod.rs @@ -76,7 +76,7 @@ impl Input for NopInput { } impl HasTargetBytes for NopInput { fn target_bytes(&self) -> OwnedSlice { - OwnedSlice::Owned(vec![0]) + OwnedSlice::from(vec![0]) } } diff --git a/libafl/src/observers/map.rs b/libafl/src/observers/map.rs index 5819abd9b0..cca20be32f 100644 --- a/libafl/src/observers/map.rs +++ b/libafl/src/observers/map.rs @@ -5,11 +5,7 @@ use alloc::{ string::{String, ToString}, vec::Vec, }; -use core::{ - fmt::Debug, - hash::Hasher, - slice::{from_raw_parts, from_raw_parts_mut}, -}; +use core::{fmt::Debug, hash::Hasher, slice::from_raw_parts}; use intervaltree::IntervalTree; use num_traits::PrimInt; use serde::{Deserialize, Serialize}; @@ -188,7 +184,7 @@ where pub fn new(name: &'static str, map: &'a mut [T]) -> Self { let initial = if map.is_empty() { T::default() } else { map[0] }; Self { - map: OwnedSliceMut::Ref(map), + map: OwnedSliceMut::from(map), name: name.to_string(), initial, } @@ -199,7 +195,26 @@ where pub fn new_owned(name: &'static str, map: Vec) -> Self { let initial = if map.is_empty() { T::default() } else { map[0] }; Self { - map: OwnedSliceMut::Owned(map), + map: OwnedSliceMut::from(map), + name: name.to_string(), + initial, + } + } + + /// Creates a new [`MapObserver`] from an [`OwnedSliceMut`] map. + /// + /// # Safety + /// Will dereference the owned slice with up to len elements. 
+ #[must_use] + pub fn new_from_ownedref(name: &'static str, map: OwnedSliceMut<'a, T>) -> Self { + let map_slice = map.as_slice(); + let initial = if map_slice.is_empty() { + T::default() + } else { + map_slice[0] + }; + Self { + map, name: name.to_string(), initial, } @@ -212,7 +227,7 @@ where pub unsafe fn new_from_ptr(name: &'static str, map_ptr: *mut T, len: usize) -> Self { let initial = if len > 0 { *map_ptr } else { T::default() }; StdMapObserver { - map: OwnedSliceMut::Ref(from_raw_parts_mut(map_ptr, len)), + map: OwnedSliceMut::from_raw_parts_mut(map_ptr, len), name: name.to_string(), initial, } @@ -309,7 +324,7 @@ where assert!(map.len() >= N); let initial = if map.is_empty() { T::default() } else { map[0] }; Self { - map: OwnedSliceMut::Ref(map), + map: OwnedSliceMut::from(map), name: name.to_string(), initial, } @@ -321,7 +336,7 @@ where assert!(map.len() >= N); let initial = if map.is_empty() { T::default() } else { map[0] }; Self { - map: OwnedSliceMut::Owned(map), + map: OwnedSliceMut::from(map), name: name.to_string(), initial, } @@ -334,7 +349,7 @@ where pub unsafe fn new_from_ptr(name: &'static str, map_ptr: *mut T) -> Self { let initial = if N > 0 { *map_ptr } else { T::default() }; ConstMapObserver { - map: OwnedSliceMut::Ref(from_raw_parts_mut(map_ptr, N)), + map: OwnedSliceMut::from_raw_parts_mut(map_ptr, N), name: name.to_string(), initial, } @@ -429,7 +444,7 @@ where pub fn new(name: &'static str, map: &'a mut [T], size: &'a mut usize) -> Self { let initial = if map.is_empty() { T::default() } else { map[0] }; Self { - map: OwnedSliceMut::Ref(map), + map: OwnedSliceMut::from(map), size: OwnedRefMut::Ref(size), name: name.into(), initial, @@ -448,7 +463,7 @@ where ) -> Self { let initial = if max_len > 0 { *map_ptr } else { T::default() }; VariableMapObserver { - map: OwnedSliceMut::Ref(from_raw_parts_mut(map_ptr, max_len)), + map: OwnedSliceMut::from_raw_parts_mut(map_ptr, max_len), size: OwnedRefMut::Ref(size), name: name.into(), initial, @@ -715,7 +730,7 @@ where idx += l; builder.push(r); v += 1; - OwnedSliceMut::Ref(x) + OwnedSliceMut::from(x) }) .collect(); Self { @@ -745,7 +760,7 @@ where idx += l; builder.push(r); v += 1; - OwnedSliceMut::Owned(x) + OwnedSliceMut::from(x) }) .collect(); Self { diff --git a/libafl_targets/src/coverage.rs b/libafl_targets/src/coverage.rs index 49a8215fb5..feb2fb5b91 100644 --- a/libafl_targets/src/coverage.rs +++ b/libafl_targets/src/coverage.rs @@ -1,7 +1,6 @@ //! Coverage maps as static mut array use crate::EDGES_MAP_SIZE; -use core::slice::from_raw_parts_mut; /// The map for edges. #[no_mangle] @@ -21,14 +20,18 @@ pub use __afl_area_ptr as EDGES_MAP_PTR; #[no_mangle] pub static mut __afl_map_size: usize = EDGES_MAP_SIZE; pub use __afl_map_size as EDGES_MAP_PTR_SIZE; +use libafl::bolts::ownedref::OwnedSliceMut; /// Gets the edges map from the `EDGES_MAP_PTR` raw pointer. +/// +/// # Safety +/// +/// This function will crash if `EDGES_MAP_PTR` is not a valid pointer. +/// The `EDGES_MAP_PTR_SIZE` needs to be smaller than, or equal to the size of the map. #[must_use] -pub fn edges_map_from_ptr<'a>() -> &'a mut [u8] { - unsafe { - debug_assert!(!EDGES_MAP_PTR.is_null()); - from_raw_parts_mut(EDGES_MAP_PTR, EDGES_MAP_PTR_SIZE) - } +pub unsafe fn edges_map_from_ptr<'a>() -> OwnedSliceMut<'a, u8> { + debug_assert!(!EDGES_MAP_PTR.is_null()); + OwnedSliceMut::from_raw_parts_mut(EDGES_MAP_PTR, EDGES_MAP_PTR_SIZE) } /// Gets the current maximum number of edges tracked. 
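The patch above replaces the public `OwnedSlice`/`OwnedSliceMut` enums with opaque wrappers whose raw-pointer variant is only reachable through an explicitly `unsafe` constructor; safe construction goes through `From` impls. Below is a minimal usage sketch of that post-patch `libafl::bolts::ownedref` API (the variable names and the `main` harness are illustrative only, not part of the patch):

    use libafl::bolts::ownedref::{OwnedSlice, OwnedSliceMut};

    fn main() {
        // Owned variant: the Vec is moved in and serialized as-is.
        let owned = OwnedSlice::from(vec![1u8, 2, 3]);

        // Borrowed variant: wraps the reference, converted to an owned Vec on serialize.
        let backing = vec![4u8, 5, 6];
        let borrowed = OwnedSlice::from(&backing);

        // Raw-pointer variant: only reachable through the explicitly unsafe constructor,
        // mirroring how `edges_map_from_ptr` now hands out the edges map.
        let mut map = [0u8; 16];
        let raw = unsafe { OwnedSliceMut::from_raw_parts_mut(map.as_mut_ptr(), map.len()) };

        // Every variant exposes the same safe accessor.
        assert_eq!(owned.as_slice(), &[1, 2, 3]);
        assert_eq!(borrowed.as_slice(), &[4, 5, 6]);
        assert_eq!(raw.as_slice().len(), 16);
    }

Keeping the raw-pointer variant behind the hidden inner enum means callers such as `StdMapObserver::new_from_ptr` stay `unsafe` at exactly one point, while serialization still copies the pointed-to memory into an owned `Vec`.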
From 250ec8d1e0c43841d3fe1f4678a231d1545134d6 Mon Sep 17 00:00:00 2001 From: Evan Richter Date: Thu, 6 Jan 2022 03:41:02 -0600 Subject: [PATCH 24/25] Reduce generics for various Has* traits (#456) Specifically for Has{Rand,Corpus,Solutions,FeedbackStates} The Has* family of traits offer getters and get-mut-ers. The previous implementation had a fully generic return type: trait HasX { get_x(&self) -> &Self::X; get_mut_x(&mut self) -> &mut Self::X; } meaning a single type could implement both `HasRand` and `HasRand`. The advantage of having multiple implementations is not clear at this time, so it vastly simplifies the trait (and its impls) to bring the return type in the body as an associated type: trait HasX { type X: TraitX; get_x(&self) -> &Self::X; get_mut_x(&mut self) -> &mut Self::X; } This comes with the limitation that any type that impls these traits can only do so once, choosing only one associated type. * HasRand's only generic parameter (Rand) is now an associated type * HasCorpus and HasSolutions are now only generic over the Input type they store * HasFeedbackStates generic parameter now associated type --- fuzzers/push_stage_harness/src/main.rs | 2 +- fuzzers/tutorial/src/metadata.rs | 4 +- fuzzers/tutorial/src/mutator.rs | 33 +- libafl/src/corpus/minimizer.rs | 27 +- libafl/src/corpus/mod.rs | 40 +- libafl/src/corpus/powersched.rs | 33 +- libafl/src/corpus/queue.rs | 33 +- libafl/src/events/simple.rs | 49 +- libafl/src/executors/inprocess.rs | 54 +- libafl/src/feedbacks/map.rs | 42 +- libafl/src/fuzzer/mod.rs | 40 +- libafl/src/generators/gramatron.rs | 17 +- libafl/src/generators/mod.rs | 34 +- libafl/src/mutators/encoded_mutations.rs | 311 +++-------- libafl/src/mutators/gramatron.rs | 90 +--- libafl/src/mutators/mopt_mutator.rs | 44 +- libafl/src/mutators/mutations.rs | 659 +++++------------------ libafl/src/mutators/nautilus.rs | 26 +- libafl/src/mutators/scheduled.rs | 148 ++--- libafl/src/mutators/token_mutations.rs | 98 +--- libafl/src/stages/calibrate.rs | 31 +- libafl/src/stages/concolic.rs | 36 +- libafl/src/stages/mod.rs | 30 +- libafl/src/stages/mutational.rs | 32 +- libafl/src/stages/power.rs | 26 +- libafl/src/stages/push/mod.rs | 55 +- libafl/src/stages/push/mutational.rs | 40 +- libafl/src/stages/sync.rs | 30 +- libafl/src/stages/tracing.rs | 33 +- libafl/src/state/mod.rs | 66 +-- libafl_qemu/src/executor.rs | 6 +- 31 files changed, 586 insertions(+), 1583 deletions(-) diff --git a/fuzzers/push_stage_harness/src/main.rs b/fuzzers/push_stage_harness/src/main.rs index 840fd01978..f2fed619bc 100644 --- a/fuzzers/push_stage_harness/src/main.rs +++ b/fuzzers/push_stage_harness/src/main.rs @@ -41,7 +41,7 @@ pub fn main() { let feedback_state = MapFeedbackState::with_observer(&observer); // Feedback to rate the interestingness of an input - let feedback = MaxMapFeedback::<_, BytesInput, _, _, _>::new(&feedback_state, &observer); + let feedback = MaxMapFeedback::::new(&feedback_state, &observer); // A feedback to choose if an input is a solution or not let objective = CrashFeedback::new(); diff --git a/fuzzers/tutorial/src/metadata.rs b/fuzzers/tutorial/src/metadata.rs index 4ec380751c..e41fc8dec8 100644 --- a/fuzzers/tutorial/src/metadata.rs +++ b/fuzzers/tutorial/src/metadata.rs @@ -29,8 +29,8 @@ impl FavFactor for PacketLenFavFactor { } } -pub type PacketLenMinimizerCorpusScheduler = - MinimizerCorpusScheduler; +pub type PacketLenMinimizerCorpusScheduler = + MinimizerCorpusScheduler; #[derive(Serialize, Deserialize, Default, Clone, Debug)] pub struct 
PacketLenFeedback { diff --git a/fuzzers/tutorial/src/mutator.rs b/fuzzers/tutorial/src/mutator.rs index d560967b2a..836edd8f57 100644 --- a/fuzzers/tutorial/src/mutator.rs +++ b/fuzzers/tutorial/src/mutator.rs @@ -10,23 +10,13 @@ use libafl::{ use crate::input::PacketData; -use core::marker::PhantomData; use lain::traits::Mutatable; -pub struct LainMutator -where - S: HasRand, - R: Rand, -{ +pub struct LainMutator { inner: lain::mutator::Mutator, - phantom: PhantomData<*const (R, S)>, } -impl Mutator for LainMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for LainMutator { fn mutate( &mut self, state: &mut S, @@ -40,35 +30,22 @@ where } } -impl Named for LainMutator -where - S: HasRand, - R: Rand, -{ +impl Named for LainMutator { fn name(&self) -> &str { "LainMutator" } } -impl LainMutator -where - S: HasRand, - R: Rand, -{ +impl LainMutator { #[must_use] pub fn new() -> Self { Self { inner: lain::mutator::Mutator::new(StdRand::with_seed(0)), - phantom: PhantomData, } } } -impl Default for LainMutator -where - S: HasRand, - R: Rand, -{ +impl Default for LainMutator { #[must_use] fn default() -> Self { Self::new() diff --git a/libafl/src/corpus/minimizer.rs b/libafl/src/corpus/minimizer.rs index e6b661fad2..5db16136c1 100644 --- a/libafl/src/corpus/minimizer.rs +++ b/libafl/src/corpus/minimizer.rs @@ -81,29 +81,26 @@ where /// corpus that exercise all the requested features (e.g. all the coverage seen so far) /// prioritizing [`Testcase`]`s` using [`FavFactor`] #[derive(Debug, Clone)] -pub struct MinimizerCorpusScheduler +pub struct MinimizerCorpusScheduler where CS: CorpusScheduler, F: FavFactor, I: Input, M: AsSlice + SerdeAny + HasRefCnt, - S: HasCorpus + HasMetadata, - C: Corpus, + S: HasCorpus + HasMetadata, { base: CS, skip_non_favored_prob: u64, - phantom: PhantomData<(C, F, I, M, R, S)>, + phantom: PhantomData<(F, I, M, S)>, } -impl CorpusScheduler for MinimizerCorpusScheduler +impl CorpusScheduler for MinimizerCorpusScheduler where CS: CorpusScheduler, F: FavFactor, I: Input, M: AsSlice + SerdeAny + HasRefCnt, - S: HasCorpus + HasMetadata + HasRand, - C: Corpus, - R: Rand, + S: HasCorpus + HasMetadata + HasRand, { /// Add an entry to the corpus and return its index fn on_add(&self, state: &mut S, idx: usize) -> Result<(), Error> { @@ -145,15 +142,13 @@ where } } -impl MinimizerCorpusScheduler +impl MinimizerCorpusScheduler where CS: CorpusScheduler, F: FavFactor, I: Input, M: AsSlice + SerdeAny + HasRefCnt, - S: HasCorpus + HasMetadata + HasRand, - C: Corpus, - R: Rand, + S: HasCorpus + HasMetadata + HasRand, { /// Update the `Corpus` score using the `MinimizerCorpusScheduler` #[allow(clippy::unused_self)] @@ -284,10 +279,10 @@ where } /// A [`MinimizerCorpusScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s`. -pub type LenTimeMinimizerCorpusScheduler = - MinimizerCorpusScheduler, I, M, R, S>; +pub type LenTimeMinimizerCorpusScheduler = + MinimizerCorpusScheduler, I, M, S>; /// A [`MinimizerCorpusScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s` /// that exercise all the entries registered in the [`MapIndexesMetadata`]. 
-pub type IndexesLenTimeMinimizerCorpusScheduler = - MinimizerCorpusScheduler, I, MapIndexesMetadata, R, S>; +pub type IndexesLenTimeMinimizerCorpusScheduler = + MinimizerCorpusScheduler, I, MapIndexesMetadata, S>; diff --git a/libafl/src/corpus/mod.rs b/libafl/src/corpus/mod.rs index 77a0b30d35..a474c16480 100644 --- a/libafl/src/corpus/mod.rs +++ b/libafl/src/corpus/mod.rs @@ -30,7 +30,7 @@ pub mod powersched; pub use powersched::PowerQueueCorpusScheduler; use alloc::borrow::ToOwned; -use core::{cell::RefCell, marker::PhantomData}; +use core::cell::RefCell; use crate::{ bolts::rands::Rand, @@ -108,22 +108,12 @@ where /// Feed the fuzzer simpply with a random testcase on request #[derive(Debug, Clone)] -pub struct RandCorpusScheduler -where - S: HasCorpus + HasRand, - C: Corpus, - I: Input, - R: Rand, -{ - phantom: PhantomData<(C, I, R, S)>, -} +pub struct RandCorpusScheduler; -impl CorpusScheduler for RandCorpusScheduler +impl CorpusScheduler for RandCorpusScheduler where - S: HasCorpus + HasRand, - C: Corpus, + S: HasCorpus + HasRand, I: Input, - R: Rand, { /// Gets the next entry at random fn next(&self, state: &mut S) -> Result { @@ -138,29 +128,15 @@ where } } -impl RandCorpusScheduler -where - S: HasCorpus + HasRand, - C: Corpus, - I: Input, - R: Rand, -{ +impl RandCorpusScheduler { /// Create a new [`RandCorpusScheduler`] that just schedules randomly. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } -impl Default for RandCorpusScheduler -where - S: HasCorpus + HasRand, - C: Corpus, - I: Input, - R: Rand, -{ +impl Default for RandCorpusScheduler { fn default() -> Self { Self::new() } @@ -168,4 +144,4 @@ where /// A [`StdCorpusScheduler`] uses the default scheduler in `LibAFL` to schedule [`Testcase`]s /// The current `Std` is a [`RandCorpusScheduler`], although this may change in the future, if another [`CorpusScheduler`] delivers better results. -pub type StdCorpusScheduler = RandCorpusScheduler; +pub type StdCorpusScheduler = RandCorpusScheduler; diff --git a/libafl/src/corpus/powersched.rs b/libafl/src/corpus/powersched.rs index 987ff55460..8d837508ea 100644 --- a/libafl/src/corpus/powersched.rs +++ b/libafl/src/corpus/powersched.rs @@ -1,7 +1,6 @@ //! The queue corpus scheduler for power schedules. 
use alloc::string::{String, ToString}; -use core::marker::PhantomData; use crate::{ corpus::{Corpus, CorpusScheduler, PowerScheduleTestcaseMetaData}, @@ -13,30 +12,17 @@ use crate::{ /// A corpus scheduler using power schedules #[derive(Clone, Debug)] -pub struct PowerQueueCorpusScheduler -where - S: HasCorpus + HasMetadata, - C: Corpus, - I: Input, -{ - phantom: PhantomData<(C, I, S)>, -} +pub struct PowerQueueCorpusScheduler; -impl Default for PowerQueueCorpusScheduler -where - S: HasCorpus + HasMetadata, - C: Corpus, - I: Input, -{ +impl Default for PowerQueueCorpusScheduler { fn default() -> Self { Self::new() } } -impl CorpusScheduler for PowerQueueCorpusScheduler +impl CorpusScheduler for PowerQueueCorpusScheduler where - S: HasCorpus + HasMetadata, - C: Corpus, + S: HasCorpus + HasMetadata, I: Input, { /// Add an entry to the corpus and return its index @@ -92,17 +78,10 @@ where } } -impl PowerQueueCorpusScheduler -where - S: HasCorpus + HasMetadata, - C: Corpus, - I: Input, -{ +impl PowerQueueCorpusScheduler { /// Create a new [`PowerQueueCorpusScheduler`] #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } diff --git a/libafl/src/corpus/queue.rs b/libafl/src/corpus/queue.rs index d434d1ab93..96a9bbfd39 100644 --- a/libafl/src/corpus/queue.rs +++ b/libafl/src/corpus/queue.rs @@ -1,7 +1,6 @@ //! The queue corpus scheduler implements an AFL-like queue mechanism use alloc::borrow::ToOwned; -use core::marker::PhantomData; use crate::{ corpus::{Corpus, CorpusScheduler}, @@ -12,19 +11,11 @@ use crate::{ /// Walk the corpus in a queue-like fashion #[derive(Debug, Clone)] -pub struct QueueCorpusScheduler -where - S: HasCorpus, - C: Corpus, - I: Input, -{ - phantom: PhantomData<(C, I, S)>, -} +pub struct QueueCorpusScheduler; -impl CorpusScheduler for QueueCorpusScheduler +impl CorpusScheduler for QueueCorpusScheduler where - S: HasCorpus, - C: Corpus, + S: HasCorpus, I: Input, { /// Gets the next entry in the queue @@ -48,27 +39,15 @@ where } } -impl QueueCorpusScheduler -where - S: HasCorpus, - C: Corpus, - I: Input, -{ +impl QueueCorpusScheduler { /// Creates a new `QueueCorpusScheduler` #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } -impl Default for QueueCorpusScheduler -where - S: HasCorpus, - C: Corpus, - I: Input, -{ +impl Default for QueueCorpusScheduler { fn default() -> Self { Self::new() } diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index c4197aa196..6c65301022 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -11,10 +11,7 @@ use crate::{ }; use alloc::{string::ToString, vec::Vec}; #[cfg(feature = "std")] -use core::{ - marker::PhantomData, - sync::atomic::{compiler_fence, Ordering}, -}; +use core::sync::atomic::{compiler_fence, Ordering}; #[cfg(feature = "std")] use serde::{de::DeserializeOwned, Serialize}; @@ -232,11 +229,9 @@ where #[cfg(feature = "std")] #[allow(clippy::default_trait_access)] #[derive(Debug, Clone)] -pub struct SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +pub struct SimpleRestartingEventManager where - C: Corpus, I: Input, - S: Serialize, SP: ShMemProvider, MT: Monitor, //CE: CustomEvent, { @@ -244,17 +239,12 @@ where simple_event_mgr: SimpleEventManager, /// [`StateRestorer`] for restarts staterestorer: StateRestorer, - /// Phantom data - _phantom: PhantomData<&'a (C, I, S, SC)>, } #[cfg(feature = "std")] -impl<'a, C, I, MT, S, SC, SP> EventFirer - for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl EventFirer for 
SimpleRestartingEventManager where - C: Corpus, I: Input, - S: Serialize, SP: ShMemProvider, MT: Monitor, //CE: CustomEvent, { @@ -264,10 +254,8 @@ where } #[cfg(feature = "std")] -impl<'a, C, I, MT, S, SC, SP> EventRestarter - for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl EventRestarter for SimpleRestartingEventManager where - C: Corpus, I: Input, S: Serialize, SP: ShMemProvider, @@ -282,10 +270,8 @@ where } #[cfg(feature = "std")] -impl<'a, C, E, I, MT, S, SC, SP, Z> EventProcessor - for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl EventProcessor for SimpleRestartingEventManager where - C: Corpus, I: Input, S: Serialize, SP: ShMemProvider, @@ -297,10 +283,8 @@ where } #[cfg(feature = "std")] -impl<'a, C, E, I, MT, S, SC, SP, Z> EventManager - for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl EventManager for SimpleRestartingEventManager where - C: Corpus, I: Input, S: Serialize, SP: ShMemProvider, @@ -309,24 +293,18 @@ where } #[cfg(feature = "std")] -impl<'a, C, I, MT, S, SC, SP> ProgressReporter - for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl ProgressReporter for SimpleRestartingEventManager where I: Input, - C: Corpus, - S: Serialize, SP: ShMemProvider, MT: Monitor, //CE: CustomEvent, { } #[cfg(feature = "std")] -impl<'a, C, I, MT, S, SC, SP> HasEventManagerId - for SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl HasEventManagerId for SimpleRestartingEventManager where - C: Corpus, I: Input, - S: Serialize, SP: ShMemProvider, MT: Monitor, { @@ -337,12 +315,9 @@ where #[cfg(feature = "std")] #[allow(clippy::type_complexity, clippy::too_many_lines)] -impl<'a, C, I, MT, S, SC, SP> SimpleRestartingEventManager<'a, C, I, MT, S, SC, SP> +impl<'a, I, MT, SP> SimpleRestartingEventManager where - C: Corpus, I: Input, - S: DeserializeOwned + Serialize + HasCorpus + HasSolutions, - SC: Corpus, SP: ShMemProvider, MT: Monitor, //TODO CE: CustomEvent, { @@ -351,7 +326,6 @@ where Self { staterestorer, simple_event_mgr: SimpleEventManager::new(monitor), - _phantom: PhantomData {}, } } @@ -359,7 +333,10 @@ where /// This [`EventManager`] is simple and single threaded, /// but can still used shared maps to recover from crashes and timeouts. #[allow(clippy::similar_names)] - pub fn launch(mut monitor: MT, shmem_provider: &mut SP) -> Result<(Option, Self), Error> { + pub fn launch(mut monitor: MT, shmem_provider: &mut SP) -> Result<(Option, Self), Error> + where + S: DeserializeOwned + Serialize + HasCorpus + HasSolutions, + { // We start ourself as child process to actually fuzz let mut staterestorer = if std::env::var(_ENV_FUZZER_SENDER).is_err() { // First, create a place to store state in, for restarts. 
diff --git a/libafl/src/executors/inprocess.rs b/libafl/src/executors/inprocess.rs index 03c5531dce..2d177ca3e4 100644 --- a/libafl/src/executors/inprocess.rs +++ b/libafl/src/executors/inprocess.rs @@ -34,7 +34,6 @@ use crate::bolts::os::windows_exceptions::setup_exception_handler; use windows::Win32::System::Threading::SetThreadStackGuarantee; use crate::{ - corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{Executor, ExitKind, HasObservers}, feedbacks::Feedback, @@ -127,7 +126,7 @@ where /// * `harness_fn` - the harness, executiong the function /// * `observers` - the observers observing the target during execution /// This may return an error on unix, if signal handler setup fails - pub fn new( + pub fn new( harness_fn: &'a mut H, observers: OT, _fuzzer: &mut Z, @@ -136,12 +135,11 @@ where ) -> Result where EM: EventFirer + EventRestarter, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, Z: HasObjective, { - let handlers = InProcessHandlers::new::()?; + let handlers = InProcessHandlers::new::()?; #[cfg(windows)] unsafe { /* @@ -267,15 +265,14 @@ impl InProcessHandlers { } /// Create new [`InProcessHandlers`]. - pub fn new() -> Result + pub fn new() -> Result where I: Input, E: HasObservers, OT: ObserversTuple, EM: EventFirer + EventRestarter, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, Z: HasObjective, { #[cfg(unix)] @@ -285,18 +282,10 @@ impl InProcessHandlers { compiler_fence(Ordering::SeqCst); Ok(Self { - crash_handler: unix_signal_handler::inproc_crash_handler:: + crash_handler: unix_signal_handler::inproc_crash_handler:: + as *const _, + timeout_handler: unix_signal_handler::inproc_timeout_handler:: as *const _, - timeout_handler: unix_signal_handler::inproc_timeout_handler::< - E, - EM, - I, - OC, - OF, - OT, - S, - Z, - > as *const _, }) } #[cfg(all(windows, feature = "std"))] @@ -310,7 +299,6 @@ impl InProcessHandlers { E, EM, I, - OC, OF, OT, S, @@ -320,7 +308,6 @@ impl InProcessHandlers { E, EM, I, - OC, OF, OT, S, @@ -493,7 +480,7 @@ mod unix_signal_handler { } #[cfg(unix)] - pub unsafe fn inproc_timeout_handler( + pub unsafe fn inproc_timeout_handler( _signal: Signal, _info: siginfo_t, _context: &mut ucontext_t, @@ -502,9 +489,8 @@ mod unix_signal_handler { E: HasObservers, EM: EventFirer + EventRestarter, OT: ObserversTuple, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, I: Input, Z: HasObjective, { @@ -571,7 +557,7 @@ mod unix_signal_handler { /// Will be used for signal handling. /// It will store the current State to shmem, then exit. 
#[allow(clippy::too_many_lines)] - pub unsafe fn inproc_crash_handler( + pub unsafe fn inproc_crash_handler( signal: Signal, _info: siginfo_t, _context: &mut ucontext_t, @@ -580,9 +566,8 @@ mod unix_signal_handler { E: HasObservers, EM: EventFirer + EventRestarter, OT: ObserversTuple, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, I: Input, Z: HasObjective, { @@ -748,7 +733,7 @@ mod windows_exception_handler { EnterCriticalSection, LeaveCriticalSection, RTL_CRITICAL_SECTION, }; - pub unsafe extern "system" fn inproc_timeout_handler( + pub unsafe extern "system" fn inproc_timeout_handler( _p0: *mut u8, global_state: *mut c_void, _p1: *mut u8, @@ -756,9 +741,8 @@ mod windows_exception_handler { E: HasObservers, EM: EventFirer + EventRestarter, OT: ObserversTuple, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, I: Input, Z: HasObjective, { @@ -847,16 +831,15 @@ mod windows_exception_handler { // println!("TIMER INVOKED!"); } - pub unsafe fn inproc_crash_handler( + pub unsafe fn inproc_crash_handler( exception_pointers: *mut EXCEPTION_POINTERS, data: &mut InProcessExecutorHandlerData, ) where E: HasObservers, EM: EventFirer + EventRestarter, OT: ObserversTuple, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, I: Input, Z: HasObjective, { @@ -1085,7 +1068,7 @@ where SP: ShMemProvider, { /// Creates a new [`InProcessForkExecutor`] - pub fn new( + pub fn new( harness_fn: &'a mut H, observers: OT, _fuzzer: &mut Z, @@ -1095,9 +1078,8 @@ where ) -> Result where EM: EventFirer + EventRestarter, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, Z: HasObjective, { Ok(Self { diff --git a/libafl/src/feedbacks/map.rs b/libafl/src/feedbacks/map.rs index 2fd6e24c5a..8abf2dd24d 100644 --- a/libafl/src/feedbacks/map.rs +++ b/libafl/src/feedbacks/map.rs @@ -9,11 +9,14 @@ use num_traits::PrimInt; use serde::{Deserialize, Serialize}; use crate::{ - bolts::{tuples::Named, AsSlice, HasRefCnt}, + bolts::{ + tuples::{MatchName, Named}, + AsSlice, HasRefCnt, + }, corpus::Testcase, events::{Event, EventFirer}, executors::ExitKind, - feedbacks::{Feedback, FeedbackState, FeedbackStatesTuple}, + feedbacks::{Feedback, FeedbackState}, inputs::Input, monitors::UserStats, observers::{MapObserver, ObserversTuple}, @@ -22,21 +25,20 @@ use crate::{ }; /// A [`MapFeedback`] that implements the AFL algorithm using an [`OrReducer`] combining the bits for the history map and the bit from ``HitcountsMapObserver``. -pub type AflMapFeedback = MapFeedback; +pub type AflMapFeedback = MapFeedback; /// A [`MapFeedback`] that strives to maximize the map contents. -pub type MaxMapFeedback = MapFeedback; +pub type MaxMapFeedback = MapFeedback; /// A [`MapFeedback`] that strives to minimize the map contents. -pub type MinMapFeedback = MapFeedback; +pub type MinMapFeedback = MapFeedback; /// A [`MapFeedback`] that strives to maximize the map contents, /// but only, if a value is larger than `pow2` of the previous. -pub type MaxMapPow2Feedback = - MapFeedback; +pub type MaxMapPow2Feedback = MapFeedback; /// A [`MapFeedback`] that strives to maximize the map contents, /// but only, if a value is larger than `pow2` of the previous. 
-pub type MaxMapOneOrFilledFeedback = - MapFeedback; +pub type MaxMapOneOrFilledFeedback = + MapFeedback; /// A `Reducer` function is used to aggregate values for the novelty search pub trait Reducer: Serialize + serde::de::DeserializeOwned + 'static + Debug @@ -329,14 +331,13 @@ where /// The most common AFL-like feedback type #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(bound = "T: serde::de::DeserializeOwned")] -pub struct MapFeedback +pub struct MapFeedback where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, R: Reducer, O: MapObserver, N: IsNovel, - S: HasFeedbackStates, - FT: FeedbackStatesTuple, + S: HasFeedbackStates, { /// Indexes used in the last observation indexes: Option>, @@ -347,18 +348,17 @@ where /// Name identifier of the observer observer_name: String, /// Phantom Data of Reducer - phantom: PhantomData<(FT, I, N, S, R, O, T)>, + phantom: PhantomData<(I, N, S, R, O, T)>, } -impl Feedback for MapFeedback +impl Feedback for MapFeedback where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, R: Reducer, O: MapObserver, N: IsNovel, I: Input, - S: HasFeedbackStates + HasClientPerfMonitor + Debug, - FT: FeedbackStatesTuple, + S: HasFeedbackStates + HasClientPerfMonitor + Debug, { fn is_interesting( &mut self, @@ -459,14 +459,13 @@ where } } -impl Named for MapFeedback +impl Named for MapFeedback where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, R: Reducer, N: IsNovel, O: MapObserver, - S: HasFeedbackStates, - FT: FeedbackStatesTuple, + S: HasFeedbackStates, { #[inline] fn name(&self) -> &str { @@ -474,7 +473,7 @@ where } } -impl MapFeedback +impl MapFeedback where T: PrimInt + Default @@ -487,8 +486,7 @@ where R: Reducer, N: IsNovel, O: MapObserver, - S: HasFeedbackStates, - FT: FeedbackStatesTuple, + S: HasFeedbackStates, { /// Create new `MapFeedback` #[must_use] diff --git a/libafl/src/fuzzer/mod.rs b/libafl/src/fuzzer/mod.rs index e8d3d4476a..cc049bd84c 100644 --- a/libafl/src/fuzzer/mod.rs +++ b/libafl/src/fuzzer/mod.rs @@ -233,7 +233,7 @@ pub enum ExecuteInputResult { /// Your default fuzzer instance, for everyday use. 
#[derive(Debug)] -pub struct StdFuzzer +pub struct StdFuzzer where CS: CorpusScheduler, F: Feedback, @@ -244,11 +244,10 @@ where scheduler: CS, feedback: F, objective: OF, - phantom: PhantomData<(C, I, OT, S, SC)>, + phantom: PhantomData<(I, OT, S)>, } -impl HasCorpusScheduler - for StdFuzzer +impl HasCorpusScheduler for StdFuzzer where CS: CorpusScheduler, F: Feedback, @@ -265,7 +264,7 @@ where } } -impl HasFeedback for StdFuzzer +impl HasFeedback for StdFuzzer where CS: CorpusScheduler, F: Feedback, @@ -282,7 +281,7 @@ where } } -impl HasObjective for StdFuzzer +impl HasObjective for StdFuzzer where CS: CorpusScheduler, F: Feedback, @@ -299,17 +298,14 @@ where } } -impl ExecutionProcessor - for StdFuzzer +impl ExecutionProcessor for StdFuzzer where - C: Corpus, - SC: Corpus, CS: CorpusScheduler, F: Feedback, I: Input, OF: Feedback, OT: ObserversTuple + serde::Serialize + serde::de::DeserializeOwned, - S: HasCorpus + HasSolutions + HasClientPerfMonitor + HasExecutions, + S: HasCorpus + HasSolutions + HasClientPerfMonitor + HasExecutions, { /// Evaluate if a set of observation channels has an interesting state fn process_execution( @@ -416,17 +412,14 @@ where } } -impl EvaluatorObservers - for StdFuzzer +impl EvaluatorObservers for StdFuzzer where - C: Corpus, CS: CorpusScheduler, OT: ObserversTuple + serde::Serialize + serde::de::DeserializeOwned, F: Feedback, I: Input, OF: Feedback, - S: HasCorpus + HasSolutions + HasClientPerfMonitor + HasExecutions, - SC: Corpus, + S: HasCorpus + HasSolutions + HasClientPerfMonitor + HasExecutions, { /// Process one input, adding to the respective corpuses if needed and firing the right events #[inline] @@ -448,10 +441,8 @@ where } } -impl Evaluator - for StdFuzzer +impl Evaluator for StdFuzzer where - C: Corpus, CS: CorpusScheduler, E: Executor + HasObservers, OT: ObserversTuple + serde::Serialize + serde::de::DeserializeOwned, @@ -459,8 +450,7 @@ where F: Feedback, I: Input, OF: Feedback, - S: HasCorpus + HasSolutions + HasClientPerfMonitor + HasExecutions, - SC: Corpus, + S: HasCorpus + HasSolutions + HasClientPerfMonitor + HasExecutions, { /// Process one input, adding to the respective corpuses if needed and firing the right events #[inline] @@ -517,8 +507,7 @@ where } } -impl Fuzzer - for StdFuzzer +impl Fuzzer for StdFuzzer where CS: CorpusScheduler, EM: EventManager, @@ -568,7 +557,7 @@ where } } -impl StdFuzzer +impl StdFuzzer where CS: CorpusScheduler, F: Feedback, @@ -635,8 +624,7 @@ where OT: ObserversTuple; } -impl ExecutesInput - for StdFuzzer +impl ExecutesInput for StdFuzzer where CS: CorpusScheduler, F: Feedback, diff --git a/libafl/src/generators/gramatron.rs b/libafl/src/generators/gramatron.rs index c14cfbec30..13697f6b69 100644 --- a/libafl/src/generators/gramatron.rs +++ b/libafl/src/generators/gramatron.rs @@ -33,19 +33,17 @@ pub struct Automaton { #[derive(Clone, Debug)] /// Generates random inputs from a grammar automatron -pub struct GramatronGenerator<'a, R, S> +pub struct GramatronGenerator<'a, S> where - R: Rand, - S: HasRand, + S: HasRand, { automaton: &'a Automaton, - phantom: PhantomData<(R, S)>, + phantom: PhantomData, } -impl<'a, R, S> Generator for GramatronGenerator<'a, R, S> +impl<'a, S> Generator for GramatronGenerator<'a, S> where - R: Rand, - S: HasRand, + S: HasRand, { fn generate(&mut self, state: &mut S) -> Result { let mut input = GramatronInput::new(vec![]); @@ -58,10 +56,9 @@ where } } -impl<'a, R, S> GramatronGenerator<'a, R, S> +impl<'a, S> GramatronGenerator<'a, S> where - R: Rand, - S: HasRand, + S: 
HasRand, { /// Returns a new [`GramatronGenerator`] #[must_use] diff --git a/libafl/src/generators/mod.rs b/libafl/src/generators/mod.rs index 42d90d567b..e9871be5a3 100644 --- a/libafl/src/generators/mod.rs +++ b/libafl/src/generators/mod.rs @@ -35,19 +35,17 @@ where #[derive(Clone, Debug)] /// Generates random bytes -pub struct RandBytesGenerator +pub struct RandBytesGenerator where - R: Rand, - S: HasRand, + S: HasRand, { max_size: usize, - phantom: PhantomData<(R, S)>, + phantom: PhantomData, } -impl Generator for RandBytesGenerator +impl Generator for RandBytesGenerator where - R: Rand, - S: HasRand, + S: HasRand, { fn generate(&mut self, state: &mut S) -> Result { let mut size = state.rand_mut().below(self.max_size as u64); @@ -67,10 +65,9 @@ where } } -impl RandBytesGenerator +impl RandBytesGenerator where - R: Rand, - S: HasRand, + S: HasRand, { /// Returns a new [`RandBytesGenerator`], generating up to `max_size` random bytes. #[must_use] @@ -84,19 +81,17 @@ where #[derive(Clone, Debug)] /// Generates random printable characters -pub struct RandPrintablesGenerator +pub struct RandPrintablesGenerator where - R: Rand, - S: HasRand, + S: HasRand, { max_size: usize, - phantom: PhantomData<(R, S)>, + phantom: PhantomData, } -impl Generator for RandPrintablesGenerator +impl Generator for RandPrintablesGenerator where - R: Rand, - S: HasRand, + S: HasRand, { fn generate(&mut self, state: &mut S) -> Result { let mut size = state.rand_mut().below(self.max_size as u64); @@ -117,10 +112,9 @@ where } } -impl RandPrintablesGenerator +impl RandPrintablesGenerator where - R: Rand, - S: HasRand, + S: HasRand, { /// Creates a new [`RandPrintablesGenerator`], generating up to `max_size` random printable characters. #[must_use] diff --git a/libafl/src/mutators/encoded_mutations.rs b/libafl/src/mutators/encoded_mutations.rs index 6e8d8c678a..89ef123dde 100644 --- a/libafl/src/mutators/encoded_mutations.rs +++ b/libafl/src/mutators/encoded_mutations.rs @@ -1,10 +1,7 @@ //! Mutations for [`EncodedInput`]s //! use alloc::vec::Vec; -use core::{ - cmp::{max, min}, - marker::PhantomData, -}; +use core::cmp::{max, min}; use crate::{ bolts::{ @@ -23,19 +20,9 @@ use crate::{ /// Set a code in the input as a random value #[derive(Debug, Default)] -pub struct EncodedRandMutator -where - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(R, S)>, -} +pub struct EncodedRandMutator; -impl Mutator for EncodedRandMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for EncodedRandMutator { fn mutate( &mut self, state: &mut S, @@ -52,45 +39,25 @@ where } } -impl Named for EncodedRandMutator -where - S: HasRand, - R: Rand, -{ +impl Named for EncodedRandMutator { fn name(&self) -> &str { "EncodedRandMutator" } } -impl EncodedRandMutator -where - S: HasRand, - R: Rand, -{ +impl EncodedRandMutator { /// Creates a new [`EncodedRandMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Increment a random code in the input #[derive(Debug, Default)] -pub struct EncodedIncMutator -where - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(R, S)>, -} +pub struct EncodedIncMutator; -impl Mutator for EncodedIncMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for EncodedIncMutator { fn mutate( &mut self, state: &mut S, @@ -107,45 +74,25 @@ where } } -impl Named for EncodedIncMutator -where - S: HasRand, - R: Rand, -{ +impl Named for EncodedIncMutator { fn name(&self) -> &str { "EncodedIncMutator" } } -impl EncodedIncMutator -where - S: HasRand, - R: Rand, -{ - /// Creates a new [`EncodedRandMutator`]. +impl EncodedIncMutator { + /// Creates a new [`EncodedIncMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Decrement a random code in the input #[derive(Debug, Default)] -pub struct EncodedDecMutator -where - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(R, S)>, -} +pub struct EncodedDecMutator; -impl Mutator for EncodedDecMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for EncodedDecMutator { fn mutate( &mut self, state: &mut S, @@ -162,45 +109,25 @@ where } } -impl Named for EncodedDecMutator -where - S: HasRand, - R: Rand, -{ +impl Named for EncodedDecMutator { fn name(&self) -> &str { "EncodedDecMutator" } } -impl EncodedDecMutator -where - S: HasRand, - R: Rand, -{ - /// Creates a new [`EncodedRandMutator`]. +impl EncodedDecMutator { + /// Creates a new [`EncodedDecMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Adds or subtracts a random value up to `ARITH_MAX` to a random place in the codes [`Vec`]. #[derive(Debug, Default)] -pub struct EncodedAddMutator -where - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(R, S)>, -} +pub struct EncodedAddMutator; -impl Mutator for EncodedAddMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for EncodedAddMutator { fn mutate( &mut self, state: &mut S, @@ -221,45 +148,25 @@ where } } -impl Named for EncodedAddMutator -where - S: HasRand, - R: Rand, -{ +impl Named for EncodedAddMutator { fn name(&self) -> &str { "EncodedAddMutator" } } -impl EncodedAddMutator -where - S: HasRand, - R: Rand, -{ +impl EncodedAddMutator { /// Creates a new [`EncodedAddMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Codes delete mutation for encoded inputs #[derive(Debug, Default)] -pub struct EncodedDeleteMutator -where - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(R, S)>, -} +pub struct EncodedDeleteMutator; -impl Mutator for EncodedDeleteMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for EncodedDeleteMutator { fn mutate( &mut self, state: &mut S, @@ -279,45 +186,29 @@ where } } -impl Named for EncodedDeleteMutator -where - S: HasRand, - R: Rand, -{ +impl Named for EncodedDeleteMutator { fn name(&self) -> &str { "EncodedDeleteMutator" } } -impl EncodedDeleteMutator -where - S: HasRand, - R: Rand, -{ +impl EncodedDeleteMutator { /// Creates a new [`EncodedDeleteMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Insert mutation for encoded inputs #[derive(Debug, Default)] -pub struct EncodedInsertCopyMutator -where - S: HasRand + HasMaxSize, - R: Rand, -{ +pub struct EncodedInsertCopyMutator { tmp_buf: Vec, - phantom: PhantomData<(R, S)>, } -impl Mutator for EncodedInsertCopyMutator +impl Mutator for EncodedInsertCopyMutator where - S: HasRand + HasMaxSize, - R: Rand, + S: HasRand + HasMaxSize, { fn mutate( &mut self, @@ -358,46 +249,25 @@ where } } -impl Named for EncodedInsertCopyMutator -where - S: HasRand + HasMaxSize, - R: Rand, -{ +impl Named for EncodedInsertCopyMutator { fn name(&self) -> &str { "EncodedInsertCopyMutator" } } -impl EncodedInsertCopyMutator -where - S: HasRand + HasMaxSize, - R: Rand, -{ +impl EncodedInsertCopyMutator { /// Creates a new [`EncodedInsertCopyMutator`]. #[must_use] pub fn new() -> Self { - Self { - tmp_buf: vec![], - phantom: PhantomData, - } + Self::default() } } /// Codes copy mutation for encoded inputs #[derive(Debug, Default)] -pub struct EncodedCopyMutator -where - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(R, S)>, -} +pub struct EncodedCopyMutator; -impl Mutator for EncodedCopyMutator -where - S: HasRand, - R: Rand, -{ +impl Mutator for EncodedCopyMutator { fn mutate( &mut self, state: &mut S, @@ -419,46 +289,27 @@ where } } -impl Named for EncodedCopyMutator -where - S: HasRand, - R: Rand, -{ +impl Named for EncodedCopyMutator { fn name(&self) -> &str { "EncodedCopyMutator" } } -impl EncodedCopyMutator -where - S: HasRand, - R: Rand, -{ +impl EncodedCopyMutator { /// Creates a new [`EncodedCopyMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Crossover insert mutation for encoded inputs #[derive(Debug, Default)] -pub struct EncodedCrossoverInsertMutator -where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, -{ - phantom: PhantomData<(C, R, S)>, -} +pub struct EncodedCrossoverInsertMutator; -impl Mutator for EncodedCrossoverInsertMutator +impl Mutator for EncodedCrossoverInsertMutator where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, + S: HasRand + HasCorpus + HasMaxSize, { fn mutate( &mut self, @@ -512,48 +363,27 @@ where } } -impl Named for EncodedCrossoverInsertMutator -where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, -{ +impl Named for EncodedCrossoverInsertMutator { fn name(&self) -> &str { "EncodedCrossoverInsertMutator" } } -impl EncodedCrossoverInsertMutator -where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, -{ +impl EncodedCrossoverInsertMutator { /// Creates a new [`EncodedCrossoverInsertMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Crossover replace mutation for encoded inputs #[derive(Debug, Default)] -pub struct EncodedCrossoverReplaceMutator -where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus, -{ - phantom: PhantomData<(C, R, S)>, -} +pub struct EncodedCrossoverReplaceMutator; -impl Mutator for EncodedCrossoverReplaceMutator +impl Mutator for EncodedCrossoverReplaceMutator where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, { fn mutate( &mut self, @@ -599,50 +429,33 @@ where } } -impl Named for EncodedCrossoverReplaceMutator -where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus, -{ +impl Named for EncodedCrossoverReplaceMutator { fn name(&self) -> &str { "EncodedCrossoverReplaceMutator" } } -impl EncodedCrossoverReplaceMutator -where - C: Corpus, - R: Rand, - S: HasRand + HasCorpus, -{ +impl EncodedCrossoverReplaceMutator { /// Creates a new [`EncodedCrossoverReplaceMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Get the mutations that compose the encoded mutator #[must_use] -pub fn encoded_mutations() -> tuple_list_type!( - EncodedRandMutator, - EncodedIncMutator, - EncodedDecMutator, - EncodedAddMutator, - EncodedDeleteMutator, - EncodedInsertCopyMutator, - EncodedCopyMutator, - EncodedCrossoverInsertMutator, - EncodedCrossoverReplaceMutator, - ) -where - S: HasRand + HasCorpus + HasMaxSize, - C: Corpus, - R: Rand, -{ +pub fn encoded_mutations() -> tuple_list_type!( + EncodedRandMutator, + EncodedIncMutator, + EncodedDecMutator, + EncodedAddMutator, + EncodedDeleteMutator, + EncodedInsertCopyMutator, + EncodedCopyMutator, + EncodedCrossoverInsertMutator, + EncodedCrossoverReplaceMutator, +) { tuple_list!( EncodedRandMutator::new(), EncodedIncMutator::new(), diff --git a/libafl/src/mutators/gramatron.rs b/libafl/src/mutators/gramatron.rs index b1ec7e8f8a..ad49479aa3 100644 --- a/libafl/src/mutators/gramatron.rs +++ b/libafl/src/mutators/gramatron.rs @@ -1,7 +1,7 @@ //! Gramatron is the rewritten gramatron fuzzer in rust. //! See the original gramatron repo [`Gramatron`](https://github.com/HexHive/Gramatron) for more details. use alloc::vec::Vec; -use core::{cmp::max, marker::PhantomData}; +use core::cmp::max; use hashbrown::HashMap; use serde::{Deserialize, Serialize}; @@ -17,18 +17,16 @@ use crate::{ /// A random mutator for grammar fuzzing #[derive(Debug)] -pub struct GramatronRandomMutator<'a, R, S> +pub struct GramatronRandomMutator<'a, S> where - S: HasRand + HasMetadata, - R: Rand, + S: HasRand + HasMetadata, { - generator: &'a GramatronGenerator<'a, R, S>, + generator: &'a GramatronGenerator<'a, S>, } -impl<'a, R, S> Mutator for GramatronRandomMutator<'a, R, S> +impl<'a, S> Mutator for GramatronRandomMutator<'a, S> where - S: HasRand + HasMetadata, - R: Rand, + S: HasRand + HasMetadata, { fn mutate( &mut self, @@ -48,24 +46,22 @@ where } } -impl<'a, R, S> Named for GramatronRandomMutator<'a, R, S> +impl<'a, S> Named for GramatronRandomMutator<'a, S> where - S: HasRand + HasMetadata, - R: Rand, + S: HasRand + HasMetadata, { fn name(&self) -> &str { "GramatronRandomMutator" } } -impl<'a, R, S> GramatronRandomMutator<'a, R, S> +impl<'a, S> GramatronRandomMutator<'a, S> where - S: HasRand + HasMetadata, - R: Rand, + S: HasRand + HasMetadata, { /// Creates a new [`GramatronRandomMutator`]. 
#[must_use] - pub fn new(generator: &'a GramatronGenerator<'a, R, S>) -> Self { + pub fn new(generator: &'a GramatronGenerator<'a, S>) -> Self { Self { generator } } } @@ -94,20 +90,11 @@ impl GramatronIdxMapMetadata { /// A [`Mutator`] that mutates a [`GramatronInput`] by splicing inputs together. #[derive(Default, Debug)] -pub struct GramatronSpliceMutator -where - C: Corpus, - S: HasRand + HasCorpus + HasMetadata, - R: Rand, -{ - phantom: PhantomData<(C, R, S)>, -} +pub struct GramatronSpliceMutator; -impl Mutator for GramatronSpliceMutator +impl Mutator for GramatronSpliceMutator where - C: Corpus, - S: HasRand + HasCorpus + HasMetadata, - R: Rand, + S: HasRand + HasCorpus + HasMetadata, { fn mutate( &mut self, @@ -155,49 +142,31 @@ where } } -impl Named for GramatronSpliceMutator -where - C: Corpus, - S: HasRand + HasCorpus + HasMetadata, - R: Rand, -{ +impl Named for GramatronSpliceMutator { fn name(&self) -> &str { "GramatronSpliceMutator" } } -impl<'a, C, R, S> GramatronSpliceMutator -where - C: Corpus, - S: HasRand + HasCorpus + HasMetadata, - R: Rand, -{ +impl GramatronSpliceMutator { /// Creates a new [`GramatronSpliceMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// A mutator that uses Gramatron for grammar fuzzing and mutation. #[derive(Default, Debug)] -pub struct GramatronRecursionMutator -where - S: HasRand + HasMetadata, - R: Rand, -{ +pub struct GramatronRecursionMutator { counters: HashMap, states: Vec, temp: Vec, - phantom: PhantomData<(R, S)>, } -impl Mutator for GramatronRecursionMutator +impl Mutator for GramatronRecursionMutator where - S: HasRand + HasMetadata, - R: Rand, + S: HasRand + HasMetadata, { fn mutate( &mut self, @@ -266,29 +235,16 @@ where } } -impl Named for GramatronRecursionMutator -where - S: HasRand + HasMetadata, - R: Rand, -{ +impl Named for GramatronRecursionMutator { fn name(&self) -> &str { "GramatronRecursionMutator" } } -impl GramatronRecursionMutator -where - S: HasRand + HasMetadata, - R: Rand, -{ +impl GramatronRecursionMutator { /// Creates a new [`GramatronRecursionMutator`]. #[must_use] pub fn new() -> Self { - Self { - counters: HashMap::default(), - states: vec![], - temp: vec![], - phantom: PhantomData, - } + Self::default() } } diff --git a/libafl/src/mutators/mopt_mutator.rs b/libafl/src/mutators/mopt_mutator.rs index c7bbc9ea17..4f99d0a3b5 100644 --- a/libafl/src/mutators/mopt_mutator.rs +++ b/libafl/src/mutators/mopt_mutator.rs @@ -360,29 +360,23 @@ pub enum MOptMode { /// This is the main struct of `MOpt`, an `AFL` mutator. 
/// See the original `MOpt` implementation in -pub struct StdMOptMutator +pub struct StdMOptMutator where - C: Corpus, I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, - SC: Corpus, + S: HasRand + HasMetadata + HasCorpus + HasSolutions, { mode: MOptMode, finds_before: usize, mutations: MT, - phantom: PhantomData<(C, I, R, S, SC)>, + phantom: PhantomData<(I, S)>, } -impl Debug for StdMOptMutator +impl Debug for StdMOptMutator where - C: Corpus, I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, - SC: Corpus, + S: HasRand + HasMetadata + HasCorpus + HasSolutions, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( @@ -394,14 +388,11 @@ where } } -impl Mutator for StdMOptMutator +impl Mutator for StdMOptMutator where - C: Corpus, I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, - SC: Corpus, + S: HasRand + HasMetadata + HasCorpus + HasSolutions, { #[inline] fn mutate( @@ -532,14 +523,11 @@ where } } -impl StdMOptMutator +impl StdMOptMutator where - C: Corpus, I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, - SC: Corpus, + S: HasRand + HasMetadata + HasCorpus + HasSolutions, { /// Create a new [`StdMOptMutator`]. pub fn new(state: &mut S, mutations: MT, swarm_num: usize) -> Result { @@ -619,14 +607,11 @@ where } } -impl ComposedByMutations for StdMOptMutator +impl ComposedByMutations for StdMOptMutator where - C: Corpus, I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, - SC: Corpus, + S: HasRand + HasMetadata + HasCorpus + HasSolutions, { /// Get the mutations #[inline] @@ -641,14 +626,11 @@ where } } -impl ScheduledMutator for StdMOptMutator +impl ScheduledMutator for StdMOptMutator where - C: Corpus, I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, - SC: Corpus, + S: HasRand + HasMetadata + HasCorpus + HasSolutions, { /// Compute the number of iterations used to apply stacked mutations fn iterations(&self, state: &mut S, _: &I) -> u64 { diff --git a/libafl/src/mutators/mutations.rs b/libafl/src/mutators/mutations.rs index 6c021c78bd..9d9c065196 100644 --- a/libafl/src/mutators/mutations.rs +++ b/libafl/src/mutators/mutations.rs @@ -12,7 +12,6 @@ use crate::{ use alloc::{borrow::ToOwned, vec::Vec}; use core::{ cmp::{max, min}, - marker::PhantomData, mem::size_of, }; @@ -99,20 +98,12 @@ pub const INTERESTING_32: [i32; 27] = [ /// Bitflip mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BitFlipMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BitFlipMutator; -impl Mutator for BitFlipMutator +impl Mutator for BitFlipMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -131,48 +122,28 @@ where } } -impl Named for BitFlipMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for BitFlipMutator { fn name(&self) -> &str { "BitFlipMutator" } } -impl BitFlipMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl BitFlipMutator { /// Creates a new [`BitFlipMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Byteflip mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct ByteFlipMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct ByteFlipMutator; -impl Mutator for ByteFlipMutator +impl Mutator for ByteFlipMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -189,48 +160,28 @@ where } } -impl Named for ByteFlipMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for ByteFlipMutator { fn name(&self) -> &str { "ByteFlipMutator" } } -impl ByteFlipMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl ByteFlipMutator { /// Creates a new [`ByteFlipMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Byte increment mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct ByteIncMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct ByteIncMutator; -impl Mutator for ByteIncMutator +impl Mutator for ByteIncMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -248,48 +199,28 @@ where } } -impl Named for ByteIncMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for ByteIncMutator { fn name(&self) -> &str { "ByteIncMutator" } } -impl ByteIncMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl ByteIncMutator { /// Creates a new [`ByteIncMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Byte decrement mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct ByteDecMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct ByteDecMutator; -impl Mutator for ByteDecMutator +impl Mutator for ByteDecMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -307,48 +238,28 @@ where } } -impl Named for ByteDecMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for ByteDecMutator { fn name(&self) -> &str { "ByteDecMutator" } } -impl ByteDecMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl ByteDecMutator { /// Creates a a new [`ByteDecMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Byte negate mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct ByteNegMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct ByteNegMutator; -impl Mutator for ByteNegMutator +impl Mutator for ByteNegMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -366,48 +277,28 @@ where } } -impl Named for ByteNegMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for ByteNegMutator { fn name(&self) -> &str { "ByteNegMutator" } } -impl ByteNegMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl ByteNegMutator { /// Creates a new [`ByteNegMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Byte random mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct ByteRandMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct ByteRandMutator; -impl Mutator for ByteRandMutator +impl Mutator for ByteRandMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -425,29 +316,17 @@ where } } -impl Named for ByteRandMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for ByteRandMutator { fn name(&self) -> &str { "ByteRandMutator" } } -impl ByteRandMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl ByteRandMutator { /// Creates a new [`ByteRandMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } @@ -457,21 +336,13 @@ macro_rules! add_mutator_impl { ($name: ident, $size: ty) => { /// Adds or subtracts a random value up to `ARITH_MAX` to a [`<$size>`] at a random place in the [`Vec`], in random byte order. #[derive(Default, Debug)] - pub struct $name - where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, - { - phantom: PhantomData<(I, R, S)>, - } + pub struct $name; #[allow(trivial_numeric_casts)] - impl Mutator for $name + impl Mutator for $name where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -505,29 +376,17 @@ macro_rules! add_mutator_impl { } } - impl Named for $name - where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, - { + impl Named for $name { fn name(&self) -> &str { stringify!($name) } } - impl $name - where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, - { + impl $name { /// Creates a new [`$name`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } }; @@ -544,20 +403,12 @@ macro_rules! interesting_mutator_impl { ($name: ident, $size: ty, $interesting: ident) => { /// Inserts an interesting value at a random place in the input vector #[derive(Default, Debug)] - pub struct $name - where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, - { - phantom: PhantomData<(I, R, S)>, - } + pub struct $name; - impl Mutator for $name + impl Mutator for $name where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { #[allow(clippy::cast_sign_loss)] fn mutate( @@ -583,29 +434,17 @@ macro_rules! interesting_mutator_impl { } } - impl Named for $name - where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, - { + impl Named for $name { fn name(&self) -> &str { stringify!($name) } } - impl $name - where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, - { + impl $name { /// Creates a new [`$name`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } }; @@ -617,20 +456,12 @@ interesting_mutator_impl!(DwordInterestingMutator, u32, INTERESTING_32); /// Bytes delete mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesDeleteMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesDeleteMutator; -impl Mutator for BytesDeleteMutator +impl Mutator for BytesDeleteMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -651,48 +482,28 @@ where } } -impl Named for BytesDeleteMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for BytesDeleteMutator { fn name(&self) -> &str { "BytesDeleteMutator" } } -impl BytesDeleteMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl BytesDeleteMutator { /// Creates a new [`BytesDeleteMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes expand mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesExpandMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesExpandMutator; -impl Mutator for BytesExpandMutator +impl Mutator for BytesExpandMutator where I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, + S: HasRand + HasMaxSize, { fn mutate( &mut self, @@ -720,48 +531,28 @@ where } } -impl Named for BytesExpandMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl Named for BytesExpandMutator { fn name(&self) -> &str { "BytesExpandMutator" } } -impl BytesExpandMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl BytesExpandMutator { /// Creates a new [`BytesExpandMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes insert mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesInsertMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesInsertMutator; -impl Mutator for BytesInsertMutator +impl Mutator for BytesInsertMutator where I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, + S: HasRand + HasMaxSize, { fn mutate( &mut self, @@ -795,48 +586,28 @@ where } } -impl Named for BytesInsertMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl Named for BytesInsertMutator { fn name(&self) -> &str { "BytesInsertMutator" } } -impl BytesInsertMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl BytesInsertMutator { /// Creates a new [`BytesInsertMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes random insert mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesRandInsertMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesRandInsertMutator; -impl Mutator for BytesRandInsertMutator +impl Mutator for BytesRandInsertMutator where I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, + S: HasRand + HasMaxSize, { fn mutate( &mut self, @@ -867,48 +638,28 @@ where } } -impl Named for BytesRandInsertMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl Named for BytesRandInsertMutator { fn name(&self) -> &str { "BytesRandInsertMutator" } } -impl BytesRandInsertMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl BytesRandInsertMutator { /// Create a new [`BytesRandInsertMutator`] #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes set mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesSetMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesSetMutator; -impl Mutator for BytesSetMutator +impl Mutator for BytesSetMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -931,48 +682,28 @@ where } } -impl Named for BytesSetMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for BytesSetMutator { fn name(&self) -> &str { "BytesSetMutator" } } -impl BytesSetMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl BytesSetMutator { /// Creates a new [`BytesSetMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes random set mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesRandSetMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesRandSetMutator; -impl Mutator for BytesRandSetMutator +impl Mutator for BytesRandSetMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -995,48 +726,28 @@ where } } -impl Named for BytesRandSetMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for BytesRandSetMutator { fn name(&self) -> &str { "BytesRandSetMutator" } } -impl BytesRandSetMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl BytesRandSetMutator { /// Creates a new [`BytesRandSetMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes copy mutation for inputs with a bytes vector #[derive(Default, Debug)] -pub struct BytesCopyMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesCopyMutator; -impl Mutator for BytesCopyMutator +impl Mutator for BytesCopyMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand, { fn mutate( &mut self, @@ -1059,49 +770,30 @@ where } } -impl Named for BytesCopyMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for BytesCopyMutator { fn name(&self) -> &str { "BytesCopyMutator" } } -impl BytesCopyMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl BytesCopyMutator { /// Creates a new [`BytesCopyMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Bytes insert and self copy mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct BytesInsertCopyMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +pub struct BytesInsertCopyMutator { tmp_buf: Vec, - phantom: PhantomData<(I, R, S)>, } -impl Mutator for BytesInsertCopyMutator +impl Mutator for BytesInsertCopyMutator where I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, + S: HasRand + HasMaxSize, { fn mutate( &mut self, @@ -1142,49 +834,28 @@ where } } -impl Named for BytesInsertCopyMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl Named for BytesInsertCopyMutator { fn name(&self) -> &str { "BytesInsertCopyMutator" } } -impl BytesInsertCopyMutator -where - I: Input + HasBytesVec, - S: HasRand + HasMaxSize, - R: Rand, -{ +impl BytesInsertCopyMutator { /// Creates a new [`BytesInsertCopyMutator`]. #[must_use] pub fn new() -> Self { - Self { - tmp_buf: vec![], - phantom: PhantomData, - } + Self::default() } } /// Bytes swap mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct BytesSwapMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct BytesSwapMutator; -impl Mutator for BytesSwapMutator +impl Mutator for BytesSwapMutator where I: Input + HasBytesVec, - S: HasRand, - R: Rand, + S: HasRand + HasCorpus + HasMaxSize, { fn mutate( &mut self, @@ -1209,50 +880,28 @@ where } } -impl Named for BytesSwapMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl Named for BytesSwapMutator { fn name(&self) -> &str { "BytesSwapMutator" } } -impl BytesSwapMutator -where - I: Input + HasBytesVec, - S: HasRand, - R: Rand, -{ +impl BytesSwapMutator { /// Creates a new [`BytesSwapMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Crossover insert mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct CrossoverInsertMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, -{ - phantom: PhantomData<(C, I, R, S)>, -} +pub struct CrossoverInsertMutator; -impl Mutator for CrossoverInsertMutator +impl Mutator for CrossoverInsertMutator where - C: Corpus, I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, + S: HasRand + HasCorpus + HasMaxSize, { fn mutate( &mut self, @@ -1306,52 +955,28 @@ where } } -impl Named for CrossoverInsertMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, -{ +impl Named for CrossoverInsertMutator { fn name(&self) -> &str { "CrossoverInsertMutator" } } -impl CrossoverInsertMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus + HasMaxSize, -{ +impl CrossoverInsertMutator { /// Creates a new [`CrossoverInsertMutator`]. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// Crossover replace mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct CrossoverReplaceMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, -{ - phantom: PhantomData<(C, I, R, S)>, -} +pub struct CrossoverReplaceMutator; -impl Mutator for CrossoverReplaceMutator +impl Mutator for CrossoverReplaceMutator where - C: Corpus, I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, { fn mutate( &mut self, @@ -1397,31 +1022,17 @@ where } } -impl Named for CrossoverReplaceMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, -{ +impl Named for CrossoverReplaceMutator { fn name(&self) -> &str { "CrossoverReplaceMutator" } } -impl CrossoverReplaceMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, -{ +impl CrossoverReplaceMutator { /// Creates a new [`CrossoverReplaceMutator`]. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } @@ -1443,22 +1054,12 @@ fn locate_diffs(this: &[u8], other: &[u8]) -> (i64, i64) { /// Splice mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct SpliceMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, -{ - phantom: PhantomData<(C, I, R, S)>, -} +pub struct SpliceMutator; -impl Mutator for SpliceMutator +impl Mutator for SpliceMutator where - C: Corpus, I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, { #[allow(clippy::cast_sign_loss)] fn mutate( @@ -1506,31 +1107,17 @@ where } } -impl Named for SpliceMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, -{ +impl Named for SpliceMutator { fn name(&self) -> &str { "SpliceMutator" } } -impl SpliceMutator -where - C: Corpus, - I: Input + HasBytesVec, - R: Rand, - S: HasRand + HasCorpus, -{ +impl SpliceMutator { /// Creates a new [`SpliceMutator`]. 
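The crossover and splice mutators in these hunks now reach the corpus purely through the state bound. A hedged helper in the same spirit, assuming the `HasCorpus<I>`/`HasRand` bounds and the `Corpus`/`Rand` methods used elsewhere in this patch; the helper itself is not part of LibAFL.

use libafl::{
    bolts::rands::Rand,
    corpus::Corpus,
    inputs::Input,
    state::{HasCorpus, HasRand},
    Error,
};

/// Clone a random corpus input other than the one at `skip_idx`, if any.
fn random_other_input<I, S>(state: &mut S, skip_idx: usize) -> Result<Option<I>, Error>
where
    I: Input,
    S: HasRand + HasCorpus<I>,
{
    let count = state.corpus().count();
    if count < 2 {
        return Ok(None);
    }
    let mut idx = state.rand_mut().below(count as u64) as usize;
    if idx == skip_idx {
        idx = (idx + 1) % count;
    }
    // `load_input` lazily loads the testcase from disk for on-disk corpora.
    let mut testcase = state.corpus().get(idx)?.borrow_mut();
    Ok(Some(testcase.load_input()?.clone()))
}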
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } @@ -1591,12 +1178,10 @@ mod tests { state::{HasMetadata, StdState}, }; - fn test_mutations() -> impl MutatorsTuple + fn test_mutations() -> impl MutatorsTuple where I: Input + HasBytesVec, - S: HasRand + HasCorpus + HasMetadata + HasMaxSize, - C: Corpus, - R: Rand, + S: HasRand + HasCorpus + HasMetadata + HasMaxSize, { tuple_list!( BitFlipMutator::new(), diff --git a/libafl/src/mutators/nautilus.rs b/libafl/src/mutators/nautilus.rs index 7703c636da..782290189c 100644 --- a/libafl/src/mutators/nautilus.rs +++ b/libafl/src/mutators/nautilus.rs @@ -2,7 +2,6 @@ use crate::{ bolts::tuples::Named, - corpus::Corpus, feedbacks::NautilusChunksMetadata, generators::nautilus::NautilusContext, inputs::nautilus::NautilusInput, @@ -11,7 +10,7 @@ use crate::{ Error, }; -use core::{fmt::Debug, marker::PhantomData}; +use core::fmt::Debug; use grammartec::mutator::Mutator as BackingMutator; use grammartec::{ context::Context, @@ -30,7 +29,7 @@ impl Debug for NautilusRandomMutator<'_> { } } -impl<'a, S> Mutator for NautilusRandomMutator<'a> { +impl Mutator for NautilusRandomMutator<'_> { fn mutate( &mut self, _state: &mut S, @@ -60,7 +59,7 @@ impl<'a, S> Mutator for NautilusRandomMutator<'a> { } } -impl<'a> Named for NautilusRandomMutator<'a> { +impl Named for NautilusRandomMutator<'_> { fn name(&self) -> &str { "NautilusRandomMutator" } @@ -91,7 +90,7 @@ impl Debug for NautilusRecursionMutator<'_> { } } -impl<'a, S> Mutator for NautilusRecursionMutator<'a> { +impl Mutator for NautilusRecursionMutator<'_> { fn mutate( &mut self, _state: &mut S, @@ -124,7 +123,7 @@ impl<'a, S> Mutator for NautilusRecursionMutator<'a> { } } -impl<'a> Named for NautilusRecursionMutator<'a> { +impl Named for NautilusRecursionMutator<'_> { fn name(&self) -> &str { "NautilusRecursionMutator" } @@ -143,22 +142,20 @@ impl<'a> NautilusRecursionMutator<'a> { } /// The splicing mutator for `Nautilus` that can splice inputs together -pub struct NautilusSpliceMutator<'a, C> { +pub struct NautilusSpliceMutator<'a> { ctx: &'a Context, mutator: BackingMutator, - phantom: PhantomData, } -impl Debug for NautilusSpliceMutator<'_, ()> { +impl Debug for NautilusSpliceMutator<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "NautilusSpliceMutator {{}}") } } -impl<'a, S, C> Mutator for NautilusSpliceMutator<'a, C> +impl Mutator for NautilusSpliceMutator<'_> where - C: Corpus, - S: HasCorpus + HasMetadata, + S: HasCorpus + HasMetadata, { fn mutate( &mut self, @@ -194,13 +191,13 @@ where } } -impl<'a, C> Named for NautilusSpliceMutator<'a, C> { +impl Named for NautilusSpliceMutator<'_> { fn name(&self) -> &str { "NautilusSpliceMutator" } } -impl<'a, C> NautilusSpliceMutator<'a, C> { +impl<'a> NautilusSpliceMutator<'a> { /// Creates a new [`NautilusSpliceMutator`]. 
#[must_use] pub fn new(context: &'a NautilusContext) -> Self { @@ -208,7 +205,6 @@ impl<'a, C> NautilusSpliceMutator<'a, C> { Self { ctx: &context.ctx, mutator, - phantom: PhantomData, } } } diff --git a/libafl/src/mutators/scheduled.rs b/libafl/src/mutators/scheduled.rs index 9a23a6a3a0..55da803579 100644 --- a/libafl/src/mutators/scheduled.rs +++ b/libafl/src/mutators/scheduled.rs @@ -14,9 +14,9 @@ use crate::{ AsSlice, }, corpus::Corpus, - inputs::{HasBytesVec, Input}, + inputs::Input, mutators::{MutationResult, Mutator, MutatorsTuple}, - state::{HasCorpus, HasMaxSize, HasMetadata, HasRand}, + state::{HasCorpus, HasMetadata, HasRand}, Error, }; @@ -95,24 +95,22 @@ where } /// A [`Mutator`] that schedules one of the embedded mutations on each call. -pub struct StdScheduledMutator +pub struct StdScheduledMutator where I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand, + S: HasRand, { mutations: MT, max_iterations: u64, - phantom: PhantomData<(I, R, S)>, + phantom: PhantomData<(I, S)>, } -impl Debug for StdScheduledMutator +impl Debug for StdScheduledMutator where I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand, + S: HasRand, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( @@ -124,12 +122,11 @@ where } } -impl Mutator for StdScheduledMutator +impl Mutator for StdScheduledMutator where I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand, + S: HasRand, { #[inline] fn mutate( @@ -142,12 +139,11 @@ where } } -impl ComposedByMutations for StdScheduledMutator +impl ComposedByMutations for StdScheduledMutator where I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand, + S: HasRand, { /// Get the mutations #[inline] @@ -162,12 +158,11 @@ where } } -impl ScheduledMutator for StdScheduledMutator +impl ScheduledMutator for StdScheduledMutator where I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand, + S: HasRand, { /// Compute the number of iterations used to apply stacked mutations fn iterations(&self, state: &mut S, _: &I) -> u64 { @@ -181,12 +176,11 @@ where } } -impl StdScheduledMutator +impl StdScheduledMutator where I: Input, MT: MutatorsTuple, - R: Rand, - S: HasRand, + S: HasRand, { /// Create a new [`StdScheduledMutator`] instance specifying mutations pub fn new(mutations: MT) -> Self { @@ -209,41 +203,35 @@ where /// Get the mutations that compose the Havoc mutator #[must_use] -pub fn havoc_mutations() -> tuple_list_type!( - BitFlipMutator, - ByteFlipMutator, - ByteIncMutator, - ByteDecMutator, - ByteNegMutator, - ByteRandMutator, - ByteAddMutator, - WordAddMutator, - DwordAddMutator, - QwordAddMutator, - ByteInterestingMutator, - WordInterestingMutator, - DwordInterestingMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesExpandMutator, - BytesInsertMutator, - BytesRandInsertMutator, - BytesSetMutator, - BytesRandSetMutator, - BytesCopyMutator, - BytesInsertCopyMutator, - BytesSwapMutator, - CrossoverInsertMutator, - CrossoverReplaceMutator, - ) -where - I: Input + HasBytesVec, - S: HasRand + HasCorpus + HasMetadata + HasMaxSize, - C: Corpus, - R: Rand, -{ +pub fn havoc_mutations() -> tuple_list_type!( + BitFlipMutator, + ByteFlipMutator, + ByteIncMutator, + ByteDecMutator, + ByteNegMutator, + ByteRandMutator, + ByteAddMutator, + WordAddMutator, + DwordAddMutator, + QwordAddMutator, + ByteInterestingMutator, + WordInterestingMutator, + DwordInterestingMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesExpandMutator, + BytesInsertMutator, + 
BytesRandInsertMutator, + BytesSetMutator, + BytesRandSetMutator, + BytesCopyMutator, + BytesInsertCopyMutator, + BytesSwapMutator, + CrossoverInsertMutator, + CrossoverReplaceMutator, +) { tuple_list!( BitFlipMutator::new(), ByteFlipMutator::new(), @@ -277,39 +265,28 @@ where /// Get the mutations that uses the Tokens metadata #[must_use] -pub fn tokens_mutations( -) -> tuple_list_type!(TokenInsert, TokenReplace) -where - I: Input + HasBytesVec, - S: HasRand + HasCorpus + HasMetadata + HasMaxSize, - C: Corpus, - R: Rand, -{ +pub fn tokens_mutations() -> tuple_list_type!(TokenInsert, TokenReplace) { tuple_list!(TokenInsert::new(), TokenReplace::new(),) } /// A logging [`Mutator`] that wraps around a [`StdScheduledMutator`]. -pub struct LoggerScheduledMutator +pub struct LoggerScheduledMutator where - C: Corpus, I: Input, MT: MutatorsTuple + NamedTuple, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, SM: ScheduledMutator, { scheduled: SM, mutation_log: Vec, - phantom: PhantomData<(C, I, MT, R, S)>, + phantom: PhantomData<(I, MT, S)>, } -impl Debug for LoggerScheduledMutator +impl Debug for LoggerScheduledMutator where - C: Corpus, I: Input, MT: MutatorsTuple + NamedTuple, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, SM: ScheduledMutator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -322,13 +299,11 @@ where } } -impl Mutator for LoggerScheduledMutator +impl Mutator for LoggerScheduledMutator where - C: Corpus, I: Input, MT: MutatorsTuple + NamedTuple, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, SM: ScheduledMutator, { fn mutate( @@ -362,14 +337,11 @@ where } } -impl ComposedByMutations - for LoggerScheduledMutator +impl ComposedByMutations for LoggerScheduledMutator where - C: Corpus, I: Input, MT: MutatorsTuple + NamedTuple, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, SM: ScheduledMutator, { #[inline] @@ -383,13 +355,11 @@ where } } -impl ScheduledMutator for LoggerScheduledMutator +impl ScheduledMutator for LoggerScheduledMutator where - C: Corpus, I: Input, MT: MutatorsTuple + NamedTuple, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, SM: ScheduledMutator, { /// Compute the number of iterations used to apply stacked mutations @@ -428,13 +398,11 @@ where } } -impl LoggerScheduledMutator +impl LoggerScheduledMutator where - C: Corpus, I: Input, MT: MutatorsTuple + NamedTuple, - R: Rand, - S: HasRand + HasCorpus, + S: HasRand + HasCorpus, SM: ScheduledMutator, { /// Create a new [`StdScheduledMutator`] instance without mutations and corpus diff --git a/libafl/src/mutators/token_mutations.rs b/libafl/src/mutators/token_mutations.rs index 94435ffb61..e7f3e389cf 100644 --- a/libafl/src/mutators/token_mutations.rs +++ b/libafl/src/mutators/token_mutations.rs @@ -1,7 +1,7 @@ //! Tokens are what afl calls extras or dictionaries. //! They may be inserted as part of mutations during fuzzing. use alloc::vec::Vec; -use core::{marker::PhantomData, mem::size_of}; +use core::mem::size_of; use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] @@ -127,20 +127,12 @@ impl Tokens { /// Inserts a random token at a random position in the `Input`. 
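Since `havoc_mutations()` and `tokens_mutations()` no longer carry input, state, corpus or rand parameters, assembling a custom mutation set follows the same pattern with no type annotations. A small sketch in that style; the trimmed-down selection is arbitrary, and only the signatures shown in this patch are assumed.

use libafl::{
    bolts::tuples::{tuple_list, tuple_list_type},
    mutators::{BitFlipMutator, ByteFlipMutator, BytesDeleteMutator},
};

/// A reduced mutation set in the same style as `havoc_mutations()`.
#[must_use]
fn small_mutations() -> tuple_list_type!(BitFlipMutator, ByteFlipMutator, BytesDeleteMutator) {
    tuple_list!(
        BitFlipMutator::new(),
        ByteFlipMutator::new(),
        BytesDeleteMutator::new(),
    )
}

The concrete input and state types are only pinned down when such a tuple is handed to a scheduled mutator inside a fuzzer, which is exactly what the removal of the old generic parameters buys.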
#[derive(Debug, Default)] -pub struct TokenInsert -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct TokenInsert; -impl Mutator for TokenInsert +impl Mutator for TokenInsert where I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, + S: HasMetadata + HasRand + HasMaxSize, { fn mutate( &mut self, @@ -184,49 +176,29 @@ where } } -impl Named for TokenInsert -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ +impl Named for TokenInsert { fn name(&self) -> &str { "TokenInsert" } } -impl TokenInsert -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ +impl TokenInsert { /// Create a `TokenInsert` `Mutation`. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// A `TokenReplace` [`Mutator`] replaces a random part of the input with one of a range of tokens. /// From AFL terms, this is called as `Dictionary` mutation (which doesn't really make sense ;) ). #[derive(Debug, Default)] -pub struct TokenReplace -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct TokenReplace; -impl Mutator for TokenReplace +impl Mutator for TokenReplace where I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, + S: HasMetadata + HasRand + HasMaxSize, { fn mutate( &mut self, @@ -266,49 +238,29 @@ where } } -impl Named for TokenReplace -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ +impl Named for TokenReplace { fn name(&self) -> &str { "TokenReplace" } } -impl TokenReplace -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ +impl TokenReplace { /// Creates a new `TokenReplace` struct. #[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } /// A `I2SRandReplace` [`Mutator`] replaces a random matching input-2-state comparison operand with the other. /// it needs a valid [`CmpValuesMetadata`] in the state. #[derive(Debug, Default)] -pub struct I2SRandReplace -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ - phantom: PhantomData<(I, R, S)>, -} +pub struct I2SRandReplace; -impl Mutator for I2SRandReplace +impl Mutator for I2SRandReplace where I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, + S: HasMetadata + HasRand + HasMaxSize, { #[allow(clippy::too_many_lines)] fn mutate( @@ -471,29 +423,17 @@ where } } -impl Named for I2SRandReplace -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ +impl Named for I2SRandReplace { fn name(&self) -> &str { "I2SRandReplace" } } -impl I2SRandReplace -where - I: Input + HasBytesVec, - S: HasMetadata + HasRand + HasMaxSize, - R: Rand, -{ +impl I2SRandReplace { /// Creates a new `I2SRandReplace` struct. 
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self } } diff --git a/libafl/src/stages/calibrate.rs b/libafl/src/stages/calibrate.rs index 824b7e0576..3aa24b8d6e 100644 --- a/libafl/src/stages/calibrate.rs +++ b/libafl/src/stages/calibrate.rs @@ -2,10 +2,11 @@ use crate::{ bolts::current_time, + bolts::tuples::MatchName, corpus::{Corpus, PowerScheduleTestcaseMetaData}, events::{EventFirer, LogSeverity}, executors::{Executor, ExitKind, HasObservers}, - feedbacks::{FeedbackStatesTuple, MapFeedbackState}, + feedbacks::MapFeedbackState, fuzzer::Evaluator, inputs::Input, observers::{MapObserver, ObserversTuple}, @@ -23,40 +24,31 @@ use serde::{Deserialize, Serialize}; /// The calibration stage will measure the average exec time and the target's stability for this input. #[derive(Clone, Debug)] -pub struct CalibrationStage +pub struct CalibrationStage where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, - E: Executor + HasObservers, - EM: EventFirer, - FT: FeedbackStatesTuple, I: Input, O: MapObserver, OT: ObserversTuple, - S: HasCorpus + HasMetadata, - Z: Evaluator, + S: HasCorpus + HasMetadata, { map_observer_name: String, stage_max: usize, - #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, E, EM, FT, I, O, OT, S, T, Z)>, + phantom: PhantomData<(I, O, OT, S, T)>, } const CAL_STAGE_START: usize = 4; const CAL_STAGE_MAX: usize = 16; -impl Stage - for CalibrationStage +impl Stage for CalibrationStage where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, E: Executor + HasObservers, EM: EventFirer, - FT: FeedbackStatesTuple, I: Input, O: MapObserver, OT: ObserversTuple, - S: HasCorpus + HasMetadata + HasFeedbackStates + HasClientPerfMonitor, + S: HasCorpus + HasMetadata + HasFeedbackStates + HasClientPerfMonitor, Z: Evaluator, { #[inline] @@ -314,18 +306,13 @@ impl PowerScheduleMetadata { crate::impl_serdeany!(PowerScheduleMetadata); -impl CalibrationStage +impl CalibrationStage where T: PrimInt + Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, - E: Executor + HasObservers, - EM: EventFirer, - FT: FeedbackStatesTuple, I: Input, O: MapObserver, OT: ObserversTuple, - S: HasCorpus + HasMetadata, - Z: Evaluator, + S: HasCorpus + HasMetadata, { /// Create a new [`CalibrationStage`]. pub fn new(state: &mut S, map_observer_name: &O) -> Self { diff --git a/libafl/src/stages/concolic.rs b/libafl/src/stages/concolic.rs index adbedd751e..a8250eee42 100644 --- a/libafl/src/stages/concolic.rs +++ b/libafl/src/stages/concolic.rs @@ -17,25 +17,23 @@ use super::{Stage, TracingStage}; /// Wraps a [`TracingStage`] to add concolic observing. 
#[derive(Clone, Debug)] -pub struct ConcolicTracingStage +pub struct ConcolicTracingStage where I: Input, - C: Corpus, TE: Executor + HasObservers, OT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { - inner: TracingStage, + inner: TracingStage, observer_name: String, } -impl Stage for ConcolicTracingStage +impl Stage for ConcolicTracingStage where I: Input, - C: Corpus, TE: Executor + HasObservers, OT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { #[inline] fn perform( @@ -67,16 +65,15 @@ where } } -impl ConcolicTracingStage +impl ConcolicTracingStage where I: Input, - C: Corpus, TE: Executor + HasObservers, OT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { /// Creates a new default tracing stage using the given [`Executor`], observing traces from a [`ConcolicObserver`] with the given name. - pub fn new(inner: TracingStage, observer_name: String) -> Self { + pub fn new(inner: TracingStage, observer_name: String) -> Self { Self { inner, observer_name, @@ -345,21 +342,19 @@ fn generate_mutations(iter: impl Iterator) -> Vec< /// A mutational stage that uses Z3 to solve concolic constraints attached to the [`crate::corpus::Testcase`] by the [`ConcolicTracingStage`]. #[derive(Clone, Debug)] -pub struct SimpleConcolicMutationalStage +pub struct SimpleConcolicMutationalStage where I: Input, - C: Corpus, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { - _phantom: PhantomData<(C, EM, I, S, Z)>, + _phantom: PhantomData<(EM, I, S, Z)>, } #[cfg(feature = "concolic_mutation")] -impl Stage for SimpleConcolicMutationalStage +impl Stage for SimpleConcolicMutationalStage where I: Input + HasBytesVec, - C: Corpus, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, Z: Evaluator, { #[inline] @@ -399,11 +394,10 @@ where } } -impl Default for SimpleConcolicMutationalStage +impl Default for SimpleConcolicMutationalStage where I: Input, - C: Corpus, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { fn default() -> Self { Self { diff --git a/libafl/src/stages/mod.rs b/libafl/src/stages/mod.rs index cd60b1553c..b09f81bafe 100644 --- a/libafl/src/stages/mod.rs +++ b/libafl/src/stages/mod.rs @@ -32,8 +32,7 @@ pub mod sync; pub use sync::*; use crate::{ - bolts::rands::Rand, - corpus::{Corpus, CorpusScheduler}, + corpus::CorpusScheduler, events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter}, executors::{Executor, HasObservers}, inputs::Input, @@ -164,32 +163,28 @@ where /// Allows us to use a [`push::PushStage`] as a normal [`Stage`] #[allow(clippy::type_complexity)] #[derive(Debug)] -pub struct PushStageAdapter +pub struct PushStageAdapter where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, I: Input, OT: ObserversTuple, - PS: PushStage, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, + PS: PushStage, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { push_stage: PS, - phantom: PhantomData<(C, CS, EM, I, OT, R, S, Z)>, + phantom: PhantomData<(CS, EM, I, OT, S, Z)>, } -impl PushStageAdapter +impl 
PushStageAdapter where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, I: Input, OT: ObserversTuple, - PS: PushStage, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, + PS: PushStage, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Create a new [`PushStageAdapter`], warpping the given [`PushStage`] @@ -203,18 +198,15 @@ where } } -impl Stage - for PushStageAdapter +impl Stage for PushStageAdapter where - C: Corpus, CS: CorpusScheduler, E: Executor + HasObservers, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, I: Input, OT: ObserversTuple, - PS: PushStage, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, + PS: PushStage, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, Z: ExecutesInput + ExecutionProcessor + EvaluatorObservers diff --git a/libafl/src/stages/mutational.rs b/libafl/src/stages/mutational.rs index 317557d700..664e18bcff 100644 --- a/libafl/src/stages/mutational.rs +++ b/libafl/src/stages/mutational.rs @@ -24,12 +24,11 @@ use crate::monitors::PerfFeature; /// A Mutational stage is the stage in a fuzzing run that mutates inputs. /// Mutational stages will usually have a range of mutations that are /// being applied to the input one by one, between executions. -pub trait MutationalStage: Stage +pub trait MutationalStage: Stage where - C: Corpus, M: Mutator, I: Input, - S: HasClientPerfMonitor + HasCorpus, + S: HasClientPerfMonitor + HasCorpus, Z: Evaluator, { /// The mutator registered for this stage @@ -84,28 +83,23 @@ pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: u64 = 128; /// The default mutational stage #[derive(Clone, Debug)] -pub struct StdMutationalStage +pub struct StdMutationalStage where - C: Corpus, M: Mutator, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: Evaluator, { mutator: M, #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, E, EM, I, R, S, Z)>, + phantom: PhantomData<(E, EM, I, S, Z)>, } -impl MutationalStage - for StdMutationalStage +impl MutationalStage for StdMutationalStage where - C: Corpus, M: Mutator, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: Evaluator, { /// The mutator, added to this stage @@ -126,13 +120,11 @@ where } } -impl Stage for StdMutationalStage +impl Stage for StdMutationalStage where - C: Corpus, M: Mutator, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: Evaluator, { #[inline] @@ -154,13 +146,11 @@ where } } -impl StdMutationalStage +impl StdMutationalStage where - C: Corpus, M: Mutator, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: Evaluator, { /// Creates a new default mutational stage diff --git a/libafl/src/stages/power.rs b/libafl/src/stages/power.rs index eb88f33287..250723589b 100644 --- a/libafl/src/stages/power.rs +++ b/libafl/src/stages/power.rs @@ -34,16 +34,15 @@ const HAVOC_MAX_MULT: f64 = 64.0; /// The mutational stage using power schedules #[derive(Clone, Debug)] -pub struct PowerMutationalStage +pub struct PowerMutationalStage where T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, E: Executor + HasObservers, I: Input, 
M: Mutator, O: MapObserver, OT: ObserversTuple, - S: HasClientPerfMonitor + HasCorpus + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasMetadata, Z: Evaluator, { map_observer_name: String, @@ -51,20 +50,19 @@ where /// The employed power schedule strategy strat: PowerSchedule, #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, E, EM, I, O, OT, S, T, Z)>, + phantom: PhantomData<(E, EM, I, O, OT, S, T, Z)>, } -impl MutationalStage - for PowerMutationalStage +impl MutationalStage + for PowerMutationalStage where T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, E: Executor + HasObservers, I: Input, M: Mutator, O: MapObserver, OT: ObserversTuple, - S: HasClientPerfMonitor + HasCorpus + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasMetadata, Z: Evaluator, { /// The mutator, added to this stage @@ -155,17 +153,16 @@ where } } -impl Stage - for PowerMutationalStage +impl Stage + for PowerMutationalStage where T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, E: Executor + HasObservers, I: Input, M: Mutator, O: MapObserver, OT: ObserversTuple, - S: HasClientPerfMonitor + HasCorpus + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasMetadata, Z: Evaluator, { #[inline] @@ -183,16 +180,15 @@ where } } -impl PowerMutationalStage +impl PowerMutationalStage where T: PrimInt + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned + Debug, - C: Corpus, E: Executor + HasObservers, I: Input, M: Mutator, O: MapObserver, OT: ObserversTuple, - S: HasClientPerfMonitor + HasCorpus + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasMetadata, Z: Evaluator, { /// Creates a new [`PowerMutationalStage`] diff --git a/libafl/src/stages/push/mod.rs b/libafl/src/stages/push/mod.rs index 6ceb7e001e..56a422a524 100644 --- a/libafl/src/stages/push/mod.rs +++ b/libafl/src/stages/push/mod.rs @@ -16,8 +16,8 @@ use core::{ }; use crate::{ - bolts::{current_time, rands::Rand}, - corpus::{Corpus, CorpusScheduler}, + bolts::current_time, + corpus::CorpusScheduler, events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter}, executors::ExitKind, inputs::Input, @@ -32,15 +32,13 @@ const STATS_TIMEOUT_DEFAULT: Duration = Duration::from_secs(15); // The shared state for all [`PushStage`]s /// Should be stored inside a `[Rc>`] #[derive(Clone, Debug)] -pub struct PushStageSharedState +pub struct PushStageSharedState where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId, I: Input, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// The [`crate::state::State`] @@ -51,18 +49,16 @@ where pub event_mgr: EM, /// The [`crate::observers::ObserversTuple`] pub observers: OT, - phantom: PhantomData<(C, CS, I, OT, R, S, Z)>, + phantom: PhantomData<(CS, I, OT, S, Z)>, } -impl PushStageSharedState +impl PushStageSharedState where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId, I: Input, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Create a new `PushStageSharedState` that can be used by all [`PushStage`]s @@ -80,15 +76,13 @@ where /// Helper class for the [`PushStage`] trait, taking care 
of borrowing the shared state #[derive(Clone, Debug)] -pub struct PushStageHelper +pub struct PushStageHelper where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId, I: Input, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// If this stage has already been initalized. @@ -98,7 +92,7 @@ where pub last_monitor_time: Duration, /// The shared state, keeping track of the corpus and the fuzzer #[allow(clippy::type_complexity)] - pub shared_state: Rc>>>, + pub shared_state: Rc>>>, /// If the last iteraation failed pub errored: bool, @@ -109,26 +103,24 @@ where pub current_input: Option, // Todo: Get rid of copy #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, CS, (), EM, I, R, OT, S, Z)>, + phantom: PhantomData<(CS, (), EM, I, OT, S, Z)>, exit_kind: Rc>>, } -impl PushStageHelper +impl PushStageHelper where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId, I: Input, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Create a new [`PushStageHelper`] #[must_use] #[allow(clippy::type_complexity)] pub fn new( - shared_state: Rc>>>, + shared_state: Rc>>>, exit_kind_ref: Rc>>, ) -> Self { Self { @@ -145,17 +137,14 @@ where /// Sets the shared state for this helper (and all other helpers owning the same [`RefCell`]) #[inline] - pub fn set_shared_state( - &mut self, - shared_state: PushStageSharedState, - ) { + pub fn set_shared_state(&mut self, shared_state: PushStageSharedState) { (&mut *self.shared_state.borrow_mut()).replace(shared_state); } /// Takes the shared state from this helper, replacing it with `None` #[inline] #[allow(clippy::type_complexity)] - pub fn take_shared_state(&mut self) -> Option> { + pub fn take_shared_state(&mut self) -> Option> { let shared_state_ref = &mut (*self.shared_state).borrow_mut(); shared_state_ref.take() } @@ -176,7 +165,7 @@ where /// Resets this state after a full stage iter. fn end_of_iter( &mut self, - shared_state: PushStageSharedState, + shared_state: PushStageSharedState, errored: bool, ) { self.set_shared_state(shared_state); @@ -191,21 +180,19 @@ where /// A push stage is a generator that returns a single testcase for each call. /// It's an iterator so we can chain it. /// After it has finished once, we will call it agan for the next fuzzer round. 
-pub trait PushStage: Iterator +pub trait PushStage: Iterator where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, I: Input, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Gets the [`PushStageHelper`] - fn push_stage_helper(&self) -> &PushStageHelper; + fn push_stage_helper(&self) -> &PushStageHelper; /// Gets the [`PushStageHelper`], mut - fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper; + fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper; /// Set the current corpus index this stagve works on fn set_current_corpus_idx(&mut self, corpus_idx: usize) { diff --git a/libafl/src/stages/push/mutational.rs b/libafl/src/stages/push/mutational.rs index fabe4966a3..83b8737d49 100644 --- a/libafl/src/stages/push/mutational.rs +++ b/libafl/src/stages/push/mutational.rs @@ -35,16 +35,14 @@ pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: u64 = 128; /// /// The default mutational push stage #[derive(Clone, Debug)] -pub struct StdMutationalPushStage +pub struct StdMutationalPushStage where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId, I: Input, M: Mutator, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { current_corpus_idx: Option, @@ -55,19 +53,17 @@ where mutator: M, - psh: PushStageHelper, + psh: PushStageHelper, } -impl StdMutationalPushStage +impl StdMutationalPushStage where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId, I: Input, M: Mutator, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Gets the number of iterations as a random number @@ -82,17 +78,15 @@ where } } -impl PushStage - for StdMutationalPushStage +impl PushStage + for StdMutationalPushStage where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, I: Input, M: Mutator, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Creates a new default mutational stage @@ -186,26 +180,24 @@ where } #[inline] - fn push_stage_helper(&self) -> &PushStageHelper { + fn push_stage_helper(&self) -> &PushStageHelper { &self.psh } #[inline] - fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper { + fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper { &mut self.psh } } -impl Iterator for StdMutationalPushStage +impl Iterator for StdMutationalPushStage where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, I: Input, M: Mutator, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasExecutions, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { type Item = Result; @@ -215,16 +207,14 @@ where } } -impl StdMutationalPushStage +impl StdMutationalPushStage where - C: Corpus, CS: CorpusScheduler, EM: EventFirer + 
EventRestarter + HasEventManagerId, I: Input, M: Mutator, OT: ObserversTuple, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand, + S: HasClientPerfMonitor + HasCorpus + HasRand, Z: ExecutionProcessor + EvaluatorObservers + HasCorpusScheduler, { /// Creates a new default mutational stage @@ -232,7 +222,7 @@ where #[allow(clippy::type_complexity)] pub fn new( mutator: M, - shared_state: Rc>>>, + shared_state: Rc>>>, exit_kind: Rc>>, stage_idx: i32, ) -> Self { diff --git a/libafl/src/stages/sync.rs b/libafl/src/stages/sync.rs index 488bb74f33..8da43f4fda 100644 --- a/libafl/src/stages/sync.rs +++ b/libafl/src/stages/sync.rs @@ -10,8 +10,6 @@ use std::{ }; use crate::{ - bolts::rands::Rand, - corpus::Corpus, fuzzer::Evaluator, inputs::Input, stages::Stage, @@ -38,28 +36,24 @@ impl SyncFromDiskMetadata { /// A stage that loads testcases from disk to sync with other fuzzers such as AFL++ #[derive(Debug)] -pub struct SyncFromDiskStage +pub struct SyncFromDiskStage where - C: Corpus, CB: FnMut(&mut Z, &mut S, &Path) -> Result, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, Z: Evaluator, { sync_dir: PathBuf, load_callback: CB, #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, E, EM, I, R, S, Z)>, + phantom: PhantomData<(E, EM, I, S, Z)>, } -impl Stage for SyncFromDiskStage +impl Stage for SyncFromDiskStage where - C: Corpus, CB: FnMut(&mut Z, &mut S, &Path) -> Result, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, Z: Evaluator, { #[inline] @@ -99,13 +93,11 @@ where } } -impl SyncFromDiskStage +impl SyncFromDiskStage where - C: Corpus, CB: FnMut(&mut Z, &mut S, &Path) -> Result, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, Z: Evaluator, { /// Creates a new [`SyncFromDiskStage`] @@ -163,13 +155,11 @@ where } } -impl - SyncFromDiskStage Result, E, EM, I, R, S, Z> +impl + SyncFromDiskStage Result, E, EM, I, S, Z> where - C: Corpus, I: Input, - R: Rand, - S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, + S: HasClientPerfMonitor + HasCorpus + HasRand + HasMetadata, Z: Evaluator, { /// Creates a new [`SyncFromDiskStage`] invoking `Input::from_file` to load inputs diff --git a/libafl/src/stages/tracing.rs b/libafl/src/stages/tracing.rs index 166c8b5b39..9ff10a6b02 100644 --- a/libafl/src/stages/tracing.rs +++ b/libafl/src/stages/tracing.rs @@ -19,26 +19,24 @@ use crate::monitors::PerfFeature; /// A stage that runs a tracer executor #[derive(Clone, Debug)] -pub struct TracingStage +pub struct TracingStage where I: Input, - C: Corpus, TE: Executor + HasObservers, OT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { tracer_executor: TE, #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, EM, I, OT, S, TE, Z)>, + phantom: PhantomData<(EM, I, OT, S, TE, Z)>, } -impl Stage for TracingStage +impl Stage for TracingStage where I: Input, - C: Corpus, TE: Executor + HasObservers, OT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { #[inline] fn perform( @@ -82,13 +80,12 @@ where } } -impl TracingStage +impl TracingStage where I: Input, - C: Corpus, TE: Executor + HasObservers, OT: ObserversTuple, - S: HasClientPerfMonitor + 
HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { /// Creates a new default stage pub fn new(tracer_executor: TE) -> Self { @@ -106,20 +103,19 @@ where /// A stage that runs the shadow executor using also the shadow observers #[derive(Clone, Debug)] -pub struct ShadowTracingStage { +pub struct ShadowTracingStage { #[allow(clippy::type_complexity)] - phantom: PhantomData<(C, E, EM, I, OT, S, SOT, Z)>, + phantom: PhantomData<(E, EM, I, OT, S, SOT, Z)>, } -impl Stage, EM, S, Z> - for ShadowTracingStage +impl Stage, EM, S, Z> + for ShadowTracingStage where I: Input, - C: Corpus, E: Executor + HasObservers, OT: ObserversTuple, SOT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus + Debug, + S: HasClientPerfMonitor + HasExecutions + HasCorpus + Debug, { #[inline] fn perform( @@ -163,14 +159,13 @@ where } } -impl ShadowTracingStage +impl ShadowTracingStage where I: Input, - C: Corpus, E: Executor + HasObservers, OT: ObserversTuple, SOT: ObserversTuple, - S: HasClientPerfMonitor + HasExecutions + HasCorpus, + S: HasClientPerfMonitor + HasExecutions + HasCorpus, { /// Creates a new default stage pub fn new(_executor: &mut ShadowExecutor) -> Self { diff --git a/libafl/src/state/mod.rs b/libafl/src/state/mod.rs index 8a84f1c048..a85bcf9306 100644 --- a/libafl/src/state/mod.rs +++ b/libafl/src/state/mod.rs @@ -32,15 +32,13 @@ pub const DEFAULT_MAX_SIZE: usize = 1_048_576; pub trait State: Serialize + DeserializeOwned {} /// Trait for elements offering a corpus -pub trait HasCorpus -where - C: Corpus, - I: Input, -{ +pub trait HasCorpus { + /// The associated type implementing [`Corpus`]. + type Corpus: Corpus; /// The testcase corpus - fn corpus(&self) -> &C; + fn corpus(&self) -> &Self::Corpus; /// The testcase corpus (mut) - fn corpus_mut(&mut self) -> &mut C; + fn corpus_mut(&mut self) -> &mut Self::Corpus; } /// Interact with the maximum size @@ -52,26 +50,23 @@ pub trait HasMaxSize { } /// Trait for elements offering a corpus of solutions -pub trait HasSolutions -where - C: Corpus, - I: Input, -{ +pub trait HasSolutions { + /// The associated type implementing [`Corpus`] for solutions + type Solutions: Corpus; /// The solutions corpus - fn solutions(&self) -> &C; + fn solutions(&self) -> &Self::Solutions; /// The solutions corpus (mut) - fn solutions_mut(&mut self) -> &mut C; + fn solutions_mut(&mut self) -> &mut Self::Solutions; } /// Trait for elements offering a rand -pub trait HasRand -where - R: Rand, -{ +pub trait HasRand { + /// The associated type implementing [`Rand`] + type Rand: Rand; /// The rand instance - fn rand(&self) -> &R; + fn rand(&self) -> &Self::Rand; /// The rand instance (mut) - fn rand_mut(&mut self) -> &mut R; + fn rand_mut(&mut self) -> &mut Self::Rand; } /// Trait for offering a [`ClientPerfMonitor`] @@ -116,15 +111,14 @@ pub trait HasMetadata { } /// Trait for elements offering a feedback -pub trait HasFeedbackStates -where - FT: FeedbackStatesTuple, -{ +pub trait HasFeedbackStates { + /// The associated feedback type implementing [`FeedbackStatesTuple`]. 
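On the provider side, a state type now names its concrete `Rand` and `Corpus` once via the new associated types instead of threading them through as extra generic parameters. A hedged sketch with a hand-rolled state; `MyState` and the chosen concrete types are illustrative, and the `HasCorpus<I>` shape follows the bounds used elsewhere in this patch.

use libafl::{
    bolts::rands::StdRand,
    corpus::InMemoryCorpus,
    inputs::BytesInput,
    state::{HasCorpus, HasRand},
};

struct MyState {
    rand: StdRand,
    corpus: InMemoryCorpus<BytesInput>,
}

impl HasRand for MyState {
    type Rand = StdRand;

    fn rand(&self) -> &Self::Rand {
        &self.rand
    }

    fn rand_mut(&mut self) -> &mut Self::Rand {
        &mut self.rand
    }
}

impl HasCorpus<BytesInput> for MyState {
    type Corpus = InMemoryCorpus<BytesInput>;

    fn corpus(&self) -> &Self::Corpus {
        &self.corpus
    }

    fn corpus_mut(&mut self) -> &mut Self::Corpus {
        &mut self.corpus
    }
}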
+ type FeedbackStates: FeedbackStatesTuple; /// The feedback states - fn feedback_states(&self) -> &FT; + fn feedback_states(&self) -> &Self::FeedbackStates; /// The feedback states (mut) - fn feedback_states_mut(&mut self) -> &mut FT; + fn feedback_states_mut(&mut self) -> &mut Self::FeedbackStates; } /// Trait for the execution counter @@ -192,7 +186,7 @@ where { } -impl HasRand for StdState +impl HasRand for StdState where C: Corpus, I: Input, @@ -200,20 +194,22 @@ where FT: FeedbackStatesTuple, SC: Corpus, { + type Rand = R; + /// The rand instance #[inline] - fn rand(&self) -> &R { + fn rand(&self) -> &Self::Rand { &self.rand } /// The rand instance (mut) #[inline] - fn rand_mut(&mut self) -> &mut R { + fn rand_mut(&mut self) -> &mut Self::Rand { &mut self.rand } } -impl HasCorpus for StdState +impl HasCorpus for StdState where C: Corpus, I: Input, @@ -221,6 +217,8 @@ where FT: FeedbackStatesTuple, SC: Corpus, { + type Corpus = C; + /// Returns the corpus #[inline] fn corpus(&self) -> &C { @@ -234,7 +232,7 @@ where } } -impl HasSolutions for StdState +impl HasSolutions for StdState where C: Corpus, I: Input, @@ -242,6 +240,8 @@ where FT: FeedbackStatesTuple, SC: Corpus, { + type Solutions = SC; + /// Returns the solutions corpus #[inline] fn solutions(&self) -> &SC { @@ -276,7 +276,7 @@ where } } -impl HasFeedbackStates for StdState +impl HasFeedbackStates for StdState where C: Corpus, I: Input, @@ -284,6 +284,8 @@ where FT: FeedbackStatesTuple, SC: Corpus, { + type FeedbackStates = FT; + /// The feedback states #[inline] fn feedback_states(&self) -> &FT { diff --git a/libafl_qemu/src/executor.rs b/libafl_qemu/src/executor.rs index 9bdd0f89db..8805ce15ba 100644 --- a/libafl_qemu/src/executor.rs +++ b/libafl_qemu/src/executor.rs @@ -7,7 +7,6 @@ use core::{ }; use libafl::{ - corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{ inprocess::inprocess_get_state, Executor, ExitKind, HasObservers, InProcessExecutor, @@ -474,7 +473,7 @@ where OT: ObserversTuple, QT: QemuHelperTuple, { - pub fn new( + pub fn new( harness_fn: &'a mut H, emulator: &'a Emulator, helpers: QT, @@ -485,9 +484,8 @@ where ) -> Result where EM: EventFirer + EventRestarter, - OC: Corpus, OF: Feedback, - S: HasSolutions + HasClientPerfMonitor, + S: HasSolutions + HasClientPerfMonitor, Z: HasObjective, { let slf = Self { From 87cd44b762e6cfd23047bd6918b8e094c2bde7d7 Mon Sep 17 00:00:00 2001 From: Dongjia Zhang Date: Fri, 7 Jan 2022 19:07:39 +0900 Subject: [PATCH 25/25] Use UserStats for Stability (#451) * stability:serstats * tostring * fix no_std * fix * fmt * clippy --- libafl/src/events/llmp.rs | 9 -------- libafl/src/events/mod.rs | 26 +++++++++++++--------- libafl/src/events/simple.rs | 8 ------- libafl/src/monitors/mod.rs | 44 ++++++++----------------------------- 4 files changed, 25 insertions(+), 62 deletions(-) diff --git a/libafl/src/events/llmp.rs b/libafl/src/events/llmp.rs index 5485a38c57..5321e46ded 100644 --- a/libafl/src/events/llmp.rs +++ b/libafl/src/events/llmp.rs @@ -170,15 +170,11 @@ where Event::UpdateExecStats { time, executions, - stability, phantom: _, } => { // TODO: The monitor buffer should be added on client add. 
let client = monitor.client_stats_mut_for(client_id); client.update_executions(*executions as u64, *time); - if let Some(stability) = stability { - client.update_stability(*stability); - } monitor.display(event.name().to_string(), client_id); Ok(BrokerEventResult::Handled) } @@ -196,7 +192,6 @@ where Event::UpdatePerfMonitor { time, executions, - stability, introspection_monitor, phantom: _, } => { @@ -208,10 +203,6 @@ where // Update the normal monitor for this client client.update_executions(*executions as u64, *time); - if let Some(stability) = stability { - client.update_stability(*stability); - } - // Update the performance monitor for this client client.update_introspection_monitor((**introspection_monitor).clone()); diff --git a/libafl/src/events/mod.rs b/libafl/src/events/mod.rs index be9bdf0361..b130ab76a9 100644 --- a/libafl/src/events/mod.rs +++ b/libafl/src/events/mod.rs @@ -6,7 +6,10 @@ pub mod llmp; pub use llmp::*; use ahash::AHasher; -use alloc::{string::String, vec::Vec}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; use core::{fmt, hash::Hasher, marker::PhantomData, time::Duration}; use serde::{Deserialize, Serialize}; @@ -187,8 +190,6 @@ where UpdateExecStats { /// The time of generation of the [`Event`] time: Duration, - /// The stability of this fuzzer node, if known - stability: Option, /// The executions of this client executions: usize, /// [`PhantomData`] @@ -210,8 +211,6 @@ where time: Duration, /// The executions of this client executions: usize, - /// The stability of this fuzzer node, if known - stability: Option, /// Current performance statistics introspection_monitor: Box, @@ -256,7 +255,6 @@ where } => "Testcase", Event::UpdateExecStats { time: _, - stability: _, executions: _, phantom: _, } @@ -269,7 +267,6 @@ where Event::UpdatePerfMonitor { time: _, executions: _, - stability: _, introspection_monitor: _, phantom: _, } => "PerfMonitor", @@ -351,7 +348,6 @@ where S: HasExecutions + HasClientPerfMonitor, { let executions = *state.executions(); - let stability = *state.stability(); let cur = current_time(); // default to 0 here to avoid crashes on clock skew if cur.checked_sub(last_report_time).unwrap_or_default() > monitor_timeout { @@ -361,12 +357,23 @@ where state, Event::UpdateExecStats { executions, - stability, time: cur, phantom: PhantomData, }, )?; + if let Some(x) = state.stability() { + let stability = f64::from(*x); + self.fire( + state, + Event::UpdateUserStats { + name: "stability".to_string(), + value: UserStats::Float(stability), + phantom: PhantomData, + }, + )?; + } + // If performance monitor are requested, fire the `UpdatePerfMonitor` event #[cfg(feature = "introspection")] { @@ -381,7 +388,6 @@ where Event::UpdatePerfMonitor { executions, time: cur, - stability, introspection_monitor: Box::new(state.introspection_monitor().clone()), phantom: PhantomData, }, diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index 6c65301022..3fbd6983cb 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -150,16 +150,12 @@ where Event::UpdateExecStats { time, executions, - stability, phantom: _, } => { // TODO: The monitor buffer should be added on client add. 
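With the dedicated stability field gone, the value travels over the generic `UpdateUserStats` event, so any per-client float metric can take the same path. A minimal hedged helper; the function and its name are illustrative, and the assumption that `EventFirer::fire` is generic over the state type follows this era of the crate rather than anything shown in this patch.

use core::marker::PhantomData;

use libafl::{
    events::{Event, EventFirer},
    inputs::Input,
    monitors::UserStats,
    Error,
};

/// Fire an arbitrary per-client float metric, just like the new stability report does.
fn fire_float_stat<I, S, EM>(
    mgr: &mut EM,
    state: &mut S,
    name: &str,
    value: f64,
) -> Result<(), Error>
where
    I: Input,
    EM: EventFirer<I>,
{
    mgr.fire(
        state,
        Event::UpdateUserStats {
            name: name.to_string(),
            value: UserStats::Float(value),
            phantom: PhantomData,
        },
    )
}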
 let client = monitor.client_stats_mut_for(0);
 client.update_executions(*executions as u64, *time);
- if let Some(stability) = stability {
- client.update_stability(*stability);
- }
 monitor.display(event.name().to_string(), 0);
 Ok(BrokerEventResult::Handled)
@@ -179,7 +175,6 @@ where
 Event::UpdatePerfMonitor {
 time,
 executions,
- stability,
 introspection_monitor,
 phantom: _,
 } => {
@@ -187,9 +182,6 @@ where
 let client = &mut monitor.client_stats_mut()[0];
 client.update_executions(*executions as u64, *time);
 client.update_introspection_monitor((**introspection_monitor).clone());
- if let Some(stability) = stability {
- client.update_stability(*stability);
- }
 monitor.display(event.name().to_string(), 0);
 Ok(BrokerEventResult::Handled)
 }
diff --git a/libafl/src/monitors/mod.rs b/libafl/src/monitors/mod.rs
index 043c2b791f..028fcbadd8 100644
--- a/libafl/src/monitors/mod.rs
+++ b/libafl/src/monitors/mod.rs
@@ -3,10 +3,11 @@
 pub mod multi;
 pub use multi::MultiMonitor;

-use alloc::{
- string::{String, ToString},
- vec::Vec,
-};
+use alloc::{string::String, vec::Vec};
+
+#[cfg(feature = "introspection")]
+use alloc::string::ToString;
+
 use core::{fmt, time::Duration};
 use hashbrown::HashMap;
 use serde::{Deserialize, Serialize};
@@ -20,6 +21,8 @@ const CLIENT_STATS_TIME_WINDOW_SECS: u64 = 5; // 5 seconds
 pub enum UserStats {
 /// A numerical value
 Number(u64),
+ /// A Float value
+ Float(f64),
 /// A `String`
 String(String),
 /// A ratio of two values
@@ -30,6 +33,7 @@ impl fmt::Display for UserStats {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 match self {
 UserStats::Number(n) => write!(f, "{}", n),
+ UserStats::Float(n) => write!(f, "{}", n),
 UserStats::String(s) => write!(f, "{}", s),
 UserStats::Ratio(a, b) => {
 if *b == 0 {
@@ -60,8 +64,6 @@ pub struct ClientStats {
 pub last_execs_per_sec: f32,
 /// User-defined monitor
 pub user_monitor: HashMap,
- /// Stability, and if we ever received a stability value
- pub stability: Option,
 /// Client performance statistics
 #[cfg(feature = "introspection")]
 pub introspection_monitor: ClientPerfMonitor,
@@ -91,11 +93,6 @@ impl ClientStats {
 self.objective_size = objective_size;
 }

- /// we got a new information about stability for this client, insert it.
- pub fn update_stability(&mut self, stability: f32) {
- self.stability = Some(stability);
- }
-
 /// Get the calculated executions per second for this client
 #[allow(clippy::cast_sign_loss, clippy::cast_precision_loss)]
 pub fn execs_per_sec(&mut self, cur_time: Duration) -> u64 {
@@ -157,24 +154,6 @@ pub trait Monitor {
 /// show the monitor to the user
 fn display(&mut self, event_msg: String, sender_id: u32);

- /// Show the Stabiliity
- fn stability(&self) -> Option {
- let mut stability_total = 0_f32;
- let mut num = 0_usize;
- for stat in self.client_stats() {
- if let Some(stability) = stat.stability {
- stability_total += stability;
- num += 1;
- }
- }
- if num == 0 {
- None
- } else {
- #[allow(clippy::cast_precision_loss)]
- Some(stability_total / num as f32)
- }
- }
-
 /// Amount of elements in the corpus (combined for all children)
 fn corpus_size(&self) -> u64 {
 self.client_stats()
@@ -295,7 +274,7 @@ where
 fn display(&mut self, event_msg: String, sender_id: u32) {
 let fmt = format!(
- "[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}{}, exec/sec: {}",
+ "[{} #{}] run time: {}, clients: {}, corpus: {}, objectives: {}, executions: {}, exec/sec: {}",
 event_msg,
 sender_id,
 format_duration_hms(&(current_time() - self.start_time)),
 self.client_stats().len(),
 self.corpus_size(),
 self.objective_size(),
 self.total_execs(),
- if let Some(stability) = self.stability() {
- format!(", stability: {:.2}", stability)
- } else {
- "".to_string()
- },
 self.execs_per_sec()
 );
 (self.print_fn)(fmt);