Better docu (#90)
* more docs
* more docs:
* more docu
* more docu
* finished docs
* cleaned up markup
* must_use tags added
* more docs
* more docu, less clippy
* more fixes
This commit is contained in:
parent bfa3fffc18
commit ed9169119e
.github/workflows/build_and_test.yml (vendored): 2 changed lines

@@ -57,7 +57,7 @@ jobs:
 - name: Build Docs
 run: cargo doc
 - name: Test Docs
-run: cargo test --doc
+run: cargo test --all-features --doc
 - name: Run clippy
 run: ./clippy.sh
 windows:
@@ -1,7 +1,7 @@
 #!/bin/sh
 # Clippy checks
 cargo clean -p libafl
-RUST_BACKTRACE=full cargo clippy --all -- \
+RUST_BACKTRACE=full cargo clippy --all --all-features --tests -- \
 -D clippy::pedantic \
 -W clippy::cast_sign_loss \
 -W clippy::similar-names \
@@ -10,9 +10,9 @@ RUST_BACKTRACE=full cargo clippy --all -- \
 -W clippy::unused_self \
 -W clippy::too_many_lines \
 -W clippy::option_if_let_else \
--A missing-docs \
--A clippy::doc_markdown \
--A clippy::must-use-candidate \
+-W clippy::must-use-candidate \
+-W clippy::if-not-else \
+-W clippy::doc-markdown \
 -A clippy::type_repetition_in_bounds \
 -A clippy::missing-errors-doc \
 -A clippy::cast-possible-truncation \
@@ -23,4 +23,3 @@ RUST_BACKTRACE=full cargo clippy --all -- \
 -A clippy::unseparated-literal-suffix \
 -A clippy::module-name-repetitions \
 -A clippy::unreadable-literal \
--A clippy::if-not-else \
@@ -276,7 +276,7 @@ unsafe fn fuzz(
 // Corpus that will be evolved, we keep it in memory for performance
 InMemoryCorpus::new(),
 // Feedbacks to rate the interestingness of an input
-MaxMapFeedback::new_with_observer_track(&edges_observer, true, false),
+MaxMapFeedback::new_tracking_with_observer(&edges_observer, true, false),
 // Corpus in which we store solutions (crashes in this example),
 // on disk so the user can get them after stopping the fuzzer
 OnDiskCorpus::new_save_meta(objective_dir, Some(OnDiskMetadataFormat::JsonPretty))
@@ -80,7 +80,7 @@ fn fuzz(corpus_dirs: Vec<PathBuf>, objective_dir: PathBuf, broker_port: u16) ->
 InMemoryCorpus::new(),
 // Feedbacks to rate the interestingness of an input
 feedback_or!(
-MaxMapFeedback::new_with_observer_track(&edges_observer, true, false),
+MaxMapFeedback::new_tracking_with_observer(&edges_observer, true, false),
 TimeFeedback::new_with_observer(&time_observer)
 ),
 // Corpus in which we store solutions (crashes in this example),
@@ -79,7 +79,7 @@ fn fuzz(corpus_dirs: Vec<PathBuf>, objective_dir: PathBuf, broker_port: u16) ->
 // Corpus that will be evolved, we keep it in memory for performance
 InMemoryCorpus::new(),
 // Feedbacks to rate the interestingness of an input
-MaxMapFeedback::new_with_observer_track(&edges_observer, true, false),
+MaxMapFeedback::new_tracking_with_observer(&edges_observer, true, false),
 // Corpus in which we store solutions (crashes in this example),
 // on disk so the user can get them after stopping the fuzzer
 OnDiskCorpus::new(objective_dir).unwrap(),
@@ -78,7 +78,7 @@ fn fuzz(corpus_dirs: Vec<PathBuf>, objective_dir: PathBuf, broker_port: u16) ->
 InMemoryCorpus::new(),
 // Feedbacks to rate the interestingness of an input
 feedback_or!(
-MaxMapFeedback::new_with_observer_track(&edges_observer, true, false),
+MaxMapFeedback::new_tracking_with_observer(&edges_observer, true, false),
 TimeFeedback::new_with_observer(&time_observer)
 ),
 // Corpus in which we store solutions (crashes in this example),
@@ -17,6 +17,7 @@ pub struct GzipCompressor {
 impl GzipCompressor {
 /// If the buffer is at lest larger as large as the `threshold` value, we compress the buffer.
 /// When given a `threshold` of `0`, the `GzipCompressor` will always compress.
+#[must_use]
 pub fn new(threshold: usize) -> Self {
 GzipCompressor { threshold }
 }
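A small usage sketch for the constructor documented above. Only `GzipCompressor::new` appears in this diff; the module path `libafl::bolts::compress` and the `compress` call (returning `Ok(None)` below the threshold and the compressed bytes otherwise) are assumptions, not confirmed by this commit.

```rust
use libafl::bolts::compress::GzipCompressor;

fn main() -> Result<(), libafl::Error> {
    // Threshold 0: always compress, per the documentation above.
    let compressor = GzipCompressor::new(0);

    // Assumed API: `compress` returns `None` for buffers under the threshold,
    // and `Some(compressed_bytes)` otherwise.
    let maybe_compressed = compressor.compress(&[0u8; 1024])?;
    assert!(maybe_compressed.is_some());
    Ok(())
}
```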
@@ -2,8 +2,8 @@
 A library for low level message passing
 
 To send new messages, the clients place a new message at the end of their
-client_out_map. If the current map is filled up, they place a
-LLMP_AGE_END_OF_PAGE_V1 msg and alloc a new shmap.
+`client_out_map`. If the current map is filled up, they place an end of page (`EOP`)
+msg and alloc a new [`ShMem`].
 Once the broker mapped this same page, it flags it as safe for unmapping.
 
 ```text
@@ -17,14 +17,14 @@ Once the broker mapped this same page, it flags it as safe for unmapping.
 [broker]
 ```
 
-After the broker received a new message for clientN, (clientN_out->current_id
-!= last_message->message_id) the broker will copy the message content to its
+After the broker received a new message for clientN, (`clientN_out->current_id
+!= last_message->message_id`) the broker will copy the message content to its
 own, centralized page.
 
-The clients periodically check (current_broadcast_map->current_id !=
-last_message->message_id) for new incoming messages. If the page is filled up,
-the broker instead creates a new page and places a LLMP_TAG_END_OF_PAGE_V1
-message in its queue. The LLMP_TAG_END_PAGE_V1 buf contains the new string to
+The clients periodically check (`current_broadcast_map->current_id !=
+last_message->message_id`) for new incoming messages. If the page is filled up,
+the broker instead creates a new page and places an end of page (`EOP`)
+message in its queue. The `EOP` buf contains the new description to
 access the shared map. The clients then switch over to read from that new
 current map.
 
@@ -41,17 +41,20 @@ current map.
 [client0] [client1] ... [clientN]
 ```
 
-In the future, if we would need zero copy, the current_broadcast_map could instead
-list the client_out_map ID an offset for each message. In that case, the clients
-also need to create new shmaps once their bufs are filled up.
+In the future, if we would need zero copy, the `current_broadcast_map` could instead
+list the `client_out_map` ID an offset for each message. In that case, the clients
+also need to create a new [`ShMem`] each time their bufs are filled up.
 
 
-To use, you will have to create a broker using llmp_broker_new().
-Then register some clientloops using llmp_broker_register_threaded_clientloop
-(or launch them as seperate processes) and call llmp_broker_run();
+To use, you will have to create a broker using [`LlmpBroker::new()`].
+Then, create some [`LlmpClient`]`s` in other threads and register them
+with the main thread using [`LlmpBroker::register_client`].
+Finally, call [`LlmpBroker::loop_forever()`].
 
+For broker2broker communication, all messages are forwarded via network sockets.
+
 Check out the `llmp_test` example in ./examples, or build it with `cargo run --example llmp_test`.
 
 */
 
 use alloc::{string::String, vec::Vec};
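The broker/client flow described in the new module docs can be sketched roughly as follows. This is a minimal sketch, not the crate's own example: the module paths (`libafl::bolts::llmp`, `libafl::bolts::shmem`) are assumed from the file layout, and the arguments of `LlmpBroker::loop_forever` are not shown in this diff, so that call is left commented.

```rust
use libafl::{
    bolts::{
        llmp::{LlmpBroker, LlmpSharedMap},
        shmem::{ShMemProvider, StdShMemProvider},
    },
    Error,
};

/// Minimal broker setup following the flow described above.
fn start_broker(
    client_page: LlmpSharedMap<<StdShMemProvider as ShMemProvider>::Mem>,
) -> Result<(), Error> {
    // Each process/thread gets its own shared-memory provider instance.
    let shmem_provider = StdShMemProvider::new()?;

    // The broker owns the central broadcast map and forwards client messages.
    let mut broker = LlmpBroker::new(shmem_provider)?;

    // Register a client's outgoing map so the broker starts forwarding it.
    broker.register_client(client_page);

    // Finally the broker would enter its forwarding loop; the exact arguments
    // of `LlmpBroker::loop_forever` (message callback, sleep interval) are not
    // shown in this diff, so the call is only sketched here:
    // broker.loop_forever(...);

    Ok(())
}
```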
@@ -90,11 +93,11 @@ use libc::ucontext_t;
 /// We'll start off with 256 megabyte maps per fuzzer client
 #[cfg(not(feature = "llmp_small_maps"))]
 const LLMP_CFG_INITIAL_MAP_SIZE: usize = 1 << 28;
-/// If llmp_small_maps is set, we start off with 1 meg.
+/// If the `llmp_small_maps` feature is set, we start off with 1 meg.
 #[cfg(feature = "llmp_small_maps")]
 const LLMP_CFG_INITIAL_MAP_SIZE: usize = 1 << 20;
 /// What byte count to align messages to
-/// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value
+/// [`LlmpMsg`] sizes (including header) will always be rounded up to be a multiple of this value.
 const LLMP_CFG_ALIGNNMENT: usize = 64;
 
 /// A msg fresh from the press: No tag got sent by the user yet
@@ -162,9 +165,15 @@ pub type MessageId = u64;
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub enum TcpRequest {
 /// We would like to be a local client.
-LocalClientHello { shmem_description: ShMemDescription },
+LocalClientHello {
+/// The sharedmem description of the connecting client.
+shmem_description: ShMemDescription,
+},
 /// We would like to establish a b2b connection.
-RemoteBrokerHello { hostname: String },
+RemoteBrokerHello {
+/// The hostname of our broker, trying to connect.
+hostname: String,
+},
 }
 
 impl TryFrom<&Vec<u8>> for TcpRequest {
@@ -206,11 +215,13 @@ pub enum TcpResponse {
 /// This broker's hostname
 hostname: String,
 },
 /// Notify the client on the other side that it has been accepted.
 LocalClientAccepted {
 /// The ClientId this client should send messages as
+/// Mainly used for client-side deduplication of incoming messages
 client_id: ClientId,
 },
 /// Notify the remote broker has been accepted.
 RemoteBrokerAccepted {
+/// The broker id of this element
 broker_id: BrokerId,
@@ -233,12 +244,16 @@ impl TryFrom<&Vec<u8>> for TcpResponse {
 /// Abstraction for listeners
 #[cfg(feature = "std")]
 pub enum Listener {
+/// Listener listening on `tcp`.
 Tcp(TcpListener),
 }
 
 /// A listener stream abstraction
 #[cfg(feature = "std")]
 pub enum ListenerStream {
+/// Listener listening on `tcp`.
 Tcp(TcpStream, SocketAddr),
+/// No listener provided.
 Empty(),
 }
 
@@ -277,7 +292,7 @@ unsafe fn llmp_msg_in_page(page: *const LlmpPage, msg: *const LlmpMsg) -> bool {
 && (page as *const u8).add((*page).size_total) > msg as *const u8
 }
 
-/// allign to LLMP_CFG_ALIGNNMENT=64 bytes
+/// Align the page to `LLMP_CFG_ALIGNNMENT=64` bytes
 #[inline]
 const fn llmp_align(to_align: usize) -> usize {
 // check if we need to align first
@@ -293,8 +308,8 @@ const fn llmp_align(to_align: usize) -> usize {
 }
 }
 
-/// Reads the stored message offset for the given env_name (by appending _OFFSET)
-/// If the content of the env is _NULL, returns None
+/// Reads the stored message offset for the given `env_name` (by appending `_OFFSET`).
+/// If the content of the env is `_NULL`, returns [`Option::None`].
 #[cfg(feature = "std")]
 #[inline]
 fn msg_offset_from_env(env_name: &str) -> Result<Option<u64>, Error> {
@@ -361,7 +376,7 @@ fn recv_tcp_msg(stream: &mut TcpStream) -> Result<Vec<u8>, Error> {
 
 /// In case we don't have enough space, make sure the next page will be large
 /// enough. For now, we want to have at least enough space to store 2 of the
-/// largest messages we encountered (plus message one new_page message).
+/// largest messages we encountered (plus message one `new_page` message).
 #[inline]
 fn new_map_size(max_alloc: usize) -> usize {
 max(
@@ -371,8 +386,8 @@ fn new_map_size(max_alloc: usize) -> usize {
 .next_power_of_two()
 }
 
-/// Initialize a new llmp_page. size should be relative to
-/// llmp_page->messages
+/// Initialize a new `llmp_page`. The size should be relative to
+/// `llmp_page->messages`
 unsafe fn _llmp_page_init<SHM: ShMem>(shmem: &mut SHM, sender: u32, allow_reinit: bool) {
 #[cfg(all(feature = "llmp_debug", feature = "std"))]
 dbg!("_llmp_page_init: shmem {}", &shmem);
@@ -481,6 +496,7 @@ impl LlmpMsg {
 /// Gets the buffer from this message as slice, with the corrent length.
 /// # Safety
 /// This is unsafe if somebody has access to shared mem pages on the system.
+#[must_use]
 pub unsafe fn as_slice_unsafe(&self) -> &[u8] {
 slice::from_raw_parts(self.buf.as_ptr(), self.buf_len as usize)
 }
@@ -524,9 +540,15 @@
 SP: ShMemProvider + 'static,
 {
 /// A broker and a thread using this tcp background thread
-IsBroker { broker: LlmpBroker<SP> },
+IsBroker {
+/// The [`LlmpBroker`] of this [`LlmpConnection`].
+broker: LlmpBroker<SP>,
+},
 /// A client, connected to the port
-IsClient { client: LlmpClient<SP> },
+IsClient {
+/// The [`LlmpClient`] of this [`LlmpConnection`].
+client: LlmpClient<SP>,
+},
 }
 
 impl<SP> LlmpConnection<SP>
@@ -586,6 +608,7 @@
 }
 }
 
+/// Send the `buf` with given `flags`.
 pub fn send_buf_with_flags(&mut self, tag: Tag, buf: &[u8], flags: Flags) -> Result<(), Error> {
 match self {
 LlmpConnection::IsBroker { broker } => broker.send_buf_with_flags(tag, flags, buf),
@@ -621,9 +644,9 @@ pub struct LlmpPage {
 pub messages: [LlmpMsg; 0],
 }
 
-/// Message payload when a client got added LLMP_TAG_CLIENT_ADDED_V1 */
+/// Message payload when a client got added */
 /// This is an internal message!
-/// LLMP_TAG_END_OF_PAGE_V1
+/// [`LLMP_TAG_END_OF_PAGE_V1`]
 #[derive(Copy, Clone, Debug)]
 #[repr(C, packed)]
 struct LlmpPayloadSharedMapInfo {
@@ -659,6 +682,9 @@ impl<SP> LlmpSender<SP>
 where
 SP: ShMemProvider,
 {
+/// Create a new [`LlmpSender`] using a given [`ShMemProvider`], and `id`.
+/// If `keep_pages_forever` is `true`, `ShMem` will never be freed.
+/// If it is `false`, the pages will be unmapped once they are full, and have been mapped by at least one `LlmpReceiver`.
 pub fn new(mut shmem_provider: SP, id: u32, keep_pages_forever: bool) -> Result<Self, Error> {
 Ok(Self {
 id,
@@ -683,7 +709,7 @@
 self.last_msg_sent = ptr::null_mut();
 }
 
-/// Reattach to a vacant out_map, to with a previous sender stored the information in an env before.
+/// Reattach to a vacant `out_map`, to with a previous sender stored the information in an env before.
 #[cfg(feature = "std")]
 pub fn on_existing_from_env(mut shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
 let msg_sent_offset = msg_offset_from_env(env_name)?;
@@ -695,7 +721,7 @@
 }
 
 /// Store the info to this sender to env.
-/// A new client can reattach to it using on_existing_from_env
+/// A new client can reattach to it using [`LlmpSender::on_existing_from_env()`].
 #[cfg(feature = "std")]
 pub fn to_env(&self, env_name: &str) -> Result<(), Error> {
 let current_out_map = self.out_maps.last().unwrap();
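The `to_env` / `on_existing_from_env` pair lets a child process reattach to a sender created by its parent. A rough sketch against the signatures shown in this diff; the env variable name is arbitrary and the module paths are assumed from the file layout:

```rust
use libafl::bolts::{
    llmp::LlmpSender,
    shmem::{ShMemProvider, StdShMemProvider},
};

fn main() -> Result<(), libafl::Error> {
    let provider = StdShMemProvider::new()?;

    // Parent: create a sender and publish its map description (plus the
    // message offset, stored under "<NAME>_OFFSET") to the environment.
    let sender = LlmpSender::new(provider.clone(), 0, false)?;
    sender.to_env("_EXAMPLE_SENDER")?;

    // Child (e.g. after fork/exec): reattach to exactly the same map.
    let _restored = LlmpSender::on_existing_from_env(provider, "_EXAMPLE_SENDER")?;
    Ok(())
}
```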
@@ -723,7 +749,7 @@
 }
 }
 
-/// Reattach to a vacant out_map.
+/// Reattach to a vacant `out_map`.
 /// It is essential, that the receiver (or someone else) keeps a pointer to this map
 /// else reattach will get a new, empty page, from the OS, or fail.
 pub fn on_existing_map(
@@ -765,10 +791,10 @@
 self.out_maps.drain(0..unmap_until_excl);
 }
 
-/// Intern: Special allocation function for EOP messages (and nothing else!)
-/// The normal alloc will fail if there is not enough space for buf_len_padded + EOP
-/// So if alloc_next fails, create new page if necessary, use this function,
-/// place EOP, commit EOP, reset, alloc again on the new space.
+/// Intern: Special allocation function for `EOP` messages (and nothing else!)
+/// The normal alloc will fail if there is not enough space for `buf_len_padded + EOP`
+/// So if [`alloc_next`] fails, create new page if necessary, use this function,
+/// place `EOP`, commit `EOP`, reset, alloc again on the new space.
 unsafe fn alloc_eop(&mut self) -> Result<*mut LlmpMsg, Error> {
 let mut map = self.out_maps.last_mut().unwrap();
 let page = map.page_mut();
@@ -800,7 +826,7 @@
 }
 
 /// Intern: Will return a ptr to the next msg buf, or None if map is full.
-/// Never call alloc_next without either sending or cancelling the last allocated message for this page!
+/// Never call [`alloc_next`] without either sending or cancelling the last allocated message for this page!
 /// There can only ever be up to one message allocated per page at each given time.
 unsafe fn alloc_next_if_space(&mut self, buf_len: usize) -> Option<*mut LlmpMsg> {
 let buf_len_padded;
@@ -903,9 +929,9 @@
 Some(ret)
 }
 
-/// Commit the message last allocated by alloc_next to the queue.
+/// Commit the message last allocated by [`alloc_next`] to the queue.
 /// After commiting, the msg shall no longer be altered!
-/// It will be read by the consuming threads (broker->clients or client->broker)
+/// It will be read by the consuming threads (`broker->clients` or `client->broker`)
 #[inline(never)] // Not inlined to make cpu-level reodering (hopefully?) improbable
 unsafe fn send(&mut self, msg: *mut LlmpMsg) -> Result<(), Error> {
 // dbg!("Sending msg {:?}", msg);
@@ -1058,6 +1084,7 @@
 }
 }
 
+/// Send a `buf` with the given `flags`.
 pub fn send_buf_with_flags(&mut self, tag: Tag, flags: Flags, buf: &[u8]) -> Result<(), Error> {
 // Make sure we don't reuse already allocated tags
 if tag == LLMP_TAG_NEW_SHM_CLIENT
@@ -1081,7 +1108,7 @@
 }
 }
 
-// Describe this cient in a way, that it can be restored later with `Self::on_existing_from_description`
+/// Describe this [`LlmpClient`] in a way that it can be restored later, using [`Self::on_existing_from_description`].
 pub fn describe(&self) -> Result<LlmpDescription, Error> {
 let map = self.out_maps.last().unwrap();
 let last_message_offset = if self.last_msg_sent.is_null() {
@@ -1095,7 +1122,8 @@
 })
 }
 
-// Create this client on an existing map from the given description. acquired with `self.describe`
+/// Create this client on an existing map from the given description.
+/// Acquired with [`self.describe`].
 pub fn on_existing_from_description(
 mut shmem_provider: SP,
 description: &LlmpDescription,
@@ -1114,6 +1142,7 @@ pub struct LlmpReceiver<SP>
 where
 SP: ShMemProvider,
 {
+/// Id of this provider
 pub id: u32,
 /// Pointer to the last meg this received
 pub last_msg_recvd: *const LlmpMsg,
@@ -1128,7 +1157,7 @@ impl<SP> LlmpReceiver<SP>
 where
 SP: ShMemProvider,
 {
-/// Reattach to a vacant recv_map, to with a previous sender stored the information in an env before.
+/// Reattach to a vacant `recv_map`, to with a previous sender stored the information in an env before.
 #[cfg(feature = "std")]
 pub fn on_existing_from_env(mut shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
 Self::on_existing_map(
@@ -1139,7 +1168,7 @@
 }
 
 /// Store the info to this receiver to env.
-/// A new client can reattach to it using on_existing_from_env
+/// A new client can reattach to it using [`LlmpReceiver::on_existing_from_env()`]
 #[cfg(feature = "std")]
 pub fn to_env(&self, env_name: &str) -> Result<(), Error> {
 let current_out_map = &self.current_recv_map;
@@ -1148,7 +1177,7 @@
 }
 
 /// Create a Receiver, reattaching to an existing sender map.
-/// It is essential, that the sender (or someone else) keeps a pointer to the sender_map
+/// It is essential, that the sender (or someone else) keeps a pointer to the `sender_map`
 /// else reattach will get a new, empty page, from the OS, or fail.
 pub fn on_existing_map(
 shmem_provider: SP,
@@ -1329,7 +1358,7 @@
 }
 }
 
-// Describe this cient in a way, that it can be restored later with `Self::on_existing_from_description`
+/// Describe this client in a way, that it can be restored later with [`Self::on_existing_from_description`]
 pub fn describe(&self) -> Result<LlmpDescription, Error> {
 let map = &self.current_recv_map;
 let last_message_offset = if self.last_msg_recvd.is_null() {
@@ -1343,7 +1372,7 @@
 })
 }
 
-// Create this client on an existing map from the given description. acquired with `self.describe`
+/// Create this client on an existing map from the given description. acquired with `self.describe`
 pub fn on_existing_from_description(
 mut shmem_provider: SP,
 description: &LlmpDescription,
@@ -1369,7 +1398,7 @@
 
 // TODO: May be obsolete
 /// The page struct, placed on a shared mem instance.
-/// A thin wrapper around a ShMem implementation, with special Llmp funcs
+/// A thin wrapper around a [`ShMem`] implementation, with special [`crate::bolts::llmp`] funcs
 impl<SHM> LlmpSharedMap<SHM>
 where
 SHM: ShMem,
@@ -1441,7 +1470,8 @@
 }
 
 /// Gets the offset of a message on this here page.
-/// Will return IllegalArgument error if msg is not on page.
+/// Will return [`crate::Error::IllegalArgument`] error if msg is not on page.
+///
 /// # Safety
 /// This dereferences msg, make sure to pass a proper pointer to it.
 #[allow(clippy::cast_sign_loss)]
@@ -1458,8 +1488,8 @@
 }
 }
 
-/// Retrieve the stored msg from env_name + _OFFSET.
-/// It will restore the stored offset by env_name and return the message.
+/// Retrieve the stored msg from `env_name` + `_OFFSET`.
+/// It will restore the stored offset by `env_name` and return the message.
 #[cfg(feature = "std")]
 pub fn msg_from_env(&mut self, map_env_name: &str) -> Result<*mut LlmpMsg, Error> {
 match msg_offset_from_env(map_env_name)? {
@@ -1468,8 +1498,9 @@
 }
 }
 
-/// Store this msg offset to env_name + _OFFSET env variable.
-/// It can be restored using msg_from_env with the same env_name later.
+/// Store this msg offset to `env_name` + `_OFFSET` env variable.
+/// It can be restored using [`LlmpSharedMap::msg_from_env()`] with the same `env_name` later.
+///
 /// # Safety
 /// This function will dereference the msg ptr, make sure it's valid.
 #[cfg(feature = "std")]
@@ -1486,7 +1517,7 @@
 }
 
 /// Gets this message from this page, at the indicated offset.
-/// Will return IllegalArgument error if the offset is out of bounds.
+/// Will return [`crate::Error::IllegalArgument`] error if the offset is out of bounds.
 pub fn msg_from_offset(&mut self, offset: u64) -> Result<*mut LlmpMsg, Error> {
 let offset = offset as usize;
 unsafe {
@@ -1525,6 +1556,7 @@
 shmem_provider: SP,
 }
 
+/// A signal handler for the [`LlmpBroker`].
 #[cfg(unix)]
 pub struct LlmpBrokerSignalHandler {
 shutting_down: bool,
@@ -1547,7 +1579,7 @@ impl<SP> LlmpBroker<SP>
 where
 SP: ShMemProvider + 'static,
 {
-/// Create and initialize a new llmp_broker
+/// Create and initialize a new [`LlmpBroker`]
 pub fn new(mut shmem_provider: SP) -> Result<Self, Error> {
 Ok(LlmpBroker {
 llmp_out: LlmpSender {
@@ -1575,7 +1607,7 @@
 }
 
 /// Registers a new client for the given sharedmap str and size.
-/// Returns the id of the new client in broker.client_map
+/// Returns the id of the new client in [`broker.client_map`]
 pub fn register_client(&mut self, mut client_page: LlmpSharedMap<SP::Mem>) {
 // Tell the client it may unmap this page now.
 client_page.mark_save_to_unmap();
@@ -1744,6 +1776,7 @@
 self.llmp_out.send_buf(tag, buf)
 }
 
+/// Sends a `buf` with the given `flags`.
 pub fn send_buf_with_flags(&mut self, tag: Tag, flags: Flags, buf: &[u8]) -> Result<(), Error> {
 self.llmp_out.send_buf_with_flags(tag, flags, buf)
 }
@@ -1781,7 +1814,7 @@
 /// For broker to broker connections:
 /// Launches a proxy thread.
 /// It will read outgoing messages from the given broker map (and handle EOP by mapping a new page).
-/// This function returns the ShMemDescription the client uses to place incoming messages.
+/// This function returns the [`ShMemDescription`] the client uses to place incoming messages.
 /// The thread exits, when the remote broker disconnects.
 #[cfg(feature = "std")]
 #[allow(clippy::let_and_return)]
@@ -2171,7 +2204,7 @@
 SP: ShMemProvider,
 {
 /// Reattach to a vacant client map.
-/// It is essential, that the broker (or someone else) kept a pointer to the out_map
+/// It is essential, that the broker (or someone else) kept a pointer to the `out_map`
 /// else reattach will get a new, empty page, from the OS, or fail
 #[allow(clippy::needless_pass_by_value)]
 pub fn on_existing_map(
@@ -2196,7 +2229,7 @@
 })
 }
 
-/// Recreate this client from a previous client.to_env
+/// Recreate this client from a previous [`client.to_env()`]
 #[cfg(feature = "std")]
 pub fn on_existing_from_env(shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
 Ok(Self {
@@ -2213,7 +2246,7 @@
 }
 
 /// Write the current state to env.
-/// A new client can attach to exactly the same state by calling on_existing_map.
+/// A new client can attach to exactly the same state by calling [`LlmpClient::on_existing_map()`].
 #[cfg(feature = "std")]
 pub fn to_env(&self, env_name: &str) -> Result<(), Error> {
 self.sender.to_env(&format!("{}_SENDER", env_name))?;
@@ -2257,7 +2290,7 @@
 self.sender.save_to_unmap()
 }
 
-/// Creates a new LlmpClient
+/// Creates a new [`LlmpClient`]
 pub fn new(
 mut shmem_provider: SP,
 initial_broker_map: LlmpSharedMap<SP::Mem>,
@@ -2296,6 +2329,7 @@
 self.sender.send_buf(tag, buf)
 }
 
+/// Send a `buf` with the given `flags`.
 pub fn send_buf_with_flags(&mut self, tag: Tag, flags: Flags, buf: &[u8]) -> Result<(), Error> {
 self.sender.send_buf_with_flags(tag, flags, buf)
 }
@@ -2359,20 +2393,21 @@
 self.receiver.recv_buf_blocking()
 }
 
+/// Receive a `buf` from the broker, including the `flags` used during transmission.
 #[allow(clippy::type_complexity)]
 pub fn recv_buf_with_flags(&mut self) -> Result<Option<(ClientId, Tag, Flags, &[u8])>, Error> {
 self.receiver.recv_buf_with_flags()
 }
 
 #[cfg(feature = "std")]
-/// Creates a new LlmpClient, reading the map id and len from env
+/// Creates a new [`LlmpClient`], reading the map id and len from env
 pub fn create_using_env(mut shmem_provider: SP, env_var: &str) -> Result<Self, Error> {
 let map = LlmpSharedMap::existing(shmem_provider.existing_from_env(env_var)?);
 Self::new(shmem_provider, map)
 }
 
 #[cfg(feature = "std")]
-/// Create a LlmpClient, getting the ID from a given port
+/// Create a [`LlmpClient`], getting the ID from a given port
 pub fn create_attach_to_tcp(mut shmem_provider: SP, port: u16) -> Result<Self, Error> {
 let mut stream = TcpStream::connect(format!("{}:{}", _LLMP_BIND_ADDR, port))?;
 println!("Connected to port {}", port);
@@ -38,6 +38,7 @@ use uds::{UnixListenerExt, UnixSocketAddr, UnixStreamExt};
 
 const ASHMEM_SERVER_NAME: &str = "@ashmem_server";
 
+/// Hands out served shared maps, as used on Android.
 #[derive(Debug)]
 pub struct ServedShMemProvider {
 stream: UnixStream,
@@ -45,6 +46,8 @@ pub struct ServedShMemProvider {
 id: i32,
 }
 
+/// [`ShMem`] that got served from a [`AshmemService`] via domain sockets and can now be used in this program.
+/// It works around Android's lack of "proper" shared maps.
 #[derive(Clone, Debug)]
 pub struct ServedShMem {
 inner: ManuallyDrop<AshmemShMem>,
@@ -112,7 +115,7 @@ impl Clone for ServedShMemProvider {
 impl ShMemProvider for ServedShMemProvider {
 type Mem = ServedShMem;
 
-/// Connect to the server and return a new ServedShMemProvider
+/// Connect to the server and return a new [`ServedShMemProvider`]
 fn new() -> Result<Self, Error> {
 let mut res = Self {
 stream: UnixStream::connect_to_unix_addr(
@@ -170,7 +173,7 @@ impl ShMemProvider for ServedShMemProvider {
 }
 }
 
-/// A request sent to the ShMem server to receive a fd to a shared map
+/// A request sent to the [`ShMem`] server to receive a fd to a shared map
 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
 pub enum AshmemRequest {
 /// Register a new map with a given size.
@@ -199,6 +202,8 @@ impl AshmemClient {
 }
 }
 
+/// The [`AshmemService`] is a service handing out [`ShMem`] pages via unix domain sockets.
+/// It is mainly used and needed on Android.
 #[derive(Debug)]
 pub struct AshmemService {
 provider: AshmemShMemProvider,
@@ -214,7 +219,7 @@ enum AshmemResponse {
 }
 
 impl AshmemService {
-/// Create a new AshMem service
+/// Create a new [`AshMem`] service
 fn new() -> Result<Self, Error> {
 Ok(AshmemService {
 provider: AshmemShMemProvider::new()?,
@@ -331,7 +336,7 @@ impl AshmemService {
 Ok(())
 }
 
-/// Create a new AshmemService, then listen and service incoming connections in a new thread.
+/// Create a new [`AshmemService`], then listen and service incoming connections in a new thread.
 pub fn start() -> Result<thread::JoinHandle<Result<(), Error>>, Error> {
 #[allow(clippy::mutex_atomic)]
 let syncpair = Arc::new((Mutex::new(false), Condvar::new()));
@@ -23,26 +23,42 @@ use crate::Error;
 
 pub use libc::{c_void, siginfo_t};
 
+/// All signals on this system, as `enum`.
 #[derive(IntoPrimitive, TryFromPrimitive, Clone, Copy)]
 #[repr(i32)]
 #[allow(clippy::pub_enum_variant_names)]
 pub enum Signal {
+/// `SIGABRT` signal id
 SigAbort = SIGABRT,
+/// `SIGBUS` signal id
 SigBus = SIGBUS,
+/// `SIGFPE` signal id
 SigFloatingPointException = SIGFPE,
+/// `SIGILL` signal id
 SigIllegalInstruction = SIGILL,
+/// `SIGPIPE` signal id
 SigPipe = SIGPIPE,
+/// `SIGSEGV` signal id
 SigSegmentationFault = SIGSEGV,
+/// `SIGUSR2` signal id
 SigUser2 = SIGUSR2,
+/// `SIGALARM` signal id
 SigAlarm = SIGALRM,
+/// `SIGHUP` signal id
 SigHangUp = SIGHUP,
+/// `SIGKILL` signal id
 SigKill = SIGKILL,
+/// `SIGQUIT` signal id
 SigQuit = SIGQUIT,
+/// `SIGTERM` signal id
 SigTerm = SIGTERM,
+/// `SIGINT` signal id
 SigInterrupt = SIGINT,
+/// `SIGTRAP` signal id
 SigTrap = SIGTRAP,
 }
 
+/// A list of crashing signals
 pub static CRASH_SIGNALS: &[Signal] = &[
 Signal::SigAbort,
 Signal::SigBus,
@@ -85,6 +101,7 @@ impl Display for Signal {
 }
 }
 
+/// A trait for `LibAFL` signal handling
 pub trait Handler {
 /// Handle a signal
 fn handle(&mut self, signal: Signal, info: siginfo_t, _context: &mut ucontext_t);
@@ -114,7 +131,7 @@ static mut SIGNAL_HANDLERS: [Option<HandlerHolder>; 32] = [
 /// Internal function that is being called whenever a signal we are registered for arrives.
 /// # Safety
 /// This should be somewhat safe to call for signals previously registered,
-/// unless the signal handlers registered using [setup_signal_handler] are broken.
+/// unless the signal handlers registered using [`setup_signal_handler()`] are broken.
 unsafe fn handle_signal(sig: c_int, info: siginfo_t, void: *mut c_void) {
 let signal = &Signal::try_from(sig).unwrap();
 let handler = {
@@ -128,7 +145,7 @@ unsafe fn handle_signal(sig: c_int, info: siginfo_t, void: *mut c_void) {
 
 /// Setup signal handlers in a somewhat rusty way.
 /// This will allocate a signal stack and set the signal handlers accordingly.
-/// It is, for example, used in the [crate::executors::InProcessExecutor] to restart the fuzzer in case of a crash,
+/// It is, for example, used in the [`struct@crate::executors::InProcessExecutor`] to restart the fuzzer in case of a crash,
 /// or to handle `SIGINT` in the broker process.
 /// # Safety
 /// The signal handlers will be called on any signal. They should (tm) be async safe.
@@ -7,18 +7,24 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
 /// Trait to convert into an Owned type
 pub trait IntoOwned {
+/// Returns if the current type is an owned type.
+#[must_use]
 fn is_owned(&self) -> bool;
 
+/// Transfer the current type into an owned type.
+#[must_use]
 fn into_owned(self) -> Self;
 }
 
-/// Wrap a reference and convert to a Box on serialize
+/// Wrap a reference and convert to a [`Box`] on serialize
 #[derive(Clone, Debug)]
 pub enum OwnedRef<'a, T>
 where
 T: 'a + ?Sized,
 {
+/// A ref to a type
 Ref(&'a T),
+/// An owned [`Box`] of a type
 Owned(Box<T>),
 }
 
@@ -54,6 +60,7 @@ impl<'a, T> AsRef<T> for OwnedRef<'a, T>
 where
 T: Sized,
 {
+#[must_use]
 fn as_ref(&self) -> &T {
 match self {
 OwnedRef::Ref(r) => r,
@@ -66,6 +73,7 @@ impl<'a, T> IntoOwned for OwnedRef<'a, T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedRef::Ref(_) => false,
@@ -73,6 +81,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedRef::Ref(r) => OwnedRef::Owned(Box::new(r.clone())),
@@ -84,7 +93,9 @@ where
 /// Wrap a mutable reference and convert to a Box on serialize
 #[derive(Debug)]
 pub enum OwnedRefMut<'a, T: 'a + ?Sized> {
+/// A mutable ref to a type
 Ref(&'a mut T),
+/// An owned [`Box`] of a type
 Owned(Box<T>),
 }
 
@@ -113,6 +124,7 @@ where
 }
 
 impl<'a, T: Sized> AsRef<T> for OwnedRefMut<'a, T> {
+#[must_use]
 fn as_ref(&self) -> &T {
 match self {
 OwnedRefMut::Ref(r) => r,
@@ -122,6 +134,7 @@ impl<'a, T: Sized> AsRef<T> for OwnedRefMut<'a, T> {
 }
 
 impl<'a, T: Sized> AsMut<T> for OwnedRefMut<'a, T> {
+#[must_use]
 fn as_mut(&mut self) -> &mut T {
 match self {
 OwnedRefMut::Ref(r) => r,
@@ -134,6 +147,7 @@ impl<'a, T> IntoOwned for OwnedRefMut<'a, T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedRefMut::Ref(_) => false,
@@ -141,6 +155,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedRefMut::Ref(r) => OwnedRefMut::Owned(Box::new(r.clone())),
@@ -152,7 +167,9 @@ where
 /// Wrap a slice and convert to a Vec on serialize
 #[derive(Clone, Debug)]
 pub enum OwnedSlice<'a, T: 'a + Sized> {
+/// A ref to a slice
 Ref(&'a [T]),
+/// A ref to an owned [`Vec`]
 Owned(Vec<T>),
 }
 
@@ -181,6 +198,8 @@
 }
 
 impl<'a, T: Sized> OwnedSlice<'a, T> {
+/// Get the [`OwnedSlice`] as slice.
+#[must_use]
 pub fn as_slice(&self) -> &[T] {
 match self {
 OwnedSlice::Ref(r) => r,
@@ -193,6 +212,7 @@ impl<'a, T> IntoOwned for OwnedSlice<'a, T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedSlice::Ref(_) => false,
@@ -200,6 +220,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedSlice::Ref(r) => OwnedSlice::Owned(r.to_vec()),
@@ -211,7 +232,9 @@ where
 /// Wrap a mutable slice and convert to a Vec on serialize
 #[derive(Debug)]
 pub enum OwnedSliceMut<'a, T: 'a + Sized> {
+/// A ptr to a mutable slice of the type
 Ref(&'a mut [T]),
+/// An owned [`Vec`] of the type
 Owned(Vec<T>),
 }
 
@@ -240,6 +263,8 @@
 }
 
 impl<'a, T: Sized> OwnedSliceMut<'a, T> {
+/// Get the value as slice
+#[must_use]
 pub fn as_slice(&self) -> &[T] {
 match self {
 OwnedSliceMut::Ref(r) => r,
@@ -247,6 +272,8 @@ impl<'a, T: Sized> OwnedSliceMut<'a, T> {
 }
 }
 
+/// Get the value as mut slice
+#[must_use]
 pub fn as_mut_slice(&mut self) -> &mut [T] {
 match self {
 OwnedSliceMut::Ref(r) => r,
@@ -259,6 +286,7 @@ impl<'a, T> IntoOwned for OwnedSliceMut<'a, T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedSliceMut::Ref(_) => false,
@@ -266,6 +294,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedSliceMut::Ref(r) => OwnedSliceMut::Owned(r.to_vec()),
@@ -277,7 +306,9 @@ where
 /// Wrap a C-style pointer and convert to a Box on serialize
 #[derive(Clone, Debug)]
 pub enum OwnedPtr<T: Sized> {
+/// Ptr to the content
 Ptr(*const T),
+/// Ptr to an owned [`Box`] of the content.
 Owned(Box<T>),
 }
 
@@ -303,6 +334,7 @@ where
 }
 
 impl<T: Sized> AsRef<T> for OwnedPtr<T> {
+#[must_use]
 fn as_ref(&self) -> &T {
 match self {
 OwnedPtr::Ptr(p) => unsafe { p.as_ref().unwrap() },
@@ -315,6 +347,7 @@ impl<T> IntoOwned for OwnedPtr<T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedPtr::Ptr(_) => false,
@@ -322,6 +355,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedPtr::Ptr(p) => unsafe { OwnedPtr::Owned(Box::new(p.as_ref().unwrap().clone())) },
@@ -333,7 +367,9 @@ where
 /// Wrap a C-style mutable pointer and convert to a Box on serialize
 #[derive(Clone, Debug)]
 pub enum OwnedPtrMut<T: Sized> {
+/// A mut ptr to the content
 Ptr(*mut T),
+/// An owned [`Box`] to the content
 Owned(Box<T>),
 }
 
@@ -359,6 +395,7 @@ where
 }
 
 impl<T: Sized> AsRef<T> for OwnedPtrMut<T> {
+#[must_use]
 fn as_ref(&self) -> &T {
 match self {
 OwnedPtrMut::Ptr(p) => unsafe { p.as_ref().unwrap() },
@@ -380,6 +417,7 @@ impl<T> IntoOwned for OwnedPtrMut<T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedPtrMut::Ptr(_) => false,
@@ -387,6 +425,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedPtrMut::Ptr(p) => unsafe {
@@ -397,10 +436,12 @@ where
 }
 }
 
-/// Wrap a C-style pointer to an array (with size= and convert to a Vec on serialize
+/// Wrap a C-style pointer to an array (with size) and convert to a Vec on serialize
 #[derive(Clone, Debug)]
 pub enum OwnedArrayPtr<T: Sized> {
+/// Ptr to a slice
 ArrayPtr((*const T, usize)),
+/// A owned [`Vec`].
 Owned(Vec<T>),
 }
 
@@ -426,6 +467,8 @@ where
 }
 
 impl<T: Sized> OwnedArrayPtr<T> {
+/// Get a slice from this array.
+#[must_use]
 pub fn as_slice(&self) -> &[T] {
 match self {
 OwnedArrayPtr::ArrayPtr(p) => unsafe { core::slice::from_raw_parts(p.0, p.1) },
@@ -438,6 +481,7 @@ impl<T> IntoOwned for OwnedArrayPtr<T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedArrayPtr::ArrayPtr(_) => false,
@@ -445,6 +489,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedArrayPtr::ArrayPtr(p) => unsafe {
@@ -455,10 +500,12 @@ where
 }
 }
 
-/// Wrap a C-style mutable pointer to an array (with size= and convert to a Vec on serialize
+/// Wrap a C-style mutable pointer to an array (with size) and convert to a Vec on serialize
 #[derive(Clone, Debug)]
 pub enum OwnedArrayPtrMut<T: Sized> {
+/// A ptr to the array (or slice).
 ArrayPtr((*mut T, usize)),
+/// An owned [`Vec`].
 Owned(Vec<T>),
 }
 
@@ -484,6 +531,8 @@ where
 }
 
 impl<T: Sized> OwnedArrayPtrMut<T> {
+/// Return this array as slice
+#[must_use]
 pub fn as_slice(&self) -> &[T] {
 match self {
 OwnedArrayPtrMut::ArrayPtr(p) => unsafe { core::slice::from_raw_parts(p.0, p.1) },
@@ -491,6 +540,8 @@ impl<T: Sized> OwnedArrayPtrMut<T> {
 }
 }
 
+/// Return this array as mut slice
+#[must_use]
 pub fn as_mut_slice(&mut self) -> &mut [T] {
 match self {
 OwnedArrayPtrMut::ArrayPtr(p) => unsafe { core::slice::from_raw_parts_mut(p.0, p.1) },
@@ -503,6 +554,7 @@ impl<T> IntoOwned for OwnedArrayPtrMut<T>
 where
 T: Sized + Clone,
 {
+#[must_use]
 fn is_owned(&self) -> bool {
 match self {
 OwnedArrayPtrMut::ArrayPtr(_) => false,
@@ -510,6 +562,7 @@ where
 }
 }
 
+#[must_use]
 fn into_owned(self) -> Self {
 match self {
 OwnedArrayPtrMut::ArrayPtr(p) => unsafe {
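The `OwnedSlice` / `IntoOwned` pair documented above can be exercised roughly like this. A minimal sketch: the module path `libafl::bolts::ownedref` is assumed from the file layout, everything else follows the variants and signatures visible in this diff.

```rust
use libafl::bolts::ownedref::{IntoOwned, OwnedSlice};

fn main() {
    let data = vec![1u8, 2, 3];

    // Borrowing variant: wraps a plain slice, nothing is copied.
    let borrowed = OwnedSlice::Ref(&data[..]);
    assert!(!borrowed.is_owned());
    assert_eq!(borrowed.as_slice(), &[1u8, 2, 3][..]);

    // `into_owned` clones the data into the `Owned` variant, which is the
    // form such wrappers take after a serialize/deserialize round trip.
    let owned = borrowed.into_owned();
    assert!(owned.is_owned());
    assert_eq!(owned.as_slice(), &[1u8, 2, 3][..]);
}
```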
@@ -7,11 +7,23 @@ use core::any::{Any, TypeId};
 
 // yolo
 
+/// Get a `type_id` from it's previously unpacked `u64`.
+/// Opposite of [`unpack_type_id(id)`].
+///
+/// # Safety
+/// Probably not safe for future compilers, fine for now.
+#[must_use]
 pub fn pack_type_id(id: u64) -> TypeId {
 assert_eq_size!(TypeId, u64);
 unsafe { *(&id as *const u64 as *const TypeId) }
 }
 
+/// Unpack a `type_id` to an `u64`
+/// Opposite of [`pack_type_id(id)`].
+///
+/// # Safety
+/// Probably not safe for future compilers, fine for now.
+#[must_use]
 pub fn unpack_type_id(id: TypeId) -> u64 {
 assert_eq_size!(TypeId, u64);
 unsafe { *(&id as *const _ as *const u64) }
@@ -25,11 +37,13 @@ pub trait SerdeAny: Any + erased_serde::Serialize {
 fn as_any_mut(&mut self) -> &mut dyn Any;
 }
 
+/// Wrap a type for serialization
 pub struct Wrap<'a, T: ?Sized>(pub &'a T);
 impl<'a, T> Serialize for Wrap<'a, T>
 where
 T: ?Sized + erased_serde::Serialize + 'a,
 {
+/// Serialize the type
 fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
 where
 S: Serializer,
@@ -38,13 +52,16 @@ where
 }
 }
 
+/// Callback for [`SerdeAny`] deserialization.
 pub type DeserializeCallback<B> =
 fn(&mut dyn erased_serde::Deserializer) -> Result<Box<B>, erased_serde::Error>;
 
+/// Callback struct for deserialization of a [`SerdeAny`] type.
 pub struct DeserializeCallbackSeed<B>
 where
 B: ?Sized,
 {
+/// Callback for deserialization of a [`SerdeAny`] type.
 pub cb: DeserializeCallback<B>,
 }
 
@@ -63,9 +80,12 @@ where
 }
 }
 
+/// Creates the [`serde`] registry for serialization and deserialization of [`SerdeAny`].
+/// Each element needs to be registered so that it can be deserialized.
 #[macro_export]
 macro_rules! create_serde_registry_for_trait {
 ($mod_name:ident, $trait_name:path) => {
+/// A [`crate::bolts::serdeany`] module.
 pub mod $mod_name {
 
 use alloc::boxed::Box;
@@ -82,6 +102,7 @@ macro_rules! create_serde_registry_for_trait {
 };
 use $crate::Error;
 
+/// Visitor object used internally for the [`SerdeAny`] registry.
 pub struct BoxDynVisitor {}
 impl<'de> serde::de::Visitor<'de> for BoxDynVisitor {
 type Value = Box<dyn $trait_name>;
@@ -194,6 +215,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 
 impl SerdeAnyMap {
+/// Get an element from the map.
+#[must_use]
 #[inline]
 pub fn get<T>(&self) -> Option<&T>
 where
@@ -204,6 +227,8 @@ macro_rules! create_serde_registry_for_trait {
 .map(|x| x.as_ref().as_any().downcast_ref::<T>().unwrap())
 }
 
+/// Get a mutable borrow for an element in the map.
+#[must_use]
 #[inline]
 pub fn get_mut<T>(&mut self) -> Option<&mut T>
 where
@@ -214,6 +239,7 @@ macro_rules! create_serde_registry_for_trait {
 .map(|x| x.as_mut().as_any_mut().downcast_mut::<T>().unwrap())
 }
 
+/// Insert an element into the map.
 #[inline]
 pub fn insert<T>(&mut self, t: T)
 where
@@ -223,11 +249,15 @@ macro_rules! create_serde_registry_for_trait {
 .insert(unpack_type_id(TypeId::of::<T>()), Box::new(t));
 }
 
+/// Returns the count of elements in this map.
+#[must_use]
 #[inline]
 pub fn len(&self) -> usize {
 self.map.len()
 }
 
+/// Returns if the map contains the given type.
+#[must_use]
 #[inline]
 pub fn contains<T>(&self) -> bool
 where
@@ -236,6 +266,8 @@ macro_rules! create_serde_registry_for_trait {
 self.map.contains_key(&unpack_type_id(TypeId::of::<T>()))
 }
 
+/// Create a new [`SerdeAnyMap`].
+#[must_use]
 pub fn new() -> Self {
 SerdeAnyMap {
 map: HashMap::default(),
@@ -249,12 +281,15 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// A serializable [`HashMap`] wrapper for [`SerdeAny`] types, addressable by name.
 #[derive(Serialize, Deserialize)]
 pub struct NamedSerdeAnyMap {
 map: HashMap<u64, HashMap<u64, Box<dyn $trait_name>>>,
 }
 
 impl NamedSerdeAnyMap {
+/// Get an element by name
+#[must_use]
 #[inline]
 pub fn get<T>(&self, name: &str) -> Option<&T>
 where
@@ -268,6 +303,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get an element of a given type contained in this map by [`TypeId`].
+#[must_use]
 #[inline]
 pub fn by_typeid(&self, name: &str, typeid: &TypeId) -> Option<&dyn $trait_name> {
 match self.map.get(&unpack_type_id(*typeid)) {
@@ -278,6 +315,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get an element of a given type contained in this map by [`TypeId`], as mut.
+#[must_use]
 #[inline]
 pub fn get_mut<T>(&mut self, name: &str) -> Option<&mut T>
 where
@@ -291,6 +330,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get an element of a given type contained in this map by [`TypeId`], as mut.
+#[must_use]
 #[inline]
 pub fn by_typeid_mut(
 &mut self,
@@ -305,6 +346,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get all elements of a type contained in this map.
+#[must_use]
 #[inline]
 pub fn get_all<T>(
 &self,
@@ -325,6 +368,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get all elements of a given type contained in this map by [`TypeId`].
+#[must_use]
 #[inline]
 pub fn all_by_typeid(
 &self,
@@ -341,6 +386,7 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get all elements contained in this map, as mut.
 #[inline]
 pub fn get_all_mut<T>(
 &mut self,
@@ -362,6 +408,7 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get all [`TypeId`]`s` contained in this map, as mut.
 #[inline]
 pub fn all_by_typeid_mut(
 &mut self,
@@ -378,6 +425,7 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Get all [`TypeId`]`s` contained in this map.
 #[inline]
 pub fn all_typeids(
 &self,
@@ -388,6 +436,7 @@ macro_rules! create_serde_registry_for_trait {
 self.map.keys().map(|x| pack_type_id(*x))
 }
 
+/// Run `func` for each element in this map.
 #[inline]
 pub fn for_each(
 &self,
@@ -401,6 +450,7 @@ macro_rules! create_serde_registry_for_trait {
 Ok(())
 }
 
+/// Run `func` for each element in this map, getting a mutable borrow.
 #[inline]
 pub fn for_each_mut(
 &mut self,
@@ -414,6 +464,7 @@ macro_rules! create_serde_registry_for_trait {
 Ok(())
 }
 
+/// Insert an element into this map.
 #[inline]
 pub fn insert(&mut self, val: Box<dyn $trait_name>, name: &str) {
 let id = unpack_type_id((*val).type_id());
@@ -426,11 +477,15 @@ macro_rules! create_serde_registry_for_trait {
 .insert(xxhash_rust::xxh3::xxh3_64(name.as_bytes()), val);
 }
 
+/// Returns the `len` of this map.
+#[must_use]
 #[inline]
 pub fn len(&self) -> usize {
 self.map.len()
 }
 
+/// Returns if the element with a given type is contained in this map.
+#[must_use]
 #[inline]
 pub fn contains_type<T>(&self) -> bool
 where
@@ -439,6 +494,8 @@ macro_rules! create_serde_registry_for_trait {
 self.map.contains_key(&unpack_type_id(TypeId::of::<T>()))
 }
 
+/// Returns if the element by a given `name` is contained in this map.
+#[must_use]
 #[inline]
 pub fn contains<T>(&self, name: &str) -> bool
 where
@@ -450,6 +507,8 @@ macro_rules! create_serde_registry_for_trait {
 }
 }
 
+/// Create a new `SerdeAny` map.
+#[must_use]
 pub fn new() -> Self {
 Self {
 map: HashMap::default(),
@@ -493,6 +552,7 @@ macro_rules! create_serde_registry_for_trait {
 create_serde_registry_for_trait!(serdeany_registry, crate::bolts::serdeany::SerdeAny);
 pub use serdeany_registry::*;
 
+/// Implement a [`SerdeAny`], registering it in the [`RegistryBuilder`]
 #[cfg(feature = "std")]
 #[macro_export]
 macro_rules! impl_serdeany {
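A rough usage sketch for the `SerdeAnyMap` documented above. This assumes that `SerdeAnyMap` is re-exported from `libafl::bolts::serdeany` via the `pub use serdeany_registry::*;` shown in the diff, and that `impl_serdeany!` takes a bare type name; both are assumptions about the surrounding crate, not confirmed by this commit.

```rust
use libafl::bolts::serdeany::SerdeAnyMap;
use libafl::impl_serdeany;
use serde::{Deserialize, Serialize};

/// Example metadata type; `impl_serdeany!` is assumed to wire it into the
/// registry so it can round-trip through the `SerdeAny` machinery.
#[derive(Debug, Serialize, Deserialize)]
struct TestMetadata {
    counter: u64,
}
impl_serdeany!(TestMetadata);

fn main() {
    let mut map = SerdeAnyMap::new();
    map.insert(TestMetadata { counter: 42 });

    // Lookups are keyed by the (packed) `TypeId` of the stored type.
    assert!(map.contains::<TestMetadata>());
    assert_eq!(map.len(), 1);
    assert_eq!(map.get::<TestMetadata>().unwrap().counter, 42);
}
```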
@ -3,8 +3,10 @@
|
||||
|
||||
#[cfg(all(feature = "std", unix))]
|
||||
pub use unix_shmem::{UnixShMem, UnixShMemProvider};
|
||||
/// The default [`ShMemProvider`] for this os.
|
||||
#[cfg(all(feature = "std", unix))]
|
||||
pub type OsShMemProvider = UnixShMemProvider;
|
||||
/// The default [`ShMem`] for this os.
|
||||
#[cfg(all(feature = "std", unix))]
|
||||
pub type OsShMem = UnixShMem;
|
||||
|
||||
@ -22,8 +24,10 @@ pub type StdShMemProvider = RcShMemProvider<ServedShMemProvider>;
|
||||
#[cfg(target_os = "android")]
|
||||
pub type StdShMem = RcShMem<ServedShMemProvider>;
|
||||
|
||||
/// The default [`ShMemProvider`] for this os.
|
||||
#[cfg(all(feature = "std", not(target_os = "android")))]
|
||||
pub type StdShMemProvider = OsShMemProvider;
|
||||
/// The default [`ShMem`] for this os.
|
||||
#[cfg(all(feature = "std", not(target_os = "android")))]
|
||||
pub type StdShMem = OsShMem;
|
||||
|
||||
@ -49,15 +53,17 @@ pub struct ShMemDescription {
|
||||
}
|
||||
|
||||
impl ShMemDescription {
|
||||
pub fn from_string_and_size(string: &str, size: usize) -> Self {
|
||||
/// Create a description from a `id_str` and a `size`.
|
||||
#[must_use]
|
||||
pub fn from_string_and_size(id_str: &str, size: usize) -> Self {
|
||||
Self {
|
||||
size,
|
||||
id: ShMemId::from_string(string),
|
||||
id: ShMemId::from_string(id_str),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An id associated with a given shared memory mapping (ShMem), which can be used to
|
||||
/// An id associated with a given shared memory mapping ([`ShMem`]), which can be used to
|
||||
/// establish shared-mappings between proccesses.
|
||||
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash, Default)]
|
||||
pub struct ShMemId {
|
||||
@ -66,16 +72,19 @@ pub struct ShMemId {
|
||||
|
||||
impl ShMemId {
|
||||
/// Create a new id from a fixed-size string
|
||||
#[must_use]
|
||||
pub fn from_slice(slice: &[u8; 20]) -> Self {
|
||||
Self { id: *slice }
|
||||
}
|
||||
|
||||
/// Create a new id from an int
|
||||
#[must_use]
|
||||
pub fn from_int(val: i32) -> Self {
|
||||
Self::from_string(&val.to_string())
|
||||
}
|
||||
|
||||
/// Create a new id from a string
|
||||
#[must_use]
|
||||
pub fn from_string(val: &str) -> Self {
|
||||
let mut slice: [u8; 20] = [0; 20];
|
||||
for (i, val) in val.as_bytes().iter().enumerate() {
|
||||
@ -85,23 +94,29 @@ impl ShMemId {
|
||||
}
|
||||
|
||||
/// Get the id as a fixed-length slice
|
||||
#[must_use]
|
||||
pub fn as_slice(&self) -> &[u8; 20] {
|
||||
&self.id
|
||||
}
|
||||
|
||||
/// Get a string representation of this id
|
||||
#[must_use]
|
||||
pub fn to_string(&self) -> &str {
|
||||
let eof_pos = self.id.iter().position(|&c| c == 0).unwrap();
|
||||
alloc::str::from_utf8(&self.id[..eof_pos]).unwrap()
|
||||
}
|
||||
|
||||
/// Get an integer representation of this id
|
||||
#[must_use]
|
||||
pub fn to_int(&self) -> i32 {
|
||||
let id: i32 = self.to_string().parse().unwrap();
|
||||
id
|
||||
}
|
||||
}
|
||||

/// A [`ShMem`] is an interface to shared maps.
/// They are the backbone of [`crate::bolts::llmp`] for inter-process communication.
/// All you need for scaling on a new target is to implement this interface, as well as the respective [`ShMemProvider`].
pub trait ShMem: Sized + Debug + Clone {
/// Get the id of this shared memory mapping
fn id(&self) -> ShMemId;
@ -139,7 +154,11 @@ pub trait ShMem: Sized + Debug + Clone {
}
}

/// A [`ShMemProvider`] provides access to shared maps.
/// They are the backbone of [`crate::bolts::llmp`] for inter-process communication.
/// All you need for scaling on a new target is to implement this interface, as well as the respective [`ShMem`].
pub trait ShMemProvider: Send + Clone + Default + Debug {
/// The actual shared map handed out by this [`ShMemProvider`].
type Mem: ShMem;

/// Create a new instance of the provider
@ -156,6 +175,7 @@ pub trait ShMemProvider: Send + Clone + Default + Debug {
self.from_id_and_size(description.id, description.size)
}

/// Create a new sharedmap reference from an existing `id` and `len`
fn clone_ref(&mut self, mapping: &Self::Mem) -> Result<Self::Mem, Error> {
self.from_id_and_size(mapping.id(), mapping.len())
}
@ -171,13 +191,13 @@ pub trait ShMemProvider: Send + Clone + Default + Debug {
))
}

/// This method should be called after a fork or after cloning/a thread creation event, allowing the ShMem to
/// This method should be called after a fork or after cloning/a thread creation event, allowing the [`ShMem`] to
/// reset thread specific info, and potentially reconnect.
fn post_fork(&mut self) {
// do nothing
}

/// Release the resources associated with the given ShMem
/// Release the resources associated with the given [`ShMem`]
fn release_map(&mut self, _map: &mut Self::Mem) {
// do nothing
}
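A sketch of the provider workflow these docs describe: after a fork, a child resets provider state and re-attaches to the parent's mapping. Only methods shown in this diff are used; the helper function itself is hypothetical, not part of LibAFL:

use libafl::{bolts::shmem::ShMemProvider, Error};

fn reattach_in_child<SP: ShMemProvider>(
    provider: &mut SP,
    parent_map: &SP::Mem,
) -> Result<SP::Mem, Error> {
    // Reset thread/process specific provider state first...
    provider.post_fork();
    // ...then hand out a fresh reference to the same underlying mapping.
    provider.clone_ref(parent_map)
}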
@ -282,18 +302,22 @@ where
|
||||
|
||||
/// A Unix sharedmem implementation.
|
||||
///
|
||||
/// On Android, this is partially reused to wrap `Ashmem`,
|
||||
/// Although for an `AshmemShMemProvider using a unix domain socket
|
||||
/// On Android, this is partially reused to wrap [`unix_shmem::ashmem::AshmemShMem`],
|
||||
/// Although for an [`unix_shmem::ashmem::AshmemShMemProvider`] using a unix domain socket
|
||||
/// Is needed on top.
|
||||
#[cfg(all(unix, feature = "std"))]
|
||||
pub mod unix_shmem {
|
||||
|
||||
/// Shared memory provider for Android, allocating and forwarding maps over unix domain sockets.
|
||||
#[cfg(target_os = "android")]
|
||||
pub type UnixShMemProvider = ashmem::AshmemShMemProvider;
|
||||
/// Shared memory for Android
|
||||
#[cfg(target_os = "android")]
|
||||
pub type UnixShMem = ashmem::AshmemShMem;
|
||||
/// Shared memory Provider for Unix
|
||||
#[cfg(not(target_os = "android"))]
|
||||
pub type UnixShMemProvider = default::CommonUnixShMemProvider;
|
||||
/// Shared memory for Unix
|
||||
#[cfg(not(target_os = "android"))]
|
||||
pub type UnixShMem = ashmem::AshmemShMem;
|
||||
|
||||
@ -380,7 +404,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a UnixShMem of the existing shared memory mapping identified by id
|
||||
/// Get a [`UnixShMem`] of the existing shared memory mapping identified by id
|
||||
pub fn from_id_and_size(id: ShMemId, map_size: usize) -> Result<Self, Error> {
|
||||
unsafe {
|
||||
let map = shmat(id.to_int(), ptr::null(), 0) as *mut c_uchar;
|
||||
@ -415,7 +439,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop implementation for UnixShMem, which cleans up the mapping
|
||||
/// [`Drop`] implementation for [`UnixShMem`], which cleans up the mapping.
|
||||
#[cfg(unix)]
|
||||
impl Drop for CommonUnixShMem {
|
||||
fn drop(&mut self) {
|
||||
@ -425,7 +449,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// A ShMemProvider which uses shmget/shmat/shmctl to provide shared memory mappings.
|
||||
/// A [`ShMemProvider`] which uses `shmget`/`shmat`/`shmctl` to provide shared memory mappings.
|
||||
#[cfg(unix)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CommonUnixShMemProvider {}
|
||||
@ -439,7 +463,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement ShMemProvider for UnixShMemProvider
|
||||
/// Implement [`ShMemProvider`] for [`UnixShMemProvider`].
|
||||
#[cfg(unix)]
|
||||
impl ShMemProvider for CommonUnixShMemProvider {
|
||||
type Mem = CommonUnixShMem;
|
||||
@ -457,6 +481,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Module containing `ashmem` shared memory support, commonly used on Android.
|
||||
#[cfg(all(unix, feature = "std"))]
|
||||
pub mod ashmem {
|
||||
use core::slice;
|
||||
@ -564,7 +589,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a UnixShMem of the existing shared memory mapping identified by id
|
||||
/// Get a [`crate::bolts::shmem::unix_shmem::UnixShMem`] of the existing [`ShMem`] mapping identified by id.
|
||||
pub fn from_id_and_size(id: ShMemId, map_size: usize) -> Result<Self, Error> {
|
||||
unsafe {
|
||||
let fd: i32 = id.to_string().parse().unwrap();
|
||||
@ -618,7 +643,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop implementation for AshmemShMem, which cleans up the mapping
|
||||
/// [`Drop`] implementation for [`AshmemShMem`], which cleans up the mapping.
|
||||
#[cfg(unix)]
|
||||
impl Drop for AshmemShMem {
|
||||
fn drop(&mut self) {
|
||||
@ -639,7 +664,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// A ShMemProvider which uses ashmem to provide shared memory mappings.
|
||||
/// A [`ShMemProvider`] which uses ashmem to provide shared memory mappings.
|
||||
#[cfg(unix)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AshmemShMemProvider {}
|
||||
@ -653,7 +678,7 @@ pub mod unix_shmem {
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement ShMemProvider for AshmemShMemProvider
|
||||
/// Implement [`ShMemProvider`] for [`AshmemShMemProvider`], for the Android `ShMem`.
|
||||
#[cfg(unix)]
|
||||
impl ShMemProvider for AshmemShMemProvider {
|
||||
type Mem = AshmemShMem;
|
||||
|
@ -1,4 +1,4 @@
|
||||
//! Compiletime lists used throughout the libafl universe
|
||||
//! Compiletime lists/tuples used throughout the `LibAFL` universe
|
||||
|
||||
pub use tuple_list::{tuple_list, tuple_list_type, TupleList};
|
||||
|
||||
@ -34,10 +34,14 @@ const fn type_eq<T: ?Sized, U: ?Sized>() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Gets the length of the element
|
||||
pub trait HasLen {
|
||||
/// The length as constant `usize`
|
||||
const LEN: usize;
|
||||
|
||||
/// The length
|
||||
fn len(&self) -> usize;
|
||||
/// Returns true, if empty
|
||||
fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
@ -62,26 +66,32 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Finds the `const_name` and `name_id`
|
||||
pub trait HasNameId {
|
||||
/// Gets the `const_name` for this entry
|
||||
fn const_name(&self) -> &'static str;
|
||||
|
||||
/// Gets the `name_id` for this entry
|
||||
fn name_id(&self) -> u64 {
|
||||
xxh3_64(self.const_name().as_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the id and `const_name` for the given index in a tuple
|
||||
pub trait HasNameIdTuple: HasLen {
|
||||
fn get_const_name(&self, index: usize) -> Option<&'static str>;
|
||||
/// Gets the `const_name` for the entry at the given index
|
||||
fn const_name_for(&self, index: usize) -> Option<&'static str>;
|
||||
|
||||
fn get_name_id(&self, index: usize) -> Option<u64>;
|
||||
/// Gets the `name_id` for the entry at the given index
|
||||
fn name_id_for(&self, index: usize) -> Option<u64>;
|
||||
}
|
||||
|
||||
impl HasNameIdTuple for () {
|
||||
fn get_const_name(&self, _index: usize) -> Option<&'static str> {
|
||||
fn const_name_for(&self, _index: usize) -> Option<&'static str> {
|
||||
None
|
||||
}
|
||||
|
||||
fn get_name_id(&self, _index: usize) -> Option<u64> {
|
||||
fn name_id_for(&self, _index: usize) -> Option<u64> {
|
||||
None
|
||||
}
|
||||
}
|
||||
@ -91,25 +101,28 @@ where
|
||||
Head: HasNameId,
|
||||
Tail: HasNameIdTuple,
|
||||
{
|
||||
fn get_const_name(&self, index: usize) -> Option<&'static str> {
|
||||
fn const_name_for(&self, index: usize) -> Option<&'static str> {
|
||||
if index == 0 {
|
||||
Some(self.0.const_name())
|
||||
} else {
|
||||
self.1.get_const_name(index - 1)
|
||||
self.1.const_name_for(index - 1)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_name_id(&self, index: usize) -> Option<u64> {
|
||||
fn name_id_for(&self, index: usize) -> Option<u64> {
|
||||
if index == 0 {
|
||||
Some(self.0.name_id())
|
||||
} else {
|
||||
self.1.get_name_id(index - 1)
|
||||
self.1.name_id_for(index - 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the first element with the given type
|
||||
pub trait MatchFirstType {
|
||||
/// Returns the first element with the given type as borrow, or [`Option::None`]
|
||||
fn match_first_type<T: 'static>(&self) -> Option<&T>;
|
||||
/// Returns the first element with the given type as mutable borrow, or [`Option::None`]
|
||||
fn match_first_type_mut<T: 'static>(&mut self) -> Option<&mut T>;
|
||||
}
|
||||
|
||||
@ -144,8 +157,11 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Match by type
|
||||
pub trait MatchType {
|
||||
/// Match by type and call the passed `f` function with a borrow, if found
|
||||
fn match_type<T: 'static>(&self, f: fn(t: &T));
|
||||
/// Match by type and call the passed `f` function with a mutable borrow, if found
|
||||
fn match_type_mut<T: 'static>(&mut self, f: fn(t: &mut T));
|
||||
}
|
||||
|
||||
@ -182,12 +198,14 @@ pub trait Named {
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
|
||||
/// A named tuple
|
||||
pub trait NamedTuple: HasLen {
|
||||
fn get_name(&self, index: usize) -> Option<&str>;
|
||||
/// Gets the name of this tuple
|
||||
fn name(&self, index: usize) -> Option<&str>;
|
||||
}
|
||||
|
||||
impl NamedTuple for () {
|
||||
fn get_name(&self, _index: usize) -> Option<&str> {
|
||||
fn name(&self, _index: usize) -> Option<&str> {
|
||||
None
|
||||
}
|
||||
}
|
||||
@ -197,18 +215,23 @@ where
|
||||
Head: Named,
|
||||
Tail: NamedTuple,
|
||||
{
|
||||
fn get_name(&self, index: usize) -> Option<&str> {
|
||||
fn name(&self, index: usize) -> Option<&str> {
|
||||
if index == 0 {
|
||||
Some(self.0.name())
|
||||
} else {
|
||||
self.1.get_name(index - 1)
|
||||
self.1.name(index - 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
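The renamed lookups (`const_name_for`, `name_id_for`, `name`) are index-based. A hedged sketch of `NamedTuple::name`; the module path and the `DummyObserver` type are assumptions for illustration:

use libafl::bolts::tuples::{tuple_list, Named, NamedTuple};

struct DummyObserver {
    name: &'static str,
}

impl Named for DummyObserver {
    fn name(&self) -> &str {
        self.name
    }
}

fn lookup_by_index() {
    let list = tuple_list!(
        DummyObserver { name: "edges" },
        DummyObserver { name: "time" }
    );
    assert_eq!(list.name(0), Some("edges"));
    assert_eq!(list.name(2), None);
}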
|
||||
/// This operation is unsafe with Rust stable, wait for https://stackoverflow.com/a/60138532/7658998
|
||||
/// Match for a name and return the value
|
||||
///
|
||||
/// # Safety
|
||||
/// This operation is unsafe with Rust stable, wait for [specialization](https://stackoverflow.com/a/60138532/7658998).
|
||||
pub trait MatchName {
|
||||
/// Match for a name and return the borrowed value
|
||||
fn match_name<T>(&self, name: &str) -> Option<&T>;
|
||||
/// Match for a name and return the mut borrowed value
|
||||
fn match_name_mut<T>(&mut self, name: &str) -> Option<&mut T>;
|
||||
}
|
||||
|
||||
@ -243,8 +266,11 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Finds an element of a `type` by the given `name`.
|
||||
pub trait MatchNameAndType {
|
||||
/// Finds an element of a `type` by the given `name`, and returns a borrow, or [`Option::None`].
|
||||
fn match_name_type<T: 'static>(&self, name: &str) -> Option<&T>;
|
||||
/// Finds an element of a `type` by the given `name`, and returns a mut borrow, or [`Option::None`].
|
||||
fn match_name_type_mut<T: 'static>(&mut self, name: &str) -> Option<&mut T>;
|
||||
}
|
||||
|
||||
@ -283,6 +309,8 @@ where
|
||||
|
||||
/// Allows prepending of values to a tuple
|
||||
pub trait Prepend<T>: TupleList {
|
||||
/// The Resulting [`TupleList`], of an [`Prepend::prepend()`] call,
|
||||
/// including the prepended entry.
|
||||
type PreprendResult: TupleList;
|
||||
|
||||
/// Prepend a value to this tuple, returning a new tuple with prepended value.
|
||||
@ -304,6 +332,8 @@ where
|
||||
|
||||
/// Append to a `TupeList`
|
||||
pub trait Append<T>: TupleList {
|
||||
/// The Resulting [`TupleList`], of an [`Append::append()`] call,
|
||||
/// including the appended entry.
|
||||
type AppendResult: TupleList;
|
||||
|
||||
/// Append Value and return the tuple
|
||||
@ -335,6 +365,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterate over a tuple, executing the given `expr` for each element.
|
||||
#[macro_export]
|
||||
macro_rules! tuple_for_each {
|
||||
($fn_name:ident, $trait_name:path, $tuple_name:ident, $body:expr) => {
|
||||
@ -366,6 +397,7 @@ macro_rules! tuple_for_each {
|
||||
};
|
||||
}
|
||||
|
||||
/// Iterate over a tuple, executing the given `expr` for each element, granting mut access.
|
||||
#[macro_export]
|
||||
macro_rules! tuple_for_each_mut {
|
||||
($fn_name:ident, $trait_name:path, $tuple_name:ident, $body:expr) => {
|
||||
|
@ -77,6 +77,9 @@ impl<I> InMemoryCorpus<I>
|
||||
where
|
||||
I: Input,
|
||||
{
|
||||
/// Creates a new [`InMemoryCorpus`], keeping all [`Testcase`]`s` in memory.
|
||||
/// This is the simplest and fastest option, however test progress will be lost on exit or on OOM.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
entries: vec![],
|
||||
|
@ -16,7 +16,7 @@ use hashbrown::{HashMap, HashSet};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Default probability to skip the non-favored values
|
||||
pub const DEFAULT_SKIP_NOT_FAV_PROB: u64 = 95;
|
||||
pub const DEFAULT_SKIP_NON_FAVORED_PROB: u64 = 95;
|
||||
|
||||
/// A testcase metadata saying if a testcase is favored
|
||||
#[derive(Serialize, Deserialize)]
|
||||
@ -34,6 +34,8 @@ pub struct TopRatedsMetadata {
|
||||
crate::impl_serdeany!(TopRatedsMetadata);
|
||||
|
||||
impl TopRatedsMetadata {
|
||||
/// Creates a new [`struct@TopRatedsMetadata`]
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
map: HashMap::default(),
|
||||
@ -47,11 +49,12 @@ impl Default for TopRatedsMetadata {
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the favor factor of a testcase. Lower is better.
/// Compute the favor factor of a [`Testcase`]. Lower is better.
pub trait FavFactor<I>
where
I: Input,
{
/// Computes the favor factor of a [`Testcase`]. Lower is better.
fn compute(testcase: &mut Testcase<I>) -> Result<u64, Error>;
}
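A hedged sketch of a custom `FavFactor` that prefers shorter inputs, matching the trait shape above. The module paths and the `load_input` return type are assumptions based on other hunks in this commit:

use core::marker::PhantomData;
use libafl::{
    corpus::{FavFactor, Testcase},
    inputs::{HasLen, Input},
    Error,
};

pub struct LenFavFactor<I>
where
    I: Input + HasLen,
{
    phantom: PhantomData<I>,
}

impl<I> FavFactor<I> for LenFavFactor<I>
where
    I: Input + HasLen,
{
    fn compute(testcase: &mut Testcase<I>) -> Result<u64, Error> {
        // Lower is better, so the factor is simply the input length.
        Ok(testcase.load_input()?.len() as u64)
    }
}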
|
||||
@ -74,9 +77,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// The Minimizer scheduler employs a genetic algorithm to compute a subset of the
|
||||
/// The [`MinimizerCorpusScheduler`] employs a genetic algorithm to compute a subset of the
|
||||
/// corpus that exercise all the requested features (e.g. all the coverage seen so far)
|
||||
/// prioritizing testcases using FavFactor
|
||||
/// prioritizing [`Testcase`]`s` using [`FavFactor`]
|
||||
pub struct MinimizerCorpusScheduler<C, CS, F, I, M, R, S>
|
||||
where
|
||||
CS: CorpusScheduler<I, S>,
|
||||
@ -87,7 +90,7 @@ where
|
||||
C: Corpus<I>,
|
||||
{
|
||||
base: CS,
|
||||
skip_not_fav_prob: u64,
|
||||
skip_non_favored_prob: u64,
|
||||
phantom: PhantomData<(C, F, I, M, R, S)>,
|
||||
}
|
||||
|
||||
@ -133,7 +136,7 @@ where
|
||||
.borrow()
|
||||
.has_metadata::<IsFavoredMetadata>();
|
||||
has
|
||||
} && state.rand_mut().below(100) < self.skip_not_fav_prob
|
||||
} && state.rand_mut().below(100) < self.skip_non_favored_prob
|
||||
{
|
||||
idx = self.base.next(state)?;
|
||||
}
|
||||
@ -227,28 +230,32 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Creates a new [`MinimizerCorpusScheduler`] that wraps a `base` [`CorpusScheduler`]
|
||||
/// and has a default probability to skip non-faved [`Testcase`]s of [`DEFAULT_SKIP_NON_FAVORED_PROB`].
|
||||
pub fn new(base: CS) -> Self {
|
||||
Self {
|
||||
base,
|
||||
skip_not_fav_prob: DEFAULT_SKIP_NOT_FAV_PROB,
|
||||
skip_non_favored_prob: DEFAULT_SKIP_NON_FAVORED_PROB,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_skip_prob(base: CS, skip_not_fav_prob: u64) -> Self {
|
||||
/// Creates a new [`MinimizerCorpusScheduler`] that wraps a `base` [`CorpusScheduler`]
|
||||
/// and has a non-default probability to skip non-faved [`Testcase`]s using (`skip_non_favored_prob`).
|
||||
pub fn with_skip_prob(base: CS, skip_non_favored_prob: u64) -> Self {
|
||||
Self {
|
||||
base,
|
||||
skip_not_fav_prob,
|
||||
skip_non_favored_prob,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A MinimizerCorpusScheduler with LenTimeMulFavFactor to prioritize quick and small testcases
|
||||
/// A [`MinimizerCorpusScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s`.
|
||||
pub type LenTimeMinimizerCorpusScheduler<C, CS, I, M, R, S> =
|
||||
MinimizerCorpusScheduler<C, CS, LenTimeMulFavFactor<I>, I, M, R, S>;
|
||||
|
||||
/// A MinimizerCorpusScheduler with LenTimeMulFavFactor to prioritize quick and small testcases
|
||||
/// that exercise all the entries registered in the MapIndexesMetadata
|
||||
/// A [`MinimizerCorpusScheduler`] with [`LenTimeMulFavFactor`] to prioritize quick and small [`Testcase`]`s`
|
||||
/// that exercise all the entries registered in the [`MapIndexesMetadata`].
|
||||
pub type IndexesLenTimeMinimizerCorpusScheduler<C, CS, I, R, S> =
|
||||
MinimizerCorpusScheduler<C, CS, LenTimeMulFavFactor<I>, I, MapIndexesMetadata, R, S>;
|
||||
|
@ -136,7 +136,8 @@ where
|
||||
I: Input,
|
||||
R: Rand,
|
||||
{
|
||||
/// Create a new RandCorpusScheduler that just schedules randomly.
|
||||
/// Create a new [`RandCorpusScheduler`] that just schedules randomly.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -156,4 +157,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`StdCorpusScheduler`] uses the default scheduler in `LibAFL` to schedule [`Testcase`]s
|
||||
/// The current `Std` is a [`RandCorpusScheduler`], although this may change in the future, if another [`CorpusScheduler`] delivers better results.
|
||||
pub type StdCorpusScheduler<C, I, R, S> = RandCorpusScheduler<C, I, R, S>;
|
||||
|
@ -115,8 +115,8 @@ impl<I> OnDiskCorpus<I>
|
||||
where
|
||||
I: Input,
|
||||
{
|
||||
/// Creates the OnDiskCorpus.
|
||||
/// Will error, if `std::fs::create_dir_all` failed for `dir_path`.
|
||||
/// Creates the [`OnDiskCorpus`].
|
||||
/// Will error, if [`std::fs::create_dir_all()`] failed for `dir_path`.
|
||||
pub fn new(dir_path: PathBuf) -> Result<Self, Error> {
|
||||
fs::create_dir_all(&dir_path)?;
|
||||
Ok(Self {
|
||||
@ -127,8 +127,8 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates the OnDiskCorpus specifying the type of metatada to be saved to disk.
|
||||
/// Will error, if `std::fs::create_dir_all` failed for `dir_path`.
|
||||
/// Creates the [`OnDiskCorpus`] specifying the type of `Metadata` to be saved to disk.
|
||||
/// Will error, if [`std::fs::create_dir_all()`] failed for `dir_path`.
|
||||
pub fn new_save_meta(
|
||||
dir_path: PathBuf,
|
||||
meta_format: Option<OnDiskMetadataFormat>,
|
||||
|
@ -54,6 +54,7 @@ where
|
||||
I: Input,
|
||||
{
|
||||
/// Creates a new `QueueCorpusScheduler`
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -93,7 +94,7 @@ mod tests {
|
||||
let mut q =
|
||||
OnDiskCorpus::<BytesInput>::new(PathBuf::from("target/.test/fancy/path")).unwrap();
|
||||
let t = Testcase::with_filename(
|
||||
BytesInput::new(vec![0 as u8; 4]),
|
||||
BytesInput::new(vec![0_u8; 4]),
|
||||
"target/.test/fancy/path/fancyfile".into(),
|
||||
);
|
||||
q.add(t).unwrap();
|
||||
|
@ -35,13 +35,13 @@ impl<I> HasMetadata for Testcase<I>
|
||||
where
|
||||
I: Input,
|
||||
{
|
||||
/// Get all the metadata into an HashMap
|
||||
/// Get all the metadata into an [`hashbrown::HashMap`]
|
||||
#[inline]
|
||||
fn metadata(&self) -> &SerdeAnyMap {
|
||||
&self.metadata
|
||||
}
|
||||
|
||||
/// Get all the metadata into an HashMap (mutable)
|
||||
/// Get all the metadata into an [`hashbrown::HashMap`] (mutable)
|
||||
#[inline]
|
||||
fn metadata_mut(&mut self) -> &mut SerdeAnyMap {
|
||||
&mut self.metadata
|
||||
@ -143,7 +143,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new Testcase instace given an input and a filename
|
||||
/// Create a new Testcase instance given an [`Input`] and a `filename`
|
||||
#[inline]
|
||||
pub fn with_filename(input: I, filename: String) -> Self {
|
||||
Testcase {
|
||||
@ -155,6 +155,8 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new, empty, [`Testcase`].
|
||||
#[must_use]
|
||||
#[inline]
|
||||
pub fn default() -> Self {
|
||||
Testcase {
|
||||
|
@ -54,6 +54,8 @@ const LLMP_TAG_EVENT_TO_BOTH: llmp::Tag = 0x2B0741;
|
||||
const _LLMP_TAG_RESTART: llmp::Tag = 0x8357A87;
|
||||
const _LLMP_TAG_NO_RESTART: llmp::Tag = 0x57A7EE71;
|
||||
|
||||
/// An [`EventManager`] that forwards all events to other attached fuzzers on shared maps or via tcp,
|
||||
/// using low-level message passing, [`crate::bolts::llmp`].
|
||||
#[derive(Debug)]
|
||||
pub struct LlmpEventManager<I, S, SP, ST>
|
||||
where
|
||||
@ -109,7 +111,7 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
/// If a client respawns, it may reuse the existing connection, previously stored by LlmpClient::to_env
|
||||
/// If a client respawns, it may reuse the existing connection, previously stored by [`LlmpClient::to_env()`].
|
||||
#[cfg(feature = "std")]
|
||||
pub fn existing_client_from_env(shmem_provider: SP, env_name: &str) -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
@ -149,7 +151,7 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
/// Write the config for a client eventmgr to env vars, a new client can reattach using existing_client_from_env
|
||||
/// Write the config for a client [`EventManager`] to env vars, a new client can reattach using [`LlmpEventManager::existing_client_from_env()`].
|
||||
#[cfg(feature = "std")]
|
||||
pub fn to_env(&self, env_name: &str) {
|
||||
match &self.llmp {
|
||||
@ -523,6 +525,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets up a restarting fuzzer, using the [`StdShMemProvider`], and standard features.
|
||||
/// The restarting mgr is a combination of restarter and runner, that can be used on systems with and without `fork` support.
|
||||
/// The restarter will spawn a new process each time the child crashes or timeouts.
|
||||
#[cfg(feature = "std")]
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn setup_restarting_mgr_std<I, S, ST>(
|
||||
@ -547,7 +552,7 @@ where
|
||||
setup_restarting_mgr(StdShMemProvider::new()?, stats, broker_port)
|
||||
}
|
||||
|
||||
/// A restarting state is a combination of restarter and runner, that can be used on systems without `fork`.
|
||||
/// A restarting state is a combination of restarter and runner, that can be used on systems with and without `fork` support.
|
||||
/// The restarter will start a new process each time the child crashes or timeouts.
|
||||
#[cfg(feature = "std")]
|
||||
#[allow(
|
||||
|
@ -91,10 +91,11 @@ where
|
||||
},
|
||||
/// New stats.
|
||||
UpdateStats {
|
||||
/// The time of generation of the event
|
||||
/// The time of generation of the [`Event`]
|
||||
time: Duration,
|
||||
/// The executions of this client
|
||||
executions: usize,
|
||||
/// [`PhantomData`]
|
||||
phantom: PhantomData<I>,
|
||||
},
|
||||
/// A new objective was found
|
||||
@ -150,7 +151,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// EventManager is the main communications hub.
|
||||
/// [`EventManager`] is the main communications hub.
|
||||
/// For the "normal" multi-processed mode, you may want to look into `RestartingEventManager`
|
||||
pub trait EventManager<I, S>
|
||||
where
|
||||
|
@ -64,6 +64,7 @@ where
|
||||
I: Input,
|
||||
ST: Stats, //TODO CE: CustomEvent,
|
||||
{
|
||||
/// Creates a new [`SimpleEventManager`].
|
||||
pub fn new(stats: ST) -> Self {
|
||||
Self {
|
||||
stats,
|
||||
|
@ -1,4 +1,4 @@
|
||||
//! The InProcess Executor is a libfuzzer-like executor, that will simply call a function.
|
||||
//! The [`InProcessExecutor`] is a libfuzzer-like executor, that will simply call a function.
|
||||
//! It should usually be paired with extra error-handling, such as a restarting event manager, to be effective.
|
||||
|
||||
use core::{
|
||||
@ -706,7 +706,7 @@ mod tests {
|
||||
observers: tuple_list!(),
|
||||
phantom: PhantomData,
|
||||
};
|
||||
let mut input = NopInput {};
|
||||
assert!(in_process_executor.run_target(&mut input).is_ok());
|
||||
let input = NopInput {};
|
||||
assert!(in_process_executor.run_target(&input).is_ok());
|
||||
}
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ use crate::{
|
||||
|
||||
use alloc::boxed::Box;
|
||||
|
||||
/// A `CustomExitKind` for exits that do not fit the default `ExitKind`
|
||||
/// A `CustomExitKind` for exits that do not fit to one of the default `ExitKind`.
|
||||
pub trait CustomExitKind: core::fmt::Debug + SerdeAny + 'static {}
|
||||
|
||||
/// How an execution finished.
|
||||
@ -96,17 +96,18 @@ where
|
||||
fn observers_mut(&mut self) -> &mut OT;
|
||||
}
|
||||
|
||||
/// Execute the exec hooks of the observers if they all implement HasExecHooks
|
||||
/// Execute the exec hooks of the observers if they all implement [`HasExecHooks`].
|
||||
pub trait HasObserversHooks<EM, I, OT, S>: HasObservers<OT>
|
||||
where
|
||||
OT: ObserversTuple + HasExecHooksTuple<EM, I, S>,
|
||||
{
|
||||
/// Run the pre exec hook for all [`crate::observers::Observer`]`s` linked to this [`Executor`].
|
||||
#[inline]
|
||||
fn pre_exec_observers(&mut self, state: &mut S, mgr: &mut EM, input: &I) -> Result<(), Error> {
|
||||
self.observers_mut().pre_exec_all(state, mgr, input)
|
||||
}
|
||||
|
||||
/// Run the post exec hook for all the observes linked to this executor
|
||||
/// Run the post exec hook for all the [`crate::observers::Observer`]`s` linked to this [`Executor`].
|
||||
#[inline]
|
||||
fn post_exec_observers(&mut self, state: &mut S, mgr: &mut EM, input: &I) -> Result<(), Error> {
|
||||
self.observers_mut().post_exec_all(state, mgr, input)
|
||||
|
@ -20,7 +20,9 @@ use crate::{
|
||||
Error,
|
||||
};
|
||||
|
||||
/// A [`MapFeedback`] that strives to maximize the map contents.
|
||||
pub type MaxMapFeedback<O, T> = MapFeedback<O, MaxReducer, T>;
|
||||
/// A [`MapFeedback`] that strives to minimize the map contents.
|
||||
pub type MinMapFeedback<O, T> = MapFeedback<O, MinReducer, T>;
|
||||
|
||||
/// A Reducer function is used to aggregate values for the novelty search
|
||||
@ -28,9 +30,11 @@ pub trait Reducer<T>: Serialize + serde::de::DeserializeOwned + 'static
|
||||
where
|
||||
T: Integer + Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
|
||||
{
|
||||
/// Reduce two values to one value, with the current [`Reducer`].
|
||||
fn reduce(first: T, second: T) -> T;
|
||||
}
|
||||
|
||||
/// A [`MinReducer`] reduces [`Integer`] values and returns their maximum.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct MaxReducer {}
|
||||
|
||||
@ -48,6 +52,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`MinReducer`] reduces [`Integer`] values and returns their minimum.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct MinReducer {}
|
||||
|
||||
@ -68,6 +73,7 @@ where
|
||||
/// A testcase metadata holding a list of indexes of a map
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct MapIndexesMetadata {
|
||||
/// The list of indexes.
|
||||
pub list: Vec<usize>,
|
||||
}
|
||||
|
||||
@ -81,6 +87,8 @@ impl AsSlice<usize> for MapIndexesMetadata {
|
||||
}
|
||||
|
||||
impl MapIndexesMetadata {
|
||||
/// Creates a new [`struct@MapIndexesMetadata`].
|
||||
#[must_use]
|
||||
pub fn new(list: Vec<usize>) -> Self {
|
||||
Self { list }
|
||||
}
|
||||
@ -89,6 +97,7 @@ impl MapIndexesMetadata {
|
||||
/// A testcase metadata holding a list of indexes of a map
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct MapNoveltiesMetadata {
|
||||
/// A `list` of novelties.
|
||||
pub list: Vec<usize>,
|
||||
}
|
||||
|
||||
@ -96,11 +105,14 @@ crate::impl_serdeany!(MapNoveltiesMetadata);
|
||||
|
||||
impl AsSlice<usize> for MapNoveltiesMetadata {
|
||||
/// Convert to a slice
|
||||
#[must_use]
|
||||
fn as_slice(&self) -> &[usize] {
|
||||
self.list.as_slice()
|
||||
}
|
||||
}
|
||||
impl MapNoveltiesMetadata {
|
||||
/// Creates a new [`struct@MapNoveltiesMetadata`]
|
||||
#[must_use]
|
||||
pub fn new(list: Vec<usize>) -> Self {
|
||||
Self { list }
|
||||
}
|
||||
@ -249,6 +261,7 @@ where
|
||||
O: MapObserver<T>,
|
||||
{
|
||||
/// Create new `MapFeedback`
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str, map_size: usize) -> Self {
|
||||
Self {
|
||||
history_map: vec![T::default(); map_size],
|
||||
@ -271,7 +284,8 @@ where
|
||||
}
|
||||
|
||||
/// Create new `MapFeedback` specifying if it must track indexes of novelties
|
||||
pub fn new_track(
|
||||
#[must_use]
|
||||
pub fn new_tracking(
|
||||
name: &'static str,
|
||||
map_size: usize,
|
||||
track_indexes: bool,
|
||||
@ -287,7 +301,7 @@ where
|
||||
}
|
||||
|
||||
/// Create new `MapFeedback` for the observer type if it must track indexes of novelties
|
||||
pub fn new_with_observer_track(
|
||||
pub fn new_tracking_with_observer(
|
||||
map_observer: &O,
|
||||
track_indexes: bool,
|
||||
track_novelties: bool,
|
||||
@ -310,6 +324,7 @@ where
|
||||
{
|
||||
/// Create new `MapFeedback` using a map observer, and a map.
|
||||
/// The map can be shared.
|
||||
#[must_use]
|
||||
pub fn with_history_map(name: &'static str, history_map: Vec<T>) -> Self {
|
||||
Self {
|
||||
history_map,
|
||||
@ -321,6 +336,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
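Usage sketch for the renamed constructor: a map observer over a static edges map and a `MaxMapFeedback` that tracks novelty indexes but not the novelties themselves. The module paths and the map size are assumptions:

use libafl::{feedbacks::MaxMapFeedback, observers::StdMapObserver};

static mut EDGES: [u8; 65536] = [0; 65536];

fn build_map_feedback() {
    let edges_observer = unsafe { StdMapObserver::new("edges", &mut EDGES) };
    // track_indexes = true, track_novelties = false
    let _feedback: MaxMapFeedback<_, u8> =
        MaxMapFeedback::new_tracking_with_observer(&edges_observer, true, false);
}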
/// A [`ReachabilityFeedback`] reports if a target has been reached.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct ReachabilityFeedback<O> {
|
||||
name: String,
|
||||
@ -332,6 +348,8 @@ impl<O> ReachabilityFeedback<O>
|
||||
where
|
||||
O: MapObserver<usize>,
|
||||
{
|
||||
/// Creates a new [`ReachabilityFeedback`] for a [`MapObserver`].
|
||||
#[must_use]
|
||||
pub fn new_with_observer(map_observer: &O) -> Self {
|
||||
Self {
|
||||
name: map_observer.name().to_string(),
|
||||
@ -339,6 +357,9 @@ where
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [`ReachabilityFeedback`] for a [`MapObserver`] with the given `name`.
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str) -> Self {
|
||||
Self {
|
||||
name: name.to_string(),
|
||||
|
@ -48,7 +48,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Compose feedbacks with an AND operation
|
||||
/// Compose [`Feedback`]`s` with an `AND` operation
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde(bound = "I: serde::de::DeserializeOwned")]
|
||||
pub struct AndFeedback<A, B, I>
|
||||
@ -57,7 +57,9 @@ where
|
||||
B: Feedback<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// The first [`Feedback`] to `AND`.
|
||||
pub first: A,
|
||||
/// The second [`Feedback`] to `AND`.
|
||||
pub second: B,
|
||||
phantom: PhantomData<I>,
|
||||
}
|
||||
@ -114,6 +116,7 @@ where
|
||||
B: Feedback<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// Creates a new [`AndFeedback`], resulting in the `AND` of two feedbacks.
|
||||
pub fn new(first: A, second: B) -> Self {
|
||||
Self {
|
||||
first,
|
||||
@ -132,7 +135,9 @@ where
|
||||
B: Feedback<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// The first [`Feedback`]
|
||||
pub first: A,
|
||||
/// The second [`Feedback`], `OR`ed with the first.
|
||||
pub second: B,
|
||||
phantom: PhantomData<I>,
|
||||
}
|
||||
@ -189,6 +194,7 @@ where
|
||||
B: Feedback<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// Creates a new [`OrFeedback`] for two feedbacks.
|
||||
pub fn new(first: A, second: B) -> Self {
|
||||
Self {
|
||||
first,
|
||||
@ -206,6 +212,7 @@ where
|
||||
A: Feedback<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// The feedback to invert
|
||||
pub first: A,
|
||||
phantom: PhantomData<I>,
|
||||
}
|
||||
@ -255,6 +262,7 @@ where
|
||||
A: Feedback<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// Creates a new [`NotFeedback`].
|
||||
pub fn new(first: A) -> Self {
|
||||
Self {
|
||||
first,
|
||||
@ -318,7 +326,7 @@ impl Named for () {
|
||||
}
|
||||
}
|
||||
|
||||
/// Is a crash feedback
|
||||
/// A [`CrashFeedback`] reports as interesting if the target crashed.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct CrashFeedback {}
|
||||
|
||||
@ -351,6 +359,8 @@ impl Named for CrashFeedback {
|
||||
}
|
||||
|
||||
impl CrashFeedback {
|
||||
/// Creates a new [`CrashFeedback`]
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
@ -362,6 +372,7 @@ impl Default for CrashFeedback {
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`TimeoutFeedback`] reduces the timeout value of a run.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct TimeoutFeedback {}
|
||||
|
||||
@ -394,6 +405,8 @@ impl Named for TimeoutFeedback {
|
||||
}
|
||||
|
||||
impl TimeoutFeedback {
|
||||
/// Returns a new [`TimeoutFeedback`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
@ -406,7 +419,8 @@ impl Default for TimeoutFeedback {
|
||||
}
|
||||
|
||||
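Composed feedbacks are typically used for the objective side of a fuzzer: a run is a solution if the target crashed or timed out. A sketch; the generic parameter order on `OrFeedback` is assumed to mirror the `AndFeedback` definition above:

use libafl::{
    feedbacks::{CrashFeedback, OrFeedback, TimeoutFeedback},
    inputs::BytesInput,
};

fn build_objective() -> OrFeedback<CrashFeedback, TimeoutFeedback, BytesInput> {
    OrFeedback::new(CrashFeedback::new(), TimeoutFeedback::new())
}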
/// Nop feedback that annotates execution time in the new testcase, if any
|
||||
/// For this Feedback, the testcase is never interesting (use with an OR)
|
||||
/// for this Feedback, the testcase is never interesting (use with an OR)
|
||||
/// It decides, if the given [`TimeObserver`] value of a run is interesting.
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct TimeFeedback {
|
||||
exec_time: Option<Duration>,
|
||||
@ -456,6 +470,8 @@ impl Named for TimeFeedback {
|
||||
}
|
||||
|
||||
impl TimeFeedback {
|
||||
/// Creates a new [`TimeFeedback`], deciding if the value of a [`TimeObserver`] with the given `name` of a run is interesting.
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str) -> Self {
|
||||
Self {
|
||||
exec_time: None,
|
||||
@ -463,6 +479,8 @@ impl TimeFeedback {
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new [`TimeFeedback`], deciding if the given [`TimeObserver`] value of a run is interesting.
|
||||
#[must_use]
|
||||
pub fn new_with_observer(observer: &TimeObserver) -> Self {
|
||||
Self {
|
||||
exec_time: None,
|
||||
|
@ -27,8 +27,10 @@ where
|
||||
I: Input,
|
||||
Self: Sized,
|
||||
{
|
||||
/// The stages
|
||||
fn stages(&self) -> &ST;
|
||||
|
||||
/// The stages (mut)
|
||||
fn stages_mut(&mut self) -> &mut ST;
|
||||
}
|
||||
|
||||
@ -38,8 +40,10 @@ where
|
||||
CS: CorpusScheduler<I, S>,
|
||||
I: Input,
|
||||
{
|
||||
/// The scheduler
|
||||
fn scheduler(&self) -> &CS;
|
||||
|
||||
/// The scheduler (mut)
|
||||
fn scheduler_mut(&mut self) -> &mut CS;
|
||||
}
|
||||
|
||||
|
@ -59,6 +59,8 @@ impl<R> RandBytesGenerator<R>
|
||||
where
|
||||
R: Rand,
|
||||
{
|
||||
/// Returns a new [`RandBytesGenerator`], generating up to `max_size` random bytes.
|
||||
#[must_use]
|
||||
pub fn new(max_size: usize) -> Self {
|
||||
Self {
|
||||
max_size,
|
||||
@ -101,6 +103,8 @@ impl<R> RandPrintablesGenerator<R>
|
||||
where
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`RandPrintablesGenerator`], generating up to `max_size` random printable characters.
|
||||
#[must_use]
|
||||
pub fn new(max_size: usize) -> Self {
|
||||
Self {
|
||||
max_size,
|
||||
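A sketch of constructing one of these generators; the module path and the choice of `StdRand` for the rand parameter are assumptions:

use libafl::{generators::RandPrintablesGenerator, utils::StdRand};

fn build_generator() -> RandPrintablesGenerator<StdRand> {
    // Up to 32 random printable bytes per generated input; typically handed to
    // `generate_initial_inputs` (see the state hunks later in this commit).
    RandPrintablesGenerator::new(32)
}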
|
@ -97,6 +97,7 @@ impl From<&[u8]> for BytesInput {
|
||||
|
||||
impl BytesInput {
|
||||
/// Creates a new bytes input using the given bytes
|
||||
#[must_use]
|
||||
pub fn new(bytes: Vec<u8>) -> Self {
|
||||
Self { bytes }
|
||||
}
|
||||
|
@ -86,6 +86,7 @@ pub trait HasLen {
|
||||
/// The length
|
||||
fn len(&self) -> usize;
|
||||
|
||||
/// Returns `true` if it has no elements.
|
||||
fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*!
|
||||
Welcome to libAFL
|
||||
Welcome to `LibAFL`
|
||||
*/
|
||||
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
@ -173,11 +173,12 @@ mod tests {
|
||||
use crate::events::SimpleEventManager;
|
||||
|
||||
#[test]
|
||||
#[allow(clippy::similar_names)]
|
||||
fn test_fuzzer() {
|
||||
let rand = StdRand::with_seed(0);
|
||||
|
||||
let mut corpus = InMemoryCorpus::<BytesInput>::new();
|
||||
let testcase = Testcase::new(vec![0; 4]).into();
|
||||
let testcase = Testcase::new(vec![0; 4]);
|
||||
corpus.add(testcase).unwrap();
|
||||
|
||||
let mut state = State::new(
|
||||
@ -211,7 +212,7 @@ mod tests {
|
||||
for i in 0..1000 {
|
||||
fuzzer
|
||||
.fuzz_one(&mut state, &mut executor, &mut event_manager, &scheduler)
|
||||
.expect(&format!("Error in iter {}", i));
|
||||
.unwrap_or_else(|_| panic!("Error in iter {}", i));
|
||||
}
|
||||
|
||||
let state_serialized = postcard::to_allocvec(&state).unwrap();
|
||||
|
@ -21,7 +21,9 @@ use crate::{
|
||||
/// will not be executed with the returned input.
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
pub enum MutationResult {
|
||||
/// The [`Mutator`] mutated this `Input`.
|
||||
Mutated,
|
||||
/// The [`Mutator`] did not mutate this `Input`. It was `Skipped`.
|
||||
Skipped,
|
||||
}
|
||||
|
||||
@ -50,10 +52,12 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// A `Tuple` of `Mutators` that can execute multiple `Mutators` in a row.
|
||||
pub trait MutatorsTuple<I, S>: HasLen
|
||||
where
|
||||
I: Input,
|
||||
{
|
||||
/// Runs the `mutate` function on all `Mutators` in this `Tuple`.
|
||||
fn mutate_all(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
@ -61,6 +65,7 @@ where
|
||||
stage_idx: i32,
|
||||
) -> Result<MutationResult, Error>;
|
||||
|
||||
/// Runs the `post_exec` function on all `Mutators` in this `Tuple`.
|
||||
fn post_exec_all(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
@ -68,6 +73,7 @@ where
|
||||
corpus_idx: Option<usize>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// Gets the [`Mutator`] at the given index and runs the `mutate` function on it.
|
||||
fn get_and_mutate(
|
||||
&mut self,
|
||||
index: usize,
|
||||
@ -76,6 +82,7 @@ where
|
||||
stage_idx: i32,
|
||||
) -> Result<MutationResult, Error>;
|
||||
|
||||
/// Gets the [`Mutator`] at the given index and runs the `post_exec` function on it.
|
||||
fn get_and_post_exec(
|
||||
&mut self,
|
||||
index: usize,
|
||||
|
@ -42,7 +42,7 @@ pub fn buffer_copy(dst: &mut [u8], src: &[u8], from: usize, to: usize, len: usiz
|
||||
}
|
||||
}
|
||||
|
||||
/// A simple buffer_set.
|
||||
/// A simple way to set buffer contents.
|
||||
/// The compiler does the heavy lifting.
|
||||
/// see <https://stackoverflow.com/a/51732799/1345238/>
|
||||
#[inline]
|
||||
@ -142,6 +142,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BitFlipMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -202,6 +204,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`ByteFlipMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -263,6 +267,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`ByteIncMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -324,6 +330,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a a new [`ByteDecMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -384,6 +392,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`ByteNegMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -444,6 +454,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`ByteRandMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -509,6 +521,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`ByteAddMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -544,7 +558,7 @@ where
|
||||
} else {
|
||||
let idx = state.rand_mut().below(input.bytes().len() as u64 - 1) as usize;
|
||||
unsafe {
|
||||
// Moar speed, no bound check
|
||||
// Moar speed, no bounds checks
|
||||
let ptr = input.bytes_mut().get_unchecked_mut(idx) as *mut _ as *mut u16;
|
||||
let num = 1 + state.rand_mut().below(ARITH_MAX) as u16;
|
||||
match state.rand_mut().below(4) {
|
||||
@ -576,6 +590,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`WordAddMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -643,6 +659,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`DwordAddMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -678,7 +696,7 @@ where
|
||||
} else {
|
||||
let idx = state.rand_mut().below(input.bytes().len() as u64 - 7) as usize;
|
||||
unsafe {
|
||||
// Moar speed, no bound check
|
||||
// Moar speed, no bounds checks
|
||||
let ptr = input.bytes_mut().get_unchecked_mut(idx) as *mut _ as *mut u64;
|
||||
let num = 1 + state.rand_mut().below(ARITH_MAX) as u64;
|
||||
match state.rand_mut().below(4) {
|
||||
@ -710,6 +728,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`QwordAddMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -773,6 +793,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`ByteInterestingMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -811,7 +833,7 @@ where
|
||||
let val =
|
||||
INTERESTING_16[state.rand_mut().below(INTERESTING_8.len() as u64) as usize] as u16;
|
||||
unsafe {
|
||||
// Moar speed, no bound check
|
||||
// Moar speed, no bounds checks
|
||||
let ptr = input.bytes_mut().get_unchecked_mut(idx) as *mut _ as *mut u16;
|
||||
if state.rand_mut().below(2) == 0 {
|
||||
*ptr = val;
|
||||
@ -841,6 +863,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`WordInterestingMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -879,7 +903,7 @@ where
|
||||
let val =
|
||||
INTERESTING_32[state.rand_mut().below(INTERESTING_8.len() as u64) as usize] as u32;
|
||||
unsafe {
|
||||
// Moar speed, no bound check
|
||||
// Moar speed, no bounds checks
|
||||
let ptr = input.bytes_mut().get_unchecked_mut(idx) as *mut _ as *mut u32;
|
||||
if state.rand_mut().below(2) == 0 {
|
||||
*ptr = val;
|
||||
@ -909,6 +933,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`DwordInterestingMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -969,6 +995,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesDeleteMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1036,6 +1064,8 @@ where
|
||||
S: HasRand<R> + HasMaxSize,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesExpandMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1109,6 +1139,8 @@ where
|
||||
S: HasRand<R> + HasMaxSize,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesInsertMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1179,6 +1211,8 @@ where
|
||||
S: HasRand<R> + HasMaxSize,
|
||||
R: Rand,
|
||||
{
|
||||
/// Create a new [`BytesRandInsertMutator`]
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1241,6 +1275,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesSetMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1303,6 +1339,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesRandSetMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1365,6 +1403,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesCopyMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1429,6 +1469,8 @@ where
|
||||
S: HasRand<R>,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new [`BytesSwapMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1526,6 +1568,8 @@ where
|
||||
R: Rand,
|
||||
S: HasRand<R> + HasCorpus<C, I> + HasMaxSize,
|
||||
{
|
||||
/// Creates a new [`CrossoverInsertMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1612,6 +1656,8 @@ where
|
||||
R: Rand,
|
||||
S: HasRand<R> + HasCorpus<C, I>,
|
||||
{
|
||||
/// Creates a new [`CrossoverReplaceMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1719,6 +1765,8 @@ where
|
||||
R: Rand,
|
||||
S: HasRand<R> + HasCorpus<C, I>,
|
||||
{
|
||||
/// Creates a new [`SpliceMutator`].
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -1846,7 +1894,7 @@ mod tests {
|
||||
for _ in 0..2 {
|
||||
let mut new_testcases = vec![];
|
||||
for idx in 0..(mutations.len()) {
|
||||
for input in inputs.iter() {
|
||||
for input in &inputs {
|
||||
let mut mutant = input.clone();
|
||||
match mutations
|
||||
.get_and_mutate(idx, &mut state, &mut mutant, 0)
|
||||
|
@ -21,25 +21,30 @@ use crate::{
|
||||
pub use crate::mutators::mutations::*;
|
||||
pub use crate::mutators::token_mutations::*;
|
||||
|
||||
/// The metadata placed in a [`crate::corpus::Testcase`] by a [`LoggerScheduledMutator`].
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct MutationsMetadata {
|
||||
pub struct LogMutationMetadata {
|
||||
/// A list of logs
|
||||
pub list: Vec<String>,
|
||||
}
|
||||
|
||||
crate::impl_serdeany!(MutationsMetadata);
|
||||
crate::impl_serdeany!(LogMutationMetadata);
|
||||
|
||||
impl AsSlice<String> for MutationsMetadata {
|
||||
impl AsSlice<String> for LogMutationMetadata {
|
||||
fn as_slice(&self) -> &[String] {
|
||||
self.list.as_slice()
|
||||
}
|
||||
}
|
||||
|
||||
impl MutationsMetadata {
|
||||
impl LogMutationMetadata {
|
||||
/// Creates new [`struct@LogMutationMetadata`].
|
||||
#[must_use]
|
||||
pub fn new(list: Vec<String>) -> Self {
|
||||
Self { list }
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Mutator`] that composes multiple mutations into one.
|
||||
pub trait ComposedByMutations<I, MT, S>
|
||||
where
|
||||
I: Input,
|
||||
@ -48,10 +53,11 @@ where
|
||||
/// Get the mutations
|
||||
fn mutations(&self) -> &MT;
|
||||
|
||||
// Get the mutations (mut)
|
||||
/// Get the mutations (mut)
|
||||
fn mutations_mut(&mut self) -> &mut MT;
|
||||
}
|
||||
|
||||
/// A [`Mutator`] scheduling multiple [`Mutator`]s for an input.
|
||||
pub trait ScheduledMutator<I, MT, S>: ComposedByMutations<I, MT, S> + Mutator<I, S>
|
||||
where
|
||||
I: Input,
|
||||
@ -86,6 +92,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`Mutator`] that schedules one of the embedded mutations on each call.
|
||||
pub struct StdScheduledMutator<I, MT, R, S>
|
||||
where
|
||||
I: Input,
|
||||
@ -178,7 +185,7 @@ where
|
||||
R: Rand,
|
||||
S: HasRand<R>,
|
||||
{
|
||||
/// Create a new StdScheduledMutator instance specifying mutations
|
||||
/// Create a new [`StdScheduledMutator`] instance specifying mutations
|
||||
pub fn new(mutations: MT) -> Self {
|
||||
StdScheduledMutator {
|
||||
mutations,
|
||||
@ -188,6 +195,7 @@ where
|
||||
}
|
||||
|
||||
/// Get the mutations that compose the Havoc mutator
|
||||
#[must_use]
|
||||
pub fn havoc_mutations<C, I, R, S>() -> impl MutatorsTuple<I, S>
|
||||
where
|
||||
I: Input + HasBytesVec,
|
||||
@ -227,7 +235,7 @@ where
|
||||
)
|
||||
}
|
||||
|
||||
//wraps around StdScheduledMutator
|
||||
/// A logging [`Mutator`] that wraps around a [`StdScheduledMutator`].
|
||||
pub struct LoggerScheduledMutator<C, I, MT, R, S, SM>
|
||||
where
|
||||
C: Corpus<I>,
|
||||
@ -289,10 +297,10 @@ where
|
||||
let mut testcase = (*state.corpus_mut().get(idx)?).borrow_mut();
|
||||
let mut log = Vec::<String>::new();
|
||||
while let Some(idx) = self.mutation_log.pop() {
|
||||
let name = String::from(self.scheduled.mutations().get_name(idx).unwrap()); // TODO maybe return an Error on None
|
||||
let name = String::from(self.scheduled.mutations().name(idx).unwrap()); // TODO maybe return an Error on None
|
||||
log.push(name)
|
||||
}
|
||||
let meta = MutationsMetadata::new(log);
|
||||
let meta = LogMutationMetadata::new(log);
|
||||
testcase.add_metadata(meta);
|
||||
};
|
||||
// Always reset the log for each run
|
||||
@ -376,7 +384,7 @@ where
|
||||
S: HasRand<R> + HasCorpus<C, I>,
|
||||
SM: ScheduledMutator<I, MT, S>,
|
||||
{
|
||||
/// Create a new StdScheduledMutator instance without mutations and corpus
|
||||
/// Create a new [`StdScheduledMutator`] instance without mutations and corpus
|
||||
pub fn new(scheduled: SM) -> Self {
|
||||
Self {
|
||||
scheduled,
|
||||
@ -397,20 +405,16 @@ mod tests {
|
||||
Mutator,
|
||||
},
|
||||
state::State,
|
||||
utils::{Rand, StdRand, XKCDRand},
|
||||
utils::{Rand, StdRand, XkcdRand},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_mut_scheduled() {
|
||||
// With the current impl, seed of 1 will result in a split at pos 2.
|
||||
let mut rand = XKCDRand::with_seed(5);
|
||||
let mut rand = XkcdRand::with_seed(5);
|
||||
let mut corpus: InMemoryCorpus<BytesInput> = InMemoryCorpus::new();
|
||||
corpus
|
||||
.add(Testcase::new(vec!['a' as u8, 'b' as u8, 'c' as u8]).into())
|
||||
.unwrap();
|
||||
corpus
|
||||
.add(Testcase::new(vec!['d' as u8, 'e' as u8, 'f' as u8]).into())
|
||||
.unwrap();
|
||||
corpus.add(Testcase::new(vec![b'a', b'b', b'c'])).unwrap();
|
||||
corpus.add(Testcase::new(vec![b'd', b'e', b'f'])).unwrap();
|
||||
|
||||
let testcase = corpus.get(0).expect("Corpus did not contain entries");
|
||||
let mut input = testcase.borrow_mut().load_input().unwrap().clone();
|
||||
@ -427,7 +431,7 @@ mod tests {
|
||||
|
||||
// The pre-seeded rand should have spliced at position 2.
|
||||
// TODO: Maybe have a fixed rand for this purpose?
|
||||
assert_eq!(input.bytes(), &['a' as u8, 'b' as u8, 'f' as u8])
|
||||
assert_eq!(input.bytes(), &[b'a', b'b', b'f'])
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -435,12 +439,8 @@ mod tests {
|
||||
// With the current impl, seed of 1 will result in a split at pos 2.
|
||||
let rand = StdRand::with_seed(0x1337);
|
||||
let mut corpus: InMemoryCorpus<BytesInput> = InMemoryCorpus::new();
|
||||
corpus
|
||||
.add(Testcase::new(vec!['a' as u8, 'b' as u8, 'c' as u8]).into())
|
||||
.unwrap();
|
||||
corpus
|
||||
.add(Testcase::new(vec!['d' as u8, 'e' as u8, 'f' as u8]).into())
|
||||
.unwrap();
|
||||
corpus.add(Testcase::new(vec![b'a', b'b', b'c'])).unwrap();
|
||||
corpus.add(Testcase::new(vec![b'd', b'e', b'f'])).unwrap();
|
||||
|
||||
let testcase = corpus.get(0).expect("Corpus did not contain entries");
|
||||
let mut input = testcase.borrow_mut().load_input().unwrap().clone();
|
||||
|
@ -33,6 +33,7 @@ crate::impl_serdeany!(Tokens);
|
||||
/// The metadata used for token mutators
|
||||
impl Tokens {
|
||||
/// Creates a new tokens metadata (old-skool afl name: `dictornary`)
|
||||
#[must_use]
|
||||
pub fn new(token_vec: Vec<Vec<u8>>) -> Self {
|
||||
Self { token_vec }
|
||||
}
|
||||
@ -118,11 +119,13 @@ impl Tokens {
}

/// Gets the tokens stored in this db
#[must_use]
pub fn tokens(&self) -> &[Vec<u8>] {
&self.token_vec
}
}
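A small token dictionary as a sketch; the example tokens are arbitrary, and in a real setup the `Tokens` value is added to the fuzzer state's metadata so that `TokenInsert`/`TokenReplace` can pick from it (that call is not part of this hunk):

use libafl::mutators::Tokens;

fn build_tokens() -> Tokens {
    Tokens::new(vec![
        b"GET ".to_vec(),
        b"POST ".to_vec(),
        b"HTTP/1.1".to_vec(),
    ])
}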
|
||||
/// Inserts a random token at a random position in the `Input`.
|
||||
#[derive(Default)]
|
||||
pub struct TokenInsert<I, R, S>
|
||||
where
|
||||
@ -198,6 +201,8 @@ where
|
||||
S: HasMetadata + HasRand<R> + HasMaxSize,
|
||||
R: Rand,
|
||||
{
|
||||
/// Create a `TokenInsert` `Mutation`.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
@ -205,6 +210,8 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// A `TokenReplace` [`Mutator`] replaces a random part of the input with one of a range of tokens.
|
||||
/// From AFL terms, this is called as `Dictionary` mutation (which doesn't really make sense ;) ).
|
||||
#[derive(Default)]
|
||||
pub struct TokenReplace<I, R, S>
|
||||
where
|
||||
@ -276,6 +283,8 @@ where
|
||||
S: HasMetadata + HasRand<R> + HasMaxSize,
|
||||
R: Rand,
|
||||
{
|
||||
/// Creates a new `TokenReplace` struct.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
phantom: PhantomData,
|
||||
|
@ -17,7 +17,7 @@ use crate::{
|
||||
Error,
|
||||
};
|
||||
|
||||
/// A MapObserver observes the static map, as oftentimes used for afl-like coverage information
|
||||
/// A [`MapObserver`] observes the static map, as oftentimes used for afl-like coverage information
|
||||
pub trait MapObserver<T>: Observer
|
||||
where
|
||||
T: Default + Copy,
|
||||
@ -130,7 +130,8 @@ impl<'a, T> StdMapObserver<'a, T>
|
||||
where
|
||||
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
|
||||
{
|
||||
/// Creates a new MapObserver
|
||||
/// Creates a new [`MapObserver`]
|
||||
#[must_use]
|
||||
pub fn new(name: &'static str, map: &'a mut [T]) -> Self {
|
||||
let initial = if map.is_empty() { T::default() } else { map[0] };
|
||||
Self {
|
||||
@ -140,7 +141,8 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new MapObserver with an owned map
|
||||
/// Creates a new [`MapObserver`] with an owned map
|
||||
#[must_use]
|
||||
pub fn new_owned(name: &'static str, map: Vec<T>) -> Self {
|
||||
let initial = if map.is_empty() { T::default() } else { map[0] };
|
||||
Self {
|
||||
@ -150,9 +152,10 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new MapObserver from a raw pointer
|
||||
/// Creates a new [`MapObserver`] from a raw pointer
|
||||
///
|
||||
/// # Safety
|
||||
/// Will dereference the map_ptr with up to len elements.
|
||||
/// Will dereference the `map_ptr` with up to len elements.
|
||||
pub unsafe fn new_from_ptr(name: &'static str, map_ptr: *mut T, len: usize) -> Self {
|
||||
let initial = if len > 0 { *map_ptr } else { T::default() };
|
||||
StdMapObserver {
|
||||
@ -241,7 +244,7 @@ impl<'a, T> VariableMapObserver<'a, T>
|
||||
where
|
||||
T: Default + Copy + 'static + serde::Serialize + serde::de::DeserializeOwned,
|
||||
{
|
||||
/// Creates a new MapObserver
|
||||
/// Creates a new [`MapObserver`]
|
||||
pub fn new(name: &'static str, map: &'a mut [T], size: &'a mut usize) -> Self {
|
||||
let initial = if map.is_empty() { T::default() } else { map[0] };
|
||||
Self {
|
||||
@ -252,9 +255,10 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a new MapObserver from a raw pointer
|
||||
/// Creates a new [`MapObserver`] from a raw pointer
|
||||
///
|
||||
/// # Safety
|
||||
/// Dereferences map_ptr with up to max_len elements of size.
|
||||
/// Dereferences `map_ptr` with up to `max_len` elements of size.
|
||||
pub unsafe fn new_from_ptr(
|
||||
name: &'static str,
|
||||
map_ptr: *mut T,
|
||||
@ -365,7 +369,7 @@ impl<M> HitcountsMapObserver<M>
|
||||
where
|
||||
M: serde::Serialize + serde::de::DeserializeOwned,
|
||||
{
|
||||
/// Creates a new MapObserver
|
||||
/// Creates a new [`MapObserver`]
|
||||
pub fn new(base: M) -> Self {
|
||||
Self { base }
|
||||
}
|
||||
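Sketch of chaining the observers above: an owned 64 KiB map, post-processed into AFL-style hitcount buckets. The map size and the names are illustrative assumptions:

use libafl::observers::{HitcountsMapObserver, StdMapObserver};

fn build_edges_observer() {
    let _observer =
        HitcountsMapObserver::new(StdMapObserver::new_owned("edges", vec![0u8; 65536]));
}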
|
@ -46,7 +46,8 @@ pub struct TimeObserver {
}

impl TimeObserver {
/// Creates a new TimeObserver with the given name.
/// Creates a new [`TimeObserver`] with the given name.
#[must_use]
pub fn new(name: &'static str) -> Self {
Self {
name: name.to_string(),
@ -55,6 +56,8 @@ impl TimeObserver {
}
}

/// Gets the runtime for the last execution of this target.
#[must_use]
pub fn last_runtime(&self) -> &Option<Duration> {
&self.last_runtime
}
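The observer pairs with `TimeFeedback::new_with_observer` (see the feedbacks hunks earlier in this commit) to annotate execution time on new testcases; a minimal sketch with assumed module paths:

use libafl::{feedbacks::TimeFeedback, observers::TimeObserver};

fn build_time_tracking() {
    let time_observer = TimeObserver::new("time");
    // Never interesting on its own; OR it with another feedback so it only
    // annotates the execution time of newly added testcases.
    let _time_feedback = TimeFeedback::new_with_observer(&time_observer);
}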
|
@ -1,4 +1,12 @@
|
||||
/*!
|
||||
A [`Stage`] is a technique used during fuzzing, working on one [`crate::corpus::Corpus`] entry, and potentially altering it or creating new entries.
|
||||
A well-known [`Stage`], for example, is the mutational stage, running multiple [`crate::mutators::Mutator`]s against a [`crate::corpus::Testcase`], potentially storing new ones, according to [`crate::feedbacks::Feedback`].
|
||||
Other stages may enrich [`crate::corpus::Testcase`]s with metadata.
|
||||
*/
|
||||
|
||||
/// Mutational stage is the normal fuzzing stage,
|
||||
pub mod mutational;
|
||||
|
||||
pub use mutational::{MutationalStage, StdMutationalStage};
|
||||
|
||||
//pub mod power;
|
||||
@ -27,12 +35,14 @@ where
|
||||
) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
/// A tuple holding all `Stages` used for fuzzing.
|
||||
pub trait StagesTuple<CS, E, EM, I, S>
|
||||
where
|
||||
EM: EventManager<I, S>,
|
||||
E: Executor<I>,
|
||||
I: Input,
|
||||
{
|
||||
/// Performs all `Stages` in this tuple
|
||||
fn perform_all(
|
||||
&mut self,
|
||||
state: &mut S,
|
||||
|
@ -65,6 +65,8 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Default value, how many iterations each stage gets, as an upper bound
|
||||
/// It may randomly continue earlier.
|
||||
pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: u64 = 128;
|
||||
|
||||
/// The default mutational stage
|
||||
|
@ -301,13 +301,13 @@ where
SC: Corpus<I>,
OF: Feedback<I>,
{
/// Get all the metadata into an HashMap
/// Get all the metadata into an [`hashbrown::HashMap`]
#[inline]
fn metadata(&self) -> &SerdeAnyMap {
&self.metadata
}

/// Get all the metadata into an HashMap (mutable)
/// Get all the metadata into an [`hashbrown::HashMap`] (mutable)
#[inline]
fn metadata_mut(&mut self) -> &mut SerdeAnyMap {
&mut self.metadata
@ -537,7 +537,8 @@ where
SC: Corpus<BytesInput>,
OF: Feedback<BytesInput>,
{
pub fn load_from_directory<CS, E, OT, EM>(
/// loads inputs from a directory
fn load_from_directory<CS, E, OT, EM>(
&mut self,
executor: &mut E,
manager: &mut EM,
@ -587,6 +588,7 @@ where
Ok(())
}

/// Loads initial inputs from the passed-in `in_dirs`.
pub fn load_initial_inputs<CS, E, OT, EM>(
&mut self,
executor: &mut E,
@ -664,6 +666,7 @@ where
Ok((is_interesting, is_solution))
}

/// Generate `num` initial inputs, using the passed-in generator.
pub fn generate_initial_inputs<CS, G, E, OT, EM>(
&mut self,
executor: &mut E,
@ -703,6 +706,7 @@ where
Ok(())
}

/// Creates a new `State`, taking ownership of all of the individual components during fuzzing.
pub fn new(rand: R, corpus: C, feedback: F, solutions: SC, objective: OF) -> Self {
Self {
rand,
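The metadata accessors above are the extension point for storing custom per-run data in the state. A hedged sketch; the `SerdeAny` derive is borrowed from elsewhere in this commit, and the exact `SerdeAnyMap` insert/get API is an assumption:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)]
    struct RunStats {
        executions: u64,
    }

    // Assumed typed API on SerdeAnyMap; adjust if the real signatures differ.
    state.metadata_mut().insert(RunStats { executions: 0 });
    if let Some(stats) = state.metadata().get::<RunStats>() {
        println!("executions recorded so far: {}", stats.executions);
    }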
|
||||
|
@ -138,6 +138,7 @@ pub trait Stats {
}
}

/// Tracking stats during fuzzing.
#[derive(Clone, Debug)]
pub struct SimpleStats<F>
where
@ -186,6 +187,7 @@ impl<F> SimpleStats<F>
where
F: FnMut(String),
{
/// Creates the stats, using the `current_time` as `start_time`.
pub fn new(print_fn: F) -> Self {
Self {
print_fn,
@ -195,6 +197,7 @@ where
}
}

/// Creates the stats with a given `start_time`.
pub fn with_time(print_fn: F, start_time: time::Duration) -> Self {
Self {
print_fn,
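Since both constructors just take a `FnMut(String)` sink, wiring the stats to stdout is a one-liner. A small sketch (import path assumed):

    use libafl::stats::SimpleStats; // assumed module path

    // Any FnMut(String) works here: println!, a logger, a file writer, ...
    let stats = SimpleStats::new(|s| println!("{}", s));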
|
||||
|
@ -18,11 +18,15 @@ use std::{
#[cfg(any(unix, feature = "std"))]
use crate::Error;

/// Can be converted to a slice
pub trait AsSlice<T> {
/// Convert to a slice
fn as_slice(&self) -> &[T];
}

/// The standard rand implementation for `LibAFL`.
/// It is usually the right choice, with very good speed and a reasonable randomness.
/// Not cryptographically secure (which is not what you want during fuzzing ;) )
pub type StdRand = RomuTrioRand;

/// Ways to get random around here
@ -69,7 +73,7 @@ pub trait HasRand<R>
where
R: Rand,
{
/// Get the hold RefCell Rand instance
/// Get the hold [`RefCell`] Rand instance
fn rand(&self) -> &RefCell<R>;

/// Gets the next 64 bit value
@ -93,8 +97,8 @@ where
macro_rules! default_rand {
($rand: ty) => {
/// A default RNG will usually produce a nondeterministic stream of random numbers.
/// As we do not have any way to get random seeds for no_std, they have to be reproducible there.
/// Use [`RandomSeed::with_seed`] to generate a reproducible RNG.
/// As we do not have any way to get random seeds for `no_std`, they have to be reproducible there.
/// Use [`$rand::with_seed`] to generate a reproducible RNG.
impl core::default::Default for $rand {
#[cfg(feature = "std")]
fn default() -> Self {
@ -120,6 +124,7 @@ default_rand!(RomuDuoJrRand);
/// Default implementations are provided with the "std" feature enabled, using system time in
/// nanoseconds as the initial seed.
pub trait RandomSeed: Rand + Default {
/// Creates a new [`RandomSeed`].
fn new() -> Self;
}
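The `StdRand` alias plus the `with_seed`/`RandomSeed` pair above cover the two common cases, reproducible and time-seeded runs. A short sketch, with the import path as an assumption and `current_nanos` taken from later in this file:

    use libafl::utils::{current_nanos, Rand, StdRand}; // assumed import path

    // Reproducible run: a fixed seed makes the mutation schedule replayable.
    let mut replayable = StdRand::with_seed(0x1337);
    // Fresh run: seed from the current time in nanoseconds.
    let mut rand = StdRand::with_seed(current_nanos());

    let a = replayable.next(); // `next` comes from the `Rand` trait above
    let b = rand.next();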
|
||||
|
||||
@ -146,6 +151,7 @@ const HASH_CONST: u64 = 0xa5b35705;

/// Current time
#[cfg(feature = "std")]
#[must_use]
#[inline]
pub fn current_time() -> time::Duration {
SystemTime::now().duration_since(UNIX_EPOCH).unwrap()
@ -160,13 +166,15 @@ pub fn current_time() -> time::Duration {
time::Duration::from_millis(1)
}

/// Gets current nanoseconds since UNIX_EPOCH
/// Gets current nanoseconds since [`UNIX_EPOCH`]
#[must_use]
#[inline]
pub fn current_nanos() -> u64 {
current_time().as_nanos() as u64
}

/// Gets current milliseconds since UNIX_EPOCH
/// Gets current milliseconds since [`UNIX_EPOCH`]
#[must_use]
#[inline]
pub fn current_milliseconds() -> u64 {
current_time().as_millis() as u64
@ -210,6 +218,7 @@ impl Rand for Xoshiro256StarRand {

impl Xoshiro256StarRand {
/// Creates a new Xoshiro rand with the given seed
#[must_use]
pub fn with_seed(seed: u64) -> Self {
let mut rand = Self { rand_seed: [0; 4] };
rand.set_seed(seed); // TODO: Proper random seed?
@ -242,6 +251,7 @@ impl Rand for XorShift64Rand {

impl XorShift64Rand {
/// Creates a new XorShift64 rand with the given seed
#[must_use]
pub fn with_seed(seed: u64) -> Self {
let mut ret: Self = Self { rand_seed: 0 };
ret.set_seed(seed); // TODO: Proper random seed?
@ -271,6 +281,7 @@ impl Rand for Lehmer64Rand {

impl Lehmer64Rand {
/// Creates a new Lehmer rand with the given seed
#[must_use]
pub fn with_seed(seed: u64) -> Self {
let mut ret: Self = Self { rand_seed: 0 };
ret.set_seed(seed);
@ -288,6 +299,8 @@ pub struct RomuTrioRand {
}

impl RomuTrioRand {
/// Creates a new `RomuTrioRand` with the given seed.
#[must_use]
pub fn with_seed(seed: u64) -> Self {
let mut rand = Self {
x_state: 0,
@ -327,6 +340,8 @@ pub struct RomuDuoJrRand {
}

impl RomuDuoJrRand {
/// Creates a new `RomuDuoJrRand` with the given seed.
#[must_use]
pub fn with_seed(seed: u64) -> Self {
let mut rand = Self {
x_state: 0,
@ -356,12 +371,13 @@ impl Rand for RomuDuoJrRand {
/// fake rand, for testing purposes
#[cfg(test)]
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)]
pub struct XKCDRand {
#[allow(clippy::upper_case_acronyms)]
pub struct XkcdRand {
val: u64,
}

#[cfg(test)]
impl Rand for XKCDRand {
impl Rand for XkcdRand {
fn set_seed(&mut self, val: u64) {
self.val = val
}
@ -373,11 +389,16 @@ impl Rand for XKCDRand {

/// A test rng that will return the same value (chosen by fair dice roll) for testing.
#[cfg(test)]
impl XKCDRand {
impl XkcdRand {
/// Creates a new [`XkcdRand`] with the rand of 4, [chosen by fair dice roll, guaranteed to be random](https://xkcd.com/221/).
/// Will always return this seed.
#[must_use]
pub fn new() -> Self {
Self::with_seed(4)
}

/// Creates a new [`XkcdRand`] with the given seed. Will always return this seed.
#[must_use]
pub fn with_seed(seed: u64) -> Self {
Self { val: seed }
}
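Because `XkcdRand` always hands back its seed, it makes deterministic tests trivial. A sketch of a test that relies on the behavior documented above (assuming `next` returns the stored value, as the docs state):

    #[cfg(test)]
    mod xkcd_usage {
        use super::{Rand, XkcdRand};

        #[test]
        fn always_four() {
            let mut rand = XkcdRand::new(); // seeded with 4, by fair dice roll
            assert_eq!(rand.next(), 4);
            assert_eq!(rand.next(), 4); // never changes between calls
        }
    }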
|
||||
@ -392,6 +413,7 @@ pub struct ChildHandle {
#[cfg(unix)]
impl ChildHandle {
/// Block until the child exited and the status code becomes available
#[must_use]
pub fn status(&self) -> i32 {
let mut status = -1;
unsafe {
@ -401,10 +423,13 @@ impl ChildHandle {
}
}

#[cfg(unix)]
/// The `ForkResult` (result of a fork)
#[cfg(unix)]
pub enum ForkResult {
/// The fork finished, we are the parent process.
/// The child has the handle `ChildHandle`.
Parent(ChildHandle),
/// The fork finished, we are the child process.
Child,
}

@ -492,6 +517,7 @@ pub fn find_mapping_for_address(address: usize) -> Result<(usize, usize, String,

/// Get the start and end address of the mapping containing a particular path
#[cfg(all(feature = "std", any(target_os = "linux", target_os = "android")))]
#[must_use]
pub fn find_mapping_for_path(libpath: &str) -> (usize, usize) {
let mut libstart = 0;
let mut libend = 0;
@ -513,7 +539,9 @@ pub fn find_mapping_for_path(libpath: &str) -> (usize, usize) {
mod tests {
//use xxhash_rust::xxh3::xxh3_64_with_seed;

use crate::utils::{Rand, *};
use crate::utils::{
Rand, RomuDuoJrRand, RomuTrioRand, StdRand, XorShift64Rand, Xoshiro256StarRand,
};

fn test_single_rand<R: Rand>(rand: &mut R) {
assert_ne!(rand.next(), rand.next());
@ -1,3 +1,11 @@
/*!
The frida address sanitizer runtime provides address sanitization.
When executing in `ASAN`, each memory access will get checked, using frida stalker under the hood.
The runtime can report memory errors that occurred during execution,
even if the target would not have crashed under normal conditions.
This helps find memory errors early.
*/

use hashbrown::HashMap;
use libafl::{
bolts::{ownedref::OwnedPtr, tuples::Named},
@ -423,26 +431,31 @@ impl Allocator {
}

/// Hook for malloc.
#[must_use]
pub extern "C" fn asan_malloc(size: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size, 0x8) }
}

/// Hook for new.
#[must_use]
pub extern "C" fn asan_new(size: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size, 0x8) }
}

/// Hook for new.
#[must_use]
pub extern "C" fn asan_new_nothrow(size: usize, _nothrow: *const c_void) -> *mut c_void {
unsafe { Allocator::get().alloc(size, 0x8) }
}

/// Hook for new with alignment.
#[must_use]
pub extern "C" fn asan_new_aligned(size: usize, alignment: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size, alignment) }
}

/// Hook for new with alignment.
#[must_use]
pub extern "C" fn asan_new_aligned_nothrow(
size: usize,
alignment: usize,
@ -452,16 +465,19 @@ pub extern "C" fn asan_new_aligned_nothrow(
}

/// Hook for pvalloc
#[must_use]
pub extern "C" fn asan_pvalloc(size: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size, 0x8) }
}

/// Hook for valloc
#[must_use]
pub extern "C" fn asan_valloc(size: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size, 0x8) }
}

/// Hook for calloc
#[must_use]
pub extern "C" fn asan_calloc(nmemb: usize, size: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size * nmemb, 0x8) }
}
@ -470,6 +486,7 @@ pub extern "C" fn asan_calloc(nmemb: usize, size: usize) -> *mut c_void {
///
/// # Safety
/// This function is inherently unsafe, as it takes a raw pointer
#[must_use]
pub unsafe extern "C" fn asan_realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
let mut allocator = Allocator::get();
let ret = allocator.alloc(size, 0x8);
@ -544,7 +561,7 @@ pub unsafe extern "C" fn asan_delete_nothrow(ptr: *mut c_void, _nothrow: *const
}
}

/// Hook for delete
/// Hook for `delete`
///
/// # Safety
/// This function is inherently unsafe, as it takes a raw pointer
@ -558,23 +575,26 @@ pub unsafe extern "C" fn asan_delete_aligned_nothrow(
}
}

/// Hook for malloc_usable_size
/// Hook for `malloc_usable_size`
///
/// # Safety
/// This function is inherently unsafe, as it takes a raw pointer
#[must_use]
pub unsafe extern "C" fn asan_malloc_usable_size(ptr: *mut c_void) -> usize {
Allocator::get().get_usable_size(ptr)
}

/// Hook for memalign
/// Hook for `memalign`
#[must_use]
pub extern "C" fn asan_memalign(size: usize, alignment: usize) -> *mut c_void {
unsafe { Allocator::get().alloc(size, alignment) }
}

/// Hook for posix_memalign
/// Hook for `posix_memalign`
///
/// # Safety
/// This function is inherently unsafe, as it takes a raw pointer
#[must_use]
pub unsafe extern "C" fn asan_posix_memalign(
pptr: *mut *mut c_void,
size: usize,
@ -585,6 +605,7 @@ pub unsafe extern "C" fn asan_posix_memalign(
}

/// Hook for mallinfo
#[must_use]
pub extern "C" fn asan_mallinfo() -> *mut c_void {
std::ptr::null_mut()
}
@ -594,6 +615,11 @@ extern "C" {
fn get_tls_ptr() -> *const c_void;
}

/// The frida address sanitizer runtime, providing address sanitization.
/// When executing in `ASAN`, each memory access will get checked, using frida stalker under the hood.
/// The runtime can report memory errors that occurred during execution,
/// even if the target would not have crashed under normal conditions.
/// This helps find memory errors early.
pub struct AsanRuntime {
regs: [usize; 32],
blob_report: Option<Box<[u8]>>,
@ -654,24 +680,32 @@ impl AsanError {
}
}

/// A struct holding errors that occurred during frida address sanitizer runs
#[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)]
pub struct AsanErrors {
errors: Vec<AsanError>,
}

impl AsanErrors {
/// Creates a new `AsanErrors` struct
#[must_use]
fn new() -> Self {
Self { errors: Vec::new() }
}

/// Clears this `AsanErrors` struct
pub fn clear(&mut self) {
self.errors.clear()
}

/// Gets the amount of `AsanErrors` in this struct
#[must_use]
pub fn len(&self) -> usize {
self.errors.len()
}

/// Returns `true` if no errors occurred
#[must_use]
pub fn is_empty(&self) -> bool {
self.errors.is_empty()
}
@ -679,6 +713,8 @@ impl AsanErrors {
impl CustomExitKind for AsanErrors {}

impl AsanRuntime {
/// Create a new `AsanRuntime`
#[must_use]
pub fn new(options: FridaOptions) -> Rc<RefCell<AsanRuntime>> {
let res = Rc::new(RefCell::new(Self {
regs: [0; 32],
@ -760,6 +796,7 @@ impl AsanRuntime {
}
}

/// Returns the `AsanErrors` from the recent run
#[allow(clippy::unused_self)]
pub fn errors(&mut self) -> &Option<AsanErrors> {
unsafe { &ASAN_ERRORS }
@ -777,6 +814,8 @@ impl AsanRuntime {
self.stalked_addresses.insert(stalked, real);
}

/// Resolves the real address from a stalker stalked address
#[must_use]
pub fn real_address_for_stalked(&self, stalked: usize) -> Option<&usize> {
self.stalked_addresses.get(&stalked)
}
@ -813,6 +852,10 @@ impl AsanRuntime {
}

/// Determine the stack start, end for the currently running thread
///
/// # Panics
/// Panics, if no mapping for the `stack_address` at `0xeadbeef` could be found.
#[must_use]
pub fn current_stack() -> (usize, usize) {
let stack_var = 0xeadbeef;
let stack_address = &stack_var as *const _ as *const c_void as usize;
@ -1561,85 +1604,101 @@ impl AsanRuntime {
}

/// Get the blob which implements the report funclet
#[must_use]
#[inline]
pub fn blob_report(&self) -> &[u8] {
self.blob_report.as_ref().unwrap()
}

/// Get the blob which checks a byte access
#[must_use]
#[inline]
pub fn blob_check_mem_byte(&self) -> &[u8] {
self.blob_check_mem_byte.as_ref().unwrap()
}

/// Get the blob which checks a halfword access
#[must_use]
#[inline]
pub fn blob_check_mem_halfword(&self) -> &[u8] {
self.blob_check_mem_halfword.as_ref().unwrap()
}

/// Get the blob which checks a dword access
#[must_use]
#[inline]
pub fn blob_check_mem_dword(&self) -> &[u8] {
self.blob_check_mem_dword.as_ref().unwrap()
}

/// Get the blob which checks a qword access
#[must_use]
#[inline]
pub fn blob_check_mem_qword(&self) -> &[u8] {
self.blob_check_mem_qword.as_ref().unwrap()
}

/// Get the blob which checks a 16 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_16bytes(&self) -> &[u8] {
self.blob_check_mem_16bytes.as_ref().unwrap()
}

/// Get the blob which checks a 3 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_3bytes(&self) -> &[u8] {
self.blob_check_mem_3bytes.as_ref().unwrap()
}

/// Get the blob which checks a 6 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_6bytes(&self) -> &[u8] {
self.blob_check_mem_6bytes.as_ref().unwrap()
}

/// Get the blob which checks a 12 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_12bytes(&self) -> &[u8] {
self.blob_check_mem_12bytes.as_ref().unwrap()
}

/// Get the blob which checks a 24 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_24bytes(&self) -> &[u8] {
self.blob_check_mem_24bytes.as_ref().unwrap()
}

/// Get the blob which checks a 32 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_32bytes(&self) -> &[u8] {
self.blob_check_mem_32bytes.as_ref().unwrap()
}

/// Get the blob which checks a 48 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_48bytes(&self) -> &[u8] {
self.blob_check_mem_48bytes.as_ref().unwrap()
}

/// Get the blob which checks a 64 byte access
#[must_use]
#[inline]
pub fn blob_check_mem_64bytes(&self) -> &[u8] {
self.blob_check_mem_64bytes.as_ref().unwrap()
}
}

/// static field for `AsanErrors` for a run
pub static mut ASAN_ERRORS: Option<AsanErrors> = None;

/// An observer for frida address sanitizer `AsanError`s for a frida executor run
#[derive(Serialize, Deserialize)]
#[allow(clippy::unsafe_derive_deserialize)]
pub struct AsanErrorsObserver {
@ -1668,24 +1727,32 @@ impl Named for AsanErrorsObserver {
}

impl AsanErrorsObserver {
/// Creates a new `AsanErrorsObserver`, pointing to a constant `AsanErrors` field
#[must_use]
pub fn new(errors: &'static Option<AsanErrors>) -> Self {
Self {
errors: OwnedPtr::Ptr(errors as *const Option<AsanErrors>),
}
}

/// Creates a new `AsanErrorsObserver`, owning the `AsanErrors`
#[must_use]
pub fn new_owned(errors: Option<AsanErrors>) -> Self {
Self {
errors: OwnedPtr::Owned(Box::new(errors)),
}
}

/// Creates a new `AsanErrorsObserver` from a raw ptr
#[must_use]
pub fn new_from_ptr(errors: *const Option<AsanErrors>) -> Self {
Self {
errors: OwnedPtr::Ptr(errors),
}
}

/// gets the [`AsanErrors`] from the previous run
#[must_use]
pub fn errors(&self) -> Option<&AsanErrors> {
match &self.errors {
OwnedPtr::Ptr(p) => unsafe { p.as_ref().unwrap().as_ref() },
@ -1694,6 +1761,7 @@ impl AsanErrorsObserver {
}
}

/// A feedback reporting potential [`AsanErrors`] from an `AsanErrorsObserver`
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct AsanErrorsFeedback {
errors: Option<AsanErrors>,
@ -1747,6 +1815,8 @@ impl Named for AsanErrorsFeedback {
}

impl AsanErrorsFeedback {
/// Create a new `AsanErrorsFeedback`
#[must_use]
pub fn new() -> Self {
Self { errors: None }
}
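Taken together, these two types are how frida-mode ASAN findings reach the fuzzer: the observer snapshots the global `ASAN_ERRORS`, and the feedback turns a non-empty snapshot into an objective. A hedged sketch of the wiring; how they are then passed to the executor and state is elided:

    // The static lives in this file; the observer only borrows it.
    let asan_observer = AsanErrorsObserver::new(unsafe { &ASAN_ERRORS });
    // Used as (part of) the objective feedback, so ASAN reports become solutions.
    let asan_feedback = AsanErrorsFeedback::new();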
|
||||
|
@ -35,24 +35,31 @@ use std::{path::PathBuf, rc::Rc};

use crate::{asan_rt::AsanRuntime, FridaOptions};

/// An helper that feeds FridaInProcessExecutor with user-supplied instrumentation
/// An helper that feeds [`FridaInProcessExecutor`] with user-supplied instrumentation
pub trait FridaHelper<'a> {
/// Access to the stalker `Transformer`
fn transformer(&self) -> &Transformer<'a>;

/// Register a new thread with this `FridaHelper`
fn register_thread(&self);

/// Called prior to execution of an input
fn pre_exec<I: Input + HasTargetBytes>(&mut self, input: &I);

/// Called after execution of an input
fn post_exec<I: Input + HasTargetBytes>(&mut self, input: &I);

/// Returns `true` if stalker is enabled
fn stalker_enabled(&self) -> bool;

/// pointer to the frida coverage map
fn map_ptr(&mut self) -> *mut u8;
}

/// (Default) map size for frida coverage reporting
pub const MAP_SIZE: usize = 64 * 1024;

/// An helper that feeds FridaInProcessExecutor with edge-coverage instrumentation
/// An helper that feeds [`FridaInProcessExecutor`] with edge-coverage instrumentation
pub struct FridaInstrumentationHelper<'a> {
map: [u8; MAP_SIZE],
previous_pc: [u64; 1],
@ -74,7 +81,7 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> {
self.transformer.as_ref().unwrap()
}

/// Register the current thread with the FridaInstrumentationHelper
/// Register the current thread with the [`FridaInstrumentationHelper`]
fn register_thread(&self) {
self.asan_runtime.borrow().register_thread();
}
@ -115,6 +122,7 @@ impl<'a> FridaHelper<'a> for FridaInstrumentationHelper<'a> {
}

/// Helper function to get the size of a module's CODE section from frida
#[must_use]
pub fn get_module_size(module_name: &str) -> usize {
let mut code_size = 0;
let code_size_ref = &mut code_size;
@ -126,7 +134,7 @@ pub fn get_module_size(module_name: &str) -> usize {
code_size
}

/// A minimal maybe_log implementation. We insert this into the transformed instruction stream
/// A minimal `maybe_log` implementation. We insert this into the transformed instruction stream
/// every time we need a copy that is within a direct branch of the start of the transformed basic
/// block.
#[cfg(target_arch = "x86_64")]
@ -194,10 +202,11 @@ fn get_pc(context: &CpuContext) -> usize {
context.rip() as usize
}

/// The implementation of the FridaInstrumentationHelper
/// The implementation of the [`FridaInstrumentationHelper`]
impl<'a> FridaInstrumentationHelper<'a> {
/// Constructor function to create a new FridaInstrumentationHelper, given a module_name.
/// Constructor function to create a new [`FridaInstrumentationHelper`], given a `module_name`.
#[allow(clippy::clippy::too_many_lines)]
#[must_use]
pub fn new(
gum: &'a Gum,
options: &'a FridaOptions,
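A small sketch of how the helpers above are used; the tail of `FridaInstrumentationHelper::new` is cut off in this hunk, so the constructor call itself is only described, and the module name is a placeholder:

    let options = FridaOptions::parse_env_options();
    // "libtarget.so" is a hypothetical module name.
    let code_size = get_module_size("libtarget.so");
    println!("CODE section of the target module: {} bytes", code_size);
    // A FridaInstrumentationHelper would then be constructed from the Gum handle,
    // `options` and the instrumented module name(s), per the signature starting above.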
|
||||
|
@ -1,4 +1,11 @@
/*!
The frida executor is a binary-only mode for `LibAFL`.
It can report coverage and, on supported architectures, even reports memory access errors.
*/

/// The frida address sanitizer runtime
pub mod asan_rt;
/// The `LibAFL` frida helper
pub mod helper;

/// A representation of the various Frida options
@ -15,9 +22,13 @@ pub struct FridaOptions {
}

impl FridaOptions {
/// Parse the frida options from the LIBAFL_FRIDA_OPTIONS environment variable.
/// Parse the frida options from the [`LIBAFL_FRIDA_OPTIONS`] environment variable.
///
/// Options are ':' separated, and each options is a 'name=value' string.
/// Options are `:` separated, and each option is a `name=value` string.
///
/// # Panics
/// Panics if no `=` sign exists in the input, or if the `value` behind `=` has zero length.
#[must_use]
pub fn parse_env_options() -> Self {
let mut options = Self::default();

@ -88,36 +99,42 @@ impl FridaOptions {
}

/// Is ASAN enabled?
#[must_use]
#[inline]
pub fn asan_enabled(&self) -> bool {
self.enable_asan
}

/// Is coverage enabled?
#[must_use]
#[inline]
pub fn coverage_enabled(&self) -> bool {
self.enable_coverage
}

/// Is DrCov enabled?
/// Is `DrCov` enabled?
#[must_use]
#[inline]
pub fn drcov_enabled(&self) -> bool {
self.enable_drcov
}

/// Should ASAN detect leaks
#[must_use]
#[inline]
pub fn asan_detect_leaks(&self) -> bool {
self.enable_asan_leak_detection
}

/// Should ASAN continue after a memory error is detected
#[must_use]
#[inline]
pub fn asan_continue_after_error(&self) -> bool {
self.enable_asan_continue_after_error
}

/// Should ASAN gather (and report) allocation-/free-site backtraces
#[must_use]
#[inline]
pub fn asan_allocation_backtraces(&self) -> bool {
self.enable_asan_allocation_backtraces
@ -125,12 +142,14 @@ impl FridaOptions {

/// Whether stalker should be enabled. I.e. whether at least one stalker requiring option is
/// enabled.
#[must_use]
#[inline]
pub fn stalker_enabled(&self) -> bool {
self.enable_asan || self.enable_coverage || self.enable_drcov
}

/// A list of locations which will not be instrumented for ASAN or coverage purposes
#[must_use]
pub fn dont_instrument_locations(&self) -> Option<Vec<(String, usize)>> {
self.instrument_suppress_locations.clone()
}
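For orientation, everything above is driven by a single environment variable. A hedged sketch; the option names in the string are illustrative guesses, only the `:`-separated `name=value` format and the accessors come from this hunk:

    // Hypothetical option names; check the parser for the real ones.
    std::env::set_var("LIBAFL_FRIDA_OPTIONS", "asan=true:coverage=true");
    let options = FridaOptions::parse_env_options();
    if options.asan_enabled() {
        println!("ASAN on, stalker_enabled = {}", options.stalker_enabled());
    }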
|
||||
|
@ -17,6 +17,7 @@ libfuzzer = []
value_profile = []
cmplog = []
pcguard = ["pcguard_hitcounts"]
clippy = [] # Ignore compiler warnings during clippy

[build-dependencies]
cc = { version = "1.0", features = ["parallel"] }
@ -1,11 +1,20 @@
//! `CmpLog` logs and reports back values touched during fuzzing.
//! The values will then be used in subsequent mutations.

// TODO compile time flag
/// The `CmpLogMap` W value
pub const CMPLOG_MAP_W: usize = 65536;
/// The `CmpLogMap` H value
pub const CMPLOG_MAP_H: usize = 32;
/// The `CmpLog` map size
pub const CMPLOG_MAP_SIZE: usize = CMPLOG_MAP_W * CMPLOG_MAP_H;

/// `CmpLog` instruction kind
pub const CMPLOG_KIND_INS: u8 = 0;
/// `CmpLog` return kind
pub const CMPLOG_KIND_RTN: u8 = 1;

/// The header for `CmpLog` hits.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct CmpLogHeader {
@ -14,10 +23,12 @@ pub struct CmpLogHeader {
kind: u8,
}

/// The operands logged during `CmpLog`.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct CmpLogOperands(u64, u64);

/// A struct containing the `CmpLog` metadata for a `LibAFL` run.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct CmpLogMap {
@ -25,6 +36,7 @@ pub struct CmpLogMap {
operands: [[CmpLogOperands; CMPLOG_MAP_H]; CMPLOG_MAP_W],
}

/// The global `CmpLog` map for the current `LibAFL` run.
#[no_mangle]
pub static mut libafl_cmplog_map: CmpLogMap = CmpLogMap {
headers: [CmpLogHeader {
@ -37,6 +49,7 @@ pub static mut libafl_cmplog_map: CmpLogMap = CmpLogMap {

pub use libafl_cmplog_map as CMPLOG_MAP;

/// Value indicating if cmplog is enabled.
#[no_mangle]
pub static mut libafl_cmplog_enabled: u8 = 0;
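To put the constants above in perspective, the map holds `CMPLOG_MAP_W * CMPLOG_MAP_H = 65536 * 32 = 2,097,152` operand slots per run. A tiny sketch of toggling logging around one execution, assuming (as the flag's doc comment suggests) that the instrumentation only records while it is non-zero:

    unsafe {
        libafl_cmplog_enabled = 1; // start recording comparison operands
        // ... run exactly one input through the instrumented target ...
        libafl_cmplog_enabled = 0; // stop recording before reading the map
    }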
|
||||
|
||||
|
@ -1,15 +1,21 @@
//! [`DrCov`](https://dynamorio.org/page_drcov.html) support for `LibAFL` frida mode,
//! writing basic-block trace files to be read by coverage analysis tools, such as [Lighthouse](https://github.com/gaasedelen/lighthouse),
//! [bncov](https://github.com/ForAllSecure/bncov), [dragondance](https://github.com/0ffffffffh/dragondance), etc.

use rangemap::RangeMap;
use std::{
fs::File,
io::{BufWriter, Write},
};

/// A basic block struct
#[derive(Clone, Copy)]
pub struct DrCovBasicBlock {
start: usize,
end: usize,
}

/// A writer for `DrCov` files
pub struct DrCovWriter<'a> {
writer: BufWriter<File>,
module_mapping: &'a RangeMap<usize, (u16, &'a str)>,
@ -24,11 +30,14 @@ struct DrCovBasicBlockEntry {
}

impl DrCovBasicBlock {
/// Create a new [`DrCovBasicBlock`] with the given `start` and `end` addresses.
#[must_use]
pub fn new(start: usize, end: usize) -> Self {
Self { start, end }
}
}
impl<'a> DrCovWriter<'a> {
/// Create a new [`DrCovWriter`]
pub fn new(
path: &str,
module_mapping: &'a RangeMap<usize, (u16, &str)>,
@ -43,6 +52,7 @@ impl<'a> DrCovWriter<'a> {
}
}

/// Write the `DrCov` file.
pub fn write(&mut self) {
self.writer
.write_all(b"DRCOV VERSION: 2\nDRCOV FLAVOR: libafl\n")
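A hedged usage sketch: `DrCovWriter::new` clearly takes more arguments than are visible in this hunk (the recorded basic blocks presumably among them), so only the block and module-map construction are shown literally:

    use rangemap::RangeMap;

    // Map target address ranges to (module id, module path); the values are placeholders.
    let mut modules: RangeMap<usize, (u16, &str)> = RangeMap::new();
    modules.insert(0x10000..0x20000, (0, "/usr/lib/libtarget.so"));

    // One executed basic block, recorded by its start and end address.
    let block = DrCovBasicBlock::new(0x10ab0, 0x10ac4);
    // A DrCovWriter over `modules` (plus the recorded blocks) then emits the
    // "DRCOV VERSION: 2" file via `write()`, as shown above.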
|
||||
|
@ -11,6 +11,7 @@ pub mod libfuzzer;
pub use libfuzzer::*;

#[cfg(all(feature = "value_profile", feature = "cmplog"))]
#[cfg(not(any(doc, feature = "clippy")))]
compile_error!("the libafl_targets `value_profile` and `cmplog` features are mutually exclusive.");

#[cfg(feature = "value_profile")]
@ -1,4 +1,7 @@
/// We will interact with a C++ target, so use external c functionality
//! [Libfuzzer](https://www.llvm.org/docs/LibFuzzer.html)-style runtime wrapper for `LibAFL`.
//! This makes `LibAFL` interoperable with harnesses written for other fuzzers like `Libfuzzer` and [`AFLplusplus`](aflplus.plus).
//! We will interact with a C++ target, so use external c functionality

extern "C" {
/// int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
fn LLVMFuzzerTestOneInput(data: *const u8, size: usize) -> i32;
@ -7,8 +10,16 @@ extern "C" {
fn libafl_targets_libfuzzer_init(argc: *const i32, argv: *const *const *const u8) -> i32;
}

/// Calls the (native) libfuzzer initialize function.
/// Returns the value returned by the init function.
/// # Safety
/// Calls the libfuzzer-style init function which is native code.
#[allow(clippy::similar_names)]
#[allow(clippy::clippy::must_use_candidate)] // nobody uses that return code...
pub fn libfuzzer_initialize(args: &[String]) -> i32 {
let argv: Vec<*const u8> = args.iter().map(|x| x.as_bytes().as_ptr()).collect();
assert!(argv.len() < i32::MAX as usize);
#[allow(clippy::cast_possible_wrap)]
let argc = argv.len() as i32;
unsafe {
libafl_targets_libfuzzer_init(
@ -18,6 +29,10 @@ pub fn libfuzzer_initialize(args: &[String]) -> i32 {
}
}

/// Call a single input of a libfuzzer-style cpp-harness
/// # Safety
/// Calls the libfuzzer harness. We actually think the target is unsafe and crashes eventually, that's why we do all this fuzzing.
#[allow(clippy::clippy::must_use_candidate)]
pub fn libfuzzer_test_one_input(buf: &[u8]) -> i32 {
unsafe { LLVMFuzzerTestOneInput(buf.as_ptr(), buf.len()) }
}
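In a harness binary these two wrappers are typically called once at startup and then once per input. A minimal sketch; only the two signatures come from this hunk, the surrounding control flow is an assumption:

    // Forward the process arguments to the native initializer once.
    let args: Vec<String> = std::env::args().collect();
    libfuzzer_initialize(&args);

    // Then, for every testcase the fuzzer produces:
    let run_one = |buf: &[u8]| libfuzzer_test_one_input(buf);
    run_one(&b"POC"[..]);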
|
||||
|
@ -1,15 +1,26 @@
//! [`LLVM` `PcGuard`](https://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards) runtime for `LibAFL`.

#[cfg(all(feature = "pcguard_edges", feature = "pcguard_hitcounts"))]
#[cfg(not(any(doc, feature = "clippy")))]
compile_error!(
"the libafl_targets `pcguard_edges` and `pcguard_hitcounts` features are mutually exclusive."
);

// TODO compile time flag
/// The map size for `SanCov` edges.
pub const EDGES_MAP_SIZE: usize = 65536;

/// The map for `SanCov` edges.
pub static mut EDGES_MAP: [u8; EDGES_MAP_SIZE] = [0; EDGES_MAP_SIZE];
//pub static mut CMP_MAP: [u8; EDGES_MAP_SIZE] = [0; EDGES_MAP_SIZE];
/// The max count of edges tracked.
pub static mut MAX_EDGES_NUM: usize = 0;

/// Callback for sancov `pc_guard` - usually called by `llvm` on each block or edge.
///
/// # Safety
/// Dereferences `guard`, reads the position from there, then dereferences the [`EDGES_MAP`] at that position.
/// Should usually not be called directly.
#[no_mangle]
pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) {
let pos = *guard as usize;
@ -24,6 +35,10 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) {
}
}

/// Initialize the sancov `pc_guard` - usually called by `llvm`.
///
/// # Safety
/// Dereferences at `start` and writes to it.
#[no_mangle]
pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32, stop: *mut u32) {
if start == stop || *start != 0 {
@ -31,7 +46,7 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32
}

while start < stop {
MAX_EDGES_NUM += 1;
MAX_EDGES_NUM = MAX_EDGES_NUM.wrapping_add(1);
*start = (MAX_EDGES_NUM & (EDGES_MAP_SIZE - 1)) as u32;
start = start.offset(1);
}
||||
|
@ -1,6 +1,10 @@
|
||||
//! Value profile support for `LibAFL`
|
||||
|
||||
// TODO compile time flag
|
||||
/// The Cmp map size.
|
||||
pub const CMP_MAP_SIZE: usize = 65536;
|
||||
|
||||
/// The constant cmplog map for the current `LibAFL` target
|
||||
#[no_mangle]
|
||||
pub static mut libafl_cmp_map: [u8; CMP_MAP_SIZE] = [0; CMP_MAP_SIZE];
|
||||
|
||||
|