Panic on Too Slow Broker (#230)
* panic on too slow broker
* constant for max pending pages

commit 9591ed995e (parent b0cb74324c)

@@ -96,6 +96,10 @@ use nix::sys::socket::{self, sockopt::ReusePort};
 #[cfg(all(unix, feature = "std"))]
 use std::os::unix::io::AsRawFd;
 
+/// The max number of pages a [`client`] may have mapped that were not yet read by the [`broker`]
+/// Usually, this value should not exceed `1`, else the broker cannot keep up with the amount of incoming messages.
+/// Instead of increasing this value, you may consider sending new messages at a lower rate, else your Sender will eventually `OOM`.
+const LLMP_CFG_MAX_PENDING_UNREAD_PAGES: usize = 3;
 /// We'll start off with 256 megabyte maps per fuzzer client
 #[cfg(not(feature = "llmp_small_maps"))]
 const LLMP_CFG_INITIAL_MAP_SIZE: usize = 1 << 28;
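As a sanity check on the doc comment above, `1 << 28` bytes is exactly 256 MiB; a one-line sketch to verify the arithmetic:

    fn main() {
        const LLMP_CFG_INITIAL_MAP_SIZE: usize = 1 << 28;
        // 2^28 = 268_435_456 bytes = 256 * 1024 * 1024, i.e. 256 MiB,
        // matching the "256 megabyte maps" doc comment.
        assert_eq!(LLMP_CFG_INITIAL_MAP_SIZE, 256 * 1024 * 1024);
    }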
@@ -117,6 +121,8 @@ const LLMP_TAG_END_OF_PAGE: Tag = 0xAF1E0F1;
 const LLMP_TAG_NEW_SHM_CLIENT: Tag = 0xC11E471;
 /// The sender on this map is exiting (if broker exits, clients should exit gracefully);
 const LLMP_TAG_EXITING: Tag = 0x13C5171;
+/// Client gave up as the receiver/broker was too slow
+const LLMP_SLOW_RECEIVER_PANIC: Tag = 0x70051041;
 
 /// Unused...
 pub const LLMP_FLAG_INITIALIZED: Flags = 0x0;
@@ -851,6 +857,13 @@ where
             }
             unmap_until_excl += 1;
         }
+
+        if unmap_until_excl == 0 && self.out_maps.len() > LLMP_CFG_MAX_PENDING_UNREAD_PAGES {
+            // We send one last message to the broker before quitting.
+            self.send_buf(LLMP_SLOW_RECEIVER_PANIC, &[]).unwrap();
+            panic!("The receiver/broker could not process our sent llmp messages in time. Either we're sending too many messages too fast, the broker got stuck, or it crashed. Giving up.");
+        }
+
         // Remove all maps that the broker already mapped
         // simply removing them from the vec should then call drop and unmap them.
         self.out_maps.drain(0..unmap_until_excl);
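The give-up condition fires only when an entire pruning pass confirmed nothing (`unmap_until_excl == 0`) while more than `LLMP_CFG_MAX_PENDING_UNREAD_PAGES` maps are still outstanding. A minimal standalone sketch of that predicate; the `PendingPages` struct and `u64` page ids are illustrative stand-ins, only the `out_maps` name and the constant mirror the diff:

    const LLMP_CFG_MAX_PENDING_UNREAD_PAGES: usize = 3;

    /// Illustrative stand-in: a sender tracking pages the broker has not yet read.
    struct PendingPages {
        out_maps: Vec<u64>, // page ids instead of real shared maps
    }

    impl PendingPages {
        /// True when a pruning pass freed nothing and too many pages are pending,
        /// i.e. the condition under which the commit makes the sender panic.
        fn receiver_too_slow(&self, unmap_until_excl: usize) -> bool {
            unmap_until_excl == 0 && self.out_maps.len() > LLMP_CFG_MAX_PENDING_UNREAD_PAGES
        }
    }

    fn main() {
        let pages = PendingPages { out_maps: vec![1, 2, 3, 4] };
        assert!(pages.receiver_too_slow(0)); // 4 pending > 3 allowed, no progress
        assert!(!pages.receiver_too_slow(1)); // progress was made, keep going
    }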
@@ -2231,7 +2244,12 @@ where
             }
         };
 
-        if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT {
+        match (*msg).tag {
+            // first, handle the special, llmp-internal messages
+            LLMP_SLOW_RECEIVER_PANIC => {
+                return Err(Error::Unknown(format!("The broker was too slow to handle messages of client {} in time, so it quit. Either the client sent messages too fast, or we (the broker) got stuck!", client_id)));
+            }
+            LLMP_TAG_NEW_SHM_CLIENT => {
                 /* This client informs us about yet another new client
                 add it to the list! Also, no need to forward this msg. */
                 let msg_buf_len_padded = (*msg).buf_len_padded;
@@ -2276,7 +2294,9 @@ where
                     }
                 };
             }
-        } else {
+            }
+            // handle all other messages
+            _ => {
                 // The message is not specifically for us. Let the user handle it, then forward it to the clients, if necessary.
                 let mut should_forward_msg = true;
 
@@ -2293,6 +2313,7 @@ where
                 }
             }
         }
+        }
     }
 
 /// A restorable client description
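Taken together, these three hunks turn the broker's `if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT { … } else { … }` into one `match` over the tag, so llmp-internal messages are intercepted before anything is forwarded. A safe, self-contained sketch of that dispatch shape; the tag values come from the diff, while `Message`, `handle_msg`, and the `String` error are simplified stand-ins for the real broker types:

    type Tag = u32;

    const LLMP_TAG_NEW_SHM_CLIENT: Tag = 0xC11E471;
    const LLMP_SLOW_RECEIVER_PANIC: Tag = 0x70051041;

    struct Message {
        tag: Tag,
    }

    /// Returns Ok(true) if the message should be forwarded to the other clients.
    fn handle_msg(msg: &Message, client_id: u32) -> Result<bool, String> {
        match msg.tag {
            // first, handle the special, llmp-internal messages
            LLMP_SLOW_RECEIVER_PANIC => Err(format!(
                "client {} gave up because the broker was too slow",
                client_id
            )),
            // a client announces another client's shared map; nothing to forward
            LLMP_TAG_NEW_SHM_CLIENT => Ok(false),
            // handle all other messages: let the user decide, then forward
            _ => Ok(true),
        }
    }

    fn main() {
        assert!(handle_msg(&Message { tag: LLMP_SLOW_RECEIVER_PANIC }, 7).is_err());
        assert_eq!(handle_msg(&Message { tag: 0x1234 }, 7), Ok(true));
    }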
|
@ -13,7 +13,7 @@ use crate::{
|
|||||||
executors::ExitKind, inputs::Input, observers::ObserversTuple, stats::UserStats, Error,
|
executors::ExitKind, inputs::Input, observers::ObserversTuple, stats::UserStats, Error,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// A per-fuzzer unique ID, usually starting with `0` and increasing
|
/// A per-fuzzer unique `ID`, usually starting with `0` and increasing
|
||||||
/// by `1` in multiprocessed `EventManager`s, such as [`self::llmp::LlmpEventManager`].
|
/// by `1` in multiprocessed `EventManager`s, such as [`self::llmp::LlmpEventManager`].
|
||||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||||
pub struct EventManagerId {
|
pub struct EventManagerId {
|
||||||
@@ -209,7 +209,14 @@ pub trait EventFirer<I, S>
 where
     I: Input,
 {
-    /// Send off an event to the broker
+    /// Send off an [`Event`] to the broker
+    ///
+    /// For multi-processed managers, such as [`llmp::LlmpEventManager`],
+    /// this serializes the [`Event`] and commits it to the [`llmp`] page.
+    /// In this case, if you `fire` faster than the broker can consume
+    /// (for example for each [`Input`], on multiple cores)
+    /// the [`llmp`] [`ShMem`] may fill up and the client will eventually OOM or [`panic`].
+    /// This should not happen for normal use-cases.
     fn fire(&mut self, state: &mut S, event: Event<I>) -> Result<(), Error>;
 
     /// Serialize all observers for this type and manager
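The new `fire` documentation implies the fix belongs on the producer side: send fewer, larger events rather than one per input. A hypothetical pacing sketch; `fire_event` stands in for a real `EventFirer::fire` call, and the interval is an assumption, not a LibAFL setting:

    use std::thread::sleep;
    use std::time::Duration;

    fn main() {
        // Stand-in for `EventFirer::fire`: in LibAFL this would serialize
        // the event and commit it to the llmp page.
        let mut fired = 0u32;
        let mut fire_event = |i: u32| {
            fired += 1;
            println!("fired event {}", i);
        };

        for i in 0..10 {
            fire_event(i);
            // Illustrative back-pressure: pace the sender so a slow broker
            // can drain its pages in time.
            sleep(Duration::from_millis(10));
        }
        assert_eq!(fired, 10);
    }

Batching achieves the same effect without sleeping, e.g. aggregating stats and firing one event every few seconds instead of one per execution.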
|
Loading…
x
Reference in New Issue
Block a user