diff --git a/afl/Cargo.toml b/afl/Cargo.toml
index 83accdea15..4eb56e00d5 100644
--- a/afl/Cargo.toml
+++ b/afl/Cargo.toml
@@ -34,3 +34,4 @@ xxhash-rust = { version = "0.8.0", features = ["xxh3"] } # xxh3 hashing for rust
 serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib
 erased-serde = "0.3.12"
 postcard = "0.5.1" # no_std compatible serde serialization fromat
+memoffset = "0.6" # for offset_of support
\ No newline at end of file
diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp_translated.rs
index 4a869b17b8..df0b6a96bb 100644
--- a/afl/src/events/llmp_translated.rs
+++ b/afl/src/events/llmp_translated.rs
@@ -50,28 +50,34 @@ Then register some clientloops using llmp_broker_register_threaded_clientloop
 use ::libc;

-use core::ffi::c_void;
 use core::ptr;
 use core::sync::atomic::{compiler_fence, Ordering};
+use core::{ffi::c_void, time};
 use libc::{c_int, c_uint, c_ulong, c_ushort};
-use std::{ffi::CStr, os::raw::c_char};
+use std::{cmp::max, ffi::CStr, mem::size_of, os::raw::c_char, thread};

 use crate::utils::next_pow2;
 use crate::AflError;

 use super::shmem_translated::{afl_shmem_by_str, afl_shmem_deinit, afl_shmem_init, AflShmem};

+/// The header length of an llmp page in a shared map (until messages start)
+const LLMP_PAGE_HEADER_LEN: usize = offset_of!(LlmpPage, messages);
+
+/// We'll start off with 256 megabyte maps per fuzzer
+const LLMP_INITIAL_MAP_SIZE: usize = 1 << 28;
+
+/// A msg fresh from the press: No tag got sent by the user yet
+const LLMP_TAG_UNSET: u32 = 0xdeadaf;
+/// This message should not exist yet. Some bug in unsafe code!
+const LLMP_TAG_UNINITIALIZED: u32 = 0xa143af11;
+/// The end of page message
+/// When receiving this, a new sharedmap needs to be allocated.
+const LLMP_TAG_END_OF_PAGE: u32 = 0xaf1e0f1;
+/// A new client for this broker got added.
+const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xc11e471;
+
 extern "C" {
-    #[no_mangle]
-    fn usleep(__useconds: c_uint) -> c_int;
-    #[no_mangle]
-    fn fork() -> c_int;
-    #[no_mangle]
-    fn calloc(_: c_ulong, _: c_ulong) -> *mut c_void;
-    #[no_mangle]
-    fn realloc(_: *mut c_void, _: c_ulong) -> *mut c_void;
-    #[no_mangle]
-    fn free(__ptr: *mut c_void);
     #[no_mangle]
     fn memcpy(_: *mut c_void, _: *const c_void, _: c_ulong) -> *mut c_void;
     #[no_mangle]
@@ -84,67 +90,90 @@ pub type AflRet = c_uint;
 pub const AFL_RET_ALLOC: AflRet = 3;
 pub const AFL_RET_SUCCESS: AflRet = 0;

-/* AFL alloc buffer, the struct is here so we don't need to do fancy ptr
- * arithmetics */
-#[derive(Copy, Clone)]
-#[repr(C)]
-pub struct AflAllocBuf {
-    pub complete_size: c_ulong,
-    pub magic: c_ulong,
-    pub buf: [u8; 0],
+#[derive(Clone)]
+pub struct LlmpSender {
+    pub id: u32,
+    pub last_msg_sent: *mut LlmpMsg,
+    pub out_maps: Vec<LlmpPageWrapper>,
 }

 #[derive(Clone)]
-#[repr(C)]
 pub struct LlmpClient {
-    pub id: u32,
+    pub llmp_out: LlmpSender,
     pub last_msg_recvd: *mut LlmpMsg,
-    pub current_broadcast_map: *mut AflShmem,
+    pub current_broadcast_map: LlmpPageWrapper,
     pub last_msg_sent: *mut LlmpMsg,
-    pub out_map_count: c_ulong,
-    pub out_maps: *mut AflShmem,
-    pub new_out_page_hook_count: c_ulong,
-    pub new_out_page_hooks: *mut LlmpHookdataGeneric,
+    pub out_maps: Vec<LlmpPageWrapper>,
+    pub new_out_page_hooks: Vec<LlmpHookdataGeneric<LlmpClientNewPageHookFn>>,
+}
+
+#[derive(Clone)]
+struct LlmpPageWrapper {
+    shmem: AflShmem,
+}
+
+/// The page struct, placed on a shared mem instance.
+impl LlmpPageWrapper {
+    /// Creates a new page with minimum prev_max_alloc_size or LLMP_INITIAL_MAP_SIZE,
+    /// returning the initialized shared mem struct
+    unsafe fn new(sender: u32, min_size: usize) -> Result<Self, AflError> {
+        // Create a new shared page.
+        let mut shmem = AflShmem::new(new_map_size(min_size))?;
+        _llmp_page_init(&mut shmem, sender);
+        Ok(Self { shmem })
+    }
+
+    /// Initialize from a 0-terminated sharedmap id string and its size
+    unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result<Self, AflError> {
+        let shmem = AflShmem::from_str(shm_str, map_size)?;
+        // Not initializing the page here - the other side should have done it already!
+        Ok(Self { shmem })
+    }
+
+    /// Initialize from a shm_str with fixed len of 20
+    unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result<Self, AflError> {
+        let shmem = AflShmem::from_name_slice(shm_str, map_size)?;
+        // Not initializing the page here - the other side should have done it already!
+        Ok(Self { shmem })
+    }
+
+    unsafe fn page(&self) -> *mut LlmpPage {
+        shmem2page(&self.shmem)
+    }
+}

 #[derive(Copy, Clone)]
 #[repr(C)]
-pub struct LlmpHookdataGeneric {
-    pub func: *mut c_void,
+pub struct LlmpHookdataGeneric<T> {
+    pub func: T,
     pub data: *mut c_void,
 }

 #[derive(Copy, Clone)]
 #[repr(C, packed)]
 pub struct LlmpMsg {
-    pub tag: c_uint,
-    pub sender: c_uint,
-    pub message_id: c_uint,
-    pub buf_len: c_ulong,
-    pub buf_len_padded: c_ulong,
+    pub tag: u32,
+    pub sender: u32,
+    pub message_id: u64,
+    pub buf_len: u64,
+    pub buf_len_padded: u64,
     pub buf: [u8; 0],
 }

 #[derive(Clone)]
 #[repr(C)]
 pub struct LlmpBroker {
-    pub last_msg_sent: *mut LlmpMsg,
-    pub broadcast_map_count: c_ulong,
-    pub broadcast_maps: *mut AflShmem,
-    pub msg_hook_count: c_ulong,
-    pub msg_hooks: *mut LlmpHookdataGeneric,
-    pub llmp_client_count: c_ulong,
-    pub llmp_clients: *mut LlmpBrokerClientMetadata,
+    pub llmp_out: LlmpSender,
+    pub msg_hooks: Vec<LlmpHookdataGeneric<LlmpMsgHookFn>>,
+    pub llmp_clients: Vec<LlmpBrokerClientMetadata>,
 }

-#[derive(Copy, Clone)]
+#[derive(Clone)]
 #[repr(C)]
 pub struct LlmpBrokerClientMetadata {
-    pub client_type: LlmpClientType,
-    pub client_state: *mut LlmpClient,
-    pub cur_client_map: *mut AflShmem,
+    pub id: u32,
+    pub cur_client_map: LlmpPageWrapper,
     pub last_msg_broker_read: *mut LlmpMsg,
-    pub pid: c_int,
     pub clientloop: Option<LlmpClientloopFn>,
     pub data: *mut c_void,
 }
@@ -152,11 +181,6 @@ pub struct LlmpBrokerClientMetadata {
 /// The client loop, running for each spawned client
 pub type LlmpClientloopFn = unsafe fn(client: *mut LlmpClient, data: *mut c_void) -> !;

-/// Client type enum (TODO: Enumize)
-type LlmpClientType = c_uint;
-const LLMP_CLIENT_TYPE_FOREIGN_PROCESS: LlmpClientType = 3;
-const LLMP_CLIENT_TYPE_CHILD_PROCESS: LlmpClientType = 2;
-
 /// A share mem page, as used by llmp internally
 #[derive(Copy, Clone)]
 #[repr(C, packed)]
@@ -164,10 +188,10 @@ pub struct LlmpPage {
     pub sender: u32,
     pub save_to_unmap: c_ushort,
     pub sender_dead: c_ushort,
-    pub current_msg_id: c_ulong,
-    pub c_ulongotal: c_ulong,
-    pub size_used: c_ulong,
-    pub max_alloc_size: c_ulong,
+    pub current_msg_id: u64,
+    pub size_total: usize,
+    pub size_used: usize,
+    pub max_alloc_size: usize,
     pub messages: [LlmpMsg; 0],
 }

@@ -181,337 +205,256 @@ pub enum LlmpMsgHookResult {
 /// Message Hook
 pub type LlmpMsgHookFn = unsafe fn(
-    _: *mut LlmpBroker,
-    _: *mut LlmpBrokerClientMetadata,
-    _: *mut LlmpMsg,
-    _: *mut c_void,
+    broker: &LlmpBroker,
+    client_data: &LlmpBrokerClientMetadata,
+    msg: *mut LlmpMsg,
+    data: *mut c_void,
 ) -> LlmpMsgHookResult;

 /// Hook that gets called for each new page, created by LLMP
-pub type LlmpClientNewPageHookFn
= - unsafe fn(_: *mut LlmpClient, _: *mut LlmpPage, _: *mut c_void) -> (); +pub type LlmpClientNewPageHookFn = unsafe fn(client: &LlmpClient) -> (); /// Message payload when a client got added LLMP_TAG_CLIENT_ADDED_V1 */ /// This is an internal message! -/// LLMP_TAG_NEW_PAGE_V1 +/// LLMP_TAG_END_OF_PAGE_V1 #[derive(Copy, Clone)] #[repr(C, packed)] -struct LlmpPayloadNewPage { - pub map_size: c_ulong, +struct LlmpPayloadSharedMap { + pub map_size: usize, pub shm_str: [u8; 20], } -/// Returs the container element to this ptr #[inline] -unsafe fn afl_alloc_bufptr(buf: *mut c_void) -> *mut AflAllocBuf { - return (buf as *mut u8).offset(-(16 as c_ulong as isize)) as *mut AflAllocBuf; +unsafe fn shmem2page(afl_shmem: &AflShmem) -> *mut LlmpPage { + afl_shmem.map as *mut LlmpPage } -/// Optimized realloc wrapper, taken over from AFL. -/// This function makes sure *size is > size_needed after call. -/// It will realloc *buf otherwise. -/// *size will grow exponentially as per: -/// https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/ -/// @return For convenience, this function returns *buf. -/// Will return NULL and free *buf if size_needed is <1 or realloc failed. -unsafe fn afl_realloc(buf: *mut c_void, mut size_needed: c_ulong) -> *mut c_void { - let mut new_buf: *mut AflAllocBuf = 0 as *mut AflAllocBuf; - let mut current_size: c_ulong = 0 as c_ulong; - let mut next_size: c_ulong; - if !buf.is_null() { - /* the size is always stored at buf - 1*c_ulong */ - new_buf = afl_alloc_bufptr(buf); - if (*new_buf).magic != 0xaf1a110c as c_ulong { - panic!(format!( - "Illegal, non-null pointer passed to afl_realloc (buf {:?}, magic {:?})", - new_buf, - (*new_buf).magic as c_uint - )); - } - current_size = (*new_buf).complete_size - } - size_needed = (size_needed as c_ulong).wrapping_add(16 as c_ulong) as c_ulong; - /* No need to realloc */ - if current_size >= size_needed { - return buf; - } - /* No initial size was set */ - if size_needed < 64 as c_ulong { - next_size = 64 as c_ulong - } else { - /* grow exponentially */ - next_size = next_pow2(size_needed); - /* handle overflow: fall back to the original size_needed */ - if next_size == 0 { - next_size = size_needed - } - } - /* alloc */ - new_buf = realloc(new_buf as *mut c_void, next_size) as *mut AflAllocBuf; - if new_buf.is_null() { - return 0 as *mut c_void; - } - (*new_buf).complete_size = next_size; - (*new_buf).magic = 0xaf1a110c as c_ulong; - return (*new_buf).buf.as_mut_ptr() as *mut c_void; -} - -/// Call alf_free on all afl_realloc buffers. 
-#[inline]
-unsafe fn afl_free(buf: *mut c_void) {
-    if !buf.is_null() {
-        free(afl_alloc_bufptr(buf) as *mut c_void);
-    };
-}
-#[inline]
-unsafe fn shmem2page(afl_shmem: *mut AflShmem) -> *mut LlmpPage {
-    return (*afl_shmem).map as *mut LlmpPage;
-}
 /* If a msg is contained in the current page */
 unsafe fn llmp_msg_in_page(page: *mut LlmpPage, msg: *mut LlmpMsg) -> bool {
-    /* DBG("llmp_msg_in_page %p within %p-%p\n", msg, page, page + page->c_ulongotal); */
+    /* DBG("llmp_msg_in_page %p within %p-%p\n", msg, page, page + page->size_total); */
     return (page as *mut u8) < msg as *mut u8
-        && (page as *mut u8).offset((*page).c_ulongotal as isize) > msg as *mut u8;
+        && (page as *mut u8).offset((*page).size_total as isize) > msg as *mut u8;
 }
-/* allign to LLMP_ALIGNNMENT bytes */
+
+/// What byte count to align messages to
+/// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value
+const LLMP_ALIGNNMENT: usize = 64;
+
+/// Size of a new page message, header, payload, and alignment
+const EOP_MSG_SIZE: usize = llmp_align(size_of::<LlmpMsg>() + size_of::<LlmpPayloadSharedMap>());
+
+/* align to LLMP_ALIGNNMENT=64 bytes */
 #[inline]
-unsafe fn llmp_align(to_align: c_ulong) -> c_ulong {
-    if 64 as c_int == 0 as c_int || to_align.wrapping_rem(64 as c_ulong) == 0 as c_int as c_ulong {
+const fn llmp_align(to_align: usize) -> usize {
+    // check if we need to align first
+    if LLMP_ALIGNNMENT == 0 {
         return to_align;
     }
-    return to_align
-        .wrapping_add((64 as c_ulong).wrapping_sub(to_align.wrapping_rem(64 as c_int as c_ulong)));
+    // Then do the alignment
+    let modulo = to_align % LLMP_ALIGNNMENT;
+    if modulo == 0 {
+        to_align
+    } else {
+        to_align + LLMP_ALIGNNMENT - modulo
+    }
 }
-/* In case we don't have enough space, make sure the next page will be large
-enough. For now, we want to have at least enough space to store 2 of the
-largest messages we encountered. */
+
+/// In case we don't have enough space, make sure the next page will be large
+/// enough. For now, we want to have at least enough space to store 2 of the
+/// largest messages we encountered (plus one new_page message).
 #[inline]
-unsafe fn new_map_size(max_alloc: c_ulong) -> c_ulong {
-    return next_pow2({
-        let mut _a: c_ulong = max_alloc
-            .wrapping_mul(2 as c_ulong)
-            .wrapping_add(llmp_align(
-                (::std::mem::size_of::<LlmpMsg>() as c_ulong)
-                    .wrapping_add(::std::mem::size_of::<LlmpPayloadNewPage>() as c_ulong),
-            ));
-        let mut _b: c_ulong = ((1 as c_int) << 28 as c_int) as c_ulong;
-        if _a > _b {
-            _a
-        } else {
-            _b
-        }
-    });
+const fn new_map_size(max_alloc: usize) -> usize {
+    next_pow2(max(
+        max_alloc * 2 + EOP_MSG_SIZE + LLMP_PAGE_HEADER_LEN,
+        LLMP_INITIAL_MAP_SIZE,
+    ) as u64) as usize
 }
+
 /* Initialize a new llmp_page.
size should be relative to * llmp_page->messages */ -unsafe fn _llmp_page_init(mut page: *mut LlmpPage, sender: u32, size: c_ulong) { +unsafe fn _llmp_page_init(shmem: &mut AflShmem, sender: u32) { + let page = shmem2page(&shmem); (*page).sender = sender; - ::std::ptr::write_volatile(&mut (*page).current_msg_id as *mut c_ulong, 0 as c_ulong); - (*page).max_alloc_size = 0 as c_ulong; - (*page).c_ulongotal = size; - (*page).size_used = 0 as c_ulong; - (*(*page).messages.as_mut_ptr()).message_id = 0 as c_uint; - (*(*page).messages.as_mut_ptr()).tag = 0xdeadaf as c_uint; - ::std::ptr::write_volatile(&mut (*page).save_to_unmap as *mut u16, 0 as c_int as u16); - ::std::ptr::write_volatile(&mut (*page).sender_dead as *mut u16, 0 as c_int as u16); + ptr::write_volatile(&mut (*page).current_msg_id, 0); + (*page).max_alloc_size = 0; + // Don't forget to subtract our own header size + (*page).size_total = shmem.map_size - LLMP_PAGE_HEADER_LEN; + (*page).size_used = 0; + (*(*page).messages.as_mut_ptr()).message_id = 0; + (*(*page).messages.as_mut_ptr()).tag = LLMP_TAG_UNSET; + ptr::write_volatile(&mut (*page).save_to_unmap, 0); + ptr::write_volatile(&mut (*page).sender_dead, 0); } + /* Pointer to the message behind the last message */ #[inline] -unsafe fn _llmp_next_msg_ptr(last_msg: *mut LlmpMsg) -> *mut LlmpMsg { +const unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { /* DBG("_llmp_next_msg_ptr %p %lu + %lu\n", last_msg, last_msg->buf_len_padded, sizeof(llmp_message)); */ return (last_msg as *mut u8) - .offset(::std::mem::size_of::() as isize) + .offset(size_of::() as isize) .offset((*last_msg).buf_len_padded as isize) as *mut LlmpMsg; } + /* Read next message. */ -unsafe fn llmp_recv(page: *mut LlmpPage, last_msg: *mut LlmpMsg) -> *mut LlmpMsg { +unsafe fn llmp_recv( + page_wrapper: &LlmpPageWrapper, + last_msg: *mut LlmpMsg, +) -> Option<*mut LlmpMsg> { /* DBG("llmp_recv %p %p\n", page, last_msg); */ compiler_fence(Ordering::SeqCst); - if (*page).current_msg_id == 0 { + let page = page_wrapper.page(); + let current_msg_id = ptr::read_volatile(&mut (*page).current_msg_id); + if current_msg_id == 0 { /* No messages yet */ - return 0 as *mut LlmpMsg; + None } else if last_msg.is_null() { /* We never read a message from this queue. Return first. */ - return (*page).messages.as_mut_ptr(); - } else if (*last_msg).message_id as c_ulong == (*page).current_msg_id { + Some((*page).messages.as_mut_ptr()) + } else if (*last_msg).message_id == current_msg_id { /* Oops! No new message! */ - return 0 as *mut LlmpMsg; + None } else { - return _llmp_next_msg_ptr(last_msg); - }; + Some(_llmp_next_msg_ptr(last_msg)) + } } + /* Blocks/spins until the next message gets posted to the page, then returns that message. 
*/ -pub unsafe fn llmp_recv_blocking(page: *mut LlmpPage, last_msg: *mut LlmpMsg) -> *mut LlmpMsg { - let mut current_msg_id: u32 = 0 as u32; +pub unsafe fn llmp_recv_blocking( + page_wrapper: &LlmpPageWrapper, + last_msg: *mut LlmpMsg, +) -> *mut LlmpMsg { + let mut current_msg_id = 0; + let page = page_wrapper.page(); if !last_msg.is_null() { - if (*last_msg).tag == 0xaf1e0f1 as c_uint && llmp_msg_in_page(page, last_msg) as c_int != 0 - { + if (*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg) { panic!("BUG: full page passed to await_message_blocking or reset failed"); } current_msg_id = (*last_msg).message_id } loop { compiler_fence(Ordering::SeqCst); - if (*page).current_msg_id != current_msg_id as c_ulong { - let ret: *mut LlmpMsg = llmp_recv(page, last_msg); - if ret.is_null() { - panic!("BUG: blocking llmp message should never be NULL"); - } - return ret; + if ptr::read_volatile(&mut (*page).current_msg_id) != current_msg_id { + return match llmp_recv(page_wrapper, last_msg) { + Some(msg) => msg, + None => panic!("BUG: blocking llmp message should never be NULL"), + }; } } } + /* Special allocation function for EOP messages (and nothing else!) The normal alloc will fail if there is not enough space for buf_len_padded + EOP So if llmp_alloc_next fails, create new page if necessary, use this function, place EOP, commit EOP, reset, alloc again on the new space. */ -unsafe fn llmp_alloc_eop(mut page: *mut LlmpPage, mut last_msg: *mut LlmpMsg) -> *mut LlmpMsg { - if (*page).size_used.wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) > (*page).c_ulongotal - { - panic!(format!("PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, c_ulongotal {:?}", page, - (*page).size_used, (*page).c_ulongotal)); +unsafe fn llmp_alloc_eop(page: *mut LlmpPage, last_msg: *const LlmpMsg) -> *mut LlmpMsg { + if (*page).size_used + EOP_MSG_SIZE > (*page).size_total { + panic!(format!("PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, size_total {:?}", page, + (*page).size_used, (*page).size_total)); } let mut ret: *mut LlmpMsg = if !last_msg.is_null() { _llmp_next_msg_ptr(last_msg) } else { (*page).messages.as_mut_ptr() }; - if (*ret).tag == 0xa143af11 as c_uint { + if (*ret).tag == LLMP_TAG_UNINITIALIZED { panic!("Did not call send() on last message!"); } - (*ret).buf_len_padded = ::std::mem::size_of::() as c_ulong; + (*ret).buf_len_padded = size_of::() as c_ulong; (*ret).message_id = if !last_msg.is_null() { - (*last_msg).message_id = - ((*last_msg).message_id as c_uint).wrapping_add(1 as c_int as c_uint) as u32 as u32; - (*last_msg).message_id + (*last_msg).message_id + 1 } else { - 1 as c_uint + 1 }; - (*ret).tag = 0xaf1e0f1 as u32; - (*page).size_used = ((*page).size_used as c_ulong).wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) as c_ulong; - return ret; + (*ret).tag = LLMP_TAG_END_OF_PAGE; + (*page).size_used += EOP_MSG_SIZE; + ret } -/* Will return a ptr to the next msg buf, or NULL if map is full. -Never call alloc_next without either sending or cancelling the last allocated message for this page! -There can only ever be up to one message allocated per page at each given time. 
-*/ -unsafe fn llmp_alloc_next( - mut page: *mut LlmpPage, - last_msg: *mut LlmpMsg, - buf_len: c_ulong, -) -> *mut LlmpMsg { - let mut buf_len_padded: c_ulong = buf_len; - let mut complete_msg_size: c_ulong = - llmp_align((::std::mem::size_of::() as c_ulong).wrapping_add(buf_len_padded)); + +/// Will return a ptr to the next msg buf, or None if map is full. +/// Never call alloc_next without either sending or cancelling the last allocated message for this page! +/// There can only ever be up to one message allocated per page at each given time. +unsafe fn llmp_alloc_next(llmp: &mut LlmpSender, buf_len: usize) -> Option<*mut LlmpMsg> { + let mut buf_len_padded = buf_len; + let mut complete_msg_size = llmp_align(size_of::() + buf_len_padded); + let page = llmp.out_maps.last().unwrap().page(); + let last_msg = llmp.last_msg_sent; /* DBG("XXX complete_msg_size %lu (h: %lu)\n", complete_msg_size, sizeof(llmp_message)); */ /* In case we don't have enough space, make sure the next page will be large * enough */ - (*page).max_alloc_size = { - let mut _a: c_ulong = (*page).max_alloc_size; - let mut _b: c_ulong = complete_msg_size; - if _a > _b { - _a - } else { - _b - } - }; + // For future allocs, keep track of the maximum (aligned) alloc size we used + (*page).max_alloc_size = max((*page).max_alloc_size, complete_msg_size); + let mut ret: *mut LlmpMsg; /* DBG("last_msg %p %d (%d)\n", last_msg, last_msg ? (int)last_msg->tag : -1, (int)LLMP_TAG_END_OF_PAGE_V1); */ - if last_msg.is_null() || (*last_msg).tag == 0xaf1e0f1 as c_uint { - /* We start fresh */ + if last_msg.is_null() || (*last_msg).tag == LLMP_TAG_END_OF_PAGE { + /* We start fresh, on a new page */ ret = (*page).messages.as_mut_ptr(); /* The initial message may not be alligned, so we at least align the end of it. Technically, c_ulong can be smaller than a pointer, then who knows what happens */ - let base_addr: c_ulong = ret as c_ulong; - buf_len_padded = llmp_align(base_addr.wrapping_add(complete_msg_size)) - .wrapping_sub(base_addr) - .wrapping_sub(::std::mem::size_of::() as c_ulong); - complete_msg_size = - buf_len_padded.wrapping_add(::std::mem::size_of::() as c_ulong); + let base_addr = ret as usize; + buf_len_padded = + llmp_align(base_addr + complete_msg_size) - base_addr - size_of::(); + complete_msg_size = buf_len_padded + size_of::(); /* DBG("XXX complete_msg_size NEW %lu\n", complete_msg_size); */ /* Still space for the new message plus the additional "we're full" message? */ - if (*page) - .size_used - .wrapping_add(complete_msg_size) - .wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) - > (*page).c_ulongotal - { + if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { /* We're full. */ - return 0 as *mut LlmpMsg; + return None; } /* We need to start with 1 for ids, as current message id is initialized * with 0... */ (*ret).message_id = if !last_msg.is_null() { - (*last_msg).message_id.wrapping_add(1 as c_uint) + (*last_msg).message_id + 1 } else { - 1 as c_uint + 1 } - } else if (*page).current_msg_id != (*last_msg).message_id as c_ulong { + } else if (*page).current_msg_id != (*last_msg).message_id { /* Oops, wrong usage! */ panic!(format!("BUG: The current message never got commited using llmp_send! 
(page->current_msg_id {:?}, last_msg->message_id: {})", (*page).current_msg_id, (*last_msg).message_id)); } else { - buf_len_padded = - complete_msg_size.wrapping_sub(::std::mem::size_of::() as c_ulong); + buf_len_padded = complete_msg_size - size_of::(); /* DBG("XXX ret %p id %u buf_len_padded %lu complete_msg_size %lu\n", ret, ret->message_id, buf_len_padded, * complete_msg_size); */ - if (*page) - .size_used - .wrapping_add(complete_msg_size) - .wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) - > (*page).c_ulongotal - { - /* Still space for the new message plus the additional "we're full" message? - */ + + /* Still space for the new message plus the additional "we're full" message? */ + if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { /* We're full. */ - return 0 as *mut LlmpMsg; + return None; } ret = _llmp_next_msg_ptr(last_msg); - (*ret).message_id = (*last_msg).message_id.wrapping_add(1 as c_uint) + (*ret).message_id = (*last_msg).message_id + 1 } + /* The beginning of our message should be messages + size_used, else nobody * sent the last msg! */ /* DBG("XXX ret %p - page->messages %p = %lu != %lu, will add %lu -> %p\n", ret, page->messages, (c_ulong)((u8 *)ret - (u8 *)page->messages), page->size_used, complete_msg_size, ((u8 *)ret) + complete_msg_size); */ if last_msg.is_null() && (*page).size_used != 0 - || ((ret as *mut u8).wrapping_sub((*page).messages.as_mut_ptr() as *mut u8 as usize)) - as c_ulong - != (*page).size_used + || ((ret as usize) - (*page).messages.as_mut_ptr() as usize) != (*page).size_used { panic!(format!("Allocated new message without calling send() inbetween. ret: {:?}, page: {:?}, complete_msg_size: {:?}, size_used: {:?}, last_msg: {:?}", ret, page, buf_len_padded, (*page).size_used, last_msg)); } - (*page).size_used = ((*page).size_used as c_ulong).wrapping_add(complete_msg_size) as c_ulong; - (*ret).buf_len_padded = buf_len_padded; - (*ret).buf_len = buf_len; + (*page).size_used = (*page).size_used + complete_msg_size; + (*ret).buf_len_padded = buf_len_padded as c_ulong; + (*ret).buf_len = buf_len as c_ulong; /* DBG("Returning new message at %p with len %ld, TAG was %x", ret, ret->buf_len_padded, ret->tag); */ /* Maybe catch some bugs... */ - (*_llmp_next_msg_ptr(ret)).tag = 0xdeadaf as c_uint; - (*ret).tag = 0xa143af11 as c_uint; - return ret; + (*_llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; + (*ret).tag = LLMP_TAG_UNINITIALIZED; + Some(ret) } -/* Commit the message last allocated by llmp_alloc_next to the queue. - After commiting, the msg shall no longer be altered! - It will be read by the consuming threads (broker->clients or client->broker) -*/ + +/// Commit the message last allocated by llmp_alloc_next to the queue. +/// After commiting, the msg shall no longer be altered! 
+/// It will be read by the consuming threads (broker->clients or client->broker) unsafe fn llmp_send(page: *mut LlmpPage, msg: *mut LlmpMsg) -> Result<(), AflError> { - if (*msg).tag == 0xdeadaf as c_uint { + if (*msg).tag == LLMP_TAG_UNSET as c_uint { panic!(format!( "No tag set on message with id {}", (*msg).message_id @@ -524,371 +467,203 @@ unsafe fn llmp_send(page: *mut LlmpPage, msg: *mut LlmpMsg) -> Result<(), AflErr ))); } compiler_fence(Ordering::SeqCst); - ::std::ptr::write_volatile( - &mut (*page).current_msg_id as *mut c_ulong, - (*msg).message_id as c_ulong, - ); - + ptr::write_volatile(&mut (*page).current_msg_id, (*msg).message_id); compiler_fence(Ordering::SeqCst); - return Ok(()); + Ok(()) } -#[inline] -unsafe fn _llmp_broker_current_broadcast_map(broker_state: *mut LlmpBroker) -> *mut AflShmem { - return &mut *(*broker_state).broadcast_maps.offset( - (*broker_state) - .broadcast_map_count - .wrapping_sub(1 as c_ulong) as isize, - ) as *mut AflShmem; -} -/* create a new shard page. Size_requested will be the min size, you may get a - * larger map. Retruns NULL on error. */ -unsafe fn llmp_new_page_shmem( - uninited_shmem: *mut AflShmem, - sender: c_ulong, - size_requested: c_ulong, -) -> *mut LlmpPage { - let size: c_ulong = next_pow2({ - let mut _a: c_ulong = size_requested.wrapping_add(40 as c_ulong); - let mut _b: c_ulong = ((1 as c_int) << 28 as c_int) as c_ulong; - if _a > _b { - _a - } else { - _b - } - }); - if afl_shmem_init(uninited_shmem, size).is_null() { - return 0 as *mut LlmpPage; - } - _llmp_page_init(shmem2page(uninited_shmem), sender as u32, size_requested); - return shmem2page(uninited_shmem); -} -/* This function handles EOP by creating a new shared page and informing the -listener about it using a EOP message. */ -unsafe fn llmp_handle_out_eop( - mut maps: *mut AflShmem, - map_count_p: *mut c_ulong, - last_msg_p: *mut *mut LlmpMsg, -) -> *mut AflShmem { - let map_count: u32 = *map_count_p as u32; - let mut old_map: *mut LlmpPage = - shmem2page(&mut *maps.offset(map_count.wrapping_sub(1 as c_uint) as isize)); - maps = afl_realloc( - maps as *mut c_void, - (map_count.wrapping_add(1 as c_uint) as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut AflShmem; - if maps.is_null() { - return 0 as *mut AflShmem; - } - /* Broadcast a new, large enough, message. Also sorry for that c ptr stuff! */ - let mut new_map: *mut LlmpPage = llmp_new_page_shmem( - &mut *maps.offset(map_count as isize), - (*old_map).sender as c_ulong, - new_map_size((*old_map).max_alloc_size), - ); - if new_map.is_null() { - afl_free(maps as *mut c_void); - return 0 as *mut AflShmem; - } - /* Realloc may have changed the location of maps_p (and old_map) in memory :/ - */ - old_map = shmem2page(&mut *maps.offset(map_count.wrapping_sub(1 as c_uint) as isize)); - *map_count_p = map_count.wrapping_add(1 as c_uint) as c_ulong; - ::std::ptr::write_volatile( - &mut (*new_map).current_msg_id as *mut c_ulong, - (*old_map).current_msg_id, - ); +/// listener about it using a EOP message. +unsafe fn llmp_handle_out_eop(llmp: &mut LlmpSender) -> Result<(), AflError> { + let map_count = llmp.out_maps.len(); + let mut old_map = llmp.out_maps.last_mut().unwrap().page(); + + // Create a new shard page. 
+ let mut new_map_shmem = LlmpPageWrapper::new((*old_map).sender, (*old_map).max_alloc_size)?; + let mut new_map = new_map_shmem.page(); + + ptr::write_volatile(&mut (*new_map).current_msg_id, (*old_map).current_msg_id); (*new_map).max_alloc_size = (*old_map).max_alloc_size; /* On the old map, place a last message linking to the new map for the clients * to consume */ - let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, *last_msg_p); + let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, llmp.last_msg_sent); (*out).sender = (*old_map).sender; - let mut new_page_msg: *mut LlmpPayloadNewPage = - (*out).buf.as_mut_ptr() as *mut LlmpPayloadNewPage; - /* copy the infos to the message we're going to send on the old buf */ - (*new_page_msg).map_size = (*maps.offset(map_count as isize)).map_size; - memcpy( - (*new_page_msg).shm_str.as_mut_ptr() as *mut c_void, - (*maps.offset(map_count as isize)).shm_str.as_mut_ptr() as *const c_void, - 20 as c_ulong, - ); + + let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + (*end_of_page_msg).map_size = new_map_shmem.shmem.map_size; + (*end_of_page_msg).shm_str = new_map_shmem.shmem.shm_str; + // We never sent a msg on the new buf */ - *last_msg_p = 0 as *mut LlmpMsg; + llmp.last_msg_sent = 0 as *mut LlmpMsg; + /* Send the last msg on the old buf */ - match llmp_send(old_map, out) { - Err(_e) => { - afl_free(maps as *mut c_void); - println!("Error sending message"); - 0 as *mut AflShmem - } - Ok(_) => maps, - } + llmp_send(old_map, out)?; + llmp.out_maps.push(new_map_shmem); + + Ok(()) } -/* no more space left! We'll have to start a new page */ -pub unsafe fn llmp_broker_handle_out_eop(broker: *mut LlmpBroker) -> AflRet { - (*broker).broadcast_maps = llmp_handle_out_eop( - (*broker).broadcast_maps, - &mut (*broker).broadcast_map_count, - &mut (*broker).last_msg_sent, - ); - return if !(*broker).broadcast_maps.is_null() { - AFL_RET_SUCCESS - } else { - AFL_RET_ALLOC - } as AflRet; -} -pub unsafe fn llmp_broker_alloc_next(broker: *mut LlmpBroker, len: c_ulong) -> *mut LlmpMsg { - let mut broadcast_page: *mut LlmpPage = shmem2page(_llmp_broker_current_broadcast_map(broker)); - let mut out: *mut LlmpMsg = llmp_alloc_next(broadcast_page, (*broker).last_msg_sent, len); - if out.is_null() { - /* no more space left! We'll have to start a new page */ - let ret: AflRet = llmp_broker_handle_out_eop(broker); - if ret != AFL_RET_SUCCESS as AflRet { - panic!("Error handling broker out EOP"); - } - /* llmp_handle_out_eop allocates a new current broadcast_map */ - broadcast_page = shmem2page(_llmp_broker_current_broadcast_map(broker)); - /* the alloc is now on a new page */ - out = llmp_alloc_next(broadcast_page, (*broker).last_msg_sent, len); - if out.is_null() { - panic!(format!( - "Error allocating {} bytes in shmap {:?}", - len, - (*_llmp_broker_current_broadcast_map(broker)) - .shm_str - .as_mut_ptr(), - )); - } + +pub unsafe fn llmp_broker_alloc_next( + broker: &LlmpBroker, + len: usize, +) -> Result<*mut LlmpMsg, AflError> { + match llmp_alloc_next(&mut broker.llmp_out, len) { + Some(msg) => return Ok(msg), + _ => (), + }; + + /* no more space left! 
We'll have to start a new page */ + llmp_handle_out_eop(&mut broker.llmp_out); + + match llmp_alloc_next(&mut broker.llmp_out, len) { + Some(msg) => Ok(msg), + None => Err(AflError::Unknown(format!( + "Error allocating {} bytes in shmap", + len + ))), } - return out; } impl LlmpBroker { /// Create and initialize a new llmp_broker pub unsafe fn new() -> Result { let mut broker = LlmpBroker { - last_msg_sent: ptr::null_mut(), - broadcast_map_count: 0, - broadcast_maps: ptr::null_mut(), - msg_hook_count: 0, - msg_hooks: ptr::null_mut(), - llmp_client_count: 0, - llmp_clients: ptr::null_mut(), + llmp_out: LlmpSender { + id: 0, + last_msg_sent: ptr::null_mut(), + out_maps: vec![LlmpPageWrapper::new(0, 0)?], + }, + msg_hooks: vec![], + llmp_clients: vec![], }; - llmp_broker_init(&mut broker)?; + Ok(broker) } /// Registers a new client for the given sharedmap str and size. - /// Be careful: Intenral realloc may change the location of the client map - unsafe fn register_client( - &mut self, - shm_str: &CStr, - map_size: c_ulong, - ) -> *mut LlmpBrokerClientMetadata { - /* make space for a new client and calculate its id */ - self.llmp_clients = afl_realloc( - self.llmp_clients as *mut c_void, - self.llmp_client_count - .wrapping_add(1 as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut LlmpBrokerClientMetadata; - if self.llmp_clients.is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - let mut client: *mut LlmpBrokerClientMetadata = - self.llmp_clients.offset(self.llmp_client_count as isize) - as *mut LlmpBrokerClientMetadata; - memset( - client as *mut c_void, - 0 as c_int, - ::std::mem::size_of::() as c_ulong, - ); - (*client).client_state = - calloc(1 as c_ulong, ::std::mem::size_of::() as c_ulong) as *mut LlmpClient; - if (*client).client_state.is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - (*(*client).client_state).id = (*self).llmp_client_count as u32; - (*client).cur_client_map = - calloc(1 as c_ulong, ::std::mem::size_of::() as c_ulong) as *mut AflShmem; - if (*client).cur_client_map.is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - if afl_shmem_by_str((*client).cur_client_map, shm_str, map_size).is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - self.llmp_client_count = self.llmp_client_count.wrapping_add(1); - // TODO: Add client map - return client; + /// Returns the id of the new client in broker.client_map + unsafe fn register_client(&mut self, client_page: LlmpPageWrapper) { + let id = self.llmp_clients.len() as u32; + self.llmp_clients.push(LlmpBrokerClientMetadata { + id, + cur_client_map: client_page, + last_msg_broker_read: 0 as *mut LlmpMsg, + clientloop: None, + data: 0 as *mut c_void, + }); } /// Adds a hook that gets called in the broker for each new message the broker touches. /// if the callback returns false, the message is not forwarded to the clients. */ - pub unsafe fn add_message_hook(&mut self, hook: LlmpMsgHookFn, data: *mut c_void) -> AflRet { - return llmp_add_hook_generic( - &mut self.msg_hooks, - &mut self.msg_hook_count, - ::std::mem::transmute::, *mut c_void>(Some(hook)), - data, + pub fn add_message_hook(&mut self, hook: LlmpMsgHookFn, data: *mut c_void) { + self.msg_hooks + .push(LlmpHookdataGeneric { func: hook, data }); + } + + /// For internal use: Forward the current message to the out map. 
+ unsafe fn forward_msg(&mut self, msg: *mut LlmpMsg) -> Result<(), AflError> { + let mut out: *mut LlmpMsg = llmp_broker_alloc_next(self, (*msg).buf_len_padded as usize)?; + + /* Copy over the whole message. + If we should need zero copy, we could instead post a link to the + original msg with the map_id and offset. */ + let actual_size = (*out).buf_len_padded; + memcpy( + out as *mut c_void, + msg as *const c_void, + size_of::() as c_ulong + (*msg).buf_len_padded as c_ulong, ); + (*out).buf_len_padded = actual_size; + /* We need to replace the message ID with our own */ + let out_page = self.llmp_out.out_maps.last().unwrap().page(); + (*out).message_id = (*out_page).current_msg_id + 1; + match llmp_send(out_page, out) { + Err(e) => panic!(format!("Error sending msg: {:?}", e)), + _ => (), + }; + self.llmp_out.last_msg_sent = out; + Ok(()) } /// broker broadcast to its own page for all others to read */ - #[inline] - unsafe fn handle_new_msgs(&mut self, mut client: *mut LlmpBrokerClientMetadata) { + unsafe fn handle_new_msgs( + &mut self, + client: &LlmpBrokerClientMetadata, + ) -> Result<(), AflError> { // TODO: We could memcpy a range of pending messages, instead of one by one. /* DBG("llmp_broker_handle_new_msgs %p %p->%u\n", broker, client, client->client_state->id); */ - let incoming: *mut LlmpPage = shmem2page((*client).cur_client_map); - let mut current_message_id: u32 = if !(*client).last_msg_broker_read.is_null() { - (*(*client).last_msg_broker_read).message_id + let incoming: *mut LlmpPage = client.cur_client_map.page(); + let mut current_message_id = if client.last_msg_broker_read.is_null() { + 0 } else { - 0 as c_uint + (*client.last_msg_broker_read).message_id }; - while current_message_id as c_ulong != (*incoming).current_msg_id { - let msg: *mut LlmpMsg = llmp_recv(incoming, (*client).last_msg_broker_read); - if msg.is_null() { - panic!("No message received but not all message ids receved! Data out of sync?"); - } - if (*msg).tag == 0xaf1e0f1 as c_uint { - let pageinfo: *mut LlmpPayloadNewPage = { - let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong { - (*_msg).buf.as_mut_ptr() - } else { - 0 as *mut u8 - }) as *mut LlmpPayloadNewPage - }; - if pageinfo.is_null() { + + while current_message_id != ptr::read_volatile(&(*incoming).current_msg_id) { + let msg = match llmp_recv(&client.cur_client_map, (*client).last_msg_broker_read) { + None => { + panic!("No message received but not all message ids receved! Data out of sync?") + } + Some(msg) => msg, + }; + if (*msg).tag == LLMP_TAG_END_OF_PAGE { + // Handle end of page + if (*msg).buf_len < size_of::() as u64 { panic!(format!( "Illegal message length for EOP (is {}, expected {})", (*msg).buf_len_padded, - ::std::mem::size_of::() as c_ulong + size_of::() )); } + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + /* We can reuse the map mem space, no need to free and calloc. However, the pageinfo points to the map we're about to unmap. - Copy the contents first. 
*/ - let mut pageinfo_cpy: LlmpPayloadNewPage = LlmpPayloadNewPage { - map_size: 0, - shm_str: [0; 20], - }; - memcpy( - &mut pageinfo_cpy as *mut LlmpPayloadNewPage as *mut c_void, - pageinfo as *const c_void, - ::std::mem::size_of::() as c_ulong, - ); - let client_map: *mut AflShmem = (*client).cur_client_map; - ::std::ptr::write_volatile( - &mut (*shmem2page(client_map)).save_to_unmap as *mut u16, - 1 as u16, - ); - afl_shmem_deinit(client_map); - if afl_shmem_by_str( - client_map, - CStr::from_bytes_with_nul(&(*pageinfo).shm_str).expect("Illegal shm_str"), - (*pageinfo).map_size, - ) - .is_null() - { - panic!(format!( - "Could not get shmem by str for map {:?} of size {:?}", - (*pageinfo).shm_str.as_mut_ptr(), - (*pageinfo).map_size - )); - } - } else if (*msg).tag == 0xc11e471 as c_uint { + Clone the contents first to be safe (probably fine in rust eitner way). */ + let mut pageinfo_cpy = (*pageinfo).clone(); + + let client_map = (*client).cur_client_map; + + ptr::write_volatile(&mut (*client_map.page()).save_to_unmap, 1); + client.cur_client_map = + LlmpPageWrapper::from_name_slice(&pageinfo_cpy.shm_str, pageinfo_cpy.map_size)?; + dbg!("Client got a new map", client.cur_client_map.shmem.shm_str); + } else if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT { /* This client informs us about yet another new client add it to the list! Also, no need to forward this msg. */ - let pageinfo: *mut LlmpPayloadNewPage = { - let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong { - (*_msg).buf.as_mut_ptr() - } else { - 0 as *mut u8 - }) as *mut LlmpPayloadNewPage - }; - if pageinfo.is_null() { - println!("Ignoring broken CLIENT_ADDED msg due to incorrect size. Expected {:?} but got {:?}", - ::std::mem::size_of::() as - c_ulong, (*msg).buf_len_padded); - } - /* register_client may realloc the clients, we need to find ours again */ - let client_id: u32 = (*(*client).client_state).id; - if self - .register_client( - CStr::from_bytes_with_nul(&(*pageinfo).shm_str).expect("Illegal shm_str"), - (*pageinfo).map_size, - ) - .is_null() - { - panic!(format!( - "Could not register clientprocess with shm_str {:?}", - (*pageinfo).shm_str.as_mut_ptr() - )); - } - (*client).client_type = LLMP_CLIENT_TYPE_FOREIGN_PROCESS; - /* find client again */ - client = - self.llmp_clients.offset(client_id as isize) as *mut LlmpBrokerClientMetadata - } else { - let mut forward_msg: bool = 1 as c_int != 0; - let mut i: c_ulong = 0; - while i < self.msg_hook_count { - let msg_hook: *mut LlmpHookdataGeneric = - self.msg_hooks.offset(i as isize) as *mut LlmpHookdataGeneric; - forward_msg = forward_msg as c_int != 0 - && ::std::mem::transmute::<*mut c_void, Option>( - (*msg_hook).func, - ) - .expect("non-null function pointer")( - self, client, msg, (*msg_hook).data - ) as c_int - != 0; - if !llmp_msg_in_page(shmem2page((*client).cur_client_map), msg) { - /* Special handling in case the client got exchanged inside the message_hook, for example after a crash. */ - return; - } - i = i.wrapping_add(1) - } - if forward_msg { - let mut out: *mut LlmpMsg = llmp_broker_alloc_next(self, (*msg).buf_len_padded); - if out.is_null() { - panic!(format!( - "Error allocating {} bytes in shmap {:?}", - (*msg).buf_len_padded, - (*_llmp_broker_current_broadcast_map(self)) - .shm_str - .as_mut_ptr(), - )); - } - /* Copy over the whole message. - If we should need zero copy, we could instead post a link to the - original msg with the map_id and offset. 
*/ - let actual_size: c_ulong = (*out).buf_len_padded; - memcpy( - out as *mut c_void, - msg as *const c_void, - (::std::mem::size_of::() as c_ulong) - .wrapping_add((*msg).buf_len_padded), + if (*msg).buf_len < size_of::() as u64 { + println!("Ignoring broken CLIENT_ADDED msg due to incorrect size. Expected {} but got {}", + (*msg).buf_len_padded, + size_of::() ); - (*out).buf_len_padded = actual_size; - /* We need to replace the message ID with our own */ - let out_page: *mut LlmpPage = - shmem2page(_llmp_broker_current_broadcast_map(self)); - (*out).message_id = - (*out_page).current_msg_id.wrapping_add(1 as c_ulong) as u32; - match llmp_send(out_page, out) { - Err(e) => panic!(format!("Error sending msg: {:?}", e)), - _ => (), + } else { + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + + let client_id: u32 = client.id; + match LlmpPageWrapper::from_name_slice( + &(*pageinfo).shm_str, + (*pageinfo).map_size, + ) { + Ok(new_page) => self.register_client(new_page), + Err(e) => println!("Error adding client! {:?}", e), }; - self.last_msg_sent = out + } + } else { + // The message is not specifically for use. Let the user handle it, then forward it to the clients, if necessary. + let mut should_forward_msg = true; + for hook in self.msg_hooks { + match (hook.func)(&self, client, msg, hook.data) { + LlmpMsgHookResult::Handled => should_forward_msg = false, + _ => (), + } + } + if should_forward_msg { + self.forward_msg(msg); } } (*client).last_msg_broker_read = msg; current_message_id = (*msg).message_id } + Ok(()) } /// The broker walks all pages and looks for changes, then broadcasts them on @@ -896,11 +671,8 @@ impl LlmpBroker { pub unsafe fn once(&mut self) { compiler_fence(Ordering::SeqCst); let mut i: u32 = 0; - while (i as c_ulong) < self.llmp_client_count { - let client: *mut LlmpBrokerClientMetadata = - self.llmp_clients.offset(i as isize) as *mut LlmpBrokerClientMetadata; - self.handle_new_msgs(client); - i = i.wrapping_add(1) + for client in self.llmp_clients { + self.handle_new_msgs(&client); } } @@ -910,166 +682,17 @@ impl LlmpBroker { loop { compiler_fence(Ordering::SeqCst); self.once(); + /* 5 milis of sleep for now to not busywait at 100% */ - usleep((5 as c_int * 1000 as c_int) as c_uint); + thread::sleep(time::Duration::from_millis(5)); } } - - /// launch a specific client. This function doesn't need to be called externally - all registered clients will get launched at broker_run - unsafe fn launch_client(&mut self, mut clientdata: *mut LlmpBrokerClientMetadata) -> bool { - if clientdata < self.llmp_clients - || clientdata - > self - .llmp_clients - .offset(self.llmp_client_count.wrapping_sub(1 as c_ulong) as isize) - as *mut LlmpBrokerClientMetadata - { - println!( - "[!] WARNING: Illegal client specified at ptr {:?} (instead of {:?} to {:?})", - clientdata, - self.llmp_clients, - self.llmp_clients - .offset(self.llmp_client_count.wrapping_sub(1 as c_ulong) as isize,) - as *mut LlmpBrokerClientMetadata, - ); - return 0 as c_int != 0; - } - if (*clientdata).client_type as c_uint == LLMP_CLIENT_TYPE_CHILD_PROCESS as c_int as c_uint - { - if (*clientdata).pid != 0 { - println!("[!] WARNING: Tried to relaunch already running client. Set ->pid to 0 if this is what you want."); - return 0 as c_int != 0; - } - let child_id: c_int = fork(); - if child_id < 0 as c_int { - println!("[!] 
WARNING: Could not fork"); - return 0 as c_int != 0; - } else { - if child_id == 0 as c_int { - /* child */ - _llmp_client_wrapped_loop(clientdata as *mut c_void); - } - } - /* parent */ - (*clientdata).pid = child_id; - return 1 as c_int != 0; - } else { - println!("[!] WARNING: Tried to spawn llmp child with unknown thread type."); - return 0 as c_int != 0; - } - //return 1 as c_int != 0; - } - - /// Launches all clientloops registered with this broker - pub unsafe fn launch_clientloops(&mut self) -> Result<(), AflError> { - let mut i: c_ulong = 0; - while i < self.llmp_client_count { - if (*self.llmp_clients.offset(i as isize)).client_type as c_uint - == LLMP_CLIENT_TYPE_CHILD_PROCESS as c_uint - { - if !self.launch_client(self.llmp_clients.offset(i as isize)) { - println!("[!] WARNING: Could not launch all clients"); - return Err(AflError::Unknown("Failed to launch clients".into())); - } - } - i = i.wrapping_add(1) - } - Ok(()) - } - - /// Start all threads and the main broker. - /// Same as llmp_broker_launch_threaded clients(); - /// Never returns. - pub unsafe fn run(&mut self) -> ! { - self.launch_clientloops().expect("Failed to launch clients"); - self.broker_loop(); - } - - /// Register a new forked/child client. - /// Client thread will be called with llmp_client client, containing - /// the data in ->data. This will register a client to be spawned up as soon as - /// broker_loop() starts. Clients can also be added later via - /// llmp_broker_register_remote(..) or the local_tcp_client - /// TODO: TCP remote client not yet supported in rust llmp - pub unsafe fn register_childprocess_clientloop( - &mut self, - clientloop: LlmpClientloopFn, - data: *mut c_void, - ) -> Result<(), AflError> { - let mut client_map: AflShmem = { - let init = AflShmem { - shm_str: [0; 20], - shm_id: 0, - map: 0 as *mut u8, - map_size: 0, - }; - init - }; - if llmp_new_page_shmem( - &mut client_map, - self.llmp_client_count, - ((1 as c_int) << 28 as c_int) as c_ulong, - ) - .is_null() - { - return Err(AflError::Unknown("Alloc".into())); - } - let mut client: *mut LlmpBrokerClientMetadata = self.register_client( - CStr::from_ptr(&client_map.shm_str as *const u8 as *const c_char), - client_map.map_size, - ); - if client.is_null() { - afl_shmem_deinit(&mut client_map); - return Err(AflError::Unknown("Something in clients failed".into())); - } - (*client).clientloop = Some(clientloop); - (*client).data = data; - (*client).client_type = LLMP_CLIENT_TYPE_CHILD_PROCESS; - /* Copy the already allocated shmem to the client state */ - (*(*client).client_state).out_maps = afl_realloc( - (*(*client).client_state).out_maps as *mut c_void, - ::std::mem::size_of::() as c_ulong, - ) as *mut AflShmem; - if (*(*client).client_state).out_maps.is_null() { - afl_shmem_deinit(&mut client_map); - afl_shmem_deinit((*client).cur_client_map); - /* "Unregister" by subtracting the client from count */ - self.llmp_client_count = self.llmp_client_count.wrapping_sub(1); - return Err(AflError::Unknown("Something in clients failed".into())); - } - memcpy( - (*(*client).client_state).out_maps as *mut c_void, - &mut client_map as *mut AflShmem as *const c_void, - ::std::mem::size_of::() as c_ulong, - ); - (*(*client).client_state).out_map_count = 1 as c_ulong; - /* Each client starts with the very first map. - They should then iterate through all maps once and work on all old messages. 
- */ - (*(*client).client_state).current_broadcast_map = - self.broadcast_maps.offset(0 as isize) as *mut AflShmem; - (*(*client).client_state).out_map_count = 1 as c_ulong; - return Ok(()); - } } /// A new page will be used. Notify each registered hook in the client about this fact. -unsafe fn llmp_clientrigger_new_out_page_hooks(client: *mut LlmpClient) { - let mut i: c_ulong = 0; - while i < (*client).new_out_page_hook_count { - ::std::mem::transmute::<*mut c_void, Option>( - (*(*client).new_out_page_hooks.offset(i as isize)).func, - ) - .expect("non-null function pointer")( - client, - shmem2page( - &mut *(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1 as c_ulong) as isize), - ), - (*(*client).new_out_page_hooks.offset(i as isize)).data, - ); - i = i.wrapping_add(1) +unsafe fn llmp_clien_trigger_new_out_page_hooks(client: &LlmpClient) { + for hook in client.new_out_page_hooks { + (hook.func)(client); } } @@ -1078,7 +701,7 @@ unsafe fn _llmp_client_wrapped_loop(llmp_client_broker_metadata_ptr: *mut c_void let metadata: *mut LlmpBrokerClientMetadata = llmp_client_broker_metadata_ptr as *mut LlmpBrokerClientMetadata; /* Before doing anything else:, notify registered hooks about the new page we're about to use */ - llmp_clientrigger_new_out_page_hooks((*metadata).client_state); + llmp_clien_trigger_new_out_page_hooks((*metadata).client_state); (*metadata).clientloop.expect("non-null function pointer")( (*metadata).client_state, @@ -1129,7 +752,7 @@ unsafe fn llmp_client_handle_out_eop(client: *mut LlmpClient) -> bool { */ llmp_client_prune_old_pages(client); /* So we got a new page. Inform potential hooks */ - llmp_clientrigger_new_out_page_hooks(client); + llmp_clien_trigger_new_out_page_hooks(client); return 1 as c_int != 0; } @@ -1145,37 +768,38 @@ pub unsafe fn llmp_client_recv(client: *mut LlmpClient) -> *mut LlmpMsg { return 0 as *mut LlmpMsg; } (*client).last_msg_recvd = msg; - if (*msg).tag == 0xdeadaf as c_uint { + if (*msg).tag == LLMP_TAG_UNSET as c_uint { panic!("BUG: Read unallocated msg"); } else { - if (*msg).tag == 0xaf1e0f1 as c_uint { + if (*msg).tag == LLMP_TAG_END_OF_PAGE as c_uint { /* we reached the end of the current page. We'll init a new page but can reuse the mem are of the current map. 
However, we cannot use the message if we deinit its page, so let's copy */ - let mut pageinfo_cpy: LlmpPayloadNewPage = LlmpPayloadNewPage { + let mut pageinfo_cpy: LlmpPayloadSharedMap = LlmpPayloadSharedMap { map_size: 0, shm_str: [0; 20], }; let broadcast_map: *mut AflShmem = (*client).current_broadcast_map; - let pageinfo: *mut LlmpPayloadNewPage = { + let pageinfo: *mut LlmpPayloadSharedMap = { let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong { + (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong + { (*_msg).buf.as_mut_ptr() } else { 0 as *mut u8 - }) as *mut LlmpPayloadNewPage + }) as *mut LlmpPayloadSharedMap }; if pageinfo.is_null() { panic!(format!( "Illegal message length for EOP (is {}, expected {})", (*msg).buf_len_padded, - ::std::mem::size_of::() as c_ulong + ::std::mem::size_of::() as c_ulong )); } memcpy( - &mut pageinfo_cpy as *mut LlmpPayloadNewPage as *mut c_void, + &mut pageinfo_cpy as *mut LlmpPayloadSharedMap as *mut c_void, pageinfo as *const c_void, - ::std::mem::size_of::() as c_ulong, + ::std::mem::size_of::() as c_ulong, ); /* Never read by broker broker: shmem2page(map)->save_to_unmap = true; */ afl_shmem_deinit(broadcast_map); @@ -1253,7 +877,7 @@ pub unsafe fn llmp_client_alloc_next(client: *mut LlmpClient, size: usize) -> *m )) .messages .as_mut_ptr()) - .tag != 0xdeadaf as c_uint + .tag != LLMP_TAG_UNSET as c_uint { panic!("Error in handle_out_eop"); } @@ -1292,7 +916,7 @@ pub unsafe fn llmp_client_cancel(client: *mut LlmpClient, mut msg: *mut LlmpMsg) .out_maps .offset((*client).out_map_count.wrapping_sub(1 as c_ulong) as isize), ); - (*msg).tag = 0xdeadaf as c_uint; + (*msg).tag = LLMP_TAG_UNSET as c_uint; (*page).size_used = ((*page).size_used as c_ulong).wrapping_sub( (*msg) .buf_len_padded @@ -1336,7 +960,7 @@ pub unsafe fn llmp_client_new_unconnected() -> *mut LlmpClient { if llmp_new_page_shmem( &mut *(*client_state).out_maps.offset(0 as isize), (*client_state).id as c_ulong, - ((1 as c_int) << 28 as c_int) as c_ulong, + LLMP_INITIAL_MAP_SIZE, ) .is_null() { @@ -1375,35 +999,6 @@ impl Drop for LlmpClient { } } -/// Generic function to add a hook to the mem pointed to by hooks_p, using afl_realloc on the mem area, and increasing -/// hooks_count_p -pub unsafe fn llmp_add_hook_generic( - hooks_p: *mut *mut LlmpHookdataGeneric, - hooks_count_p: *mut c_ulong, - new_hook_func: *mut c_void, - new_hook_data: *mut c_void, -) -> AflRet { - let hooks_count: c_ulong = *hooks_count_p; - let hooks: *mut LlmpHookdataGeneric = afl_realloc( - *hooks_p as *mut c_void, - hooks_count - .wrapping_add(1 as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut LlmpHookdataGeneric; - if hooks.is_null() { - *hooks_p = 0 as *mut LlmpHookdataGeneric; - *hooks_count_p = 0 as c_ulong; - return AFL_RET_ALLOC; - } - let ref mut fresh9 = (*hooks.offset(hooks_count as isize)).func; - *fresh9 = new_hook_func; - let ref mut fresh10 = (*hooks.offset(hooks_count as isize)).data; - *fresh10 = new_hook_data; - *hooks_p = hooks; - *hooks_count_p = hooks_count.wrapping_add(1 as c_ulong); - return AFL_RET_SUCCESS; -} - /// Adds a hook that gets called in the client for each new outgoing page the client creates. pub unsafe fn llmp_client_add_new_out_page_hook( client: *mut LlmpClient, @@ -1444,35 +1039,3 @@ impl Drop for LlmpBroker { unsafe { llmp_broker_deinit(self) }; } } - -/// Allocate and set up the new broker instance. Afterwards, run with broker_run. -/// Use llmp_broker::new instead. 
-unsafe fn llmp_broker_init(broker: *mut LlmpBroker) -> Result<(), AflError> { - memset( - broker as *mut c_void, - 0 as c_int, - ::std::mem::size_of::() as c_ulong, - ); - /* let's create some space for outgoing maps */ - (*broker).broadcast_maps = afl_realloc( - 0 as *mut c_void, - (1 as c_ulong).wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut AflShmem; - if (*broker).broadcast_maps.is_null() { - return Err(AflError::Unknown("Alloc".into())); - } - (*broker).broadcast_map_count = 1 as c_ulong; - (*broker).llmp_client_count = 0 as c_ulong; - (*broker).llmp_clients = 0 as *mut LlmpBrokerClientMetadata; - if llmp_new_page_shmem( - _llmp_broker_current_broadcast_map(broker), - -(1 as c_int) as c_ulong, - ((1 as c_int) << 28 as c_int) as c_ulong, - ) - .is_null() - { - afl_free((*broker).broadcast_maps as *mut c_void); - return Err(AflError::Unknown("Alloc".into())); - } - return Ok(()); -} diff --git a/afl/src/events/shmem_translated.rs b/afl/src/events/shmem_translated.rs index 30552e9dad..cbf2866bde 100644 --- a/afl/src/events/shmem_translated.rs +++ b/afl/src/events/shmem_translated.rs @@ -1,5 +1,5 @@ use libc::{c_char, c_int, c_long, c_uchar, c_uint, c_ulong, c_ushort, c_void}; -use std::ffi::CStr; +use std::{ffi::CStr, mem::size_of}; use crate::AflError; @@ -64,7 +64,7 @@ pub struct AflShmem { pub shm_str: [u8; 20], pub shm_id: c_int, pub map: *mut c_uchar, - pub map_size: c_ulong, + pub map_size: usize, } /// Deinit on drop @@ -87,9 +87,9 @@ const fn afl_shmem_unitialized() -> AflShmem { } impl AflShmem { - fn from_str(shm_str: &CStr, map_size: c_ulong) -> Result { + pub fn from_str(shm_str: &CStr, map_size: usize) -> Result { let mut ret = afl_shmem_unitialized(); - let map = unsafe { afl_shmem_init(&mut ret, map_size) }; + let map = unsafe { afl_shmem_by_str(&mut ret, shm_str, map_size) }; if map != 0 as *mut u8 { Ok(ret) } else { @@ -100,7 +100,13 @@ impl AflShmem { } } - fn new(map_size: c_ulong) -> Result { + /// Generate a shared map with a fixed byte array of 20 + pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + let str_bytes = shm_str as *const [u8; 20] as *const libc::c_char; + Self::from_str(CStr::from_ptr(str_bytes), map_size) + } + + pub fn new(map_size: usize) -> Result { let mut ret = afl_shmem_unitialized(); let map = unsafe { afl_shmem_init(&mut ret, map_size) }; if map != 0 as *mut u8 { @@ -115,7 +121,7 @@ impl AflShmem { /// Sets this shm id as env variable with the given name /// Also write the map size as name#_SIZE env - fn to_env_var(&self, env_name: &CStr) -> Result<(), AflError> { + pub fn to_env_var(&self, env_name: &CStr) -> Result<(), AflError> { if unsafe { afl_shmem_to_env_var(&self, env_name) } == AFL_RET_SUCCESS { Ok(()) } else { @@ -141,12 +147,12 @@ pub unsafe fn afl_shmem_deinit(shm: *mut AflShmem) { /// Functions to create Shared memory region, for observation channels and /// opening inputs and stuff. 
-pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: c_ulong) -> *mut c_uchar { +pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: usize) -> *mut c_uchar { (*shm).map_size = map_size; (*shm).map = 0 as *mut c_uchar; (*shm).shm_id = shmget( 0 as c_int, - map_size, + map_size as c_ulong, 0o1000 as c_int | 0o2000 as c_int | 0o600 as c_int, ); if (*shm).shm_id < 0 as c_int { @@ -155,12 +161,13 @@ pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: c_ulong) -> *mut c_uc } snprintf( (*shm).shm_str.as_mut_ptr() as *mut i8, - ::std::mem::size_of::<[c_char; 20]>() as c_ulong, + size_of::<[c_char; 20]>() as c_ulong, b"%d\x00" as *const u8 as *const c_char, (*shm).shm_id, ); - (*shm).shm_str[(::std::mem::size_of::<[c_char; 20]>() as c_ulong) - .wrapping_sub(1 as c_int as c_ulong) as usize] = '\u{0}' as u8; + (*shm).shm_str + [(size_of::<[c_char; 20]>() as c_ulong).wrapping_sub(1 as c_int as c_ulong) as usize] = + '\u{0}' as u8; (*shm).map = shmat((*shm).shm_id, 0 as *const c_void, 0 as c_int) as *mut c_uchar; if (*shm).map == -(1 as c_int) as *mut c_void as *mut c_uchar || (*shm).map.is_null() { shmctl((*shm).shm_id, 0 as c_int, 0 as *mut shmid_ds); @@ -175,7 +182,7 @@ pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: c_ulong) -> *mut c_uc pub unsafe fn afl_shmem_by_str( shm: *mut AflShmem, shm_str: &CStr, - map_size: c_ulong, + map_size: usize, ) -> *mut c_uchar { if shm.is_null() || shm_str.to_bytes().len() == 0 || map_size == 0 { return 0 as *mut c_uchar; @@ -185,7 +192,7 @@ pub unsafe fn afl_shmem_by_str( strncpy( (*shm).shm_str.as_mut_ptr() as *mut c_char, shm_str.as_ptr() as *const c_char, - (::std::mem::size_of::<[c_char; 20]>() as c_ulong).wrapping_sub(1 as c_int as c_ulong), + (size_of::<[c_char; 20]>() as c_ulong).wrapping_sub(1 as c_int as c_ulong), ); (*shm).shm_id = shm_str .to_str() @@ -195,8 +202,8 @@ pub unsafe fn afl_shmem_by_str( (*shm).map = shmat((*shm).shm_id, 0 as *const c_void, 0 as c_int) as *mut c_uchar; if (*shm).map == -(1 as c_int) as *mut c_void as *mut c_uchar { (*shm).map = 0 as *mut c_uchar; - (*shm).map_size = 0 as c_int as c_ulong; - (*shm).shm_str[0 as c_int as usize] = '\u{0}' as u8; + (*shm).map_size = 0; + (*shm).shm_str[0] = '\u{0}' as u8; return 0 as *mut c_uchar; } return (*shm).map; @@ -211,7 +218,7 @@ pub unsafe fn afl_shmem_to_env_var(shmem: &AflShmem, env_name: &CStr) -> c_uint let mut shm_str: [c_char; 256] = [0; 256]; snprintf( shm_str.as_mut_ptr(), - ::std::mem::size_of::<[c_char; 256]>() as c_ulong, + size_of::<[c_char; 256]>() as c_ulong, b"%d\x00" as *const u8 as *const c_char, (*shmem).shm_id, ); @@ -227,13 +234,13 @@ pub unsafe fn afl_shmem_to_env_var(shmem: &AflShmem, env_name: &CStr) -> c_uint let mut size_env_name: [c_char; 256] = [0; 256]; snprintf( size_env_name.as_mut_ptr(), - ::std::mem::size_of::<[c_char; 256]>() as c_ulong, + size_of::<[c_char; 256]>() as c_ulong, b"%s_SIZE\x00" as *const u8 as *const c_char, env_name, ); snprintf( shm_str.as_mut_ptr(), - ::std::mem::size_of::<[c_char; 256]>() as c_ulong, + size_of::<[c_char; 256]>() as c_ulong, b"%d\x00" as *const u8 as *const c_char, (*shmem).shm_id, ); diff --git a/afl/src/lib.rs b/afl/src/lib.rs index c19f048093..1c1f204600 100644 --- a/afl/src/lib.rs +++ b/afl/src/lib.rs @@ -3,6 +3,10 @@ #[macro_use] extern crate alloc; +#[cfg_attr(feature = "std")] +#[macro_use] +extern crate memoffset; // for offset_of + pub mod corpus; pub mod engines; pub mod events; diff --git a/afl/src/metamap.rs b/afl/src/metamap.rs index 979e641fcf..4bac85604d 100644 --- 
a/afl/src/metamap.rs +++ b/afl/src/metamap.rs @@ -307,10 +307,7 @@ impl NamedAnyMap { if !self.map.contains_key(&typeid) { self.map.insert(typeid, HashMap::default()); } - self.map - .get_mut(&typeid) - .unwrap() - .insert(name, Box::new(t)); + self.map.get_mut(&typeid).unwrap().insert(name, Box::new(t)); } pub fn len(&self) -> usize {
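
Editorial note (not part of the patch): the two sizing helpers introduced at the top of llmp_translated.rs, LLMP_PAGE_HEADER_LEN computed via memoffset's offset_of! and the 64-byte rounding in llmp_align, decide how large each new shared map has to be. The following stand-alone Rust sketch illustrates that arithmetic under stated assumptions: PageHeader is a trimmed stand-in for the real LlmpPage, align_64 mirrors llmp_align, and the only external dependency is the memoffset crate that this diff adds to Cargo.toml. The names here are illustrative, not the crate's API.

// Stand-alone sketch of the page-sizing math used by the patch.
// Assumes the `memoffset` crate (added to Cargo.toml in this diff).
use memoffset::offset_of;

#[repr(C)]
struct PageHeader {
    sender: u32,
    current_msg_id: u64,
    size_total: usize,
    size_used: usize,
    max_alloc_size: usize,
    messages: [u8; 0], // message payload starts here
}

/// Round a size up to the next multiple of 64 bytes (mirrors `llmp_align`).
const fn align_64(to_align: usize) -> usize {
    let rem = to_align % 64;
    if rem == 0 {
        to_align
    } else {
        to_align + 64 - rem
    }
}

fn main() {
    // Header bytes that precede the first message (mirrors LLMP_PAGE_HEADER_LEN).
    let header_len = offset_of!(PageHeader, messages);
    println!("header length: {} bytes", header_len);
    println!("align_64(70) = {}", align_64(70)); // 70 rounds up to 128
}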
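The patch also replaces the raw *mut c_void hook lists (grown with afl_realloc and transmuted on every call) with typed Vec<LlmpHookdataGeneric<...>> fields. The sketch below is an editorial reduction of that pattern with hypothetical names (HookData, Broker, MsgHookFn, a bool return instead of LlmpMsgHookResult); it is not the crate's API, only an illustration of why the generic field removes the transmute.

// Sketch of the typed hook-list pattern this patch moves to (illustrative names only).
// Instead of storing callbacks as `*mut c_void` and transmuting before each call,
// the function-pointer type lives in a generic field.
use std::os::raw::c_void;

#[derive(Copy, Clone)]
struct HookData<T> {
    func: T,
    data: *mut c_void,
}

type MsgHookFn = fn(msg: &str, data: *mut c_void) -> bool;

struct Broker {
    msg_hooks: Vec<HookData<MsgHookFn>>,
}

impl Broker {
    fn add_message_hook(&mut self, func: MsgHookFn, data: *mut c_void) {
        self.msg_hooks.push(HookData { func, data });
    }

    /// A message is forwarded only if every hook votes to forward it.
    fn dispatch(&self, msg: &str) -> bool {
        self.msg_hooks.iter().all(|h| (h.func)(msg, h.data))
    }
}

fn main() {
    let mut broker = Broker { msg_hooks: vec![] };
    broker.add_message_hook(|msg, _data| !msg.is_empty(), std::ptr::null_mut());
    assert!(broker.dispatch("hello"));
}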