From 4a65e25defc29dc05bdc9a3defd25a27a9f0817c Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 01:44:15 +0100 Subject: [PATCH 01/17] started llmp reworking --- afl/Cargo.toml | 1 + afl/src/events/llmp_translated.rs | 1205 +++++++++------------------- afl/src/events/shmem_translated.rs | 43 +- afl/src/lib.rs | 4 + afl/src/metamap.rs | 5 +- 5 files changed, 415 insertions(+), 843 deletions(-) diff --git a/afl/Cargo.toml b/afl/Cargo.toml index 83accdea15..4eb56e00d5 100644 --- a/afl/Cargo.toml +++ b/afl/Cargo.toml @@ -34,3 +34,4 @@ xxhash-rust = { version = "0.8.0", features = ["xxh3"] } # xxh3 hashing for rust serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib erased-serde = "0.3.12" postcard = "0.5.1" # no_std compatible serde serialization fromat +memoffset = "0.6" # for offset_of support \ No newline at end of file diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp_translated.rs index 4a869b17b8..df0b6a96bb 100644 --- a/afl/src/events/llmp_translated.rs +++ b/afl/src/events/llmp_translated.rs @@ -50,28 +50,34 @@ Then register some clientloops using llmp_broker_register_threaded_clientloop use ::libc; -use core::ffi::c_void; use core::ptr; use core::sync::atomic::{compiler_fence, Ordering}; +use core::{ffi::c_void, time}; use libc::{c_int, c_uint, c_ulong, c_ushort}; -use std::{ffi::CStr, os::raw::c_char}; +use std::{cmp::max, ffi::CStr, mem::size_of, os::raw::c_char, thread}; use crate::utils::next_pow2; use crate::AflError; use super::shmem_translated::{afl_shmem_by_str, afl_shmem_deinit, afl_shmem_init, AflShmem}; +/// The header length of a llmp page in a shared map (until messages start) +const LLMP_PAGE_HEADER_LEN: usize = offset_of!(LlmpPage, messages); + +/// We'll start off with 256 megabyte maps per fuzzer +const LLMP_INITIAL_MAP_SIZE: usize = 1 << 28; + +/// A msg fresh from the press: No tag got sent by the user yet +const LLMP_TAG_UNSET: u32 = 0xdeadaf; +/// This message 
should not exist yet. Some bug in unsafe code! +const LLMP_TAG_UNINITIALIZED: u32 = 0xa143af11; +/// The end of page mesasge +/// When receiving this, a new sharedmap needs to be allocated. +const LLMP_TAG_END_OF_PAGE: u32 = 0xaf1e0f1; +/// A new client for this broekr got added. +const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xc11e471; + extern "C" { - #[no_mangle] - fn usleep(__useconds: c_uint) -> c_int; - #[no_mangle] - fn fork() -> c_int; - #[no_mangle] - fn calloc(_: c_ulong, _: c_ulong) -> *mut c_void; - #[no_mangle] - fn realloc(_: *mut c_void, _: c_ulong) -> *mut c_void; - #[no_mangle] - fn free(__ptr: *mut c_void); #[no_mangle] fn memcpy(_: *mut c_void, _: *const c_void, _: c_ulong) -> *mut c_void; #[no_mangle] @@ -84,67 +90,90 @@ pub type AflRet = c_uint; pub const AFL_RET_ALLOC: AflRet = 3; pub const AFL_RET_SUCCESS: AflRet = 0; -/* AFL alloc buffer, the struct is here so we don't need to do fancy ptr - * arithmetics */ -#[derive(Copy, Clone)] -#[repr(C)] -pub struct AflAllocBuf { - pub complete_size: c_ulong, - pub magic: c_ulong, - pub buf: [u8; 0], +#[derive(Clone)] +pub struct LlmpSender { + pub id: u32, + pub last_msg_sent: *mut LlmpMsg, + pub out_maps: Vec, } #[derive(Clone)] -#[repr(C)] pub struct LlmpClient { - pub id: u32, + pub llmp_out: LlmpSender, pub last_msg_recvd: *mut LlmpMsg, - pub current_broadcast_map: *mut AflShmem, + pub current_broadcast_map: LlmpPageWrapper, pub last_msg_sent: *mut LlmpMsg, - pub out_map_count: c_ulong, - pub out_maps: *mut AflShmem, - pub new_out_page_hook_count: c_ulong, - pub new_out_page_hooks: *mut LlmpHookdataGeneric, + pub out_maps: Vec, + pub new_out_page_hooks: Vec>, +} + +#[derive(Clone)] +struct LlmpPageWrapper { + shmem: AflShmem, +} + +/// The page struct, placed on a shared mem instance. 
+impl LlmpPageWrapper { + /// Creates a new page with minimum prev_max_alloc_size or LLMP_INITIAL_MAP_SIZE + /// returning the initialized shared mem struct + unsafe fn new(sender: u32, min_size: usize) -> Result { + // Create a new shard page. + let mut shmem = AflShmem::new(new_map_size(min_size))?; + _llmp_page_init(&mut shmem, sender); + Ok(Self { shmem }) + } + + /// Initialize from a 0-terminated sharedmap id string and its size + unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result { + let shmem = AflShmem::from_str(shm_str, map_size)?; + // Not initializing the page here - the other side should have done it already! + Ok(Self { shmem }) + } + + /// Initialize from a shm_str with fixed len of 20 + unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + let shmem = AflShmem::from_name_slice(shm_str, map_size)?; + // Not initializing the page here - the other side should have done it already! + Ok(Self { shmem }) + } + + unsafe fn page(&self) -> *mut LlmpPage { + shmem2page(&self.shmem) + } } #[derive(Copy, Clone)] #[repr(C)] -pub struct LlmpHookdataGeneric { - pub func: *mut c_void, +pub struct LlmpHookdataGeneric { + pub func: T, pub data: *mut c_void, } #[derive(Copy, Clone)] #[repr(C, packed)] pub struct LlmpMsg { - pub tag: c_uint, - pub sender: c_uint, - pub message_id: c_uint, - pub buf_len: c_ulong, - pub buf_len_padded: c_ulong, + pub tag: u32, + pub sender: u32, + pub message_id: u64, + pub buf_len: u64, + pub buf_len_padded: u64, pub buf: [u8; 0], } #[derive(Clone)] #[repr(C)] pub struct LlmpBroker { - pub last_msg_sent: *mut LlmpMsg, - pub broadcast_map_count: c_ulong, - pub broadcast_maps: *mut AflShmem, - pub msg_hook_count: c_ulong, - pub msg_hooks: *mut LlmpHookdataGeneric, - pub llmp_client_count: c_ulong, - pub llmp_clients: *mut LlmpBrokerClientMetadata, + pub llmp_out: LlmpSender, + pub msg_hooks: Vec>, + pub llmp_clients: Vec, } -#[derive(Copy, Clone)] +#[derive(Clone)] #[repr(C)] pub struct 
LlmpBrokerClientMetadata { - pub client_type: LlmpClientType, - pub client_state: *mut LlmpClient, - pub cur_client_map: *mut AflShmem, + pub id: u32, + pub cur_client_map: LlmpPageWrapper, pub last_msg_broker_read: *mut LlmpMsg, - pub pid: c_int, pub clientloop: Option, pub data: *mut c_void, } @@ -152,11 +181,6 @@ pub struct LlmpBrokerClientMetadata { /// The client loop, running for each spawned client pub type LlmpClientloopFn = unsafe fn(client: *mut LlmpClient, data: *mut c_void) -> !; -/// Client type enum (TODO: Enumize) -type LlmpClientType = c_uint; -const LLMP_CLIENT_TYPE_FOREIGN_PROCESS: LlmpClientType = 3; -const LLMP_CLIENT_TYPE_CHILD_PROCESS: LlmpClientType = 2; - /// A share mem page, as used by llmp internally #[derive(Copy, Clone)] #[repr(C, packed)] @@ -164,10 +188,10 @@ pub struct LlmpPage { pub sender: u32, pub save_to_unmap: c_ushort, pub sender_dead: c_ushort, - pub current_msg_id: c_ulong, - pub c_ulongotal: c_ulong, - pub size_used: c_ulong, - pub max_alloc_size: c_ulong, + pub current_msg_id: u64, + pub size_total: usize, + pub size_used: usize, + pub max_alloc_size: usize, pub messages: [LlmpMsg; 0], } @@ -181,337 +205,256 @@ pub enum LlmpMsgHookResult { /// Message Hook pub type LlmpMsgHookFn = unsafe fn( - _: *mut LlmpBroker, - _: *mut LlmpBrokerClientMetadata, - _: *mut LlmpMsg, - _: *mut c_void, + broker: &LlmpBroker, + client_data: &LlmpBrokerClientMetadata, + msg: *mut LlmpMsg, + data: *mut c_void, ) -> LlmpMsgHookResult; /// Hook that gets called for each new page, created by LLMP -pub type LlmpClientNewPageHookFn = - unsafe fn(_: *mut LlmpClient, _: *mut LlmpPage, _: *mut c_void) -> (); +pub type LlmpClientNewPageHookFn = unsafe fn(client: &LlmpClient) -> (); /// Message payload when a client got added LLMP_TAG_CLIENT_ADDED_V1 */ /// This is an internal message! 
-/// LLMP_TAG_NEW_PAGE_V1 +/// LLMP_TAG_END_OF_PAGE_V1 #[derive(Copy, Clone)] #[repr(C, packed)] -struct LlmpPayloadNewPage { - pub map_size: c_ulong, +struct LlmpPayloadSharedMap { + pub map_size: usize, pub shm_str: [u8; 20], } -/// Returs the container element to this ptr #[inline] -unsafe fn afl_alloc_bufptr(buf: *mut c_void) -> *mut AflAllocBuf { - return (buf as *mut u8).offset(-(16 as c_ulong as isize)) as *mut AflAllocBuf; +unsafe fn shmem2page(afl_shmem: &AflShmem) -> *mut LlmpPage { + afl_shmem.map as *mut LlmpPage } -/// Optimized realloc wrapper, taken over from AFL. -/// This function makes sure *size is > size_needed after call. -/// It will realloc *buf otherwise. -/// *size will grow exponentially as per: -/// https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/ -/// @return For convenience, this function returns *buf. -/// Will return NULL and free *buf if size_needed is <1 or realloc failed. -unsafe fn afl_realloc(buf: *mut c_void, mut size_needed: c_ulong) -> *mut c_void { - let mut new_buf: *mut AflAllocBuf = 0 as *mut AflAllocBuf; - let mut current_size: c_ulong = 0 as c_ulong; - let mut next_size: c_ulong; - if !buf.is_null() { - /* the size is always stored at buf - 1*c_ulong */ - new_buf = afl_alloc_bufptr(buf); - if (*new_buf).magic != 0xaf1a110c as c_ulong { - panic!(format!( - "Illegal, non-null pointer passed to afl_realloc (buf {:?}, magic {:?})", - new_buf, - (*new_buf).magic as c_uint - )); - } - current_size = (*new_buf).complete_size - } - size_needed = (size_needed as c_ulong).wrapping_add(16 as c_ulong) as c_ulong; - /* No need to realloc */ - if current_size >= size_needed { - return buf; - } - /* No initial size was set */ - if size_needed < 64 as c_ulong { - next_size = 64 as c_ulong - } else { - /* grow exponentially */ - next_size = next_pow2(size_needed); - /* handle overflow: fall back to the original size_needed */ - if next_size == 0 { - next_size = size_needed - } - } - /* alloc */ - 
new_buf = realloc(new_buf as *mut c_void, next_size) as *mut AflAllocBuf; - if new_buf.is_null() { - return 0 as *mut c_void; - } - (*new_buf).complete_size = next_size; - (*new_buf).magic = 0xaf1a110c as c_ulong; - return (*new_buf).buf.as_mut_ptr() as *mut c_void; -} - -/// Call alf_free on all afl_realloc buffers. -#[inline] -unsafe fn afl_free(buf: *mut c_void) { - if !buf.is_null() { - free(afl_alloc_bufptr(buf) as *mut c_void); - }; -} -#[inline] -unsafe fn shmem2page(afl_shmem: *mut AflShmem) -> *mut LlmpPage { - return (*afl_shmem).map as *mut LlmpPage; -} /* If a msg is contained in the current page */ unsafe fn llmp_msg_in_page(page: *mut LlmpPage, msg: *mut LlmpMsg) -> bool { - /* DBG("llmp_msg_in_page %p within %p-%p\n", msg, page, page + page->c_ulongotal); */ + /* DBG("llmp_msg_in_page %p within %p-%p\n", msg, page, page + page->size_total); */ return (page as *mut u8) < msg as *mut u8 - && (page as *mut u8).offset((*page).c_ulongotal as isize) > msg as *mut u8; + && (page as *mut u8).offset((*page).size_total as isize) > msg as *mut u8; } -/* allign to LLMP_ALIGNNMENT bytes */ + +/// What byte count to align messages to +/// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value +const LLMP_ALIGNNMENT: usize = 64; + +/// Size of a new page message, header, payload, and alignment +const EOP_MSG_SIZE: usize = llmp_align(size_of::() + size_of::()); + +/* allign to LLMP_ALIGNNMENT=64 bytes */ #[inline] -unsafe fn llmp_align(to_align: c_ulong) -> c_ulong { - if 64 as c_int == 0 as c_int || to_align.wrapping_rem(64 as c_ulong) == 0 as c_int as c_ulong { +const fn llmp_align(to_align: usize) -> usize { + // check if we need to align first + if LLMP_ALIGNNMENT == 0 { return to_align; } - return to_align - .wrapping_add((64 as c_ulong).wrapping_sub(to_align.wrapping_rem(64 as c_int as c_ulong))); + // Then do the alignment + let modulo = to_align % LLMP_ALIGNNMENT; + if modulo == 0 { + to_align + } else { + to_align + 
LLMP_ALIGNNMENT - modulo + } } -/* In case we don't have enough space, make sure the next page will be large -enough. For now, we want to have at least enough space to store 2 of the -largest messages we encountered. */ + +/// In case we don't have enough space, make sure the next page will be large +/// enough. For now, we want to have at least enough space to store 2 of the +/// largest messages we encountered (plus message one new_page message). #[inline] -unsafe fn new_map_size(max_alloc: c_ulong) -> c_ulong { - return next_pow2({ - let mut _a: c_ulong = max_alloc - .wrapping_mul(2 as c_ulong) - .wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )); - let mut _b: c_ulong = ((1 as c_int) << 28 as c_int) as c_ulong; - if _a > _b { - _a - } else { - _b - } - }); +const fn new_map_size(max_alloc: usize) -> usize { + next_pow2(max( + max_alloc * 2 + EOP_MSG_SIZE + LLMP_PAGE_HEADER_LEN, + LLMP_INITIAL_MAP_SIZE, + ) as u64) as usize } + /* Initialize a new llmp_page. 
size should be relative to * llmp_page->messages */ -unsafe fn _llmp_page_init(mut page: *mut LlmpPage, sender: u32, size: c_ulong) { +unsafe fn _llmp_page_init(shmem: &mut AflShmem, sender: u32) { + let page = shmem2page(&shmem); (*page).sender = sender; - ::std::ptr::write_volatile(&mut (*page).current_msg_id as *mut c_ulong, 0 as c_ulong); - (*page).max_alloc_size = 0 as c_ulong; - (*page).c_ulongotal = size; - (*page).size_used = 0 as c_ulong; - (*(*page).messages.as_mut_ptr()).message_id = 0 as c_uint; - (*(*page).messages.as_mut_ptr()).tag = 0xdeadaf as c_uint; - ::std::ptr::write_volatile(&mut (*page).save_to_unmap as *mut u16, 0 as c_int as u16); - ::std::ptr::write_volatile(&mut (*page).sender_dead as *mut u16, 0 as c_int as u16); + ptr::write_volatile(&mut (*page).current_msg_id, 0); + (*page).max_alloc_size = 0; + // Don't forget to subtract our own header size + (*page).size_total = shmem.map_size - LLMP_PAGE_HEADER_LEN; + (*page).size_used = 0; + (*(*page).messages.as_mut_ptr()).message_id = 0; + (*(*page).messages.as_mut_ptr()).tag = LLMP_TAG_UNSET; + ptr::write_volatile(&mut (*page).save_to_unmap, 0); + ptr::write_volatile(&mut (*page).sender_dead, 0); } + /* Pointer to the message behind the last message */ #[inline] -unsafe fn _llmp_next_msg_ptr(last_msg: *mut LlmpMsg) -> *mut LlmpMsg { +const unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { /* DBG("_llmp_next_msg_ptr %p %lu + %lu\n", last_msg, last_msg->buf_len_padded, sizeof(llmp_message)); */ return (last_msg as *mut u8) - .offset(::std::mem::size_of::() as isize) + .offset(size_of::() as isize) .offset((*last_msg).buf_len_padded as isize) as *mut LlmpMsg; } + /* Read next message. 
*/ -unsafe fn llmp_recv(page: *mut LlmpPage, last_msg: *mut LlmpMsg) -> *mut LlmpMsg { +unsafe fn llmp_recv( + page_wrapper: &LlmpPageWrapper, + last_msg: *mut LlmpMsg, +) -> Option<*mut LlmpMsg> { /* DBG("llmp_recv %p %p\n", page, last_msg); */ compiler_fence(Ordering::SeqCst); - if (*page).current_msg_id == 0 { + let page = page_wrapper.page(); + let current_msg_id = ptr::read_volatile(&mut (*page).current_msg_id); + if current_msg_id == 0 { /* No messages yet */ - return 0 as *mut LlmpMsg; + None } else if last_msg.is_null() { /* We never read a message from this queue. Return first. */ - return (*page).messages.as_mut_ptr(); - } else if (*last_msg).message_id as c_ulong == (*page).current_msg_id { + Some((*page).messages.as_mut_ptr()) + } else if (*last_msg).message_id == current_msg_id { /* Oops! No new message! */ - return 0 as *mut LlmpMsg; + None } else { - return _llmp_next_msg_ptr(last_msg); - }; + Some(_llmp_next_msg_ptr(last_msg)) + } } + /* Blocks/spins until the next message gets posted to the page, then returns that message. 
*/ -pub unsafe fn llmp_recv_blocking(page: *mut LlmpPage, last_msg: *mut LlmpMsg) -> *mut LlmpMsg { - let mut current_msg_id: u32 = 0 as u32; +pub unsafe fn llmp_recv_blocking( + page_wrapper: &LlmpPageWrapper, + last_msg: *mut LlmpMsg, +) -> *mut LlmpMsg { + let mut current_msg_id = 0; + let page = page_wrapper.page(); if !last_msg.is_null() { - if (*last_msg).tag == 0xaf1e0f1 as c_uint && llmp_msg_in_page(page, last_msg) as c_int != 0 - { + if (*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg) { panic!("BUG: full page passed to await_message_blocking or reset failed"); } current_msg_id = (*last_msg).message_id } loop { compiler_fence(Ordering::SeqCst); - if (*page).current_msg_id != current_msg_id as c_ulong { - let ret: *mut LlmpMsg = llmp_recv(page, last_msg); - if ret.is_null() { - panic!("BUG: blocking llmp message should never be NULL"); - } - return ret; + if ptr::read_volatile(&mut (*page).current_msg_id) != current_msg_id { + return match llmp_recv(page_wrapper, last_msg) { + Some(msg) => msg, + None => panic!("BUG: blocking llmp message should never be NULL"), + }; } } } + /* Special allocation function for EOP messages (and nothing else!) The normal alloc will fail if there is not enough space for buf_len_padded + EOP So if llmp_alloc_next fails, create new page if necessary, use this function, place EOP, commit EOP, reset, alloc again on the new space. */ -unsafe fn llmp_alloc_eop(mut page: *mut LlmpPage, mut last_msg: *mut LlmpMsg) -> *mut LlmpMsg { - if (*page).size_used.wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) > (*page).c_ulongotal - { - panic!(format!("PROGRAM ABORT : BUG: EOP does not fit in page! 
page {:?}, size_current {:?}, c_ulongotal {:?}", page, - (*page).size_used, (*page).c_ulongotal)); +unsafe fn llmp_alloc_eop(page: *mut LlmpPage, last_msg: *const LlmpMsg) -> *mut LlmpMsg { + if (*page).size_used + EOP_MSG_SIZE > (*page).size_total { + panic!(format!("PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, size_total {:?}", page, + (*page).size_used, (*page).size_total)); } let mut ret: *mut LlmpMsg = if !last_msg.is_null() { _llmp_next_msg_ptr(last_msg) } else { (*page).messages.as_mut_ptr() }; - if (*ret).tag == 0xa143af11 as c_uint { + if (*ret).tag == LLMP_TAG_UNINITIALIZED { panic!("Did not call send() on last message!"); } - (*ret).buf_len_padded = ::std::mem::size_of::() as c_ulong; + (*ret).buf_len_padded = size_of::() as c_ulong; (*ret).message_id = if !last_msg.is_null() { - (*last_msg).message_id = - ((*last_msg).message_id as c_uint).wrapping_add(1 as c_int as c_uint) as u32 as u32; - (*last_msg).message_id + (*last_msg).message_id + 1 } else { - 1 as c_uint + 1 }; - (*ret).tag = 0xaf1e0f1 as u32; - (*page).size_used = ((*page).size_used as c_ulong).wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) as c_ulong; - return ret; + (*ret).tag = LLMP_TAG_END_OF_PAGE; + (*page).size_used += EOP_MSG_SIZE; + ret } -/* Will return a ptr to the next msg buf, or NULL if map is full. -Never call alloc_next without either sending or cancelling the last allocated message for this page! -There can only ever be up to one message allocated per page at each given time. -*/ -unsafe fn llmp_alloc_next( - mut page: *mut LlmpPage, - last_msg: *mut LlmpMsg, - buf_len: c_ulong, -) -> *mut LlmpMsg { - let mut buf_len_padded: c_ulong = buf_len; - let mut complete_msg_size: c_ulong = - llmp_align((::std::mem::size_of::() as c_ulong).wrapping_add(buf_len_padded)); + +/// Will return a ptr to the next msg buf, or None if map is full. 
+/// Never call alloc_next without either sending or cancelling the last allocated message for this page! +/// There can only ever be up to one message allocated per page at each given time. +unsafe fn llmp_alloc_next(llmp: &mut LlmpSender, buf_len: usize) -> Option<*mut LlmpMsg> { + let mut buf_len_padded = buf_len; + let mut complete_msg_size = llmp_align(size_of::() + buf_len_padded); + let page = llmp.out_maps.last().unwrap().page(); + let last_msg = llmp.last_msg_sent; /* DBG("XXX complete_msg_size %lu (h: %lu)\n", complete_msg_size, sizeof(llmp_message)); */ /* In case we don't have enough space, make sure the next page will be large * enough */ - (*page).max_alloc_size = { - let mut _a: c_ulong = (*page).max_alloc_size; - let mut _b: c_ulong = complete_msg_size; - if _a > _b { - _a - } else { - _b - } - }; + // For future allocs, keep track of the maximum (aligned) alloc size we used + (*page).max_alloc_size = max((*page).max_alloc_size, complete_msg_size); + let mut ret: *mut LlmpMsg; /* DBG("last_msg %p %d (%d)\n", last_msg, last_msg ? (int)last_msg->tag : -1, (int)LLMP_TAG_END_OF_PAGE_V1); */ - if last_msg.is_null() || (*last_msg).tag == 0xaf1e0f1 as c_uint { - /* We start fresh */ + if last_msg.is_null() || (*last_msg).tag == LLMP_TAG_END_OF_PAGE { + /* We start fresh, on a new page */ ret = (*page).messages.as_mut_ptr(); /* The initial message may not be alligned, so we at least align the end of it. 
Technically, c_ulong can be smaller than a pointer, then who knows what happens */ - let base_addr: c_ulong = ret as c_ulong; - buf_len_padded = llmp_align(base_addr.wrapping_add(complete_msg_size)) - .wrapping_sub(base_addr) - .wrapping_sub(::std::mem::size_of::() as c_ulong); - complete_msg_size = - buf_len_padded.wrapping_add(::std::mem::size_of::() as c_ulong); + let base_addr = ret as usize; + buf_len_padded = + llmp_align(base_addr + complete_msg_size) - base_addr - size_of::(); + complete_msg_size = buf_len_padded + size_of::(); /* DBG("XXX complete_msg_size NEW %lu\n", complete_msg_size); */ /* Still space for the new message plus the additional "we're full" message? */ - if (*page) - .size_used - .wrapping_add(complete_msg_size) - .wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) - > (*page).c_ulongotal - { + if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { /* We're full. */ - return 0 as *mut LlmpMsg; + return None; } /* We need to start with 1 for ids, as current message id is initialized * with 0... */ (*ret).message_id = if !last_msg.is_null() { - (*last_msg).message_id.wrapping_add(1 as c_uint) + (*last_msg).message_id + 1 } else { - 1 as c_uint + 1 } - } else if (*page).current_msg_id != (*last_msg).message_id as c_ulong { + } else if (*page).current_msg_id != (*last_msg).message_id { /* Oops, wrong usage! */ panic!(format!("BUG: The current message never got commited using llmp_send! 
(page->current_msg_id {:?}, last_msg->message_id: {})", (*page).current_msg_id, (*last_msg).message_id)); } else { - buf_len_padded = - complete_msg_size.wrapping_sub(::std::mem::size_of::() as c_ulong); + buf_len_padded = complete_msg_size - size_of::(); /* DBG("XXX ret %p id %u buf_len_padded %lu complete_msg_size %lu\n", ret, ret->message_id, buf_len_padded, * complete_msg_size); */ - if (*page) - .size_used - .wrapping_add(complete_msg_size) - .wrapping_add(llmp_align( - (::std::mem::size_of::() as c_ulong) - .wrapping_add(::std::mem::size_of::() as c_ulong), - )) - > (*page).c_ulongotal - { - /* Still space for the new message plus the additional "we're full" message? - */ + + /* Still space for the new message plus the additional "we're full" message? */ + if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { /* We're full. */ - return 0 as *mut LlmpMsg; + return None; } ret = _llmp_next_msg_ptr(last_msg); - (*ret).message_id = (*last_msg).message_id.wrapping_add(1 as c_uint) + (*ret).message_id = (*last_msg).message_id + 1 } + /* The beginning of our message should be messages + size_used, else nobody * sent the last msg! */ /* DBG("XXX ret %p - page->messages %p = %lu != %lu, will add %lu -> %p\n", ret, page->messages, (c_ulong)((u8 *)ret - (u8 *)page->messages), page->size_used, complete_msg_size, ((u8 *)ret) + complete_msg_size); */ if last_msg.is_null() && (*page).size_used != 0 - || ((ret as *mut u8).wrapping_sub((*page).messages.as_mut_ptr() as *mut u8 as usize)) - as c_ulong - != (*page).size_used + || ((ret as usize) - (*page).messages.as_mut_ptr() as usize) != (*page).size_used { panic!(format!("Allocated new message without calling send() inbetween. 
ret: {:?}, page: {:?}, complete_msg_size: {:?}, size_used: {:?}, last_msg: {:?}", ret, page, buf_len_padded, (*page).size_used, last_msg)); } - (*page).size_used = ((*page).size_used as c_ulong).wrapping_add(complete_msg_size) as c_ulong; - (*ret).buf_len_padded = buf_len_padded; - (*ret).buf_len = buf_len; + (*page).size_used = (*page).size_used + complete_msg_size; + (*ret).buf_len_padded = buf_len_padded as c_ulong; + (*ret).buf_len = buf_len as c_ulong; /* DBG("Returning new message at %p with len %ld, TAG was %x", ret, ret->buf_len_padded, ret->tag); */ /* Maybe catch some bugs... */ - (*_llmp_next_msg_ptr(ret)).tag = 0xdeadaf as c_uint; - (*ret).tag = 0xa143af11 as c_uint; - return ret; + (*_llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; + (*ret).tag = LLMP_TAG_UNINITIALIZED; + Some(ret) } -/* Commit the message last allocated by llmp_alloc_next to the queue. - After commiting, the msg shall no longer be altered! - It will be read by the consuming threads (broker->clients or client->broker) -*/ + +/// Commit the message last allocated by llmp_alloc_next to the queue. +/// After commiting, the msg shall no longer be altered! 
+/// It will be read by the consuming threads (broker->clients or client->broker) unsafe fn llmp_send(page: *mut LlmpPage, msg: *mut LlmpMsg) -> Result<(), AflError> { - if (*msg).tag == 0xdeadaf as c_uint { + if (*msg).tag == LLMP_TAG_UNSET as c_uint { panic!(format!( "No tag set on message with id {}", (*msg).message_id @@ -524,371 +467,203 @@ unsafe fn llmp_send(page: *mut LlmpPage, msg: *mut LlmpMsg) -> Result<(), AflErr ))); } compiler_fence(Ordering::SeqCst); - ::std::ptr::write_volatile( - &mut (*page).current_msg_id as *mut c_ulong, - (*msg).message_id as c_ulong, - ); - + ptr::write_volatile(&mut (*page).current_msg_id, (*msg).message_id); compiler_fence(Ordering::SeqCst); - return Ok(()); + Ok(()) } -#[inline] -unsafe fn _llmp_broker_current_broadcast_map(broker_state: *mut LlmpBroker) -> *mut AflShmem { - return &mut *(*broker_state).broadcast_maps.offset( - (*broker_state) - .broadcast_map_count - .wrapping_sub(1 as c_ulong) as isize, - ) as *mut AflShmem; -} -/* create a new shard page. Size_requested will be the min size, you may get a - * larger map. Retruns NULL on error. */ -unsafe fn llmp_new_page_shmem( - uninited_shmem: *mut AflShmem, - sender: c_ulong, - size_requested: c_ulong, -) -> *mut LlmpPage { - let size: c_ulong = next_pow2({ - let mut _a: c_ulong = size_requested.wrapping_add(40 as c_ulong); - let mut _b: c_ulong = ((1 as c_int) << 28 as c_int) as c_ulong; - if _a > _b { - _a - } else { - _b - } - }); - if afl_shmem_init(uninited_shmem, size).is_null() { - return 0 as *mut LlmpPage; - } - _llmp_page_init(shmem2page(uninited_shmem), sender as u32, size_requested); - return shmem2page(uninited_shmem); -} -/* This function handles EOP by creating a new shared page and informing the -listener about it using a EOP message. 
*/ -unsafe fn llmp_handle_out_eop( - mut maps: *mut AflShmem, - map_count_p: *mut c_ulong, - last_msg_p: *mut *mut LlmpMsg, -) -> *mut AflShmem { - let map_count: u32 = *map_count_p as u32; - let mut old_map: *mut LlmpPage = - shmem2page(&mut *maps.offset(map_count.wrapping_sub(1 as c_uint) as isize)); - maps = afl_realloc( - maps as *mut c_void, - (map_count.wrapping_add(1 as c_uint) as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut AflShmem; - if maps.is_null() { - return 0 as *mut AflShmem; - } - /* Broadcast a new, large enough, message. Also sorry for that c ptr stuff! */ - let mut new_map: *mut LlmpPage = llmp_new_page_shmem( - &mut *maps.offset(map_count as isize), - (*old_map).sender as c_ulong, - new_map_size((*old_map).max_alloc_size), - ); - if new_map.is_null() { - afl_free(maps as *mut c_void); - return 0 as *mut AflShmem; - } - /* Realloc may have changed the location of maps_p (and old_map) in memory :/ - */ - old_map = shmem2page(&mut *maps.offset(map_count.wrapping_sub(1 as c_uint) as isize)); - *map_count_p = map_count.wrapping_add(1 as c_uint) as c_ulong; - ::std::ptr::write_volatile( - &mut (*new_map).current_msg_id as *mut c_ulong, - (*old_map).current_msg_id, - ); +/// listener about it using a EOP message. +unsafe fn llmp_handle_out_eop(llmp: &mut LlmpSender) -> Result<(), AflError> { + let map_count = llmp.out_maps.len(); + let mut old_map = llmp.out_maps.last_mut().unwrap().page(); + + // Create a new shard page. 
+ let mut new_map_shmem = LlmpPageWrapper::new((*old_map).sender, (*old_map).max_alloc_size)?; + let mut new_map = new_map_shmem.page(); + + ptr::write_volatile(&mut (*new_map).current_msg_id, (*old_map).current_msg_id); (*new_map).max_alloc_size = (*old_map).max_alloc_size; /* On the old map, place a last message linking to the new map for the clients * to consume */ - let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, *last_msg_p); + let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, llmp.last_msg_sent); (*out).sender = (*old_map).sender; - let mut new_page_msg: *mut LlmpPayloadNewPage = - (*out).buf.as_mut_ptr() as *mut LlmpPayloadNewPage; - /* copy the infos to the message we're going to send on the old buf */ - (*new_page_msg).map_size = (*maps.offset(map_count as isize)).map_size; - memcpy( - (*new_page_msg).shm_str.as_mut_ptr() as *mut c_void, - (*maps.offset(map_count as isize)).shm_str.as_mut_ptr() as *const c_void, - 20 as c_ulong, - ); + + let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + (*end_of_page_msg).map_size = new_map_shmem.shmem.map_size; + (*end_of_page_msg).shm_str = new_map_shmem.shmem.shm_str; + // We never sent a msg on the new buf */ - *last_msg_p = 0 as *mut LlmpMsg; + llmp.last_msg_sent = 0 as *mut LlmpMsg; + /* Send the last msg on the old buf */ - match llmp_send(old_map, out) { - Err(_e) => { - afl_free(maps as *mut c_void); - println!("Error sending message"); - 0 as *mut AflShmem - } - Ok(_) => maps, - } + llmp_send(old_map, out)?; + llmp.out_maps.push(new_map_shmem); + + Ok(()) } -/* no more space left! 
We'll have to start a new page */ -pub unsafe fn llmp_broker_handle_out_eop(broker: *mut LlmpBroker) -> AflRet { - (*broker).broadcast_maps = llmp_handle_out_eop( - (*broker).broadcast_maps, - &mut (*broker).broadcast_map_count, - &mut (*broker).last_msg_sent, - ); - return if !(*broker).broadcast_maps.is_null() { - AFL_RET_SUCCESS - } else { - AFL_RET_ALLOC - } as AflRet; -} -pub unsafe fn llmp_broker_alloc_next(broker: *mut LlmpBroker, len: c_ulong) -> *mut LlmpMsg { - let mut broadcast_page: *mut LlmpPage = shmem2page(_llmp_broker_current_broadcast_map(broker)); - let mut out: *mut LlmpMsg = llmp_alloc_next(broadcast_page, (*broker).last_msg_sent, len); - if out.is_null() { - /* no more space left! We'll have to start a new page */ - let ret: AflRet = llmp_broker_handle_out_eop(broker); - if ret != AFL_RET_SUCCESS as AflRet { - panic!("Error handling broker out EOP"); - } - /* llmp_handle_out_eop allocates a new current broadcast_map */ - broadcast_page = shmem2page(_llmp_broker_current_broadcast_map(broker)); - /* the alloc is now on a new page */ - out = llmp_alloc_next(broadcast_page, (*broker).last_msg_sent, len); - if out.is_null() { - panic!(format!( - "Error allocating {} bytes in shmap {:?}", - len, - (*_llmp_broker_current_broadcast_map(broker)) - .shm_str - .as_mut_ptr(), - )); - } + +pub unsafe fn llmp_broker_alloc_next( + broker: &LlmpBroker, + len: usize, +) -> Result<*mut LlmpMsg, AflError> { + match llmp_alloc_next(&mut broker.llmp_out, len) { + Some(msg) => return Ok(msg), + _ => (), + }; + + /* no more space left! 
We'll have to start a new page */ + llmp_handle_out_eop(&mut broker.llmp_out); + + match llmp_alloc_next(&mut broker.llmp_out, len) { + Some(msg) => Ok(msg), + None => Err(AflError::Unknown(format!( + "Error allocating {} bytes in shmap", + len + ))), } - return out; } impl LlmpBroker { /// Create and initialize a new llmp_broker pub unsafe fn new() -> Result { let mut broker = LlmpBroker { - last_msg_sent: ptr::null_mut(), - broadcast_map_count: 0, - broadcast_maps: ptr::null_mut(), - msg_hook_count: 0, - msg_hooks: ptr::null_mut(), - llmp_client_count: 0, - llmp_clients: ptr::null_mut(), + llmp_out: LlmpSender { + id: 0, + last_msg_sent: ptr::null_mut(), + out_maps: vec![LlmpPageWrapper::new(0, 0)?], + }, + msg_hooks: vec![], + llmp_clients: vec![], }; - llmp_broker_init(&mut broker)?; + Ok(broker) } /// Registers a new client for the given sharedmap str and size. - /// Be careful: Intenral realloc may change the location of the client map - unsafe fn register_client( - &mut self, - shm_str: &CStr, - map_size: c_ulong, - ) -> *mut LlmpBrokerClientMetadata { - /* make space for a new client and calculate its id */ - self.llmp_clients = afl_realloc( - self.llmp_clients as *mut c_void, - self.llmp_client_count - .wrapping_add(1 as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut LlmpBrokerClientMetadata; - if self.llmp_clients.is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - let mut client: *mut LlmpBrokerClientMetadata = - self.llmp_clients.offset(self.llmp_client_count as isize) - as *mut LlmpBrokerClientMetadata; - memset( - client as *mut c_void, - 0 as c_int, - ::std::mem::size_of::() as c_ulong, - ); - (*client).client_state = - calloc(1 as c_ulong, ::std::mem::size_of::() as c_ulong) as *mut LlmpClient; - if (*client).client_state.is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - (*(*client).client_state).id = (*self).llmp_client_count as u32; - (*client).cur_client_map = - calloc(1 as c_ulong, 
::std::mem::size_of::() as c_ulong) as *mut AflShmem; - if (*client).cur_client_map.is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - if afl_shmem_by_str((*client).cur_client_map, shm_str, map_size).is_null() { - return 0 as *mut LlmpBrokerClientMetadata; - } - self.llmp_client_count = self.llmp_client_count.wrapping_add(1); - // TODO: Add client map - return client; + /// Returns the id of the new client in broker.client_map + unsafe fn register_client(&mut self, client_page: LlmpPageWrapper) { + let id = self.llmp_clients.len() as u32; + self.llmp_clients.push(LlmpBrokerClientMetadata { + id, + cur_client_map: client_page, + last_msg_broker_read: 0 as *mut LlmpMsg, + clientloop: None, + data: 0 as *mut c_void, + }); } /// Adds a hook that gets called in the broker for each new message the broker touches. /// if the callback returns false, the message is not forwarded to the clients. */ - pub unsafe fn add_message_hook(&mut self, hook: LlmpMsgHookFn, data: *mut c_void) -> AflRet { - return llmp_add_hook_generic( - &mut self.msg_hooks, - &mut self.msg_hook_count, - ::std::mem::transmute::, *mut c_void>(Some(hook)), - data, + pub fn add_message_hook(&mut self, hook: LlmpMsgHookFn, data: *mut c_void) { + self.msg_hooks + .push(LlmpHookdataGeneric { func: hook, data }); + } + + /// For internal use: Forward the current message to the out map. + unsafe fn forward_msg(&mut self, msg: *mut LlmpMsg) -> Result<(), AflError> { + let mut out: *mut LlmpMsg = llmp_broker_alloc_next(self, (*msg).buf_len_padded as usize)?; + + /* Copy over the whole message. + If we should need zero copy, we could instead post a link to the + original msg with the map_id and offset. 
*/ + let actual_size = (*out).buf_len_padded; + memcpy( + out as *mut c_void, + msg as *const c_void, + size_of::() as c_ulong + (*msg).buf_len_padded as c_ulong, ); + (*out).buf_len_padded = actual_size; + /* We need to replace the message ID with our own */ + let out_page = self.llmp_out.out_maps.last().unwrap().page(); + (*out).message_id = (*out_page).current_msg_id + 1; + match llmp_send(out_page, out) { + Err(e) => panic!(format!("Error sending msg: {:?}", e)), + _ => (), + }; + self.llmp_out.last_msg_sent = out; + Ok(()) } /// broker broadcast to its own page for all others to read */ - #[inline] - unsafe fn handle_new_msgs(&mut self, mut client: *mut LlmpBrokerClientMetadata) { + unsafe fn handle_new_msgs( + &mut self, + client: &LlmpBrokerClientMetadata, + ) -> Result<(), AflError> { // TODO: We could memcpy a range of pending messages, instead of one by one. /* DBG("llmp_broker_handle_new_msgs %p %p->%u\n", broker, client, client->client_state->id); */ - let incoming: *mut LlmpPage = shmem2page((*client).cur_client_map); - let mut current_message_id: u32 = if !(*client).last_msg_broker_read.is_null() { - (*(*client).last_msg_broker_read).message_id + let incoming: *mut LlmpPage = client.cur_client_map.page(); + let mut current_message_id = if client.last_msg_broker_read.is_null() { + 0 } else { - 0 as c_uint + (*client.last_msg_broker_read).message_id }; - while current_message_id as c_ulong != (*incoming).current_msg_id { - let msg: *mut LlmpMsg = llmp_recv(incoming, (*client).last_msg_broker_read); - if msg.is_null() { - panic!("No message received but not all message ids receved! 
Data out of sync?"); - } - if (*msg).tag == 0xaf1e0f1 as c_uint { - let pageinfo: *mut LlmpPayloadNewPage = { - let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong { - (*_msg).buf.as_mut_ptr() - } else { - 0 as *mut u8 - }) as *mut LlmpPayloadNewPage - }; - if pageinfo.is_null() { + + while current_message_id != ptr::read_volatile(&(*incoming).current_msg_id) { + let msg = match llmp_recv(&client.cur_client_map, (*client).last_msg_broker_read) { + None => { + panic!("No message received but not all message ids receved! Data out of sync?") + } + Some(msg) => msg, + }; + if (*msg).tag == LLMP_TAG_END_OF_PAGE { + // Handle end of page + if (*msg).buf_len < size_of::() as u64 { panic!(format!( "Illegal message length for EOP (is {}, expected {})", (*msg).buf_len_padded, - ::std::mem::size_of::() as c_ulong + size_of::() )); } + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + /* We can reuse the map mem space, no need to free and calloc. However, the pageinfo points to the map we're about to unmap. - Copy the contents first. */ - let mut pageinfo_cpy: LlmpPayloadNewPage = LlmpPayloadNewPage { - map_size: 0, - shm_str: [0; 20], - }; - memcpy( - &mut pageinfo_cpy as *mut LlmpPayloadNewPage as *mut c_void, - pageinfo as *const c_void, - ::std::mem::size_of::() as c_ulong, - ); - let client_map: *mut AflShmem = (*client).cur_client_map; - ::std::ptr::write_volatile( - &mut (*shmem2page(client_map)).save_to_unmap as *mut u16, - 1 as u16, - ); - afl_shmem_deinit(client_map); - if afl_shmem_by_str( - client_map, - CStr::from_bytes_with_nul(&(*pageinfo).shm_str).expect("Illegal shm_str"), - (*pageinfo).map_size, - ) - .is_null() - { - panic!(format!( - "Could not get shmem by str for map {:?} of size {:?}", - (*pageinfo).shm_str.as_mut_ptr(), - (*pageinfo).map_size - )); - } - } else if (*msg).tag == 0xc11e471 as c_uint { + Clone the contents first to be safe (probably fine in rust eitner way). 
*/ + let mut pageinfo_cpy = (*pageinfo).clone(); + + let client_map = (*client).cur_client_map; + + ptr::write_volatile(&mut (*client_map.page()).save_to_unmap, 1); + client.cur_client_map = + LlmpPageWrapper::from_name_slice(&pageinfo_cpy.shm_str, pageinfo_cpy.map_size)?; + dbg!("Client got a new map", client.cur_client_map.shmem.shm_str); + } else if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT { /* This client informs us about yet another new client add it to the list! Also, no need to forward this msg. */ - let pageinfo: *mut LlmpPayloadNewPage = { - let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong { - (*_msg).buf.as_mut_ptr() - } else { - 0 as *mut u8 - }) as *mut LlmpPayloadNewPage - }; - if pageinfo.is_null() { - println!("Ignoring broken CLIENT_ADDED msg due to incorrect size. Expected {:?} but got {:?}", - ::std::mem::size_of::() as - c_ulong, (*msg).buf_len_padded); - } - /* register_client may realloc the clients, we need to find ours again */ - let client_id: u32 = (*(*client).client_state).id; - if self - .register_client( - CStr::from_bytes_with_nul(&(*pageinfo).shm_str).expect("Illegal shm_str"), - (*pageinfo).map_size, - ) - .is_null() - { - panic!(format!( - "Could not register clientprocess with shm_str {:?}", - (*pageinfo).shm_str.as_mut_ptr() - )); - } - (*client).client_type = LLMP_CLIENT_TYPE_FOREIGN_PROCESS; - /* find client again */ - client = - self.llmp_clients.offset(client_id as isize) as *mut LlmpBrokerClientMetadata - } else { - let mut forward_msg: bool = 1 as c_int != 0; - let mut i: c_ulong = 0; - while i < self.msg_hook_count { - let msg_hook: *mut LlmpHookdataGeneric = - self.msg_hooks.offset(i as isize) as *mut LlmpHookdataGeneric; - forward_msg = forward_msg as c_int != 0 - && ::std::mem::transmute::<*mut c_void, Option>( - (*msg_hook).func, - ) - .expect("non-null function pointer")( - self, client, msg, (*msg_hook).data - ) as c_int - != 0; - if 
!llmp_msg_in_page(shmem2page((*client).cur_client_map), msg) { - /* Special handling in case the client got exchanged inside the message_hook, for example after a crash. */ - return; - } - i = i.wrapping_add(1) - } - if forward_msg { - let mut out: *mut LlmpMsg = llmp_broker_alloc_next(self, (*msg).buf_len_padded); - if out.is_null() { - panic!(format!( - "Error allocating {} bytes in shmap {:?}", - (*msg).buf_len_padded, - (*_llmp_broker_current_broadcast_map(self)) - .shm_str - .as_mut_ptr(), - )); - } - /* Copy over the whole message. - If we should need zero copy, we could instead post a link to the - original msg with the map_id and offset. */ - let actual_size: c_ulong = (*out).buf_len_padded; - memcpy( - out as *mut c_void, - msg as *const c_void, - (::std::mem::size_of::() as c_ulong) - .wrapping_add((*msg).buf_len_padded), + if (*msg).buf_len < size_of::() as u64 { + println!("Ignoring broken CLIENT_ADDED msg due to incorrect size. Expected {} but got {}", + (*msg).buf_len_padded, + size_of::() ); - (*out).buf_len_padded = actual_size; - /* We need to replace the message ID with our own */ - let out_page: *mut LlmpPage = - shmem2page(_llmp_broker_current_broadcast_map(self)); - (*out).message_id = - (*out_page).current_msg_id.wrapping_add(1 as c_ulong) as u32; - match llmp_send(out_page, out) { - Err(e) => panic!(format!("Error sending msg: {:?}", e)), - _ => (), + } else { + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + + let client_id: u32 = client.id; + match LlmpPageWrapper::from_name_slice( + &(*pageinfo).shm_str, + (*pageinfo).map_size, + ) { + Ok(new_page) => self.register_client(new_page), + Err(e) => println!("Error adding client! {:?}", e), }; - self.last_msg_sent = out + } + } else { + // The message is not specifically for use. Let the user handle it, then forward it to the clients, if necessary. 
+ let mut should_forward_msg = true; + for hook in self.msg_hooks { + match (hook.func)(&self, client, msg, hook.data) { + LlmpMsgHookResult::Handled => should_forward_msg = false, + _ => (), + } + } + if should_forward_msg { + self.forward_msg(msg); } } (*client).last_msg_broker_read = msg; current_message_id = (*msg).message_id } + Ok(()) } /// The broker walks all pages and looks for changes, then broadcasts them on @@ -896,11 +671,8 @@ impl LlmpBroker { pub unsafe fn once(&mut self) { compiler_fence(Ordering::SeqCst); let mut i: u32 = 0; - while (i as c_ulong) < self.llmp_client_count { - let client: *mut LlmpBrokerClientMetadata = - self.llmp_clients.offset(i as isize) as *mut LlmpBrokerClientMetadata; - self.handle_new_msgs(client); - i = i.wrapping_add(1) + for client in self.llmp_clients { + self.handle_new_msgs(&client); } } @@ -910,166 +682,17 @@ impl LlmpBroker { loop { compiler_fence(Ordering::SeqCst); self.once(); + /* 5 milis of sleep for now to not busywait at 100% */ - usleep((5 as c_int * 1000 as c_int) as c_uint); + thread::sleep(time::Duration::from_millis(5)); } } - - /// launch a specific client. This function doesn't need to be called externally - all registered clients will get launched at broker_run - unsafe fn launch_client(&mut self, mut clientdata: *mut LlmpBrokerClientMetadata) -> bool { - if clientdata < self.llmp_clients - || clientdata - > self - .llmp_clients - .offset(self.llmp_client_count.wrapping_sub(1 as c_ulong) as isize) - as *mut LlmpBrokerClientMetadata - { - println!( - "[!] WARNING: Illegal client specified at ptr {:?} (instead of {:?} to {:?})", - clientdata, - self.llmp_clients, - self.llmp_clients - .offset(self.llmp_client_count.wrapping_sub(1 as c_ulong) as isize,) - as *mut LlmpBrokerClientMetadata, - ); - return 0 as c_int != 0; - } - if (*clientdata).client_type as c_uint == LLMP_CLIENT_TYPE_CHILD_PROCESS as c_int as c_uint - { - if (*clientdata).pid != 0 { - println!("[!] 
WARNING: Tried to relaunch already running client. Set ->pid to 0 if this is what you want."); - return 0 as c_int != 0; - } - let child_id: c_int = fork(); - if child_id < 0 as c_int { - println!("[!] WARNING: Could not fork"); - return 0 as c_int != 0; - } else { - if child_id == 0 as c_int { - /* child */ - _llmp_client_wrapped_loop(clientdata as *mut c_void); - } - } - /* parent */ - (*clientdata).pid = child_id; - return 1 as c_int != 0; - } else { - println!("[!] WARNING: Tried to spawn llmp child with unknown thread type."); - return 0 as c_int != 0; - } - //return 1 as c_int != 0; - } - - /// Launches all clientloops registered with this broker - pub unsafe fn launch_clientloops(&mut self) -> Result<(), AflError> { - let mut i: c_ulong = 0; - while i < self.llmp_client_count { - if (*self.llmp_clients.offset(i as isize)).client_type as c_uint - == LLMP_CLIENT_TYPE_CHILD_PROCESS as c_uint - { - if !self.launch_client(self.llmp_clients.offset(i as isize)) { - println!("[!] WARNING: Could not launch all clients"); - return Err(AflError::Unknown("Failed to launch clients".into())); - } - } - i = i.wrapping_add(1) - } - Ok(()) - } - - /// Start all threads and the main broker. - /// Same as llmp_broker_launch_threaded clients(); - /// Never returns. - pub unsafe fn run(&mut self) -> ! { - self.launch_clientloops().expect("Failed to launch clients"); - self.broker_loop(); - } - - /// Register a new forked/child client. - /// Client thread will be called with llmp_client client, containing - /// the data in ->data. This will register a client to be spawned up as soon as - /// broker_loop() starts. Clients can also be added later via - /// llmp_broker_register_remote(..) 
or the local_tcp_client - /// TODO: TCP remote client not yet supported in rust llmp - pub unsafe fn register_childprocess_clientloop( - &mut self, - clientloop: LlmpClientloopFn, - data: *mut c_void, - ) -> Result<(), AflError> { - let mut client_map: AflShmem = { - let init = AflShmem { - shm_str: [0; 20], - shm_id: 0, - map: 0 as *mut u8, - map_size: 0, - }; - init - }; - if llmp_new_page_shmem( - &mut client_map, - self.llmp_client_count, - ((1 as c_int) << 28 as c_int) as c_ulong, - ) - .is_null() - { - return Err(AflError::Unknown("Alloc".into())); - } - let mut client: *mut LlmpBrokerClientMetadata = self.register_client( - CStr::from_ptr(&client_map.shm_str as *const u8 as *const c_char), - client_map.map_size, - ); - if client.is_null() { - afl_shmem_deinit(&mut client_map); - return Err(AflError::Unknown("Something in clients failed".into())); - } - (*client).clientloop = Some(clientloop); - (*client).data = data; - (*client).client_type = LLMP_CLIENT_TYPE_CHILD_PROCESS; - /* Copy the already allocated shmem to the client state */ - (*(*client).client_state).out_maps = afl_realloc( - (*(*client).client_state).out_maps as *mut c_void, - ::std::mem::size_of::() as c_ulong, - ) as *mut AflShmem; - if (*(*client).client_state).out_maps.is_null() { - afl_shmem_deinit(&mut client_map); - afl_shmem_deinit((*client).cur_client_map); - /* "Unregister" by subtracting the client from count */ - self.llmp_client_count = self.llmp_client_count.wrapping_sub(1); - return Err(AflError::Unknown("Something in clients failed".into())); - } - memcpy( - (*(*client).client_state).out_maps as *mut c_void, - &mut client_map as *mut AflShmem as *const c_void, - ::std::mem::size_of::() as c_ulong, - ); - (*(*client).client_state).out_map_count = 1 as c_ulong; - /* Each client starts with the very first map. - They should then iterate through all maps once and work on all old messages. 
- */ - (*(*client).client_state).current_broadcast_map = - self.broadcast_maps.offset(0 as isize) as *mut AflShmem; - (*(*client).client_state).out_map_count = 1 as c_ulong; - return Ok(()); - } } /// A new page will be used. Notify each registered hook in the client about this fact. -unsafe fn llmp_clientrigger_new_out_page_hooks(client: *mut LlmpClient) { - let mut i: c_ulong = 0; - while i < (*client).new_out_page_hook_count { - ::std::mem::transmute::<*mut c_void, Option>( - (*(*client).new_out_page_hooks.offset(i as isize)).func, - ) - .expect("non-null function pointer")( - client, - shmem2page( - &mut *(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1 as c_ulong) as isize), - ), - (*(*client).new_out_page_hooks.offset(i as isize)).data, - ); - i = i.wrapping_add(1) +unsafe fn llmp_clien_trigger_new_out_page_hooks(client: &LlmpClient) { + for hook in client.new_out_page_hooks { + (hook.func)(client); } } @@ -1078,7 +701,7 @@ unsafe fn _llmp_client_wrapped_loop(llmp_client_broker_metadata_ptr: *mut c_void let metadata: *mut LlmpBrokerClientMetadata = llmp_client_broker_metadata_ptr as *mut LlmpBrokerClientMetadata; /* Before doing anything else:, notify registered hooks about the new page we're about to use */ - llmp_clientrigger_new_out_page_hooks((*metadata).client_state); + llmp_clien_trigger_new_out_page_hooks((*metadata).client_state); (*metadata).clientloop.expect("non-null function pointer")( (*metadata).client_state, @@ -1129,7 +752,7 @@ unsafe fn llmp_client_handle_out_eop(client: *mut LlmpClient) -> bool { */ llmp_client_prune_old_pages(client); /* So we got a new page. 
Inform potential hooks */ - llmp_clientrigger_new_out_page_hooks(client); + llmp_clien_trigger_new_out_page_hooks(client); return 1 as c_int != 0; } @@ -1145,37 +768,38 @@ pub unsafe fn llmp_client_recv(client: *mut LlmpClient) -> *mut LlmpMsg { return 0 as *mut LlmpMsg; } (*client).last_msg_recvd = msg; - if (*msg).tag == 0xdeadaf as c_uint { + if (*msg).tag == LLMP_TAG_UNSET as c_uint { panic!("BUG: Read unallocated msg"); } else { - if (*msg).tag == 0xaf1e0f1 as c_uint { + if (*msg).tag == LLMP_TAG_END_OF_PAGE as c_uint { /* we reached the end of the current page. We'll init a new page but can reuse the mem are of the current map. However, we cannot use the message if we deinit its page, so let's copy */ - let mut pageinfo_cpy: LlmpPayloadNewPage = LlmpPayloadNewPage { + let mut pageinfo_cpy: LlmpPayloadSharedMap = LlmpPayloadSharedMap { map_size: 0, shm_str: [0; 20], }; let broadcast_map: *mut AflShmem = (*client).current_broadcast_map; - let pageinfo: *mut LlmpPayloadNewPage = { + let pageinfo: *mut LlmpPayloadSharedMap = { let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong { + (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong + { (*_msg).buf.as_mut_ptr() } else { 0 as *mut u8 - }) as *mut LlmpPayloadNewPage + }) as *mut LlmpPayloadSharedMap }; if pageinfo.is_null() { panic!(format!( "Illegal message length for EOP (is {}, expected {})", (*msg).buf_len_padded, - ::std::mem::size_of::() as c_ulong + ::std::mem::size_of::() as c_ulong )); } memcpy( - &mut pageinfo_cpy as *mut LlmpPayloadNewPage as *mut c_void, + &mut pageinfo_cpy as *mut LlmpPayloadSharedMap as *mut c_void, pageinfo as *const c_void, - ::std::mem::size_of::() as c_ulong, + ::std::mem::size_of::() as c_ulong, ); /* Never read by broker broker: shmem2page(map)->save_to_unmap = true; */ afl_shmem_deinit(broadcast_map); @@ -1253,7 +877,7 @@ pub unsafe fn llmp_client_alloc_next(client: *mut LlmpClient, size: usize) -> *m )) .messages .as_mut_ptr()) - 
.tag != 0xdeadaf as c_uint + .tag != LLMP_TAG_UNSET as c_uint { panic!("Error in handle_out_eop"); } @@ -1292,7 +916,7 @@ pub unsafe fn llmp_client_cancel(client: *mut LlmpClient, mut msg: *mut LlmpMsg) .out_maps .offset((*client).out_map_count.wrapping_sub(1 as c_ulong) as isize), ); - (*msg).tag = 0xdeadaf as c_uint; + (*msg).tag = LLMP_TAG_UNSET as c_uint; (*page).size_used = ((*page).size_used as c_ulong).wrapping_sub( (*msg) .buf_len_padded @@ -1336,7 +960,7 @@ pub unsafe fn llmp_client_new_unconnected() -> *mut LlmpClient { if llmp_new_page_shmem( &mut *(*client_state).out_maps.offset(0 as isize), (*client_state).id as c_ulong, - ((1 as c_int) << 28 as c_int) as c_ulong, + LLMP_INITIAL_MAP_SIZE, ) .is_null() { @@ -1375,35 +999,6 @@ impl Drop for LlmpClient { } } -/// Generic function to add a hook to the mem pointed to by hooks_p, using afl_realloc on the mem area, and increasing -/// hooks_count_p -pub unsafe fn llmp_add_hook_generic( - hooks_p: *mut *mut LlmpHookdataGeneric, - hooks_count_p: *mut c_ulong, - new_hook_func: *mut c_void, - new_hook_data: *mut c_void, -) -> AflRet { - let hooks_count: c_ulong = *hooks_count_p; - let hooks: *mut LlmpHookdataGeneric = afl_realloc( - *hooks_p as *mut c_void, - hooks_count - .wrapping_add(1 as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut LlmpHookdataGeneric; - if hooks.is_null() { - *hooks_p = 0 as *mut LlmpHookdataGeneric; - *hooks_count_p = 0 as c_ulong; - return AFL_RET_ALLOC; - } - let ref mut fresh9 = (*hooks.offset(hooks_count as isize)).func; - *fresh9 = new_hook_func; - let ref mut fresh10 = (*hooks.offset(hooks_count as isize)).data; - *fresh10 = new_hook_data; - *hooks_p = hooks; - *hooks_count_p = hooks_count.wrapping_add(1 as c_ulong); - return AFL_RET_SUCCESS; -} - /// Adds a hook that gets called in the client for each new outgoing page the client creates. 
pub unsafe fn llmp_client_add_new_out_page_hook( client: *mut LlmpClient, @@ -1444,35 +1039,3 @@ impl Drop for LlmpBroker { unsafe { llmp_broker_deinit(self) }; } } - -/// Allocate and set up the new broker instance. Afterwards, run with broker_run. -/// Use llmp_broker::new instead. -unsafe fn llmp_broker_init(broker: *mut LlmpBroker) -> Result<(), AflError> { - memset( - broker as *mut c_void, - 0 as c_int, - ::std::mem::size_of::() as c_ulong, - ); - /* let's create some space for outgoing maps */ - (*broker).broadcast_maps = afl_realloc( - 0 as *mut c_void, - (1 as c_ulong).wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut AflShmem; - if (*broker).broadcast_maps.is_null() { - return Err(AflError::Unknown("Alloc".into())); - } - (*broker).broadcast_map_count = 1 as c_ulong; - (*broker).llmp_client_count = 0 as c_ulong; - (*broker).llmp_clients = 0 as *mut LlmpBrokerClientMetadata; - if llmp_new_page_shmem( - _llmp_broker_current_broadcast_map(broker), - -(1 as c_int) as c_ulong, - ((1 as c_int) << 28 as c_int) as c_ulong, - ) - .is_null() - { - afl_free((*broker).broadcast_maps as *mut c_void); - return Err(AflError::Unknown("Alloc".into())); - } - return Ok(()); -} diff --git a/afl/src/events/shmem_translated.rs b/afl/src/events/shmem_translated.rs index 30552e9dad..cbf2866bde 100644 --- a/afl/src/events/shmem_translated.rs +++ b/afl/src/events/shmem_translated.rs @@ -1,5 +1,5 @@ use libc::{c_char, c_int, c_long, c_uchar, c_uint, c_ulong, c_ushort, c_void}; -use std::ffi::CStr; +use std::{ffi::CStr, mem::size_of}; use crate::AflError; @@ -64,7 +64,7 @@ pub struct AflShmem { pub shm_str: [u8; 20], pub shm_id: c_int, pub map: *mut c_uchar, - pub map_size: c_ulong, + pub map_size: usize, } /// Deinit on drop @@ -87,9 +87,9 @@ const fn afl_shmem_unitialized() -> AflShmem { } impl AflShmem { - fn from_str(shm_str: &CStr, map_size: c_ulong) -> Result { + pub fn from_str(shm_str: &CStr, map_size: usize) -> Result { let mut ret = afl_shmem_unitialized(); - 
let map = unsafe { afl_shmem_init(&mut ret, map_size) }; + let map = unsafe { afl_shmem_by_str(&mut ret, shm_str, map_size) }; if map != 0 as *mut u8 { Ok(ret) } else { @@ -100,7 +100,13 @@ impl AflShmem { } } - fn new(map_size: c_ulong) -> Result { + /// Generate a shared map with a fixed byte array of 20 + pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + let str_bytes = shm_str as *const [u8; 20] as *const libc::c_char; + Self::from_str(CStr::from_ptr(str_bytes), map_size) + } + + pub fn new(map_size: usize) -> Result { let mut ret = afl_shmem_unitialized(); let map = unsafe { afl_shmem_init(&mut ret, map_size) }; if map != 0 as *mut u8 { @@ -115,7 +121,7 @@ impl AflShmem { /// Sets this shm id as env variable with the given name /// Also write the map size as name#_SIZE env - fn to_env_var(&self, env_name: &CStr) -> Result<(), AflError> { + pub fn to_env_var(&self, env_name: &CStr) -> Result<(), AflError> { if unsafe { afl_shmem_to_env_var(&self, env_name) } == AFL_RET_SUCCESS { Ok(()) } else { @@ -141,12 +147,12 @@ pub unsafe fn afl_shmem_deinit(shm: *mut AflShmem) { /// Functions to create Shared memory region, for observation channels and /// opening inputs and stuff. 
-pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: c_ulong) -> *mut c_uchar { +pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: usize) -> *mut c_uchar { (*shm).map_size = map_size; (*shm).map = 0 as *mut c_uchar; (*shm).shm_id = shmget( 0 as c_int, - map_size, + map_size as c_ulong, 0o1000 as c_int | 0o2000 as c_int | 0o600 as c_int, ); if (*shm).shm_id < 0 as c_int { @@ -155,12 +161,13 @@ pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: c_ulong) -> *mut c_uc } snprintf( (*shm).shm_str.as_mut_ptr() as *mut i8, - ::std::mem::size_of::<[c_char; 20]>() as c_ulong, + size_of::<[c_char; 20]>() as c_ulong, b"%d\x00" as *const u8 as *const c_char, (*shm).shm_id, ); - (*shm).shm_str[(::std::mem::size_of::<[c_char; 20]>() as c_ulong) - .wrapping_sub(1 as c_int as c_ulong) as usize] = '\u{0}' as u8; + (*shm).shm_str + [(size_of::<[c_char; 20]>() as c_ulong).wrapping_sub(1 as c_int as c_ulong) as usize] = + '\u{0}' as u8; (*shm).map = shmat((*shm).shm_id, 0 as *const c_void, 0 as c_int) as *mut c_uchar; if (*shm).map == -(1 as c_int) as *mut c_void as *mut c_uchar || (*shm).map.is_null() { shmctl((*shm).shm_id, 0 as c_int, 0 as *mut shmid_ds); @@ -175,7 +182,7 @@ pub unsafe fn afl_shmem_init(shm: *mut AflShmem, map_size: c_ulong) -> *mut c_uc pub unsafe fn afl_shmem_by_str( shm: *mut AflShmem, shm_str: &CStr, - map_size: c_ulong, + map_size: usize, ) -> *mut c_uchar { if shm.is_null() || shm_str.to_bytes().len() == 0 || map_size == 0 { return 0 as *mut c_uchar; @@ -185,7 +192,7 @@ pub unsafe fn afl_shmem_by_str( strncpy( (*shm).shm_str.as_mut_ptr() as *mut c_char, shm_str.as_ptr() as *const c_char, - (::std::mem::size_of::<[c_char; 20]>() as c_ulong).wrapping_sub(1 as c_int as c_ulong), + (size_of::<[c_char; 20]>() as c_ulong).wrapping_sub(1 as c_int as c_ulong), ); (*shm).shm_id = shm_str .to_str() @@ -195,8 +202,8 @@ pub unsafe fn afl_shmem_by_str( (*shm).map = shmat((*shm).shm_id, 0 as *const c_void, 0 as c_int) as *mut c_uchar; if (*shm).map 
== -(1 as c_int) as *mut c_void as *mut c_uchar { (*shm).map = 0 as *mut c_uchar; - (*shm).map_size = 0 as c_int as c_ulong; - (*shm).shm_str[0 as c_int as usize] = '\u{0}' as u8; + (*shm).map_size = 0; + (*shm).shm_str[0] = '\u{0}' as u8; return 0 as *mut c_uchar; } return (*shm).map; @@ -211,7 +218,7 @@ pub unsafe fn afl_shmem_to_env_var(shmem: &AflShmem, env_name: &CStr) -> c_uint let mut shm_str: [c_char; 256] = [0; 256]; snprintf( shm_str.as_mut_ptr(), - ::std::mem::size_of::<[c_char; 256]>() as c_ulong, + size_of::<[c_char; 256]>() as c_ulong, b"%d\x00" as *const u8 as *const c_char, (*shmem).shm_id, ); @@ -227,13 +234,13 @@ pub unsafe fn afl_shmem_to_env_var(shmem: &AflShmem, env_name: &CStr) -> c_uint let mut size_env_name: [c_char; 256] = [0; 256]; snprintf( size_env_name.as_mut_ptr(), - ::std::mem::size_of::<[c_char; 256]>() as c_ulong, + size_of::<[c_char; 256]>() as c_ulong, b"%s_SIZE\x00" as *const u8 as *const c_char, env_name, ); snprintf( shm_str.as_mut_ptr(), - ::std::mem::size_of::<[c_char; 256]>() as c_ulong, + size_of::<[c_char; 256]>() as c_ulong, b"%d\x00" as *const u8 as *const c_char, (*shmem).shm_id, ); diff --git a/afl/src/lib.rs b/afl/src/lib.rs index c19f048093..1c1f204600 100644 --- a/afl/src/lib.rs +++ b/afl/src/lib.rs @@ -3,6 +3,10 @@ #[macro_use] extern crate alloc; +#[cfg_attr(feature = "std")] +#[macro_use] +extern crate memoffset; // for offset_of + pub mod corpus; pub mod engines; pub mod events; diff --git a/afl/src/metamap.rs b/afl/src/metamap.rs index 979e641fcf..4bac85604d 100644 --- a/afl/src/metamap.rs +++ b/afl/src/metamap.rs @@ -307,10 +307,7 @@ impl NamedAnyMap { if !self.map.contains_key(&typeid) { self.map.insert(typeid, HashMap::default()); } - self.map - .get_mut(&typeid) - .unwrap() - .insert(name, Box::new(t)); + self.map.get_mut(&typeid).unwrap().insert(name, Box::new(t)); } pub fn len(&self) -> usize { From 6f25fabe078048a20e2423b78700911bd0083ded Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 
17:41:23 +0100 Subject: [PATCH 02/17] builds --- afl/Cargo.toml | 3 +- afl/src/events/llmp.rs | 25 +- afl/src/events/llmp_translated.rs | 722 ++++++++++------------------- afl/src/events/shmem_translated.rs | 4 +- afl/src/lib.rs | 4 - 5 files changed, 243 insertions(+), 515 deletions(-) diff --git a/afl/Cargo.toml b/afl/Cargo.toml index 4eb56e00d5..fcac574c2a 100644 --- a/afl/Cargo.toml +++ b/afl/Cargo.toml @@ -33,5 +33,4 @@ num = "*" xxhash-rust = { version = "0.8.0", features = ["xxh3"] } # xxh3 hashing for rust serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib erased-serde = "0.3.12" -postcard = "0.5.1" # no_std compatible serde serialization fromat -memoffset = "0.6" # for offset_of support \ No newline at end of file +postcard = "0.5.1" # no_std compatible serde serialization fromat \ No newline at end of file diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp.rs index f6adf68eab..fd99b9d5ae 100644 --- a/afl/src/events/llmp.rs +++ b/afl/src/events/llmp.rs @@ -1,5 +1,4 @@ use core::marker::PhantomData; -use core::ptr; use std::{ffi::c_void, io::Read, io::Write, net::TcpListener}; use crate::{ @@ -7,11 +6,12 @@ use crate::{ }; use super::{ - llmp_translated::{LlmpBroker, LlmpClient, LlmpClientloopFn, LlmpMsgHookFn}, + llmp_translated::{LlmpBroker, LlmpClient, LlmpMsgHookFn}, Event, EventManager, }; -pub unsafe fn llmp_tcp_server_clientloop(client: *mut LlmpClient, _data: *mut c_void) -> ! { +/* +pub unsafe fn llmp_tcp_server_clientloop(client: &mut LlmpClient, _data: *mut c_void) -> ! { // Later in the execution, after the initial map filled up, // the current broacast map will will point to a different map. 
// However, the original map is (as of now) never freed, new clients will start @@ -57,6 +57,7 @@ pub unsafe fn llmp_tcp_server_clientloop(client: *mut LlmpClient, _data: *mut c_ } } } +*/ /// Eventmanager for multi-processed application #[cfg(feature = "std")] @@ -126,6 +127,7 @@ where } } +/* #[cfg(feature = "std")] impl LLMPEventManager where @@ -141,20 +143,5 @@ where broker_message_hook: LlmpMsgHookFn, clientloops: LlmpClientloopFn, ) -> ! { - unsafe { - let mut broker = LlmpBroker::new().expect("Failed to create llmp"); - - for i in 0..process_count - 1 { - println!("Adding client {}", i); - broker - .register_childprocess_clientloop(clientloops, ptr::null_mut()) - .expect("could not add child clientloop"); - } - - println!("Spawning broker"); - broker.add_message_hook(broker_message_hook, ptr::null_mut()); - - broker.run(); - } } -} +}*/ diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp_translated.rs index df0b6a96bb..0878dca724 100644 --- a/afl/src/events/llmp_translated.rs +++ b/afl/src/events/llmp_translated.rs @@ -48,21 +48,19 @@ Then register some clientloops using llmp_broker_register_threaded_clientloop */ -use ::libc; - use core::ptr; use core::sync::atomic::{compiler_fence, Ordering}; -use core::{ffi::c_void, time}; -use libc::{c_int, c_uint, c_ulong, c_ushort}; -use std::{cmp::max, ffi::CStr, mem::size_of, os::raw::c_char, thread}; +use core::time; +use libc::{c_uint, c_ulong, c_ushort}; +use std::{cmp::max, ffi::CStr, mem::size_of, thread}; use crate::utils::next_pow2; use crate::AflError; -use super::shmem_translated::{afl_shmem_by_str, afl_shmem_deinit, afl_shmem_init, AflShmem}; +use super::shmem_translated::AflShmem; /// The header length of a llmp page in a shared map (until messages start) -const LLMP_PAGE_HEADER_LEN: usize = offset_of!(LlmpPage, messages); +const LLMP_PAGE_HEADER_LEN: usize = size_of::(); /// We'll start off with 256 megabyte maps per fuzzer const LLMP_INITIAL_MAP_SIZE: usize = 1 << 28; @@ -77,38 +75,49 
@@ const LLMP_TAG_END_OF_PAGE: u32 = 0xaf1e0f1; /// A new client for this broekr got added. const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xc11e471; -extern "C" { - #[no_mangle] - fn memcpy(_: *mut c_void, _: *const c_void, _: c_ulong) -> *mut c_void; - #[no_mangle] - fn memmove(_: *mut c_void, _: *const c_void, _: c_ulong) -> *mut c_void; - #[no_mangle] - fn memset(_: *mut c_void, _: c_int, _: c_ulong) -> *mut c_void; -} - pub type AflRet = c_uint; pub const AFL_RET_ALLOC: AflRet = 3; pub const AFL_RET_SUCCESS: AflRet = 0; +/// Sending end on a (unidirectional) sharedmap channel #[derive(Clone)] pub struct LlmpSender { + /// ID of this sender. Only used in the broker. pub id: u32, + /// Ref to the last message this sender sent on the last page. + /// If null, a new page (just) started. pub last_msg_sent: *mut LlmpMsg, + /// A vec of page wrappers, each containing an intialized AfShmem pub out_maps: Vec, + /// If true, pages will never be pruned. + /// The broker uses this feature. + /// By keeping the message history around, + /// new clients may join at any time in the future. + pub keep_pages_forever: bool, } +/// Receiving end on a (unidirectional) sharedmap channel +#[derive(Clone)] +pub struct LlmpReceiver { + pub id: u32, + /// Pointer to the last meg this received + pub last_msg_recvd: *mut LlmpMsg, + /// current page. 
After EOP, this gets replaced with the new one + pub current_recv_map: LlmpPageWrapper, +} + +/// Client side of LLMP #[derive(Clone)] pub struct LlmpClient { pub llmp_out: LlmpSender, - pub last_msg_recvd: *mut LlmpMsg, - pub current_broadcast_map: LlmpPageWrapper, - pub last_msg_sent: *mut LlmpMsg, - pub out_maps: Vec, - pub new_out_page_hooks: Vec>, + pub llmp_in: LlmpReceiver, } +/// A page wrapper #[derive(Clone)] -struct LlmpPageWrapper { +pub struct LlmpPageWrapper { + /// Shmem containg the actual (unsafe) page, + /// shared between one LlmpSender and one LlmpReceiver shmem: AflShmem, } @@ -116,7 +125,7 @@ struct LlmpPageWrapper { impl LlmpPageWrapper { /// Creates a new page with minimum prev_max_alloc_size or LLMP_INITIAL_MAP_SIZE /// returning the initialized shared mem struct - unsafe fn new(sender: u32, min_size: usize) -> Result { + pub unsafe fn new(sender: u32, min_size: usize) -> Result { // Create a new shard page. let mut shmem = AflShmem::new(new_map_size(min_size))?; _llmp_page_init(&mut shmem, sender); @@ -124,64 +133,57 @@ impl LlmpPageWrapper { } /// Initialize from a 0-terminated sharedmap id string and its size - unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result { + pub unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result { let shmem = AflShmem::from_str(shm_str, map_size)?; // Not initializing the page here - the other side should have done it already! Ok(Self { shmem }) } /// Initialize from a shm_str with fixed len of 20 - unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { let shmem = AflShmem::from_name_slice(shm_str, map_size)?; // Not initializing the page here - the other side should have done it already! 
Ok(Self { shmem }) } - unsafe fn page(&self) -> *mut LlmpPage { + /// Get the unsafe ptr to this page, situated on the shared map + pub unsafe fn page(&self) -> *mut LlmpPage { shmem2page(&self.shmem) } } -#[derive(Copy, Clone)] -#[repr(C)] -pub struct LlmpHookdataGeneric { - pub func: T, - pub data: *mut c_void, -} - +/// Message sent over the "wire" #[derive(Copy, Clone)] #[repr(C, packed)] pub struct LlmpMsg { + /// A tag pub tag: u32, + /// Sender of this messge pub sender: u32, + /// The message ID, unique per page pub message_id: u64, + /// Buffer length as specified by the user pub buf_len: u64, + /// (Actual) buffer length after padding pub buf_len_padded: u64, + /// The buf pub buf: [u8; 0], } +/// The broker (node 0) #[derive(Clone)] #[repr(C)] pub struct LlmpBroker { + /// Broadcast map from broker to all clients pub llmp_out: LlmpSender, - pub msg_hooks: Vec>, - pub llmp_clients: Vec, + /// Users of Llmp can add message handlers in the broker. + /// This allows us to intercept messages right in the broker + /// This keeps the out map clean. 
+ pub msg_hooks: Vec, + pub llmp_clients: Vec, } -#[derive(Clone)] -#[repr(C)] -pub struct LlmpBrokerClientMetadata { - pub id: u32, - pub cur_client_map: LlmpPageWrapper, - pub last_msg_broker_read: *mut LlmpMsg, - pub clientloop: Option, - pub data: *mut c_void, -} - -/// The client loop, running for each spawned client -pub type LlmpClientloopFn = unsafe fn(client: *mut LlmpClient, data: *mut c_void) -> !; - -/// A share mem page, as used by llmp internally +/// Contents of the share mem pages, used by llmp internally #[derive(Copy, Clone)] #[repr(C, packed)] pub struct LlmpPage { @@ -204,15 +206,7 @@ pub enum LlmpMsgHookResult { } /// Message Hook -pub type LlmpMsgHookFn = unsafe fn( - broker: &LlmpBroker, - client_data: &LlmpBrokerClientMetadata, - msg: *mut LlmpMsg, - data: *mut c_void, -) -> LlmpMsgHookResult; - -/// Hook that gets called for each new page, created by LLMP -pub type LlmpClientNewPageHookFn = unsafe fn(client: &LlmpClient) -> (); +pub type LlmpMsgHookFn = unsafe fn(client_id: u32, msg: *mut LlmpMsg) -> LlmpMsgHookResult; /// Message payload when a client got added LLMP_TAG_CLIENT_ADDED_V1 */ /// This is an internal message! @@ -263,7 +257,7 @@ const fn llmp_align(to_align: usize) -> usize { /// enough. For now, we want to have at least enough space to store 2 of the /// largest messages we encountered (plus message one new_page message). 
#[inline] -const fn new_map_size(max_alloc: usize) -> usize { +fn new_map_size(max_alloc: usize) -> usize { next_pow2(max( max_alloc * 2 + EOP_MSG_SIZE + LLMP_PAGE_HEADER_LEN, LLMP_INITIAL_MAP_SIZE, @@ -288,7 +282,7 @@ unsafe fn _llmp_page_init(shmem: &mut AflShmem, sender: u32) { /* Pointer to the message behind the last message */ #[inline] -const unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { +unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { /* DBG("_llmp_next_msg_ptr %p %lu + %lu\n", last_msg, last_msg->buf_len_padded, sizeof(llmp_message)); */ return (last_msg as *mut u8) .offset(size_of::() as isize) @@ -296,15 +290,15 @@ const unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { } /* Read next message. */ -unsafe fn llmp_recv( - page_wrapper: &LlmpPageWrapper, - last_msg: *mut LlmpMsg, -) -> Option<*mut LlmpMsg> { +unsafe fn llmp_recv(receiver: &mut LlmpReceiver) -> Result, AflError> { /* DBG("llmp_recv %p %p\n", page, last_msg); */ compiler_fence(Ordering::SeqCst); - let page = page_wrapper.page(); + let page = receiver.current_recv_map.page(); + let last_msg = receiver.last_msg_recvd; let current_msg_id = ptr::read_volatile(&mut (*page).current_msg_id); - if current_msg_id == 0 { + + // Read the message from the page + let ret = if current_msg_id == 0 { /* No messages yet */ None } else if last_msg.is_null() { @@ -315,17 +309,60 @@ unsafe fn llmp_recv( None } else { Some(_llmp_next_msg_ptr(last_msg)) - } + }; + + // Let's see what we go here. + match ret { + Some(msg) => { + // Handle special, LLMP internal, messages. 
+ match (*msg).tag { + LLMP_TAG_UNSET => panic!("BUG: Read unallocated msg"), + LLMP_TAG_END_OF_PAGE => { + dbg!("Got end of page, allocing next"); + // Handle end of page + if (*msg).buf_len < size_of::() as u64 { + panic!(format!( + "Illegal message length for EOP (is {}, expected {})", + (*msg).buf_len_padded, + size_of::() + )); + } + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + + /* We can reuse the map mem space, no need to free and calloc. + However, the pageinfo points to the map we're about to unmap. + Clone the contents first to be safe (probably fine in rust eitner way). */ + let pageinfo_cpy = (*pageinfo).clone(); + + ptr::write_volatile(&mut (*page).save_to_unmap, 1); + receiver.current_recv_map = LlmpPageWrapper::from_name_slice( + &pageinfo_cpy.shm_str, + pageinfo_cpy.map_size, + )?; + dbg!( + "Got a new recv map", + receiver.current_recv_map.shmem.shm_str + ); + // After we mapped the new page, return the next message, if available + return llmp_recv(receiver); + } + _ => (), + } + + // Store the last msg for next time + receiver.last_msg_recvd = msg; + } + _ => (), + }; + Ok(ret) } /* Blocks/spins until the next message gets posted to the page, then returns that message. 
*/ -pub unsafe fn llmp_recv_blocking( - page_wrapper: &LlmpPageWrapper, - last_msg: *mut LlmpMsg, -) -> *mut LlmpMsg { +pub unsafe fn llmp_recv_blocking(receiver: &mut LlmpReceiver) -> Result<*mut LlmpMsg, AflError> { let mut current_msg_id = 0; - let page = page_wrapper.page(); + let page = receiver.current_recv_map.page(); + let last_msg = receiver.last_msg_recvd; if !last_msg.is_null() { if (*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg) { panic!("BUG: full page passed to await_message_blocking or reset failed"); @@ -335,8 +372,8 @@ pub unsafe fn llmp_recv_blocking( loop { compiler_fence(Ordering::SeqCst); if ptr::read_volatile(&mut (*page).current_msg_id) != current_msg_id { - return match llmp_recv(page_wrapper, last_msg) { - Some(msg) => msg, + return match llmp_recv(receiver)? { + Some(msg) => Ok(msg), None => panic!("BUG: blocking llmp message should never be NULL"), }; } @@ -375,7 +412,7 @@ unsafe fn llmp_alloc_eop(page: *mut LlmpPage, last_msg: *const LlmpMsg) -> *mut /// Will return a ptr to the next msg buf, or None if map is full. /// Never call alloc_next without either sending or cancelling the last allocated message for this page! /// There can only ever be up to one message allocated per page at each given time. -unsafe fn llmp_alloc_next(llmp: &mut LlmpSender, buf_len: usize) -> Option<*mut LlmpMsg> { +unsafe fn llmp_alloc_next_if_space(llmp: &mut LlmpSender, buf_len: usize) -> Option<*mut LlmpMsg> { let mut buf_len_padded = buf_len; let mut complete_msg_size = llmp_align(size_of::() + buf_len_padded); let page = llmp.out_maps.last().unwrap().page(); @@ -453,39 +490,44 @@ unsafe fn llmp_alloc_next(llmp: &mut LlmpSender, buf_len: usize) -> Option<*mut /// Commit the message last allocated by llmp_alloc_next to the queue. /// After commiting, the msg shall no longer be altered! 
/// It will be read by the consuming threads (broker->clients or client->broker) -unsafe fn llmp_send(page: *mut LlmpPage, msg: *mut LlmpMsg) -> Result<(), AflError> { +unsafe fn llmp_send(sender: &mut LlmpSender, msg: *mut LlmpMsg) -> Result<(), AflError> { + if sender.last_msg_sent == msg { + panic!("Message sent twice!"); + } if (*msg).tag == LLMP_TAG_UNSET as c_uint { panic!(format!( "No tag set on message with id {}", (*msg).message_id )); } + let page = sender.out_maps.last().unwrap().page(); if msg.is_null() || !llmp_msg_in_page(page, msg) { return Err(AflError::Unknown(format!( "Llmp Message {:?} is null or not in current page", msg ))); } + (*msg).message_id = (*page).current_msg_id + 1; compiler_fence(Ordering::SeqCst); ptr::write_volatile(&mut (*page).current_msg_id, (*msg).message_id); compiler_fence(Ordering::SeqCst); + sender.last_msg_sent = msg; Ok(()) } /// listener about it using a EOP message. -unsafe fn llmp_handle_out_eop(llmp: &mut LlmpSender) -> Result<(), AflError> { - let map_count = llmp.out_maps.len(); - let mut old_map = llmp.out_maps.last_mut().unwrap().page(); +unsafe fn llmp_handle_out_eop(sender: &mut LlmpSender) -> Result<(), AflError> { + let old_map = sender.out_maps.last_mut().unwrap().page(); // Create a new shard page. 
- let mut new_map_shmem = LlmpPageWrapper::new((*old_map).sender, (*old_map).max_alloc_size)?; + let new_map_shmem = LlmpPageWrapper::new((*old_map).sender, (*old_map).max_alloc_size)?; let mut new_map = new_map_shmem.page(); ptr::write_volatile(&mut (*new_map).current_msg_id, (*old_map).current_msg_id); (*new_map).max_alloc_size = (*old_map).max_alloc_size; /* On the old map, place a last message linking to the new map for the clients * to consume */ - let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, llmp.last_msg_sent); + let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, sender.last_msg_sent); (*out).sender = (*old_map).sender; let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; @@ -493,32 +535,38 @@ unsafe fn llmp_handle_out_eop(llmp: &mut LlmpSender) -> Result<(), AflError> { (*end_of_page_msg).shm_str = new_map_shmem.shmem.shm_str; // We never sent a msg on the new buf */ - llmp.last_msg_sent = 0 as *mut LlmpMsg; + sender.last_msg_sent = 0 as *mut LlmpMsg; /* Send the last msg on the old buf */ - llmp_send(old_map, out)?; - llmp.out_maps.push(new_map_shmem); + llmp_send(sender, out)?; + + if !sender.keep_pages_forever { + llmp_prune_old_pages(sender); + } + + sender.out_maps.push(new_map_shmem); Ok(()) } -pub unsafe fn llmp_broker_alloc_next( - broker: &LlmpBroker, - len: usize, +/// Allocates the next space on this sender page +pub unsafe fn llmp_alloc_next( + sender: &mut LlmpSender, + buf_len: usize, ) -> Result<*mut LlmpMsg, AflError> { - match llmp_alloc_next(&mut broker.llmp_out, len) { + match llmp_alloc_next_if_space(sender, buf_len) { Some(msg) => return Ok(msg), _ => (), }; /* no more space left! 
We'll have to start a new page */ - llmp_handle_out_eop(&mut broker.llmp_out); + llmp_handle_out_eop(sender)?; - match llmp_alloc_next(&mut broker.llmp_out, len) { + match llmp_alloc_next_if_space(sender, buf_len) { Some(msg) => Ok(msg), None => Err(AflError::Unknown(format!( "Error allocating {} bytes in shmap", - len + buf_len ))), } } @@ -526,11 +574,14 @@ pub unsafe fn llmp_broker_alloc_next( impl LlmpBroker { /// Create and initialize a new llmp_broker pub unsafe fn new() -> Result { - let mut broker = LlmpBroker { + let broker = LlmpBroker { llmp_out: LlmpSender { id: 0, last_msg_sent: ptr::null_mut(), out_maps: vec![LlmpPageWrapper::new(0, 0)?], + // Broker never cleans up the pages so that new + // clients may join at any time + keep_pages_forever: true, }, msg_hooks: vec![], llmp_clients: vec![], @@ -539,44 +590,39 @@ impl LlmpBroker { Ok(broker) } + unsafe fn alloc_next(&mut self, buf_len: usize) -> Result<*mut LlmpMsg, AflError> { + llmp_alloc_next(&mut self.llmp_out, buf_len) + } + /// Registers a new client for the given sharedmap str and size. /// Returns the id of the new client in broker.client_map - unsafe fn register_client(&mut self, client_page: LlmpPageWrapper) { + pub unsafe fn register_client(&mut self, client_page: LlmpPageWrapper) { let id = self.llmp_clients.len() as u32; - self.llmp_clients.push(LlmpBrokerClientMetadata { + self.llmp_clients.push(LlmpReceiver { id, - cur_client_map: client_page, - last_msg_broker_read: 0 as *mut LlmpMsg, - clientloop: None, - data: 0 as *mut c_void, + current_recv_map: client_page, + last_msg_recvd: 0 as *mut LlmpMsg, }); } /// Adds a hook that gets called in the broker for each new message the broker touches. /// if the callback returns false, the message is not forwarded to the clients. 
*/ - pub fn add_message_hook(&mut self, hook: LlmpMsgHookFn, data: *mut c_void) { - self.msg_hooks - .push(LlmpHookdataGeneric { func: hook, data }); + pub fn add_message_hook(&mut self, hook: LlmpMsgHookFn) { + self.msg_hooks.push(hook); } /// For internal use: Forward the current message to the out map. unsafe fn forward_msg(&mut self, msg: *mut LlmpMsg) -> Result<(), AflError> { - let mut out: *mut LlmpMsg = llmp_broker_alloc_next(self, (*msg).buf_len_padded as usize)?; + let mut out: *mut LlmpMsg = self.alloc_next((*msg).buf_len_padded as usize)?; /* Copy over the whole message. If we should need zero copy, we could instead post a link to the original msg with the map_id and offset. */ let actual_size = (*out).buf_len_padded; - memcpy( - out as *mut c_void, - msg as *const c_void, - size_of::() as c_ulong + (*msg).buf_len_padded as c_ulong, - ); + msg.copy_to_nonoverlapping(out, size_of::() + (*msg).buf_len_padded as usize); (*out).buf_len_padded = actual_size; /* We need to replace the message ID with our own */ - let out_page = self.llmp_out.out_maps.last().unwrap().page(); - (*out).message_id = (*out_page).current_msg_id + 1; - match llmp_send(out_page, out) { + match llmp_send(&mut self.llmp_out, out) { Err(e) => panic!(format!("Error sending msg: {:?}", e)), _ => (), }; @@ -585,49 +631,22 @@ impl LlmpBroker { } /// broker broadcast to its own page for all others to read */ - unsafe fn handle_new_msgs( - &mut self, - client: &LlmpBrokerClientMetadata, - ) -> Result<(), AflError> { + unsafe fn handle_new_msgs(&mut self, client_id: u32) -> Result<(), AflError> { + let mut next_id = self.llmp_clients.len() as u32; + // TODO: We could memcpy a range of pending messages, instead of one by one. 
- /* DBG("llmp_broker_handle_new_msgs %p %p->%u\n", broker, client, client->client_state->id); */ - let incoming: *mut LlmpPage = client.cur_client_map.page(); - let mut current_message_id = if client.last_msg_broker_read.is_null() { - 0 - } else { - (*client.last_msg_broker_read).message_id - }; - - while current_message_id != ptr::read_volatile(&(*incoming).current_msg_id) { - let msg = match llmp_recv(&client.cur_client_map, (*client).last_msg_broker_read) { - None => { - panic!("No message received but not all message ids receved! Data out of sync?") + loop { + let msg = { + let mut client = &mut self.llmp_clients[client_id as usize]; + match llmp_recv(&mut client)? { + None => { + // We're done handling this client + return Ok(()); + } + Some(msg) => msg, } - Some(msg) => msg, }; - if (*msg).tag == LLMP_TAG_END_OF_PAGE { - // Handle end of page - if (*msg).buf_len < size_of::() as u64 { - panic!(format!( - "Illegal message length for EOP (is {}, expected {})", - (*msg).buf_len_padded, - size_of::() - )); - } - let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; - - /* We can reuse the map mem space, no need to free and calloc. - However, the pageinfo points to the map we're about to unmap. - Clone the contents first to be safe (probably fine in rust eitner way). */ - let mut pageinfo_cpy = (*pageinfo).clone(); - - let client_map = (*client).cur_client_map; - - ptr::write_volatile(&mut (*client_map.page()).save_to_unmap, 1); - client.cur_client_map = - LlmpPageWrapper::from_name_slice(&pageinfo_cpy.shm_str, pageinfo_cpy.map_size)?; - dbg!("Client got a new map", client.cur_client_map.shmem.shm_str); - } else if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT { + if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT { /* This client informs us about yet another new client add it to the list! Also, no need to forward this msg. 
*/ if (*msg).buf_len < size_of::() as u64 { @@ -638,50 +657,55 @@ impl LlmpBroker { } else { let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; - let client_id: u32 = client.id; match LlmpPageWrapper::from_name_slice( &(*pageinfo).shm_str, (*pageinfo).map_size, ) { - Ok(new_page) => self.register_client(new_page), + Ok(new_page) => { + let id = next_id; + next_id += 1; + self.llmp_clients.push(LlmpReceiver { + id, + current_recv_map: new_page, + last_msg_recvd: 0 as *mut LlmpMsg, + }); + } Err(e) => println!("Error adding client! {:?}", e), }; } } else { // The message is not specifically for use. Let the user handle it, then forward it to the clients, if necessary. let mut should_forward_msg = true; - for hook in self.msg_hooks { - match (hook.func)(&self, client, msg, hook.data) { + for hook in &self.msg_hooks { + match (hook)(client_id, msg) { LlmpMsgHookResult::Handled => should_forward_msg = false, _ => (), } } if should_forward_msg { - self.forward_msg(msg); + self.forward_msg(msg)?; } } - (*client).last_msg_broker_read = msg; - current_message_id = (*msg).message_id } - Ok(()) } /// The broker walks all pages and looks for changes, then broadcasts them on /// its own shared page, once. - pub unsafe fn once(&mut self) { + pub unsafe fn once(&mut self) -> Result<(), AflError> { compiler_fence(Ordering::SeqCst); - let mut i: u32 = 0; - for client in self.llmp_clients { - self.handle_new_msgs(&client); + for i in 0..self.llmp_clients.len() { + self.handle_new_msgs(i as u32)?; } + Ok(()) } /// Loops infinitely, forwarding and handling all incoming messages from clients. - /// Never returns. + /// Never returns. Panics on error. pub unsafe fn broker_loop(&mut self) -> ! { loop { compiler_fence(Ordering::SeqCst); - self.once(); + self.once() + .expect("An error occurred when brokering. 
Exiting."); /* 5 milis of sleep for now to not busywait at 100% */ thread::sleep(time::Duration::from_millis(5)); @@ -689,353 +713,75 @@ impl LlmpBroker { } } -/// A new page will be used. Notify each registered hook in the client about this fact. -unsafe fn llmp_clien_trigger_new_out_page_hooks(client: &LlmpClient) { - for hook in client.new_out_page_hooks { - (hook.func)(client); - } -} - -/// A wrapper around unpacking the data, calling through to the loop -unsafe fn _llmp_client_wrapped_loop(llmp_client_broker_metadata_ptr: *mut c_void) -> ! { - let metadata: *mut LlmpBrokerClientMetadata = - llmp_client_broker_metadata_ptr as *mut LlmpBrokerClientMetadata; - /* Before doing anything else:, notify registered hooks about the new page we're about to use */ - llmp_clien_trigger_new_out_page_hooks((*metadata).client_state); - - (*metadata).clientloop.expect("non-null function pointer")( - (*metadata).client_state, - (*metadata).data, - ); -} - -/// For non zero-copy, we want to get rid of old pages with duplicate messages +/// For non zero-copy, we want to get rid of old pages with duplicate messages in the client /// eventually. This function This funtion sees if we can unallocate older pages. /// The broker would have informed us by setting the save_to_unmap-flag. -unsafe fn llmp_client_prune_old_pages(client: *mut LlmpClient) { - let current_map: *mut u8 = (*(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1 as c_ulong) as isize)) - .map; - /* look for pages that are save_to_unmap, then unmap them. */ - while (*(*client).out_maps.offset(0 as isize)).map != current_map - && (*shmem2page(&mut *(*client).out_maps.offset(0 as isize))).save_to_unmap as c_int != 0 - { - /* This page is save to unmap. The broker already reads or read it. */ - afl_shmem_deinit(&mut *(*client).out_maps.offset(0 as isize)); - /* We remove at the start, move the other pages back. 
*/ - memmove( - (*client).out_maps as *mut c_void, - (*client).out_maps.offset(1 as isize) as *const c_void, - (*client) - .out_map_count - .wrapping_sub(1 as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ); - (*client).out_map_count = (*client).out_map_count.wrapping_sub(1) +unsafe fn llmp_prune_old_pages(sender: &mut LlmpSender) { + // Exclude the current page by splitting of the last element for this iter + let mut unmap_until_excl = 0; + for map in sender.out_maps.split_last().unwrap().1 { + if (*map.page()).save_to_unmap == 0 { + // The broker didn't read this page yet, no more pages to unmap. + break; + } + unmap_until_excl += 1; } + // Remove all maps that the broker already mapped + // simply removing them from the vec should then call drop and unmap them. + sender.out_maps.drain(0..unmap_until_excl); } -/// We don't have any space. Send eop, then continue on a new page. -unsafe fn llmp_client_handle_out_eop(client: *mut LlmpClient) -> bool { - (*client).out_maps = llmp_handle_out_eop( - (*client).out_maps, - &mut (*client).out_map_count, - &mut (*client).last_msg_sent, - ); - if (*client).out_maps.is_null() { - return 0 as c_int != 0; +impl LlmpClient { + /// Creates a new LlmpClient + pub unsafe fn new(initial_broker_page: LlmpPageWrapper) -> Result { + Ok(Self { + llmp_out: LlmpSender { + id: 0, + last_msg_sent: 0 as *mut LlmpMsg, + out_maps: vec![LlmpPageWrapper::new(0, LLMP_INITIAL_MAP_SIZE)?], + // drop pages to the broker if it already read them + keep_pages_forever: false, + }, + llmp_in: LlmpReceiver { + id: 0, + current_recv_map: initial_broker_page, + last_msg_recvd: 0 as *mut LlmpMsg, + }, + }) } - /* Prune old pages! - This is a good time to see if we can unallocate older pages. - The broker would have informed us by setting the flag - */ - llmp_client_prune_old_pages(client); - /* So we got a new page. 
Inform potential hooks */ - llmp_clien_trigger_new_out_page_hooks(client); - return 1 as c_int != 0; } /// A client receives a broadcast message. /// Returns null if no message is availiable -pub unsafe fn llmp_client_recv(client: *mut LlmpClient) -> *mut LlmpMsg { - loop { - let msg = llmp_recv( - shmem2page((*client).current_broadcast_map), - (*client).last_msg_recvd, - ); - if msg.is_null() { - return 0 as *mut LlmpMsg; - } - (*client).last_msg_recvd = msg; - if (*msg).tag == LLMP_TAG_UNSET as c_uint { - panic!("BUG: Read unallocated msg"); - } else { - if (*msg).tag == LLMP_TAG_END_OF_PAGE as c_uint { - /* we reached the end of the current page. - We'll init a new page but can reuse the mem are of the current map. - However, we cannot use the message if we deinit its page, so let's copy */ - let mut pageinfo_cpy: LlmpPayloadSharedMap = LlmpPayloadSharedMap { - map_size: 0, - shm_str: [0; 20], - }; - let broadcast_map: *mut AflShmem = (*client).current_broadcast_map; - let pageinfo: *mut LlmpPayloadSharedMap = { - let mut _msg: *mut LlmpMsg = msg; - (if (*_msg).buf_len >= ::std::mem::size_of::() as c_ulong - { - (*_msg).buf.as_mut_ptr() - } else { - 0 as *mut u8 - }) as *mut LlmpPayloadSharedMap - }; - if pageinfo.is_null() { - panic!(format!( - "Illegal message length for EOP (is {}, expected {})", - (*msg).buf_len_padded, - ::std::mem::size_of::() as c_ulong - )); - } - memcpy( - &mut pageinfo_cpy as *mut LlmpPayloadSharedMap as *mut c_void, - pageinfo as *const c_void, - ::std::mem::size_of::() as c_ulong, - ); - /* Never read by broker broker: shmem2page(map)->save_to_unmap = true; */ - afl_shmem_deinit(broadcast_map); - if afl_shmem_by_str( - (*client).current_broadcast_map, - CStr::from_bytes_with_nul(&(*pageinfo).shm_str).expect("Illegal shm_str"), - (*pageinfo).map_size, - ) - .is_null() - { - panic!(format!( - "Could not get shmem by str for map {:?} of size {}", - (*pageinfo).shm_str.as_mut_ptr(), - (*pageinfo).map_size - )); - } - } else { - return 
msg; - } - } - } +pub unsafe fn llmp_client_recv(client: &mut LlmpClient) -> Result, AflError> { + llmp_recv(&mut client.llmp_in) } /// A client blocks/spins until the next message gets posted to the page, /// then returns that message. -pub unsafe fn llmp_client_recv_blocking(client: *mut LlmpClient) -> *mut LlmpMsg { - let mut page: *mut LlmpPage = shmem2page((*client).current_broadcast_map); - loop { - compiler_fence(Ordering::SeqCst); - /* busy-wait for a new msg_id to show up in the page */ - if (*page).current_msg_id - != (if !(*client).last_msg_recvd.is_null() { - (*(*client).last_msg_recvd).message_id - } else { - 0 as c_uint - }) as c_ulong - { - let ret: *mut LlmpMsg = llmp_client_recv(client); - if !ret.is_null() { - return ret; - } - /* last msg will exist, even if EOP was handled internally */ - page = shmem2page((*client).current_broadcast_map) - } - } +pub unsafe fn llmp_client_recv_blocking(client: &mut LlmpClient) -> Result<*mut LlmpMsg, AflError> { + llmp_recv_blocking(&mut client.llmp_in) } /// The current page could have changed in recv (EOP) /// Alloc the next message, internally handling end of page by allocating a new one. -pub unsafe fn llmp_client_alloc_next(client: *mut LlmpClient, size: usize) -> *mut LlmpMsg { - if client.is_null() { - panic!("Client is NULL"); - } - let mut msg = llmp_alloc_next( - shmem2page( - &mut *(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1) as isize), - ), - (*client).last_msg_sent, - size as c_ulong, - ); - if msg.is_null() { - let last_map_count: c_ulong = (*client).out_map_count; - /* Page is full -> Tell broker and start from the beginning. - Also, pray the broker got all messaes we're overwriting. 
:) */ - if !llmp_client_handle_out_eop(client) { - return 0 as *mut LlmpMsg; - } - if (*client).out_map_count == last_map_count - || (*(*shmem2page( - &mut *(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1) as isize), - )) - .messages - .as_mut_ptr()) - .tag != LLMP_TAG_UNSET as c_uint - { - panic!("Error in handle_out_eop"); - } - /* The client_out_map will have been changed by llmp_handle_out_eop. Don't - * alias. - */ - msg = llmp_alloc_next( - shmem2page( - &mut *(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1) as isize), - ), - 0 as *mut LlmpMsg, - size as c_ulong, - ); - if msg.is_null() { - return 0 as *mut LlmpMsg; - } - } - (*msg).sender = (*client).id; - (*msg).message_id = if !(*client).last_msg_sent.is_null() { - (*(*client).last_msg_sent).message_id.wrapping_add(1) - } else { - 1 as c_uint - }; - /* DBG("Allocated message at loc %p with buflen %ld", msg, msg->buf_len_padded); */ - return msg; +pub unsafe fn llmp_client_alloc_next( + client: &mut LlmpClient, + buf_len: usize, +) -> Result<*mut LlmpMsg, AflError> { + llmp_alloc_next(&mut client.llmp_out, buf_len) } /// Cancel send of the next message, this allows us to allocate a new message without sending this one. 
-pub unsafe fn llmp_client_cancel(client: *mut LlmpClient, mut msg: *mut LlmpMsg) { +pub unsafe fn llmp_cancel_send(sender: &mut LlmpSender, msg: *mut LlmpMsg) { /* DBG("Client %d cancels send of msg at %p with tag 0x%X and size %ld", client->id, msg, msg->tag, * msg->buf_len_padded); */ - let mut page: *mut LlmpPage = shmem2page( - &mut *(*client) - .out_maps - .offset((*client).out_map_count.wrapping_sub(1 as c_ulong) as isize), - ); - (*msg).tag = LLMP_TAG_UNSET as c_uint; - (*page).size_used = ((*page).size_used as c_ulong).wrapping_sub( - (*msg) - .buf_len_padded - .wrapping_add(::std::mem::size_of::() as c_ulong), - ) as c_ulong; -} -/* Commits a msg to the client's out ringbuf */ -pub unsafe fn llmp_client_send( - client_state: *mut LlmpClient, - msg: *mut LlmpMsg, -) -> Result<(), AflError> { - let page: *mut LlmpPage = shmem2page( - &mut *(*client_state) - .out_maps - .offset((*client_state).out_map_count.wrapping_sub(1) as isize), - ); - llmp_send(page, msg)?; - (*client_state).last_msg_sent = msg; - Ok(()) + let page = sender.out_maps.last().unwrap().page(); + (*msg).tag = LLMP_TAG_UNSET; + (*page).size_used -= (*msg).buf_len_padded as usize + size_of::(); } -/// Creates a new, unconnected, client state -pub unsafe fn llmp_client_new_unconnected() -> *mut LlmpClient { - let client_state: *mut LlmpClient = - calloc(1 as c_ulong, ::std::mem::size_of::() as c_ulong) as *mut LlmpClient; - (*client_state).current_broadcast_map = - calloc(1 as c_ulong, ::std::mem::size_of::() as c_ulong) as *mut AflShmem; - if (*client_state).current_broadcast_map.is_null() { - return 0 as *mut LlmpClient; - } - (*client_state).out_maps = afl_realloc( - (*client_state).out_maps as *mut c_void, - (1 as c_ulong).wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut AflShmem; - if (*client_state).out_maps.is_null() { - free((*client_state).current_broadcast_map as *mut c_void); - free(client_state as *mut c_void); - return 0 as *mut LlmpClient; - } - 
(*client_state).out_map_count = 1 as c_ulong; - if llmp_new_page_shmem( - &mut *(*client_state).out_maps.offset(0 as isize), - (*client_state).id as c_ulong, - LLMP_INITIAL_MAP_SIZE, - ) - .is_null() - { - afl_free((*client_state).out_maps as *mut c_void); - free((*client_state).current_broadcast_map as *mut c_void); - free(client_state as *mut c_void); - return 0 as *mut LlmpClient; - } - (*client_state).new_out_page_hook_count = 0 as c_ulong; - (*client_state).new_out_page_hooks = 0 as *mut LlmpHookdataGeneric; - return client_state; -} - -/// Destroys the given cient state -pub unsafe fn llmp_client_delete(client_state: *mut LlmpClient) { - let mut i: c_ulong = 0; - while i < (*client_state).out_map_count { - afl_shmem_deinit(&mut *(*client_state).out_maps.offset(i as isize)); - i = i.wrapping_add(1) - } - afl_free((*client_state).out_maps as *mut c_void); - (*client_state).out_maps = 0 as *mut AflShmem; - (*client_state).out_map_count = 0 as c_ulong; - afl_free((*client_state).new_out_page_hooks as *mut c_void); - (*client_state).new_out_page_hooks = 0 as *mut LlmpHookdataGeneric; - (*client_state).new_out_page_hook_count = 0 as c_ulong; - afl_shmem_deinit((*client_state).current_broadcast_map); - free((*client_state).current_broadcast_map as *mut c_void); - (*client_state).current_broadcast_map = 0 as *mut AflShmem; - free(client_state as *mut c_void); -} - -impl Drop for LlmpClient { - fn drop(&mut self) { - unsafe { llmp_client_delete(self) }; - } -} - -/// Adds a hook that gets called in the client for each new outgoing page the client creates. 
-pub unsafe fn llmp_client_add_new_out_page_hook( - client: *mut LlmpClient, - hook: Option, - data: *mut c_void, -) -> AflRet { - return llmp_add_hook_generic( - &mut (*client).new_out_page_hooks, - &mut (*client).new_out_page_hook_count, - ::std::mem::transmute::, *mut c_void>(hook), - data, - ); -} - -/// Clean up the broker instance -unsafe fn llmp_broker_deinit(broker: *mut LlmpBroker) { - let mut i: c_ulong; - i = 0 as c_ulong; - while i < (*broker).broadcast_map_count { - afl_shmem_deinit(&mut *(*broker).broadcast_maps.offset(i as isize)); - i = i.wrapping_add(1) - } - i = 0 as c_ulong; - while i < (*broker).llmp_client_count { - afl_shmem_deinit((*(*broker).llmp_clients.offset(i as isize)).cur_client_map); - free((*(*broker).llmp_clients.offset(i as isize)).cur_client_map as *mut c_void); - i = i.wrapping_add(1) - // TODO: Properly clean up the client - } - afl_free((*broker).broadcast_maps as *mut c_void); - (*broker).broadcast_map_count = 0 as c_ulong; - afl_free((*broker).llmp_clients as *mut c_void); - (*broker).llmp_client_count = 0 as c_ulong; -} - -impl Drop for LlmpBroker { - fn drop(&mut self) { - unsafe { llmp_broker_deinit(self) }; - } +/// Commits a msg to the client's out map +pub unsafe fn llmp_client_send(client: &mut LlmpClient, msg: *mut LlmpMsg) -> Result<(), AflError> { + llmp_send(&mut client.llmp_out, msg) } diff --git a/afl/src/events/shmem_translated.rs b/afl/src/events/shmem_translated.rs index cbf2866bde..0f1b901ef1 100644 --- a/afl/src/events/shmem_translated.rs +++ b/afl/src/events/shmem_translated.rs @@ -94,8 +94,8 @@ impl AflShmem { Ok(ret) } else { Err(AflError::Unknown(format!( - "Could not allocate map with id {:?}", - shm_str + "Could not allocate map with id {:?} and size {}", + shm_str, map_size ))) } } diff --git a/afl/src/lib.rs b/afl/src/lib.rs index 1c1f204600..c19f048093 100644 --- a/afl/src/lib.rs +++ b/afl/src/lib.rs @@ -3,10 +3,6 @@ #[macro_use] extern crate alloc; -#[cfg_attr(feature = "std")] -#[macro_use] 
-extern crate memoffset; // for offset_of - pub mod corpus; pub mod engines; pub mod events; From f9782b48d4da9c4f3ce8d8e152d654fc1ad3149a Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 19:10:54 +0100 Subject: [PATCH 03/17] more rusty --- afl/src/events/llmp_translated.rs | 747 +++++++++++++++--------------- 1 file changed, 372 insertions(+), 375 deletions(-) diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp_translated.rs index 0878dca724..71681bf2ad 100644 --- a/afl/src/events/llmp_translated.rs +++ b/afl/src/events/llmp_translated.rs @@ -75,9 +75,15 @@ const LLMP_TAG_END_OF_PAGE: u32 = 0xaf1e0f1; /// A new client for this broekr got added. const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xc11e471; -pub type AflRet = c_uint; -pub const AFL_RET_ALLOC: AflRet = 3; -pub const AFL_RET_SUCCESS: AflRet = 0; +/// What byte count to align messages to +/// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value +const LLMP_ALIGNNMENT: usize = 64; + +/// Size of a new page message, header, payload, and alignment +const EOP_MSG_SIZE: usize = llmp_align(size_of::() + size_of::()); + +/// Message Hook +pub type LlmpMsgHookFn = unsafe fn(client_id: u32, msg: *mut LlmpMsg) -> LlmpMsgHookResult; /// Sending end on a (unidirectional) sharedmap channel #[derive(Clone)] @@ -88,7 +94,7 @@ pub struct LlmpSender { /// If null, a new page (just) started. pub last_msg_sent: *mut LlmpMsg, /// A vec of page wrappers, each containing an intialized AfShmem - pub out_maps: Vec, + pub out_maps: Vec, /// If true, pages will never be pruned. /// The broker uses this feature. /// By keeping the message history around, @@ -103,7 +109,7 @@ pub struct LlmpReceiver { /// Pointer to the last meg this received pub last_msg_recvd: *mut LlmpMsg, /// current page. 
After EOP, this gets replaced with the new one - pub current_recv_map: LlmpPageWrapper, + pub current_recv_map: LlmpSharedMap, } /// Client side of LLMP @@ -115,43 +121,11 @@ pub struct LlmpClient { /// A page wrapper #[derive(Clone)] -pub struct LlmpPageWrapper { +pub struct LlmpSharedMap { /// Shmem containg the actual (unsafe) page, /// shared between one LlmpSender and one LlmpReceiver shmem: AflShmem, } - -/// The page struct, placed on a shared mem instance. -impl LlmpPageWrapper { - /// Creates a new page with minimum prev_max_alloc_size or LLMP_INITIAL_MAP_SIZE - /// returning the initialized shared mem struct - pub unsafe fn new(sender: u32, min_size: usize) -> Result { - // Create a new shard page. - let mut shmem = AflShmem::new(new_map_size(min_size))?; - _llmp_page_init(&mut shmem, sender); - Ok(Self { shmem }) - } - - /// Initialize from a 0-terminated sharedmap id string and its size - pub unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result { - let shmem = AflShmem::from_str(shm_str, map_size)?; - // Not initializing the page here - the other side should have done it already! - Ok(Self { shmem }) - } - - /// Initialize from a shm_str with fixed len of 20 - pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { - let shmem = AflShmem::from_name_slice(shm_str, map_size)?; - // Not initializing the page here - the other side should have done it already! - Ok(Self { shmem }) - } - - /// Get the unsafe ptr to this page, situated on the shared map - pub unsafe fn page(&self) -> *mut LlmpPage { - shmem2page(&self.shmem) - } -} - /// Message sent over the "wire" #[derive(Copy, Clone)] #[repr(C, packed)] @@ -205,9 +179,6 @@ pub enum LlmpMsgHookResult { ForwardToClients, } -/// Message Hook -pub type LlmpMsgHookFn = unsafe fn(client_id: u32, msg: *mut LlmpMsg) -> LlmpMsgHookResult; - /// Message payload when a client got added LLMP_TAG_CLIENT_ADDED_V1 */ /// This is an internal message! 
/// LLMP_TAG_END_OF_PAGE_V1 @@ -218,26 +189,20 @@ struct LlmpPayloadSharedMap { pub shm_str: [u8; 20], } +/// Get sharedmem from a page #[inline] unsafe fn shmem2page(afl_shmem: &AflShmem) -> *mut LlmpPage { afl_shmem.map as *mut LlmpPage } -/* If a msg is contained in the current page */ +/// Return, if a msg is contained in the current page unsafe fn llmp_msg_in_page(page: *mut LlmpPage, msg: *mut LlmpMsg) -> bool { /* DBG("llmp_msg_in_page %p within %p-%p\n", msg, page, page + page->size_total); */ return (page as *mut u8) < msg as *mut u8 && (page as *mut u8).offset((*page).size_total as isize) > msg as *mut u8; } -/// What byte count to align messages to -/// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value -const LLMP_ALIGNNMENT: usize = 64; - -/// Size of a new page message, header, payload, and alignment -const EOP_MSG_SIZE: usize = llmp_align(size_of::() + size_of::()); - -/* allign to LLMP_ALIGNNMENT=64 bytes */ +/// allign to LLMP_ALIGNNMENT=64 bytes #[inline] const fn llmp_align(to_align: usize) -> usize { // check if we need to align first @@ -264,8 +229,8 @@ fn new_map_size(max_alloc: usize) -> usize { ) as u64) as usize } -/* Initialize a new llmp_page. size should be relative to - * llmp_page->messages */ +/// Initialize a new llmp_page. 
size should be relative to +/// llmp_page->messages unsafe fn _llmp_page_init(shmem: &mut AflShmem, sender: u32) { let page = shmem2page(&shmem); (*page).sender = sender; @@ -280,7 +245,7 @@ unsafe fn _llmp_page_init(shmem: &mut AflShmem, sender: u32) { ptr::write_volatile(&mut (*page).sender_dead, 0); } -/* Pointer to the message behind the last message */ +/// Pointer to the message behind the last message #[inline] unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { /* DBG("_llmp_next_msg_ptr %p %lu + %lu\n", last_msg, last_msg->buf_len_padded, sizeof(llmp_message)); */ @@ -289,288 +254,349 @@ unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { .offset((*last_msg).buf_len_padded as isize) as *mut LlmpMsg; } -/* Read next message. */ -unsafe fn llmp_recv(receiver: &mut LlmpReceiver) -> Result, AflError> { - /* DBG("llmp_recv %p %p\n", page, last_msg); */ - compiler_fence(Ordering::SeqCst); - let page = receiver.current_recv_map.page(); - let last_msg = receiver.last_msg_recvd; - let current_msg_id = ptr::read_volatile(&mut (*page).current_msg_id); - - // Read the message from the page - let ret = if current_msg_id == 0 { - /* No messages yet */ - None - } else if last_msg.is_null() { - /* We never read a message from this queue. Return first. */ - Some((*page).messages.as_mut_ptr()) - } else if (*last_msg).message_id == current_msg_id { - /* Oops! No new message! */ - None - } else { - Some(_llmp_next_msg_ptr(last_msg)) - }; - - // Let's see what we go here. - match ret { - Some(msg) => { - // Handle special, LLMP internal, messages. 
- match (*msg).tag { - LLMP_TAG_UNSET => panic!("BUG: Read unallocated msg"), - LLMP_TAG_END_OF_PAGE => { - dbg!("Got end of page, allocing next"); - // Handle end of page - if (*msg).buf_len < size_of::() as u64 { - panic!(format!( - "Illegal message length for EOP (is {}, expected {})", - (*msg).buf_len_padded, - size_of::() - )); - } - let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; - - /* We can reuse the map mem space, no need to free and calloc. - However, the pageinfo points to the map we're about to unmap. - Clone the contents first to be safe (probably fine in rust eitner way). */ - let pageinfo_cpy = (*pageinfo).clone(); - - ptr::write_volatile(&mut (*page).save_to_unmap, 1); - receiver.current_recv_map = LlmpPageWrapper::from_name_slice( - &pageinfo_cpy.shm_str, - pageinfo_cpy.map_size, - )?; - dbg!( - "Got a new recv map", - receiver.current_recv_map.shmem.shm_str - ); - // After we mapped the new page, return the next message, if available - return llmp_recv(receiver); - } - _ => (), +/// An actor on the sendin part of the shared map +impl LlmpSender { + /// For non zero-copy, we want to get rid of old pages with duplicate messages in the client + /// eventually. This function This funtion sees if we can unallocate older pages. + /// The broker would have informed us by setting the save_to_unmap-flag. + unsafe fn prune_old_pages(&mut self) { + // Exclude the current page by splitting of the last element for this iter + let mut unmap_until_excl = 0; + for map in self.out_maps.split_last().unwrap().1 { + if (*map.page()).save_to_unmap == 0 { + // The broker didn't read this page yet, no more pages to unmap. + break; } - - // Store the last msg for next time - receiver.last_msg_recvd = msg; + unmap_until_excl += 1; } - _ => (), - }; - Ok(ret) -} + // Remove all maps that the broker already mapped + // simply removing them from the vec should then call drop and unmap them. 
+ self.out_maps.drain(0..unmap_until_excl); + } -/* Blocks/spins until the next message gets posted to the page, -then returns that message. */ -pub unsafe fn llmp_recv_blocking(receiver: &mut LlmpReceiver) -> Result<*mut LlmpMsg, AflError> { - let mut current_msg_id = 0; - let page = receiver.current_recv_map.page(); - let last_msg = receiver.last_msg_recvd; - if !last_msg.is_null() { - if (*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg) { - panic!("BUG: full page passed to await_message_blocking or reset failed"); + /// Intern: Special allocation function for EOP messages (and nothing else!) + /// The normal alloc will fail if there is not enough space for buf_len_padded + EOP + /// So if alloc_next fails, create new page if necessary, use this function, + /// place EOP, commit EOP, reset, alloc again on the new space. + unsafe fn alloc_eop(&mut self) -> *mut LlmpMsg { + let page = self.out_maps.last().unwrap().page(); + let last_msg = self.last_msg_sent; + if (*page).size_used + EOP_MSG_SIZE > (*page).size_total { + panic!(format!("PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, size_total {:?}", page, + (*page).size_used, (*page).size_total)); } - current_msg_id = (*last_msg).message_id - } - loop { - compiler_fence(Ordering::SeqCst); - if ptr::read_volatile(&mut (*page).current_msg_id) != current_msg_id { - return match llmp_recv(receiver)? { - Some(msg) => Ok(msg), - None => panic!("BUG: blocking llmp message should never be NULL"), - }; + let mut ret: *mut LlmpMsg = if !last_msg.is_null() { + _llmp_next_msg_ptr(last_msg) + } else { + (*page).messages.as_mut_ptr() + }; + if (*ret).tag == LLMP_TAG_UNINITIALIZED { + panic!("Did not call send() on last message!"); } - } -} - -/* Special allocation function for EOP messages (and nothing else!) 
- The normal alloc will fail if there is not enough space for buf_len_padded + EOP - So if llmp_alloc_next fails, create new page if necessary, use this function, - place EOP, commit EOP, reset, alloc again on the new space. -*/ -unsafe fn llmp_alloc_eop(page: *mut LlmpPage, last_msg: *const LlmpMsg) -> *mut LlmpMsg { - if (*page).size_used + EOP_MSG_SIZE > (*page).size_total { - panic!(format!("PROGRAM ABORT : BUG: EOP does not fit in page! page {:?}, size_current {:?}, size_total {:?}", page, - (*page).size_used, (*page).size_total)); - } - let mut ret: *mut LlmpMsg = if !last_msg.is_null() { - _llmp_next_msg_ptr(last_msg) - } else { - (*page).messages.as_mut_ptr() - }; - if (*ret).tag == LLMP_TAG_UNINITIALIZED { - panic!("Did not call send() on last message!"); - } - (*ret).buf_len_padded = size_of::() as c_ulong; - (*ret).message_id = if !last_msg.is_null() { - (*last_msg).message_id + 1 - } else { - 1 - }; - (*ret).tag = LLMP_TAG_END_OF_PAGE; - (*page).size_used += EOP_MSG_SIZE; - ret -} - -/// Will return a ptr to the next msg buf, or None if map is full. -/// Never call alloc_next without either sending or cancelling the last allocated message for this page! -/// There can only ever be up to one message allocated per page at each given time. -unsafe fn llmp_alloc_next_if_space(llmp: &mut LlmpSender, buf_len: usize) -> Option<*mut LlmpMsg> { - let mut buf_len_padded = buf_len; - let mut complete_msg_size = llmp_align(size_of::() + buf_len_padded); - let page = llmp.out_maps.last().unwrap().page(); - let last_msg = llmp.last_msg_sent; - /* DBG("XXX complete_msg_size %lu (h: %lu)\n", complete_msg_size, sizeof(llmp_message)); */ - /* In case we don't have enough space, make sure the next page will be large - * enough */ - // For future allocs, keep track of the maximum (aligned) alloc size we used - (*page).max_alloc_size = max((*page).max_alloc_size, complete_msg_size); - - let mut ret: *mut LlmpMsg; - /* DBG("last_msg %p %d (%d)\n", last_msg, last_msg ? 
(int)last_msg->tag : -1, (int)LLMP_TAG_END_OF_PAGE_V1); */ - if last_msg.is_null() || (*last_msg).tag == LLMP_TAG_END_OF_PAGE { - /* We start fresh, on a new page */ - ret = (*page).messages.as_mut_ptr(); - /* The initial message may not be alligned, so we at least align the end of - it. Technically, c_ulong can be smaller than a pointer, then who knows what - happens */ - let base_addr = ret as usize; - buf_len_padded = - llmp_align(base_addr + complete_msg_size) - base_addr - size_of::(); - complete_msg_size = buf_len_padded + size_of::(); - /* DBG("XXX complete_msg_size NEW %lu\n", complete_msg_size); */ - /* Still space for the new message plus the additional "we're full" message? - */ - if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { - /* We're full. */ - return None; - } - /* We need to start with 1 for ids, as current message id is initialized - * with 0... */ + (*ret).buf_len_padded = size_of::() as c_ulong; (*ret).message_id = if !last_msg.is_null() { (*last_msg).message_id + 1 } else { 1 + }; + (*ret).tag = LLMP_TAG_END_OF_PAGE; + (*page).size_used += EOP_MSG_SIZE; + ret + } + + /// Intern: Will return a ptr to the next msg buf, or None if map is full. + /// Never call alloc_next without either sending or cancelling the last allocated message for this page! + /// There can only ever be up to one message allocated per page at each given time. 
+ unsafe fn alloc_next_if_space(&mut self, buf_len: usize) -> Option<*mut LlmpMsg> { + let mut buf_len_padded = buf_len; + let mut complete_msg_size = llmp_align(size_of::() + buf_len_padded); + let page = self.out_maps.last().unwrap().page(); + let last_msg = self.last_msg_sent; + /* DBG("XXX complete_msg_size %lu (h: %lu)\n", complete_msg_size, sizeof(llmp_message)); */ + /* In case we don't have enough space, make sure the next page will be large + * enough */ + // For future allocs, keep track of the maximum (aligned) alloc size we used + (*page).max_alloc_size = max((*page).max_alloc_size, complete_msg_size); + + let mut ret: *mut LlmpMsg; + /* DBG("last_msg %p %d (%d)\n", last_msg, last_msg ? (int)last_msg->tag : -1, (int)LLMP_TAG_END_OF_PAGE_V1); */ + if last_msg.is_null() || (*last_msg).tag == LLMP_TAG_END_OF_PAGE { + /* We start fresh, on a new page */ + ret = (*page).messages.as_mut_ptr(); + /* The initial message may not be alligned, so we at least align the end of + it. Technically, c_ulong can be smaller than a pointer, then who knows what + happens */ + let base_addr = ret as usize; + buf_len_padded = + llmp_align(base_addr + complete_msg_size) - base_addr - size_of::(); + complete_msg_size = buf_len_padded + size_of::(); + /* DBG("XXX complete_msg_size NEW %lu\n", complete_msg_size); */ + /* Still space for the new message plus the additional "we're full" message? + */ + if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { + /* We're full. */ + return None; + } + /* We need to start with 1 for ids, as current message id is initialized + * with 0... */ + (*ret).message_id = if !last_msg.is_null() { + (*last_msg).message_id + 1 + } else { + 1 + } + } else if (*page).current_msg_id != (*last_msg).message_id { + /* Oops, wrong usage! */ + panic!(format!("BUG: The current message never got commited using send! 
(page->current_msg_id {:?}, last_msg->message_id: {})", (*page).current_msg_id, (*last_msg).message_id)); + } else { + buf_len_padded = complete_msg_size - size_of::(); + /* DBG("XXX ret %p id %u buf_len_padded %lu complete_msg_size %lu\n", ret, ret->message_id, buf_len_padded, + * complete_msg_size); */ + + /* Still space for the new message plus the additional "we're full" message? */ + if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { + /* We're full. */ + return None; + } + ret = _llmp_next_msg_ptr(last_msg); + (*ret).message_id = (*last_msg).message_id + 1 } - } else if (*page).current_msg_id != (*last_msg).message_id { - /* Oops, wrong usage! */ - panic!(format!("BUG: The current message never got commited using llmp_send! (page->current_msg_id {:?}, last_msg->message_id: {})", (*page).current_msg_id, (*last_msg).message_id)); - } else { - buf_len_padded = complete_msg_size - size_of::(); - /* DBG("XXX ret %p id %u buf_len_padded %lu complete_msg_size %lu\n", ret, ret->message_id, buf_len_padded, - * complete_msg_size); */ - /* Still space for the new message plus the additional "we're full" message? */ - if (*page).size_used + complete_msg_size + EOP_MSG_SIZE > (*page).size_total { - /* We're full. */ - return None; + /* The beginning of our message should be messages + size_used, else nobody + * sent the last msg! */ + /* DBG("XXX ret %p - page->messages %p = %lu != %lu, will add %lu -> %p\n", ret, page->messages, + (c_ulong)((u8 *)ret - (u8 *)page->messages), page->size_used, complete_msg_size, ((u8 *)ret) + complete_msg_size); + */ + if last_msg.is_null() && (*page).size_used != 0 + || ((ret as usize) - (*page).messages.as_mut_ptr() as usize) != (*page).size_used + { + panic!(format!("Allocated new message without calling send() inbetween. 
ret: {:?}, page: {:?}, complete_msg_size: {:?}, size_used: {:?}, last_msg: {:?}", ret, page, + buf_len_padded, (*page).size_used, last_msg)); } - ret = _llmp_next_msg_ptr(last_msg); - (*ret).message_id = (*last_msg).message_id + 1 + (*page).size_used = (*page).size_used + complete_msg_size; + (*ret).buf_len_padded = buf_len_padded as c_ulong; + (*ret).buf_len = buf_len as c_ulong; + /* DBG("Returning new message at %p with len %ld, TAG was %x", ret, ret->buf_len_padded, ret->tag); */ + /* Maybe catch some bugs... */ + (*_llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; + (*ret).tag = LLMP_TAG_UNINITIALIZED; + Some(ret) } - /* The beginning of our message should be messages + size_used, else nobody - * sent the last msg! */ - /* DBG("XXX ret %p - page->messages %p = %lu != %lu, will add %lu -> %p\n", ret, page->messages, - (c_ulong)((u8 *)ret - (u8 *)page->messages), page->size_used, complete_msg_size, ((u8 *)ret) + complete_msg_size); - */ - if last_msg.is_null() && (*page).size_used != 0 - || ((ret as usize) - (*page).messages.as_mut_ptr() as usize) != (*page).size_used - { - panic!(format!("Allocated new message without calling send() inbetween. ret: {:?}, page: {:?}, complete_msg_size: {:?}, size_used: {:?}, last_msg: {:?}", ret, page, - buf_len_padded, (*page).size_used, last_msg)); - } - (*page).size_used = (*page).size_used + complete_msg_size; - (*ret).buf_len_padded = buf_len_padded as c_ulong; - (*ret).buf_len = buf_len as c_ulong; - /* DBG("Returning new message at %p with len %ld, TAG was %x", ret, ret->buf_len_padded, ret->tag); */ - /* Maybe catch some bugs... */ - (*_llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; - (*ret).tag = LLMP_TAG_UNINITIALIZED; - Some(ret) -} - -/// Commit the message last allocated by llmp_alloc_next to the queue. -/// After commiting, the msg shall no longer be altered! 
-/// It will be read by the consuming threads (broker->clients or client->broker) -unsafe fn llmp_send(sender: &mut LlmpSender, msg: *mut LlmpMsg) -> Result<(), AflError> { - if sender.last_msg_sent == msg { - panic!("Message sent twice!"); - } - if (*msg).tag == LLMP_TAG_UNSET as c_uint { - panic!(format!( - "No tag set on message with id {}", - (*msg).message_id - )); - } - let page = sender.out_maps.last().unwrap().page(); - if msg.is_null() || !llmp_msg_in_page(page, msg) { - return Err(AflError::Unknown(format!( - "Llmp Message {:?} is null or not in current page", - msg - ))); - } - (*msg).message_id = (*page).current_msg_id + 1; - compiler_fence(Ordering::SeqCst); - ptr::write_volatile(&mut (*page).current_msg_id, (*msg).message_id); - compiler_fence(Ordering::SeqCst); - sender.last_msg_sent = msg; - Ok(()) -} - -/// listener about it using a EOP message. -unsafe fn llmp_handle_out_eop(sender: &mut LlmpSender) -> Result<(), AflError> { - let old_map = sender.out_maps.last_mut().unwrap().page(); - - // Create a new shard page. 
- let new_map_shmem = LlmpPageWrapper::new((*old_map).sender, (*old_map).max_alloc_size)?; - let mut new_map = new_map_shmem.page(); - - ptr::write_volatile(&mut (*new_map).current_msg_id, (*old_map).current_msg_id); - (*new_map).max_alloc_size = (*old_map).max_alloc_size; - /* On the old map, place a last message linking to the new map for the clients - * to consume */ - let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, sender.last_msg_sent); - (*out).sender = (*old_map).sender; - - let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; - (*end_of_page_msg).map_size = new_map_shmem.shmem.map_size; - (*end_of_page_msg).shm_str = new_map_shmem.shmem.shm_str; - - // We never sent a msg on the new buf */ - sender.last_msg_sent = 0 as *mut LlmpMsg; - - /* Send the last msg on the old buf */ - llmp_send(sender, out)?; - - if !sender.keep_pages_forever { - llmp_prune_old_pages(sender); + /// Commit the message last allocated by alloc_next to the queue. + /// After commiting, the msg shall no longer be altered! + /// It will be read by the consuming threads (broker->clients or client->broker) + unsafe fn send(&mut self, msg: *mut LlmpMsg) -> Result<(), AflError> { + if self.last_msg_sent == msg { + panic!("Message sent twice!"); + } + if (*msg).tag == LLMP_TAG_UNSET as c_uint { + panic!(format!( + "No tag set on message with id {}", + (*msg).message_id + )); + } + let page = self.out_maps.last().unwrap().page(); + if msg.is_null() || !llmp_msg_in_page(page, msg) { + return Err(AflError::Unknown(format!( + "Llmp Message {:?} is null or not in current page", + msg + ))); + } + (*msg).message_id = (*page).current_msg_id + 1; + compiler_fence(Ordering::SeqCst); + ptr::write_volatile(&mut (*page).current_msg_id, (*msg).message_id); + compiler_fence(Ordering::SeqCst); + self.last_msg_sent = msg; + Ok(()) } - sender.out_maps.push(new_map_shmem); + /// listener about it using a EOP message. 
+ unsafe fn handle_out_eop(&mut self) -> Result<(), AflError> { + let old_map = self.out_maps.last_mut().unwrap().page(); - Ok(()) -} + // Create a new shard page. + let new_map_shmem = LlmpSharedMap::new((*old_map).sender, (*old_map).max_alloc_size)?; + let mut new_map = new_map_shmem.page(); -/// Allocates the next space on this sender page -pub unsafe fn llmp_alloc_next( - sender: &mut LlmpSender, - buf_len: usize, -) -> Result<*mut LlmpMsg, AflError> { - match llmp_alloc_next_if_space(sender, buf_len) { - Some(msg) => return Ok(msg), - _ => (), - }; + ptr::write_volatile(&mut (*new_map).current_msg_id, (*old_map).current_msg_id); + (*new_map).max_alloc_size = (*old_map).max_alloc_size; + /* On the old map, place a last message linking to the new map for the clients + * to consume */ + let mut out: *mut LlmpMsg = self.alloc_eop(); + (*out).sender = (*old_map).sender; - /* no more space left! We'll have to start a new page */ - llmp_handle_out_eop(sender)?; + let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + (*end_of_page_msg).map_size = new_map_shmem.shmem.map_size; + (*end_of_page_msg).shm_str = new_map_shmem.shmem.shm_str; - match llmp_alloc_next_if_space(sender, buf_len) { - Some(msg) => Ok(msg), - None => Err(AflError::Unknown(format!( - "Error allocating {} bytes in shmap", - buf_len - ))), + // We never sent a msg on the new buf */ + self.last_msg_sent = 0 as *mut LlmpMsg; + + /* Send the last msg on the old buf */ + self.send(out)?; + + if !self.keep_pages_forever { + self.prune_old_pages(); + } + + self.out_maps.push(new_map_shmem); + + Ok(()) + } + + /// Allocates the next space on this sender page + pub unsafe fn alloc_next(&mut self, buf_len: usize) -> Result<*mut LlmpMsg, AflError> { + match self.alloc_next_if_space(buf_len) { + Some(msg) => return Ok(msg), + _ => (), + }; + + /* no more space left! 
We'll have to start a new page */ + self.handle_out_eop()?; + + match self.alloc_next_if_space(buf_len) { + Some(msg) => Ok(msg), + None => Err(AflError::Unknown(format!( + "Error allocating {} bytes in shmap", + buf_len + ))), + } + } + + /// Cancel send of the next message, this allows us to allocate a new message without sending this one. + pub unsafe fn cancel_send(&mut self, msg: *mut LlmpMsg) { + /* DBG("Client %d cancels send of msg at %p with tag 0x%X and size %ld", client->id, msg, msg->tag, + * msg->buf_len_padded); */ + let page = self.out_maps.last().unwrap().page(); + (*msg).tag = LLMP_TAG_UNSET; + (*page).size_used -= (*msg).buf_len_padded as usize + size_of::(); } } +/// Receiving end of an llmp channel +impl LlmpReceiver { + /// Read next message. + unsafe fn recv(&mut self) -> Result, AflError> { + /* DBG("recv %p %p\n", page, last_msg); */ + compiler_fence(Ordering::SeqCst); + let page = self.current_recv_map.page(); + let last_msg = self.last_msg_recvd; + let current_msg_id = ptr::read_volatile(&mut (*page).current_msg_id); + + // Read the message from the page + let ret = if current_msg_id == 0 { + /* No messages yet */ + None + } else if last_msg.is_null() { + /* We never read a message from this queue. Return first. */ + Some((*page).messages.as_mut_ptr()) + } else if (*last_msg).message_id == current_msg_id { + /* Oops! No new message! */ + None + } else { + Some(_llmp_next_msg_ptr(last_msg)) + }; + + // Let's see what we go here. + match ret { + Some(msg) => { + // Handle special, LLMP internal, messages. 
+ match (*msg).tag { + LLMP_TAG_UNSET => panic!("BUG: Read unallocated msg"), + LLMP_TAG_END_OF_PAGE => { + dbg!("Got end of page, allocing next"); + // Handle end of page + if (*msg).buf_len < size_of::() as u64 { + panic!(format!( + "Illegal message length for EOP (is {}, expected {})", + (*msg).buf_len_padded, + size_of::() + )); + } + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + + /* We can reuse the map mem space, no need to free and calloc. + However, the pageinfo points to the map we're about to unmap. + Clone the contents first to be safe (probably fine in rust eitner way). */ + let pageinfo_cpy = (*pageinfo).clone(); + + ptr::write_volatile(&mut (*page).save_to_unmap, 1); + self.current_recv_map = LlmpSharedMap::from_name_slice( + &pageinfo_cpy.shm_str, + pageinfo_cpy.map_size, + )?; + dbg!("Got a new recv map", self.current_recv_map.shmem.shm_str); + // After we mapped the new page, return the next message, if available + return self.recv(); + } + _ => (), + } + + // Store the last msg for next time + self.last_msg_recvd = msg; + } + _ => (), + }; + Ok(ret) + } + + /// Blocks/spins until the next message gets posted to the page, + /// then returns that message. + pub unsafe fn recv_blocking(&mut self) -> Result<*mut LlmpMsg, AflError> { + let mut current_msg_id = 0; + let page = self.current_recv_map.page(); + let last_msg = self.last_msg_recvd; + if !last_msg.is_null() { + if (*last_msg).tag == LLMP_TAG_END_OF_PAGE && !llmp_msg_in_page(page, last_msg) { + panic!("BUG: full page passed to await_message_blocking or reset failed"); + } + current_msg_id = (*last_msg).message_id + } + loop { + compiler_fence(Ordering::SeqCst); + if ptr::read_volatile(&mut (*page).current_msg_id) != current_msg_id { + return match self.recv()? { + Some(msg) => Ok(msg), + None => panic!("BUG: blocking llmp message should never be NULL"), + }; + } + } + } +} + +/// The page struct, placed on a shared mem instance. 
+impl LlmpSharedMap { + /// Creates a new page with minimum prev_max_alloc_size or LLMP_INITIAL_MAP_SIZE + /// returning the initialized shared mem struct + pub unsafe fn new(sender: u32, min_size: usize) -> Result { + // Create a new shard page. + let mut shmem = AflShmem::new(new_map_size(min_size))?; + _llmp_page_init(&mut shmem, sender); + Ok(Self { shmem }) + } + + /// Initialize from a 0-terminated sharedmap id string and its size + pub unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result { + let shmem = AflShmem::from_str(shm_str, map_size)?; + // Not initializing the page here - the other side should have done it already! + Ok(Self { shmem }) + } + + /// Initialize from a shm_str with fixed len of 20 + pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + let shmem = AflShmem::from_name_slice(shm_str, map_size)?; + // Not initializing the page here - the other side should have done it already! + Ok(Self { shmem }) + } + + /// Get the unsafe ptr to this page, situated on the shared map + pub unsafe fn page(&self) -> *mut LlmpPage { + shmem2page(&self.shmem) + } +} + +/// The broker forwards all messages to its own bus-like broadcast map. +/// It may intercept messages passing through. impl LlmpBroker { /// Create and initialize a new llmp_broker pub unsafe fn new() -> Result { @@ -578,7 +604,7 @@ impl LlmpBroker { llmp_out: LlmpSender { id: 0, last_msg_sent: ptr::null_mut(), - out_maps: vec![LlmpPageWrapper::new(0, 0)?], + out_maps: vec![LlmpSharedMap::new(0, 0)?], // Broker never cleans up the pages so that new // clients may join at any time keep_pages_forever: true, @@ -590,13 +616,14 @@ impl LlmpBroker { Ok(broker) } + /// Allocate the next message on the outgoing map unsafe fn alloc_next(&mut self, buf_len: usize) -> Result<*mut LlmpMsg, AflError> { - llmp_alloc_next(&mut self.llmp_out, buf_len) + self.llmp_out.alloc_next(buf_len) } /// Registers a new client for the given sharedmap str and size. 
/// Returns the id of the new client in broker.client_map - pub unsafe fn register_client(&mut self, client_page: LlmpPageWrapper) { + pub unsafe fn register_client(&mut self, client_page: LlmpSharedMap) { let id = self.llmp_clients.len() as u32; self.llmp_clients.push(LlmpReceiver { id, @@ -622,7 +649,7 @@ impl LlmpBroker { msg.copy_to_nonoverlapping(out, size_of::() + (*msg).buf_len_padded as usize); (*out).buf_len_padded = actual_size; /* We need to replace the message ID with our own */ - match llmp_send(&mut self.llmp_out, out) { + match self.llmp_out.send(out) { Err(e) => panic!(format!("Error sending msg: {:?}", e)), _ => (), }; @@ -637,8 +664,8 @@ impl LlmpBroker { // TODO: We could memcpy a range of pending messages, instead of one by one. loop { let msg = { - let mut client = &mut self.llmp_clients[client_id as usize]; - match llmp_recv(&mut client)? { + let client = &mut self.llmp_clients[client_id as usize]; + match client.recv()? { None => { // We're done handling this client return Ok(()); @@ -657,10 +684,8 @@ impl LlmpBroker { } else { let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; - match LlmpPageWrapper::from_name_slice( - &(*pageinfo).shm_str, - (*pageinfo).map_size, - ) { + match LlmpSharedMap::from_name_slice(&(*pageinfo).shm_str, (*pageinfo).map_size) + { Ok(new_page) => { let id = next_id; next_id += 1; @@ -701,7 +726,7 @@ impl LlmpBroker { /// Loops infinitely, forwarding and handling all incoming messages from clients. /// Never returns. Panics on error. - pub unsafe fn broker_loop(&mut self) -> ! { + pub unsafe fn loop_forever(&mut self) -> ! { loop { compiler_fence(Ordering::SeqCst); self.once() @@ -713,75 +738,47 @@ impl LlmpBroker { } } -/// For non zero-copy, we want to get rid of old pages with duplicate messages in the client -/// eventually. This function This funtion sees if we can unallocate older pages. -/// The broker would have informed us by setting the save_to_unmap-flag. 
-unsafe fn llmp_prune_old_pages(sender: &mut LlmpSender) { - // Exclude the current page by splitting of the last element for this iter - let mut unmap_until_excl = 0; - for map in sender.out_maps.split_last().unwrap().1 { - if (*map.page()).save_to_unmap == 0 { - // The broker didn't read this page yet, no more pages to unmap. - break; - } - unmap_until_excl += 1; - } - // Remove all maps that the broker already mapped - // simply removing them from the vec should then call drop and unmap them. - sender.out_maps.drain(0..unmap_until_excl); -} - +/// `n` clients connect to a broker. They share an outgoing map with the broker, +/// and get incoming messages from the shared broker bus impl LlmpClient { /// Creates a new LlmpClient - pub unsafe fn new(initial_broker_page: LlmpPageWrapper) -> Result { + pub unsafe fn new(initial_broker_map: LlmpSharedMap) -> Result { Ok(Self { llmp_out: LlmpSender { id: 0, last_msg_sent: 0 as *mut LlmpMsg, - out_maps: vec![LlmpPageWrapper::new(0, LLMP_INITIAL_MAP_SIZE)?], + out_maps: vec![LlmpSharedMap::new(0, LLMP_INITIAL_MAP_SIZE)?], // drop pages to the broker if it already read them keep_pages_forever: false, }, llmp_in: LlmpReceiver { id: 0, - current_recv_map: initial_broker_page, + current_recv_map: initial_broker_map, last_msg_recvd: 0 as *mut LlmpMsg, }, }) } -} -/// A client receives a broadcast message. -/// Returns null if no message is availiable -pub unsafe fn llmp_client_recv(client: &mut LlmpClient) -> Result, AflError> { - llmp_recv(&mut client.llmp_in) -} + /// Commits a msg to the client's out map + pub unsafe fn send(&mut self, msg: *mut LlmpMsg) -> Result<(), AflError> { + self.llmp_out.send(msg) + } -/// A client blocks/spins until the next message gets posted to the page, -/// then returns that message. -pub unsafe fn llmp_client_recv_blocking(client: &mut LlmpClient) -> Result<*mut LlmpMsg, AflError> { - llmp_recv_blocking(&mut client.llmp_in) -} + /// A client receives a broadcast message. 
+ /// Returns null if no message is availiable + pub unsafe fn recv(&mut self) -> Result, AflError> { + self.llmp_in.recv() + } -/// The current page could have changed in recv (EOP) -/// Alloc the next message, internally handling end of page by allocating a new one. -pub unsafe fn llmp_client_alloc_next( - client: &mut LlmpClient, - buf_len: usize, -) -> Result<*mut LlmpMsg, AflError> { - llmp_alloc_next(&mut client.llmp_out, buf_len) -} + /// A client blocks/spins until the next message gets posted to the page, + /// then returns that message. + pub unsafe fn recv_blocking(&mut self) -> Result<*mut LlmpMsg, AflError> { + self.llmp_in.recv_blocking() + } -/// Cancel send of the next message, this allows us to allocate a new message without sending this one. -pub unsafe fn llmp_cancel_send(sender: &mut LlmpSender, msg: *mut LlmpMsg) { - /* DBG("Client %d cancels send of msg at %p with tag 0x%X and size %ld", client->id, msg, msg->tag, - * msg->buf_len_padded); */ - let page = sender.out_maps.last().unwrap().page(); - (*msg).tag = LLMP_TAG_UNSET; - (*page).size_used -= (*msg).buf_len_padded as usize + size_of::(); -} - -/// Commits a msg to the client's out map -pub unsafe fn llmp_client_send(client: &mut LlmpClient, msg: *mut LlmpMsg) -> Result<(), AflError> { - llmp_send(&mut client.llmp_out, msg) + /// The current page could have changed in recv (EOP) + /// Alloc the next message, internally handling end of page by allocating a new one. 
+ pub unsafe fn alloc_next(&mut self, buf_len: usize) -> Result<*mut LlmpMsg, AflError> { + self.llmp_out.alloc_next(buf_len) + } } From 43eba77a51365ce97d7c17b28d906d4c943be736 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 19:15:17 +0100 Subject: [PATCH 04/17] added sleep time to loop --- afl/src/events/llmp.rs | 1 - afl/src/events/llmp_translated.rs | 12 +++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp.rs index fd99b9d5ae..4a191c1fe7 100644 --- a/afl/src/events/llmp.rs +++ b/afl/src/events/llmp.rs @@ -1,5 +1,4 @@ use core::marker::PhantomData; -use std::{ffi::c_void, io::Read, io::Write, net::TcpListener}; use crate::{ corpus::Corpus, engines::State, executors::Executor, inputs::Input, utils::Rand, AflError, diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp_translated.rs index 71681bf2ad..cddcbe5267 100644 --- a/afl/src/events/llmp_translated.rs +++ b/afl/src/events/llmp_translated.rs @@ -50,7 +50,7 @@ Then register some clientloops using llmp_broker_register_threaded_clientloop use core::ptr; use core::sync::atomic::{compiler_fence, Ordering}; -use core::time; +use core::time::Duration; use libc::{c_uint, c_ulong, c_ushort}; use std::{cmp::max, ffi::CStr, mem::size_of, thread}; @@ -726,14 +726,16 @@ impl LlmpBroker { /// Loops infinitely, forwarding and handling all incoming messages from clients. /// Never returns. Panics on error. - pub unsafe fn loop_forever(&mut self) -> ! { + /// 5 millis of sleep can't hurt to keep busywait not at 100% + pub unsafe fn loop_forever(&mut self, sleep_time: Option) -> ! { loop { compiler_fence(Ordering::SeqCst); self.once() .expect("An error occurred when brokering. 
Exiting."); - - /* 5 milis of sleep for now to not busywait at 100% */ - thread::sleep(time::Duration::from_millis(5)); + match sleep_time { + Some(time) => thread::sleep(time), + None => (), + } } } } From cca2ac9724c17f4853f2c5b95016f8694db3e5f0 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 19:54:14 +0100 Subject: [PATCH 05/17] building client --- afl/src/events/llmp_translated.rs | 108 ++++++++++++++++++----------- afl/src/events/shmem_translated.rs | 8 ++- 2 files changed, 71 insertions(+), 45 deletions(-) diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp_translated.rs index cddcbe5267..91269d6f6e 100644 --- a/afl/src/events/llmp_translated.rs +++ b/afl/src/events/llmp_translated.rs @@ -59,30 +59,28 @@ use crate::AflError; use super::shmem_translated::AflShmem; -/// The header length of a llmp page in a shared map (until messages start) -const LLMP_PAGE_HEADER_LEN: usize = size_of::(); - /// We'll start off with 256 megabyte maps per fuzzer -const LLMP_INITIAL_MAP_SIZE: usize = 1 << 28; - -/// A msg fresh from the press: No tag got sent by the user yet -const LLMP_TAG_UNSET: u32 = 0xdeadaf; -/// This message should not exist yet. Some bug in unsafe code! -const LLMP_TAG_UNINITIALIZED: u32 = 0xa143af11; -/// The end of page mesasge -/// When receiving this, a new sharedmap needs to be allocated. -const LLMP_TAG_END_OF_PAGE: u32 = 0xaf1e0f1; -/// A new client for this broekr got added. -const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xc11e471; - +const LLMP_PREF_INITIAL_MAP_SIZE: usize = 1 << 28; /// What byte count to align messages to /// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value -const LLMP_ALIGNNMENT: usize = 64; +const LLMP_PREF_ALIGNNMENT: usize = 64; + +/// A msg fresh from the press: No tag got sent by the user yet +const LLMP_TAG_UNSET: u32 = 0xDEADAF; +/// This message should not exist yet. Some bug in unsafe code! 
+const LLMP_TAG_UNINITIALIZED: u32 = 0xA143AF11; +/// The end of page mesasge +/// When receiving this, a new sharedmap needs to be allocated. +const LLMP_TAG_END_OF_PAGE: u32 = 0xAF1E0F1; +/// A new client for this broekr got added. +const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xC11E471; /// Size of a new page message, header, payload, and alignment const EOP_MSG_SIZE: usize = llmp_align(size_of::() + size_of::()); +/// The header length of a llmp page in a shared map (until messages start) +const LLMP_PAGE_HEADER_LEN: usize = size_of::(); -/// Message Hook +/// Message hook type pub type LlmpMsgHookFn = unsafe fn(client_id: u32, msg: *mut LlmpMsg) -> LlmpMsgHookResult; /// Sending end on a (unidirectional) sharedmap channel @@ -115,7 +113,9 @@ pub struct LlmpReceiver { /// Client side of LLMP #[derive(Clone)] pub struct LlmpClient { + /// Outgoing channel to the broker pub llmp_out: LlmpSender, + /// Incoming (broker) broadcast map pub llmp_in: LlmpReceiver, } @@ -144,19 +144,6 @@ pub struct LlmpMsg { pub buf: [u8; 0], } -/// The broker (node 0) -#[derive(Clone)] -#[repr(C)] -pub struct LlmpBroker { - /// Broadcast map from broker to all clients - pub llmp_out: LlmpSender, - /// Users of Llmp can add message handlers in the broker. - /// This allows us to intercept messages right in the broker - /// This keeps the out map clean. - pub msg_hooks: Vec, - pub llmp_clients: Vec, -} - /// Contents of the share mem pages, used by llmp internally #[derive(Copy, Clone)] #[repr(C, packed)] @@ -171,6 +158,19 @@ pub struct LlmpPage { pub messages: [LlmpMsg; 0], } +/// The broker (node 0) +#[derive(Clone)] +#[repr(C)] +pub struct LlmpBroker { + /// Broadcast map from broker to all clients + pub llmp_out: LlmpSender, + /// Users of Llmp can add message handlers in the broker. + /// This allows us to intercept messages right in the broker + /// This keeps the out map clean. 
+ pub msg_hooks: Vec, + pub llmp_clients: Vec, +} + /// Result of an LLMP Mesasge hook pub enum LlmpMsgHookResult { /// This has been handled in the broker. No need to forward. @@ -202,19 +202,19 @@ unsafe fn llmp_msg_in_page(page: *mut LlmpPage, msg: *mut LlmpMsg) -> bool { && (page as *mut u8).offset((*page).size_total as isize) > msg as *mut u8; } -/// allign to LLMP_ALIGNNMENT=64 bytes +/// allign to LLMP_PREF_ALIGNNMENT=64 bytes #[inline] const fn llmp_align(to_align: usize) -> usize { // check if we need to align first - if LLMP_ALIGNNMENT == 0 { + if LLMP_PREF_ALIGNNMENT == 0 { return to_align; } // Then do the alignment - let modulo = to_align % LLMP_ALIGNNMENT; + let modulo = to_align % LLMP_PREF_ALIGNNMENT; if modulo == 0 { to_align } else { - to_align + LLMP_ALIGNNMENT - modulo + to_align + LLMP_PREF_ALIGNNMENT - modulo } } @@ -225,7 +225,7 @@ const fn llmp_align(to_align: usize) -> usize { fn new_map_size(max_alloc: usize) -> usize { next_pow2(max( max_alloc * 2 + EOP_MSG_SIZE + LLMP_PAGE_HEADER_LEN, - LLMP_INITIAL_MAP_SIZE, + LLMP_PREF_INITIAL_MAP_SIZE, ) as u64) as usize } @@ -566,24 +566,26 @@ impl LlmpReceiver { /// The page struct, placed on a shared mem instance. impl LlmpSharedMap { - /// Creates a new page with minimum prev_max_alloc_size or LLMP_INITIAL_MAP_SIZE + /// Creates a new page with minimum prev_max_alloc_size or LLMP_PREF_INITIAL_MAP_SIZE /// returning the initialized shared mem struct - pub unsafe fn new(sender: u32, min_size: usize) -> Result { + pub fn new(sender: u32, min_size: usize) -> Result { // Create a new shard page. 
let mut shmem = AflShmem::new(new_map_size(min_size))?; - _llmp_page_init(&mut shmem, sender); + unsafe { + _llmp_page_init(&mut shmem, sender); + } Ok(Self { shmem }) } /// Initialize from a 0-terminated sharedmap id string and its size - pub unsafe fn from_str(shm_str: &CStr, map_size: usize) -> Result { + pub fn from_str(shm_str: &CStr, map_size: usize) -> Result { let shmem = AflShmem::from_str(shm_str, map_size)?; // Not initializing the page here - the other side should have done it already! Ok(Self { shmem }) } /// Initialize from a shm_str with fixed len of 20 - pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + pub fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { let shmem = AflShmem::from_name_slice(shm_str, map_size)?; // Not initializing the page here - the other side should have done it already! Ok(Self { shmem }) @@ -744,12 +746,12 @@ impl LlmpBroker { /// and get incoming messages from the shared broker bus impl LlmpClient { /// Creates a new LlmpClient - pub unsafe fn new(initial_broker_map: LlmpSharedMap) -> Result { + pub fn new(initial_broker_map: LlmpSharedMap) -> Result { Ok(Self { llmp_out: LlmpSender { id: 0, last_msg_sent: 0 as *mut LlmpMsg, - out_maps: vec![LlmpSharedMap::new(0, LLMP_INITIAL_MAP_SIZE)?], + out_maps: vec![LlmpSharedMap::new(0, LLMP_PREF_INITIAL_MAP_SIZE)?], // drop pages to the broker if it already read them keep_pages_forever: false, }, @@ -766,6 +768,28 @@ impl LlmpClient { self.llmp_out.send(msg) } + /// Allocates a message of the given size, tags it, and sends it off. 
+ pub fn send_buf(&mut self, tag: u32, buf: &[u8]) -> Result<(), AflError> { + // Make sure we don't reuse already allocated tags + if tag == LLMP_TAG_NEW_SHM_CLIENT + || tag == LLMP_TAG_END_OF_PAGE + || tag == LLMP_TAG_UNINITIALIZED + || tag == LLMP_TAG_UNSET + { + return Err(AflError::Unknown(format!( + "Reserved tag supplied to send_buf ({:#X})", + tag + ))); + } + unsafe { + let msg = self.alloc_next(buf.len())?; + (*msg).tag = tag; + buf.as_ptr() + .copy_to_nonoverlapping((*msg).buf.as_mut_ptr(), buf.len()); + self.send(msg) + } + } + /// A client receives a broadcast message. /// Returns null if no message is availiable pub unsafe fn recv(&mut self) -> Result, AflError> { diff --git a/afl/src/events/shmem_translated.rs b/afl/src/events/shmem_translated.rs index 0f1b901ef1..e11c000cd3 100644 --- a/afl/src/events/shmem_translated.rs +++ b/afl/src/events/shmem_translated.rs @@ -101,9 +101,11 @@ impl AflShmem { } /// Generate a shared map with a fixed byte array of 20 - pub unsafe fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { - let str_bytes = shm_str as *const [u8; 20] as *const libc::c_char; - Self::from_str(CStr::from_ptr(str_bytes), map_size) + pub fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { + unsafe { + let str_bytes = shm_str as *const [u8; 20] as *const libc::c_char; + Self::from_str(CStr::from_ptr(str_bytes), map_size) + } } pub fn new(map_size: usize) -> Result { From ab8cf14a5ce0c2d6c52ba11be23b095d9218b94c Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 19:58:03 +0100 Subject: [PATCH 06/17] moved llmp --- afl/src/events/{llmp.rs => llmp_manager.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename afl/src/events/{llmp.rs => llmp_manager.rs} (100%) diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp_manager.rs similarity index 100% rename from afl/src/events/llmp.rs rename to afl/src/events/llmp_manager.rs From eb8941d422bd384543da6680967c49d8e866763d Mon Sep 17 00:00:00 
2001 From: Dominik Maier Date: Tue, 8 Dec 2020 19:58:25 +0100 Subject: [PATCH 07/17] moved translated to real llmp --- afl/src/events/{llmp_translated.rs => llmp.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename afl/src/events/{llmp_translated.rs => llmp.rs} (100%) diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp.rs similarity index 100% rename from afl/src/events/llmp_translated.rs rename to afl/src/events/llmp.rs From ff8a89f0c190a5052af5d1b062e847bca318f808 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 21:12:39 +0100 Subject: [PATCH 08/17] added tcp server --- afl/src/events/llmp.rs | 121 ++++++++++++++++++++++++----- afl/src/events/llmp_manager.rs | 49 ------------ afl/src/events/mod.rs | 6 +- afl/src/events/shmem_translated.rs | 1 - 4 files changed, 103 insertions(+), 74 deletions(-) diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp.rs index 91269d6f6e..18cfcfc1f1 100644 --- a/afl/src/events/llmp.rs +++ b/afl/src/events/llmp.rs @@ -52,7 +52,14 @@ use core::ptr; use core::sync::atomic::{compiler_fence, Ordering}; use core::time::Duration; use libc::{c_uint, c_ulong, c_ushort}; -use std::{cmp::max, ffi::CStr, mem::size_of, thread}; +use std::{ + cmp::max, + ffi::CStr, + io::{Read, Write}, + mem::size_of, + net::TcpListener, + thread, +}; use crate::utils::next_pow2; use crate::AflError; @@ -471,6 +478,28 @@ impl LlmpSender { (*msg).tag = LLMP_TAG_UNSET; (*page).size_used -= (*msg).buf_len_padded as usize + size_of::(); } + + /// Allocates a message of the given size, tags it, and sends it off. 
+ pub fn send_buf(&mut self, tag: u32, buf: &[u8]) -> Result<(), AflError> { + // Make sure we don't reuse already allocated tags + if tag == LLMP_TAG_NEW_SHM_CLIENT + || tag == LLMP_TAG_END_OF_PAGE + || tag == LLMP_TAG_UNINITIALIZED + || tag == LLMP_TAG_UNSET + { + return Err(AflError::Unknown(format!( + "Reserved tag supplied to send_buf ({:#X})", + tag + ))); + } + unsafe { + let msg = self.alloc_next(buf.len())?; + (*msg).tag = tag; + buf.as_ptr() + .copy_to_nonoverlapping((*msg).buf.as_mut_ptr(), buf.len()); + self.send(msg) + } + } } /// Receiving end of an llmp channel @@ -625,7 +654,7 @@ impl LlmpBroker { /// Registers a new client for the given sharedmap str and size. /// Returns the id of the new client in broker.client_map - pub unsafe fn register_client(&mut self, client_page: LlmpSharedMap) { + pub fn register_client(&mut self, client_page: LlmpSharedMap) { let id = self.llmp_clients.len() as u32; self.llmp_clients.push(LlmpReceiver { id, @@ -740,6 +769,75 @@ impl LlmpBroker { } } } + + pub fn launch_tcp_listener(&mut self, port: u16) -> Result, AflError> { + // Later in the execution, after the initial map filled up, + // the current broacast map will will point to a different map. + // However, the original map is (as of now) never freed, new clients will start + // to read from the initial map id. 
+ + let listener = TcpListener::bind(format!("127.0.0.1:{}", port))?; + // accept connections and process them, spawning a new thread for each one + println!("Server listening on port {}", port); + + let client_out_map_mem = &self.llmp_out.out_maps.first().unwrap().shmem; + let broadcast_str_initial = client_out_map_mem.shm_str.clone(); + + let llmp_tcp_id = self.llmp_clients.len() as u32; + + // Tcp out map sends messages from background thread tcp server to foreground client + let tcp_out_map = LlmpSharedMap::new(llmp_tcp_id, LLMP_PREF_INITIAL_MAP_SIZE)?; + let tcp_out_map_str = tcp_out_map.shmem.shm_str; + let tcp_out_map_size = tcp_out_map.shmem.map_size; + self.register_client(tcp_out_map); + + Ok(thread::spawn(move || { + let mut new_client_sender = LlmpSender { + id: 0, + last_msg_sent: 0 as *mut LlmpMsg, + out_maps: vec![ + LlmpSharedMap::from_name_slice(&tcp_out_map_str, tcp_out_map_size).unwrap(), + ], + // drop pages to the broker if it already read them + keep_pages_forever: false, + }; + + loop { + let (mut stream, addr) = match listener.accept() { + Ok(res) => res, + Err(e) => { + dbg!("Ignoring failed accept", e); + continue; + } + }; + dbg!("New connection", addr, stream.peer_addr().unwrap()); + match stream.write(&broadcast_str_initial) { + Ok(_) => {} // fire & forget + Err(e) => { + dbg!("Could not send to shmap to client", e); + continue; + } + }; + let mut new_client_map_str: [u8; 20] = Default::default(); + let map_str_len = match stream.read(&mut new_client_map_str) { + Ok(res) => res, + Err(e) => { + dbg!("Ignoring failed read from client", e); + continue; + } + }; + if map_str_len < 20 { + dbg!("Didn't receive a complete shmap id str from client. Ignoring."); + continue; + } + + match new_client_sender.send_buf(LLMP_TAG_NEW_SHM_CLIENT, &new_client_map_str) { + Ok(()) => (), + Err(e) => println!("Error forwarding client on map: {:?}", e), + }; + } + })) + } } /// `n` clients connect to a broker. 
They share an outgoing map with the broker, @@ -770,24 +868,7 @@ impl LlmpClient { /// Allocates a message of the given size, tags it, and sends it off. pub fn send_buf(&mut self, tag: u32, buf: &[u8]) -> Result<(), AflError> { - // Make sure we don't reuse already allocated tags - if tag == LLMP_TAG_NEW_SHM_CLIENT - || tag == LLMP_TAG_END_OF_PAGE - || tag == LLMP_TAG_UNINITIALIZED - || tag == LLMP_TAG_UNSET - { - return Err(AflError::Unknown(format!( - "Reserved tag supplied to send_buf ({:#X})", - tag - ))); - } - unsafe { - let msg = self.alloc_next(buf.len())?; - (*msg).tag = tag; - buf.as_ptr() - .copy_to_nonoverlapping((*msg).buf.as_mut_ptr(), buf.len()); - self.send(msg) - } + self.llmp_out.send_buf(tag, buf) } /// A client receives a broadcast message. diff --git a/afl/src/events/llmp_manager.rs b/afl/src/events/llmp_manager.rs index 4a191c1fe7..78542e509b 100644 --- a/afl/src/events/llmp_manager.rs +++ b/afl/src/events/llmp_manager.rs @@ -9,55 +9,6 @@ use super::{ Event, EventManager, }; -/* -pub unsafe fn llmp_tcp_server_clientloop(client: &mut LlmpClient, _data: *mut c_void) -> ! { - // Later in the execution, after the initial map filled up, - // the current broacast map will will point to a different map. - // However, the original map is (as of now) never freed, new clients will start - // to read from the initial map id. 
- let initial_broadcasts_map_str = client - .as_ref() - .unwrap() - .current_broadcast_map - .as_ref() - .unwrap() - .shm_str; - - let listener = TcpListener::bind("0.0.0.0:3333").unwrap(); - // accept connections and process them, spawning a new thread for each one - println!("Server listening on port 3333"); - loop { - let (mut stream, addr) = match listener.accept() { - Ok(res) => res, - Err(e) => { - dbg!("Ignoring failed accept", e); - continue; - } - }; - dbg!("New connection", addr, stream.peer_addr().unwrap()); - match stream.write(&initial_broadcasts_map_str as &[u8]) { - Ok(_) => {} // fire & forget - Err(e) => { - dbg!("Could not send to shmap to client", e); - continue; - } - }; - let mut new_client_map_str: [u8; 20] = Default::default(); - let map_str_len = match stream.read(&mut new_client_map_str) { - Ok(res) => res, - Err(e) => { - dbg!("Ignoring failed read from client", e); - continue; - } - }; - if map_str_len < 20 { - dbg!("Didn't receive a complete shmap id str from client. Ignoring."); - continue; - } - } -} -*/ - /// Eventmanager for multi-processed application #[cfg(feature = "std")] pub struct LLMPEventManager diff --git a/afl/src/events/mod.rs b/afl/src/events/mod.rs index 42c104a693..a301f2f48d 100644 --- a/afl/src/events/mod.rs +++ b/afl/src/events/mod.rs @@ -6,13 +6,11 @@ use core::marker::PhantomData; use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -pub mod llmp_translated; // TODO: Abstract away. #[cfg(feature = "std")] pub mod shmem_translated; -#[cfg(feature = "std")] -pub use crate::events::llmp::LLMPEventManager; +/*#[cfg(feature = "std")] +pub use crate::events::llmp::LLMPEventManager;*/ #[cfg(feature = "std")] use std::io::Write; diff --git a/afl/src/events/shmem_translated.rs b/afl/src/events/shmem_translated.rs index e11c000cd3..dbe195dcdd 100644 --- a/afl/src/events/shmem_translated.rs +++ b/afl/src/events/shmem_translated.rs @@ -59,7 +59,6 @@ const AFL_RET_SUCCESS: c_uint = 0; // too.) 
#[derive(Clone)] -#[repr(C)] pub struct AflShmem { pub shm_str: [u8; 20], pub shm_id: c_int, From 960154a3de297081c19b6317b6c10f865c38c126 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Tue, 8 Dec 2020 23:59:24 +0100 Subject: [PATCH 09/17] adapted llmp test --- afl/llmp_test/src/main.rs | 136 +++++++++++++++----------------------- afl/src/events/llmp.rs | 99 +++++++++++++++++++-------- 2 files changed, 128 insertions(+), 107 deletions(-) diff --git a/afl/llmp_test/src/main.rs b/afl/llmp_test/src/main.rs index ed852b990f..8742bde4c2 100644 --- a/afl/llmp_test/src/main.rs +++ b/afl/llmp_test/src/main.rs @@ -1,60 +1,33 @@ -#[macro_use] extern crate alloc; use core::convert::TryInto; -use core::ffi::c_void; -use core::mem::size_of; -use core::ptr; +use core::time::Duration; use std::thread; use std::time; -use afl::events::llmp_translated::*; +use afl::events::llmp; const TAG_SIMPLE_U32_V1: u32 = 0x51300321; const TAG_MATH_RESULT_V1: u32 = 0x77474331; -unsafe fn llmp_test_clientloop(client: *mut LlmpClient, _data: *mut c_void) -> ! { - let mut counter: u32 = 0; - loop { - counter += 1; - - let msg = llmp_client_alloc_next(client, size_of::()); - core::ptr::copy( - counter.to_be_bytes().as_ptr(), - (*msg).buf.as_mut_ptr(), - size_of::(), - ); - (*msg).tag = TAG_SIMPLE_U32_V1; - llmp_client_send(client, msg).unwrap(); - - thread::sleep(time::Duration::from_millis(100)); - } -} - -unsafe fn u32_from_msg(msg: *const LlmpMsg) -> u32 { - u32::from_be_bytes( - alloc::slice::from_raw_parts((*msg).buf.as_ptr(), size_of::()) - .try_into() - .unwrap(), - ) -} - -unsafe fn test_adder_clientloop(client: *mut LlmpClient, _data: *mut c_void) -> ! { +fn adder_loop(port: u16) -> ! 
{ + let mut client = llmp::LlmpClient::create_attach_to_tcp(port).unwrap(); let mut last_result: u32 = 0; let mut current_result: u32 = 0; loop { let mut msg_counter = 0; loop { - let last_msg = llmp_client_recv(client); - if last_msg == 0 as *mut LlmpMsg { - break; - } + let (tag, buf) = match client.recv_buf().unwrap() { + None => break, + Some(msg) => msg, + }; msg_counter += 1; - match (*last_msg).tag { + match tag { TAG_SIMPLE_U32_V1 => { - current_result = current_result.wrapping_add(u32_from_msg(last_msg)); + current_result = + current_result.wrapping_add(u32::from_le_bytes(buf.try_into().unwrap())); } - _ => println!("Adder Client ignored unknown message {}", (*last_msg).tag), + _ => println!("Adder Client ignored unknown message {}", tag), }; } @@ -64,14 +37,9 @@ unsafe fn test_adder_clientloop(client: *mut LlmpClient, _data: *mut c_void) -> msg_counter, current_result ); - let msg = llmp_client_alloc_next(client, size_of::()); - core::ptr::copy( - current_result.to_be_bytes().as_ptr(), - (*msg).buf.as_mut_ptr(), - size_of::(), - ); - (*msg).tag = TAG_MATH_RESULT_V1; - llmp_client_send(client, msg).unwrap(); + client + .send_buf(TAG_MATH_RESULT_V1, ¤t_result.to_le_bytes()) + .unwrap(); last_result = current_result; } @@ -80,61 +48,67 @@ unsafe fn test_adder_clientloop(client: *mut LlmpClient, _data: *mut c_void) -> } unsafe fn broker_message_hook( - _broker: *mut LlmpBroker, - client_metadata: *mut LlmpBrokerClientMetadata, - message: *mut LlmpMsg, - _data: *mut c_void, -) -> LlmpMsgHookResult { + client_id: u32, + message: *mut llmp::LlmpMsg, +) -> llmp::LlmpMsgHookResult { match (*message).tag { TAG_SIMPLE_U32_V1 => { println!( "Client {:?} sent message: {:?}", - (*client_metadata).pid, - u32_from_msg(message) + client_id, + u32::from_le_bytes((*message).as_slice().try_into().unwrap()) ); - LlmpMsgHookResult::ForwardToClients + llmp::LlmpMsgHookResult::ForwardToClients } TAG_MATH_RESULT_V1 => { println!( "Adder Client has this current result: {:?}", - 
u32_from_msg(message) + u32::from_le_bytes((*message).as_slice().try_into().unwrap()) ); - LlmpMsgHookResult::Handled + llmp::LlmpMsgHookResult::Handled } _ => { println!("Unknwon message id received!"); - LlmpMsgHookResult::ForwardToClients + llmp::LlmpMsgHookResult::ForwardToClients } } } fn main() { /* The main node has a broker, and a few worker threads */ - let threads_total = num_cpus::get(); - let counter_thread_count = threads_total - 2; - println!( - "Running with 1 broker, 1 adder, and {} counter clients", - counter_thread_count - ); + let mode = std::env::args() + .nth(1) + .expect("no mode specified, chose 'broker', 'adder', or 'printer'"); + let port: u16 = std::env::args() + .nth(2) + .unwrap_or("1337".into()) + .parse::() + .unwrap(); + println!("Launching in mode {} on port {}", mode, port); - unsafe { - let mut broker = LlmpBroker::new().expect("Failed to create llmp broker"); - for i in 0..counter_thread_count { - println!("Adding client {}", i); - broker - .register_childprocess_clientloop(llmp_test_clientloop, ptr::null_mut()) - .expect("could not add child clientloop"); + match mode.as_str() { + "broker" => { + let mut broker: llmp::LlmpBroker = llmp::LlmpBroker::new().unwrap(); + broker.launch_tcp_listener(port).unwrap(); + broker.add_message_hook(broker_message_hook); + broker.loop_forever(Some(Duration::from_millis(5))) + } + "adder" => { + let mut client = llmp::LlmpClient::create_attach_to_tcp(port).unwrap(); + let mut counter: u32 = 0; + loop { + counter = counter.wrapping_add(1); + client + .send_buf(TAG_SIMPLE_U32_V1, &counter.to_le_bytes()) + .unwrap(); + } + } + "printer" => { + adder_loop(port); + } + _ => { + println!("No valid mode supplied"); } - - broker - .register_childprocess_clientloop(test_adder_clientloop, ptr::null_mut()) - .expect("Error registering childprocess"); - - println!("Spawning broker"); - - broker.add_message_hook(broker_message_hook, ptr::null_mut()); - - broker.run(); } } diff --git a/afl/src/events/llmp.rs 
b/afl/src/events/llmp.rs index 18cfcfc1f1..fbf4fe0bf9 100644 --- a/afl/src/events/llmp.rs +++ b/afl/src/events/llmp.rs @@ -48,16 +48,16 @@ Then register some clientloops using llmp_broker_register_threaded_clientloop */ -use core::ptr; -use core::sync::atomic::{compiler_fence, Ordering}; -use core::time::Duration; -use libc::{c_uint, c_ulong, c_ushort}; -use std::{ +use core::{ cmp::max, - ffi::CStr, - io::{Read, Write}, mem::size_of, - net::TcpListener, + ptr, slice, + sync::atomic::{compiler_fence, Ordering}, + time::Duration, +}; +use std::{ + io::{Read, Write}, + net::{TcpListener, TcpStream}, thread, }; @@ -66,7 +66,7 @@ use crate::AflError; use super::shmem_translated::AflShmem; -/// We'll start off with 256 megabyte maps per fuzzer +/// We'll start off with 256 megabyte maps per fuzzer client const LLMP_PREF_INITIAL_MAP_SIZE: usize = 1 << 28; /// What byte count to align messages to /// LlmpMsg sizes (including header) will always be rounded up to be a multiple of this value @@ -151,13 +151,21 @@ pub struct LlmpMsg { pub buf: [u8; 0], } +/// The message we receive +impl LlmpMsg { + /// Gets the buffer from this message as slice, with the corrent length. 
+ pub fn as_slice(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.buf.as_ptr(), self.buf_len as usize) } + } +} + /// Contents of the share mem pages, used by llmp internally #[derive(Copy, Clone)] #[repr(C, packed)] pub struct LlmpPage { pub sender: u32, - pub save_to_unmap: c_ushort, - pub sender_dead: c_ushort, + pub save_to_unmap: u16, + pub sender_dead: u16, pub current_msg_id: u64, pub size_total: usize, pub size_used: usize, @@ -300,7 +308,7 @@ impl LlmpSender { if (*ret).tag == LLMP_TAG_UNINITIALIZED { panic!("Did not call send() on last message!"); } - (*ret).buf_len_padded = size_of::() as c_ulong; + (*ret).buf_len_padded = size_of::() as u64; (*ret).message_id = if !last_msg.is_null() { (*last_msg).message_id + 1 } else { @@ -380,8 +388,8 @@ impl LlmpSender { buf_len_padded, (*page).size_used, last_msg)); } (*page).size_used = (*page).size_used + complete_msg_size; - (*ret).buf_len_padded = buf_len_padded as c_ulong; - (*ret).buf_len = buf_len as c_ulong; + (*ret).buf_len_padded = buf_len_padded as u64; + (*ret).buf_len = buf_len as u64; /* DBG("Returning new message at %p with len %ld, TAG was %x", ret, ret->buf_len_padded, ret->tag); */ /* Maybe catch some bugs... */ (*_llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; @@ -396,7 +404,7 @@ impl LlmpSender { if self.last_msg_sent == msg { panic!("Message sent twice!"); } - if (*msg).tag == LLMP_TAG_UNSET as c_uint { + if (*msg).tag == LLMP_TAG_UNSET { panic!(format!( "No tag set on message with id {}", (*msg).message_id @@ -591,6 +599,24 @@ impl LlmpReceiver { } } } + + /// Returns the next message, tag, buf, if avaliable, else None + pub fn recv_buf(&mut self) -> Result, AflError> { + unsafe { + Ok(match self.recv()? 
{ + Some(msg) => Some(((*msg).tag, (*msg).as_slice())), + None => None, + }) + } + } + + /// Returns the next message, tag, buf, looping until it becomes available + pub fn recv_buf_blocking(&mut self) -> Result<(u32, &[u8]), AflError> { + unsafe { + let msg = self.recv_blocking()?; + Ok(((*msg).tag, (*msg).as_slice())) + } + } } /// The page struct, placed on a shared mem instance. @@ -606,13 +632,6 @@ impl LlmpSharedMap { Ok(Self { shmem }) } - /// Initialize from a 0-terminated sharedmap id string and its size - pub fn from_str(shm_str: &CStr, map_size: usize) -> Result { - let shmem = AflShmem::from_str(shm_str, map_size)?; - // Not initializing the page here - the other side should have done it already! - Ok(Self { shmem }) - } - /// Initialize from a shm_str with fixed len of 20 pub fn from_name_slice(shm_str: &[u8; 20], map_size: usize) -> Result { let shmem = AflShmem::from_name_slice(shm_str, map_size)?; @@ -630,7 +649,7 @@ impl LlmpSharedMap { /// It may intercept messages passing through. impl LlmpBroker { /// Create and initialize a new llmp_broker - pub unsafe fn new() -> Result { + pub fn new() -> Result { let broker = LlmpBroker { llmp_out: LlmpSender { id: 0, @@ -747,10 +766,12 @@ impl LlmpBroker { /// The broker walks all pages and looks for changes, then broadcasts them on /// its own shared page, once. - pub unsafe fn once(&mut self) -> Result<(), AflError> { + pub fn once(&mut self) -> Result<(), AflError> { compiler_fence(Ordering::SeqCst); for i in 0..self.llmp_clients.len() { - self.handle_new_msgs(i as u32)?; + unsafe { + self.handle_new_msgs(i as u32)?; + } } Ok(()) } @@ -758,7 +779,7 @@ impl LlmpBroker { /// Loops infinitely, forwarding and handling all incoming messages from clients. /// Never returns. Panics on error. /// 5 millis of sleep can't hurt to keep busywait not at 100% - pub unsafe fn loop_forever(&mut self, sleep_time: Option) -> ! { + pub fn loop_forever(&mut self, sleep_time: Option) -> ! 
{ loop { compiler_fence(Ordering::SeqCst); self.once() @@ -861,6 +882,22 @@ impl LlmpClient { }) } + pub fn create_attach_to_tcp(port: u16) -> Result { + let mut stream = TcpStream::connect(format!("127.0.0.1:{}", port))?; + println!("Connected to port {}", port); + + let mut new_broker_map_str: [u8; 20] = Default::default(); + stream.read_exact(&mut new_broker_map_str)?; + + let ret = Self::new(LlmpSharedMap::from_name_slice( + &new_broker_map_str, + LLMP_PREF_INITIAL_MAP_SIZE, + )?)?; + + stream.write(&ret.llmp_out.out_maps.first().unwrap().shmem.shm_str)?; + Ok(ret) + } + /// Commits a msg to the client's out map pub unsafe fn send(&mut self, msg: *mut LlmpMsg) -> Result<(), AflError> { self.llmp_out.send(msg) @@ -888,4 +925,14 @@ impl LlmpClient { pub unsafe fn alloc_next(&mut self, buf_len: usize) -> Result<*mut LlmpMsg, AflError> { self.llmp_out.alloc_next(buf_len) } + + /// Returns the next message, tag, buf, if avaliable, else None + pub fn recv_buf(&mut self) -> Result, AflError> { + self.llmp_in.recv_buf() + } + + /// Receives a buf from the broker, looping until a messages becomes avaliable + pub fn recv_buf_blocking(&mut self) -> Result<(u32, &[u8]), AflError> { + self.llmp_in.recv_buf_blocking() + } } From d17c281b5531110fb7a218f0a45340e810b6bf37 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:28:10 +0100 Subject: [PATCH 10/17] connects and sends something again --- afl/llmp_test/src/main.rs | 6 ++-- afl/src/events/llmp.rs | 65 +++++++++++++++++++++++++++------------ 2 files changed, 48 insertions(+), 23 deletions(-) diff --git a/afl/llmp_test/src/main.rs b/afl/llmp_test/src/main.rs index 8742bde4c2..167aee970f 100644 --- a/afl/llmp_test/src/main.rs +++ b/afl/llmp_test/src/main.rs @@ -79,7 +79,7 @@ fn main() { let mode = std::env::args() .nth(1) - .expect("no mode specified, chose 'broker', 'adder', or 'printer'"); + .expect("no mode specified, chose 'broker', 'ctr', or 'adder'"); let port: u16 = std::env::args() .nth(2) 
.unwrap_or("1337".into()) @@ -94,7 +94,7 @@ fn main() { broker.add_message_hook(broker_message_hook); broker.loop_forever(Some(Duration::from_millis(5))) } - "adder" => { + "ctr" => { let mut client = llmp::LlmpClient::create_attach_to_tcp(port).unwrap(); let mut counter: u32 = 0; loop { @@ -104,7 +104,7 @@ fn main() { .unwrap(); } } - "printer" => { + "adder" => { adder_loop(port); } _ => { diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp.rs index fbf4fe0bf9..0f72928f15 100644 --- a/afl/src/events/llmp.rs +++ b/afl/src/events/llmp.rs @@ -83,7 +83,8 @@ const LLMP_TAG_END_OF_PAGE: u32 = 0xAF1E0F1; const LLMP_TAG_NEW_SHM_CLIENT: u32 = 0xC11E471; /// Size of a new page message, header, payload, and alignment -const EOP_MSG_SIZE: usize = llmp_align(size_of::() + size_of::()); +const EOP_MSG_SIZE: usize = + llmp_align(size_of::() + size_of::()); /// The header length of a llmp page in a shared map (until messages start) const LLMP_PAGE_HEADER_LEN: usize = size_of::(); @@ -199,7 +200,7 @@ pub enum LlmpMsgHookResult { /// LLMP_TAG_END_OF_PAGE_V1 #[derive(Copy, Clone)] #[repr(C, packed)] -struct LlmpPayloadSharedMap { +struct LlmpPayloadSharedMapInfo { pub map_size: usize, pub shm_str: [u8; 20], } @@ -308,7 +309,7 @@ impl LlmpSender { if (*ret).tag == LLMP_TAG_UNINITIALIZED { panic!("Did not call send() on last message!"); } - (*ret).buf_len_padded = size_of::() as u64; + (*ret).buf_len_padded = size_of::() as u64; (*ret).message_id = if !last_msg.is_null() { (*last_msg).message_id + 1 } else { @@ -440,7 +441,7 @@ impl LlmpSender { let mut out: *mut LlmpMsg = self.alloc_eop(); (*out).sender = (*old_map).sender; - let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + let mut end_of_page_msg = (*out).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo; (*end_of_page_msg).map_size = new_map_shmem.shmem.map_size; (*end_of_page_msg).shm_str = new_map_shmem.shmem.shm_str; @@ -543,14 +544,14 @@ impl LlmpReceiver { LLMP_TAG_END_OF_PAGE => { 
dbg!("Got end of page, allocing next"); // Handle end of page - if (*msg).buf_len < size_of::() as u64 { + if (*msg).buf_len < size_of::() as u64 { panic!(format!( "Illegal message length for EOP (is {}, expected {})", (*msg).buf_len_padded, - size_of::() + size_of::() )); } - let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo; /* We can reuse the map mem space, no need to free and calloc. However, the pageinfo points to the map we're about to unmap. @@ -726,13 +727,13 @@ impl LlmpBroker { if (*msg).tag == LLMP_TAG_NEW_SHM_CLIENT { /* This client informs us about yet another new client add it to the list! Also, no need to forward this msg. */ - if (*msg).buf_len < size_of::() as u64 { + if (*msg).buf_len < size_of::() as u64 { println!("Ignoring broken CLIENT_ADDED msg due to incorrect size. Expected {} but got {}", (*msg).buf_len_padded, - size_of::() + size_of::() ); } else { - let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMap; + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo; match LlmpSharedMap::from_name_slice(&(*pageinfo).shm_str, (*pageinfo).map_size) { @@ -840,22 +841,27 @@ impl LlmpBroker { } }; let mut new_client_map_str: [u8; 20] = Default::default(); - let map_str_len = match stream.read(&mut new_client_map_str) { - Ok(res) => res, + match stream.read_exact(&mut new_client_map_str) { + Ok(()) => (), Err(e) => { dbg!("Ignoring failed read from client", e); continue; } }; - if map_str_len < 20 { - dbg!("Didn't receive a complete shmap id str from client. 
Ignoring."); - continue; - } - match new_client_sender.send_buf(LLMP_TAG_NEW_SHM_CLIENT, &new_client_map_str) { - Ok(()) => (), - Err(e) => println!("Error forwarding client on map: {:?}", e), - }; + unsafe { + let msg = new_client_sender + .alloc_next(size_of::()) + .expect("Could not allocate a new message in shared map."); + (*msg).tag = LLMP_TAG_NEW_SHM_CLIENT; + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo; + (*pageinfo).shm_str = new_client_map_str; + (*pageinfo).map_size = LLMP_PREF_INITIAL_MAP_SIZE; + match new_client_sender.send(msg) { + Ok(()) => (), + Err(e) => println!("Error forwarding client on map: {:?}", e), + }; + } } })) } @@ -908,6 +914,25 @@ impl LlmpClient { self.llmp_out.send_buf(tag, buf) } + /// Informs the broker about a new client in town, with the given map id + pub fn send_client_added_msg( + &mut self, + shm_str: &[u8; 20], + shm_id: usize, + ) -> Result<(), AflError> { + // We write this by hand to get around checks in send_buf + unsafe { + let msg = self + .alloc_next(size_of::()) + .expect("Could not allocate a new message in shared map."); + (*msg).tag = LLMP_TAG_NEW_SHM_CLIENT; + let pageinfo = (*msg).buf.as_mut_ptr() as *mut LlmpPayloadSharedMapInfo; + (*pageinfo).shm_str = *shm_str; + (*pageinfo).map_size = shm_id; + self.send(msg) + } + } + /// A client receives a broadcast message. 
/// Returns null if no message is availiable pub unsafe fn recv(&mut self) -> Result, AflError> { From 2f17068444b921cf6ceccc78777d8080cef7d0e4 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:31:12 +0100 Subject: [PATCH 11/17] fixed example --- afl/llmp_test/src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/afl/llmp_test/src/main.rs b/afl/llmp_test/src/main.rs index 167aee970f..ab661bdd7b 100644 --- a/afl/llmp_test/src/main.rs +++ b/afl/llmp_test/src/main.rs @@ -102,6 +102,8 @@ fn main() { client .send_buf(TAG_SIMPLE_U32_V1, &counter.to_le_bytes()) .unwrap(); + prinln!("Writing", counter); + thread::sleep(Duration::from_secs(1)) } } "adder" => { From 93860dcbd8dabf0b70763e5945bfbbfff92606aa Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:34:03 +0100 Subject: [PATCH 12/17] testcase fixed some more --- afl/llmp_test/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/afl/llmp_test/src/main.rs b/afl/llmp_test/src/main.rs index ab661bdd7b..84d5dd5b5e 100644 --- a/afl/llmp_test/src/main.rs +++ b/afl/llmp_test/src/main.rs @@ -102,7 +102,7 @@ fn main() { client .send_buf(TAG_SIMPLE_U32_V1, &counter.to_le_bytes()) .unwrap(); - prinln!("Writing", counter); + println!("CTR Client writing {}", counter); thread::sleep(Duration::from_secs(1)) } } From 8e420fcb5b63d0e4168c00725028f01b0dcabfcd Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:39:39 +0100 Subject: [PATCH 13/17] moved llmp wrapper --- afl/src/events/{llmp.rs => llmp_manager.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename afl/src/events/{llmp.rs => llmp_manager.rs} (100%) diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp_manager.rs similarity index 100% rename from afl/src/events/llmp.rs rename to afl/src/events/llmp_manager.rs From e8d0d6729a29abcaefe3d4cf5228a3ccda0b02d9 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:40:04 +0100 Subject: [PATCH 14/17] moved llmp --- 
afl/src/events/{llmp_translated.rs => llmp.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename afl/src/events/{llmp_translated.rs => llmp.rs} (100%) diff --git a/afl/src/events/llmp_translated.rs b/afl/src/events/llmp.rs similarity index 100% rename from afl/src/events/llmp_translated.rs rename to afl/src/events/llmp.rs From 5fdbd5439ea2133623c19c9f93ac9f4b76fe7bf5 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:46:12 +0100 Subject: [PATCH 15/17] overwrote llmp merge issues --- afl/src/events/llmp.rs | 148 ----------------------------------------- 1 file changed, 148 deletions(-) diff --git a/afl/src/events/llmp.rs b/afl/src/events/llmp.rs index f309afa6ab..0f72928f15 100644 --- a/afl/src/events/llmp.rs +++ b/afl/src/events/llmp.rs @@ -617,53 +617,8 @@ impl LlmpReceiver { let msg = self.recv_blocking()?; Ok(((*msg).tag, (*msg).as_slice())) } - ret = _llmp_next_msg_ptr(last_msg); - (*ret).message_id = (*last_msg).message_id.wrapping_add(1 as c_uint) } - /* The beginning of our message should be messages + size_used, else nobody - * sent the last msg! */ - /* DBG("XXX ret %p - page->messages %p = %lu != %lu, will add %lu -> %p\n", ret, page->messages, - (c_ulong)((u8 *)ret - (u8 *)page->messages), page->size_used, complete_msg_size, ((u8 *)ret) + complete_msg_size); - */ - if last_msg.is_null() && (*page).size_used != 0 - || ((ret as *mut u8).wrapping_sub((*page).messages.as_mut_ptr() as *mut u8 as usize)) - as c_ulong - != (*page).size_used - { - panic!(format!("Allocated new message without calling send() inbetween. 
ret: {:?}, page: {:?}, complete_msg_size: {:?}, size_used: {:?}, last_msg: {:?}", ret, page, - buf_len_padded, (*page).size_used, last_msg)); - } - (*page).size_used = ((*page).size_used as c_ulong).wrapping_add(complete_msg_size) as c_ulong; - (*ret).buf_len_padded = buf_len_padded; - (*ret).buf_len = buf_len; - /* DBG("Returning new message at %p with len %ld, TAG was %x", ret, ret->buf_len_padded, ret->tag); */ - /* Maybe catch some bugs... */ - (*_llmp_next_msg_ptr(ret)).tag = 0xdeadaf as c_uint; - (*ret).tag = 0xa143af11 as c_uint; - return ret; } -/* Commit the message last allocated by llmp_alloc_next to the queue. - After commiting, the msg shall no longer be altered! - It will be read by the consuming threads (broker->clients or client->broker) -*/ -unsafe fn llmp_send(page: *mut LlmpPage, msg: *mut LlmpMsg) -> Result<(), AflError> { - if (*msg).tag == 0xdeadaf as c_uint { - panic!(format!( - "No tag set on message with id {}", - (*msg).message_id - )); - } - if msg.is_null() || !llmp_msg_in_page(page, msg) { - return Err(AflError::Unknown(format!( - "Llmp Message {:?} is null or not in current page", - msg - ))); - } - compiler_fence(Ordering::SeqCst); - ::std::ptr::write_volatile( - &mut (*page).current_msg_id as *mut c_ulong, - (*msg).message_id as c_ulong, - ); /// The page struct, placed on a shared mem instance. impl LlmpSharedMap { @@ -711,109 +666,6 @@ impl LlmpBroker { Ok(broker) } - _llmp_page_init(shmem2page(uninited_shmem), sender as u32, size_requested); - return shmem2page(uninited_shmem); -} -/* This function handles EOP by creating a new shared page and informing the -listener about it using a EOP message. 
*/ -unsafe fn llmp_handle_out_eop( - mut maps: *mut AflShmem, - map_count_p: *mut c_ulong, - last_msg_p: *mut *mut LlmpMsg, -) -> *mut AflShmem { - let map_count: u32 = *map_count_p as u32; - let mut old_map: *mut LlmpPage = - shmem2page(&mut *maps.offset(map_count.wrapping_sub(1 as c_uint) as isize)); - maps = afl_realloc( - maps as *mut c_void, - (map_count.wrapping_add(1 as c_uint) as c_ulong) - .wrapping_mul(::std::mem::size_of::() as c_ulong), - ) as *mut AflShmem; - if maps.is_null() { - return 0 as *mut AflShmem; - } - /* Broadcast a new, large enough, message. Also sorry for that c ptr stuff! */ - let mut new_map: *mut LlmpPage = llmp_new_page_shmem( - &mut *maps.offset(map_count as isize), - (*old_map).sender as c_ulong, - new_map_size((*old_map).max_alloc_size), - ); - if new_map.is_null() { - afl_free(maps as *mut c_void); - return 0 as *mut AflShmem; - } - /* Realloc may have changed the location of maps_p (and old_map) in memory :/ - */ - old_map = shmem2page(&mut *maps.offset(map_count.wrapping_sub(1 as c_uint) as isize)); - *map_count_p = map_count.wrapping_add(1 as c_uint) as c_ulong; - ::std::ptr::write_volatile( - &mut (*new_map).current_msg_id as *mut c_ulong, - (*old_map).current_msg_id, - ); - (*new_map).max_alloc_size = (*old_map).max_alloc_size; - /* On the old map, place a last message linking to the new map for the clients - * to consume */ - let mut out: *mut LlmpMsg = llmp_alloc_eop(old_map, *last_msg_p); - (*out).sender = (*old_map).sender; - let mut new_page_msg: *mut LlmpPayloadNewPage = - (*out).buf.as_mut_ptr() as *mut LlmpPayloadNewPage; - /* copy the infos to the message we're going to send on the old buf */ - (*new_page_msg).map_size = (*maps.offset(map_count as isize)).map_size; - memcpy( - (*new_page_msg).shm_str.as_mut_ptr() as *mut c_void, - (*maps.offset(map_count as isize)).shm_str.as_mut_ptr() as *const c_void, - 20 as c_ulong, - ); - // We never sent a msg on the new buf */ - *last_msg_p = 0 as *mut LlmpMsg; - /* Send the 
last msg on the old buf */ - match llmp_send(old_map, out) { - Err(_e) => { - afl_free(maps as *mut c_void); - println!("Error sending message"); - 0 as *mut AflShmem - } - Ok(_) => maps, - } -} -/* no more space left! We'll have to start a new page */ -pub unsafe fn llmp_broker_handle_out_eop(broker: *mut LlmpBroker) -> AflRet { - (*broker).broadcast_maps = llmp_handle_out_eop( - (*broker).broadcast_maps, - &mut (*broker).broadcast_map_count, - &mut (*broker).last_msg_sent, - ); - return if !(*broker).broadcast_maps.is_null() { - AFL_RET_SUCCESS - } else { - AFL_RET_ALLOC - } as AflRet; -} -pub unsafe fn llmp_broker_alloc_next(broker: *mut LlmpBroker, len: c_ulong) -> *mut LlmpMsg { - let mut broadcast_page: *mut LlmpPage = shmem2page(_llmp_broker_current_broadcast_map(broker)); - let mut out: *mut LlmpMsg = llmp_alloc_next(broadcast_page, (*broker).last_msg_sent, len); - if out.is_null() { - /* no more space left! We'll have to start a new page */ - let ret: AflRet = llmp_broker_handle_out_eop(broker); - if ret != AFL_RET_SUCCESS as AflRet { - panic!("Error handling broker out EOP"); - } - /* llmp_handle_out_eop allocates a new current broadcast_map */ - broadcast_page = shmem2page(_llmp_broker_current_broadcast_map(broker)); - /* the alloc is now on a new page */ - out = llmp_alloc_next(broadcast_page, (*broker).last_msg_sent, len); - if out.is_null() { - panic!(format!( - "Error allocating {} bytes in shmap {:?}", - len, - (*_llmp_broker_current_broadcast_map(broker)) - .shm_str - .as_mut_ptr(), - )); - } - } - return out; -} /// Allocate the next message on the outgoing map unsafe fn alloc_next(&mut self, buf_len: usize) -> Result<*mut LlmpMsg, AflError> { From 06fbdf2564d62507f8ce7708c3e6f06a6440f3b7 Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:48:25 +0100 Subject: [PATCH 16/17] removed unused --- afl/src/events/llmp_manager.rs | 47 ---------------------------- 1 file changed, 47 deletions(-) diff --git 
a/afl/src/events/llmp_manager.rs b/afl/src/events/llmp_manager.rs index 66124b4689..e21a03edcb 100644 --- a/afl/src/events/llmp_manager.rs +++ b/afl/src/events/llmp_manager.rs @@ -11,53 +11,6 @@ use super::{ Event, EventManager, }; -pub unsafe fn llmp_tcp_server_clientloop(client: *mut LlmpClient, _data: *mut c_void) -> ! { - // Later in the execution, after the initial map filled up, - // the current broacast map will will point to a different map. - // However, the original map is (as of now) never freed, new clients will start - // to read from the initial map id. - let initial_broadcasts_map_str = client - .as_ref() - .unwrap() - .current_broadcast_map - .as_ref() - .unwrap() - .shm_str; - - let listener = TcpListener::bind("0.0.0.0:3333").unwrap(); - // accept connections and process them, spawning a new thread for each one - println!("Server listening on port 3333"); - loop { - let (mut stream, addr) = match listener.accept() { - Ok(res) => res, - Err(e) => { - dbg!("Ignoring failed accept", e); - continue; - } - }; - dbg!("New connection", addr, stream.peer_addr().unwrap()); - match stream.write(&initial_broadcasts_map_str as &[u8]) { - Ok(_) => {} // fire & forget - Err(e) => { - dbg!("Could not send to shmap to client", e); - continue; - } - }; - let mut new_client_map_str: [u8; 20] = Default::default(); - let map_str_len = match stream.read(&mut new_client_map_str) { - Ok(res) => res, - Err(e) => { - dbg!("Ignoring failed read from client", e); - continue; - } - }; - if map_str_len < 20 { - dbg!("Didn't receive a complete shmap id str from client. 
Ignoring."); - continue; - } - } -} - /// Eventmanager for multi-processed application #[cfg(feature = "std")] pub struct LLMPEventManager From 0ccb8dcd6863c79a704bd41f93794ef050064dbe Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Wed, 9 Dec 2020 00:49:25 +0100 Subject: [PATCH 17/17] more removals --- afl/src/events/llmp_manager.rs | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/afl/src/events/llmp_manager.rs b/afl/src/events/llmp_manager.rs index e21a03edcb..97e3e3f65e 100644 --- a/afl/src/events/llmp_manager.rs +++ b/afl/src/events/llmp_manager.rs @@ -85,26 +85,5 @@ where E: Executor, R: Rand, { - /// Forks n processes, calls broker handler and client handlers, never returns. - pub fn spawn( - process_count: usize, - broker_message_hook: LlmpMsgHookFn, - clientloops: LlmpClientloopFn, - ) -> ! { - unsafe { - let mut broker = LlmpBroker::new().expect("Failed to create llmp"); - - for i in 0..process_count - 1 { - println!("Adding client {}", i); - broker - .register_childprocess_clientloop(clientloops, ptr::null_mut()) - .expect("could not add child clientloop"); - } - - println!("Spawning broker"); - broker.add_message_hook(broker_message_hook, ptr::null_mut()); - - broker.run(); - } - } + }