Add libdesyscall (#1221)

Co-authored-by: Dongjia "toka" Zhang <tokazerkje@outlook.com>
Andrea Fioraldi 2024-06-12 11:28:31 +02:00 committed by GitHub
parent 0ed295842b
commit 09faec15f4
8 changed files with 952 additions and 0 deletions

utils/desyscall/Cargo.toml Normal file

@@ -0,0 +1,18 @@
[package]
name = "desyscall"
version = "0.1.0"
edition = "2021"
[dependencies]
meminterval = "0.3"
libc = "0.2"
[dev-dependencies]
rusty-fork = "0.3.0"
[build-dependencies]
cc = "1"
[lib]
name = "desyscall"
crate-type = ["rlib", "cdylib"]

utils/desyscall/README.md Normal file

@@ -0,0 +1,3 @@
# libdesyscall
This library emulates various costly syscalls in userspace, so that targets scale better across cores on Linux.
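A minimal usage sketch (not part of this commit): build the `cdylib` and preload it into the target with `LD_PRELOAD`, or link the `rlib` into a harness directly. Emulation only kicks in once the context is enabled, and this assumes the interposed `write` really shadows libc's:

```rust
use desyscall::Context;

fn main() {
    let ctx = Context::get();
    ctx.enable(); // mmap/munmap/mprotect bookkeeping now happens in userspace
    println!("swallowed: write(1, ..) claims success without doing any I/O");
    ctx.disable(); // back to raw syscalls
    println!("this line reaches the terminal again");
}
```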

utils/desyscall/build.rs Normal file

@@ -0,0 +1,24 @@
// build.rs
use std::env;
fn main() {
let out_dir = env::var_os("OUT_DIR").unwrap();
let out_dir = out_dir.to_string_lossy().to_string();
println!("cargo:rerun-if-changed=src/syscalls.c");
// Enforce clang for its -fsanitize-coverage support.
std::env::set_var("CC", "clang");
std::env::set_var("CXX", "clang++");
cc::Build::new().file("src/syscalls.c").compile("syscalls");
println!("cargo:rerun-if-changed=src/syscalls.c");
cc::Build::new().file("src/patch.c").compile("patch");
println!("cargo:rerun-if-changed=src/patch.c");
println!("cargo:rustc-link-search=native={}", &out_dir);
println!("cargo:rerun-if-changed=build.rs");
}

utils/desyscall/src/file.rs Normal file

@@ -0,0 +1,32 @@
use libc::{c_int, size_t, ssize_t};
use crate::{Context, Pointer};
extern "C" {
// ssize_t __libafl_raw_write(int fd, const void *buf, size_t count);
fn __libafl_raw_write(fd: c_int, buf: Pointer, count: size_t) -> ssize_t;
// ssize_t __libafl_raw_read(int fd, void *buf, size_t count)
fn __libafl_raw_read(fd: c_int, buf: Pointer, count: size_t) -> ssize_t;
}
#[no_mangle]
pub unsafe extern "C" fn write(fd: c_int, buf: Pointer, count: size_t) -> ssize_t {
let ctx = Context::get();
if ctx.enabled && (fd == 1 || fd == 2) {
count as ssize_t
} else {
__libafl_raw_write(fd, buf, count)
}
}
#[no_mangle]
pub unsafe extern "C" fn read(fd: c_int, buf: Pointer, count: size_t) -> ssize_t {
let ctx = Context::get();
if ctx.enabled && fd >= 0 && fd <= 2 {
0
} else {
__libafl_raw_read(fd, buf, count)
}
}
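A sketch (not in this commit) of how these stubs could be exercised with a forked test, mirroring the test style of `mmap.rs` below and assuming it sits in a `tests` module inside `file.rs`:

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use rusty_fork::rusty_fork_test;

    rusty_fork_test! {
        #[test]
        fn test_stdio_suppressed() {
            unsafe {
                Context::get().enable();
                let msg = b"never printed\n";
                // a write to stdout claims success but performs no I/O
                assert_eq!(write(1, msg.as_ptr() as Pointer, msg.len()), msg.len() as ssize_t);
                // a read from stdin reports EOF instead of blocking
                let mut buf = [0u8; 8];
                assert_eq!(read(0, buf.as_mut_ptr() as Pointer, buf.len()), 0);
            }
        }
    }
}
```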

utils/desyscall/src/lib.rs Normal file

@@ -0,0 +1,87 @@
use libc::{c_int, c_void};
use meminterval::IntervalTree;
use std::{mem::MaybeUninit, sync::Once};
pub mod file;
pub mod mmap;
pub type Pointer = *mut c_void;
#[derive(Debug, Clone)]
pub struct Mapping {
prot: c_int,
flags: c_int,
mapped: bool,
}
pub struct Context {
enabled: bool,
mappings: IntervalTree<Pointer, Mapping>,
exit_hook: Option<Box<dyn FnMut(i32)>>,
}
impl Context {
pub fn new() -> Self {
Self {
enabled: false,
mappings: IntervalTree::new(),
exit_hook: None,
}
}
pub fn disable(&mut self) -> bool {
let prev = self.enabled;
self.enabled = false;
prev
}
pub fn enable(&mut self) -> bool {
let prev = self.enabled;
self.enabled = true;
prev
}
pub fn print_mappings(&self) {
for entry in self.mappings.query((0 as Pointer)..(usize::MAX as Pointer)) {
println!(
"{:?}-{:?}\t==> {:?}",
entry.interval.start, entry.interval.end, entry.value
);
}
}
pub fn register_exit_hook(&mut self, hook: Box<dyn FnMut(i32)>) {
self.exit_hook = Some(hook);
}
pub fn get() -> &'static mut Context {
// TODO use Mutex with a feature
static mut SINGLETON_CONTEXT: MaybeUninit<Context> = MaybeUninit::uninit();
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {
SINGLETON_CONTEXT.write(Context::new());
});
SINGLETON_CONTEXT.assume_init_mut()
}
}
}
extern "C" {
fn __libafl_raw_exit_group(status: c_int);
}
// void _exit(int status);
#[no_mangle]
pub unsafe extern "C" fn _exit(status: c_int) {
let ctx = Context::get();
if ctx.enabled {
if let Some(hook) = &mut ctx.exit_hook {
(hook)(status as i32);
}
}
__libafl_raw_exit_group(status);
}
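The exit hook gives the embedder a chance to report before `exit_group` fires. A hedged sketch of wiring it up (the function name is illustrative, not from this commit):

```rust
use desyscall::Context;

fn install_exit_reporting() {
    let ctx = Context::get();
    ctx.register_exit_hook(Box::new(|status| {
        // switch back to real syscalls first so the message below is not
        // swallowed by the interposed write, then report the exit status
        Context::get().disable();
        eprintln!("target exited with status {status}");
    }));
    ctx.enable();
}
```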

utils/desyscall/src/mmap.rs Normal file

@@ -0,0 +1,542 @@
use libc::{c_int, c_void, off_t, size_t};
use meminterval::Interval;
use std::ptr;
use crate::{Context, Mapping, Pointer};
const PAGE_SIZE: usize = 4096;
extern "C" {
//void* __libafl_raw_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset);
fn __libafl_raw_mmap(
addr: *mut c_void,
length: size_t,
prot: c_int,
flags: c_int,
fd: c_int,
offset: off_t,
) -> *mut c_void;
//int __libafl_raw_munmap(void *addr, size_t length);
fn __libafl_raw_munmap(addr: *mut c_void, length: size_t) -> c_int;
//void *__libafl_raw_mremap(void *old_address, size_t old_size, size_t new_size, int flags, ... /* void *new_address */);
fn __libafl_raw_mremap(
old_address: *mut c_void,
old_size: size_t,
new_size: size_t,
flags: c_int,
new_address: *mut c_void,
) -> *mut c_void;
//int __libafl_raw_mprotect(void *addr, size_t len, int prot);
fn __libafl_raw_mprotect(addr: *mut c_void, len: size_t, prot: c_int) -> c_int;
//int __libafl_raw_madvise(void *addr, size_t length, int advice) {
fn __libafl_raw_madvise(addr: *mut c_void, length: size_t, advice: c_int) -> c_int;
}
#[no_mangle]
pub unsafe extern "C" fn mmap(
addr: Pointer,
length: size_t,
prot: c_int,
flags: c_int,
fd: c_int,
offset: off_t,
) -> Pointer {
let ctx = Context::get();
if !ctx.enabled {
return __libafl_raw_mmap(addr, length, prot, flags, fd, offset) as Pointer;
}
// validity checks
if length == 0 || length % PAGE_SIZE != 0 || (addr as usize) % PAGE_SIZE != 0 {
*libc::__errno_location() = libc::EINVAL;
return libc::MAP_FAILED as Pointer;
}
ctx.disable();
if addr == 0 as Pointer {
let mut candidate = None;
for entry in ctx.mappings.query((0 as Pointer)..(usize::MAX as Pointer)) {
if entry.value.mapped || entry.value.prot != prot {
continue;
}
if length <= entry.interval.end as usize - entry.interval.start as usize {
candidate = Some((entry.interval.clone(), entry.value.clone()));
break;
}
}
let ret = if let Some(cand) = candidate {
let size = cand.0.end as usize - cand.0.start as usize;
if length < size {
ctx.mappings.delete(cand.0);
let end = cand.0.start.offset(length as isize);
ctx.mappings.insert(
cand.0.start..end,
Mapping {
prot,
flags,
mapped: true,
},
);
ctx.mappings.insert(end..cand.0.end, cand.1);
} else {
let val = ctx.mappings.query_mut(cand.0).next().unwrap().value;
val.mapped = true;
val.flags = flags;
}
ptr::write_bytes(cand.0.start, 0, length);
cand.0.start
} else {
let ret = __libafl_raw_mmap(addr, length, prot, flags, fd, offset) as Pointer;
if ret != libc::MAP_FAILED as Pointer {
let end = ret.offset(length as isize);
ctx.mappings.insert(
ret..end,
Mapping {
prot,
flags,
mapped: true,
},
);
}
ret
};
ctx.enable();
return ret;
}
let end = addr.offset(length as isize);
let mut prev: Option<(_, _)> = None;
let mut fail = false;
let mut already_mapped = false;
let mut remainder = None;
let mut intervals = vec![]; // TODO put scratch in ctx
for entry in ctx.mappings.query(addr..end) {
if let Some(p) = prev {
if entry.interval.start != p.0 {
fail = true;
}
} else {
if entry.interval.start > addr {
fail = true;
} else if entry.interval.start < addr {
remainder = Some((entry.interval.start, entry.value.clone()));
}
}
if entry.value.prot != prot {
fail = true;
}
if entry.value.mapped {
fail = true;
already_mapped = true;
}
intervals.push(entry.interval.clone());
prev = Some((entry.interval.end, entry.value));
}
let mut remainder_next = None;
if let Some(p) = prev.take() {
if p.0 < end {
fail = true;
} else if p.0 > end {
remainder_next = Some((p.0, p.1.clone()));
}
} else {
fail = true; // new allocation
}
for interval in intervals {
ctx.mappings.delete(interval);
}
if let Some(r) = remainder {
ctx.mappings.insert(r.0..addr, r.1);
}
if let Some(r) = remainder_next {
ctx.mappings.insert(end..r.0, r.1);
}
let ret = if fail || fd != -1 {
if !already_mapped {
__libafl_raw_munmap(addr, length);
}
let ret = __libafl_raw_mmap(addr, length, prot, flags, fd, offset) as Pointer;
if ret != libc::MAP_FAILED as Pointer {
ctx.mappings.insert(
addr..end,
Mapping {
prot,
flags,
mapped: true,
},
);
}
ret
} else {
ctx.mappings.insert(
addr..end,
Mapping {
prot,
flags,
mapped: true,
},
);
ptr::write_bytes(addr, 0, length);
addr
};
// TODO keep track of file-backed regions
ctx.enable();
ret
}
#[no_mangle]
pub unsafe extern "C" fn munmap(addr: *mut c_void, length: size_t) -> c_int {
let ctx = Context::get();
if !ctx.enabled {
return __libafl_raw_munmap(addr, length);
}
// validity checks
if length == 0 || (addr as usize) % PAGE_SIZE != 0 {
*libc::__errno_location() = libc::EINVAL;
return -1;
}
let aligned_length = if length % PAGE_SIZE != 0 {
length + (PAGE_SIZE - length % PAGE_SIZE)
} else {
length
};
let end = addr.offset(aligned_length as isize);
ctx.disable();
let mut new_entries: Vec<(Interval<_>, Mapping)> = vec![]; // TODO put scratch in ctx
let mut intervals = vec![]; // TODO put scratch in ctx
// TODO unmap file-backed regions
for entry in ctx.mappings.query(addr..end) {
let rng = Interval::new(
if entry.interval.start <= addr {
addr
} else {
entry.interval.start
},
if entry.interval.end >= end {
end
} else {
entry.interval.end
},
);
let consolidated = if let Some(last) = new_entries.last_mut() {
// consolidate
if last.0.end == rng.start && last.1.prot == entry.value.prot {
last.0.end = rng.end;
true
} else {
false
}
} else {
false
};
if entry.interval.start < addr {
new_entries.push((
Interval::new(entry.interval.start, addr),
entry.value.clone(),
));
}
if !consolidated {
let mut val = entry.value.clone();
val.mapped = false;
new_entries.push((rng, val));
}
if entry.interval.end > end {
new_entries.push((Interval::new(end, entry.interval.end), entry.value.clone()));
}
intervals.push(entry.interval.clone());
}
for interval in intervals {
ctx.mappings.delete(interval);
}
for (rng, val) in new_entries {
ctx.mappings.insert(rng, val);
}
ctx.enable();
0
}
#[no_mangle]
pub unsafe extern "C" fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int {
let ctx = Context::get();
if !ctx.enabled {
// in theory this could change the perms of a tracked region; in practice we assume it does not
return __libafl_raw_mprotect(addr, length, prot);
}
let aligned_length = if length % PAGE_SIZE != 0 {
length + (PAGE_SIZE - length % PAGE_SIZE)
} else {
length
};
let end = addr.offset(aligned_length as isize);
ctx.disable();
let mut query_iter = ctx.mappings.query(addr..end);
if let Some(mut entry) = query_iter.next() {
// cache the repeated mprotects on the same region
if entry.interval.start == addr && entry.interval.end == end {
if entry.value.prot == prot {
ctx.enable();
return 0;
}
}
let ret = __libafl_raw_mprotect(addr, length, prot);
// return on error
if ret != 0 {
ctx.enable();
return ret;
}
let mut new_entries: Vec<(Interval<_>, Mapping)> = vec![]; // TODO put scratch in ctx
let mut intervals = vec![]; // TODO put scratch in ctx
loop {
let rng = Interval::new(
if entry.interval.start <= addr {
addr
} else {
entry.interval.start
},
if entry.interval.end >= end {
end
} else {
entry.interval.end
},
);
let consolidated = if let Some(last) = new_entries.last_mut() {
// consolidate
if last.0.end == rng.start && last.1.prot == entry.value.prot {
last.0.end = rng.end;
true
} else {
false
}
} else {
false
};
if entry.interval.start < addr {
new_entries.push((
Interval::new(entry.interval.start, addr),
entry.value.clone(),
));
}
if !consolidated {
let mut val = entry.value.clone();
val.prot = prot;
debug_assert!(val.mapped);
new_entries.push((rng, val));
}
if entry.interval.end > end {
new_entries.push((Interval::new(end, entry.interval.end), entry.value.clone()));
}
intervals.push(entry.interval.clone());
if let Some(next) = query_iter.next() {
entry = next;
} else {
break;
}
}
for interval in intervals {
ctx.mappings.delete(interval);
}
for (rng, val) in new_entries {
ctx.mappings.insert(rng, val);
}
ctx.enable();
0
} else {
let ret = __libafl_raw_mprotect(addr, length, prot);
// return on error
if ret != 0 {
ctx.enable();
return ret;
}
ctx.mappings.insert(
addr..end,
Mapping {
prot,
flags: 0, // TODO what to do with flags?
mapped: true,
},
);
ctx.enable();
ret
}
}
#[no_mangle]
pub unsafe extern "C" fn madvise(addr: *mut c_void, length: size_t, advice: c_int) -> c_int {
let ctx = Context::get();
if ctx.enabled && advice == libc::MADV_DONTNEED {
0
} else {
__libafl_raw_madvise(addr, length, advice)
}
}
#[cfg(test)]
mod tests {
use super::*;
use rusty_fork::rusty_fork_test;
// cargo test -- --nocapture --test-threads=1
rusty_fork_test! {
#[test]
fn test_map_unmap_1() {
unsafe {
Context::get().enable();
let p = mmap(0x7ffff9f9e000usize as Pointer, 4096, 0x7, 0x22, 0, 0);
assert!(p as isize != -1);
println!("Pre {:?}", p);
Context::get().print_mappings();
let r = munmap(p, 1);
assert!(r == 0);
println!("Post");
Context::get().print_mappings();
}
}
}
rusty_fork_test! {
#[test]
fn test_map_unmap_2() {
unsafe {
Context::get().enable();
let p = mmap(0x7ffff9f9e000usize as Pointer, PAGE_SIZE*4, 0x7, 0x22, 0, 0);
assert!(p as isize != -1);
println!("Pre {:?}", p);
Context::get().print_mappings();
let r = munmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE*2);
assert!(r == 0);
println!("Post");
Context::get().print_mappings();
}
}
}
rusty_fork_test! {
#[test]
fn test_map_unmap_3() {
unsafe {
Context::get().enable();
let p = mmap(0x7ffff9f9e000usize as Pointer, PAGE_SIZE*4, 0x7, 0x22, 0, 0);
assert!(p as isize != -1);
println!("Pre {:?}", p);
Context::get().print_mappings();
let r = munmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE*2);
assert!(r == 0);
println!("Post");
Context::get().print_mappings();
let p = mmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE, 0x1, 0x22, 0, 0);
assert!(p as isize != -1);
println!("Remap {:?}", p);
Context::get().print_mappings();
}
}
}
rusty_fork_test! {
#[test]
fn test_map_unmap_zero_1() {
unsafe {
Context::get().enable();
let p = mmap(0 as Pointer, PAGE_SIZE*4, 0x7, 0x22, 0, 0);
assert!(p as isize != -1);
println!("Pre {:?}", p);
Context::get().print_mappings();
let r = munmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE*2);
assert!(r == 0);
println!("Post");
Context::get().print_mappings();
let p = mmap(0 as Pointer, PAGE_SIZE, 0x7, 0x22, 0, 0);
assert!(p as isize != -1);
println!("Remap {:?}", p);
Context::get().print_mappings();
}
}
}
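// A sketch of an additional check (not in the original commit): an mprotect
// over an interval that is already tracked with the same prot is answered
// from the interval tree without hitting the kernel again.
rusty_fork_test! {
    #[test]
    fn test_mprotect_cached() {
        unsafe {
            Context::get().enable();
            let p = mmap(0 as Pointer, PAGE_SIZE, 0x7, 0x22, 0, 0);
            assert!(p as isize != -1);
            // same interval, same prot: served by the cached-mapping fast path
            assert!(mprotect(p, PAGE_SIZE, 0x7) == 0);
            Context::get().print_mappings();
        }
    }
}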
}

utils/desyscall/src/patch.c Normal file

@@ -0,0 +1,210 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
int __libafl_raw_mprotect(void *addr, size_t len, int prot);
void* mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset);
int munmap(void *addr, size_t length);
void *mremap(void *old_address, size_t old_size, size_t new_size, int flags, ... /* void *new_address */);
int mprotect(void *addr, size_t len, int prot);
#ifdef __x86_64__
uint8_t *__libafl_patch_jump(uint8_t *addr, uint8_t *dest) {
// mov rax, dest
addr[0] = 0x48;
addr[1] = 0xb8;
*(uint8_t **)&addr[2] = dest;
// jmp rax
addr[10] = 0xff;
addr[11] = 0xe0;
return &addr[12];
}
#elif __i386__
uint8_t *__libafl_patch_jump(uint8_t *addr, uint8_t *dest) {
// mov eax, dest
addr[0] = 0xb8;
*(uint8_t **)&addr[1] = dest;
// jmp eax
addr[5] = 0xff;
addr[6] = 0xe0;
return &addr[7];
}
#elif __arm__
// in ARM, r12 is a scratch register used by the linker to jump,
// so let's use it in our stub
uint8_t *__libafl_patch_jump(uint8_t *addr, uint8_t *dest) {
// ldr r12, OFF
addr[0] = 0x0;
addr[1] = 0xc0;
addr[2] = 0x9f;
addr[3] = 0xe5;
// add pc, pc, r12
addr[4] = 0xc;
addr[5] = 0xf0;
addr[6] = 0x8f;
addr[7] = 0xe0;
// OFF: .word dest
*(uint32_t *)&addr[8] = (uint32_t)dest;
return &addr[12];
}
#elif __aarch64__
// in ARM64, x16 is a scratch register used by the linker to jump,
// so let's use it in our stub
uint8_t *__libafl_patch_jump(uint8_t *addr, uint8_t *dest) {
// ldr x16, OFF
addr[0] = 0x50;
addr[1] = 0x0;
addr[2] = 0x0;
addr[3] = 0x58;
// br x16
addr[4] = 0x0;
addr[5] = 0x2;
addr[6] = 0x1f;
addr[7] = 0xd6;
// OFF: .dword dest
*(uint64_t *)&addr[8] = (uint64_t)dest;
return &addr[16];
}
#else
#define CANNOT_HOTPATCH
#endif
#ifdef CANNOT_HOTPATCH
//__attribute__((constructor)) void __libafl_hotpatch(void) {
//}
#else
static void *libc_start, *libc_end;
int libc_perms;
static void find_libc(void) {
FILE *fp;
char *line = NULL;
size_t len = 0;
ssize_t read;
fp = fopen("/proc/self/maps", "r");
if (fp == NULL) { return; }
while ((read = getline(&line, &len, fp)) != -1) {
int fields, dev_maj, dev_min, inode;
uint64_t min, max, offset;
char flag_r, flag_w, flag_x, flag_p;
char path[513] = "";
fields = sscanf(line,
"%" PRIx64 "-%" PRIx64 " %c%c%c%c %" PRIx64
" %x:%x %d"
" %512s",
&min, &max, &flag_r, &flag_w, &flag_x, &flag_p, &offset,
&dev_maj, &dev_min, &inode, path);
if ((fields < 10) || (fields > 11)) { continue; }
if (flag_x == 'x' && (strstr(path, "/libc.so") || strstr(path, "/libc-"))) {
libc_start = (void *)min;
libc_end = (void *)max;
libc_perms = PROT_EXEC;
if (flag_w == 'w') { libc_perms |= PROT_WRITE; }
if (flag_r == 'r') { libc_perms |= PROT_READ; }
break;
}
}
free(line);
fclose(fp);
}
/* Why this shit? https://twitter.com/andreafioraldi/status/1227635146452541441
Unfortunately, symbol override with LD_PRELOAD is not enough to prevent libc
code from calling these optimized XMM-based routines internally.
We patch them at runtime to call our unoptimized versions of the same routines.
*/
__attribute__((constructor)) void __libafl_hotpatch(void) {
find_libc();
if (!libc_start) { return; }
if (__libafl_raw_mprotect(libc_start, libc_end - libc_start,
PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
return;
}
void *libc = dlopen("libc.so.6", RTLD_LAZY);
#define HOTPATCH(fn) \
uint8_t *p_##fn = (uint8_t *)dlsym(libc, #fn); \
if (p_##fn) { __libafl_patch_jump(p_##fn, (uint8_t *)&(fn)); }
HOTPATCH(mmap)
HOTPATCH(munmap)
HOTPATCH(mprotect)
HOTPATCH(write)
HOTPATCH(_exit)
#undef HOTPATCH
__libafl_raw_mprotect(libc_start, libc_end - libc_start, libc_perms);
}
#endif

utils/desyscall/src/syscalls.c Normal file

@@ -0,0 +1,36 @@
#include <unistd.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
void* __libafl_raw_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) {
return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
}
int __libafl_raw_munmap(void *addr, size_t length) {
return syscall(SYS_munmap, addr, length);
}
void *__libafl_raw_mremap(void *old_address, size_t old_size, size_t new_size, int flags, void *new_address) {
return (void*)syscall(SYS_mremap, old_address, old_size, new_size, flags, new_address);
}
int __libafl_raw_mprotect(void *addr, size_t len, int prot) {
return syscall(SYS_mprotect, addr, len, prot);
}
int __libafl_raw_madvise(void *addr, size_t length, int advice) {
return syscall(SYS_madvise, addr, length, advice);
}
ssize_t __libafl_raw_write(int fd, const void *buf, size_t count) {
return syscall(SYS_write, fd, buf, count);
}
ssize_t __libafl_raw_read(int fd, void *buf, size_t count) {
return syscall(SYS_read, fd, buf, count);
}
void __libafl_raw_exit_group(int status) {
syscall(SYS_exit_group, status);
}