AddressSanitizer for libafl_qemu (#378)

* build libqasan

* asan runtime

* working simple asan

* init_with_asan

* fmt

* incomplete instr filter

* harden dealloc

* clippy
This commit is contained in:
Andrea Fioraldi 2021-11-16 13:53:28 +01:00 committed by GitHub
parent c7512fceec
commit 00d38dc535
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 13542 additions and 642 deletions

View File

@ -41,11 +41,14 @@ use libafl::{
};
use libafl_qemu::{
amd64::Amd64Regs,
asan::QemuAsanHelper,
cmplog,
cmplog::{CmpLogObserver, QemuCmpLogHelper},
edges,
edges::QemuEdgeCoverageHelper,
elf::EasyElf,
emu, filter_qemu_args,
helpers::{QemuCmpLogHelper, QemuEdgeCoverageHelper, QemuSnapshotHelper},
hooks,
hooks::CmpLogObserver,
emu, filter_qemu_args, init_with_asan,
snapshot::QemuSnapshotHelper,
MmapPerms, QemuExecutor,
};
@ -55,9 +58,9 @@ pub fn main() {
// Needed only on no_std
//RegistryBuilder::register::<Tokens>();
let args: Vec<String> = env::args().collect();
let env: Vec<(String, String)> = env::vars().collect();
emu::init(&args, &env);
let mut args: Vec<String> = env::args().collect();
let mut env: Vec<(String, String)> = env::vars().collect();
init_with_asan(&mut args, &mut env);
let res = match App::new("libafl_qemu_fuzzbench")
.version("0.4.0")
@ -230,8 +233,8 @@ fn fuzz(
};
// Create an observation channel using the coverage map
let edges = unsafe { &mut hooks::EDGES_MAP };
let edges_counter = unsafe { &mut hooks::MAX_EDGES_NUM };
let edges = unsafe { &mut edges::EDGES_MAP };
let edges_counter = unsafe { &mut edges::MAX_EDGES_NUM };
let edges_observer =
HitcountsMapObserver::new(VariableMapObserver::new("edges", edges, edges_counter));
@ -239,7 +242,7 @@ fn fuzz(
let time_observer = TimeObserver::new("time");
// Create an observation channel using cmplog map
let cmplog_observer = CmpLogObserver::new("cmplog", unsafe { &mut hooks::CMPLOG_MAP }, true);
let cmplog_observer = CmpLogObserver::new("cmplog", unsafe { &mut cmplog::CMPLOG_MAP }, true);
// The state of the edges feedback.
let feedback_state = MapFeedbackState::with_observer(&edges_observer);
@ -305,7 +308,8 @@ fn fuzz(
tuple_list!(
QemuEdgeCoverageHelper::new(),
QemuCmpLogHelper::new(),
QemuSnapshotHelper::new()
QemuAsanHelper::new(),
//QemuSnapshotHelper::new()
),
tuple_list!(edges_observer, time_observer),
&mut fuzzer,

View File

@ -15,17 +15,22 @@ fn build_dep_check(tools: &[&str]) {
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed=CPU_TARGET");
println!("cargo:rerun-if-env-changed=CROSS_CC");
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
if target_os != "linux" {
return;
}
let jobs = env::var("CARGO_BUILD_JOBS").unwrap_or_else(|_| "1".to_owned());
let jobs = env::var("CARGO_BUILD_JOBS");
let cpu_target = env::var("CPU_TARGET").unwrap_or_else(|_| {
println!("cargo:warning=CPU_TARGET is not set, default to x86_64");
"x86_64".to_owned()
});
let cross_cc = env::var("CROSS_CC").unwrap_or_else(|_| {
println!("cargo:warning=CROSS_CC is not set, default to cc (things can go wrong if CPU_TARGET is not the host arch)");
"cc".to_owned()
});
let out_dir = env::var_os("OUT_DIR").unwrap();
let out_dir = out_dir.to_string_lossy().to_string();
@ -34,6 +39,9 @@ fn main() {
target_dir.pop();
target_dir.pop();
target_dir.pop();
let qasan_dir = Path::new("libqasan");
let qasan_dir = fs::canonicalize(&qasan_dir).unwrap();
let src_dir = Path::new("src");
//let cwd = env::current_dir().unwrap().to_string_lossy().to_string();
build_dep_check(&["git", "make"]);
@ -44,7 +52,7 @@ fn main() {
if qemu_rev.exists()
&& fs::read_to_string(&qemu_rev).expect("Failed to read QEMU_REVISION") != QEMU_REVISION
{
fs::remove_dir_all(&qemu_path).unwrap();
drop(fs::remove_dir_all(&qemu_path));
}
if !qemu_path.is_dir() {
@ -149,12 +157,20 @@ fn main() {
])
.status()
.expect("Configure failed");
Command::new("make")
.current_dir(&qemu_path)
.arg("-j")
.arg(&jobs)
.status()
.expect("Make failed");
if let Ok(j) = jobs {
Command::new("make")
.current_dir(&qemu_path)
.arg("-j")
.arg(&j)
.status()
.expect("Make failed");
} else {
Command::new("make")
.current_dir(&qemu_path)
.arg("-j")
.status()
.expect("Make failed");
}
//let _ = remove_file(build_dir.join(&format!("libqemu-{}.so", cpu_target)));
}
@ -243,6 +259,24 @@ fn main() {
println!("cargo:rustc-env=LD_LIBRARY_PATH={}", target_dir.display());
}
println!("cargo:rerun-if-changed={}/libqasan.so", qasan_dir.display());
drop(
Command::new("make")
.current_dir(&out_dir_path)
.env("CC", cross_cc)
.env("OUT_DIR", &target_dir)
.arg("-C")
.arg(&qasan_dir)
.status(),
);
println!("cargo:rerun-if-changed=src/asan-giovese.c");
println!("cargo:rerun-if-changed=src/asan-giovese.h");
cc::Build::new()
.warnings(false)
.file(src_dir.join("asan-giovese.c"))
.compile("asan_giovese");
}
/*

View File

@ -0,0 +1,33 @@
#
# american fuzzy lop++ - libqasan
# -------------------------------
#
# Written by Andrea Fioraldi <andreafioraldi@gmail.com>
#
# Copyright 2019-2020 Andrea Fioraldi. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
OUT_DIR ?= .
CFLAGS += -Wno-int-to-void-pointer-cast -ggdb
LDFLAGS += -ldl -pthread
SRC := libqasan.c hooks.c malloc.c string.c uninstrument.c patch.c dlmalloc.c
HDR := libqasan.h
all: libqasan.so
libqasan.so: $(HDR) $(SRC)
$(CC) $(CFLAGS) -fPIC -shared $(SRC) -o $(OUT_DIR)/$@ $(LDFLAGS)
.NOTPARALLEL: clean
clean:
rm -f *.o *.so *~ a.out core core.[1-9][0-9]*
rm -f libqasan.so

View File

@ -0,0 +1,13 @@
# QEMU AddressSanitizer Runtime
This library is the injected runtime used by QEMU AddressSanitizer (QASan).
The original repository is [here](https://github.com/andreafioraldi/qasan).
The version embedded in libafl_qemu is an updated version of just the usermode part,
and this runtime is injected via LD_PRELOAD (so it works only for dynamically
linked binaries).
For debugging purposes, we still suggest running the original QASan, as the
stack-trace support for ARM (just a debug feature; it does not affect the
bug-finding capabilities during fuzzing) is still a work in progress here.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,690 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include "map_macro.h"
#include <unistd.h>
#include <sys/syscall.h>
char *(*__lq_libc_fgets)(char *, int, FILE *);
int (*__lq_libc_atoi)(const char *);
long (*__lq_libc_atol)(const char *);
long long (*__lq_libc_atoll)(const char *);
void __libqasan_init_hooks(void) {
__libqasan_init_malloc();
__lq_libc_fgets = ASSERT_DLSYM(fgets);
__lq_libc_atoi = ASSERT_DLSYM(atoi);
__lq_libc_atol = ASSERT_DLSYM(atol);
__lq_libc_atoll = ASSERT_DLSYM(atoll);
}
ssize_t write(int fd, const void *buf, size_t count) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: write(%d, %p, %zu)\n", rtv, fd, buf, count);
QASAN_LOAD(buf, count);
ssize_t r = syscall(SYS_write, fd, buf, count);
QASAN_DEBUG("\t\t = %zd\n", r);
return r;
}
ssize_t read(int fd, void *buf, size_t count) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: read(%d, %p, %zu)\n", rtv, fd, buf, count);
QASAN_STORE(buf, count);
ssize_t r = syscall(SYS_read, fd, buf, count);
QASAN_DEBUG("\t\t = %zd\n", r);
return r;
}
#ifdef __ANDROID__
size_t malloc_usable_size(const void *ptr) {
#else
size_t malloc_usable_size(void *ptr) {
#endif
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: malloc_usable_size(%p)\n", rtv, ptr);
size_t r = __libqasan_malloc_usable_size((void *)ptr);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
}
void *malloc(size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: malloc(%zu)\n", rtv, size);
void *r = __libqasan_malloc(size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *calloc(size_t nmemb, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: calloc(%zu, %zu)\n", rtv, nmemb, size);
void *r = __libqasan_calloc(nmemb, size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *realloc(void *ptr, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: realloc(%p, %zu)\n", rtv, ptr, size);
void *r = __libqasan_realloc(ptr, size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
int posix_memalign(void **memptr, size_t alignment, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: posix_memalign(%p, %zu, %zu)\n", rtv, memptr, alignment,
size);
int r = __libqasan_posix_memalign(memptr, alignment, size);
QASAN_DEBUG("\t\t = %d [*memptr = %p]\n", r, *memptr);
return r;
}
void *memalign(size_t alignment, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memalign(%zu, %zu)\n", rtv, alignment, size);
void *r = __libqasan_memalign(alignment, size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *aligned_alloc(size_t alignment, size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: aligned_alloc(%zu, %zu)\n", rtv, alignment, size);
void *r = __libqasan_aligned_alloc(alignment, size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *valloc(size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: valloc(%zu)\n", rtv, size);
void *r = __libqasan_memalign(sysconf(_SC_PAGESIZE), size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *pvalloc(size_t size) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: pvalloc(%zu)\n", rtv, size);
size_t page_size = sysconf(_SC_PAGESIZE);
size = (size & (page_size - 1)) + page_size;
void *r = __libqasan_memalign(page_size, size);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void free(void *ptr) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: free(%p)\n", rtv, ptr);
__libqasan_free(ptr);
}
char *fgets(char *s, int size, FILE *stream) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: fgets(%p, %d, %p)\n", rtv, s, size, stream);
QASAN_STORE(s, size);
#ifndef __ANDROID__
QASAN_LOAD(stream, sizeof(FILE));
#endif
char *r = __lq_libc_fgets(s, size, stream);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
int memcmp(const void *s1, const void *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memcmp(%p, %p, %zu)\n", rtv, s1, s2, n);
QASAN_LOAD(s1, n);
QASAN_LOAD(s2, n);
int r = __libqasan_memcmp(s1, s2, n);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
void *memcpy(void *dest, const void *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memcpy(%p, %p, %zu)\n", rtv, dest, src, n);
QASAN_LOAD(src, n);
QASAN_STORE(dest, n);
void *r = __libqasan_memcpy(dest, src, n);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *mempcpy(void *dest, const void *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: mempcpy(%p, %p, %zu)\n", rtv, dest, src, n);
QASAN_LOAD(src, n);
QASAN_STORE(dest, n);
void *r = (uint8_t *)__libqasan_memcpy(dest, src, n) + n;
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *memmove(void *dest, const void *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memmove(%p, %p, %zu)\n", rtv, dest, src, n);
QASAN_LOAD(src, n);
QASAN_STORE(dest, n);
void *r = __libqasan_memmove(dest, src, n);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *memset(void *s, int c, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memset(%p, %d, %zu)\n", rtv, s, c, n);
QASAN_STORE(s, n);
void *r = __libqasan_memset(s, c, n);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *memchr(const void *s, int c, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memchr(%p, %d, %zu)\n", rtv, s, c, n);
void *r = __libqasan_memchr(s, c, n);
if (r == NULL)
QASAN_LOAD(s, n);
else
QASAN_LOAD(s, r - s);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *memrchr(const void *s, int c, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memrchr(%p, %d, %zu)\n", rtv, s, c, n);
QASAN_LOAD(s, n);
void *r = __libqasan_memrchr(s, c, n);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
void *memmem(const void *haystack, size_t haystacklen, const void *needle,
size_t needlelen) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: memmem(%p, %zu, %p, %zu)\n", rtv, haystack, haystacklen,
needle, needlelen);
QASAN_LOAD(haystack, haystacklen);
QASAN_LOAD(needle, needlelen);
void *r = __libqasan_memmem(haystack, haystacklen, needle, needlelen);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
#ifndef __BIONIC__
void bzero(void *s, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: bzero(%p, %zu)\n", rtv, s, n);
QASAN_STORE(s, n);
__libqasan_memset(s, 0, n);
}
#endif
void explicit_bzero(void *s, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: bzero(%p, %zu)\n", rtv, s, n);
QASAN_STORE(s, n);
__libqasan_memset(s, 0, n);
}
int bcmp(const void *s1, const void *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: bcmp(%p, %p, %zu)\n", rtv, s1, s2, n);
QASAN_LOAD(s1, n);
QASAN_LOAD(s2, n);
int r = __libqasan_bcmp(s1, s2, n);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
char *strchr(const char *s, int c) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strchr(%p, %d)\n", rtv, s, c);
size_t l = __libqasan_strlen(s);
QASAN_LOAD(s, l + 1);
void *r = __libqasan_strchr(s, c);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
char *strrchr(const char *s, int c) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strrchr(%p, %d)\n", rtv, s, c);
size_t l = __libqasan_strlen(s);
QASAN_LOAD(s, l + 1);
void *r = __libqasan_strrchr(s, c);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
int strcasecmp(const char *s1, const char *s2) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strcasecmp(%p, %p)\n", rtv, s1, s2);
size_t l1 = __libqasan_strlen(s1);
QASAN_LOAD(s1, l1 + 1);
size_t l2 = __libqasan_strlen(s2);
QASAN_LOAD(s2, l2 + 1);
int r = __libqasan_strcasecmp(s1, s2);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
int strncasecmp(const char *s1, const char *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strncasecmp(%p, %p, %zu)\n", rtv, s1, s2, n);
size_t l1 = __libqasan_strnlen(s1, n);
QASAN_LOAD(s1, l1);
size_t l2 = __libqasan_strnlen(s2, n);
QASAN_LOAD(s2, l2);
int r = __libqasan_strncasecmp(s1, s2, n);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
char *strcat(char *dest, const char *src) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strcat(%p, %p)\n", rtv, dest, src);
size_t l2 = __libqasan_strlen(src);
QASAN_LOAD(src, l2 + 1);
size_t l1 = __libqasan_strlen(dest);
QASAN_STORE(dest, l1 + l2 + 1);
__libqasan_memcpy(dest + l1, src, l2);
dest[l1 + l2] = 0;
void *r = dest;
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
int strcmp(const char *s1, const char *s2) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strcmp(%p, %p)\n", rtv, s1, s2);
size_t l1 = __libqasan_strlen(s1);
QASAN_LOAD(s1, l1 + 1);
size_t l2 = __libqasan_strlen(s2);
QASAN_LOAD(s2, l2 + 1);
int r = __libqasan_strcmp(s1, s2);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
int strncmp(const char *s1, const char *s2, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strncmp(%p, %p, %zu)\n", rtv, s1, s2, n);
size_t l1 = __libqasan_strnlen(s1, n);
QASAN_LOAD(s1, l1);
size_t l2 = __libqasan_strnlen(s2, n);
QASAN_LOAD(s2, l2);
int r = __libqasan_strncmp(s1, s2, n);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
char *strcpy(char *dest, const char *src) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strcpy(%p, %p)\n", rtv, dest, src);
size_t l = __libqasan_strlen(src) + 1;
QASAN_LOAD(src, l);
QASAN_STORE(dest, l);
void *r = __libqasan_memcpy(dest, src, l);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
char *strncpy(char *dest, const char *src, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strncpy(%p, %p, %zu)\n", rtv, dest, src, n);
size_t l = __libqasan_strnlen(src, n);
QASAN_STORE(dest, n);
void *r;
if (l < n) {
QASAN_LOAD(src, l + 1);
r = __libqasan_memcpy(dest, src, l + 1);
} else {
QASAN_LOAD(src, n);
r = __libqasan_memcpy(dest, src, n);
}
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
char *stpcpy(char *dest, const char *src) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: stpcpy(%p, %p)\n", rtv, dest, src);
size_t l = __libqasan_strlen(src) + 1;
QASAN_LOAD(src, l);
QASAN_STORE(dest, l);
char *r = __libqasan_memcpy(dest, src, l) + (l - 1);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
char *strdup(const char *s) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strdup(%p)\n", rtv, s);
size_t l = __libqasan_strlen(s);
QASAN_LOAD(s, l + 1);
void *r = __libqasan_malloc(l + 1);
__libqasan_memcpy(r, s, l + 1);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
size_t strlen(const char *s) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strlen(%p)\n", rtv, s);
size_t r = __libqasan_strlen(s);
QASAN_LOAD(s, r + 1);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
}
size_t strnlen(const char *s, size_t n) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strnlen(%p, %zu)\n", rtv, s, n);
size_t r = __libqasan_strnlen(s, n);
QASAN_LOAD(s, r);
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
}
char *strstr(const char *haystack, const char *needle) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strstr(%p, %p)\n", rtv, haystack, needle);
size_t l = __libqasan_strlen(haystack) + 1;
QASAN_LOAD(haystack, l);
l = __libqasan_strlen(needle) + 1;
QASAN_LOAD(needle, l);
void *r = __libqasan_strstr(haystack, needle);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
char *strcasestr(const char *haystack, const char *needle) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: strcasestr(%p, %p)\n", rtv, haystack, needle);
size_t l = __libqasan_strlen(haystack) + 1;
QASAN_LOAD(haystack, l);
l = __libqasan_strlen(needle) + 1;
QASAN_LOAD(needle, l);
void *r = __libqasan_strcasestr(haystack, needle);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
int atoi(const char *nptr) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: atoi(%p)\n", rtv, nptr);
size_t l = __libqasan_strlen(nptr) + 1;
QASAN_LOAD(nptr, l);
int r = __lq_libc_atoi(nptr);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}
long atol(const char *nptr) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: atol(%p)\n", rtv, nptr);
size_t l = __libqasan_strlen(nptr) + 1;
QASAN_LOAD(nptr, l);
long r = __lq_libc_atol(nptr);
QASAN_DEBUG("\t\t = %ld\n", r);
return r;
}
long long atoll(const char *nptr) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: atoll(%p)\n", rtv, nptr);
size_t l = __libqasan_strlen(nptr) + 1;
QASAN_LOAD(nptr, l);
long long r = __lq_libc_atoll(nptr);
QASAN_DEBUG("\t\t = %lld\n", r);
return r;
}
size_t wcslen(const wchar_t *s) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: wcslen(%p)\n", rtv, s);
size_t r = __libqasan_wcslen(s);
QASAN_LOAD(s, sizeof(wchar_t) * (r + 1));
QASAN_DEBUG("\t\t = %zu\n", r);
return r;
}
wchar_t *wcscpy(wchar_t *dest, const wchar_t *src) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: wcscpy(%p, %p)\n", rtv, dest, src);
size_t l = __libqasan_wcslen(src) + 1;
QASAN_LOAD(src, l * sizeof(wchar_t));
QASAN_STORE(dest, l * sizeof(wchar_t));
void *r = __libqasan_wcscpy(dest, src);
QASAN_DEBUG("\t\t = %p\n", r);
return r;
}
int wcscmp(const wchar_t *s1, const wchar_t *s2) {
void *rtv = __builtin_return_address(0);
QASAN_DEBUG("%14p: wcscmp(%p, %p)\n", rtv, s1, s2);
size_t l1 = __libqasan_wcslen(s1);
QASAN_LOAD(s1, sizeof(wchar_t) * (l1 + 1));
size_t l2 = __libqasan_wcslen(s2);
QASAN_LOAD(s2, sizeof(wchar_t) * (l2 + 1));
int r = __libqasan_wcscmp(s1, s2);
QASAN_DEBUG("\t\t = %d\n", r);
return r;
}

View File

@ -0,0 +1,102 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#ifdef DEBUG
int __qasan_debug;
#endif
int __qasan_log;
void __libqasan_print_maps(void) {
int fd = open("/proc/self/maps", O_RDONLY);
char buf[4096] = {0};
read(fd, buf, 4095);
close(fd);
size_t len = strlen(buf);
QASAN_LOG("Guest process maps:\n");
int i;
char *line = NULL;
for (i = 0; i < len; i++) {
if (!line) line = &buf[i];
if (buf[i] == '\n') {
buf[i] = 0;
QASAN_LOG("%s\n", line);
line = NULL;
}
}
if (line) QASAN_LOG("%s\n", line);
QASAN_LOG("\n");
}
int __libqasan_is_initialized = 0;
__attribute__((constructor)) void __libqasan_init() {
if (__libqasan_is_initialized) return;
__libqasan_is_initialized = 1;
__libqasan_init_hooks();
if (getenv("AFL_INST_LIBS") || getenv("QASAN_HOTPACH")) __libqasan_hotpatch();
if (getenv("AFL_INST_LIBS") || getenv("QASAN_HOTPACH")) __libqasan_hotpatch();
#ifdef DEBUG
__qasan_debug = getenv("QASAN_DEBUG") != NULL;
#endif
__qasan_log = getenv("QASAN_LOG") != NULL;
QASAN_LOG("QEMU-AddressSanitizer (v%s)\n", QASAN_VERSTR);
QASAN_LOG(
"Copyright (C) 2019-2021 Andrea Fioraldi <andreafioraldi@gmail.com>\n");
QASAN_LOG("\n");
if (__qasan_log) __libqasan_print_maps();
}
int __libc_start_main(int (*main)(int, char **, char **), int argc, char **argv,
int (*init)(int, char **, char **), void (*fini)(void),
void (*rtld_fini)(void), void *stack_end) {
typeof(&__libc_start_main) orig = dlsym(RTLD_NEXT, "__libc_start_main");
__libqasan_init();
return orig(main, argc, argv, init, fini, rtld_fini, stack_end);
}

View File

@ -0,0 +1,132 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __LIBQASAN_H__
#define __LIBQASAN_H__
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <ucontext.h>
#include <inttypes.h>
#include <dlfcn.h>
#include <wchar.h>
#include "qasan.h"
#define QASAN_LOG(msg...) \
do { \
\
if (__qasan_log) { \
\
fprintf(stderr, "==%d== ", getpid()); \
fprintf(stderr, msg); \
\
} \
\
} while (0)
#ifdef DEBUG
#define QASAN_DEBUG(msg...) \
do { \
\
if (__qasan_debug) { \
\
fprintf(stderr, "==%d== ", getpid()); \
fprintf(stderr, msg); \
\
} \
\
} while (0)
#else
#define QASAN_DEBUG(msg...) \
do { \
\
} while (0)
#endif
#define ASSERT_DLSYM(name) \
({ \
\
void *a = (void *)dlsym(RTLD_NEXT, #name); \
if (!a) { \
\
fprintf(stderr, \
"FATAL ERROR: failed dlsym of " #name " in libqasan!\n"); \
abort(); \
\
} \
a; \
\
})
extern int __qasan_debug;
extern int __qasan_log;
void __libqasan_init_hooks(void);
void __libqasan_init_malloc(void);
void __libqasan_hotpatch(void);
size_t __libqasan_malloc_usable_size(void *ptr);
void * __libqasan_malloc(size_t size);
void __libqasan_free(void *ptr);
void * __libqasan_calloc(size_t nmemb, size_t size);
void * __libqasan_realloc(void *ptr, size_t size);
int __libqasan_posix_memalign(void **ptr, size_t align, size_t len);
void * __libqasan_memalign(size_t align, size_t len);
void * __libqasan_aligned_alloc(size_t align, size_t len);
void * __libqasan_memcpy(void *dest, const void *src, size_t n);
void * __libqasan_memmove(void *dest, const void *src, size_t n);
void * __libqasan_memset(void *s, int c, size_t n);
void * __libqasan_memchr(const void *s, int c, size_t n);
void * __libqasan_memrchr(const void *s, int c, size_t n);
size_t __libqasan_strlen(const char *s);
size_t __libqasan_strnlen(const char *s, size_t len);
int __libqasan_strcmp(const char *str1, const char *str2);
int __libqasan_strncmp(const char *str1, const char *str2, size_t len);
int __libqasan_strcasecmp(const char *str1, const char *str2);
int __libqasan_strncasecmp(const char *str1, const char *str2, size_t len);
int __libqasan_memcmp(const void *mem1, const void *mem2, size_t len);
int __libqasan_bcmp(const void *mem1, const void *mem2, size_t len);
char * __libqasan_strstr(const char *haystack, const char *needle);
char * __libqasan_strcasestr(const char *haystack, const char *needle);
void * __libqasan_memmem(const void *haystack, size_t haystack_len,
const void *needle, size_t needle_len);
char * __libqasan_strchr(const char *s, int c);
char * __libqasan_strrchr(const char *s, int c);
size_t __libqasan_wcslen(const wchar_t *s);
wchar_t *__libqasan_wcscpy(wchar_t *d, const wchar_t *s);
int __libqasan_wcscmp(const wchar_t *s1, const wchar_t *s2);
#endif

View File

@ -0,0 +1,370 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include <features.h>
#include <errno.h>
#include <stddef.h>
#include <assert.h>
#include <pthread.h>
#define REDZONE_SIZE 128
// 50 mb quarantine
#define QUARANTINE_MAX_BYTES 52428800
#if __STDC_VERSION__ < 201112L || \
(defined(__FreeBSD__) && __FreeBSD_version < 1200000)
// use this hack if not C11
typedef struct {
long long __ll;
long double __ld;
} max_align_t;
#endif
#define ALLOC_ALIGN_SIZE (_Alignof(max_align_t))
struct chunk_begin {
size_t requested_size;
void * aligned_orig; // NULL if not aligned
struct chunk_begin *next;
struct chunk_begin *prev;
char redzone[REDZONE_SIZE];
};
struct chunk_struct {
struct chunk_begin begin;
char redzone[REDZONE_SIZE];
size_t prev_size_padding;
};
#ifdef __GLIBC__
void *(*__lq_libc_malloc)(size_t);
void (*__lq_libc_free)(void *);
#define backend_malloc __lq_libc_malloc
#define backend_free __lq_libc_free
#define TMP_ZONE_SIZE 4096
static int __tmp_alloc_zone_idx;
static unsigned char __tmp_alloc_zone[TMP_ZONE_SIZE];
#else
// From dlmalloc.c
void * dlmalloc(size_t);
void dlfree(void *);
#define backend_malloc dlmalloc
#define backend_free dlfree
#endif
int __libqasan_malloc_initialized;
static struct chunk_begin *quarantine_top;
static struct chunk_begin *quarantine_end;
static size_t quarantine_bytes;
#ifdef __BIONIC__
static pthread_mutex_t quarantine_lock;
#define LOCK_TRY pthread_mutex_trylock
#define LOCK_INIT pthread_mutex_init
#define LOCK_UNLOCK pthread_mutex_unlock
#else
static pthread_spinlock_t quarantine_lock;
#define LOCK_TRY pthread_spin_trylock
#define LOCK_INIT pthread_spin_init
#define LOCK_UNLOCK pthread_spin_unlock
#endif
// need qasan disabled
static int quarantine_push(struct chunk_begin *ck) {
if (ck->requested_size >= QUARANTINE_MAX_BYTES) return 0;
if (LOCK_TRY(&quarantine_lock)) return 0;
while (ck->requested_size + quarantine_bytes >= QUARANTINE_MAX_BYTES) {
struct chunk_begin *tmp = quarantine_end;
quarantine_end = tmp->prev;
quarantine_bytes -= tmp->requested_size;
if (tmp->aligned_orig)
backend_free(tmp->aligned_orig);
else
backend_free(tmp);
}
ck->next = quarantine_top;
if (quarantine_top) quarantine_top->prev = ck;
quarantine_top = ck;
LOCK_UNLOCK(&quarantine_lock);
return 1;
}
// One-time allocator setup: resolve the real libc malloc/free (glibc
// builds interpose on top of them; other builds use dlmalloc) and
// initialize the quarantine lock. Idempotent.
void __libqasan_init_malloc(void) {
  if (__libqasan_malloc_initialized) return;
#ifdef __GLIBC__
  // RTLD_NEXT: skip our own interposed symbols and find libc's.
  __lq_libc_malloc = dlsym(RTLD_NEXT, "malloc");
  __lq_libc_free = dlsym(RTLD_NEXT, "free");
#endif
  LOCK_INIT(&quarantine_lock, PTHREAD_PROCESS_PRIVATE);
  // Set the flag before logging: QASAN_LOG may allocate.
  __libqasan_malloc_initialized = 1;
  QASAN_LOG("\n");
  QASAN_LOG("Allocator initialization done.\n");
  QASAN_LOG("\n");
}
// Return the size originally requested for ptr (read from the chunk
// header placed just before the user region).
size_t __libqasan_malloc_usable_size(void *ptr) {
  char *p = ptr;
  p -= sizeof(struct chunk_begin);
  // Validate that the chunk marker is readable (a crude check
  // to verify that ptr is a valid malloc region before we dereference it)
  QASAN_LOAD(p, sizeof(struct chunk_begin) - REDZONE_SIZE);
  return ((struct chunk_begin *)p)->requested_size;
}
// Interposed malloc(): allocates header + user region + right redzone via
// the backend allocator, records metadata, and poisons both redzones.
void *__libqasan_malloc(size_t size) {
  if (!__libqasan_malloc_initialized) {
    __libqasan_init_malloc();
#ifdef __GLIBC__
    // Bootstrap path: serve from the static arena until dlsym() has
    // resolved the real malloc.
    // NOTE(review): __tmp_alloc_zone_idx is not bounds-checked — a large
    // early allocation would overflow the zone; TODO confirm acceptable.
    void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
    if (size & (ALLOC_ALIGN_SIZE - 1))
      __tmp_alloc_zone_idx +=
          (size & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
    else
      __tmp_alloc_zone_idx += size;
    return r;
#endif
  }
  int state = QASAN_SWAP(QASAN_DISABLED);  // disable qasan for this thread
  struct chunk_begin *p = backend_malloc(sizeof(struct chunk_struct) + size);
  QASAN_SWAP(state);
  if (!p) return NULL;
  QASAN_UNPOISON(p, sizeof(struct chunk_struct) + size);
  p->requested_size = size;
  p->aligned_orig = NULL;
  p->next = p->prev = NULL;
  QASAN_ALLOC(&p[1], (char *)&p[1] + size);
  QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ);
  // Poison the right redzone plus the alignment padding after the user
  // region. FIX: the padding is round_up(size, ALLOC_ALIGN_SIZE) - size;
  // the original used a hard-coded `8` instead of ALLOC_ALIGN_SIZE, which
  // mis-sizes (and can make negative) the poisoned span whenever
  // ALLOC_ALIGN_SIZE != 8 — compare the equivalent computation in
  // __libqasan_posix_memalign.
  if (size & (ALLOC_ALIGN_SIZE - 1))
    QASAN_POISON((char *)&p[1] + size,
                 (size & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE - size +
                     REDZONE_SIZE,
                 ASAN_HEAP_RIGHT_RZ);
  else
    QASAN_POISON((char *)&p[1] + size, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ);
  // Fill fresh memory with 0xff to make use-of-uninitialized visible.
  __builtin_memset(&p[1], 0xff, size);
  return &p[1];
}
// Interposed free(): validates the chunk header, defers the real free
// through the quarantine (to catch use-after-free), and poisons the
// freed user region in the shadow map.
void __libqasan_free(void *ptr) {
  if (!ptr) return;
#ifdef __GLIBC__
  // Bootstrap allocations came from the static arena, not the backend
  // allocator: silently ignore attempts to free them.
  if (ptr >= (void *)__tmp_alloc_zone &&
      ptr < ((void *)__tmp_alloc_zone + TMP_ZONE_SIZE))
    return;
#endif
  struct chunk_begin *p = ptr;
  p -= 1;
  // Validate that the chunk marker is readable (a crude check
  // to verify that ptr is a valid malloc region before we dereference it)
  QASAN_LOAD(p, sizeof(struct chunk_begin) - REDZONE_SIZE);
  size_t n = p->requested_size;
  // The user region must still be writable; a second free of the same
  // pointer hits the ASAN_HEAP_FREED poison set below and is reported.
  QASAN_STORE(ptr, n);
  int state = QASAN_SWAP(QASAN_DISABLED);  // disable qasan for this thread
  if (!quarantine_push(p)) {
    // Quarantine full or lock contended: release to the backend now.
    if (p->aligned_orig)
      backend_free(p->aligned_orig);
    else
      backend_free(p);
  }
  QASAN_SWAP(state);
  // Round up to the allocation granularity before poisoning, matching
  // how the region was sized at allocation time.
  if (n & (ALLOC_ALIGN_SIZE - 1))
    n = (n & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
  QASAN_POISON(ptr, n, ASAN_HEAP_FREED);
  QASAN_DEALLOC(ptr);
}
// Interposed calloc(): nmemb * size zeroed bytes.
// FIX: reject multiplication overflow (CWE-190) — a conforming calloc
// must fail instead of allocating a silently truncated buffer.
void *__libqasan_calloc(size_t nmemb, size_t size) {
  if (nmemb && size > ((size_t)-1) / nmemb) return NULL;
  size *= nmemb;
#ifdef __GLIBC__
  if (!__libqasan_malloc_initialized) {
    // Bootstrap path: the static arena is zero-initialized, so no memset
    // is needed here.
    void *r = &__tmp_alloc_zone[__tmp_alloc_zone_idx];
    __tmp_alloc_zone_idx += size;
    return r;
  }
#endif
  char *p = __libqasan_malloc(size);
  if (!p) return NULL;
  __builtin_memset(p, 0, size);
  return p;
}
// Interposed realloc(): allocate a new chunk, copy min(old, new) bytes,
// then free the old chunk. On allocation failure the old pointer is left
// untouched, matching libc realloc semantics.
void *__libqasan_realloc(void *ptr, size_t size) {
  char *new_mem = __libqasan_malloc(size);
  // No new memory, or nothing to copy from: we are done either way.
  if (!new_mem || !ptr) return new_mem;
  size_t old_size = ((struct chunk_begin *)ptr)[-1].requested_size;
  size_t to_copy = (old_size < size) ? old_size : size;
  __builtin_memcpy(new_mem, ptr, to_copy);
  __libqasan_free(ptr);
  return new_mem;
}
// Interposed posix_memalign(): over-allocates, shifts the data pointer up
// to the next `align` boundary, and stores the backend pointer in
// aligned_orig so free() can release the real allocation.
int __libqasan_posix_memalign(void **ptr, size_t align, size_t len) {
  // POSIX: align must be a power of two and a multiple of sizeof(void*).
  // FIX: the original tested (align % 2), which accepts non-power-of-two
  // even values such as 6 or 12.
  if (align == 0 || (align & (align - 1)) || (align % sizeof(void *)))
    return EINVAL;
  if (len == 0) {
    *ptr = NULL;
    return 0;
  }
  // FIX: reserve worst-case padding for the alignment shift of the data
  // pointer (up to `align` bytes); the original added only `len % align`,
  // which does not cover the shift.
  size_t size = len + align;
  int state = QASAN_SWAP(QASAN_DISABLED);  // disable qasan for this thread
  char *orig = backend_malloc(sizeof(struct chunk_struct) + size);
  QASAN_SWAP(state);
  if (!orig) return ENOMEM;
  QASAN_UNPOISON(orig, sizeof(struct chunk_struct) + size);
  char *data = orig + sizeof(struct chunk_begin);
  data += align - ((uintptr_t)data % align);
  struct chunk_begin *p = (struct chunk_begin *)data - 1;
  p->requested_size = len;
  p->aligned_orig = orig;
  // FIX: initialize the quarantine links — quarantine_push() consumes
  // them, and the original left them as garbage for aligned chunks.
  p->next = p->prev = NULL;
  QASAN_ALLOC(data, data + len);
  QASAN_POISON(p->redzone, REDZONE_SIZE, ASAN_HEAP_LEFT_RZ);
  if (len & (ALLOC_ALIGN_SIZE - 1))
    QASAN_POISON(
        data + len,
        (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE - len + REDZONE_SIZE,
        ASAN_HEAP_RIGHT_RZ);
  else
    QASAN_POISON(data + len, REDZONE_SIZE, ASAN_HEAP_RIGHT_RZ);
  // Fill fresh memory with 0xff to make use-of-uninitialized visible.
  __builtin_memset(data, 0xff, len);
  *ptr = data;
  return 0;
}
// Interposed memalign(): thin wrapper over posix_memalign that returns
// NULL on any failure instead of an error code.
void *__libqasan_memalign(size_t align, size_t len) {
  void *mem = NULL;
  (void)__libqasan_posix_memalign(&mem, align, len);
  return mem;
}
// Interposed aligned_alloc(): same as memalign, but C11 additionally
// requires len to be a multiple of align.
void *__libqasan_aligned_alloc(size_t align, size_t len) {
  void *mem = NULL;
  if (len % align) return NULL;
  (void)__libqasan_posix_memalign(&mem, align, len);
  return mem;
}

View File

@ -0,0 +1,74 @@
/*
* Copyright (C) 2012 William Swanson
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Except as contained in this notice, the names of the authors or
* their institutions shall not be used in advertising or otherwise to
* promote the sale, use or other dealings in this Software without
* prior written authorization from the authors.
*/
#ifndef MAP_H_INCLUDED
#define MAP_H_INCLUDED
// Recursive-expansion driver: EVAL re-scans its arguments 3 times per
// level over 5 levels (3^5 = 243 passes), which bounds how many list
// elements MAP/MAP_LIST can process.
#define EVAL0(...) __VA_ARGS__
#define EVAL1(...) EVAL0(EVAL0(EVAL0(__VA_ARGS__)))
#define EVAL2(...) EVAL1(EVAL1(EVAL1(__VA_ARGS__)))
#define EVAL3(...) EVAL2(EVAL2(EVAL2(__VA_ARGS__)))
#define EVAL4(...) EVAL3(EVAL3(EVAL3(__VA_ARGS__)))
#define EVAL(...) EVAL4(EVAL4(EVAL4(__VA_ARGS__)))
// End-of-list detection: the `()()()` sentinels appended by MAP/MAP_LIST
// make MAP_GET_END expand to MAP_END, which swallows the remaining args.
#define MAP_END(...)
#define MAP_OUT
#define MAP_COMMA ,
#define MAP_GET_END2() 0, MAP_END
#define MAP_GET_END1(...) MAP_GET_END2
#define MAP_GET_END(...) MAP_GET_END1
#define MAP_NEXT0(test, next, ...) next MAP_OUT
#define MAP_NEXT1(test, next) MAP_NEXT0(test, next, 0)
#define MAP_NEXT(test, next) MAP_NEXT1(MAP_GET_END test, next)
// MAP0/MAP1 alternate so each recursion step is a "different" macro and
// the preprocessor's no-self-reexpansion rule does not stop the loop.
#define MAP0(f, x, peek, ...) f(x) MAP_NEXT(peek, MAP1)(f, peek, __VA_ARGS__)
#define MAP1(f, x, peek, ...) f(x) MAP_NEXT(peek, MAP0)(f, peek, __VA_ARGS__)
#define MAP_LIST_NEXT1(test, next) MAP_NEXT0(test, MAP_COMMA next, 0)
#define MAP_LIST_NEXT(test, next) MAP_LIST_NEXT1(MAP_GET_END test, next)
#define MAP_LIST0(f, x, peek, ...) \
  f(x) MAP_LIST_NEXT(peek, MAP_LIST1)(f, peek, __VA_ARGS__)
#define MAP_LIST1(f, x, peek, ...) \
  f(x) MAP_LIST_NEXT(peek, MAP_LIST0)(f, peek, __VA_ARGS__)
/**
 * Applies the function macro `f` to each of the remaining parameters.
 */
#define MAP(f, ...) EVAL(MAP1(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0))
/**
 * Applies the function macro `f` to each of the remaining parameters and
 * inserts commas between the results.
 */
#define MAP_LIST(f, ...) \
  EVAL(MAP_LIST1(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0))
#endif

View File

@ -0,0 +1,243 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include <sys/mman.h>
#ifdef __x86_64__
// Overwrite 12 bytes at `addr` with an absolute jump to `dest`
// (mov rax, imm64; jmp rax). Clobbers rax. Returns the first byte
// after the written stub.
uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) {
  // mov rax, dest
  addr[0] = 0x48;
  addr[1] = 0xb8;
  *(uint8_t **)&addr[2] = dest;  // 8-byte immediate operand
  // jmp rax
  addr[10] = 0xff;
  addr[11] = 0xe0;
  return &addr[12];
}
#elif __i386__
// 7-byte stub: mov eax, imm32; jmp eax. Clobbers eax.
uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) {
  // mov eax, dest
  addr[0] = 0xb8;
  *(uint8_t **)&addr[1] = dest;  // 4-byte immediate operand
  // jmp eax
  addr[5] = 0xff;
  addr[6] = 0xe0;
  return &addr[7];
}
#elif __arm__
// in ARM, r12 is a scratch register used by the linker to jump,
// so let's use it in our stub
// NOTE(review): the sequence loads the literal at addr+8 into r12 and
// then executes `add pc, pc, r12`, which is pc-relative — this only
// lands on `dest` if the stored word is meant as an offset, yet an
// absolute pointer is stored. Verify on a 32-bit ARM target.
uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) {
  // ldr r12, OFF
  addr[0] = 0x0;
  addr[1] = 0xc0;
  addr[2] = 0x9f;
  addr[3] = 0xe5;
  // add pc, pc, r12
  addr[4] = 0xc;
  addr[5] = 0xf0;
  addr[6] = 0x8f;
  addr[7] = 0xe0;
  // OFF: .word dest
  *(uint32_t *)&addr[8] = (uint32_t)dest;
  return &addr[12];
}
#elif __aarch64__
// in ARM64, x16 is a scratch register used by the linker to jump,
// so let's use it in our stub
// 16-byte stub: ldr x16, [pc, #8]; br x16; .dword dest
uint8_t *__libqasan_patch_jump(uint8_t *addr, uint8_t *dest) {
  // ldr x16, OFF
  addr[0] = 0x50;
  addr[1] = 0x0;
  addr[2] = 0x0;
  addr[3] = 0x58;
  // br x16
  addr[4] = 0x0;
  addr[5] = 0x2;
  addr[6] = 0x1f;
  addr[7] = 0xd6;
  // OFF: .dword dest
  *(uint64_t *)&addr[8] = (uint64_t)dest;
  return &addr[16];
}
#else
#define CANNOT_HOTPATCH
#endif
#ifdef CANNOT_HOTPATCH
// No jump-stub support on this architecture: leave libc untouched.
void __libqasan_hotpatch(void) {
}
#else
// Mapped range of the libc image and its original mprotect() flags,
// filled in by find_libc().
static void *libc_start, *libc_end;
int libc_perms;
// Locate the executable mapping of libc by scanning /proc/self/maps and
// record its address range and original protection flags in
// libc_start/libc_end/libc_perms. Silently does nothing on failure.
static void find_libc(void) {
  FILE *  fp;
  char *  line = NULL;
  size_t  len = 0;
  ssize_t read;
  fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;
  while ((read = getline(&line, &len, fp)) != -1) {
    int      fields, dev_maj, dev_min, inode;
    uint64_t min, max, offset;
    char     flag_r, flag_w, flag_x, flag_p;
    char     path[512] = "";
    // FIX: field width must be "%511s", not "%512s" — scanf's width
    // excludes the terminating NUL, so a 512-char path would overflow
    // `path` by one byte.
    // NOTE(review): inode is scanned into an int; very large inode
    // numbers would overflow — harmless here since inode is unused.
    fields = sscanf(line,
                    "%" PRIx64 "-%" PRIx64 " %c%c%c%c %" PRIx64
                    " %x:%x %d"
                    " %511s",
                    &min, &max, &flag_r, &flag_w, &flag_x, &flag_p, &offset,
                    &dev_maj, &dev_min, &inode, path);
    // 10 fields = anonymous mapping (no path), 11 = file-backed.
    if ((fields < 10) || (fields > 11)) continue;
    if (flag_x == 'x' && (__libqasan_strstr(path, "/libc.so") ||
                          __libqasan_strstr(path, "/libc-"))) {
      libc_start = (void *)min;
      libc_end = (void *)max;
      libc_perms = PROT_EXEC;
      if (flag_w == 'w') libc_perms |= PROT_WRITE;
      if (flag_r == 'r') libc_perms |= PROT_READ;
      break;
    }
  }
  free(line);
  fclose(fp);
}
/* Why this shit? https://twitter.com/andreafioraldi/status/1227635146452541441
Unfortunatly, symbol override with LD_PRELOAD is not enough to prevent libc
code to call this optimized XMM-based routines.
We patch them at runtime to call our unoptimized version of the same routine.
*/
// Redirect libc's optimized (often SSE/NEON) string/memory routines to
// our instrumented __libqasan_* versions by writing jump stubs over the
// libc symbols. LD_PRELOAD interposition alone is not enough because
// libc calls these internally (see the comment above).
void __libqasan_hotpatch(void) {
  find_libc();
  if (!libc_start) return;
  // Make the libc image writable for patching; restored at the end.
  if (mprotect(libc_start, libc_end - libc_start,
               PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
    return;
  // NOTE(review): dlopen result is not NULL-checked before dlsym;
  // dlsym(NULL, ...) would search the main program instead — confirm
  // libc.so.6 is always loadable here.
  void *libc = dlopen("libc.so.6", RTLD_LAZY);
// Resolve libc's `fn` and, if present, overwrite its entry with a jump
// to our interposer of the same name.
#define HOTPATCH(fn) \
  uint8_t *p_##fn = (uint8_t *)dlsym(libc, #fn); \
  if (p_##fn) __libqasan_patch_jump(p_##fn, (uint8_t *)&(fn));
  HOTPATCH(memcmp)
  HOTPATCH(memmove)
  // memcpy may be an alias of memmove; only patch it separately when the
  // two symbols resolve to different addresses.
  uint8_t *p_memcpy = (uint8_t *)dlsym(libc, "memcpy");
  // fuck you libc
  if (p_memcpy && p_memmove != p_memcpy)
    __libqasan_patch_jump(p_memcpy, (uint8_t *)&memcpy);
  HOTPATCH(memchr)
  HOTPATCH(memrchr)
  HOTPATCH(memmem)
#ifndef __BIONIC__
  HOTPATCH(bzero)
  HOTPATCH(explicit_bzero)
  HOTPATCH(mempcpy)
  HOTPATCH(bcmp)
#endif
  HOTPATCH(strchr)
  HOTPATCH(strrchr)
  HOTPATCH(strcasecmp)
  HOTPATCH(strncasecmp)
  HOTPATCH(strcat)
  HOTPATCH(strcmp)
  HOTPATCH(strncmp)
  HOTPATCH(strcpy)
  HOTPATCH(strncpy)
  HOTPATCH(stpcpy)
  HOTPATCH(strdup)
  HOTPATCH(strlen)
  HOTPATCH(strnlen)
  HOTPATCH(strstr)
  HOTPATCH(strcasestr)
  HOTPATCH(wcslen)
  HOTPATCH(wcscpy)
  HOTPATCH(wcscmp)
#undef HOTPATCH
  // Restore libc's original protection flags.
  mprotect(libc_start, libc_end - libc_start, libc_perms);
}
#endif

View File

@ -0,0 +1,110 @@
/*******************************************************************************
Copyright (c) 2019-2021, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __QASAN_H__
#define __QASAN_H__
// NOTE(review): DEBUG is force-defined here for every includer — confirm
// this is intended for release builds.
#define DEBUG 1
#define QASAN_VERSTR "0.3"
// Magic syscall number used to talk to the QASan-patched QEMU (see the
// fake-syscall note below).
#define QASAN_FAKESYS_NR 0xa2a4
// Action codes passed as the first fake-syscall argument. Must stay in
// sync with the QasanAction enum on the Rust side (libafl_qemu/src/asan.rs).
enum {
  QASAN_ACTION_CHECK_LOAD,
  QASAN_ACTION_CHECK_STORE,
  QASAN_ACTION_POISON,
  QASAN_ACTION_USER_POISON,
  QASAN_ACTION_UNPOISON,
  QASAN_ACTION_IS_POISON,
  QASAN_ACTION_ALLOC,
  QASAN_ACTION_DEALLOC,
  QASAN_ACTION_ENABLE,
  QASAN_ACTION_DISABLE,
  QASAN_ACTION_SWAP_STATE,
};
/* shadow map byte values */
#define ASAN_VALID 0x00
#define ASAN_PARTIAL1 0x01
#define ASAN_PARTIAL2 0x02
#define ASAN_PARTIAL3 0x03
#define ASAN_PARTIAL4 0x04
#define ASAN_PARTIAL5 0x05
#define ASAN_PARTIAL6 0x06
#define ASAN_PARTIAL7 0x07
#define ASAN_ARRAY_COOKIE 0xac
#define ASAN_STACK_RZ 0xf0
#define ASAN_STACK_LEFT_RZ 0xf1
#define ASAN_STACK_MID_RZ 0xf2
#define ASAN_STACK_RIGHT_RZ 0xf3
#define ASAN_STACK_FREED 0xf5
#define ASAN_STACK_OOSCOPE 0xf8
#define ASAN_GLOBAL_RZ 0xf9
#define ASAN_HEAP_RZ 0xe9
#define ASAN_USER 0xf7
#define ASAN_HEAP_LEFT_RZ 0xfa
#define ASAN_HEAP_RIGHT_RZ 0xfb
#define ASAN_HEAP_FREED 0xfd
// Values exchanged with QASAN_SWAP to toggle per-thread instrumentation.
#define QASAN_ENABLED (0)
#define QASAN_DISABLED (1)
// fake syscall, works only for QASan user-mode!!!
#include <unistd.h>
#define QASAN_CALL0(action) \
  syscall(QASAN_FAKESYS_NR, action, NULL, NULL, NULL)
#define QASAN_CALL1(action, arg1) \
  syscall(QASAN_FAKESYS_NR, action, arg1, NULL, NULL)
#define QASAN_CALL2(action, arg1, arg2) \
  syscall(QASAN_FAKESYS_NR, action, arg1, arg2, NULL)
#define QASAN_CALL3(action, arg1, arg2, arg3) \
  syscall(QASAN_FAKESYS_NR, action, arg1, arg2, arg3)
#define QASAN_LOAD(ptr, len) \
  QASAN_CALL2(QASAN_ACTION_CHECK_LOAD, ptr, len)
#define QASAN_STORE(ptr, len) \
  QASAN_CALL2(QASAN_ACTION_CHECK_STORE, ptr, len)
#define QASAN_POISON(ptr, len, poison_byte) \
  QASAN_CALL3(QASAN_ACTION_POISON, ptr, len, poison_byte)
// NOTE(review): this sends QASAN_ACTION_POISON with the ASAN_USER byte
// rather than the dedicated QASAN_ACTION_USER_POISON action — confirm
// this is intentional.
#define QASAN_USER_POISON(ptr, len) \
  QASAN_CALL3(QASAN_ACTION_POISON, ptr, len, ASAN_USER)
#define QASAN_UNPOISON(ptr, len) \
  QASAN_CALL2(QASAN_ACTION_UNPOISON, ptr, len)
#define QASAN_IS_POISON(ptr, len) \
  QASAN_CALL2(QASAN_ACTION_IS_POISON, ptr, len)
#define QASAN_ALLOC(start, end) \
  QASAN_CALL2(QASAN_ACTION_ALLOC, start, end)
#define QASAN_DEALLOC(ptr) \
  QASAN_CALL1(QASAN_ACTION_DEALLOC, ptr)
#define QASAN_SWAP(state) \
  QASAN_CALL1(QASAN_ACTION_SWAP_STATE, state)
#endif

View File

@ -0,0 +1,339 @@
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include <ctype.h>
// Plain byte-wise forward copy of n bytes from src to dest; like libc
// memcpy, the regions must not overlap. Returns dest.
void *__libqasan_memcpy(void *dest, const void *src, size_t n) {
  unsigned char *      out = dest;
  const unsigned char *in = src;
  size_t               i;
  for (i = 0; i < n; ++i)
    out[i] = in[i];
  return dest;
}
// Overlap-safe copy of n bytes from src to dest. Returns dest.
// FIX: the original routed overlapping copies through a temporary buffer
// from __libqasan_malloc and dereferenced the result without a NULL
// check, crashing on allocation failure. Copying in the right direction
// (forward when dest < src, backward otherwise) needs no allocation.
void *__libqasan_memmove(void *dest, const void *src, size_t n) {
  unsigned char *      d = dest;
  const unsigned char *s = src;
  if (!n || d == s) return dest;
  if (d < s) {
    // dest below src: a forward copy cannot clobber unread bytes
    while (n--)
      *d++ = *s++;
  } else {
    // dest above src: copy backward from the tail
    while (n--)
      d[n] = s[n];
  }
  return dest;
}
// Fill n bytes at s with (unsigned char)c. Returns s.
void *__libqasan_memset(void *s, int c, size_t n) {
  unsigned char *p = s;
  size_t         i;
  for (i = 0; i < n; ++i)
    p[i] = (unsigned char)c;
  return s;
}
// Return a pointer to the first occurrence of (unsigned char)c in the
// first n bytes of s, or NULL if absent.
void *__libqasan_memchr(const void *s, int c, size_t n) {
  const unsigned char *p = (const unsigned char *)s;
  const unsigned char  target = (unsigned char)c;
  while (n--) {
    if (*p == target) return (void *)p;
    ++p;
  }
  return NULL;
}
// Return a pointer to the LAST occurrence of (unsigned char)c in the
// first n bytes of s, or NULL if absent.
// FIX: the original scanned from index n down to 0 inclusive, reading
// one byte past the end of the buffer; the scan must start at n-1.
void *__libqasan_memrchr(const void *s, int c, size_t n) {
  const unsigned char *m = (const unsigned char *)s;
  while (n--)
    if (m[n] == (unsigned char)c) return (void *)&m[n];
  return NULL;
}
// Length of the NUL-terminated string s, excluding the terminator.
size_t __libqasan_strlen(const char *s) {
  size_t n = 0;
  while (s[n])
    ++n;
  return n;
}
// Length of s, but never scan more than len bytes.
size_t __libqasan_strnlen(const char *s, size_t len) {
  size_t n = 0;
  while (n < len && s[n])
    ++n;
  return n;
}
// Lexicographic comparison of two NUL-terminated strings as unsigned
// bytes: <0, 0 or >0 like libc strcmp.
int __libqasan_strcmp(const char *str1, const char *str2) {
  const unsigned char *a = (const unsigned char *)str1;
  const unsigned char *b = (const unsigned char *)str2;
  for (;; ++a, ++b) {
    if (*a != *b) return *a - *b;
    if (!*a) return 0;
  }
}
// Like __libqasan_strcmp, but compare at most len bytes.
int __libqasan_strncmp(const char *str1, const char *str2, size_t len) {
  const unsigned char *a = (const unsigned char *)str1;
  const unsigned char *b = (const unsigned char *)str2;
  for (; len; --len, ++a, ++b) {
    if (*a != *b) return *a - *b;
    if (!*a) break;
  }
  return 0;
}
// Case-insensitive strcmp (both sides lowered with tolower).
int __libqasan_strcasecmp(const char *str1, const char *str2) {
  for (;; ++str1, ++str2) {
    const unsigned char c1 = tolower(*str1), c2 = tolower(*str2);
    if (c1 != c2) return c1 - c2;
    if (!c1) return 0;
  }
}
// Case-insensitive strncmp: compare at most len bytes.
int __libqasan_strncasecmp(const char *str1, const char *str2, size_t len) {
  for (; len; --len, ++str1, ++str2) {
    const unsigned char c1 = tolower(*str1), c2 = tolower(*str2);
    if (c1 != c2) return c1 - c2;
    if (!c1) break;
  }
  return 0;
}
// Compare len bytes as unsigned chars; returns -1, 0 or 1 (note: sign
// only, not the byte difference, matching the original).
int __libqasan_memcmp(const void *mem1, const void *mem2, size_t len) {
  const unsigned char *a = (const unsigned char *)mem1;
  const unsigned char *b = (const unsigned char *)mem2;
  size_t               i;
  for (i = 0; i < len; ++i)
    if (a[i] != b[i]) return (a[i] > b[i]) ? 1 : -1;
  return 0;
}
// BSD bcmp: returns 0 if the first len bytes are equal, 1 otherwise
// (no ordering information).
int __libqasan_bcmp(const void *mem1, const void *mem2, size_t len) {
  const unsigned char *a = (const unsigned char *)mem1;
  const unsigned char *b = (const unsigned char *)mem2;
  size_t               i;
  for (i = 0; i < len; ++i)
    if (a[i] != b[i]) return 1;
  return 0;
}
// Return the first occurrence of needle inside haystack (NULL if none).
// An empty needle matches at the start, like libc strstr.
char *__libqasan_strstr(const char *haystack, const char *needle) {
  size_t i, j;
  for (i = 0;; ++i) {
    for (j = 0; needle[j]; ++j)
      if (!haystack[i + j] || haystack[i + j] != needle[j]) break;
    if (!needle[j]) return (char *)&haystack[i];
    if (!haystack[i]) return NULL;
  }
}
// Case-insensitive strstr: first match of needle in haystack ignoring
// case (both sides lowered with tolower), NULL if absent.
char *__libqasan_strcasestr(const char *haystack, const char *needle) {
  size_t i, j;
  for (i = 0;; ++i) {
    for (j = 0; needle[j]; ++j)
      if (!haystack[i + j] ||
          tolower(needle[j]) != tolower(haystack[i + j]))
        break;
    if (!needle[j]) return (char *)&haystack[i];
    if (!haystack[i]) return NULL;
  }
}
// Find the first occurrence of the needle_len-byte needle inside the
// haystack_len-byte haystack; NULL if absent. Empty needle matches at
// the start; a single-byte needle is delegated to memchr.
void *__libqasan_memmem(const void *haystack, size_t haystack_len,
                        const void *needle, size_t needle_len) {
  const char *h = (const char *)haystack;
  const char *n = (const char *)needle;
  if (needle_len == 0) return (void *)haystack;
  if (haystack_len < needle_len) return NULL;
  if (needle_len == 1) return memchr(haystack, *n, haystack_len);
  size_t last = haystack_len - needle_len;
  size_t i;
  for (i = 0; i <= last; ++i)
    // cheap first-byte check before the full memcmp
    if (h[i] == *n && memcmp(h + i, n, needle_len) == 0)
      return (void *)(h + i);
  return NULL;
}
char *__libqasan_strchr(const char *s, int c) {
while (*s != (char)c)
if (!*s++) return 0;
return (char *)s;
}
// Last occurrence of (char)c in s, or NULL. The terminator itself is a
// match when c is '\0', like libc strrchr.
char *__libqasan_strrchr(const char *s, int c) {
  const char *last = NULL;
  for (;; ++s) {
    if (*s == (char)c) last = s;
    if (!*s) return (char *)last;
  }
}
// Length of the L'\0'-terminated wide string s, excluding the terminator.
// (The original was manually unrolled by four; a plain loop computes the
// identical result.)
size_t __libqasan_wcslen(const wchar_t *s) {
  size_t len = 0;
  while (s[len] != L'\0')
    ++len;
  return len;
}
// Copy the wide string s into d, including the L'\0' terminator.
// Returns d.
wchar_t *__libqasan_wcscpy(wchar_t *d, const wchar_t *s) {
  size_t i = 0;
  do {
    d[i] = s[i];
  } while (s[i++] != L'\0');
  return d;
}
// Compare two wide strings. Matches the original exactly: when s2 ends
// first the raw difference c1 - c2 is returned; any other mismatch
// yields -1 or 1.
int __libqasan_wcscmp(const wchar_t *s1, const wchar_t *s2) {
  for (;;) {
    const wchar_t a = *s1++;
    const wchar_t b = *s2++;
    if (b == L'\0') return a - b;
    if (a != b) return (a < b) ? -1 : 1;
  }
}

View File

@ -0,0 +1,83 @@
/*
This code is DEPRECATED!
I'm keeping it here cause maybe the uninstrumentation of a function is needed
for some strange reason.
*/
/*******************************************************************************
Copyright (c) 2019-2020, Andrea Fioraldi
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#include "libqasan.h"
#include "map_macro.h"
#include <sys/types.h>
#include <pwd.h>
// Helpers that destructure the (type, name) parameter pairs fed to
// HOOK_UNINSTRUMENT via map_macro.h's MAP_LIST:
#define X_GET_FNPAR(type, name) name
#define GET_FNPAR(x) X_GET_FNPAR x
#define X_GET_FNTYPE(type, name) type
#define GET_FNTYPE(x) X_GET_FNTYPE x
#define X_GET_FNDECL(type, name) type name
#define GET_FNDECL(x) X_GET_FNDECL x
// HOOK_UNINSTRUMENT(ret, name, (t1, a1), ...) defines a libc interposer
// `name` that lazily resolves the real symbol with ASSERT_DLSYM and calls
// it with QASAN disabled for this thread, so the wrapped libc call runs
// uninstrumented.
#define HOOK_UNINSTRUMENT(rettype, name, ...) \
  rettype (*__lq_libc_##name)(MAP_LIST(GET_FNTYPE, __VA_ARGS__)); \
  rettype name(MAP_LIST(GET_FNDECL, __VA_ARGS__)) { \
  \
    if (!(__lq_libc_##name)) __lq_libc_##name = ASSERT_DLSYM(name); \
    int state = QASAN_SWAP(QASAN_DISABLED); \
    rettype r = __lq_libc_##name(MAP_LIST(GET_FNPAR, __VA_ARGS__)); \
    QASAN_SWAP(state); \
  \
    return r; \
  \
  }
// getenv is the only hook still active; the rest are kept below in a
// comment for reference.
HOOK_UNINSTRUMENT(char *, getenv, (const char *, name))
/*
HOOK_UNINSTRUMENT(char*, setlocale, (int, category), (const char *, locale))
HOOK_UNINSTRUMENT(int, setenv, (const char *, name), (const char *, value),
(int, overwrite)) HOOK_UNINSTRUMENT(char*, getenv, (const char *, name))
HOOK_UNINSTRUMENT(char*, bindtextdomain, (const char *, domainname), (const char
*, dirname)) HOOK_UNINSTRUMENT(char*, bind_textdomain_codeset, (const char *,
domainname), (const char *, codeset)) HOOK_UNINSTRUMENT(char*, gettext, (const
char *, msgid)) HOOK_UNINSTRUMENT(char*, dgettext, (const char *, domainname),
(const char *, msgid)) HOOK_UNINSTRUMENT(char*, dcgettext, (const char *,
domainname), (const char *, msgid), (int, category)) HOOK_UNINSTRUMENT(int,
__gen_tempname, (char, *tmpl), (int, suffixlen), (int, flags), (int, kind))
HOOK_UNINSTRUMENT(int, mkstemp, (char *, template))
HOOK_UNINSTRUMENT(int, mkostemp, (char *, template), (int, flags))
HOOK_UNINSTRUMENT(int, mkstemps, (char *, template), (int, suffixlen))
HOOK_UNINSTRUMENT(int, mkostemps, (char *, template), (int, suffixlen), (int,
flags)) HOOK_UNINSTRUMENT(struct passwd *, getpwnam, (const char *, name))
HOOK_UNINSTRUMENT(struct passwd *, getpwuid, (uid_t, uid))
HOOK_UNINSTRUMENT(int, getpwnam_r, (const char *, name), (struct passwd *, pwd),
(char *, buf), (size_t, buflen), (struct passwd **, result))
HOOK_UNINSTRUMENT(int, getpwuid_r, (uid_t, uid), (struct passwd *, pwd), (char
*, buf), (size_t, buflen), (struct passwd **, result))
*/

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,160 @@
/*******************************************************************************
BSD 2-Clause License
Copyright (c) 2020-2021, Andrea Fioraldi
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __ASAN_GIOVESE_H__
#define __ASAN_GIOVESE_H__
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
// Guest addresses are modeled as 64-bit regardless of the guest arch.
#define target_ulong uint64_t
#define TARGET_FMT_lx "%" PRIx64
#define TARGET_FMT_ld "%" PRId64
#ifndef ASAN_NAME_STR
#define ASAN_NAME_STR "AddressSanitizer"
#endif
// Fixed shadow-map layout (low/high shadow with a gap in between).
// NOTE(review): these constants match the classic 64-bit ASan layout
// with SHADOW_OFFSET 0x7fff8000 — confirm for non-x86_64 hosts.
#define HIGH_SHADOW_ADDR ((void*)0x02008fff7000ULL)
#define LOW_SHADOW_ADDR ((void*)0x00007fff8000ULL)
#define GAP_SHADOW_ADDR ((void*)0x00008fff7000)
#define HIGH_SHADOW_SIZE (0xdfff0000fffULL)
#define LOW_SHADOW_SIZE (0xfffefffULL)
#define GAP_SHADOW_SIZE (0x1ffffffffff)
#define SHADOW_OFFSET (0x7fff8000ULL)
/* shadow map byte values */
#define ASAN_VALID 0x00
#define ASAN_PARTIAL1 0x01
#define ASAN_PARTIAL2 0x02
#define ASAN_PARTIAL3 0x03
#define ASAN_PARTIAL4 0x04
#define ASAN_PARTIAL5 0x05
#define ASAN_PARTIAL6 0x06
#define ASAN_PARTIAL7 0x07
#define ASAN_ARRAY_COOKIE 0xac
#define ASAN_STACK_RZ 0xf0
#define ASAN_STACK_LEFT_RZ 0xf1
#define ASAN_STACK_MID_RZ 0xf2
#define ASAN_STACK_RIGHT_RZ 0xf3
#define ASAN_STACK_FREED 0xf5
#define ASAN_STACK_OOSCOPE 0xf8
#define ASAN_GLOBAL_RZ 0xf9
#define ASAN_HEAP_RZ 0xe9
#define ASAN_USER 0xf7
#define ASAN_HEAP_LEFT_RZ 0xfa
#define ASAN_HEAP_RIGHT_RZ 0xfb
#define ASAN_HEAP_FREED 0xfd
enum {
  ACCESS_TYPE_LOAD,
  ACCESS_TYPE_STORE,
};
// A recorded call stack (guest PCs) with the thread that produced it.
struct call_context {
  target_ulong* addresses;
  uint32_t tid;
  uint32_t size;
};
// Metadata for one tracked heap chunk: guest address range plus the
// call contexts of its allocation and (optional) free.
struct chunk_info {
  target_ulong start;
  target_ulong end;
  struct call_context* alloc_ctx;
  struct call_context* free_ctx; // NULL if chunk is allocated
};
extern void* __ag_high_shadow;
extern void* __ag_low_shadow;
// ------------------------------------------------------------------------- //
// Virtual functions, you have to implement them
// ------------------------------------------------------------------------- //
///////////////////////////////////////////////////////////////////////////////
void asan_giovese_populate_context(struct call_context* ctx, target_ulong pc);
char* asan_giovese_printaddr(target_ulong addr);
///////////////////////////////////////////////////////////////////////////////
// ------------------------------------------------------------------------- //
// Exposed functions
// ------------------------------------------------------------------------- //
void asan_giovese_init(void);
// this has to be fast, ptr is an host pointer
// Fixed-size checks return nonzero when the access hits poisoned shadow.
int asan_giovese_load1(void* ptr);
int asan_giovese_load2(void* ptr);
int asan_giovese_load4(void* ptr);
int asan_giovese_load8(void* ptr);
int asan_giovese_store1(void* ptr);
int asan_giovese_store2(void* ptr);
int asan_giovese_store4(void* ptr);
int asan_giovese_store8(void* ptr);
int asan_giovese_loadN(void* ptr, size_t n);
int asan_giovese_storeN(void* ptr, size_t n);
int asan_giovese_guest_loadN(target_ulong addr, size_t n);
int asan_giovese_guest_storeN(target_ulong addr, size_t n);
// Shadow (un)poisoning over host or guest address ranges.
int asan_giovese_poison_region(void* ptr, size_t n,
                               uint8_t poison_byte);
int asan_giovese_user_poison_region(void* ptr, size_t n);
int asan_giovese_unpoison_region(void* ptr, size_t n);
int asan_giovese_poison_guest_region(target_ulong addr, size_t n, uint8_t poison_byte);
int asan_giovese_user_poison_guest_region(target_ulong addr, size_t n);
int asan_giovese_unpoison_guest_region(target_ulong addr, size_t n);
// addr is a guest pointer
int asan_giovese_report_and_crash(int access_type, target_ulong addr, size_t n,
                                  target_ulong pc, target_ulong bp,
                                  target_ulong sp);
int asan_giovese_deadly_signal(int signum, target_ulong addr, target_ulong pc,
                               target_ulong bp, target_ulong sp);
int asan_giovese_badfree(target_ulong addr, target_ulong pc);
// Interval-map over tracked allocations (lookup / remove / insert).
struct chunk_info* asan_giovese_alloc_search(target_ulong query);
void asan_giovese_alloc_remove(target_ulong start, target_ulong end);
void asan_giovese_alloc_insert(target_ulong start, target_ulong end,
                               struct call_context* alloc_ctx);
#endif

518
libafl_qemu/src/asan.rs Normal file
View File

@ -0,0 +1,518 @@
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use num_enum::{IntoPrimitive, TryFromPrimitive};
use std::{env, fs};
use crate::{
emu,
emu::SyscallHookResult,
executor::QemuExecutor,
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
};
// TODO at some point, merge parts with libafl_frida
pub const QASAN_FAKESYS_NR: i32 = 0xa2a4;
/// Actions of the QASan fake-syscall interface (first argument of the
/// syscall with number [`QASAN_FAKESYS_NR`]).
/// The `#[repr(u64)]` discriminants must stay in sync with the
/// `QASAN_ACTION_*` enum in libqasan's qasan.h.
#[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy)]
#[repr(u64)]
pub enum QasanAction {
    CheckLoad,
    CheckStore,
    Poison,
    UserPoison,
    UnPoison,
    IsPoison,
    Alloc,
    Dealloc,
    Enable,
    Disable,
    SwapState,
}
/// Shadow-map byte values; mirrors the `ASAN_*` defines in
/// asan-giovese.h (the discriminants must match byte-for-byte).
#[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy)]
#[repr(u8)]
pub enum PoisonKind {
    Valid = 0,
    Partial1 = 1,
    Partial2 = 2,
    Partial3 = 3,
    Partial4 = 4,
    Partial5 = 5,
    Partial6 = 6,
    Partial7 = 7,
    ArrayCookie = 0xac,
    StackRz = 0xf0,
    StackLeftRz = 0xf1,
    StackMidRz = 0xf2,
    StackRightRz = 0xf3,
    // NOTE(review): `StacKFreed` has a typo'd capital K; renaming would
    // break the public API, so it is left as-is.
    StacKFreed = 0xf5,
    StackOOScope = 0xf8,
    GlobalRz = 0xf9,
    HeapRz = 0xe9,
    User = 0xf7,
    HeapLeftRz = 0xfa,
    HeapRightRz = 0xfb,
    HeapFreed = 0xfd,
}
/// Mirror of libqasan's `struct call_context`: a recorded backtrace
/// (`size` guest addresses) plus the thread id.
#[repr(C)]
struct CallContext {
    pub addresses: *const u64,
    pub tid: u32,
    pub size: u32,
}
/// Mirror of libqasan's `struct chunk_info`, describing a tracked heap chunk.
#[repr(C)]
struct ChunkInfo {
    pub start: u64,
    pub end: u64,
    pub alloc_ctx: *const CallContext,
    pub free_ctx: *const CallContext, // NULL if chunk is allocated
}
// FFI bindings to the asan-giovese runtime linked into the patched QEMU.
// All pointers are *host* pointers (guest addresses must go through
// `emu::g2h` first); callers in this file treat a non-zero return from the
// load/store checks as a bad access.
extern "C" {
    fn asan_giovese_init();
    fn asan_giovese_load1(ptr: *const u8) -> i32;
    fn asan_giovese_load2(ptr: *const u8) -> i32;
    fn asan_giovese_load4(ptr: *const u8) -> i32;
    fn asan_giovese_load8(ptr: *const u8) -> i32;
    fn asan_giovese_store1(ptr: *const u8) -> i32;
    fn asan_giovese_store2(ptr: *const u8) -> i32;
    fn asan_giovese_store4(ptr: *const u8) -> i32;
    fn asan_giovese_store8(ptr: *const u8) -> i32;
    // int asan_giovese_loadN(void* ptr, size_t n);
    fn asan_giovese_loadN(ptr: *const u8, n: usize) -> i32;
    // int asan_giovese_storeN(void* ptr, size_t n);
    fn asan_giovese_storeN(ptr: *const u8, n: usize) -> i32;
    // int asan_giovese_poison_region(void* ptr, size_t n, uint8_t poison_byte);
    fn asan_giovese_poison_region(ptr: *const u8, n: usize, poison: u8) -> i32;
    // int asan_giovese_unpoison_region(void* ptr, size_t n);
    fn asan_giovese_unpoison_region(ptr: *const u8, n: usize) -> i32;
    // struct chunk_info* asan_giovese_alloc_search(target_ulong query);
    fn asan_giovese_alloc_search(query: u64) -> *mut ChunkInfo;
    // void asan_giovese_alloc_remove(target_ulong start, target_ulong end);
    fn asan_giovese_alloc_remove(start: u64, end: u64);
    // void asan_giovese_alloc_insert(target_ulong start, target_ulong end, struct call_context* alloc_ctx);
    fn asan_giovese_alloc_insert(start: u64, end: u64, alloc_ctx: *const CallContext);
}
// Set by `init_with_asan` once the runtime is ready; checked in `QemuAsanHelper::new`.
static mut ASAN_INITED: bool = false;
/// Initialize QEMU with the QASan runtime (`libqasan.so`) injected into the
/// guest via `LD_PRELOAD`, then call [`emu::init`].
///
/// The library is expected to live next to the current executable. An
/// existing `-E LD_PRELOAD=...` argument, or an `LD_PRELOAD` entry inside a
/// `QEMU_SET_ENV` variable, is extended; if neither is present,
/// `-E LD_PRELOAD=<path>` is prepended to `args` right after `argv[0]`.
///
/// Returns the value returned by [`emu::init`].
///
/// # Panics
/// Panics if `args` is empty or if the path to `libqasan.so` is not valid UTF-8.
pub fn init_with_asan(args: &mut Vec<String>, env: &mut [(String, String)]) -> i32 {
    assert!(!args.is_empty());
    // libqasan.so is shipped alongside the fuzzer binary.
    let current = env::current_exe().unwrap();
    let asan_lib = fs::canonicalize(&current)
        .unwrap()
        .parent()
        .unwrap()
        .join("libqasan.so");
    let asan_lib = asan_lib
        .to_str()
        .expect("The path to the asan lib is invalid")
        .to_string();
    // Prepend our library to an existing `LD_PRELOAD=` assignment.
    let add_asan =
        |e: &str| "LD_PRELOAD=".to_string() + &asan_lib + " " + &e["LD_PRELOAD=".len()..];
    let mut added = false;
    // Patch LD_PRELOAD entries carried inside QEMU_SET_ENV (comma-separated).
    for (k, v) in env.iter_mut() {
        if k == "QEMU_SET_ENV" {
            let mut new_v = vec![];
            for e in v.split(',') {
                if e.starts_with("LD_PRELOAD=") {
                    added = true;
                    new_v.push(add_asan(e));
                } else {
                    new_v.push(e.to_string());
                }
            }
            *v = new_v.join(",");
        }
    }
    // Patch an `-E LD_PRELOAD=...` pair already on the command line.
    for i in 0..args.len() {
        if args[i] == "-E" && i + 1 < args.len() && args[i + 1].starts_with("LD_PRELOAD=") {
            added = true;
            args[i + 1] = add_asan(&args[i + 1]);
        }
    }
    // Otherwise inject a fresh `-E LD_PRELOAD=<lib>` pair.
    if !added {
        args.insert(1, "LD_PRELOAD=".to_string() + &asan_lib);
        args.insert(1, "-E".into());
    }
    unsafe {
        asan_giovese_init();
        ASAN_INITED = true;
    }
    emu::init(args, env)
}
// TODO intrumentation filter
/// `QemuHelper` adding AddressSanitizer-style checks to emulated
/// loads and stores; the actual shadow state lives in the asan-giovese
/// C runtime.
pub struct QemuAsanHelper {
    enabled: bool, // when false, every check is a no-op
    // NOTE(review): not yet consulted by the registered hooks (see file TODO).
    filter: QemuInstrumentationFilter,
}
impl QemuAsanHelper {
    /// Create a helper with checks enabled and no instrumentation filter.
    ///
    /// # Panics
    /// Panics if the ASan runtime was not set up via [`init_with_asan`].
    #[must_use]
    pub fn new() -> Self {
        // Clippy `bool_comparison`: test the flag directly instead of `== true`.
        assert!(unsafe { ASAN_INITED }, "The ASan runtime is not initialized, use init_with_asan(...) instead of just init(...)");
        Self {
            enabled: true,
            filter: QemuInstrumentationFilter::None,
        }
    }

    /// Create a helper restricted to addresses accepted by `filter`.
    #[must_use]
    pub fn with_instrumentation_filter(filter: QemuInstrumentationFilter) -> Self {
        Self {
            enabled: true,
            filter,
        }
    }

    /// Whether `addr` should be instrumented according to the filter.
    #[must_use]
    pub fn must_instrument(&self, addr: u64) -> bool {
        self.filter.allowed(addr)
    }

    /// Whether checks are currently active.
    #[must_use]
    pub fn enabled(&self) -> bool {
        self.enabled
    }

    /// Toggle checking at runtime (driven by the Enable/Disable/SwapState actions).
    pub fn set_enabled(&mut self, enabled: bool) {
        self.enabled = enabled;
    }

    /// Record a guest heap allocation covering `[start, end)`.
    #[allow(clippy::unused_self)]
    pub fn alloc(&mut self, start: u64, end: u64) {
        unsafe {
            // Zeroed call context: backtrace collection is not implemented here.
            let ctx: *const CallContext =
                libc::calloc(core::mem::size_of::<CallContext>(), 1) as *const _;
            asan_giovese_alloc_insert(start, end, ctx);
        }
    }

    /// Mark the chunk starting at `addr` as freed.
    /// Aborts the process on an invalid free (wild pointer, or a pointer
    /// that is not the start of a tracked chunk).
    #[allow(clippy::unused_self)]
    pub fn dealloc(&mut self, addr: u64) {
        unsafe {
            let ckinfo = asan_giovese_alloc_search(addr);
            if let Some(ck) = ckinfo.as_mut() {
                if ck.start != addr {
                    // Free not the start of the chunk
                    std::process::abort();
                }
                let ctx: *const CallContext =
                    libc::calloc(core::mem::size_of::<CallContext>(), 1) as *const _;
                ck.free_ctx = ctx;
            } else {
                // Free of wild ptr
                std::process::abort();
            }
        }
    }

    /// Whether any byte in `[addr, addr + size)` is poisoned.
    #[allow(clippy::unused_self)]
    pub fn is_poisoned(&self, addr: u64, size: usize) -> bool {
        unsafe { asan_giovese_loadN(emu::g2h(addr), size) != 0 }
    }

    // The read_*/write_* checks below abort the process on a bad access,
    // mirroring ASan's fail-fast behavior; they are no-ops while disabled.

    pub fn read_1(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_load1(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn read_2(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_load2(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn read_4(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_load4(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn read_8(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_load8(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn read_n(&mut self, addr: u64, size: usize) {
        if self.enabled() && unsafe { asan_giovese_loadN(emu::g2h(addr), size) != 0 } {
            std::process::abort();
        }
    }

    pub fn write_1(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_store1(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn write_2(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_store2(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn write_4(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_store4(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn write_8(&mut self, addr: u64) {
        if self.enabled() && unsafe { asan_giovese_store8(emu::g2h(addr)) != 0 } {
            std::process::abort();
        }
    }

    pub fn write_n(&mut self, addr: u64, size: usize) {
        if self.enabled() && unsafe { asan_giovese_storeN(emu::g2h(addr), size) != 0 } {
            std::process::abort();
        }
    }

    /// Poison `size` bytes of guest memory at `addr` with `poison`.
    #[allow(clippy::unused_self)]
    pub fn poison(&mut self, addr: u64, size: usize, poison: PoisonKind) {
        unsafe { asan_giovese_poison_region(emu::g2h(addr), size, poison.into()) };
    }

    /// Clear poisoning on `size` bytes of guest memory at `addr`.
    #[allow(clippy::unused_self)]
    pub fn unpoison(&mut self, addr: u64, size: usize) {
        unsafe { asan_giovese_unpoison_region(emu::g2h(addr), size) };
    }

    /// Drop all tracked allocations (called after each execution).
    #[allow(clippy::unused_self)]
    pub fn reset(&mut self) {
        unsafe { asan_giovese_alloc_remove(0, u64::MAX) };
    }
}
impl Default for QemuAsanHelper {
    /// Same as [`QemuAsanHelper::new`]; panics if ASan was not initialized.
    fn default() -> Self {
        Self::new()
    }
}
impl<I, S> QemuHelper<I, S> for QemuAsanHelper
where
    I: Input,
    S: HasMetadata,
{
    /// Register the ASan read/write execution hooks and the fake-syscall
    /// handler on the executor.
    fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
        // Generation hooks (instrumentation filtering) not wired up yet.
        //executor.hook_read_generation(gen_readwrite_asan::<I, QT, S>);
        executor.hook_read8_execution(trace_read8_asan::<I, QT, S>);
        executor.hook_read4_execution(trace_read4_asan::<I, QT, S>);
        executor.hook_read2_execution(trace_read2_asan::<I, QT, S>);
        executor.hook_read1_execution(trace_read1_asan::<I, QT, S>);
        executor.hook_read_n_execution(trace_read_n_asan::<I, QT, S>);
        //executor.hook_write_generation(gen_readwrite_asan::<I, QT, S>);
        executor.hook_write8_execution(trace_write8_asan::<I, QT, S>);
        executor.hook_write4_execution(trace_write4_asan::<I, QT, S>);
        executor.hook_write2_execution(trace_write2_asan::<I, QT, S>);
        executor.hook_write1_execution(trace_write1_asan::<I, QT, S>);
        executor.hook_write_n_execution(trace_write_n_asan::<I, QT, S>);
        executor.hook_syscalls(qasan_fake_syscall::<I, QT, S>);
    }

    /// Drop allocation tracking after every run so state does not leak
    /// between executions.
    fn post_exec(&mut self, _input: &I) {
        self.reset();
    }
}
// TODO add pc to generation hooks
/// Generation hook: emit an instrumentation id (the pc itself) only for
/// addresses accepted by the ASan helper's instrumentation filter.
pub fn gen_readwrite_asan<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    pc: u64,
    _size: usize,
) -> Option<u64>
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let helper = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    helper.must_instrument(pc).then(|| pc)
}
/// Execution hook: ASan-check a 1-byte read at `addr`.
pub fn trace_read1_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.read_1(addr);
}
/// Execution hook: ASan-check a 2-byte read at `addr`.
pub fn trace_read2_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.read_2(addr);
}
/// Execution hook: ASan-check a 4-byte read at `addr`.
pub fn trace_read4_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.read_4(addr);
}
/// Execution hook: ASan-check an 8-byte read at `addr`.
pub fn trace_read8_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.read_8(addr);
}
/// Execution hook: ASan-check a `size`-byte read at `addr`.
pub fn trace_read_n_asan<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    _id: u64,
    addr: u64,
    size: usize,
) where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.read_n(addr, size);
}
/// Execution hook: ASan-check a 1-byte write at `addr`.
pub fn trace_write1_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.write_1(addr);
}
/// Execution hook: ASan-check a 2-byte write at `addr`.
pub fn trace_write2_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.write_2(addr);
}
/// Execution hook: ASan-check a 4-byte write at `addr`.
pub fn trace_write4_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.write_4(addr);
}
/// Execution hook: ASan-check an 8-byte write at `addr`.
pub fn trace_write8_asan<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    h.write_8(addr);
}
/// Execution hook: ASan-check a `size`-byte write at `addr`.
pub fn trace_write_n_asan<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    _id: u64,
    addr: u64,
    size: usize,
) where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
    // Bug fix: this is the *write* hook, but it previously called `read_n`,
    // so n-byte stores were validated against load shadow semantics instead
    // of store semantics.
    h.write_n(addr, size);
}
/// Syscall hook intercepting the QASan fake syscall (`QASAN_FAKESYS_NR`)
/// issued by the guest-side libqasan runtime, dispatching on the action in
/// `a0` to drive checks, poisoning, allocation tracking, and enable state.
///
/// Returns a "skip syscall" result carrying the action's return value for
/// the fake syscall; any other syscall number passes through unmodified.
pub fn qasan_fake_syscall<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    sys_num: i32,
    a0: u64,
    a1: u64,
    a2: u64,
    a3: u64,
    _a4: u64,
    _a5: u64,
    _a6: u64,
    _a7: u64,
) -> SyscallHookResult
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    if sys_num == QASAN_FAKESYS_NR {
        let h = helpers.match_first_type_mut::<QemuAsanHelper>().unwrap();
        // Return value of the fake syscall; only IsPoison sets it to non-zero.
        let mut r = 0;
        match QasanAction::try_from(a0).expect("Invalid QASan action number") {
            QasanAction::CheckLoad => {
                h.read_n(a1, a2 as usize);
            }
            QasanAction::CheckStore => {
                h.write_n(a1, a2 as usize);
            }
            QasanAction::Poison => {
                // a3 carries the poison byte.
                h.poison(a1, a2 as usize, PoisonKind::try_from(a3 as u8).unwrap());
            }
            QasanAction::UserPoison => {
                h.poison(a1, a2 as usize, PoisonKind::User);
            }
            QasanAction::UnPoison => {
                h.unpoison(a1, a2 as usize);
            }
            QasanAction::IsPoison => {
                if h.is_poisoned(a1, a2 as usize) {
                    r = 1;
                }
            }
            QasanAction::Alloc => {
                h.alloc(a1, a2);
            }
            QasanAction::Dealloc => {
                h.dealloc(a1);
            }
            QasanAction::Enable => {
                h.set_enabled(true);
            }
            QasanAction::Disable => {
                h.set_enabled(false);
            }
            QasanAction::SwapState => {
                h.set_enabled(!h.enabled());
            }
        }
        SyscallHookResult::new(Some(r))
    } else {
        SyscallHookResult::new(None)
    }
}

135
libafl_qemu/src/cmplog.rs Normal file
View File

@ -0,0 +1,135 @@
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
pub use libafl_targets::{
cmplog::__libafl_targets_cmplog_instructions, CmpLogObserver, CMPLOG_MAP, CMPLOG_MAP_W,
};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use crate::{
emu,
executor::QemuExecutor,
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
};
/// Fuzzer-state metadata mapping a comparison instruction's pc to its
/// cmplog map id, so ids stay stable across executions.
#[derive(Default, Serialize, Deserialize)]
pub struct QemuCmpsMapMetadata {
    pub map: HashMap<u64, u64>,
    pub current_id: u64, // next free id (wraps at CMPLOG_MAP_W)
}
impl QemuCmpsMapMetadata {
    /// Empty map, next id 0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            map: HashMap::new(),
            current_id: 0,
        }
    }
}
libafl::impl_serdeany!(QemuCmpsMapMetadata);
/// `QemuHelper` that instruments comparison instructions and logs their
/// operands into the cmplog map.
pub struct QemuCmpLogHelper {
    filter: QemuInstrumentationFilter,
}
impl QemuCmpLogHelper {
    /// Helper with no instrumentation filter (log everything).
    #[must_use]
    pub fn new() -> Self {
        Self {
            filter: QemuInstrumentationFilter::None,
        }
    }

    /// Helper restricted to addresses accepted by `filter`.
    #[must_use]
    pub fn with_instrumentation_filter(filter: QemuInstrumentationFilter) -> Self {
        Self { filter }
    }

    /// Whether `addr` should be instrumented according to the filter.
    #[must_use]
    pub fn must_instrument(&self, addr: u64) -> bool {
        self.filter.allowed(addr)
    }
}
impl Default for QemuCmpLogHelper {
    /// Same as [`QemuCmpLogHelper::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<I, S> QemuHelper<I, S> for QemuCmpLogHelper
where
    I: Input,
    S: HasMetadata,
{
    /// Register the cmp-id generation hook and the per-width cmp execution
    /// hooks with the emulator.
    fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
        executor.hook_cmp_generation(gen_unique_cmp_ids::<I, QT, S>);
        emu::set_exec_cmp8_hook(trace_cmp8_cmplog);
        emu::set_exec_cmp4_hook(trace_cmp4_cmplog);
        emu::set_exec_cmp2_hook(trace_cmp2_cmplog);
        emu::set_exec_cmp1_hook(trace_cmp1_cmplog);
    }
}
/// Generate a stable per-`pc` id for a comparison instruction, allocating a
/// fresh slot in the cmplog map the first time a `pc` is seen.
///
/// Returns `None` when a `QemuCmpLogHelper` is present and its filter
/// rejects `pc`, so the instruction is not instrumented.
pub fn gen_unique_cmp_ids<I, QT, S>(
    helpers: &mut QT,
    state: &mut S,
    pc: u64,
    _size: usize,
) -> Option<u64>
where
    S: HasMetadata,
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    if let Some(h) = helpers.match_first_type::<QemuCmpLogHelper>() {
        if !h.must_instrument(pc) {
            return None;
        }
    }
    // Lazily create the pc -> id map in the fuzzer state.
    if state.metadata().get::<QemuCmpsMapMetadata>().is_none() {
        state.add_metadata(QemuCmpsMapMetadata::new());
    }
    let meta = state
        .metadata_mut()
        .get_mut::<QemuCmpsMapMetadata>()
        .unwrap();
    // Single lookup instead of `contains_key` followed by `get`.
    if let Some(&existing) = meta.map.get(&pc) {
        Some(existing)
    } else {
        let id = meta.current_id as usize;
        // Wrap within the cmplog map width so ids always index in-bounds.
        meta.current_id = ((id + 1) & (CMPLOG_MAP_W - 1)) as u64;
        meta.map.insert(pc, id as u64);
        Some(id as u64)
    }
}
/// Execution hook: log a 1-byte comparison's operands into the cmplog map.
pub extern "C" fn trace_cmp1_cmplog(id: u64, v0: u8, v1: u8) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 1, u64::from(v0), u64::from(v1));
    }
}
/// Execution hook: log a 2-byte comparison's operands into the cmplog map.
pub extern "C" fn trace_cmp2_cmplog(id: u64, v0: u16, v1: u16) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 2, u64::from(v0), u64::from(v1));
    }
}
/// Execution hook: log a 4-byte comparison's operands into the cmplog map.
pub extern "C" fn trace_cmp4_cmplog(id: u64, v0: u32, v1: u32) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 4, u64::from(v0), u64::from(v1));
    }
}
/// Execution hook: log an 8-byte comparison's operands into the cmplog map.
pub extern "C" fn trace_cmp8_cmplog(id: u64, v0: u64, v1: u64) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 8, v0, v1);
    }
}

181
libafl_qemu/src/edges.rs Normal file
View File

@ -0,0 +1,181 @@
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
pub use libafl_targets::{EDGES_MAP, EDGES_MAP_SIZE, MAX_EDGES_NUM};
use serde::{Deserialize, Serialize};
use std::{cell::UnsafeCell, cmp::max, collections::HashMap};
use crate::{
emu,
executor::QemuExecutor,
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
};
/// Fuzzer-state metadata mapping an edge `(src, dest)` to its coverage map
/// id, so ids stay stable across executions.
#[derive(Default, Serialize, Deserialize)]
pub struct QemuEdgesMapMetadata {
    pub map: HashMap<(u64, u64), u64>,
    pub current_id: u64, // next free id (wraps at EDGES_MAP_SIZE)
}
impl QemuEdgesMapMetadata {
    /// Empty map, next id 0.
    #[must_use]
    pub fn new() -> Self {
        Self {
            map: HashMap::new(),
            current_id: 0,
        }
    }
}
libafl::impl_serdeany!(QemuEdgesMapMetadata);
/// `QemuHelper` collecting edge coverage into `EDGES_MAP`.
pub struct QemuEdgeCoverageHelper {
    filter: QemuInstrumentationFilter,
}
impl QemuEdgeCoverageHelper {
    /// Helper with no instrumentation filter (cover everything).
    #[must_use]
    pub fn new() -> Self {
        Self {
            filter: QemuInstrumentationFilter::None,
        }
    }

    /// Helper restricted to addresses accepted by `filter`.
    #[must_use]
    pub fn with_instrumentation_filter(filter: QemuInstrumentationFilter) -> Self {
        Self { filter }
    }

    /// Whether `addr` should be instrumented according to the filter.
    #[must_use]
    pub fn must_instrument(&self, addr: u64) -> bool {
        self.filter.allowed(addr)
    }
}
impl Default for QemuEdgeCoverageHelper {
    /// Same as [`QemuEdgeCoverageHelper::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<I, S> QemuHelper<I, S> for QemuEdgeCoverageHelper
where
    I: Input,
    S: HasMetadata,
{
    /// Register the edge-id generation hook and the hitcount execution hook.
    fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
        executor.hook_edge_generation(gen_unique_edge_ids::<I, QT, S>);
        emu::set_exec_edge_hook(trace_edge_hitcount);
    }
}
// Per-thread previous-block id for AFL-style block-transition hashing.
thread_local!(static PREV_LOC : UnsafeCell<u64> = UnsafeCell::new(0));
/// Cheap 64-bit integer mixer (xorshift-multiply rounds), used to derive
/// pseudo-unique ids from guest addresses.
fn hash_me(mut x: u64) -> u64 {
    x = (x.overflowing_shr(16).0 ^ x).overflowing_mul(0x45d9f3b).0;
    x = (x.overflowing_shr(16).0 ^ x).overflowing_mul(0x45d9f3b).0;
    // Bug fix: the final round was `(x >> 16 ^ x) ^ x`, where the trailing
    // `^ x` cancels out and reduces the whole line to `x >> 16`, throwing
    // away the mixed low bits. The standard finalizer is `(x >> 16) ^ x`.
    x = x.overflowing_shr(16).0 ^ x;
    x
}
/// Generate a stable id for the edge `src -> dest`, allocating a fresh slot
/// the first time an edge is seen and keeping `MAX_EDGES_NUM` up to date.
///
/// Returns `None` when a `QemuEdgeCoverageHelper` is present and its filter
/// rejects both endpoints of the edge.
pub fn gen_unique_edge_ids<I, QT, S>(
    helpers: &mut QT,
    state: &mut S,
    src: u64,
    dest: u64,
) -> Option<u64>
where
    S: HasMetadata,
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    if let Some(h) = helpers.match_first_type::<QemuEdgeCoverageHelper>() {
        if !h.must_instrument(src) && !h.must_instrument(dest) {
            return None;
        }
    }
    // Lazily create the (src, dest) -> id map in the fuzzer state.
    if state.metadata().get::<QemuEdgesMapMetadata>().is_none() {
        state.add_metadata(QemuEdgesMapMetadata::new());
    }
    let meta = state
        .metadata_mut()
        .get_mut::<QemuEdgesMapMetadata>()
        .unwrap();
    // Single lookup instead of `contains_key` followed by `get`.
    if let Some(&id) = meta.map.get(&(src, dest)) {
        let nxt = (id as usize + 1) & (EDGES_MAP_SIZE - 1);
        unsafe {
            MAX_EDGES_NUM = max(MAX_EDGES_NUM, nxt);
        }
        Some(id)
    } else {
        let id = meta.current_id;
        meta.map.insert((src, dest), id);
        // Wrap within the map size so ids always index in-bounds.
        meta.current_id = (id + 1) & (EDGES_MAP_SIZE as u64 - 1);
        unsafe {
            MAX_EDGES_NUM = meta.current_id as usize;
        }
        Some(id)
    }
}
/// Generate a stateless edge id by hashing and xor-combining both endpoints;
/// cheaper than [`gen_unique_edge_ids`] but ids may collide.
///
/// Returns `None` when a `QemuEdgeCoverageHelper` is present and its filter
/// rejects both endpoints.
pub fn gen_hashed_edge_ids<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    src: u64,
    dest: u64,
) -> Option<u64>
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    if let Some(h) = helpers.match_first_type::<QemuEdgeCoverageHelper>() {
        if !h.must_instrument(src) && !h.must_instrument(dest) {
            return None;
        }
    }
    Some(hash_me(src) ^ hash_me(dest))
}
pub extern "C" fn trace_edge_hitcount(id: u64) {
unsafe {
EDGES_MAP[id as usize] += 1;
}
}
/// Execution hook: mark edge `id` as covered (no hitcounts).
pub extern "C" fn trace_edge_single(id: u64) {
    unsafe {
        EDGES_MAP[id as usize] = 1;
    }
}
/// Block-id generation hook: use the block's guest pc directly as id.
pub fn gen_addr_block_ids<I, QT, S>(_helpers: &mut QT, _state: &mut S, pc: u64) -> Option<u64> {
    Some(pc)
}
/// Block-id generation hook: use a hash of the block's guest pc as id.
pub fn gen_hashed_block_ids<I, QT, S>(_helpers: &mut QT, _state: &mut S, pc: u64) -> Option<u64> {
    Some(hash_me(pc))
}
/// Execution hook: AFL-style block-transition coverage — combine the
/// per-thread previous block id with the current one and bump that bucket.
pub extern "C" fn trace_block_transition_hitcount(id: u64) {
    unsafe {
        PREV_LOC.with(|prev_loc| {
            let x = ((*prev_loc.get() ^ id) as usize) & (EDGES_MAP_SIZE - 1);
            // wrapping_add: avoid a debug-build panic when a u8 bucket passes
            // 255; release builds already wrap.
            EDGES_MAP[x] = EDGES_MAP[x].wrapping_add(1);
            // Shift so that A->B and B->A produce different buckets.
            *prev_loc.get() = id.overflowing_shr(1).0;
        });
    }
}
/// Execution hook: AFL-style block-transition coverage without hitcounts —
/// mark the combined bucket as covered.
pub extern "C" fn trace_block_transition_single(id: u64) {
    unsafe {
        PREV_LOC.with(|prev_loc| {
            let x = ((*prev_loc.get() ^ id) as usize) & (EDGES_MAP_SIZE - 1);
            EDGES_MAP[x] = 1;
            // Shift so that A->B and B->A produce different buckets.
            *prev_loc.get() = id.overflowing_shr(1).0;
        });
    }
}

View File

@ -232,6 +232,7 @@ extern "C" {
#[allow(clippy::must_use_candidate, clippy::similar_names)]
pub fn init(args: &[String], env: &[(String, String)]) -> i32 {
assert!(args.len() > 0);
let args: Vec<String> = args.iter().map(|x| x.clone() + "\0").collect();
let argv: Vec<*const u8> = args.iter().map(|x| x.as_bytes().as_ptr()).collect();
assert!(argv.len() < i32::MAX as usize);

View File

@ -15,7 +15,7 @@ use libafl::{
};
pub use crate::emu::SyscallHookResult;
use crate::{emu, emu::SKIP_EXEC_HOOK, helpers::QemuHelperTuple};
use crate::{emu, emu::SKIP_EXEC_HOOK, helper::QemuHelperTuple};
static mut QEMU_HELPERS_PTR: *const c_void = ptr::null();

114
libafl_qemu/src/helper.rs Normal file
View File

@ -0,0 +1,114 @@
use libafl::{
bolts::tuples::MatchFirstType, executors::ExitKind, inputs::Input, observers::ObserversTuple,
};
use std::ops::Range;
use crate::executor::QemuExecutor;
// TODO remove 'static when specialization will be stable
/// A helper hooked into QEMU emulation: implementors register their hooks in
/// `init` and receive callbacks around every target execution.
pub trait QemuHelper<I, S>: 'static
where
    I: Input,
{
    /// Called once when the executor is created; register QEMU hooks here.
    fn init<'a, H, OT, QT>(&self, _executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
    }

    /// Called before each target execution.
    fn pre_exec(&mut self, _input: &I) {}

    /// Called after each target execution.
    fn post_exec(&mut self, _input: &I) {}
}
/// A tuple-list of [`QemuHelper`]s, broadcasting each lifecycle callback to
/// every element.
pub trait QemuHelperTuple<I, S>: MatchFirstType
where
    I: Input,
{
    /// Forward `init` to every helper in the tuple.
    fn init_all<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>;

    /// Forward `pre_exec` to every helper in the tuple.
    fn pre_exec_all(&mut self, input: &I);

    /// Forward `post_exec` to every helper in the tuple.
    fn post_exec_all(&mut self, input: &I);
}
// Base case of the tuple-list recursion: the empty tuple does nothing.
impl<I, S> QemuHelperTuple<I, S> for ()
where
    I: Input,
{
    fn init_all<'a, H, OT, QT>(&self, _executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
    }

    fn pre_exec_all(&mut self, _input: &I) {}

    fn post_exec_all(&mut self, _input: &I) {}
}
// Recursive case: dispatch to the head helper, then recurse into the tail.
impl<Head, Tail, I, S> QemuHelperTuple<I, S> for (Head, Tail)
where
    Head: QemuHelper<I, S>,
    Tail: QemuHelperTuple<I, S>,
    I: Input,
{
    fn init_all<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
        self.0.init(executor);
        self.1.init_all(executor);
    }

    fn pre_exec_all(&mut self, input: &I) {
        self.0.pre_exec(input);
        self.1.pre_exec_all(input);
    }

    fn post_exec_all(&mut self, input: &I) {
        self.0.post_exec(input);
        self.1.post_exec_all(input);
    }
}
/// Address-range filter deciding which guest addresses get instrumented.
pub enum QemuInstrumentationFilter {
    AllowList(Vec<Range<u64>>), // instrument only addresses inside a range
    DenyList(Vec<Range<u64>>),  // instrument everything except these ranges
    None,                       // instrument everything
}
impl QemuInstrumentationFilter {
    /// Whether `addr` may be instrumented under this filter.
    ///
    /// `AllowList` accepts only addresses contained in one of its ranges,
    /// `DenyList` rejects addresses contained in one of its ranges, and
    /// `None` accepts everything.
    #[must_use]
    pub fn allowed(&self, addr: u64) -> bool {
        // Idiom: `Iterator::any` instead of manual for-loops with early return.
        match self {
            QemuInstrumentationFilter::AllowList(l) => l.iter().any(|r| r.contains(&addr)),
            QemuInstrumentationFilter::DenyList(l) => !l.iter().any(|r| r.contains(&addr)),
            QemuInstrumentationFilter::None => true,
        }
    }
}

View File

@ -1,338 +0,0 @@
use std::{collections::HashMap, ops::Range};
use libafl::{
bolts::tuples::MatchFirstType, executors::ExitKind, inputs::Input, observers::ObserversTuple,
state::HasMetadata,
};
use crate::{emu, emu::GuestMaps, executor::QemuExecutor, hooks};
// TODO remove 'static when specialization will be stable
pub trait QemuHelper<I, S>: 'static
where
I: Input,
{
fn init<'a, H, OT, QT>(&self, _executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>,
{
}
fn pre_exec(&mut self, _input: &I) {}
fn post_exec(&mut self, _input: &I) {}
}
pub trait QemuHelperTuple<I, S>: MatchFirstType
where
I: Input,
{
fn init_all<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>;
fn pre_exec_all(&mut self, input: &I);
fn post_exec_all(&mut self, input: &I);
}
impl<I, S> QemuHelperTuple<I, S> for ()
where
I: Input,
{
fn init_all<'a, H, OT, QT>(&self, _executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>,
{
}
fn pre_exec_all(&mut self, _input: &I) {}
fn post_exec_all(&mut self, _input: &I) {}
}
impl<Head, Tail, I, S> QemuHelperTuple<I, S> for (Head, Tail)
where
Head: QemuHelper<I, S>,
Tail: QemuHelperTuple<I, S>,
I: Input,
{
fn init_all<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>,
{
self.0.init(executor);
self.1.init_all(executor);
}
fn pre_exec_all(&mut self, input: &I) {
self.0.pre_exec(input);
self.1.pre_exec_all(input);
}
fn post_exec_all(&mut self, input: &I) {
self.0.post_exec(input);
self.1.post_exec_all(input);
}
}
pub enum QemuInstrumentationFilter {
AllowList(Vec<Range<u64>>),
DenyList(Vec<Range<u64>>),
None,
}
impl QemuInstrumentationFilter {
#[must_use]
pub fn allowed(&self, addr: u64) -> bool {
match self {
QemuInstrumentationFilter::AllowList(l) => {
for rng in l {
if rng.contains(&addr) {
return true;
}
}
false
}
QemuInstrumentationFilter::DenyList(l) => {
for rng in l {
if rng.contains(&addr) {
return false;
}
}
true
}
QemuInstrumentationFilter::None => true,
}
}
}
pub struct QemuEdgeCoverageHelper {
filter: QemuInstrumentationFilter,
}
impl QemuEdgeCoverageHelper {
#[must_use]
pub fn new() -> Self {
Self {
filter: QemuInstrumentationFilter::None,
}
}
#[must_use]
pub fn with_instrumentation_filter(filter: QemuInstrumentationFilter) -> Self {
Self { filter }
}
#[must_use]
pub fn must_instrument(&self, addr: u64) -> bool {
self.filter.allowed(addr)
}
}
impl Default for QemuEdgeCoverageHelper {
fn default() -> Self {
Self::new()
}
}
impl<I, S> QemuHelper<I, S> for QemuEdgeCoverageHelper
where
I: Input,
S: HasMetadata,
{
fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>,
{
executor.hook_edge_generation(hooks::gen_unique_edge_ids::<I, QT, S>);
emu::set_exec_edge_hook(hooks::trace_edge_hitcount);
}
}
pub struct QemuCmpLogHelper {
filter: QemuInstrumentationFilter,
}
impl QemuCmpLogHelper {
#[must_use]
pub fn new() -> Self {
Self {
filter: QemuInstrumentationFilter::None,
}
}
#[must_use]
pub fn with_instrumentation_filter(filter: QemuInstrumentationFilter) -> Self {
Self { filter }
}
#[must_use]
pub fn must_instrument(&self, addr: u64) -> bool {
self.filter.allowed(addr)
}
}
impl Default for QemuCmpLogHelper {
fn default() -> Self {
Self::new()
}
}
impl<I, S> QemuHelper<I, S> for QemuCmpLogHelper
where
I: Input,
S: HasMetadata,
{
fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>,
{
executor.hook_cmp_generation(hooks::gen_unique_cmp_ids::<I, QT, S>);
emu::set_exec_cmp8_hook(hooks::trace_cmp8_cmplog);
emu::set_exec_cmp4_hook(hooks::trace_cmp4_cmplog);
emu::set_exec_cmp2_hook(hooks::trace_cmp2_cmplog);
emu::set_exec_cmp1_hook(hooks::trace_cmp1_cmplog);
}
}
pub const SNAPSHOT_PAGE_SIZE: usize = 4096;
pub struct SnapshotPageInfo {
pub addr: u64,
pub dirty: bool,
pub data: [u8; SNAPSHOT_PAGE_SIZE],
}
// TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html
pub struct QemuSnapshotHelper {
pub access_cache: [u64; 4],
pub access_cache_idx: usize,
pub pages: HashMap<u64, SnapshotPageInfo>,
pub dirty: Vec<u64>,
pub brk: u64,
pub empty: bool,
}
impl QemuSnapshotHelper {
#[must_use]
pub fn new() -> Self {
Self {
access_cache: [u64::MAX; 4],
access_cache_idx: 0,
pages: HashMap::default(),
dirty: vec![],
brk: 0,
empty: true,
}
}
pub fn snapshot(&mut self) {
self.brk = emu::get_brk();
self.pages.clear();
for map in GuestMaps::new() {
// TODO track all the pages OR track mproctect
if !map.flags().is_w() {
continue;
}
let mut addr = map.start();
while addr < map.end() {
let mut info = SnapshotPageInfo {
addr,
dirty: false,
data: [0; SNAPSHOT_PAGE_SIZE],
};
emu::read_mem(addr, &mut info.data);
self.pages.insert(addr, info);
addr += SNAPSHOT_PAGE_SIZE as u64;
}
}
self.empty = false;
}
pub fn page_access(&mut self, page: u64) {
if self.access_cache[0] == page
|| self.access_cache[1] == page
|| self.access_cache[2] == page
|| self.access_cache[3] == page
{
return;
}
self.access_cache[self.access_cache_idx] = page;
self.access_cache_idx = (self.access_cache_idx + 1) & 3;
if let Some(info) = self.pages.get_mut(&page) {
if info.dirty {
return;
}
info.dirty = true;
}
self.dirty.push(page);
}
pub fn access(&mut self, addr: u64, size: usize) {
debug_assert!(size > 0);
let page = addr & (SNAPSHOT_PAGE_SIZE as u64 - 1);
self.page_access(page);
let second_page = (addr + size as u64 - 1) & (SNAPSHOT_PAGE_SIZE as u64 - 1);
if page != second_page {
self.page_access(second_page);
}
}
pub fn reset(&mut self) {
self.access_cache = [u64::MAX; 4];
self.access_cache_idx = 0;
while let Some(page) = self.dirty.pop() {
if let Some(info) = self.pages.get_mut(&page) {
emu::write_mem(page, &info.data);
info.dirty = false;
}
}
emu::set_brk(self.brk);
}
}
impl Default for QemuSnapshotHelper {
fn default() -> Self {
Self::new()
}
}
impl<I, S> QemuHelper<I, S> for QemuSnapshotHelper
where
I: Input,
S: HasMetadata,
{
fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
where
H: FnMut(&I) -> ExitKind,
OT: ObserversTuple<I, S>,
QT: QemuHelperTuple<I, S>,
{
executor.hook_write8_execution(hooks::trace_write8_snapshot::<I, QT, S>);
executor.hook_write4_execution(hooks::trace_write4_snapshot::<I, QT, S>);
executor.hook_write2_execution(hooks::trace_write2_snapshot::<I, QT, S>);
executor.hook_write1_execution(hooks::trace_write1_snapshot::<I, QT, S>);
executor.hook_write_n_execution(hooks::trace_write_n_snapshot::<I, QT, S>);
}
fn pre_exec(&mut self, _input: &I) {
if self.empty {
self.snapshot();
} else {
self.reset();
}
}
}

View File

@ -1,274 +0,0 @@
use core::{cell::UnsafeCell, cmp::max};
use hashbrown::HashMap;
use serde::{Deserialize, Serialize};
use libafl::{inputs::Input, state::HasMetadata};
pub use libafl_targets::{
cmplog::__libafl_targets_cmplog_instructions, CmpLogObserver, CMPLOG_MAP, CMPLOG_MAP_W,
EDGES_MAP, EDGES_MAP_SIZE, MAX_EDGES_NUM,
};
use crate::helpers::{
QemuCmpLogHelper, QemuEdgeCoverageHelper, QemuHelperTuple, QemuSnapshotHelper,
};
#[derive(Default, Serialize, Deserialize)]
pub struct QemuEdgesMapMetadata {
pub map: HashMap<(u64, u64), u64>,
pub current_id: u64,
}
impl QemuEdgesMapMetadata {
#[must_use]
pub fn new() -> Self {
Self {
map: HashMap::new(),
current_id: 0,
}
}
}
libafl::impl_serdeany!(QemuEdgesMapMetadata);
#[derive(Default, Serialize, Deserialize)]
pub struct QemuCmpsMapMetadata {
pub map: HashMap<u64, u64>,
pub current_id: u64,
}
impl QemuCmpsMapMetadata {
#[must_use]
pub fn new() -> Self {
Self {
map: HashMap::new(),
current_id: 0,
}
}
}
libafl::impl_serdeany!(QemuCmpsMapMetadata);
thread_local!(static PREV_LOC : UnsafeCell<u64> = UnsafeCell::new(0));
fn hash_me(mut x: u64) -> u64 {
x = (x.overflowing_shr(16).0 ^ x).overflowing_mul(0x45d9f3b).0;
x = (x.overflowing_shr(16).0 ^ x).overflowing_mul(0x45d9f3b).0;
x = (x.overflowing_shr(16).0 ^ x) ^ x;
x
}
pub fn gen_unique_edge_ids<I, QT, S>(
helpers: &mut QT,
state: &mut S,
src: u64,
dest: u64,
) -> Option<u64>
where
S: HasMetadata,
I: Input,
QT: QemuHelperTuple<I, S>,
{
if let Some(h) = helpers.match_first_type::<QemuEdgeCoverageHelper>() {
if !h.must_instrument(src) && !h.must_instrument(dest) {
return None;
}
}
if state.metadata().get::<QemuEdgesMapMetadata>().is_none() {
state.add_metadata(QemuEdgesMapMetadata::new());
}
let meta = state
.metadata_mut()
.get_mut::<QemuEdgesMapMetadata>()
.unwrap();
if meta.map.contains_key(&(src, dest)) {
let id = *meta.map.get(&(src, dest)).unwrap();
let nxt = (id as usize + 1) & (EDGES_MAP_SIZE - 1);
unsafe {
MAX_EDGES_NUM = max(MAX_EDGES_NUM, nxt);
}
Some(id)
} else {
let id = meta.current_id;
meta.map.insert((src, dest), id);
meta.current_id = (id + 1) & (EDGES_MAP_SIZE as u64 - 1);
unsafe {
MAX_EDGES_NUM = meta.current_id as usize;
}
Some(id as u64)
}
}
pub fn gen_hashed_edge_ids<I, QT, S>(
helpers: &mut QT,
_state: &mut S,
src: u64,
dest: u64,
) -> Option<u64>
where
I: Input,
QT: QemuHelperTuple<I, S>,
{
if let Some(h) = helpers.match_first_type::<QemuEdgeCoverageHelper>() {
if !h.must_instrument(src) && !h.must_instrument(dest) {
return None;
}
}
Some(hash_me(src) ^ hash_me(dest))
}
pub extern "C" fn trace_edge_hitcount(id: u64) {
unsafe {
EDGES_MAP[id as usize] += 1;
}
}
pub extern "C" fn trace_edge_single(id: u64) {
unsafe {
EDGES_MAP[id as usize] = 1;
}
}
pub fn gen_addr_block_ids<I, QT, S>(_helpers: &mut QT, _state: &mut S, pc: u64) -> Option<u64> {
Some(pc)
}
pub fn gen_hashed_block_ids<I, QT, S>(_helpers: &mut QT, _state: &mut S, pc: u64) -> Option<u64> {
Some(hash_me(pc))
}
pub extern "C" fn trace_block_transition_hitcount(id: u64) {
unsafe {
PREV_LOC.with(|prev_loc| {
let x = ((*prev_loc.get() ^ id) as usize) & (EDGES_MAP_SIZE - 1);
EDGES_MAP[x] += 1;
*prev_loc.get() = id.overflowing_shr(1).0;
});
}
}
pub extern "C" fn trace_block_transition_single(id: u64) {
unsafe {
PREV_LOC.with(|prev_loc| {
let x = ((*prev_loc.get() ^ id) as usize) & (EDGES_MAP_SIZE - 1);
EDGES_MAP[x] = 1;
*prev_loc.get() = id.overflowing_shr(1).0;
});
}
}
/// Assign a stable, unique cmplog id to the comparison instruction at `pc`.
///
/// The pc -> id mapping lives in the state's `QemuCmpsMapMetadata`, so the
/// same instruction always gets the same id across executions. Returns
/// `None` when a registered `QemuCmpLogHelper` filters `pc` out.
pub fn gen_unique_cmp_ids<I, QT, S>(
    helpers: &mut QT,
    state: &mut S,
    pc: u64,
    _size: usize,
) -> Option<u64>
where
    S: HasMetadata,
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    if let Some(h) = helpers.match_first_type::<QemuCmpLogHelper>() {
        if !h.must_instrument(pc) {
            return None;
        }
    }
    // Lazily create the metadata on first use.
    if state.metadata().get::<QemuCmpsMapMetadata>().is_none() {
        state.add_metadata(QemuCmpsMapMetadata::new());
    }
    let meta = state
        .metadata_mut()
        .get_mut::<QemuCmpsMapMetadata>()
        .unwrap();
    // Single lookup instead of contains_key + get.
    if let Some(&id) = meta.map.get(&pc) {
        Some(id)
    } else {
        let id = meta.current_id;
        // Wrap around the cmplog map width (a power of two), so ids alias
        // once the map is full instead of growing past it.
        meta.current_id = (id + 1) & (CMPLOG_MAP_W as u64 - 1);
        meta.map.insert(pc, id);
        Some(id)
    }
}
/// Cmplog callback for 1-byte comparisons: forward both operands
/// (zero-extended to 64 bit) to the libafl_targets cmplog map under `id`.
pub extern "C" fn trace_cmp1_cmplog(id: u64, v0: u8, v1: u8) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 1, u64::from(v0), u64::from(v1));
    }
}
/// Cmplog callback for 2-byte comparisons.
pub extern "C" fn trace_cmp2_cmplog(id: u64, v0: u16, v1: u16) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 2, u64::from(v0), u64::from(v1));
    }
}
/// Cmplog callback for 4-byte comparisons.
pub extern "C" fn trace_cmp4_cmplog(id: u64, v0: u32, v1: u32) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 4, u64::from(v0), u64::from(v1));
    }
}
/// Cmplog callback for 8-byte comparisons (operands already 64 bit wide).
pub extern "C" fn trace_cmp8_cmplog(id: u64, v0: u64, v1: u64) {
    unsafe {
        __libafl_targets_cmplog_instructions(id as usize, 8, v0, v1);
    }
}
/// Generic snapshot write hook: record a guest write of `size` bytes at
/// `addr` so the `QemuSnapshotHelper` can mark the touched pages dirty and
/// restore them later.
///
/// Panics if no `QemuSnapshotHelper` is registered in the helper tuple —
/// this hook must only be installed together with that helper.
pub fn trace_write_n_snapshot<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    _id: u64,
    addr: u64,
    size: usize,
) where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    let h = helpers
        .match_first_type_mut::<QemuSnapshotHelper>()
        .unwrap();
    h.access(addr, size);
}

/// Snapshot write hook for 1-byte stores (delegates to the generic hook).
pub fn trace_write1_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    trace_write_n_snapshot::<I, QT, S>(helpers, _state, _id, addr, 1);
}

/// Snapshot write hook for 2-byte stores (delegates to the generic hook).
pub fn trace_write2_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    trace_write_n_snapshot::<I, QT, S>(helpers, _state, _id, addr, 2);
}

/// Snapshot write hook for 4-byte stores (delegates to the generic hook).
pub fn trace_write4_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    trace_write_n_snapshot::<I, QT, S>(helpers, _state, _id, addr, 4);
}

/// Snapshot write hook for 8-byte stores (delegates to the generic hook).
pub fn trace_write8_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    trace_write_n_snapshot::<I, QT, S>(helpers, _state, _id, addr, 8);
}

View File

@ -0,0 +1,3 @@
*.o
*~
interval-tree-test

View File

@ -0,0 +1,20 @@
From interval_tree_generic.h:
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h

View File

@ -0,0 +1,17 @@
#ifndef __INT_COMPILER_H__
#define __INT_COMPILER_H__

/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 */
#ifndef container_of
/* Uses a GNU statement expression; the typeof() temporary makes the
 * expansion warn/fail if @ptr's type does not match @member's type. */
#define container_of(ptr, type, member) ({ \
    const typeof( ((type *)0)->member ) *__mptr = (ptr); \
    (type *)( (char *)__mptr - offsetof(type,member) );})
#endif

#endif /* __INT_COMPILER_H__ */

View File

@ -0,0 +1,2 @@
#include "interval_tree_generic.h"
#include "rbtree.inl"

View File

@ -0,0 +1,193 @@
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h
*/
#include <stdbool.h>
#include "rbtree_augmented.h"
/*
* Template for implementing interval trees
*
* ITSTRUCT: struct type of the interval tree nodes
* ITRB: name of struct rb_node field within ITSTRUCT
* ITTYPE: type of the interval endpoints
* ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
* ITSTART(n): start endpoint of ITSTRUCT node n
* ITLAST(n): last endpoint of ITSTRUCT node n
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
* Note - before using this, please consider if non-generic version
* (interval_tree.h) would work for you...
*/
#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \
ITSTART, ITLAST, ITSTATIC, ITPREFIX) \
\
/* Callbacks for augmented rbtree insert and remove */ \
\
static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
{ \
ITTYPE max = ITLAST(node), subtree_last; \
if (node->ITRB.rb_left) { \
subtree_last = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
if (node->ITRB.rb_right) { \
subtree_last = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
return max; \
} \
\
RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
\
/* Insert / remove interval nodes from the tree */ \
\
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
{ \
struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
\
while (*link) { \
rb_parent = *link; \
parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
if (parent->ITSUBTREE < last) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
else \
link = &parent->ITRB.rb_right; \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
{ \
rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
* Iterate over intervals intersecting [start;last] \
* \
* Note that a node's interval intersects [start;last] iff: \
* Cond1: ITSTART(node) <= last \
* and \
* Cond2: start <= ITLAST(node) \
*/ \
\
static ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
/* \
* Loop invariant: start <= node->ITSUBTREE \
* (Cond2 is satisfied by one of the subtree nodes) \
*/ \
if (node->ITRB.rb_left) { \
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB); \
if (start <= left->ITSUBTREE) { \
/* \
* Some nodes in left subtree satisfy Cond2. \
* Iterate to find the leftmost such node N. \
* If it also satisfies Cond1, that's the \
* match we are looking for. Otherwise, there \
* is no matching interval as nodes to the \
* right of N can't satisfy Cond1 either. \
*/ \
node = left; \
continue; \
} \
} \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
if (node->ITRB.rb_right) { \
node = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB); \
if (start <= node->ITSUBTREE) \
continue; \
} \
} \
return NULL; /* No match */ \
} \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
{ \
ITSTRUCT *node; \
\
if (!root->rb_node) \
return NULL; \
node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
struct rb_node *rb = node->ITRB.rb_right, *prev; \
\
while (true) { \
/* \
* Loop invariants: \
* Cond1: ITSTART(node) <= last \
* rb == node->ITRB.rb_right \
* \
* First, search right subtree if suitable \
*/ \
if (rb) { \
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
if (start <= right->ITSUBTREE) \
return ITPREFIX ## _subtree_search(right, \
start, last); \
} \
\
/* Move up the tree until we come from a node's left child */ \
do { \
rb = rb_parent(&node->ITRB); \
if (!rb) \
return NULL; \
prev = &node->ITRB; \
node = rb_entry(rb, ITSTRUCT, ITRB); \
rb = node->ITRB.rb_right; \
} while (prev == rb); \
\
/* Check if the node intersects [start;last] */ \
if (last < ITSTART(node)) /* !Cond1 */ \
return NULL; \
else if (start <= ITLAST(node)) /* Cond2 */ \
return node; \
} \
}

View File

@ -0,0 +1,108 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree.h
To use rbtrees you'll have to implement your own insert and search cores.
This will avoid us to use callbacks and to drop drammatically performances.
I know it's not the cleaner way, but in C (not in C++) to get
performances and genericity...
See Documentation/rbtree.txt for documentation and samples.
*/
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H

#include <stddef.h>

#include "compiler.h"

/*
 * Node embedded into user structures. The parent pointer and the 1-bit
 * node color are packed into __rb_parent_color: the alignment attribute
 * guarantees the two low bits of a node address are free for the color.
 */
struct rb_node {
    unsigned long __rb_parent_color;
    struct rb_node *rb_right;
    struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
    struct rb_node *rb_node;
};

/* Strip the low color bits to recover the parent pointer. */
#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
    ((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
    ((node)->__rb_parent_color = (unsigned long)(node))

extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);

/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
                struct rb_root *root);

/*
 * Link @node into the child slot @rb_link under @parent. The new node has
 * no children and its color bits are 0; callers must follow up with
 * rb_insert_color() (or rb_insert_augmented()) to rebalance.
 */
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
                struct rb_node ** rb_link)
{
    node->__rb_parent_color = (unsigned long)parent;
    node->rb_left = node->rb_right = NULL;

    *rb_link = node;
}

/* NULL-safe rb_entry(): evaluates @ptr exactly once. */
#define rb_entry_safe(ptr, type, member) \
    ({ typeof(ptr) ____ptr = (ptr); \
       ____ptr ? rb_entry(____ptr, type, member) : NULL; \
    })

/**
 * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
 * given type safe against removal of rb_node entry
 *
 * @pos:    the 'type *' to use as a loop cursor.
 * @n:      another 'type *' to use as temporary storage
 * @root:   'rb_root *' of the rbtree.
 * @field:  the name of the rb_node field within 'type'.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
    for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
         pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
            typeof(*pos), field); 1; }); \
         pos = n)

#endif  /* _LINUX_RBTREE_H */

View File

@ -0,0 +1,549 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/lib/rbtree.c
*/
#include <stdbool.h>
#include "rbtree_augmented.h"
/*
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
*
* 1) A node is either red or black
* 2) The root is black
* 3) All leaves (NULL) are black
* 4) Both children of every red node are black
* 5) Every simple path from root to leaves contains the same number
* of black nodes.
*
* 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
* consecutive red nodes in a path and every red node is therefore followed by
* a black. So if B is the number of black nodes on every simple path (as per
* 5), then the longest possible path due to 4 is 2B.
*
* We shall indicate color with case, where black nodes are uppercase and red
* nodes will be lowercase. Unknown color nodes shall be drawn as red within
* parentheses and have some accompanying text comment.
*/
/* Mark @rb black by setting the color bit in the parent/color word. */
static inline void rb_set_black(struct rb_node *rb)
{
    rb->__rb_parent_color |= RB_BLACK;
}

/*
 * Fast parent fetch for a node known to be red: a red node's color bit is
 * 0, so the packed parent/color word is exactly the parent pointer.
 */
static inline struct rb_node *rb_red_parent(struct rb_node *red)
{
    return (struct rb_node *)red->__rb_parent_color;
}

/*
 * Helper function for rotations:
 * - old's parent and color get assigned to new
 * - old gets assigned new as a parent and 'color' as a color.
 */
static inline void
__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
            struct rb_root *root, int color)
{
    struct rb_node *parent = rb_parent(old);
    new->__rb_parent_color = old->__rb_parent_color;
    rb_set_parent_color(old, new, color);
    __rb_change_child(old, new, parent, root);
}

/*
 * Bottom-up rebalance after inserting the (red) @node; @augment_rotate is
 * invoked on every rotation so augmented per-subtree data stays valid.
 */
static inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
    struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;

    while (true) {
        /*
         * Loop invariant: node is red
         *
         * If there is a black parent, we are done.
         * Otherwise, take some corrective action as we don't
         * want a red root or two consecutive red nodes.
         */
        if (!parent) {
            rb_set_parent_color(node, NULL, RB_BLACK);
            break;
        } else if (rb_is_black(parent))
            break;

        gparent = rb_red_parent(parent);

        tmp = gparent->rb_right;
        if (parent != tmp) {    /* parent == gparent->rb_left */
            if (tmp && rb_is_red(tmp)) {
                /*
                 * Case 1 - color flips
                 *
                 *       G            g
                 *      / \          / \
                 *     p   u  -->   P   U
                 *    /            /
                 *   n            n
                 *
                 * However, since g's parent might be red, and
                 * 4) does not allow this, we need to recurse
                 * at g.
                 */
                rb_set_parent_color(tmp, gparent, RB_BLACK);
                rb_set_parent_color(parent, gparent, RB_BLACK);
                node = gparent;
                parent = rb_parent(node);
                rb_set_parent_color(node, parent, RB_RED);
                continue;
            }

            tmp = parent->rb_right;
            if (node == tmp) {
                /*
                 * Case 2 - left rotate at parent
                 *
                 *      G             G
                 *     / \           / \
                 *    p   U  -->    n   U
                 *     \           /
                 *      n         p
                 *
                 * This still leaves us in violation of 4), the
                 * continuation into Case 3 will fix that.
                 */
                parent->rb_right = tmp = node->rb_left;
                node->rb_left = parent;
                if (tmp)
                    rb_set_parent_color(tmp, parent,
                                RB_BLACK);
                rb_set_parent_color(parent, node, RB_RED);
                augment_rotate(parent, node);
                parent = node;
                tmp = node->rb_right;
            }

            /*
             * Case 3 - right rotate at gparent
             *
             *        G           P
             *       / \         / \
             *      p   U  -->  n   g
             *     /                 \
             *    n                   U
             */
            gparent->rb_left = tmp;  /* == parent->rb_right */
            parent->rb_right = gparent;
            if (tmp)
                rb_set_parent_color(tmp, gparent, RB_BLACK);
            __rb_rotate_set_parents(gparent, parent, root, RB_RED);
            augment_rotate(gparent, parent);
            break;
        } else {
            /* Mirror image of the above: parent is a right child. */
            tmp = gparent->rb_left;
            if (tmp && rb_is_red(tmp)) {
                /* Case 1 - color flips */
                rb_set_parent_color(tmp, gparent, RB_BLACK);
                rb_set_parent_color(parent, gparent, RB_BLACK);
                node = gparent;
                parent = rb_parent(node);
                rb_set_parent_color(node, parent, RB_RED);
                continue;
            }

            tmp = parent->rb_left;
            if (node == tmp) {
                /* Case 2 - right rotate at parent */
                parent->rb_left = tmp = node->rb_right;
                node->rb_right = parent;
                if (tmp)
                    rb_set_parent_color(tmp, parent,
                                RB_BLACK);
                rb_set_parent_color(parent, node, RB_RED);
                augment_rotate(parent, node);
                parent = node;
                tmp = node->rb_left;
            }

            /* Case 3 - left rotate at gparent */
            gparent->rb_right = tmp;  /* == parent->rb_left */
            parent->rb_left = gparent;
            if (tmp)
                rb_set_parent_color(tmp, gparent, RB_BLACK);
            __rb_rotate_set_parents(gparent, parent, root, RB_RED);
            augment_rotate(gparent, parent);
            break;
        }
    }
}
/*
* Inline version for rb_erase() use - we want to be able to inline
* and eliminate the dummy_rotate callback there
*/
/*
 * Rebalance after erasing a black node; @parent is the parent of the
 * removed (or doubly-black) position. Inlined so that the dummy_rotate
 * callback can be eliminated in the non-augmented rb_erase() path.
 */
static inline void
____rb_erase_color(struct rb_node *parent, struct rb_root *root,
    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
    struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;

    while (true) {
        /*
         * Loop invariants:
         * - node is black (or NULL on first iteration)
         * - node is not the root (parent is not NULL)
         * - All leaf paths going through parent and node have a
         *   black node count that is 1 lower than other leaf paths.
         */
        sibling = parent->rb_right;
        if (node != sibling) {  /* node == parent->rb_left */
            if (rb_is_red(sibling)) {
                /*
                 * Case 1 - left rotate at parent
                 *
                 *     P               S
                 *    / \             / \
                 *   N   s    -->    p   Sr
                 *      / \         / \
                 *     Sl  Sr      N   Sl
                 */
                parent->rb_right = tmp1 = sibling->rb_left;
                sibling->rb_left = parent;
                rb_set_parent_color(tmp1, parent, RB_BLACK);
                __rb_rotate_set_parents(parent, sibling, root,
                            RB_RED);
                augment_rotate(parent, sibling);
                sibling = tmp1;
            }
            tmp1 = sibling->rb_right;
            if (!tmp1 || rb_is_black(tmp1)) {
                tmp2 = sibling->rb_left;
                if (!tmp2 || rb_is_black(tmp2)) {
                    /*
                     * Case 2 - sibling color flip
                     * (p could be either color here)
                     *
                     *    (p)           (p)
                     *    / \           / \
                     *   N   S    -->  N   s
                     *      / \           / \
                     *     Sl  Sr        Sl  Sr
                     *
                     * This leaves us violating 5) which
                     * can be fixed by flipping p to black
                     * if it was red, or by recursing at p.
                     * p is red when coming from Case 1.
                     */
                    rb_set_parent_color(sibling, parent,
                                RB_RED);
                    if (rb_is_red(parent))
                        rb_set_black(parent);
                    else {
                        node = parent;
                        parent = rb_parent(node);
                        if (parent)
                            continue;
                    }
                    break;
                }
                /*
                 * Case 3 - right rotate at sibling
                 * (p could be either color here)
                 *
                 *   (p)           (p)
                 *   / \           / \
                 *  N   S    -->  N   Sl
                 *     / \             \
                 *    sl  Sr            s
                 *                       \
                 *                        Sr
                 */
                sibling->rb_left = tmp1 = tmp2->rb_right;
                tmp2->rb_right = sibling;
                parent->rb_right = tmp2;
                if (tmp1)
                    rb_set_parent_color(tmp1, sibling,
                                RB_BLACK);
                augment_rotate(sibling, tmp2);
                tmp1 = sibling;
                sibling = tmp2;
            }
            /*
             * Case 4 - left rotate at parent + color flips
             * (p and sl could be either color here.
             *  After rotation, p becomes black, s acquires
             *  p's color, and sl keeps its color)
             *
             *      (p)             (s)
             *      / \             / \
             *     N   S     -->   P   Sr
             *        / \         / \
             *      (sl) sr      N  (sl)
             */
            parent->rb_right = tmp2 = sibling->rb_left;
            sibling->rb_left = parent;
            rb_set_parent_color(tmp1, sibling, RB_BLACK);
            if (tmp2)
                rb_set_parent(tmp2, parent);
            __rb_rotate_set_parents(parent, sibling, root,
                        RB_BLACK);
            augment_rotate(parent, sibling);
            break;
        } else {
            /* Mirror image: node is the right child of parent. */
            sibling = parent->rb_left;
            if (rb_is_red(sibling)) {
                /* Case 1 - right rotate at parent */
                parent->rb_left = tmp1 = sibling->rb_right;
                sibling->rb_right = parent;
                rb_set_parent_color(tmp1, parent, RB_BLACK);
                __rb_rotate_set_parents(parent, sibling, root,
                            RB_RED);
                augment_rotate(parent, sibling);
                sibling = tmp1;
            }
            tmp1 = sibling->rb_left;
            if (!tmp1 || rb_is_black(tmp1)) {
                tmp2 = sibling->rb_right;
                if (!tmp2 || rb_is_black(tmp2)) {
                    /* Case 2 - sibling color flip */
                    rb_set_parent_color(sibling, parent,
                                RB_RED);
                    if (rb_is_red(parent))
                        rb_set_black(parent);
                    else {
                        node = parent;
                        parent = rb_parent(node);
                        if (parent)
                            continue;
                    }
                    break;
                }
                /* Case 3 - right rotate at sibling */
                sibling->rb_right = tmp1 = tmp2->rb_left;
                tmp2->rb_left = sibling;
                parent->rb_left = tmp2;
                if (tmp1)
                    rb_set_parent_color(tmp1, sibling,
                                RB_BLACK);
                augment_rotate(sibling, tmp2);
                tmp1 = sibling;
                sibling = tmp2;
            }
            /* Case 4 - left rotate at parent + color flips */
            parent->rb_left = tmp2 = sibling->rb_right;
            sibling->rb_right = parent;
            rb_set_parent_color(tmp1, sibling, RB_BLACK);
            if (tmp2)
                rb_set_parent(tmp2, parent);
            __rb_rotate_set_parents(parent, sibling, root,
                        RB_BLACK);
            augment_rotate(parent, sibling);
            break;
        }
    }
}

/* Non-inline version for rb_erase_augmented() use */
void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
    ____rb_erase_color(parent, root, augment_rotate);
}

/*
 * Non-augmented rbtree manipulation functions.
 *
 * We use dummy augmented callbacks here, and have the compiler optimize them
 * out of the rb_insert_color() and rb_erase() function definitions.
 */

static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}

static const struct rb_augment_callbacks dummy_callbacks = {
    dummy_propagate, dummy_copy, dummy_rotate
};

/* Public non-augmented insert rebalance entry point. */
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
    __rb_insert(node, root, dummy_rotate);
}

/* Public non-augmented erase entry point. */
void rb_erase(struct rb_node *node, struct rb_root *root)
{
    struct rb_node *rebalance;
    rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
    if (rebalance)
        ____rb_erase_color(rebalance, root, dummy_rotate);
}

/*
 * Augmented rbtree manipulation functions.
 *
 * This instantiates the same __always_inline functions as in the non-augmented
 * case, but this time with user-defined callbacks.
 */

void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
    void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
    __rb_insert(node, root, augment_rotate);
}
/*
* This function returns the first node (in sort order) of the tree.
*/
/* Return the leftmost (smallest) node of the tree, or NULL if empty. */
struct rb_node *rb_first(const struct rb_root *root)
{
    struct rb_node *n;

    for (n = root->rb_node; n && n->rb_left; n = n->rb_left)
        ;
    return n;
}

/* Return the rightmost (largest) node of the tree, or NULL if empty. */
struct rb_node *rb_last(const struct rb_root *root)
{
    struct rb_node *n;

    for (n = root->rb_node; n && n->rb_right; n = n->rb_right)
        ;
    return n;
}
/* In-order successor of @node, or NULL if @node is the last node. */
struct rb_node *rb_next(const struct rb_node *node)
{
    struct rb_node *parent;

    if (RB_EMPTY_NODE(node))
        return NULL;

    /*
     * If we have a right-hand child, go down and then left as far
     * as we can.
     */
    if (node->rb_right) {
        node = node->rb_right;
        while (node->rb_left)
            node=node->rb_left;
        return (struct rb_node *)node;
    }

    /*
     * No right-hand children. Everything down and left is smaller than us,
     * so any 'next' node must be in the general direction of our parent.
     * Go up the tree; any time the ancestor is a right-hand child of its
     * parent, keep going up. First time it's a left-hand child of its
     * parent, said parent is our 'next' node.
     */
    while ((parent = rb_parent(node)) && node == parent->rb_right)
        node = parent;

    return parent;
}

/* In-order predecessor of @node, or NULL if @node is the first node. */
struct rb_node *rb_prev(const struct rb_node *node)
{
    struct rb_node *parent;

    if (RB_EMPTY_NODE(node))
        return NULL;

    /*
     * If we have a left-hand child, go down and then right as far
     * as we can.
     */
    if (node->rb_left) {
        node = node->rb_left;
        while (node->rb_right)
            node=node->rb_right;
        return (struct rb_node *)node;
    }

    /*
     * No left-hand children. Go up till we find an ancestor which
     * is a right-hand child of its parent.
     */
    while ((parent = rb_parent(node)) && node == parent->rb_left)
        node = parent;

    return parent;
}

/*
 * Replace @victim with @new in place, without rebalancing. @new must hold
 * the same key ordering position; @victim is left with dangling links.
 */
void rb_replace_node(struct rb_node *victim, struct rb_node *new,
             struct rb_root *root)
{
    struct rb_node *parent = rb_parent(victim);

    /* Set the surrounding nodes to point to the replacement */
    __rb_change_child(victim, new, parent, root);
    if (victim->rb_left)
        rb_set_parent(victim->rb_left, new);
    if (victim->rb_right)
        rb_set_parent(victim->rb_right, new);

    /* Copy the pointers/colour from the victim to the replacement */
    *new = *victim;
}

/* Descend to the deepest node, preferring left children at each step. */
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
    for (;;) {
        if (node->rb_left)
            node = node->rb_left;
        else if (node->rb_right)
            node = node->rb_right;
        else
            return (struct rb_node *)node;
    }
}

/* Post-order successor of @node (children always visited before parent). */
struct rb_node *rb_next_postorder(const struct rb_node *node)
{
    const struct rb_node *parent;
    if (!node)
        return NULL;
    parent = rb_parent(node);

    /* If we're sitting on node, we've already seen our children */
    if (parent && node == parent->rb_left && parent->rb_right) {
        /* If we are the parent's left node, go to the parent's right
         * node then all the way down to the left */
        return rb_left_deepest_node(parent->rb_right);
    } else
        /* Otherwise we are the parent's right node, and the parent
         * should be next */
        return (struct rb_node *)parent;
}

/* First node of a post-order walk, or NULL for an empty tree. */
struct rb_node *rb_first_postorder(const struct rb_root *root)
{
    if (!root->rb_node)
        return NULL;

    return rb_left_deepest_node(root->rb_node);
}

View File

@ -0,0 +1,245 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree_augmented.h
*/
#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H
#include <stddef.h>
#include "compiler.h"
#include "rbtree.h"
/*
* Please note - only struct rb_augment_callbacks and the prototypes for
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
* The rest are implementation details you are not expected to depend on.
*
* See Documentation/rbtree.txt for documentation and samples.
*/
/*
 * User-supplied callbacks that keep per-subtree augmented data (e.g. a
 * subtree maximum) consistent across tree mutations.
 */
struct rb_augment_callbacks {
    /* Recompute augmented data from @node up to (excluding) @stop. */
    void (*propagate)(struct rb_node *node, struct rb_node *stop);
    /* Copy augmented data from @old to @new. */
    void (*copy)(struct rb_node *old, struct rb_node *new);
    /* Fix augmented data after a rotation of @old and @new. */
    void (*rotate)(struct rb_node *old, struct rb_node *new);
};

extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
    void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

/*
 * Fixup the rbtree and update the augmented information when rebalancing.
 *
 * On insertion, the user must update the augmented information on the path
 * leading to the inserted node, then call rb_link_node() as usual and
 * rb_augment_inserted() instead of the usual rb_insert_color() call.
 * If rb_augment_inserted() rebalances the rbtree, it will callback into
 * a user provided function to update the augmented information on the
 * affected subtrees.
 */
static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
            const struct rb_augment_callbacks *augment)
{
    __rb_insert_augmented(node, root, augment->rotate);
}

/*
 * Generates the three augment callbacks (propagate/copy/rotate) plus the
 * rb_augment_callbacks instance @rbname for a node struct @rbstruct whose
 * augmented field @rbaugmented is recomputed by @rbcompute.
 */
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield,   \
                 rbtype, rbaugmented, rbcompute)                    \
static inline void                                                  \
rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)      \
{                                                                   \
    while (rb != stop) {                                            \
        rbstruct *node = rb_entry(rb, rbstruct, rbfield);           \
        rbtype augmented = rbcompute(node);                         \
        if (node->rbaugmented == augmented)                         \
            break;                                                  \
        node->rbaugmented = augmented;                              \
        rb = rb_parent(&node->rbfield);                             \
    }                                                               \
}                                                                   \
static inline void                                                  \
rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)     \
{                                                                   \
    rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
    rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
    new->rbaugmented = old->rbaugmented;                            \
}                                                                   \
static void                                                         \
rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)   \
{                                                                   \
    rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \
    rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \
    new->rbaugmented = old->rbaugmented;                            \
    old->rbaugmented = rbcompute(old);                              \
}                                                                   \
rbstatic const struct rb_augment_callbacks rbname = {               \
    rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \
};

#define RB_RED      0
#define RB_BLACK    1

/* Accessors for the packed parent-pointer/color word. */
#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))

#define __rb_color(pc)     ((pc) & 1)
#define __rb_is_black(pc)  __rb_color(pc)
#define __rb_is_red(pc)    (!__rb_color(pc))
#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)

/* Set @rb's parent to @p, keeping @rb's current color. */
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
    rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
}

/* Set @rb's parent and color in one store. */
static inline void rb_set_parent_color(struct rb_node *rb,
                       struct rb_node *p, int color)
{
    rb->__rb_parent_color = (unsigned long)p | color;
}

/* Repoint @parent's (or the root's) child link from @old to @new. */
static inline void
__rb_change_child(struct rb_node *old, struct rb_node *new,
          struct rb_node *parent, struct rb_root *root)
{
    if (parent) {
        if (parent->rb_left == old)
            parent->rb_left = new;
        else
            parent->rb_right = new;
    } else
        root->rb_node = new;
}

extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
    void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

/*
 * Unlink @node from the tree, keeping augmented data consistent. Returns
 * the node at which recoloring must start, or NULL when no rebalance is
 * needed (a black-height violation only arises when a black node was
 * effectively removed).
 */
static inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
             const struct rb_augment_callbacks *augment)
{
    struct rb_node *child = node->rb_right, *tmp = node->rb_left;
    struct rb_node *parent, *rebalance;
    unsigned long pc;

    if (!tmp) {
        /*
         * Case 1: node to erase has no more than 1 child (easy!)
         *
         * Note that if there is one child it must be red due to 5)
         * and node must be black due to 4). We adjust colors locally
         * so as to bypass __rb_erase_color() later on.
         */
        pc = node->__rb_parent_color;
        parent = __rb_parent(pc);
        __rb_change_child(node, child, parent, root);
        if (child) {
            child->__rb_parent_color = pc;
            rebalance = NULL;
        } else
            rebalance = __rb_is_black(pc) ? parent : NULL;
        tmp = parent;
    } else if (!child) {
        /* Still case 1, but this time the child is node->rb_left */
        tmp->__rb_parent_color = pc = node->__rb_parent_color;
        parent = __rb_parent(pc);
        __rb_change_child(node, tmp, parent, root);
        rebalance = NULL;
        tmp = parent;
    } else {
        struct rb_node *successor = child, *child2;
        tmp = child->rb_left;
        if (!tmp) {
            /*
             * Case 2: node's successor is its right child
             *
             *    (n)          (s)
             *    / \          / \
             *  (x) (s)  ->  (x) (c)
             *        \
             *        (c)
             */
            parent = successor;
            child2 = successor->rb_right;
            augment->copy(node, successor);
        } else {
            /*
             * Case 3: node's successor is leftmost under
             * node's right child subtree
             *
             *    (n)          (s)
             *    / \          / \
             *  (x) (y)  ->  (x) (y)
             *      /            /
             *    (p)          (p)
             *    /            /
             *  (s)          (c)
             *    \
             *    (c)
             */
            do {
                parent = successor;
                successor = tmp;
                tmp = tmp->rb_left;
            } while (tmp);
            parent->rb_left = child2 = successor->rb_right;
            successor->rb_right = child;
            rb_set_parent(child, successor);
            augment->copy(node, successor);
            augment->propagate(parent, successor);
        }

        successor->rb_left = tmp = node->rb_left;
        rb_set_parent(tmp, successor);

        pc = node->__rb_parent_color;
        tmp = __rb_parent(pc);
        __rb_change_child(node, successor, tmp, root);
        if (child2) {
            successor->__rb_parent_color = pc;
            rb_set_parent_color(child2, parent, RB_BLACK);
            rebalance = NULL;
        } else {
            unsigned long pc2 = successor->__rb_parent_color;
            successor->__rb_parent_color = pc;
            rebalance = __rb_is_black(pc2) ? parent : NULL;
        }
        tmp = successor;
    }

    augment->propagate(tmp, NULL);
    return rebalance;
}

/* Public augmented erase: unlink @node, then recolor if needed. */
static inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
           const struct rb_augment_callbacks *augment)
{
    struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
    if (rebalance)
        __rb_erase_color(rebalance, root, augment->rotate);
}
#endif /* _LINUX_RBTREE_AUGMENTED_H */

View File

@ -6,8 +6,28 @@ pub mod arm;
pub mod x86;
pub mod elf;
#[cfg(target_os = "linux")]
pub mod hooks;
pub mod helper;
#[cfg(target_os = "linux")]
pub use helper::*;
#[cfg(target_os = "linux")]
pub mod edges;
#[cfg(target_os = "linux")]
pub use edges::QemuEdgeCoverageHelper;
#[cfg(target_os = "linux")]
pub mod cmplog;
#[cfg(target_os = "linux")]
pub use cmplog::QemuCmpLogHelper;
#[cfg(target_os = "linux")]
pub mod snapshot;
#[cfg(target_os = "linux")]
pub use snapshot::QemuSnapshotHelper;
#[cfg(target_os = "linux")]
pub mod asan;
#[cfg(target_os = "linux")]
pub use asan::{init_with_asan, QemuAsanHelper};
#[cfg(target_os = "linux")]
pub mod executor;
@ -19,11 +39,6 @@ pub mod emu;
#[cfg(target_os = "linux")]
pub use emu::*;
#[cfg(target_os = "linux")]
pub mod helpers;
#[cfg(target_os = "linux")]
pub use helpers::*;
#[must_use]
pub fn filter_qemu_args() -> Vec<String> {
let mut args = vec![env::args().next().unwrap()];

198
libafl_qemu/src/snapshot.rs Normal file
View File

@ -0,0 +1,198 @@
use libafl::{executors::ExitKind, inputs::Input, observers::ObserversTuple, state::HasMetadata};
use std::collections::HashMap;
use crate::{
emu,
emu::GuestMaps,
executor::QemuExecutor,
helper::{QemuHelper, QemuHelperTuple},
};
/// Size in bytes of a tracked guest page (assumes 4 KiB pages — TODO confirm
/// this matches the emulated target's page size).
pub const SNAPSHOT_PAGE_SIZE: usize = 4096;

/// Saved content and dirty state of one guest memory page.
pub struct SnapshotPageInfo {
    // Page start address in the guest address space
    pub addr: u64,
    // True once the page has been written since the snapshot was taken
    pub dirty: bool,
    // Copy of the page content captured at snapshot time
    pub data: [u8; SNAPSHOT_PAGE_SIZE],
}
// TODO be thread-safe maybe with https://amanieu.github.io/thread_local-rs/thread_local/index.html
/// QEMU helper that snapshots the guest's writable memory before the first
/// execution and restores only the dirtied pages before every later one.
pub struct QemuSnapshotHelper {
    // Small cache of the last 4 page addresses seen by `page_access`,
    // used to skip redundant lookups; `u64::MAX` marks an empty slot
    pub access_cache: [u64; 4],
    // Next cache slot to overwrite (round-robin over the 4 entries)
    pub access_cache_idx: usize,
    // Saved page contents, keyed by page address
    pub pages: HashMap<u64, SnapshotPageInfo>,
    // Pages recorded as written since the snapshot; drained by `reset`
    pub dirty: Vec<u64>,
    // Program break captured at snapshot time, restored on `reset`
    pub brk: u64,
    // True until the first snapshot has been taken
    pub empty: bool,
}
impl QemuSnapshotHelper {
    /// Create an empty helper; the snapshot itself is taken lazily on the
    /// first `pre_exec`.
    #[must_use]
    pub fn new() -> Self {
        Self {
            access_cache: [u64::MAX; 4],
            access_cache_idx: 0,
            pages: HashMap::default(),
            dirty: vec![],
            brk: 0,
            empty: true,
        }
    }

    /// Capture the current program break and the content of every writable
    /// guest mapping, page by page.
    pub fn snapshot(&mut self) {
        self.brk = emu::get_brk();
        self.pages.clear();
        for map in GuestMaps::new() {
            // TODO track all the pages OR track mproctect
            if !map.flags().is_w() {
                continue;
            }
            let mut addr = map.start();
            while addr < map.end() {
                let mut info = SnapshotPageInfo {
                    addr,
                    dirty: false,
                    data: [0; SNAPSHOT_PAGE_SIZE],
                };
                emu::read_mem(addr, &mut info.data);
                // Pages are keyed by their page-aligned start address.
                self.pages.insert(addr, info);
                addr += SNAPSHOT_PAGE_SIZE as u64;
            }
        }
        self.empty = false;
    }

    /// Mark the page starting at `page` (page-aligned address) as dirty so it
    /// gets restored on the next `reset`.
    pub fn page_access(&mut self, page: u64) {
        // Fast path: recently-seen page, already recorded.
        if self.access_cache[0] == page
            || self.access_cache[1] == page
            || self.access_cache[2] == page
            || self.access_cache[3] == page
        {
            return;
        }
        self.access_cache[self.access_cache_idx] = page;
        self.access_cache_idx = (self.access_cache_idx + 1) & 3;
        if let Some(info) = self.pages.get_mut(&page) {
            if info.dirty {
                return;
            }
            info.dirty = true;
        }
        // Untracked pages are pushed too; `reset` simply skips them.
        self.dirty.push(page);
    }

    /// Record a write of `size` bytes at guest address `addr`, marking the one
    /// or two pages it touches as dirty.
    pub fn access(&mut self, addr: u64, size: usize) {
        debug_assert!(size > 0);
        // Align DOWN to the page base: `pages` is keyed by page-aligned
        // addresses. (`addr & (SIZE - 1)` would yield the in-page offset
        // instead, so no tracked page would ever match.)
        let page = addr & !(SNAPSHOT_PAGE_SIZE as u64 - 1);
        self.page_access(page);
        // A write may straddle a page boundary; check the page of its last byte.
        let second_page = (addr + size as u64 - 1) & !(SNAPSHOT_PAGE_SIZE as u64 - 1);
        if page != second_page {
            self.page_access(second_page);
        }
    }

    /// Restore every dirtied page from the snapshot, clear the access cache
    /// and restore the saved program break.
    pub fn reset(&mut self) {
        self.access_cache = [u64::MAX; 4];
        self.access_cache_idx = 0;
        while let Some(page) = self.dirty.pop() {
            if let Some(info) = self.pages.get_mut(&page) {
                emu::write_mem(page, &info.data);
                info.dirty = false;
            }
        }
        emu::set_brk(self.brk);
    }
}
impl Default for QemuSnapshotHelper {
fn default() -> Self {
Self::new()
}
}
impl<I, S> QemuHelper<I, S> for QemuSnapshotHelper
where
    I: Input,
    S: HasMetadata,
{
    /// Register the write-tracking hooks so every guest write updates the
    /// helper's dirty-page bookkeeping.
    fn init<'a, H, OT, QT>(&self, executor: &QemuExecutor<'a, H, I, OT, QT, S>)
    where
        H: FnMut(&I) -> ExitKind,
        OT: ObserversTuple<I, S>,
        QT: QemuHelperTuple<I, S>,
    {
        executor.hook_write8_execution(trace_write8_snapshot::<I, QT, S>);
        executor.hook_write4_execution(trace_write4_snapshot::<I, QT, S>);
        executor.hook_write2_execution(trace_write2_snapshot::<I, QT, S>);
        executor.hook_write1_execution(trace_write1_snapshot::<I, QT, S>);
        executor.hook_write_n_execution(trace_write_n_snapshot::<I, QT, S>);
    }

    /// Before each run: restore the snapshot if one exists, otherwise take it
    /// now (first execution).
    fn pre_exec(&mut self, _input: &I) {
        if !self.empty {
            self.reset();
        } else {
            self.snapshot();
        }
    }
}
/// Write hook (1 byte): forward the access to the snapshot helper so the
/// touched page is marked dirty.
pub fn trace_write1_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    helpers
        .match_first_type_mut::<QemuSnapshotHelper>()
        .unwrap()
        .access(addr, 1);
}
/// Write hook (2 bytes): forward the access to the snapshot helper so the
/// touched page(s) are marked dirty.
pub fn trace_write2_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    helpers
        .match_first_type_mut::<QemuSnapshotHelper>()
        .unwrap()
        .access(addr, 2);
}
/// Write hook (4 bytes): forward the access to the snapshot helper so the
/// touched page(s) are marked dirty.
pub fn trace_write4_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    helpers
        .match_first_type_mut::<QemuSnapshotHelper>()
        .unwrap()
        .access(addr, 4);
}
/// Write hook (8 bytes): forward the access to the snapshot helper so the
/// touched page(s) are marked dirty.
pub fn trace_write8_snapshot<I, QT, S>(helpers: &mut QT, _state: &mut S, _id: u64, addr: u64)
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    helpers
        .match_first_type_mut::<QemuSnapshotHelper>()
        .unwrap()
        .access(addr, 8);
}
/// Write hook (variable length): forward the access to the snapshot helper so
/// every page touched by the `size`-byte write is marked dirty.
pub fn trace_write_n_snapshot<I, QT, S>(
    helpers: &mut QT,
    _state: &mut S,
    _id: u64,
    addr: u64,
    size: usize,
) where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    helpers
        .match_first_type_mut::<QemuSnapshotHelper>()
        .unwrap()
        .access(addr, size);
}

View File

@ -30,7 +30,7 @@ use libafl::{
};
pub use libafl_qemu::emu;
use libafl_qemu::{hooks, QemuCmpLogHelper, QemuEdgeCoverageHelper, QemuExecutor};
use libafl_qemu::{cmplog, edges, QemuCmpLogHelper, QemuEdgeCoverageHelper, QemuExecutor};
use libafl_targets::CmpLogObserver;
use crate::{CORPUS_CACHE_SIZE, DEFAULT_TIMEOUT_SECS};
@ -103,8 +103,8 @@ where
let mut run_client = |state: Option<StdState<_, _, _, _, _>>, mut mgr, _core_id| {
// Create an observation channel using the coverage map
let edges = unsafe { &mut hooks::EDGES_MAP };
let edges_counter = unsafe { &mut hooks::MAX_EDGES_NUM };
let edges = unsafe { &mut edges::EDGES_MAP };
let edges_counter = unsafe { &mut edges::MAX_EDGES_NUM };
let edges_observer =
HitcountsMapObserver::new(VariableMapObserver::new("edges", edges, edges_counter));
@ -112,7 +112,7 @@ where
let time_observer = TimeObserver::new("time");
// Keep tracks of CMPs
let cmplog = unsafe { &mut hooks::CMPLOG_MAP };
let cmplog = unsafe { &mut cmplog::CMPLOG_MAP };
let cmplog_observer = CmpLogObserver::new("cmplog", cmplog, true);
// The state of the edges feedback.