Implement thread-safe AsanGiovese in Rust with snapshots support (#851)

* Purge C impl of asan-giovese

* Compiling

* reset asan

* Restore asan state in qemu

* clippy

* upd

* Asan snapshots

* fuzzbench_qemu

* fix snap mmap limit

* fix

* compiles again

* clippy

* update meminterval

* autofix

* fix 32 bit targets

* try to clean intermediate builds

Co-authored-by: Dominik Maier <dmnk@google.com>
Andrea Fioraldi authored on 2022-10-25 09:48:59 +02:00, committed by GitHub
commit 5571a03641 (parent 332c2bc3f8)
27 changed files with 560 additions and 2876 deletions
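In short: the C asan-giovese runtime (and its vendored rbtree/interval-tree code) is deleted, and libafl_qemu gains a pure-Rust, thread-safe `AsanGiovese` with optional shadow-memory snapshots and leak detection. A minimal, hedged sketch of how a fuzzer opts in after this change — module paths and signatures are taken from the diff below, and the argument plumbing is abbreviated:

```rust
use libafl_qemu::{
    asan::{init_with_asan, QemuAsanHelper, QemuAsanOptions},
    helper::QemuInstrumentationFilter,
};

fn setup() {
    let mut args: Vec<String> = std::env::args().collect();
    let mut env: Vec<(String, String)> = std::env::vars().collect();

    // Maps the three shadow regions and flips ASAN_INITED;
    // QemuAsanHelper::new() asserts that this ran before the Emulator exists.
    let _emu = init_with_asan(&mut args, &mut env);

    // Snapshot mode records dirty shadow pages and the allocation tree on the
    // first execution, then rolls both back after every run.
    let _asan = QemuAsanHelper::new(
        QemuInstrumentationFilter::None,
        QemuAsanOptions::Snapshot,
    );
}
```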


@@ -96,7 +96,7 @@ jobs:
# Skipping `python` as it has to be built with the `maturin` tool
# `agpl`, `nautilus` require nightly
# `sancov_pcguard_edges` is tested separately
run: cargo hack check --each-feature --exclude-features=agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be
run: cargo hack check --each-feature --clean-per-run --exclude-features=agpl,nautilus,python,sancov_pcguard_edges,arm,aarch64,i386,be
- name: Check nightly features
run: cargo +nightly check --features=agpl && cargo +nightly check --features=nautilus
- name: Build examples


@@ -63,8 +63,8 @@ pub fn libafl_main() {
// Needed only on no_std
//RegistryBuilder::register::<Tokens>();
let res = match Command::new("libafl_fuzzbench")
.version("0.8.1")
let res = match Command::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author("AFLplusplus team")
.about("LibAFL-based fuzzer for Fuzzbench")
.arg(
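This two-line change (repeated in the other fuzzbench binaries below) swaps hard-coded CLI metadata for `env!` macros, which the compiler expands from Cargo-provided environment variables, so the reported name and version can no longer drift from Cargo.toml — note the stale "0.8.1" and "0.4.0" strings being removed. A trivial standalone illustration:

```rust
fn main() {
    // Cargo sets CARGO_PKG_NAME and CARGO_PKG_VERSION for every build;
    // env!() fails at compile time if a variable is missing, so these
    // strings always match the building crate's own Cargo.toml.
    println!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
}
```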


@@ -62,8 +62,8 @@ pub fn main() {
// Needed only on no_std
//RegistryBuilder::register::<Tokens>();
let res = match Command::new("libafl_qemu_fuzzbench")
.version("0.4.0")
let res = match Command::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author("AFLplusplus team")
.about("LibAFL-based fuzzer with QEMU for Fuzzbench")
.arg(


@@ -45,7 +45,7 @@ use libafl::{
Error,
};
use libafl_qemu::{
//asan::QemuAsanHelper,
//asan::{init_with_asan, QemuAsanHelper},
cmplog,
cmplog::{CmpLogObserver, QemuCmpLogHelper},
edges,
@@ -62,14 +62,16 @@ use libafl_qemu::{
#[cfg(unix)]
use nix::{self, unistd::dup};
pub const MAX_INPUT_SIZE: usize = 1048576; // 1MB
/// The fuzzer main
pub fn main() {
// Register the metadata types used in this fuzzer
// Needed only on no_std
//RegistryBuilder::register::<Tokens>();
let res = match Command::new("libafl_qemu_fuzzbench")
.version("0.4.0")
let res = match Command::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author("AFLplusplus team")
.about("LibAFL-based fuzzer with QEMU for Fuzzbench")
.arg(
@@ -173,6 +175,7 @@ fn fuzz(
let args: Vec<String> = env::args().collect();
let env: Vec<(String, String)> = env::vars().collect();
let emu = Emulator::new(&args, &env);
//let emu = init_with_asan(&mut args, &mut env);
let mut elf_buffer = Vec::new();
let elf = EasyElf::from_file(emu.binary_path(), &mut elf_buffer)?;
@@ -198,7 +201,9 @@ fn fuzz(
emu.remove_breakpoint(test_one_input_ptr); // LLVMFuzzerTestOneInput
emu.set_breakpoint(ret_addr); // LLVMFuzzerTestOneInput ret addr
let input_addr = emu.map_private(0, 4096, MmapPerms::ReadWrite).unwrap();
let input_addr = emu
.map_private(0, MAX_INPUT_SIZE, MmapPerms::ReadWrite)
.unwrap();
println!("Placing input at {:#x}", input_addr);
let log = RefCell::new(
@@ -313,9 +318,9 @@ fn fuzz(
let target = input.target_bytes();
let mut buf = target.as_slice();
let mut len = buf.len();
if len > 4096 {
buf = &buf[0..4096];
len = 4096;
if len > MAX_INPUT_SIZE {
buf = &buf[0..MAX_INPUT_SIZE];
len = MAX_INPUT_SIZE;
}
unsafe {
@@ -337,7 +342,7 @@ fn fuzz(
tuple_list!(
QemuEdgeCoverageHelper::default(),
QemuCmpLogHelper::default(),
//QemuAsanHelper::new(),
//QemuAsanHelper::default(),
//QemuSnapshotHelper::new()
),
);


@@ -69,8 +69,8 @@ pub fn libafl_main() {
// Needed only on no_std
//RegistryBuilder::register::<Tokens>();
let res = match Command::new("libafl_fuzzbench")
.version("0.8.1")
let res = match Command::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author("AFLplusplus team")
.about("LibAFL-based fuzzer for Fuzzbench")
.arg(


@@ -63,8 +63,8 @@ pub fn libafl_main() {
// Needed only on no_std
//RegistryBuilder::register::<Tokens>();
let res = match Command::new("libafl_fuzzbench")
.version("0.8.1")
let res = match Command::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author("AFLplusplus team")
.about("LibAFL-based fuzzer for Fuzzbench")
.arg(


@@ -2,7 +2,7 @@
name = "qemu_arm_launcher"
version = "0.8.2"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2018"
edition = "2021"
[features]
default = ["std"]


@@ -2,7 +2,7 @@
name = "qemu_launcher"
version = "0.8.2"
authors = ["Andrea Fioraldi <andreafioraldi@gmail.com>", "Dominik Maier <domenukk@gmail.com>"]
edition = "2018"
edition = "2021"
[features]
default = ["std"]


@@ -44,6 +44,8 @@ use libafl_qemu::{
Regs,
};
pub const MAX_INPUT_SIZE: usize = 1048576; // 1MB
pub fn fuzz() {
// Hardcoded parameters
let timeout = Duration::from_secs(1);
@@ -83,7 +85,9 @@ pub fn fuzz() {
emu.remove_breakpoint(test_one_input_ptr); // LLVMFuzzerTestOneInput
emu.set_breakpoint(ret_addr); // LLVMFuzzerTestOneInput ret addr
let input_addr = emu.map_private(0, 4096, MmapPerms::ReadWrite).unwrap();
let input_addr = emu
.map_private(0, MAX_INPUT_SIZE, MmapPerms::ReadWrite)
.unwrap();
println!("Placing input at {:#x}", input_addr);
// The wrapped harness function, calling out to the LLVM-style harness
@@ -91,9 +95,9 @@ pub fn fuzz() {
let target = input.target_bytes();
let mut buf = target.as_slice();
let mut len = buf.len();
if len > 4096 {
buf = &buf[0..4096];
len = 4096;
if len > MAX_INPUT_SIZE {
buf = &buf[0..MAX_INPUT_SIZE];
len = MAX_INPUT_SIZE;
}
unsafe {


@@ -40,7 +40,7 @@ libc = "0.2"
strum = "0.24"
strum_macros = "0.24"
syscall-numbers = "3.0"
meminterval = "0.1"
meminterval = "0.3"
thread_local = "1.1.4"
capstone = "0.11.0"
#pyo3 = { version = "0.15", features = ["extension-module"], optional = true }
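The `meminterval` bump from 0.1 to 0.3 matters because the rewritten asan.rs below tracks allocations in an `IntervalTree<GuestAddr, ()>`. A minimal sketch of the 0.3 calls the new code relies on (`insert`, `query` over ranges, `delete`, `clear`), assuming meminterval 0.3 as a dependency:

```rust
use meminterval::IntervalTree;

fn main() {
    let mut tree: IntervalTree<u64, ()> = IntervalTree::new();
    tree.insert(0x1000..0x2000, ());

    // `query` iterates entries whose interval intersects the queried range;
    // both Range and RangeInclusive work (the diff uses `start..end` and
    // `addr..=addr`).
    let found: Vec<_> = tree.query(0x1800..=0x1800).map(|e| *e.interval).collect();
    assert_eq!(found.len(), 1);

    for interval in found {
        tree.delete(interval); // delete takes the exact interval to remove
    }
    assert!(tree.query(0..u64::MAX).next().is_none());
}
```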


@@ -380,7 +380,6 @@ pub fn build() {
{
let qasan_dir = Path::new("libqasan");
let qasan_dir = fs::canonicalize(qasan_dir).unwrap();
let src_dir = Path::new("src");
assert!(Command::new("make")
.current_dir(out_dir_path)
@@ -401,10 +400,5 @@ pub fn build() {
.status()
.expect("make failed")
.success());
cc::Build::new()
.warnings(false)
.file(src_dir.join("asan-giovese.c"))
.compile("asan_giovese");
}
}

(File diff suppressed because it is too large.)


@@ -1,155 +0,0 @@
/*******************************************************************************
BSD 2-Clause License
Copyright (c) 2020-2021, Andrea Fioraldi
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#ifndef __ASAN_GIOVESE_H__
#define __ASAN_GIOVESE_H__
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#define target_ulong uint64_t
#define TARGET_FMT_lx "%" PRIx64
#define TARGET_FMT_ld "%" PRId64
#ifndef ASAN_NAME_STR
#define ASAN_NAME_STR "AddressSanitizer"
#endif
#define HIGH_SHADOW_ADDR ((void *)0x02008fff7000ULL)
#define LOW_SHADOW_ADDR ((void *)0x00007fff8000ULL)
#define GAP_SHADOW_ADDR ((void *)0x00008fff7000)
#define HIGH_SHADOW_SIZE (0xdfff0000fffULL)
#define LOW_SHADOW_SIZE (0xfffefffULL)
#define GAP_SHADOW_SIZE (0x1ffffffffff)
#define SHADOW_OFFSET (0x7fff8000ULL)
/* shadow map byte values */
#define ASAN_VALID 0x00
#define ASAN_PARTIAL1 0x01
#define ASAN_PARTIAL2 0x02
#define ASAN_PARTIAL3 0x03
#define ASAN_PARTIAL4 0x04
#define ASAN_PARTIAL5 0x05
#define ASAN_PARTIAL6 0x06
#define ASAN_PARTIAL7 0x07
#define ASAN_ARRAY_COOKIE 0xac
#define ASAN_STACK_RZ 0xf0
#define ASAN_STACK_LEFT_RZ 0xf1
#define ASAN_STACK_MID_RZ 0xf2
#define ASAN_STACK_RIGHT_RZ 0xf3
#define ASAN_STACK_FREED 0xf5
#define ASAN_STACK_OOSCOPE 0xf8
#define ASAN_GLOBAL_RZ 0xf9
#define ASAN_HEAP_RZ 0xe9
#define ASAN_USER 0xf7
#define ASAN_HEAP_LEFT_RZ 0xfa
#define ASAN_HEAP_RIGHT_RZ 0xfb
#define ASAN_HEAP_FREED 0xfd
enum {
ACCESS_TYPE_LOAD,
ACCESS_TYPE_STORE,
};
struct call_context {
target_ulong *addresses;
uint32_t tid;
uint32_t size;
};
struct chunk_info {
target_ulong start;
target_ulong end;
struct call_context *alloc_ctx;
struct call_context *free_ctx; // NULL if chunk is allocated
};
extern void *__ag_high_shadow;
extern void *__ag_low_shadow;
// ------------------------------------------------------------------------- //
// Virtual functions, you have to implement them
// ------------------------------------------------------------------------- //
///////////////////////////////////////////////////////////////////////////////
void asan_giovese_populate_context(struct call_context *ctx, target_ulong pc);
char *asan_giovese_printaddr(target_ulong addr);
///////////////////////////////////////////////////////////////////////////////
// ------------------------------------------------------------------------- //
// Exposed functions
// ------------------------------------------------------------------------- //
void asan_giovese_init(void);
// this has to be fast, ptr is an host pointer
int asan_giovese_load1(void *ptr);
int asan_giovese_load2(void *ptr);
int asan_giovese_load4(void *ptr);
int asan_giovese_load8(void *ptr);
int asan_giovese_store1(void *ptr);
int asan_giovese_store2(void *ptr);
int asan_giovese_store4(void *ptr);
int asan_giovese_store8(void *ptr);
int asan_giovese_loadN(void *ptr, size_t n);
int asan_giovese_storeN(void *ptr, size_t n);
int asan_giovese_guest_loadN(target_ulong addr, size_t n);
int asan_giovese_guest_storeN(target_ulong addr, size_t n);
int asan_giovese_poison_region(void *ptr, size_t n, uint8_t poison_byte);
int asan_giovese_user_poison_region(void *ptr, size_t n);
int asan_giovese_unpoison_region(void *ptr, size_t n);
int asan_giovese_poison_guest_region(target_ulong addr, size_t n,
uint8_t poison_byte);
int asan_giovese_user_poison_guest_region(target_ulong addr, size_t n);
int asan_giovese_unpoison_guest_region(target_ulong addr, size_t n);
// addr is a guest pointer
int asan_giovese_report_and_crash(int access_type, target_ulong addr, size_t n,
target_ulong pc, target_ulong bp,
target_ulong sp);
int asan_giovese_deadly_signal(int signum, target_ulong addr, target_ulong pc,
target_ulong bp, target_ulong sp);
int asan_giovese_badfree(target_ulong addr, target_ulong pc);
struct chunk_info *asan_giovese_alloc_search(target_ulong query);
void asan_giovese_alloc_remove(target_ulong start, target_ulong end);
void asan_giovese_alloc_insert(target_ulong start, target_ulong end,
struct call_context *alloc_ctx);
#endif
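The C checkers above (`asan_giovese_load1` … `asan_giovese_storeN`) survive as the `is_invalid_access_*` methods in the Rust rewrite that follows. The shadow encoding is the standard ASan scheme: one shadow byte guards eight application bytes; 0 means fully addressable, 1-7 means only the first k bytes are addressable, and the negative values are the poison markers listed in `PoisonKind`. A standalone sketch of the one-byte check, with the guest-to-host translation (`Emulator::g2h` in the diff) factored out into a plain host address:

```rust
const SHADOW_OFFSET: isize = 0x7fff8000;

/// Sketch of the 1-byte shadow check. `h` is the host address of the guest
/// byte; calling this only makes sense once the shadow regions are mmap'd.
unsafe fn is_invalid_access_1(h: isize) -> bool {
    // Each shadow byte covers 8 application bytes.
    let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
    let k = *shadow_addr as isize;
    // k == 0: all 8 bytes addressable. 0 < k < 8: only the first k bytes
    // are, so the access is invalid when its last byte (h & 7) reaches k.
    // k < 0: poisoned, and the comparison below is then always true.
    k != 0 && (h & 7) + 1 > k
}
```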


@@ -1,19 +1,42 @@
use std::{env, fs, ptr};
#![allow(clippy::cast_possible_wrap)]
use std::{
collections::{HashMap, HashSet},
env, fs,
sync::Mutex,
};
use libafl::{inputs::UsesInput, state::HasMetadata};
use libc::{
c_void, MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_NORESERVE, MAP_PRIVATE, PROT_READ, PROT_WRITE,
};
use meminterval::{Interval, IntervalTree};
use num_enum::{IntoPrimitive, TryFromPrimitive};
use crate::{
emu::{Emulator, SyscallHookResult},
helper::{QemuHelper, QemuHelperTuple, QemuInstrumentationFilter},
hooks::QemuHooks,
GuestAddr, Regs,
GuestAddr,
};
// TODO at some point, merge parts with libafl_frida
pub const HIGH_SHADOW_ADDR: *mut c_void = 0x02008fff7000 as *mut c_void;
pub const LOW_SHADOW_ADDR: *mut c_void = 0x00007fff8000 as *mut c_void;
pub const GAP_SHADOW_ADDR: *mut c_void = 0x00008fff7000 as *mut c_void;
pub const HIGH_SHADOW_SIZE: usize = 0xdfff0000fff;
pub const LOW_SHADOW_SIZE: usize = 0xfffefff;
pub const GAP_SHADOW_SIZE: usize = 0x1ffffffffff;
pub const SHADOW_OFFSET: isize = 0x7fff8000;
pub const QASAN_FAKESYS_NR: i32 = 0xa2a4;
pub const SHADOW_PAGE_SIZE: usize = 4096;
pub const SHADOW_PAGE_MASK: GuestAddr = !(SHADOW_PAGE_SIZE as GuestAddr - 1);
#[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy)]
#[repr(u64)]
pub enum QasanAction {
@@ -31,7 +54,7 @@ pub enum QasanAction {
}
#[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy)]
#[repr(u8)]
#[repr(i8)]
pub enum PoisonKind {
Valid = 0,
Partial1 = 1,
@@ -41,75 +64,366 @@ pub enum PoisonKind {
Partial5 = 5,
Partial6 = 6,
Partial7 = 7,
ArrayCookie = 0xac,
StackRz = 0xf0,
StackLeftRz = 0xf1,
StackMidRz = 0xf2,
StackRightRz = 0xf3,
StacKFreed = 0xf5,
StackOOScope = 0xf8,
GlobalRz = 0xf9,
HeapRz = 0xe9,
User = 0xf7,
HeapLeftRz = 0xfa,
HeapRightRz = 0xfb,
HeapFreed = 0xfd,
ArrayCookie = -84, // 0xac
StackRz = -16, // 0xf0
StackLeftRz = -15, // 0xf1
StackMidRz = -14, // 0xf2
StackRightRz = -13, // 0xf3
StacKFreed = -11, // 0xf5
StackOOScope = -8, // 0xf8
GlobalRz = -7, // 0xf9
HeapRz = -23, // 0xe9
User = -9, // 0xf7
HeapLeftRz = -6, // 0xfa
HeapRightRz = -5, // 0xfb
HeapFreed = -3, // 0xfd
}
#[repr(C)]
struct CallContext {
pub addresses: *const u64,
pub tid: i32,
pub size: u32,
pub enum AsanError {
Read(GuestAddr, usize),
Write(GuestAddr, usize),
BadFree(GuestAddr, Option<Interval<GuestAddr>>),
MemLeak(Interval<GuestAddr>),
}
#[repr(C)]
struct ChunkInfo {
pub start: u64,
pub end: u64,
pub alloc_ctx: *const CallContext,
pub free_ctx: *const CallContext, // NULL if chunk is allocated
pub type AsanErrorCallback = Box<dyn FnMut(&Emulator, AsanError)>;
pub struct AsanGiovese {
pub alloc_tree: Mutex<IntervalTree<GuestAddr, ()>>,
pub saved_tree: IntervalTree<GuestAddr, ()>,
pub error_callback: Option<AsanErrorCallback>,
pub dirty_shadow: Mutex<HashSet<GuestAddr>>,
pub saved_shadow: HashMap<GuestAddr, Vec<i8>>,
pub snapshot_shadow: bool,
}
extern "C" {
fn asan_giovese_init();
fn asan_giovese_load1(ptr: *const u8) -> i32;
fn asan_giovese_load2(ptr: *const u8) -> i32;
fn asan_giovese_load4(ptr: *const u8) -> i32;
fn asan_giovese_load8(ptr: *const u8) -> i32;
fn asan_giovese_store1(ptr: *const u8) -> i32;
fn asan_giovese_store2(ptr: *const u8) -> i32;
fn asan_giovese_store4(ptr: *const u8) -> i32;
fn asan_giovese_store8(ptr: *const u8) -> i32;
fn asan_giovese_loadN(ptr: *const u8, n: usize) -> i32;
fn asan_giovese_storeN(ptr: *const u8, n: usize) -> i32;
fn asan_giovese_poison_region(ptr: *const u8, n: usize, poison: u8) -> i32;
fn asan_giovese_unpoison_region(ptr: *const u8, n: usize) -> i32;
fn asan_giovese_alloc_search(query: u64) -> *mut ChunkInfo;
fn asan_giovese_alloc_remove(start: u64, end: u64);
fn asan_giovese_alloc_insert(start: u64, end: u64, alloc_ctx: *const CallContext);
fn asan_giovese_report_and_crash(
access_type: i32,
addr: u64,
n: usize,
pc: u64,
bp: u64,
sp: u64,
);
fn asan_giovese_badfree(addr: u64, pc: u64);
impl core::fmt::Debug for AsanGiovese {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("AsanGiovese")
.field("alloc_tree", &self.alloc_tree)
.field("dirty_shadow", &self.dirty_shadow)
.finish()
}
}
#[no_mangle]
extern "C" fn asan_giovese_printaddr(_addr: u64) -> *const u8 {
// Just addresses ATM
ptr::null()
}
impl AsanGiovese {
pub unsafe fn map_shadow() {
assert!(
libc::mmap(
HIGH_SHADOW_ADDR,
HIGH_SHADOW_SIZE,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,
-1,
0
) != MAP_FAILED
);
assert!(
libc::mmap(
LOW_SHADOW_ADDR,
LOW_SHADOW_SIZE,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,
-1,
0
) != MAP_FAILED
);
assert!(
libc::mmap(
GAP_SHADOW_ADDR,
GAP_SHADOW_SIZE,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,
-1,
0
) != MAP_FAILED
);
}
#[no_mangle]
unsafe extern "C" fn asan_giovese_populate_context(ctx: *mut CallContext, _pc: u64) {
let ctx = ctx.as_mut().unwrap();
ctx.tid = libc::gettid();
ctx.size = 0;
#[inline]
#[must_use]
pub fn is_invalid_access_1(emu: &Emulator, addr: GuestAddr) -> bool {
unsafe {
let h = emu.g2h::<*const c_void>(addr) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
let k = *shadow_addr as isize;
k != 0 && (h & 7).wrapping_add(1) > k
}
}
#[inline]
#[must_use]
pub fn is_invalid_access_2(emu: &Emulator, addr: GuestAddr) -> bool {
unsafe {
let h = emu.g2h::<*const c_void>(addr) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
let k = *shadow_addr as isize;
k != 0 && (h & 7).wrapping_add(2) > k
}
}
#[inline]
#[must_use]
pub fn is_invalid_access_4(emu: &Emulator, addr: GuestAddr) -> bool {
unsafe {
let h = emu.g2h::<*const c_void>(addr) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
let k = *shadow_addr as isize;
k != 0 && (h & 7).wrapping_add(4) > k
}
}
#[inline]
#[must_use]
pub fn is_invalid_access_8(emu: &Emulator, addr: GuestAddr) -> bool {
unsafe {
let h = emu.g2h::<*const c_void>(addr) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
*shadow_addr != 0
}
}
#[inline]
#[must_use]
pub fn is_invalid_access(emu: &Emulator, addr: GuestAddr, n: usize) -> bool {
unsafe {
if n == 0 {
return false;
}
let n = n as isize;
let mut start = addr;
let end = start.wrapping_add(n as GuestAddr);
let last_8 = end & !7;
if start & 0x7 != 0 {
let next_8 = (start & !7).wrapping_add(8);
let first_size = next_8.wrapping_sub(start) as isize;
if n <= first_size {
let h = emu.g2h::<*const c_void>(start) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
let k = *shadow_addr as isize;
return k != 0 && (h & 7).wrapping_add(n) > k;
}
let h = emu.g2h::<*const c_void>(start) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
let k = *shadow_addr as isize;
if k != 0 && (h & 7).wrapping_add(first_size) > k {
return true;
}
start = next_8;
}
while start < last_8 {
let h = emu.g2h::<*const c_void>(start) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
if *shadow_addr != 0 {
return true;
}
start = (start).wrapping_add(8);
}
if last_8 != end {
let h = emu.g2h::<*const c_void>(start) as isize;
let last_size = end.wrapping_sub(last_8) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
let k = *shadow_addr as isize;
return k != 0 && (h & 7).wrapping_add(last_size) > k;
}
false
}
}
#[inline]
pub fn poison(&mut self, emu: &Emulator, addr: GuestAddr, n: usize, poison_byte: i8) -> bool {
unsafe {
if n == 0 {
return false;
}
if self.snapshot_shadow {
let mut page = addr & SHADOW_PAGE_MASK;
let mut set = self.dirty_shadow.lock().unwrap();
while page < addr + n as GuestAddr {
set.insert(page);
page += SHADOW_PAGE_SIZE as GuestAddr;
}
}
let n = n as isize;
let mut start = addr;
let end = start.wrapping_add(n as GuestAddr);
let last_8 = end & !7;
if start & 0x7 != 0 {
let next_8 = (start & !7).wrapping_add(8);
let first_size = next_8.wrapping_sub(start) as isize;
if n < first_size {
return false;
}
let h = emu.g2h::<*const c_void>(start) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
*shadow_addr = (8isize).wrapping_sub(first_size) as i8;
start = next_8;
}
while start < last_8 {
let h = emu.g2h::<*const c_void>(start) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
*shadow_addr = poison_byte;
start = (start).wrapping_add(8);
}
true
}
}
#[inline]
#[allow(clippy::must_use_candidate)]
pub fn unpoison(emu: &Emulator, addr: GuestAddr, n: usize) -> bool {
unsafe {
let n = n as isize;
let mut start = addr;
let end = start.wrapping_add(n as GuestAddr);
while start < end {
let h = emu.g2h::<*const c_void>(start) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
*shadow_addr = 0;
start = (start).wrapping_add(8);
}
true
}
}
#[inline]
fn unpoison_page(emu: &Emulator, page: GuestAddr) {
unsafe {
let h = emu.g2h::<*const c_void>(page) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
shadow_addr.write_bytes(0, SHADOW_PAGE_SIZE);
}
}
#[inline]
#[allow(clippy::mut_from_ref)]
fn get_shadow_page(emu: &Emulator, page: GuestAddr) -> &mut [i8] {
unsafe {
let h = emu.g2h::<*const c_void>(page) as isize;
let shadow_addr = ((h >> 3) as *mut i8).offset(SHADOW_OFFSET);
std::slice::from_raw_parts_mut(shadow_addr, SHADOW_PAGE_SIZE)
}
}
#[must_use]
pub fn new(snapshot_shadow: bool) -> Self {
Self {
alloc_tree: Mutex::new(IntervalTree::new()),
saved_tree: IntervalTree::new(),
error_callback: None,
dirty_shadow: Mutex::new(HashSet::default()),
saved_shadow: HashMap::default(),
snapshot_shadow,
}
}
#[must_use]
pub fn with_error_callback(snapshot_shadow: bool, error_callback: AsanErrorCallback) -> Self {
Self {
alloc_tree: Mutex::new(IntervalTree::new()),
saved_tree: IntervalTree::new(),
error_callback: Some(error_callback),
dirty_shadow: Mutex::new(HashSet::default()),
saved_shadow: HashMap::default(),
snapshot_shadow,
}
}
pub fn report_and_crash(&mut self, emu: &Emulator, error: AsanError) {
if let Some(cb) = self.error_callback.as_mut() {
(cb)(emu, error);
} else {
std::process::abort();
}
}
pub fn alloc_insert(&mut self, start: GuestAddr, end: GuestAddr) {
self.alloc_tree.lock().unwrap().insert(start..end, ());
}
pub fn alloc_remove(&mut self, start: GuestAddr, end: GuestAddr) {
let mut tree = self.alloc_tree.lock().unwrap();
let mut found = vec![];
for entry in tree.query(start..end) {
found.push(*entry.interval);
}
for interval in found {
tree.delete(interval);
}
}
#[must_use]
pub fn alloc_search(&mut self, query: GuestAddr) -> Option<Interval<GuestAddr>> {
self.alloc_tree
.lock()
.unwrap()
.query(query..=query)
.next()
.map(|entry| *entry.interval)
}
pub fn snapshot(&mut self, emu: &Emulator) {
if self.snapshot_shadow {
let set = self.dirty_shadow.lock().unwrap();
for &page in set.iter() {
let data = Self::get_shadow_page(emu, page).to_vec();
self.saved_shadow.insert(page, data);
}
let tree = self.alloc_tree.lock().unwrap();
self.saved_tree = tree.clone();
}
}
pub fn rollback(&mut self, emu: &Emulator, detect_leaks: bool) {
let mut leaks = vec![];
{
let mut tree = self.alloc_tree.lock().unwrap();
if detect_leaks {
for entry in tree.query(0..GuestAddr::MAX) {
leaks.push(*entry.interval);
}
}
if self.snapshot_shadow {
tree.clear();
}
}
if self.snapshot_shadow {
let mut set = self.dirty_shadow.lock().unwrap();
for &page in set.iter() {
let original = self.saved_shadow.get(&page);
if let Some(data) = original {
let cur = Self::get_shadow_page(emu, page);
cur.copy_from_slice(data);
} else {
Self::unpoison_page(emu, page);
}
}
set.clear();
}
for interval in leaks {
self.report_and_crash(emu, AsanError::MemLeak(interval));
}
}
}
static mut ASAN_INITED: bool = false;
@@ -157,26 +471,67 @@ pub fn init_with_asan(args: &mut Vec<String>, env: &mut [(String, String)]) -> E
}
unsafe {
asan_giovese_init();
AsanGiovese::map_shadow();
ASAN_INITED = true;
}
Emulator::new(args, env)
}
pub enum QemuAsanOptions {
None,
Snapshot,
DetectLeaks,
SnapshotDetectLeaks,
}
pub type QemuAsanChildHelper = QemuAsanHelper;
#[derive(Debug)]
pub struct QemuAsanHelper {
enabled: bool,
detect_leaks: bool,
empty: bool,
rt: AsanGiovese,
filter: QemuInstrumentationFilter,
}
impl QemuAsanHelper {
#[must_use]
pub fn new(filter: QemuInstrumentationFilter) -> Self {
pub fn new(filter: QemuInstrumentationFilter, options: QemuAsanOptions) -> Self {
assert!(unsafe { ASAN_INITED }, "The ASan runtime is not initialized, use init_with_asan(...) instead of just Emulator::new(...)");
let (snapshot, detect_leaks) = match options {
QemuAsanOptions::None => (false, false),
QemuAsanOptions::Snapshot => (true, false),
QemuAsanOptions::DetectLeaks => (false, true),
QemuAsanOptions::SnapshotDetectLeaks => (true, true),
};
Self {
enabled: true,
detect_leaks,
empty: true,
rt: AsanGiovese::new(snapshot),
filter,
}
}
#[must_use]
pub fn with_error_callback(
filter: QemuInstrumentationFilter,
error_callback: AsanErrorCallback,
options: QemuAsanOptions,
) -> Self {
assert!(unsafe { ASAN_INITED }, "The ASan runtime is not initialized, use init_with_asan(...) instead of just Emulator::new(...)");
let (snapshot, detect_leaks) = match options {
QemuAsanOptions::None => (false, false),
QemuAsanOptions::Snapshot => (true, false),
QemuAsanOptions::DetectLeaks => (false, true),
QemuAsanOptions::SnapshotDetectLeaks => (true, true),
};
Self {
enabled: true,
detect_leaks,
empty: true,
rt: AsanGiovese::with_error_callback(snapshot, error_callback),
filter,
}
}
@@ -195,191 +550,97 @@ impl QemuAsanHelper {
self.enabled = enabled;
}
#[allow(clippy::unused_self)]
pub fn alloc(&mut self, _emulator: &Emulator, start: u64, end: u64) {
unsafe {
let ctx: *const CallContext =
libc::calloc(core::mem::size_of::<CallContext>(), 1) as *const _;
asan_giovese_alloc_insert(start, end, ctx);
}
pub fn alloc(&mut self, _emulator: &Emulator, start: GuestAddr, end: GuestAddr) {
self.rt.alloc_insert(start, end);
}
#[allow(clippy::unused_self)]
pub fn dealloc(&mut self, emulator: &Emulator, addr: u64) {
unsafe {
let ckinfo = asan_giovese_alloc_search(addr);
if let Some(ck) = ckinfo.as_mut() {
if ck.start != addr {
// Free not the start of the chunk
asan_giovese_badfree(addr, emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX));
}
let ctx: *const CallContext =
libc::calloc(core::mem::size_of::<CallContext>(), 1) as *const _;
ck.free_ctx = ctx;
} else {
// Free of wild ptr
asan_giovese_badfree(addr, emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX));
pub fn dealloc(&mut self, emulator: &Emulator, addr: GuestAddr) {
let chunk = self.rt.alloc_search(addr);
if let Some(ck) = chunk {
if ck.start != addr {
// Free not the start of the chunk
self.rt
.report_and_crash(emulator, AsanError::BadFree(addr, Some(ck)));
}
} else {
// Free of wild ptr
self.rt
.report_and_crash(emulator, AsanError::BadFree(addr, None));
}
}
#[allow(clippy::unused_self)]
#[must_use]
pub fn is_poisoned(&self, emulator: &Emulator, addr: GuestAddr, size: usize) -> bool {
unsafe { asan_giovese_loadN(emulator.g2h(addr), size) != 0 }
AsanGiovese::is_invalid_access(emulator, addr, size)
}
pub fn read_1(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_load1(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
0,
addr.into(),
1,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_1(emulator, addr) {
self.rt.report_and_crash(emulator, AsanError::Read(addr, 1));
}
}
pub fn read_2(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_load2(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
0,
addr.into(),
2,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_2(emulator, addr) {
self.rt.report_and_crash(emulator, AsanError::Read(addr, 2));
}
}
pub fn read_4(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_load4(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
0,
addr.into(),
4,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_4(emulator, addr) {
self.rt.report_and_crash(emulator, AsanError::Read(addr, 4));
}
}
pub fn read_8(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_load8(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
0,
addr.into(),
8,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_8(emulator, addr) {
self.rt.report_and_crash(emulator, AsanError::Read(addr, 8));
}
}
pub fn read_n(&mut self, emulator: &Emulator, addr: GuestAddr, size: usize) {
unsafe {
if self.enabled() && asan_giovese_loadN(emulator.g2h(addr), size) != 0 {
asan_giovese_report_and_crash(
0,
addr.into(),
size,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access(emulator, addr, size) {
self.rt
.report_and_crash(emulator, AsanError::Read(addr, size));
}
}
pub fn write_1(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_store1(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
1,
addr.into(),
1,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_1(emulator, addr) {
self.rt
.report_and_crash(emulator, AsanError::Write(addr, 1));
}
}
pub fn write_2(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_store2(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
1,
addr.into(),
2,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_2(emulator, addr) {
self.rt
.report_and_crash(emulator, AsanError::Write(addr, 2));
}
}
pub fn write_4(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_store4(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
1,
addr.into(),
4,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_4(emulator, addr) {
self.rt
.report_and_crash(emulator, AsanError::Write(addr, 4));
}
}
pub fn write_8(&mut self, emulator: &Emulator, addr: GuestAddr) {
unsafe {
if self.enabled() && asan_giovese_store8(emulator.g2h(addr)) != 0 {
asan_giovese_report_and_crash(
1,
addr.into(),
8,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access_8(emulator, addr) {
self.rt
.report_and_crash(emulator, AsanError::Write(addr, 8));
}
}
pub fn write_n(&mut self, emulator: &Emulator, addr: GuestAddr, size: usize) {
unsafe {
if self.enabled() && asan_giovese_storeN(emulator.g2h(addr), size) != 0 {
asan_giovese_report_and_crash(
1,
addr.into(),
size,
emulator.read_reg(Regs::Pc).unwrap_or(u64::MAX),
0,
emulator.read_reg(Regs::Sp).unwrap_or(u64::MAX),
);
}
if self.enabled() && AsanGiovese::is_invalid_access(emulator, addr, size) {
self.rt
.report_and_crash(emulator, AsanError::Write(addr, size));
}
}
#[allow(clippy::unused_self)]
pub fn poison(
&mut self,
emulator: &Emulator,
@@ -387,23 +648,22 @@ impl QemuAsanHelper {
size: usize,
poison: PoisonKind,
) {
unsafe { asan_giovese_poison_region(emulator.g2h(addr), size, poison.into()) };
self.rt.poison(emulator, addr, size, poison.into());
}
#[allow(clippy::unused_self)]
pub fn unpoison(&mut self, emulator: &Emulator, addr: GuestAddr, size: usize) {
unsafe { asan_giovese_unpoison_region(emulator.g2h(addr), size) };
AsanGiovese::unpoison(emulator, addr, size);
}
#[allow(clippy::unused_self)]
pub fn reset(&mut self) {
unsafe { asan_giovese_alloc_remove(0, u64::MAX) };
pub fn reset(&mut self, emulator: &Emulator) {
self.rt.rollback(emulator, self.detect_leaks);
}
}
impl Default for QemuAsanHelper {
fn default() -> Self {
Self::new(QemuInstrumentationFilter::None)
Self::new(QemuInstrumentationFilter::None, QemuAsanOptions::Snapshot)
}
}
@@ -414,6 +674,13 @@ where
const HOOKS_DO_SIDE_EFFECTS: bool = false;
fn init_hooks<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
hooks.syscalls(qasan_fake_syscall::<QT, S>);
}
fn first_exec<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
@@ -434,12 +701,17 @@ where
Some(trace_write8_asan::<QT, S>),
Some(trace_write_n_asan::<QT, S>),
);
hooks.syscalls(qasan_fake_syscall::<QT, S>);
}
fn post_exec(&mut self, _emulator: &Emulator, _input: &S::Input) {
self.reset();
fn pre_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
if self.empty {
self.rt.snapshot(emulator);
self.empty = false;
}
}
fn post_exec(&mut self, emulator: &Emulator, _input: &S::Input) {
self.reset(emulator);
}
}
@@ -636,7 +908,7 @@ where
&emulator,
a1 as GuestAddr,
a2 as usize,
PoisonKind::try_from(a3 as u8).unwrap(),
PoisonKind::try_from(a3 as i8).unwrap(),
);
}
QasanAction::UserPoison => {
@@ -651,10 +923,10 @@ where
}
}
QasanAction::Alloc => {
h.alloc(&emulator, a1, a2);
h.alloc(&emulator, a1 as GuestAddr, a2 as GuestAddr);
}
QasanAction::Dealloc => {
h.dealloc(&emulator, a1);
h.dealloc(&emulator, a1 as GuestAddr);
}
QasanAction::Enable => {
h.set_enabled(true);
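To make the new error path concrete, here is a hedged sketch of installing an `AsanErrorCallback` (signatures from the diff above; the reporting body is illustrative, and a `Debug` impl on `Interval` is assumed). Without a callback, `report_and_crash` falls back to `std::process::abort()`; with one, the fuzzer can inspect guest state via the `Emulator` before deciding how to die. Leak reports arrive from `rollback` at `post_exec` time when a leak-detecting option is chosen:

```rust
use libafl_qemu::{
    asan::{AsanError, QemuAsanHelper, QemuAsanOptions},
    helper::QemuInstrumentationFilter,
};

fn make_asan_helper() -> QemuAsanHelper {
    QemuAsanHelper::with_error_callback(
        QemuInstrumentationFilter::None,
        Box::new(|_emu, err| {
            // _emu: &Emulator, usable to dump registers or memory here.
            match err {
                AsanError::Read(addr, n) => eprintln!("invalid read of {n} bytes at {addr:#x}"),
                AsanError::Write(addr, n) => eprintln!("invalid write of {n} bytes at {addr:#x}"),
                AsanError::BadFree(addr, chunk) => eprintln!("bad free at {addr:#x} ({chunk:?})"),
                AsanError::MemLeak(interval) => eprintln!("leaked allocation {interval:?}"),
            }
        }),
        QemuAsanOptions::SnapshotDetectLeaks,
    )
}
```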


@@ -57,7 +57,7 @@ impl<S> QemuHelper<S> for QemuCmpLogHelper
where
S: UsesInput + HasMetadata,
{
fn init_hooks<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
fn first_exec<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
@@ -101,7 +101,7 @@ where
{
const HOOKS_DO_SIDE_EFFECTS: bool = false;
fn init_hooks<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
fn first_exec<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{


@@ -70,7 +70,7 @@ impl<S> QemuHelper<S> for QemuEdgeCoverageHelper
where
S: UsesInput + HasMetadata,
{
fn init_hooks<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
fn first_exec<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
@@ -129,7 +129,7 @@ where
{
const HOOKS_DO_SIDE_EFFECTS: bool = false;
fn init_hooks<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
fn first_exec<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{


@@ -27,6 +27,7 @@ where
OT: ObserversTuple<S>,
QT: QemuHelperTuple<S>,
{
first_exec: bool,
hooks: &'a mut QemuHooks<'a, QT, S>,
inner: InProcessExecutor<'a, H, OT, S>,
}
@@ -68,6 +69,7 @@ where
Z: HasObjective<OF, State = S>,
{
Ok(Self {
first_exec: true,
hooks,
inner: InProcessExecutor::new(harness_fn, observers, fuzzer, state, event_mgr)?,
})
@@ -111,6 +113,10 @@ where
input: &Self::Input,
) -> Result<ExitKind, Error> {
let emu = Emulator::new_empty();
if self.first_exec {
self.hooks.helpers().first_exec_all(self.hooks);
self.first_exec = false;
}
self.hooks.helpers_mut().pre_exec_all(&emu, input);
let r = self.inner.run_target(fuzzer, state, mgr, input);
self.hooks.helpers_mut().post_exec_all(&emu, input);
@@ -165,6 +171,7 @@ where
QT: QemuHelperTuple<S>,
SP: ShMemProvider,
{
first_exec: bool,
hooks: &'a mut QemuHooks<'a, QT, S>,
inner: InProcessForkExecutor<'a, H, OT, S, SP>,
}
@@ -213,6 +220,7 @@ where
assert!(!QT::HOOKS_DO_SIDE_EFFECTS, "When using QemuForkExecutor, the hooks must not do any side effect as they will happen in the child process and then discarded");
Ok(Self {
first_exec: true,
hooks,
inner: InProcessForkExecutor::new(
harness_fn,
@@ -265,6 +273,10 @@ where
input: &Self::Input,
) -> Result<ExitKind, Error> {
let emu = Emulator::new_empty();
if self.first_exec {
self.hooks.helpers().first_exec_all(self.hooks);
self.first_exec = false;
}
self.hooks.helpers_mut().pre_exec_all(&emu, input);
let r = self.inner.run_target(fuzzer, state, mgr, input);
self.hooks.helpers_mut().post_exec_all(&emu, input);


@@ -18,6 +18,12 @@ where
{
}
fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
}
fn pre_exec(&mut self, _emulator: &Emulator, _input: &S::Input) {}
fn post_exec(&mut self, _emulator: &Emulator, _input: &S::Input) {}
@@ -33,6 +39,10 @@ where
where
QT: QemuHelperTuple<S>;
fn first_exec_all<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>;
fn pre_exec_all(&mut self, _emulator: &Emulator, input: &S::Input);
fn post_exec_all(&mut self, _emulator: &Emulator, input: &S::Input);
@@ -50,6 +60,12 @@ where
{
}
fn first_exec_all<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
}
fn pre_exec_all(&mut self, _emulator: &Emulator, _input: &S::Input) {}
fn post_exec_all(&mut self, _emulator: &Emulator, _input: &S::Input) {}
@@ -71,6 +87,14 @@ where
self.1.init_hooks_all(hooks);
}
fn first_exec_all<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{
self.0.first_exec(hooks);
self.1.first_exec_all(hooks);
}
fn pre_exec_all(&mut self, emulator: &Emulator, input: &S::Input) {
self.0.pre_exec(emulator, input);
self.1.pre_exec_all(emulator, input);
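The `first_exec` addition to the `QemuHelper`/`QemuHelperTuple` traits (defaulted above, fanned out by `first_exec_all`) lets helpers defer hook registration until the executor's first `run_target`, rather than doing everything in `init_hooks` at construction; `QemuAsanHelper` uses this to keep its syscall hook in `init_hooks` while moving the read/write hooks to `first_exec`. A hedged sketch of a custom helper using the new hook point — `LazyHookHelper` is hypothetical, and the trait's other items are assumed to keep their defaults:

```rust
use libafl::{inputs::UsesInput, state::HasMetadata};
use libafl_qemu::{
    helper::{QemuHelper, QemuHelperTuple},
    hooks::QemuHooks,
};

/// Hypothetical helper that installs its instrumentation lazily.
#[derive(Debug, Default)]
pub struct LazyHookHelper;

impl<S> QemuHelper<S> for LazyHookHelper
where
    S: UsesInput + HasMetadata,
{
    fn first_exec<QT>(&self, _hooks: &QemuHooks<'_, QT, S>)
    where
        QT: QemuHelperTuple<S>,
    {
        // Called exactly once per executor, right before the first
        // run_target(); register edge/cmp/read-write hooks here, as
        // QemuEdgeCoverageHelper and QemuCmpLogHelper now do.
    }
}
```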


@@ -1,3 +0,0 @@
*.o
*~
interval-tree-test


@@ -1,20 +0,0 @@
From interval_tree_generic.h:
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h


@@ -1,19 +0,0 @@
#ifndef __INT_COMPILER_H__
#define __INT_COMPILER_H__
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#ifndef container_of
#define container_of(ptr, type, member) \
({ \
const typeof(((type *)0)->member) *__mptr = (ptr); \
(type *)((char *)__mptr - offsetof(type, member)); \
})
#endif
#endif /* __INT_COMPILER_H__ */


@@ -1,2 +0,0 @@
#include "interval_tree_generic.h"
#include "rbtree.inl"


@@ -1,176 +0,0 @@
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
include/linux/interval_tree_generic.h
*/
#include <stdbool.h>
#include "rbtree_augmented.h"
/*
* Template for implementing interval trees
*
* ITSTRUCT: struct type of the interval tree nodes
* ITRB: name of struct rb_node field within ITSTRUCT
* ITTYPE: type of the interval endpoints
* ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
* ITSTART(n): start endpoint of ITSTRUCT node n
* ITLAST(n): last endpoint of ITSTRUCT node n
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
* Note - before using this, please consider if non-generic version
* (interval_tree.h) would work for you...
*/
#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITSTART, \
ITLAST, ITSTATIC, ITPREFIX) \
\
/* Callbacks for augmented rbtree insert and remove */ \
\
static inline ITTYPE ITPREFIX##_compute_subtree_last(ITSTRUCT *node) { \
ITTYPE max = ITLAST(node), subtree_last; \
if (node->ITRB.rb_left) { \
subtree_last = rb_entry(node->ITRB.rb_left, ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) { max = subtree_last; } \
} \
if (node->ITRB.rb_right) { \
subtree_last = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) max = subtree_last; \
} \
return max; \
} \
\
RB_DECLARE_CALLBACKS(static, ITPREFIX##_augment, ITSTRUCT, ITRB, ITTYPE, \
ITSUBTREE, ITPREFIX##_compute_subtree_last) \
\
/* Insert / remove interval nodes from the tree */ \
\
ITSTATIC void ITPREFIX##_insert(ITSTRUCT *node, struct rb_root *root) { \
struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
\
while (*link) { \
rb_parent = *link; \
parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
if (parent->ITSUBTREE < last) parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
else \
link = &parent->ITRB.rb_right; \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
rb_insert_augmented(&node->ITRB, root, &ITPREFIX##_augment); \
} \
\
ITSTATIC void ITPREFIX##_remove(ITSTRUCT *node, struct rb_root *root) { \
rb_erase_augmented(&node->ITRB, root, &ITPREFIX##_augment); \
} \
\
/* \
* Iterate over intervals intersecting [start;last] \
* \
* Note that a node's interval intersects [start;last] iff: \
* Cond1: ITSTART(node) <= last \
* and \
* Cond2: start <= ITLAST(node) \
*/ \
\
static ITSTRUCT *ITPREFIX##_subtree_search(ITSTRUCT *node, ITTYPE start, \
ITTYPE last) { \
while (true) { \
/* \
* Loop invariant: start <= node->ITSUBTREE \
* (Cond2 is satisfied by one of the subtree nodes) \
*/ \
if (node->ITRB.rb_left) { \
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, ITSTRUCT, ITRB); \
if (start <= left->ITSUBTREE) { \
/* \
* Some nodes in left subtree satisfy Cond2. \
* Iterate to find the leftmost such node N. \
* If it also satisfies Cond1, that's the \
* match we are looking for. Otherwise, there \
* is no matching interval as nodes to the \
* right of N can't satisfy Cond1 either. \
*/ \
node = left; \
continue; \
} \
} \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
if (node->ITRB.rb_right) { \
node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
if (start <= node->ITSUBTREE) continue; \
} \
} \
return NULL; /* No match */ \
} \
} \
\
ITSTATIC ITSTRUCT *ITPREFIX##_iter_first(struct rb_root *root, ITTYPE start, \
ITTYPE last) { \
ITSTRUCT *node; \
\
if (!root->rb_node) return NULL; \
node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) return NULL; \
return ITPREFIX##_subtree_search(node, start, last); \
} \
\
ITSTATIC ITSTRUCT *ITPREFIX##_iter_next(ITSTRUCT *node, ITTYPE start, \
ITTYPE last) { \
struct rb_node *rb = node->ITRB.rb_right, *prev; \
\
while (true) { \
/* \
* Loop invariants: \
* Cond1: ITSTART(node) <= last \
* rb == node->ITRB.rb_right \
* \
* First, search right subtree if suitable \
*/ \
if (rb) { \
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
if (start <= right->ITSUBTREE) \
return ITPREFIX##_subtree_search(right, start, last); \
} \
\
/* Move up the tree until we come from a node's left child */ \
do { \
rb = rb_parent(&node->ITRB); \
if (!rb) { return NULL; } \
prev = &node->ITRB; \
node = rb_entry(rb, ITSTRUCT, ITRB); \
rb = node->ITRB.rb_right; \
} while (prev == rb); \
\
/* Check if the node intersects [start;last] */ \
if (last < ITSTART(node)) { /* !Cond1 */ \
return NULL; \
} else if (start <= ITLAST(node)) { /* Cond2 */ \
return node; \
} \
} \
}


@@ -1,109 +0,0 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree.h
To use rbtrees you'll have to implement your own insert and search cores.
This will avoid us to use callbacks and to drop drammatically performances.
I know it's not the cleaner way, but in C (not in C++) to get
performances and genericity...
See Documentation/rbtree.txt for documentation and samples.
*/
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
#include <stddef.h>
#include "compiler.h"
struct rb_node {
unsigned long __rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
struct rb_root {
struct rb_node *rb_node;
};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT \
(struct rb_root) { \
NULL, \
}
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) ((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) ((node)->__rb_parent_color = (unsigned long)(node))
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link) {
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
*rb_link = node;
}
#define rb_entry_safe(ptr, type, member) \
({ \
typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
/**
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ \
n = rb_entry_safe(rb_next_postorder(&pos->field), typeof(*pos), \
field); \
1; \
}); \
pos = n)
#endif /* _LINUX_RBTREE_H */


@@ -1,564 +0,0 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/lib/rbtree.c
*/
#include <stdbool.h>
#include "rbtree_augmented.h"
/*
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
*
* 1) A node is either red or black
* 2) The root is black
* 3) All leaves (NULL) are black
* 4) Both children of every red node are black
* 5) Every simple path from root to leaves contains the same number
* of black nodes.
*
* 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
* consecutive red nodes in a path and every red node is therefore followed by
* a black. So if B is the number of black nodes on every simple path (as per
* 5), then the longest possible path due to 4 is 2B.
*
* We shall indicate color with case, where black nodes are uppercase and red
* nodes will be lowercase. Unknown color nodes shall be drawn as red within
* parentheses and have some accompanying text comment.
*/
static inline void rb_set_black(struct rb_node *rb)
{
rb->__rb_parent_color |= RB_BLACK;
}
static inline struct rb_node *rb_red_parent(struct rb_node *red)
{
return (struct rb_node *)red->__rb_parent_color;
}
/*
* Helper function for rotations:
* - old's parent and color get assigned to new
* - old gets assigned new as a parent and 'color' as a color.
*/
static inline void
__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
struct rb_root *root, int color)
{
struct rb_node *parent = rb_parent(old);
new->__rb_parent_color = old->__rb_parent_color;
rb_set_parent_color(old, new, color);
__rb_change_child(old, new, parent, root);
}
static inline void
__rb_insert(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
while (true) {
/*
* Loop invariant: node is red
*
* If there is a black parent, we are done.
* Otherwise, take some corrective action as we don't
* want a red root or two consecutive red nodes.
*/
if (!parent) {
rb_set_parent_color(node, NULL, RB_BLACK);
break;
} else if (rb_is_black(parent))
break;
gparent = rb_red_parent(parent);
tmp = gparent->rb_right;
if (parent != tmp) { /* parent == gparent->rb_left */
if (tmp && rb_is_red(tmp)) {
/*
* Case 1 - color flips
*
* G g
* / \ / \
* p u --> P U
* / /
* n n
*
* However, since g's parent might be red, and
* 4) does not allow this, we need to recurse
* at g.
*/
rb_set_parent_color(tmp, gparent, RB_BLACK);
rb_set_parent_color(parent, gparent, RB_BLACK);
node = gparent;
parent = rb_parent(node);
rb_set_parent_color(node, parent, RB_RED);
continue;
}
tmp = parent->rb_right;
if (node == tmp) {
/*
* Case 2 - left rotate at parent
*
* G G
* / \ / \
* p U --> n U
* \ /
* n p
*
* This still leaves us in violation of 4), the
* continuation into Case 3 will fix that.
*/
parent->rb_right = tmp = node->rb_left;
node->rb_left = parent;
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
rb_set_parent_color(parent, node, RB_RED);
augment_rotate(parent, node);
parent = node;
tmp = node->rb_right;
}
/*
* Case 3 - right rotate at gparent
*
* G P
* / \ / \
* p U --> n g
* / \
* n U
*/
gparent->rb_left = tmp; /* == parent->rb_right */
parent->rb_right = gparent;
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
augment_rotate(gparent, parent);
break;
} else {
tmp = gparent->rb_left;
if (tmp && rb_is_red(tmp)) {
/* Case 1 - color flips */
rb_set_parent_color(tmp, gparent, RB_BLACK);
rb_set_parent_color(parent, gparent, RB_BLACK);
node = gparent;
parent = rb_parent(node);
rb_set_parent_color(node, parent, RB_RED);
continue;
}
tmp = parent->rb_left;
if (node == tmp) {
/* Case 2 - right rotate at parent */
parent->rb_left = tmp = node->rb_right;
node->rb_right = parent;
if (tmp)
rb_set_parent_color(tmp, parent,
RB_BLACK);
rb_set_parent_color(parent, node, RB_RED);
augment_rotate(parent, node);
parent = node;
tmp = node->rb_left;
}
/* Case 3 - left rotate at gparent */
gparent->rb_right = tmp; /* == parent->rb_left */
parent->rb_left = gparent;
if (tmp)
rb_set_parent_color(tmp, gparent, RB_BLACK);
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
augment_rotate(gparent, parent);
break;
}
}
}
/*
* Inline version for rb_erase() use - we want to be able to inline
* and eliminate the dummy_rotate callback there
*/
static inline void
____rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
while (true) {
/*
* Loop invariants:
* - node is black (or NULL on first iteration)
* - node is not the root (parent is not NULL)
* - All leaf paths going through parent and node have a
* black node count that is 1 lower than other leaf paths.
*/
sibling = parent->rb_right;
if (node != sibling) { /* node == parent->rb_left */
if (rb_is_red(sibling)) {
/*
* Case 1 - left rotate at parent
*
* P S
* / \ / \
* N s --> p Sr
* / \ / \
* Sl Sr N Sl
*/
parent->rb_right = tmp1 = sibling->rb_left;
sibling->rb_left = parent;
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
augment_rotate(parent, sibling);
sibling = tmp1;
}
tmp1 = sibling->rb_right;
if (!tmp1 || rb_is_black(tmp1)) {
tmp2 = sibling->rb_left;
if (!tmp2 || rb_is_black(tmp2)) {
/*
* Case 2 - sibling color flip
* (p could be either color here)
*
* (p) (p)
* / \ / \
* N S --> N s
* / \ / \
* Sl Sr Sl Sr
*
* This leaves us violating 5) which
* can be fixed by flipping p to black
* if it was red, or by recursing at p.
* p is red when coming from Case 1.
*/
rb_set_parent_color(sibling, parent,
RB_RED);
if (rb_is_red(parent))
rb_set_black(parent);
else {
node = parent;
parent = rb_parent(node);
if (parent)
continue;
}
break;
}
/*
* Case 3 - right rotate at sibling
* (p could be either color here)
*
* (p) (p)
* / \ / \
* N S --> N Sl
* / \ \
* sl Sr s
* \
* Sr
*/
sibling->rb_left = tmp1 = tmp2->rb_right;
tmp2->rb_right = sibling;
parent->rb_right = tmp2;
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
augment_rotate(sibling, tmp2);
tmp1 = sibling;
sibling = tmp2;
}
/*
* Case 4 - left rotate at parent + color flips
* (p and sl could be either color here.
* After rotation, p becomes black, s acquires
* p's color, and sl keeps its color)
*
* (p) (s)
* / \ / \
* N S --> P Sr
* / \ / \
* (sl) sr N (sl)
*/
parent->rb_right = tmp2 = sibling->rb_left;
sibling->rb_left = parent;
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
__rb_rotate_set_parents(parent, sibling, root,
RB_BLACK);
augment_rotate(parent, sibling);
break;
} else {
sibling = parent->rb_left;
if (rb_is_red(sibling)) {
/* Case 1 - right rotate at parent */
parent->rb_left = tmp1 = sibling->rb_right;
sibling->rb_right = parent;
rb_set_parent_color(tmp1, parent, RB_BLACK);
__rb_rotate_set_parents(parent, sibling, root,
RB_RED);
augment_rotate(parent, sibling);
sibling = tmp1;
}
tmp1 = sibling->rb_left;
if (!tmp1 || rb_is_black(tmp1)) {
tmp2 = sibling->rb_right;
if (!tmp2 || rb_is_black(tmp2)) {
/* Case 2 - sibling color flip */
rb_set_parent_color(sibling, parent,
RB_RED);
if (rb_is_red(parent))
rb_set_black(parent);
else {
node = parent;
parent = rb_parent(node);
if (parent)
continue;
}
break;
}
/* Case 3 - right rotate at sibling */
sibling->rb_right = tmp1 = tmp2->rb_left;
tmp2->rb_left = sibling;
parent->rb_left = tmp2;
if (tmp1)
rb_set_parent_color(tmp1, sibling,
RB_BLACK);
augment_rotate(sibling, tmp2);
tmp1 = sibling;
sibling = tmp2;
}
/* Case 4 - left rotate at parent + color flips */
parent->rb_left = tmp2 = sibling->rb_right;
sibling->rb_right = parent;
rb_set_parent_color(tmp1, sibling, RB_BLACK);
if (tmp2)
rb_set_parent(tmp2, parent);
__rb_rotate_set_parents(parent, sibling, root,
RB_BLACK);
augment_rotate(parent, sibling);
break;
}
}
}
/* Non-inline version for rb_erase_augmented() use */
void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
____rb_erase_color(parent, root, augment_rotate);
}
/*
* Non-augmented rbtree manipulation functions.
*
* We use dummy augmented callbacks here, and have the compiler optimize them
* out of the rb_insert_color() and rb_erase() function definitions.
*/
static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
static const struct rb_augment_callbacks dummy_callbacks = {
dummy_propagate, dummy_copy, dummy_rotate
};
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
__rb_insert(node, root, dummy_rotate);
}
void rb_erase(struct rb_node *node, struct rb_root *root)
{
struct rb_node *rebalance;
rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
if (rebalance)
____rb_erase_color(rebalance, root, dummy_rotate);
}
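/*
 * Usage sketch (illustrative only, not part of the original source): a
 * typical insertion walks down from the root to find the empty link slot,
 * then calls rb_link_node() followed by rb_insert_color(). rb_link_node()
 * and rb_entry() are assumed to come from the companion rbtree.h header;
 * 'struct mytype' and my_insert() are hypothetical names.
 *
 *    struct mytype { struct rb_node node; int key; };
 *
 *    void my_insert(struct rb_root *root, struct mytype *new)
 *    {
 *        struct rb_node **link = &root->rb_node, *parent = NULL;
 *        while (*link) {
 *            parent = *link;
 *            if (new->key < rb_entry(parent, struct mytype, node)->key)
 *                link = &parent->rb_left;
 *            else
 *                link = &parent->rb_right;
 *        }
 *        rb_link_node(&new->node, parent, link);
 *        rb_insert_color(&new->node, root);
 *    }
 */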
/*
* Augmented rbtree manipulation functions.
*
* This instantiates the same __always_inline functions as in the non-augmented
* case, but this time with user-defined callbacks.
*/
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
__rb_insert(node, root, augment_rotate);
}
/*
* This function returns the first node (in sort order) of the tree.
*/
struct rb_node *rb_first(const struct rb_root *root)
{
struct rb_node *n;
n = root->rb_node;
if (!n) {
return NULL;
}
while (n->rb_left) {
n = n->rb_left;
}
return n;
}
struct rb_node *rb_last(const struct rb_root *root)
{
struct rb_node *n;
n = root->rb_node;
if (!n) {
return NULL;
}
while (n->rb_right) {
n = n->rb_right;
}
return n;
}
struct rb_node *rb_next(const struct rb_node *node)
{
struct rb_node *parent;
if (RB_EMPTY_NODE(node))
return NULL;
/*
* If we have a right-hand child, go down and then left as far
* as we can.
*/
if (node->rb_right) {
node = node->rb_right;
while (node->rb_left) {
node = node->rb_left;
}
return (struct rb_node *)node;
}
/*
* No right-hand children. Everything down and left is smaller than us,
* so any 'next' node must be in the general direction of our parent.
* Go up the tree; any time the ancestor is a right-hand child of its
* parent, keep going up. First time it's a left-hand child of its
* parent, said parent is our 'next' node.
*/
while ((parent = rb_parent(node)) && node == parent->rb_right) {
node = parent;
}
return parent;
}
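/*
 * Usage sketch (illustrative only): rb_first() plus repeated rb_next()
 * visits every node in ascending sort order; a full traversal is O(n).
 * 'struct mytype' and visit() are hypothetical names.
 *
 *    struct rb_node *n;
 *    for (n = rb_first(root); n; n = rb_next(n))
 *        visit(rb_entry(n, struct mytype, node));
 */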
struct rb_node *rb_prev(const struct rb_node *node)
{
struct rb_node *parent;
if (RB_EMPTY_NODE(node))
return NULL;
/*
* If we have a left-hand child, go down and then right as far
* as we can.
*/
if (node->rb_left) {
node = node->rb_left;
while (node->rb_right) {
node = node->rb_right;
}
return (struct rb_node *)node;
}
/*
* No left-hand children. Go up till we find an ancestor which
* is a right-hand child of its parent.
*/
while ((parent = rb_parent(node)) && node == parent->rb_left) {
node = parent;
}
return parent;
}
void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root)
{
struct rb_node *parent = rb_parent(victim);
/* Set the surrounding nodes to point to the replacement */
__rb_change_child(victim, new, parent, root);
if (victim->rb_left) {
rb_set_parent(victim->rb_left, new);
}
if (victim->rb_right) {
rb_set_parent(victim->rb_right, new);
}
/* Copy the pointers/colour from the victim to the replacement */
*new = *victim;
}
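/*
 * Usage note (illustrative only): rb_replace_node() performs no rebalancing,
 * so the replacement must sort at exactly the same position as the victim,
 * e.g. swapping in an updated copy that carries the same key:
 *
 *    new->key = victim->key;
 *    rb_replace_node(&victim->node, &new->node, root);
 */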
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
{
for (;;) {
if (node->rb_left) {
node = node->rb_left;
}
else if (node->rb_right) {
node = node->rb_right;
}
else {
return (struct rb_node *)node;
}
}
}
struct rb_node *rb_next_postorder(const struct rb_node *node)
{
const struct rb_node *parent;
if (!node) {
return NULL;
}
parent = rb_parent(node);
/* If we're sitting on node, we've already seen our children */
if (parent && node == parent->rb_left && parent->rb_right) {
/* If we are the parent's left node, go to the parent's right
* node then all the way down to the left */
return rb_left_deepest_node(parent->rb_right);
} else
/* Otherwise we are the parent's right node, and the parent
* should be next */
return (struct rb_node *)parent;
}
struct rb_node *rb_first_postorder(const struct rb_root *root)
{
if (!root->rb_node) {
return NULL;
}
return rb_left_deepest_node(root->rb_node);
}
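/*
 * Usage sketch (illustrative only): post-order traversal visits children
 * before their parent, so each node can be freed as soon as it is visited.
 * Fetch the successor before freeing, since rb_next_postorder() reads the
 * current node's parent pointer. 'struct mytype' and my_free() are
 * hypothetical names.
 *
 *    struct rb_node *n = rb_first_postorder(root);
 *    while (n) {
 *        struct rb_node *next = rb_next_postorder(n);
 *        my_free(rb_entry(n, struct mytype, node));
 *        n = next;
 *    }
 */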

View File

@ -1,236 +0,0 @@
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
(C) 2012 Michel Lespinasse <walken@google.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
linux/include/linux/rbtree_augmented.h
*/
#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H
#include <stddef.h>
#include "compiler.h"
#include "rbtree.h"
/*
* Please note - only struct rb_augment_callbacks and the prototypes for
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
* The rest are implementation details you are not expected to depend on.
*
* See Documentation/rbtree.txt for documentation and samples.
*/
struct rb_augment_callbacks {
void (*propagate)(struct rb_node *node, struct rb_node *stop);
void (*copy)(struct rb_node *old, struct rb_node *new);
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old,
struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
* rb_insert_augmented() instead of the usual rb_insert_color() call.
* If rb_insert_augmented() rebalances the rbtree, it will call back into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
static inline void rb_insert_augmented(
struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment) {
__rb_insert_augmented(node, root, augment->rotate);
}
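/*
 * Insertion sketch (illustrative only): walk down to the slot, refreshing
 * the cached augmented value on the way, then link and rebalance through
 * rb_insert_augmented(). 'struct mynode' and mynode_callbacks are the
 * hypothetical names from the RB_DECLARE_CALLBACKS() example below.
 *
 *    void mynode_insert(struct rb_root *root, struct mynode *new)
 *    {
 *        struct rb_node **link = &root->rb_node, *parent = NULL;
 *        while (*link) {
 *            struct mynode *cur = rb_entry(*link, struct mynode, rb);
 *            parent = *link;
 *            if (cur->subtree_max < new->value)
 *                cur->subtree_max = new->value;
 *            link = new->value < cur->value ?
 *                       &parent->rb_left : &parent->rb_right;
 *        }
 *        new->subtree_max = new->value;
 *        rb_link_node(&new->rb, parent, link);
 *        rb_insert_augmented(&new->rb, root, &mynode_callbacks);
 *    }
 */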
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, rbtype, \
rbaugmented, rbcompute) \
static inline void rbname##_propagate(struct rb_node *rb, \
struct rb_node *stop) { \
while (rb != stop) { \
rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
rbtype augmented = rbcompute(node); \
if (node->rbaugmented == augmented) { break; } \
node->rbaugmented = augmented; \
rb = rb_parent(&node->rbfield); \
} \
} \
static inline void rbname##_copy(struct rb_node *rb_old, \
struct rb_node *rb_new) { \
rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
new->rbaugmented = old->rbaugmented; \
} \
static void rbname##_rotate(struct rb_node *rb_old, \
struct rb_node *rb_new) { \
rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
new->rbaugmented = old->rbaugmented; \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
rbname##_propagate, rbname##_copy, rbname##_rotate};
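/*
 * Instantiation sketch (illustrative only): an augmented tree in which each
 * node caches the maximum 'value' found in its subtree. All names below are
 * hypothetical.
 *
 *    struct mynode {
 *        struct rb_node rb;
 *        long value;
 *        long subtree_max;
 *    };
 *
 *    static long mynode_compute_max(struct mynode *node)
 *    {
 *        long max = node->value;
 *        if (node->rb.rb_left) {
 *            struct mynode *l = rb_entry(node->rb.rb_left, struct mynode, rb);
 *            if (l->subtree_max > max) max = l->subtree_max;
 *        }
 *        if (node->rb.rb_right) {
 *            struct mynode *r = rb_entry(node->rb.rb_right, struct mynode, rb);
 *            if (r->subtree_max > max) max = r->subtree_max;
 *        }
 *        return max;
 *    }
 *
 *    RB_DECLARE_CALLBACKS(static, mynode_callbacks, struct mynode, rb,
 *                         long, subtree_max, mynode_compute_max)
 */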
#define RB_RED 0
#define RB_BLACK 1
#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
#define __rb_color(pc) ((pc) & 1)
#define __rb_is_black(pc) __rb_color(pc)
#define __rb_is_red(pc) (!__rb_color(pc))
#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) {
rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
}
static inline void rb_set_parent_color(struct rb_node *rb, struct rb_node *p,
int color) {
rb->__rb_parent_color = (unsigned long)p | color;
}
static inline void __rb_change_child(struct rb_node *old, struct rb_node *new,
struct rb_node *parent,
struct rb_root *root) {
if (parent) {
if (parent->rb_left == old) {
parent->rb_left = new;
} else {
parent->rb_right = new;
}
} else {
root->rb_node = new;
}
}
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
void (*augment_rotate)(struct rb_node *old,
struct rb_node *new));
static inline struct rb_node *__rb_erase_augmented(
struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment) {
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
*
* Note that if there is one child it must be red due to 5)
* and node must be black due to 4). We adjust colors locally
* so as to bypass __rb_erase_color() later on.
*/
pc = node->__rb_parent_color;
parent = __rb_parent(pc);
__rb_change_child(node, child, parent, root);
if (child) {
child->__rb_parent_color = pc;
rebalance = NULL;
} else
rebalance = __rb_is_black(pc) ? parent : NULL;
tmp = parent;
} else if (!child) {
/* Still case 1, but this time the child is node->rb_left */
tmp->__rb_parent_color = pc = node->__rb_parent_color;
parent = __rb_parent(pc);
__rb_change_child(node, tmp, parent, root);
rebalance = NULL;
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
tmp = child->rb_left;
if (!tmp) {
/*
* Case 2: node's successor is its right child
*
*    (n)          (s)
*    / \          / \
*  (x) (s)  ->  (x) (c)
*        \
*        (c)
*/
parent = successor;
child2 = successor->rb_right;
augment->copy(node, successor);
} else {
/*
* Case 3: node's successor is leftmost under
* node's right child subtree
*
*    (n)          (s)
*    / \          / \
*  (x) (y)  ->  (x) (y)
*      /            /
*    (p)          (p)
*    /            /
*  (s)          (c)
*    \
*    (c)
*/
do {
parent = successor;
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
parent->rb_left = child2 = successor->rb_right;
successor->rb_right = child;
rb_set_parent(child, successor);
augment->copy(node, successor);
augment->propagate(parent, successor);
}
successor->rb_left = tmp = node->rb_left;
rb_set_parent(tmp, successor);
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
rebalance = NULL;
} else {
unsigned long pc2 = successor->__rb_parent_color;
successor->__rb_parent_color = pc;
rebalance = __rb_is_black(pc2) ? parent : NULL;
}
tmp = successor;
}
augment->propagate(tmp, NULL);
return rebalance;
}
static inline void rb_erase_augmented(
struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment) {
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
if (rebalance) { __rb_erase_color(rebalance, root, augment->rotate); }
}
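/*
 * Usage sketch (illustrative only), reusing the hypothetical
 * mynode_callbacks from the RB_DECLARE_CALLBACKS() example above:
 *
 *    rb_erase_augmented(&victim->rb, root, &mynode_callbacks);
 */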
#endif /* _LINUX_RBTREE_AUGMENTED_H */

View File

@ -105,7 +105,7 @@ impl QemuSnapshotHelper {
}
#[must_use]
pub fn with_mmap_limit(mmap_limit: usize, stop_execution: Box<StopExecutionCallback>) -> Self {
pub fn with_mmap_limit(mmap_limit: usize, stop_execution: StopExecutionCallback) -> Self {
Self {
accesses: ThreadLocal::new(),
maps: MappingInfo::default(),
@ -187,6 +187,7 @@ impl QemuSnapshotHelper {
}
pub fn access(&mut self, addr: GuestAddr, size: usize) {
// ASSUMPTION: the access can only cross 2 pages
debug_assert!(size > 0);
let page = addr & SNAPSHOT_PAGE_MASK;
self.page_access(page);
@ -472,7 +473,7 @@ impl<S> QemuHelper<S> for QemuSnapshotHelper
where
S: UsesInput + HasMetadata,
{
fn init_hooks<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
fn first_exec<QT>(&self, hooks: &QemuHooks<'_, QT, S>)
where
QT: QemuHelperTuple<S>,
{