Refactor QEMU snapshot helper and add mmap memory limit (#844)

* waiting for an interval tree...

* Rework QEMU user memory snapshots

* Fix pcrel to 1

* clippy

* clippy
This commit is contained in:
Andrea Fioraldi 2022-10-19 18:46:37 +02:00 committed by GitHub
parent 41cc717dfc
commit 4ccd85f568
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 365 additions and 42 deletions

View File

@ -40,7 +40,7 @@ libc = "0.2"
strum = "0.24"
strum_macros = "0.24"
syscall-numbers = "3.0"
bio = "0.41"
meminterval = "0.1"
thread_local = "1.1.4"
capstone = "0.11.0"
#pyo3 = { version = "0.15", features = ["extension-module"], optional = true }

View File

@ -3,7 +3,7 @@ use which::which;
const QEMU_URL: &str = "https://github.com/AFLplusplus/qemu-libafl-bridge";
const QEMU_DIRNAME: &str = "qemu-libafl-bridge";
const QEMU_REVISION: &str = "7dfe8278546b94ae3d18517cfd97bc20c717ed66";
const QEMU_REVISION: &str = "35d36bf8fa2d483965a57ee0c7d7a997e8798273";
fn build_dep_check(tools: &[&str]) {
for tool in tools {

View File

@ -4,16 +4,16 @@ use std::{
sync::Mutex,
};
use bio::data_structures::interval_tree::IntervalTree;
use libafl::{inputs::Input, state::HasMetadata};
use meminterval::{Interval, IntervalTree};
use thread_local::ThreadLocal;
use crate::{
emu::{Emulator, MmapPerms},
emu::{Emulator, MmapPerms, SyscallHookResult},
helper::{QemuHelper, QemuHelperTuple},
hooks::QemuHooks,
GuestAddr, SYS_fstat, SYS_fstatfs, SYS_futex, SYS_getrandom, SYS_mprotect, SYS_mremap,
SYS_pread64, SYS_read, SYS_readlinkat, SYS_statfs,
SYS_munmap, SYS_pread64, SYS_read, SYS_readlinkat, SYS_statfs,
};
#[cfg(cpu_target = "arm")]
use crate::{SYS_fstatat64, SYS_mmap2};
@ -23,6 +23,8 @@ use crate::{SYS_mmap, SYS_newfstatat};
pub const SNAPSHOT_PAGE_SIZE: usize = 4096;
pub const SNAPSHOT_PAGE_MASK: GuestAddr = !(SNAPSHOT_PAGE_SIZE as GuestAddr - 1);
pub type StopExecutionCallback = Box<dyn FnMut(&mut QemuSnapshotHelper, &Emulator)>;
#[derive(Debug)]
pub struct SnapshotPageInfo {
pub addr: GuestAddr,
@ -46,14 +48,43 @@ impl SnapshotAccessInfo {
}
}
#[derive(Debug)]
/// Bookkeeping for one mapped guest-memory region tracked by the snapshot helper.
#[derive(Clone, Default, Debug)]
pub struct MemoryRegionInfo {
    /// Guest permissions of the region; `None` when unknown.
    pub perms: Option<MmapPerms>,
    /// Set when the region's permissions were modified relative to the recorded
    /// state (e.g. forced to RW for restoring page contents) and must be
    /// reverted when the snapshot is re-applied.
    pub changed: bool,
}
/// A set of guest memory mappings, kept as an interval tree, plus their
/// accumulated size in bytes (used to enforce the mmap limit).
#[derive(Clone, Default, Debug)]
pub struct MappingInfo {
    /// Interval tree from guest address ranges to per-region info.
    pub tree: IntervalTree<GuestAddr, MemoryRegionInfo>,
    /// Total size in bytes of all tracked mappings.
    pub size: usize,
}
pub struct QemuSnapshotHelper {
pub accesses: ThreadLocal<UnsafeCell<SnapshotAccessInfo>>,
pub new_maps: Mutex<IntervalTree<GuestAddr, Option<MmapPerms>>>,
pub maps: MappingInfo,
pub new_maps: Mutex<MappingInfo>,
pub pages: HashMap<GuestAddr, SnapshotPageInfo>,
pub brk: GuestAddr,
pub mmap_start: GuestAddr,
pub mmap_limit: usize,
pub stop_execution: Option<StopExecutionCallback>,
pub empty: bool,
pub accurate_unmap: bool,
}
impl core::fmt::Debug for QemuSnapshotHelper {
    /// Manual `Debug` impl: `stop_execution` holds a closure and cannot be
    /// derived, so it is omitted and signalled via `finish_non_exhaustive`.
    /// Also includes `maps` and `accurate_unmap`, which the previous
    /// implementation silently dropped.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("QemuSnapshotHelper")
            .field("accesses", &self.accesses)
            .field("maps", &self.maps)
            .field("new_maps", &self.new_maps)
            .field("pages", &self.pages)
            .field("brk", &self.brk)
            .field("mmap_start", &self.mmap_start)
            .field("mmap_limit", &self.mmap_limit)
            .field("empty", &self.empty)
            .field("accurate_unmap", &self.accurate_unmap)
            .finish_non_exhaustive()
    }
}
impl QemuSnapshotHelper {
@ -61,14 +92,38 @@ impl QemuSnapshotHelper {
pub fn new() -> Self {
Self {
accesses: ThreadLocal::new(),
new_maps: Mutex::new(IntervalTree::new()),
maps: MappingInfo::default(),
new_maps: Mutex::new(MappingInfo::default()),
pages: HashMap::default(),
brk: 0,
mmap_start: 0,
mmap_limit: 0,
stop_execution: None,
empty: true,
accurate_unmap: false,
}
}
/// Create a helper that enforces a limit of `mmap_limit` bytes of tracked
/// mappings: when the total mapped size exceeds the limit, `stop_execution`
/// is invoked (see `add_mapped`). A `mmap_limit` of 0 disables the check.
///
/// NOTE(review): `StopExecutionCallback` is itself a `Box<dyn FnMut ...>`, so
/// `Box<StopExecutionCallback>` double-boxes the callback — confirm this
/// matches the type of the `stop_execution` field.
#[must_use]
pub fn with_mmap_limit(mmap_limit: usize, stop_execution: Box<StopExecutionCallback>) -> Self {
    Self {
        accesses: ThreadLocal::new(),
        maps: MappingInfo::default(),
        new_maps: Mutex::new(MappingInfo::default()),
        pages: HashMap::default(),
        brk: 0,
        mmap_start: 0,
        mmap_limit,
        stop_execution: Some(stop_execution),
        empty: true,
        accurate_unmap: false,
    }
}
/// Enable accurate unmap tracking: guest `munmap` syscalls are no longer
/// filtered out when they target pre-snapshot regions (the filter hook is
/// skipped when this flag is set).
pub fn use_accurate_unmapping(&mut self) {
    self.accurate_unmap = true;
}
#[allow(clippy::uninit_assumed_init)]
pub fn snapshot(&mut self, emulator: &Emulator) {
self.brk = emulator.get_brk();
@ -83,7 +138,8 @@ impl QemuSnapshotHelper {
private: map.is_priv(),
data: None,
};
if map.flags().is_w() {
if map.flags().is_r() {
// TODO: also save the contents of non-readable pages, not only R pages
unsafe {
info.data = Some(Box::new(core::mem::zeroed()));
emulator.read_mem(addr, &mut info.data.as_mut().unwrap()[..]);
@ -92,8 +148,18 @@ impl QemuSnapshotHelper {
self.pages.insert(addr, info);
addr += SNAPSHOT_PAGE_SIZE as GuestAddr;
}
self.maps.tree.insert(
map.start()..map.end(),
MemoryRegionInfo {
perms: Some(map.flags()),
changed: false,
},
);
self.maps.size += (map.end() - map.start()) as usize;
}
self.empty = false;
*self.new_maps.lock().unwrap() = self.maps.clone();
}
pub fn page_access(&mut self, page: GuestAddr) {
@ -113,6 +179,13 @@ impl QemuSnapshotHelper {
}
}
/// Mark `page` as dirty for the calling thread, presumably bypassing the
/// fast-path cache maintained by `page_access` — TODO confirm against that
/// method's body.
pub fn page_access_no_cache(&self, page: GuestAddr) {
    unsafe {
        // SAFETY: the UnsafeCell lives in a ThreadLocal, so only the current
        // thread can access this record; no aliasing mutable references exist.
        let acc = self.accesses.get_or_default().get();
        (*acc).dirty.insert(page);
    }
}
pub fn access(&mut self, addr: GuestAddr, size: usize) {
debug_assert!(size > 0);
let page = addr & SNAPSHOT_PAGE_MASK;
@ -124,65 +197,268 @@ impl QemuSnapshotHelper {
}
/// Restore the guest to the snapshot state: write back the saved contents of
/// every dirty page, undo mapping/permission changes made since the snapshot,
/// and reset `brk`/`mmap_start`.
///
/// Only the panic messages were corrected ("Cannot restored" -> "Cannot
/// restore"); all other logic is unchanged.
pub fn reset(&mut self, emulator: &Emulator) {
    {
        let new_maps = self.new_maps.get_mut().unwrap();

        // First pass: restore dirty pages that still live inside a currently
        // known mapping. Pages whose mapping vanished are kept in the dirty
        // set (retain returns true) and handled after reset_maps() below.
        for acc in self.accesses.iter_mut() {
            unsafe { &mut (*acc.get()) }.dirty.retain(|page| {
                if let Some(info) = self.pages.get_mut(page) {
                    // TODO avoid duplicated memcpy
                    if let Some(data) = info.data.as_ref() {
                        // Change segment perms to RW if not writeable in current mapping
                        let mut found = false;
                        for entry in new_maps
                            .tree
                            .query_mut(*page..(page + SNAPSHOT_PAGE_SIZE as GuestAddr))
                        {
                            if !entry.value.perms.unwrap_or(MmapPerms::None).is_w() {
                                drop(emulator.mprotect(
                                    entry.interval.start,
                                    (entry.interval.end - entry.interval.start) as usize,
                                    MmapPerms::ReadWrite,
                                ));
                                entry.value.changed = true;
                                entry.value.perms = Some(MmapPerms::ReadWrite);
                            }
                            found = true;
                        }
                        if !found {
                            return true; // Restore later
                        }
                        unsafe { emulator.write_mem(*page, &data[..]) };
                    } else {
                        panic!("Cannot restore a dirty but unsaved page");
                    }
                }
                false
            });
        }
    }

    self.reset_maps(emulator);

    // Second pass, run after reset_maps() re-established regions that were
    // mapped at snapshot time but unmapped during execution: restore the
    // remaining dirty pages, temporarily forcing their mappings to RW.
    for acc in self.accesses.iter_mut() {
        for page in unsafe { &(*acc.get()).dirty } {
            for entry in self
                .maps
                .tree
                .query_mut(*page..(page + SNAPSHOT_PAGE_SIZE as GuestAddr))
            {
                if !entry.value.perms.unwrap_or(MmapPerms::None).is_w() && !entry.value.changed
                {
                    drop(emulator.mprotect(
                        entry.interval.start,
                        (entry.interval.end - entry.interval.start) as usize,
                        MmapPerms::ReadWrite,
                    ));
                    entry.value.changed = true;
                }
            }
            if let Some(info) = self.pages.get_mut(page) {
                // TODO avoid duplicated memcpy
                if let Some(data) = info.data.as_ref() {
                    unsafe { emulator.write_mem(*page, &data[..]) };
                } else {
                    panic!("Cannot restore a dirty but unsaved page");
                }
            }
        }
        unsafe { (*acc.get()).clear() };
    }

    // Revert every permission change made above back to the snapshot perms.
    for entry in self.maps.tree.query_mut(0..GuestAddr::MAX) {
        if entry.value.changed {
            drop(emulator.mprotect(
                entry.interval.start,
                (entry.interval.end - entry.interval.start) as usize,
                entry.value.perms.unwrap(),
            ));
            entry.value.changed = false;
        }
    }

    emulator.set_brk(self.brk);
    emulator.set_mmap_start(self.mmap_start);
}
pub fn add_mapped(&mut self, start: GuestAddr, mut size: usize, perms: Option<MmapPerms>) {
/// Returns whether the guest may really `munmap` `[start, start + size)`.
///
/// Unmapping is allowed only when the (page-rounded) region does not overlap
/// any mapping recorded at snapshot time — those regions must survive so the
/// snapshot can be restored.
///
/// Takes `&self` (the previous `&mut self` was unnecessary: this is a pure
/// read of the snapshot map tree); callers holding `&mut` still work.
pub fn is_unmap_allowed(&self, start: GuestAddr, mut size: usize) -> bool {
    // Round the size up to the snapshot page granularity.
    if size % SNAPSHOT_PAGE_SIZE != 0 {
        size += SNAPSHOT_PAGE_SIZE - size % SNAPSHOT_PAGE_SIZE;
    }
    // Allowed iff no snapshot-time mapping intersects the region.
    self.maps
        .tree
        .query(start..(start + (size as GuestAddr)))
        .next()
        .is_none()
}
/// Record a new guest mapping of `size` bytes (rounded up to the snapshot
/// page size) at `start` with permissions `perms` in the current-state map.
///
/// If an mmap limit is configured (`mmap_limit != 0`) and the total tracked
/// size now exceeds it, the configured stop-execution callback is invoked.
pub fn add_mapped(&mut self, start: GuestAddr, mut size: usize, perms: Option<MmapPerms>) {
    let total_size = {
        if size % SNAPSHOT_PAGE_SIZE != 0 {
            size += SNAPSHOT_PAGE_SIZE - size % SNAPSHOT_PAGE_SIZE;
        }
        let mut mapping = self.new_maps.lock().unwrap();
        mapping.tree.insert(
            start..(start + (size as GuestAddr)),
            MemoryRegionInfo {
                perms,
                // New region differs from the snapshot state.
                changed: true,
            },
        );
        mapping.size += size;
        mapping.size
    }; // the new_maps lock is released here, before re-entering the helper

    if self.mmap_limit != 0 && total_size > self.mmap_limit {
        // Temporarily move the callback out so it can borrow `self` mutably.
        // `with_mmap_limit` always pairs a nonzero limit with a callback.
        let mut cb = self
            .stop_execution
            .take()
            .expect("mmap_limit is set but no stop_execution callback was provided");
        let emu = Emulator::new_empty();
        (cb)(self, &emu);
        self.stop_execution = Some(cb);
    }
}
/// Update the recorded permissions of the (page-rounded) region
/// `[start, start + size)` in the current-state (`new_maps`) view, splitting
/// overlapping intervals so that only the overlapped part takes the new perms.
pub fn change_mapped(&mut self, start: GuestAddr, mut size: usize, perms: Option<MmapPerms>) {
    // Round up to page granularity.
    if size % SNAPSHOT_PAGE_SIZE != 0 {
        size = size + (SNAPSHOT_PAGE_SIZE - size % SNAPSHOT_PAGE_SIZE);
    }
    let mut mapping = self.new_maps.lock().unwrap();
    let interval = Interval::new(start, start + (size as GuestAddr));
    // Collect overlaps first: the tree cannot be mutated while iterating
    // over a query.
    let mut found = vec![]; // TODO optimize
    for entry in mapping.tree.query(interval) {
        found.push((*entry.interval, entry.value.perms));
    }
    for (i, perms) in found {
        // `perms` here shadows the parameter: it is the overlapped entry's
        // OLD permissions, preserved for the non-overlapping remainders.
        let overlap = i.intersect(&interval).unwrap();
        mapping.tree.delete(i);
        // Re-insert the left remainder (before the changed region), if any.
        if i.start < overlap.start {
            mapping.tree.insert(
                i.start..overlap.start,
                MemoryRegionInfo {
                    perms,
                    changed: true,
                },
            );
        }
        // Re-insert the right remainder (after the changed region), if any.
        if i.end > overlap.end {
            mapping.tree.insert(
                overlap.end..i.end,
                MemoryRegionInfo {
                    perms,
                    changed: true,
                },
            );
        }
    }
    // Finally record the requested region with the new permissions.
    mapping.tree.insert(
        interval,
        MemoryRegionInfo {
            perms,
            changed: true,
        },
    );
}
/// Remove the (page-rounded) region `[start, start + size)` from the
/// current-state (`new_maps`) view, splitting partially-overlapped intervals
/// and keeping only the non-overlapping remainders.
pub fn remove_mapped(&mut self, start: GuestAddr, mut size: usize) {
    // Round up to page granularity.
    if size % SNAPSHOT_PAGE_SIZE != 0 {
        size = size + (SNAPSHOT_PAGE_SIZE - size % SNAPSHOT_PAGE_SIZE);
    }
    let mut mapping = self.new_maps.lock().unwrap();
    let interval = Interval::new(start, start + (size as GuestAddr));
    // Collect overlaps first: the tree cannot be mutated while iterating
    // over a query.
    let mut found = vec![]; // TODO optimize
    for entry in mapping.tree.query(interval) {
        found.push((*entry.interval, entry.value.perms));
    }
    for (i, perms) in found {
        let overlap = i.intersect(&interval).unwrap();
        mapping.tree.delete(i);
        // Mark every page of the whole old interval dirty so it gets
        // restored on reset. NOTE(review): this covers all of `i`, not just
        // the overlapped part — presumably deliberately conservative; confirm.
        for page in (i.start..i.end).step_by(SNAPSHOT_PAGE_SIZE) {
            self.page_access_no_cache(page);
        }
        // Re-insert the left remainder (before the removed region), if any.
        if i.start < overlap.start {
            mapping.tree.insert(
                i.start..overlap.start,
                MemoryRegionInfo {
                    perms,
                    changed: true,
                },
            );
        }
        // Re-insert the right remainder (after the removed region), if any.
        if i.end > overlap.end {
            mapping.tree.insert(
                overlap.end..i.end,
                MemoryRegionInfo {
                    perms,
                    changed: true,
                },
            );
        }
    }
}
pub fn reset_maps(&mut self, emulator: &Emulator) {
let new_maps = self.new_maps.get_mut().unwrap();
for r in new_maps.find(0..GuestAddr::MAX) {
let addr = r.interval().start;
let end = r.interval().end;
let perms = r.data();
let mut page = addr & SNAPSHOT_PAGE_MASK;
let mut prev = None;
while page < end {
if let Some(info) = self.pages.get(&page) {
if let Some((addr, size)) = prev {
drop(emulator.unmap(addr, size));
}
prev = None;
if let Some(p) = perms {
if info.perms != *p {
drop(emulator.mprotect(page, SNAPSHOT_PAGE_SIZE, info.perms));
}
}
} else if let Some((_, size)) = &mut prev {
*size += SNAPSHOT_PAGE_SIZE;
} else {
prev = Some((page, SNAPSHOT_PAGE_SIZE));
}
page += SNAPSHOT_PAGE_SIZE as GuestAddr;
for entry in self.maps.tree.query(0..GuestAddr::MAX) {
let mut found = vec![]; // TODO optimize
for overlap in new_maps.tree.query(*entry.interval) {
found.push((
*overlap.interval,
overlap.value.changed,
overlap.value.perms,
));
}
if let Some((addr, size)) = prev {
drop(emulator.unmap(addr, size));
if found.is_empty() {
//panic!("A pre-snapshot memory region was unmapped");
drop(emulator.map_fixed(
entry.interval.start,
(entry.interval.end - entry.interval.start) as usize,
entry.value.perms.unwrap(),
));
} else if found.len() == 1 && found[0].0 == *entry.interval {
if found[0].1 && found[0].2 != entry.value.perms {
drop(emulator.mprotect(
entry.interval.start,
(entry.interval.end - entry.interval.start) as usize,
entry.value.perms.unwrap(),
));
}
} else {
// TODO check for holes
drop(emulator.mprotect(
entry.interval.start,
(entry.interval.end - entry.interval.start) as usize,
entry.value.perms.unwrap(),
));
}
for (i, _, _) in found {
new_maps.tree.delete(i);
}
}
*new_maps = IntervalTree::new();
for entry in new_maps.tree.query(0..GuestAddr::MAX) {
drop(emulator.unmap(
entry.interval.start,
(entry.interval.end - entry.interval.start) as usize,
));
}
*new_maps = self.maps.clone();
}
}
@ -210,6 +486,9 @@ where
Some(trace_write_n_snapshot::<I, QT, S>),
);
if !self.accurate_unmap {
hooks.syscalls(filter_mmap_snapshot::<I, QT, S>);
}
hooks.after_syscalls(trace_mmap_snapshot::<I, QT, S>);
}
@ -288,6 +567,34 @@ pub fn trace_write_n_snapshot<I, QT, S>(
h.access(addr, size);
}
/// Pre-syscall hook that swallows guest `munmap` calls targeting regions that
/// were already mapped when the snapshot was taken: the syscall is skipped and
/// a successful return value (0) is faked, so snapshot mappings survive.
#[allow(clippy::too_many_arguments)]
#[allow(non_upper_case_globals)]
pub fn filter_mmap_snapshot<I, QT, S>(
    hooks: &mut QemuHooks<'_, I, QT, S>,
    _state: Option<&mut S>,
    sys_num: i32,
    a0: u64,
    a1: u64,
    _a2: u64,
    _a3: u64,
    _a4: u64,
    _a5: u64,
    _a6: u64,
    _a7: u64,
) -> SyscallHookResult
where
    I: Input,
    QT: QemuHelperTuple<I, S>,
{
    // Every syscall other than munmap passes through untouched.
    if i64::from(sys_num) != SYS_munmap {
        return SyscallHookResult::new(None);
    }
    let helper = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
    if helper.is_unmap_allowed(a0 as GuestAddr, a1 as usize) {
        // Not a snapshot-time region: let the real syscall run.
        SyscallHookResult::new(None)
    } else {
        // Skip the syscall and report success (0) to the guest.
        SyscallHookResult::new(Some(0))
    }
}
#[allow(clippy::too_many_arguments)]
#[allow(non_upper_case_globals)]
pub fn trace_mmap_snapshot<I, QT, S>(
@ -352,6 +659,8 @@ where
return result;
}
// TODO handle huge pages
#[cfg(cpu_target = "arm")]
if i64::from(sys_num) == SYS_mmap2 {
if let Ok(prot) = MmapPerms::try_from(a2 as i32) {
@ -360,11 +669,18 @@ where
}
} else if i64::from(sys_num) == SYS_mremap {
let h = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
h.remove_mapped(a0 as GuestAddr, a1 as usize);
h.add_mapped(result as GuestAddr, a2 as usize, None);
// TODO get the old permissions from the removed mapping
} else if i64::from(sys_num) == SYS_mprotect {
if let Ok(prot) = MmapPerms::try_from(a2 as i32) {
let h = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
h.add_mapped(a0 as GuestAddr, a2 as usize, Some(prot));
h.add_mapped(a0 as GuestAddr, a1 as usize, Some(prot));
}
} else if i64::from(sys_num) == SYS_munmap {
let h = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
if !h.accurate_unmap && !h.is_unmap_allowed(a0 as GuestAddr, a1 as usize) {
h.remove_mapped(a0 as GuestAddr, a1 as usize);
}
}
@ -376,11 +692,18 @@ where
}
} else if i64::from(sys_num) == SYS_mremap {
let h = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
h.remove_mapped(a0 as GuestAddr, a1 as usize);
h.add_mapped(result as GuestAddr, a2 as usize, None);
// TODO get the old permissions from the removed mapping
} else if i64::from(sys_num) == SYS_mprotect {
if let Ok(prot) = MmapPerms::try_from(a2 as i32) {
let h = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
h.add_mapped(a0 as GuestAddr, a2 as usize, Some(prot));
h.add_mapped(a0 as GuestAddr, a1 as usize, Some(prot));
}
} else if i64::from(sys_num) == SYS_munmap {
let h = hooks.match_helper_mut::<QemuSnapshotHelper>().unwrap();
if !h.accurate_unmap && !h.is_unmap_allowed(a0 as GuestAddr, a1 as usize) {
h.remove_mapped(a0 as GuestAddr, a1 as usize);
}
}
}