Block Device fast snapshot implementation.

Romain Malmain 2023-12-15 14:12:40 +01:00
parent c92d7c2ef6
commit 587303b513
11 changed files with 571 additions and 211 deletions

View File

@ -27,6 +27,7 @@
#include "qemu/option.h"
#include "trace.h"
#include "migration/misc.h"
#include "libafl_extras/syx-snapshot/syx-snapshot.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64
@ -42,6 +43,9 @@ typedef struct BlockBackendAioNotifier {
struct BlockBackend {
char *name;
//// --- Begin LibAFL code ---
guint name_hash;
//// --- End LibAFL code ---
int refcnt;
BdrvChild *root;
AioContext *ctx;
@ -714,6 +718,10 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
error_setg(errp, "Device with id '%s' already exists", name);
return false;
}
if (blk_by_name_hash(g_str_hash(name))) {
error_setg(errp, "Device with name hash '%x' already exists", g_str_hash(name));
return false;
}
if (bdrv_find_node(name)) {
error_setg(errp,
"Device name '%s' conflicts with an existing node name",
@ -722,6 +730,11 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
}
blk->name = g_strdup(name);
//// --- Begin LibAFL code ---
blk->name_hash = g_str_hash(blk->name);
//// --- End LibAFL code ---
QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
return true;
}
@ -753,6 +766,12 @@ const char *blk_name(const BlockBackend *blk)
return blk->name ?: "";
}
guint blk_name_hash(const BlockBackend* blk)
{
IO_CODE();
return blk->name_hash;
}
/*
* Return the BlockBackend with name @name if it exists, else null.
* @name must not be null.
@ -771,6 +790,22 @@ BlockBackend *blk_by_name(const char *name)
return NULL;
}
/*
* Return the BlockBackend with name hash @name_hash if it exists, else null.
*/
BlockBackend *blk_by_name_hash(guint name_hash)
{
BlockBackend *blk = NULL;
GLOBAL_STATE_CODE();
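/* Linear scan over the monitor-owned backends; name hashes are unique
 * here because monitor_add_blk rejects colliding names up front. */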
while ((blk = blk_next(blk)) != NULL) {
if (name_hash == blk->name_hash) {
return blk;
}
}
return NULL;
}
/*
* Return the BlockDriverState attached to @blk if any, else null.
*/
@ -1624,8 +1659,14 @@ static void coroutine_fn blk_aio_read_entry(void *opaque)
QEMUIOVector *qiov = rwco->iobuf;
assert(qiov->size == acb->bytes);
if (!syx_snapshot_cow_cache_read_entry(rwco->blk, rwco->offset, acb->bytes, qiov, 0, rwco->flags)) {
rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov,
0, rwco->flags);
} else {
rwco->ret = 0;
}
blk_aio_complete(acb);
}
@ -1636,8 +1677,14 @@ static void coroutine_fn blk_aio_write_entry(void *opaque)
QEMUIOVector *qiov = rwco->iobuf;
assert(!qiov || qiov->size == acb->bytes);
if (!syx_snapshot_cow_cache_write_entry(rwco->blk, rwco->offset, acb->bytes, qiov, 0, rwco->flags)) {
rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
qiov, 0, rwco->flags);
} else {
rwco->ret = 0;
}
blk_aio_complete(acb);
}
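Both AIO entry points above follow the same interception contract; here is a minimal sketch of that contract as a standalone helper (the helper name is illustrative and not part of the commit):

static int coroutine_fn example_cached_preadv(BlockBackend *blk,
                                              int64_t offset, int64_t bytes,
                                              QEMUIOVector *qiov,
                                              BdrvRequestFlags flags)
{
    /* The syx hook returns true when a CoW cache consumed the request;
     * in that case the caller reports success without touching the
     * regular backend path. */
    if (syx_snapshot_cow_cache_read_entry(blk, offset, bytes, qiov, 0, flags)) {
        return 0;
    }
    return blk_co_do_preadv_part(blk, offset, bytes, qiov, 0, flags);
}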

View File

@ -470,7 +470,7 @@ void QEMU_ERROR("code path is reachable")
/*
* Round number down to multiple. Requires that d be a power of 2 (see
* QEMU_ALIGN_DOWN for a safer but slower version on arbitrary
* numbers); works even if d is a smaller type than n.
*/
#ifndef ROUND_DOWN

View File

@ -48,6 +48,7 @@ void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk);
void blk_remove_all_bs(void);
BlockBackend *blk_by_name(const char *name);
BlockBackend *blk_by_name_hash(guint name_hash);
BlockBackend *blk_next(BlockBackend *blk);
BlockBackend *blk_all_next(BlockBackend *blk);
bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);

View File

@ -24,6 +24,7 @@
*/
const char *blk_name(const BlockBackend *blk);
guint blk_name_hash(const BlockBackend* blk);
BlockDriverState *blk_bs(BlockBackend *blk);

View File

@ -66,7 +66,8 @@ void libafl_sync_exit_cpu(void)
last_exit_reason.next_pc = 0;
}
bool libafl_exit_asap(void)
{
return expected_exit;
}

View File

@ -20,7 +20,7 @@ int libafl_qemu_remove_breakpoint(target_ulong pc);
enum libafl_exit_reason_kind {
BREAKPOINT = 0,
SYNC_BACKDOOR = 1,
};
struct libafl_exit_reason_breakpoint {

View File

@ -1,6 +1,7 @@
specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files(
'syx-snapshot/device-save.c',
'syx-snapshot/syx-snapshot.c',
'syx-snapshot/syx-cow-cache.c',
'syx-snapshot/channel-buffer-writeback.c',
)])
specific_ss.add(files('exit.c', 'hook.c', 'jit.c'))

View File

@ -0,0 +1,214 @@
#include "syx-cow-cache.h"
#include "sysemu/block-backend.h"
#define IS_POWER_OF_TWO(x) ((x != 0) && ((x & (x - 1)) == 0))
SyxCowCache* syx_cow_cache_new(void)
{
SyxCowCache* cache = g_new0(SyxCowCache, 1);
QTAILQ_INIT(&cache->layers);
return cache;
}
static gchar* g_array_element_ptr(GArray* array, guint position)
{
assert(position < array->len);
return array->data + position * g_array_get_element_size(array);
}
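/* Layers form a stack: the most recently pushed (highest) layer absorbs
 * all writes and is consulted first on reads. */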
void syx_cow_cache_push_layer(SyxCowCache* scc, uint64_t chunk_size, uint64_t max_size)
{
SyxCowCacheLayer* new_layer = g_new0(SyxCowCacheLayer, 1);
new_layer->cow_cache_devices = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);
new_layer->chunk_size = chunk_size;
new_layer->max_nb_chunks = max_size;
assert(IS_POWER_OF_TWO(chunk_size));
assert(!(max_size % chunk_size));
QTAILQ_INSERT_HEAD(&scc->layers, new_layer, next);
}
void syx_cow_cache_pop_layer(SyxCowCache* scc)
{
// TODO
}
static void flush_device_layer(gpointer _blk_name_hash, gpointer cache_device, gpointer _user_data)
{
SyxCowCacheDevice* sccd = (SyxCowCacheDevice*) cache_device;
g_hash_table_remove_all(sccd->positions);
g_array_set_size(sccd->data, 0);
}
void syx_cow_cache_flush_highest_layer(SyxCowCache* scc)
{
SyxCowCacheLayer* highest_layer = QTAILQ_FIRST(&scc->layers);
g_hash_table_foreach(highest_layer->cow_cache_devices, flush_device_layer, NULL);
}
void syx_cow_cache_move(SyxCowCache* lhs, SyxCowCache** rhs)
{
lhs->layers = (*rhs)->layers;
g_free(*rhs);
*rhs = NULL;
}
static bool read_chunk_from_cache_layer_device(SyxCowCacheDevice* sccd, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset)
{
gpointer data_position = NULL;
bool found = g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset), NULL, &data_position);
// cache hit
if (found) {
void* data_position_ptr = g_array_element_ptr(sccd->data, GPOINTER_TO_UINT(data_position));
assert(qemu_iovec_from_buf(qiov, qiov_offset, data_position_ptr, g_array_get_element_size(sccd->data)) == g_array_get_element_size(sccd->data));
}
return found;
}
// len must be smaller than the number of bytes from blk_offset to the next chunk-aligned offset.
// static void write_to_cache_layer_device_unaligned(SyxCowCacheDevice* sccd, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset, uint64_t len)
// {
// const uint64_t chunk_size = g_array_get_element_size(sccd->data);
//
// assert(ROUND_UP(blk_offset, chunk_size) - blk_offset <= len);
// assert(IS_POWER_OF_TWO(chunk_size));
//
// uint64_t blk_offset_aligned = ROUND_DOWN(blk_offset, chunk_size);
//
// gpointer data_position = NULL;
// bool found = g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset_aligned), NULL, &data_position);
//
// if (!found) {
// data_position = GUINT_TO_POINTER(sccd->data->len);
// sccd->data = g_array_set_size(sccd->data, sccd->data->len + 1);
// g_hash_table_insert(sccd->positions, GUINT_TO_POINTER(blk_offset), data_position);
// }
//
// void* data_position_ptr = g_array_element_ptr(sccd->data, GPOINTER_TO_UINT(data_position));
//
// assert(qemu_iovec_to_buf(qiov, qiov_offset, data_position_ptr, g_array_get_element_size(sccd->data)) ==
// g_array_get_element_size(sccd->data));
// }
// The cache layer is already allocated and the basic checks have been done by the caller.
static void write_chunk_to_cache_layer_device(SyxCowCacheDevice* sccd, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset)
{
const uint64_t chunk_size = g_array_get_element_size(sccd->data);
gpointer data_position = NULL;
bool found = g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset), NULL, &data_position);
if (!found) {
data_position = GUINT_TO_POINTER(sccd->data->len);
sccd->data = g_array_set_size(sccd->data, sccd->data->len + 1);
g_hash_table_insert(sccd->positions, GUINT_TO_POINTER(blk_offset), data_position);
}
void* data_position_ptr = g_array_element_ptr(sccd->data, GPOINTER_TO_UINT(data_position));
assert(qemu_iovec_to_buf(qiov, qiov_offset, data_position_ptr, chunk_size) ==
chunk_size);
}
static bool read_chunk_from_cache_layer(SyxCowCacheLayer* sccl, BlockBackend* blk, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset)
{
assert(!(qiov->size % sccl->chunk_size));
SyxCowCacheDevice* cache_entry = g_hash_table_lookup(sccl->cow_cache_devices, GINT_TO_POINTER(blk_name_hash(blk)));
// return early if nothing is registered
if (!cache_entry) {
return false;
}
assert(cache_entry && cache_entry->data);
// try to read cached pages in current layer if something is registered.
return read_chunk_from_cache_layer_device(cache_entry, qiov, qiov_offset, blk_offset);
}
// Returns false if the write could not be stored in the current layer.
static bool write_to_cache_layer(SyxCowCacheLayer* sccl, BlockBackend* blk, int64_t offset, int64_t bytes, QEMUIOVector* qiov)
{
if (qiov->size % sccl->chunk_size) {
// todo: determine if it is worth developing an unaligned access version.
printf("error: 0x%zx %% 0x%lx == 0x%lx\n", qiov->size, sccl->chunk_size, qiov->size % sccl->chunk_size);
exit(1);
}
SyxCowCacheDevice* cache_entry = g_hash_table_lookup(sccl->cow_cache_devices, GINT_TO_POINTER(blk_name_hash(blk)));
if (unlikely(!cache_entry)) {
cache_entry = g_new0(SyxCowCacheDevice, 1);
cache_entry->data = g_array_sized_new(false, false, sccl->chunk_size, INITIAL_NB_CHUNKS_PER_DEVICE);
cache_entry->positions = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);
g_hash_table_insert(sccl->cow_cache_devices, GINT_TO_POINTER(blk_name_hash(blk)), cache_entry);
}
assert(cache_entry && cache_entry->data);
if (cache_entry->data->len + (qiov->size / sccl->chunk_size) > sccl->max_nb_chunks) {
return false;
}
// Write the data into the cache, one chunk at a time.
uint64_t blk_offset = offset;
size_t qiov_offset = 0;
for (; qiov_offset < qiov->size; blk_offset += sccl->chunk_size, qiov_offset += sccl->chunk_size) {
write_chunk_to_cache_layer_device(cache_entry, qiov, qiov_offset, blk_offset);
}
return true;
}
void syx_cow_cache_read_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t _qiov_offset,
BdrvRequestFlags flags)
{
SyxCowCacheLayer* layer;
uint64_t blk_offset = offset;
size_t qiov_offset = 0;
uint64_t chunk_size = 0;
// printf("[%s] Read 0x%zx bytes @addr %lx\n", blk_name(blk), qiov->size, offset);
// First read the backing block device normally.
assert(blk_co_preadv(blk, offset, bytes, qiov, flags) >= 0);
// Then overlay the chunks that were previously written to (and therefore cached).
if (!QTAILQ_EMPTY(&scc->layers)) {
for (; qiov_offset < qiov->size; blk_offset += chunk_size, qiov_offset += chunk_size) {
QTAILQ_FOREACH(layer, &scc->layers, next) {
chunk_size = layer->chunk_size;
if (read_chunk_from_cache_layer(layer, blk, qiov, qiov_offset, blk_offset)) {
break;
}
}
}
}
}
bool syx_cow_cache_write_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
SyxCowCacheLayer* layer;
// printf("[%s] Write 0x%zx bytes @addr %lx\n", blk_name(blk), qiov->size, offset);
layer = QTAILQ_FIRST(&scc->layers);
if (layer) {
assert(write_to_cache_layer(layer, blk, offset, bytes, qiov));
return true;
} else {
return false;
}
}

View File

@ -0,0 +1,45 @@
#pragma once
// Rewritten CoW cache for block devices, heavily inspired by the kAFL/Nyx implementation.
#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "block/block.h"
#define INITIAL_NB_CHUNKS_PER_DEVICE (1024 * 64)
typedef struct SyxCowCacheDevice {
GArray* data;
GHashTable* positions; // blk_offset -> data_position
} SyxCowCacheDevice;
typedef struct SyxCowCacheLayer SyxCowCacheLayer;
typedef struct SyxCowCacheLayer {
GHashTable *cow_cache_devices; // H(device) -> SyxCowCacheDevice
uint64_t chunk_size;
uint64_t max_nb_chunks;
QTAILQ_ENTRY(SyxCowCacheLayer) next;
} SyxCowCacheLayer;
typedef struct SyxCowCache {
QTAILQ_HEAD(, SyxCowCacheLayer) layers;
} SyxCowCache;
SyxCowCache* syx_cow_cache_new(void);
// lhs <- rhs
// rhs is freed and nulled.
void syx_cow_cache_move(SyxCowCache* lhs, SyxCowCache** rhs);
void syx_cow_cache_push_layer(SyxCowCache* scc, uint64_t chunk_size, uint64_t max_size);
void syx_cow_cache_pop_layer(SyxCowCache* scc);
void syx_cow_cache_flush_highest_layer(SyxCowCache* scc);
void syx_cow_cache_read_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
bool syx_cow_cache_write_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
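A hypothetical round-trip through this API (sketch only; blk, qiov, offset and bytes are assumed to come from the caller and must be chunk-aligned, as the implementation asserts):

static void example_cow_cache_roundtrip(BlockBackend *blk, QEMUIOVector *qiov,
                                        int64_t offset, int64_t bytes)
{
    SyxCowCache *scc = syx_cow_cache_new();

    /* One layer with the defaults this commit uses elsewhere:
     * 64-byte chunks, at most 1024 * 1024 chunks per device. */
    syx_cow_cache_push_layer(scc, 64, 1024 * 1024);

    /* Writes are absorbed by the highest layer; false means the layer is
     * full and the caller must fall back to the real device. */
    if (syx_cow_cache_write_entry(scc, blk, offset, bytes, qiov, 0, 0)) {
        /* Reads hit the backing device first, then cached chunks are
         * overlaid on top of the result. */
        syx_cow_cache_read_entry(scc, blk, offset, bytes, qiov, 0, 0);
    }

    /* Dropping the cached writes restores the device's pristine view. */
    syx_cow_cache_flush_highest_layer(scc);
}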

View File

@ -16,6 +16,48 @@
#define TARGET_NEXT_PAGE_ADDR(p) \
((typeof(p))(((uintptr_t) p + TARGET_PAGE_SIZE) & TARGET_PAGE_MASK))
/**
* Saved ramblock
*/
typedef struct SyxSnapshotRAMBlock {
uint8_t *ram; // RAM block
uint64_t used_length; // Length of the ram block
} SyxSnapshotRAMBlock;
/**
* A root snapshot representation.
*/
typedef struct SyxSnapshotRoot {
GHashTable* rbs_snapshot; // hash map: H(rb) -> SyxSnapshotRAMBlock
DeviceSaveState* dss;
} SyxSnapshotRoot;
/**
* A list of dirty pages with their old data.
*/
typedef struct SyxSnapshotDirtyPage {
ram_addr_t offset_within_rb;
uint8_t *data;
} SyxSnapshotDirtyPage;
typedef struct SyxSnapshotDirtyPageList {
SyxSnapshotDirtyPage *dirty_pages;
uint64_t length;
} SyxSnapshotDirtyPageList;
/**
* A snapshot increment. It is used to quickly
* save a VM state.
*/
typedef struct SyxSnapshotIncrement {
// Back to root snapshot if NULL
struct SyxSnapshotIncrement *parent;
DeviceSaveState *dss;
GHashTable *rbs_dirty_pages; // hash map: H(rb) -> SyxSnapshotDirtyPageList
} SyxSnapshotIncrement;
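/* Increments chain back to the root snapshot through parent; restoration
 * walks this chain and falls back to the root copy of a page when no
 * increment holds it (see get_dirty_page_from_addr_rec below). */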
SyxSnapshotState syx_snapshot_state = {0};
static MemoryRegion* mr_to_enable = NULL;
@ -48,7 +90,7 @@ static RAMBlock* ramblock_lookup(gpointer rb_idstr_hash)
}
// Root snapshot API
static SyxSnapshotRoot* syx_snapshot_root_new(DeviceSnapshotKind kind, char** devices);
static void syx_snapshot_root_free(SyxSnapshotRoot* root);
struct rb_dirty_list_to_page_args {
@ -78,25 +120,37 @@ struct rb_check_memory_args {
uint64_t nb_inconsistent_pages; // OUT
};
void syx_snapshot_init(bool cached_bdrvs) {
uint64_t page_size = TARGET_PAGE_SIZE;
syx_snapshot_state.page_size = page_size;
syx_snapshot_state.page_mask = ((uint64_t) -1) << __builtin_ctz(page_size);
syx_snapshot_state.tracked_snapshots = syx_snapshot_tracker_init();
if (cached_bdrvs) {
syx_snapshot_state.before_fuzz_cache = syx_cow_cache_new();
syx_cow_cache_push_layer(syx_snapshot_state.before_fuzz_cache, SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE, SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS);
}
syx_snapshot_state.is_enabled = false;
}
SyxSnapshot *syx_snapshot_new(bool track, bool is_active_bdrv_cache, DeviceSnapshotKind kind, char **devices) {
SyxSnapshot *snapshot = g_new0(SyxSnapshot, 1);
snapshot->root_snapshot = syx_snapshot_root_new(kind, devices);
snapshot->last_incremental_snapshot = NULL;
snapshot->rbs_dirty_list = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
(GDestroyNotify) g_hash_table_remove_all);
snapshot->bdrvs_cow_cache = syx_cow_cache_new();
if (is_active_bdrv_cache) {
syx_cow_cache_move(snapshot->bdrvs_cow_cache, &syx_snapshot_state.before_fuzz_cache);
syx_snapshot_state.active_bdrv_cache_snapshot = snapshot;
} else {
syx_cow_cache_push_layer(snapshot->bdrvs_cow_cache, SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE, SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS);
}
if (track) {
syx_snapshot_track(&syx_snapshot_state.tracked_snapshots, snapshot);
@ -123,7 +177,7 @@ void syx_snapshot_free(SyxSnapshot* snapshot)
g_hash_table_remove_all(snapshot->rbs_dirty_list);
syx_snapshot_root_free(snapshot->root_snapshot);
g_free(snapshot);
}
@ -136,57 +190,55 @@ static void destroy_ramblock_snapshot(gpointer root_snapshot)
g_free(snapshot_rb);
}
static SyxSnapshotRoot* syx_snapshot_root_new(DeviceSnapshotKind kind, char **devices) {
SyxSnapshotRoot* root = g_new0(SyxSnapshotRoot, 1);
RAMBlock *block;
RAMBlock *inner_block;
DeviceSaveState *dss = device_save_kind(kind, devices);
root->rbs_snapshot = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, destroy_ramblock_snapshot);
root->dss = dss;
RAMBLOCK_FOREACH(block) {
RAMBLOCK_FOREACH(inner_block) {
if (block != inner_block && inner_block->idstr_hash == block->idstr_hash) {
SYX_ERROR("Hash collision detected on RAMBlocks %s and %s, snapshotting will not work correctly.", inner_block->idstr, block->idstr);
SYX_ERROR("Hash collision detected on RAMBlocks %s and %s, snapshotting will not work correctly.",
inner_block->idstr, block->idstr);
exit(1);
}
}
SyxSnapshotRAMBlock *snapshot_rb = g_new(SyxSnapshotRAMBlock, 1);
snapshot_rb->used_length = block->used_length;
snapshot_rb->ram = g_new(uint8_t, block->used_length);
memcpy(snapshot_rb->ram, block->host, block->used_length);
g_hash_table_insert(root->rbs_snapshot, GINT_TO_POINTER(block->idstr_hash), snapshot_rb);
}
return root;
}
static void syx_snapshot_root_free(SyxSnapshotRoot *root) {
g_hash_table_destroy(root->rbs_snapshot);
g_free(root);
}
SyxSnapshotTracker syx_snapshot_tracker_init(void) {
SyxSnapshotTracker tracker = {
.length = 0,
.capacity = SYX_SNAPSHOT_LIST_INIT_SIZE,
.tracked_snapshots = g_new(SyxSnapshot*, SYX_SNAPSHOT_LIST_INIT_SIZE)
};
return tracker;
}
void syx_snapshot_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot) {
if (tracker->length == tracker->capacity) {
tracker->capacity *= SYX_SNAPSHOT_LIST_GROW_FACTOR;
tracker->tracked_snapshots = g_realloc(tracker->tracked_snapshots, tracker->capacity * sizeof(SyxSnapshot *));
}
assert(tracker->length < tracker->capacity);
@ -195,12 +247,11 @@ void syx_snapshot_track(SyxSnapshotTracker* tracker, SyxSnapshot* snapshot)
tracker->length++;
}
void syx_snapshot_stop_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot) {
for (uint64_t i = 0; i < tracker->length; ++i) {
if (tracker->tracked_snapshots[i] == snapshot) {
for (uint64_t j = i + 1; j < tracker->length; ++j) {
tracker->tracked_snapshots[j - 1] = tracker->tracked_snapshots[j];
}
tracker->length--;
return;
@ -211,11 +262,11 @@ void syx_snapshot_stop_track(SyxSnapshotTracker* tracker, SyxSnapshot* snapshot)
abort();
}
static void
rb_save_dirty_addr_to_table(gpointer offset_within_rb, gpointer unused, gpointer rb_dirty_list_to_page_args_ptr) {
struct rb_dirty_list_to_page_args *args = rb_dirty_list_to_page_args_ptr;
RAMBlock *rb = args->rb;
SyxSnapshotDirtyPage *dirty_page = &args->dirty_page_list->dirty_pages[*args->table_idx];
dirty_page->offset_within_rb = (ram_addr_t) offset_within_rb;
memcpy((gpointer) dirty_page->data, rb->host + (ram_addr_t) offset_within_rb, syx_snapshot_state.page_size);
@ -223,37 +274,36 @@ static void rb_save_dirty_addr_to_table(gpointer offset_within_rb, gpointer unus
*args->table_idx += 1;
}
static void rb_dirty_list_to_dirty_pages(gpointer rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr,
gpointer rbs_dirty_pages_ptr) {
GHashTable *rbs_dirty_pages = rbs_dirty_pages_ptr;
GHashTable *rb_dirty_list = rb_dirty_list_hash_table_ptr;
RAMBlock *rb = ramblock_lookup(rb_idstr_hash);
if (rb) {
SyxSnapshotDirtyPageList *dirty_page_list = g_new(SyxSnapshotDirtyPageList, 1);
dirty_page_list->length = g_hash_table_size(rb_dirty_list);
dirty_page_list->dirty_pages = g_new(SyxSnapshotDirtyPage, dirty_page_list->length);
uint64_t *ctr = g_new0(uint64_t, 1);
struct rb_dirty_list_to_page_args dirty_list_to_page_args = {
.rb = rb,
.table_idx = ctr,
.dirty_page_list = dirty_page_list
};
g_hash_table_foreach(rb_dirty_list, rb_save_dirty_addr_to_table, &dirty_list_to_page_args);
g_hash_table_insert(rbs_dirty_pages, rb_idstr_hash, dirty_page_list);
g_free(dirty_list_to_page_args.table_idx);
} else {
SYX_ERROR("Impossible to find RAMBlock with pages marked as dirty.");
}
}
static void destroy_snapshot_dirty_page_list(gpointer snapshot_dirty_page_list_ptr) {
SyxSnapshotDirtyPageList *snapshot_dirty_page_list = snapshot_dirty_page_list_ptr;
for (uint64_t i = 0; i < snapshot_dirty_page_list->length; ++i) {
g_free(snapshot_dirty_page_list->dirty_pages[i].data);
@ -263,26 +313,26 @@ static void destroy_snapshot_dirty_page_list(gpointer snapshot_dirty_page_list_p
g_free(snapshot_dirty_page_list);
}
void syx_snapshot_increment_push(SyxSnapshot *snapshot, DeviceSnapshotKind kind, char **devices) {
SyxSnapshotIncrement *increment = g_new0(SyxSnapshotIncrement, 1);
increment->parent = snapshot->last_incremental_snapshot;
snapshot->last_incremental_snapshot = increment;
increment->rbs_dirty_pages = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
destroy_snapshot_dirty_page_list);
g_hash_table_foreach(snapshot->rbs_dirty_list, rb_dirty_list_to_dirty_pages, increment->rbs_dirty_pages);
increment->dss = device_save_kind(kind, devices);
g_hash_table_remove_all(snapshot->rbs_dirty_list);
}
static SyxSnapshotDirtyPage *
get_dirty_page_from_addr_rec(SyxSnapshotIncrement *increment, RAMBlock *rb, ram_addr_t offset_within_rb) {
if (increment == NULL) {
return NULL;
}
SyxSnapshotDirtyPageList *dpl = g_hash_table_lookup(increment->rbs_dirty_pages, GINT_TO_POINTER(rb->idstr_hash));
if (dpl) {
for (uint64_t i = 0; i < dpl->length; ++i) {
@ -292,23 +342,24 @@ static SyxSnapshotDirtyPage* get_dirty_page_from_addr_rec(SyxSnapshotIncrement*
}
}
return get_dirty_page_from_addr_rec(increment->parent, rb, offset_within_rb);
}
static void restore_dirty_page_to_increment(gpointer offset_within_rb, gpointer _unused, gpointer args_ptr) {
struct rb_page_increment_restore_args *args = args_ptr;
RAMBlock *rb = args->rb;
SyxSnapshot *snapshot = args->snapshot;
SyxSnapshotIncrement *increment = args->increment;
ram_addr_t offset = (ram_addr_t) offset_within_rb;
SyxSnapshotDirtyPage *dp = get_dirty_page_from_addr_rec(increment, rb, offset);
if (dp) {
memcpy(rb->host + offset, dp->data, syx_snapshot_state.page_size);
} else {
SyxSnapshotRAMBlock *rrb = g_hash_table_lookup(snapshot->root_snapshot->rbs_snapshot,
GINT_TO_POINTER(rb->idstr_hash));
assert(rrb);
memcpy(rb->host + offset, rrb->ram, syx_snapshot_state.page_size);
@ -316,32 +367,30 @@ static void restore_dirty_page_to_increment(gpointer offset_within_rb, gpointer
}
static void restore_rb_to_increment(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer args_ptr) {
struct rb_increment_restore_args *args = args_ptr;
GHashTable *rb_dirty_pages_hash_table = rb_dirty_pages_hash_table_ptr;
RAMBlock *rb = ramblock_lookup(rb_idstr_hash);
struct rb_page_increment_restore_args page_args = {
.snapshot = args->snapshot,
.increment = args->increment,
.rb = rb
};
g_hash_table_foreach(rb_dirty_pages_hash_table, restore_dirty_page_to_increment, &page_args);
}
static void restore_to_increment(SyxSnapshot *snapshot, SyxSnapshotIncrement *increment) {
struct rb_increment_restore_args args = {
.snapshot = snapshot,
.increment = increment
};
g_hash_table_foreach(snapshot->rbs_dirty_list, restore_rb_to_increment, &args);
}
void syx_snapshot_increment_pop(SyxSnapshot *snapshot) {
SyxSnapshotIncrement *last_increment = snapshot->last_incremental_snapshot;
device_restore_all(last_increment->dss);
restore_to_increment(snapshot, last_increment);
@ -352,9 +401,8 @@ void syx_snapshot_increment_pop(SyxSnapshot* snapshot)
syx_snapshot_dirty_list_flush(snapshot);
}
void syx_snapshot_increment_restore_last(SyxSnapshot *snapshot) {
SyxSnapshotIncrement *last_increment = snapshot->last_incremental_snapshot;
device_restore_all(last_increment->dss);
restore_to_increment(snapshot, last_increment);
@ -362,28 +410,25 @@ void syx_snapshot_increment_restore_last(SyxSnapshot* snapshot)
syx_snapshot_dirty_list_flush(snapshot);
}
static SyxSnapshotIncrement *syx_snapshot_increment_free(SyxSnapshotIncrement *increment) {
SyxSnapshotIncrement *parent_increment = increment->parent;
g_hash_table_destroy(increment->rbs_dirty_pages);
device_free_all(increment->dss);
g_free(increment);
return parent_increment;
}
static void syx_snapshot_dirty_list_flush(SyxSnapshot *snapshot) {
g_hash_table_foreach(snapshot->rbs_dirty_list, empty_rb_dirty_list, (gpointer) snapshot);
}
static inline void syx_snapshot_dirty_list_add_internal(RAMBlock *rb, ram_addr_t offset) {
assert((offset & syx_snapshot_state.page_mask) == offset); // offsets should always be page-aligned.
for (uint64_t i = 0; i < syx_snapshot_state.tracked_snapshots.length; ++i) {
SyxSnapshot *snapshot = syx_snapshot_state.tracked_snapshots.tracked_snapshots[i];
GHashTable *rb_dirty_list = g_hash_table_lookup(snapshot->rbs_dirty_list, GINT_TO_POINTER(rb->idstr_hash));
if (unlikely(!rb_dirty_list)) {
#ifdef SYX_SNAPSHOT_DEBUG
@ -401,8 +446,7 @@ static inline void syx_snapshot_dirty_list_add_internal(RAMBlock* rb, ram_addr_t
}
}
bool syx_snapshot_is_enabled(void) {
return syx_snapshot_state.is_enabled;
}
@ -428,15 +472,14 @@ __attribute__((target("no-3dnow,no-sse,no-mmx"),no_caller_saved_registers)) void
*/
// host_addr should be page-aligned.
void syx_snapshot_dirty_list_add_hostaddr(void *host_addr) {
// early check to know whether we should log the page access or not
if (!syx_snapshot_is_enabled()) {
return;
}
ram_addr_t offset;
RAMBlock *rb = qemu_ram_block_from_host((void *) host_addr, true, &offset);
#ifdef SYX_SNAPSHOT_DEBUG
SYX_PRINTF("Should mark offset 0x%lx as dirty\n", offset);
@ -449,8 +492,7 @@ void syx_snapshot_dirty_list_add_hostaddr(void* host_addr)
syx_snapshot_dirty_list_add_internal(rb, offset);
}
void syx_snapshot_dirty_list_add_hostaddr_range(void *host_addr, uint64_t len) {
// early check to know whether we should log the page access or not
if (!syx_snapshot_is_enabled()) {
return;
@ -460,7 +502,7 @@ void syx_snapshot_dirty_list_add_hostaddr_range(void* host_addr, uint64_t len)
int64_t len_signed = (int64_t) len;
syx_snapshot_dirty_list_add_hostaddr(QEMU_ALIGN_PTR_DOWN(host_addr, syx_snapshot_state.page_size));
void *next_page_addr = TARGET_NEXT_PAGE_ADDR(host_addr);
assert(next_page_addr > host_addr);
assert(QEMU_PTR_IS_ALIGNED(next_page_addr, TARGET_PAGE_SIZE));
@ -469,7 +511,7 @@ void syx_snapshot_dirty_list_add_hostaddr_range(void* host_addr, uint64_t len)
host_addr += len_to_next_page;
len_signed -= len_to_next_page;
while (len_signed > 0) {
assert(QEMU_PTR_IS_ALIGNED(host_addr, TARGET_PAGE_SIZE));
syx_snapshot_dirty_list_add_hostaddr(host_addr);
@ -477,22 +519,20 @@ void syx_snapshot_dirty_list_add_hostaddr_range(void* host_addr, uint64_t len)
}
}
static void empty_rb_dirty_list(gpointer _rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr, gpointer _user_data) {
GHashTable *rb_dirty_hash_table = rb_dirty_list_hash_table_ptr;
g_hash_table_remove_all(rb_dirty_hash_table);
}
static void root_restore_rb_page(gpointer offset_within_rb, gpointer _unused, gpointer root_restore_args_ptr) {
struct rb_page_root_restore_args *args = root_restore_args_ptr;
RAMBlock *rb = args->rb;
SyxSnapshotRAMBlock *snapshot_rb = args->snapshot_rb;
// safe cast because ram_addr_t is also an alias to void*
void *host_rb_restore = rb->host + (ram_addr_t) offset_within_rb;
void *host_snapshot_rb_restore = (gpointer) snapshot_rb->ram + (ram_addr_t) offset_within_rb;
#ifdef SYX_SNAPSHOT_DEBUG
SYX_PRINTF("\t[%s] Restore at offset 0x%lx of size %lu...\n", rb->idstr, (uint64_t) offset_within_rb, syx_snapshot_state.page_size);
@ -502,18 +542,18 @@ static void root_restore_rb_page(gpointer offset_within_rb, gpointer _unused, gp
//TODO: manage special case of TSEG.
}
static void root_restore_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer snapshot_ptr) {
SyxSnapshot *snapshot = snapshot_ptr;
GHashTable *rb_dirty_pages_hash_table = rb_dirty_pages_hash_table_ptr;
RAMBlock *rb = ramblock_lookup(rb_idstr_hash);
if (rb) {
SyxSnapshotRAMBlock *snapshot_ramblock = g_hash_table_lookup(snapshot->root_snapshot->rbs_snapshot,
rb_idstr_hash);
struct rb_page_root_restore_args root_restore_args = {
.rb = rb,
.snapshot_rb = snapshot_ramblock
};
#ifdef CONFIG_DEBUG_TCG
@ -531,16 +571,16 @@ static void root_restore_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash
}
}
static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr,
gpointer check_memory_args_ptr) {
struct rb_check_memory_args *args = check_memory_args_ptr;
SyxSnapshot *snapshot = args->snapshot;
RAMBlock *rb = ramblock_lookup(rb_idstr_hash);
args->nb_inconsistent_pages = 0;
if (rb) {
SYX_PRINTF("Checking memory consistency of %s... ", rb->idstr);
SyxSnapshotRAMBlock *rb_snapshot = g_hash_table_lookup(snapshot->root_snapshot->rbs_snapshot, rb_idstr_hash);
assert(rb_snapshot);
assert(rb->used_length == rb_snapshot->used_length);
@ -559,7 +599,8 @@ static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dir
}
if (args->nb_inconsistent_pages > 0) {
SYX_ERROR("[%s] Found %lu page %s.\n", rb->idstr, args->nb_inconsistent_pages, args->nb_inconsistent_pages > 1 ? "inconsistencies" : "inconsistency");
SYX_ERROR("[%s] Found %lu page %s.\n", rb->idstr, args->nb_inconsistent_pages,
args->nb_inconsistent_pages > 1 ? "inconsistencies" : "inconsistency");
} else {
SYX_PRINTF("OK.\n");
}
@ -569,35 +610,35 @@ static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dir
}
}
uint64_t syx_snapshot_check_memory_consistency(SyxSnapshot *snapshot) {
struct rb_check_memory_args args = {
.snapshot = snapshot
};
g_hash_table_foreach(snapshot->rbs_dirty_list, root_restore_check_memory_rb, &args);
return args.nb_inconsistent_pages;
}
void syx_snapshot_root_restore(SyxSnapshot *snapshot) {
// health check.
CPUState *cpu;
CPU_FOREACH(cpu) {
assert(cpu->stopped);
}
bool must_unlock_iothread = false;
if (!qemu_mutex_iothread_locked()) {
qemu_mutex_lock_iothread();
must_unlock_iothread = true;
}
// Restore devices first, in case restoring them changes the memory layout.
device_restore_all(snapshot->root_snapshot->dss);
g_hash_table_foreach(snapshot->rbs_dirty_list, root_restore_rb, snapshot);
syx_cow_cache_flush_highest_layer(snapshot->bdrvs_cow_cache);
if (mr_to_enable) {
memory_region_set_enabled(mr_to_enable, true);
mr_to_enable = NULL;
@ -605,7 +646,38 @@ void syx_snapshot_root_restore(SyxSnapshot* snapshot)
syx_snapshot_dirty_list_flush(snapshot);
if (must_unlock_iothread) {
qemu_mutex_unlock_iothread();
}
}
bool syx_snapshot_cow_cache_read_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
if (!syx_snapshot_state.active_bdrv_cache_snapshot) {
if (syx_snapshot_state.before_fuzz_cache) {
syx_cow_cache_read_entry(syx_snapshot_state.before_fuzz_cache, blk, offset, bytes, qiov, qiov_offset, flags);
return true;
}
return false;
} else {
syx_cow_cache_read_entry(syx_snapshot_state.active_bdrv_cache_snapshot->bdrvs_cow_cache, blk, offset, bytes, qiov, qiov_offset, flags);
return true;
}
}
bool syx_snapshot_cow_cache_write_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags)
{
if (!syx_snapshot_state.active_bdrv_cache_snapshot) {
if (syx_snapshot_state.before_fuzz_cache) {
assert(syx_cow_cache_write_entry(syx_snapshot_state.before_fuzz_cache, blk, offset, bytes, qiov, qiov_offset, flags));
return true;
}
return false;
} else {
assert(syx_cow_cache_write_entry(syx_snapshot_state.active_bdrv_cache_snapshot->bdrvs_cow_cache, blk, offset, bytes, qiov, qiov_offset, flags));
return true;
}
}
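Combined with the hooks in block-backend.c, a plausible harness-side lifecycle looks like the sketch below (kind stands for a DeviceSnapshotKind value from device-save.h, which this diff does not show; nothing here is part of the commit):

static SyxSnapshot *example_harness_setup(DeviceSnapshotKind kind)
{
    /* Cache block writes from the moment the VM starts booting. */
    syx_snapshot_init(true);

    /* ... run the VM up to the fuzzing entry point ... */

    /* is_active_bdrv_cache = true moves before_fuzz_cache into the new
     * snapshot, which becomes active_bdrv_cache_snapshot. */
    return syx_snapshot_new(true, true, kind, NULL);
}

static void example_harness_iteration(SyxSnapshot *snapshot)
{
    /* ... execute one fuzzing input ... */

    /* Restores devices and dirtied RAM pages, then flushes the highest
     * CoW layer so block writes from the run are discarded. */
    syx_snapshot_root_restore(snapshot);
}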

View File

@ -7,64 +7,31 @@
*/
#pragma once
#include "qemu/osdep.h"
#include "qom/object.h"
#include "sysemu/sysemu.h"
#include "device-save.h"
#include "syx-cow-cache.h"
#include "../syx-misc.h"
#define SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE 64
#define SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS (1024 * 1024)
typedef struct SyxSnapshotRoot SyxSnapshotRoot;
typedef struct SyxSnapshotIncrement SyxSnapshotIncrement;
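/* The struct definitions now live in syx-snapshot.c; both types are opaque
 * to users of this header. */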
/**
* A snapshot. It is the main object used in this API to
* handle snapshotting.
*/
typedef struct SyxSnapshot {
SyxSnapshotRoot* root_snapshot;
SyxSnapshotIncrement* last_incremental_snapshot;
SyxCowCache* bdrvs_cow_cache;
GHashTable *rbs_dirty_list; // hash map: H(rb) -> GHashTable(offset_within_ramblock). Filled lazily.
} SyxSnapshot;
typedef struct SyxSnapshotTracker {
@ -78,32 +45,41 @@ typedef struct SyxSnapshotState {
uint64_t page_size;
uint64_t page_mask;
// Actively tracked snapshots. Their dirty lists will
// be updated at each dirty access
SyxSnapshotTracker tracked_snapshots;
// In use iff syx is initialized with the cached_bdrvs flag on.
// It is no longer updated once an active bdrv cache snapshot is set.
SyxCowCache* before_fuzz_cache;
// snapshot used to restore bdrv cache if enabled.
SyxSnapshot* active_bdrv_cache_snapshot;
// Root
} SyxSnapshotState;
void syx_snapshot_init(bool cached_bdrvs);
//
// Snapshot API
//
SyxSnapshot *syx_snapshot_new(bool track, bool is_active_bdrv_cache, DeviceSnapshotKind kind, char **devices);
void syx_snapshot_free(SyxSnapshot *snapshot);
void syx_snapshot_root_restore(SyxSnapshot *snapshot);
uint64_t syx_snapshot_check_memory_consistency(SyxSnapshot *snapshot);
// Push the current RAM state and saves it
void syx_snapshot_increment_push(SyxSnapshot *snapshot, DeviceSnapshotKind kind, char **devices);
// Restores the last push. Restores the root snapshot if no incremental snapshot is present.
void syx_snapshot_increment_pop(SyxSnapshot *snapshot);
void syx_snapshot_increment_restore_last(SyxSnapshot *snapshot);
//
@ -111,8 +87,10 @@ void syx_snapshot_increment_restore_last(SyxSnapshot* snapshot);
//
SyxSnapshotTracker syx_snapshot_tracker_init(void);
void syx_snapshot_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot);
void syx_snapshot_stop_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot);
//
@ -126,15 +104,9 @@ bool syx_snapshot_is_enabled(void);
// Dirty list API
//
void syx_snapshot_dirty_list_add_hostaddr(void *host_addr);
void syx_snapshot_dirty_list_add_hostaddr_range(void *host_addr, uint64_t len);
/**
* @brief Same as syx_snapshot_dirty_list_add. The difference
@ -148,4 +120,10 @@ void syx_snapshot_dirty_list_add_paddr(hwaddr paddr);
* tcg-target.inc.c specific environment.
* @param host_addr The host address where the dirty page is located.
*/
void syx_snapshot_dirty_list_add_tcg_target(uint64_t dummy, void *host_addr);
bool syx_snapshot_cow_cache_read_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);
bool syx_snapshot_cow_cache_write_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
BdrvRequestFlags flags);