Block Device fast snapshot implementation.
parent c92d7c2ef6
commit 587303b513
@@ -27,6 +27,7 @@
 #include "qemu/option.h"
 #include "trace.h"
 #include "migration/misc.h"
+#include "libafl_extras/syx-snapshot/syx-snapshot.h"
 
 /* Number of coroutines to reserve per attached device model */
 #define COROUTINE_POOL_RESERVATION 64
@@ -42,6 +43,9 @@ typedef struct BlockBackendAioNotifier {
 
 struct BlockBackend {
     char *name;
+    //// --- Begin LibAFL code ---
+    guint name_hash;
+    //// --- End LibAFL code ---
     int refcnt;
     BdrvChild *root;
     AioContext *ctx;
@@ -714,6 +718,10 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
         error_setg(errp, "Device with id '%s' already exists", name);
         return false;
     }
+    if (blk_by_name_hash(g_str_hash(name))) {
+        error_setg(errp, "Device with name hash '%x' already exists", g_str_hash(name));
+        return false;
+    }
     if (bdrv_find_node(name)) {
         error_setg(errp,
                    "Device name '%s' conflicts with an existing node name",
@@ -722,6 +730,11 @@ bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
     }
 
     blk->name = g_strdup(name);
+    //// --- Begin LibAFL code ---
+
+    blk->name_hash = g_str_hash(blk->name);
+
+    //// --- End LibAFL code ---
     QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
     return true;
 }
@@ -753,6 +766,12 @@ const char *blk_name(const BlockBackend *blk)
     return blk->name ?: "";
 }
 
+guint blk_name_hash(const BlockBackend* blk)
+{
+    IO_CODE();
+    return blk->name_hash;
+}
+
 /*
  * Return the BlockBackend with name @name if it exists, else null.
  * @name must not be null.
@@ -771,6 +790,22 @@ BlockBackend *blk_by_name(const char *name)
     return NULL;
 }
 
+/*
+ * Return the BlockBackend with name hash @name_hash if it exists, else null.
+ */
+BlockBackend *blk_by_name_hash(guint name_hash)
+{
+    BlockBackend *blk = NULL;
+
+    GLOBAL_STATE_CODE();
+    while ((blk = blk_next(blk)) != NULL) {
+        if (name_hash == blk->name_hash) {
+            return blk;
+        }
+    }
+    return NULL;
+}
+
 /*
  * Return the BlockDriverState attached to @blk if any, else null.
 */
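
A minimal usage sketch of the new hash-based lookup (illustrative only, not part of the commit). It assumes a backend registered through monitor_add_blk(), since that is where name_hash is populated with g_str_hash():

    /* Illustrative sketch: resolve a BlockBackend from the hash of its name.
     * "virtio0" is a made-up device id used only for this example. */
    guint h = g_str_hash("virtio0");
    BlockBackend *blk = blk_by_name_hash(h);   /* walks blk_next(), comparing name_hash */
    if (blk) {
        assert(blk_name_hash(blk) == h);       /* monitor_add_blk() stored the same hash */
    }
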
@@ -1624,8 +1659,14 @@ static void coroutine_fn blk_aio_read_entry(void *opaque)
     QEMUIOVector *qiov = rwco->iobuf;
 
     assert(qiov->size == acb->bytes);
+
+    if (!syx_snapshot_cow_cache_read_entry(rwco->blk, rwco->offset, acb->bytes, qiov, 0, rwco->flags)) {
     rwco->ret = blk_co_do_preadv_part(rwco->blk, rwco->offset, acb->bytes, qiov,
                                       0, rwco->flags);
+    } else {
+        rwco->ret = 0;
+    }
 
     blk_aio_complete(acb);
 }
@@ -1636,8 +1677,14 @@ static void coroutine_fn blk_aio_write_entry(void *opaque)
     QEMUIOVector *qiov = rwco->iobuf;
 
     assert(!qiov || qiov->size == acb->bytes);
+
+    if (!syx_snapshot_cow_cache_write_entry(rwco->blk, rwco->offset, acb->bytes, qiov, 0, rwco->flags)) {
     rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
                                        qiov, 0, rwco->flags);
+    } else {
+        rwco->ret = 0;
+    }
 
     blk_aio_complete(acb);
 }
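
The contract of the two hooks above: they return true when the COW cache has fully handled the request (rwco->ret is then forced to 0 and the real driver is skipped), and false when the request must fall through to the regular blk_co_do_preadv_part()/blk_co_do_pwritev_part() path. A condensed sketch of that decision, for reading purposes only (the helper name and its exact signature are made up; only the called functions come from this commit):

    /* Sketch of the pattern used by both AIO entry points above. */
    static int coroutine_fn blk_do_read_maybe_cached(BlkRwCo *rwco, int64_t bytes, QEMUIOVector *qiov)
    {
        if (syx_snapshot_cow_cache_read_entry(rwco->blk, rwco->offset, bytes, qiov, 0, rwco->flags)) {
            return 0;   /* served (and patched up) by the snapshot COW cache */
        }
        return blk_co_do_preadv_part(rwco->blk, rwco->offset, bytes, qiov, 0, rwco->flags);
    }
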
@@ -470,7 +470,7 @@ void QEMU_ERROR("code path is reachable")
 
 /*
  * Round number down to multiple. Requires that d be a power of 2 (see
- * QEMU_ALIGN_UP for a safer but slower version on arbitrary
+ * QEMU_ALIGN_DOWN for a safer but slower version on arbitrary
  * numbers); works even if d is a smaller type than n.
 */
 #ifndef ROUND_DOWN
@@ -48,6 +48,7 @@ void coroutine_fn no_co_wrapper blk_co_unref(BlockBackend *blk);
 
 void blk_remove_all_bs(void);
 BlockBackend *blk_by_name(const char *name);
+BlockBackend *blk_by_name_hash(guint name_hash);
 BlockBackend *blk_next(BlockBackend *blk);
 BlockBackend *blk_all_next(BlockBackend *blk);
 bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp);
@@ -24,6 +24,7 @@
 */
 
 const char *blk_name(const BlockBackend *blk);
+guint blk_name_hash(const BlockBackend* blk);
 
 BlockDriverState *blk_bs(BlockBackend *blk);
 
@@ -66,7 +66,8 @@ void libafl_sync_exit_cpu(void)
     last_exit_reason.next_pc = 0;
 }
 
-bool libafl_exit_asap(void) {
+bool libafl_exit_asap(void)
+{
     return expected_exit;
 }
 
@@ -20,7 +20,7 @@ int libafl_qemu_remove_breakpoint(target_ulong pc);
 
 enum libafl_exit_reason_kind {
     BREAKPOINT = 0,
-    SYNC_BACKDOOR = 1
+    SYNC_BACKDOOR = 1,
 };
 
 struct libafl_exit_reason_breakpoint {
@@ -1,6 +1,7 @@
 specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files(
   'syx-snapshot/device-save.c',
   'syx-snapshot/syx-snapshot.c',
+  'syx-snapshot/syx-cow-cache.c',
   'syx-snapshot/channel-buffer-writeback.c',
 )])
 specific_ss.add(files('exit.c', 'hook.c', 'jit.c'))
libafl_extras/syx-snapshot/syx-cow-cache.c (new file, 214 lines)
@@ -0,0 +1,214 @@
#include "syx-cow-cache.h"

#include "sysemu/block-backend.h"

#define IS_POWER_OF_TWO(x) ((x != 0) && ((x & (x - 1)) == 0))

SyxCowCache* syx_cow_cache_new(void)
{
    SyxCowCache* cache = g_new0(SyxCowCache, 2);

    QTAILQ_INIT(&cache->layers);

    return cache;
}

static gchar* g_array_element_ptr(GArray* array, guint position)
{
    assert(position < array->len);
    return array->data + position * g_array_get_element_size(array);
}

void syx_cow_cache_push_layer(SyxCowCache* scc, uint64_t chunk_size, uint64_t max_size)
{
    SyxCowCacheLayer* new_layer = g_new0(SyxCowCacheLayer, 1);

    new_layer->cow_cache_devices = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);
    new_layer->chunk_size = chunk_size;
    new_layer->max_nb_chunks = max_size;

    assert(IS_POWER_OF_TWO(chunk_size));
    assert(!(max_size % chunk_size));

    QTAILQ_INSERT_HEAD(&scc->layers, new_layer, next);
}

void syx_cow_cache_pop_layer(SyxCowCache* scc)
{
    // TODO
}

static void flush_device_layer(gpointer _blk_name_hash, gpointer cache_device, gpointer _user_data)
{
    SyxCowCacheDevice* sccd = (SyxCowCacheDevice*) cache_device;

    g_hash_table_remove_all(sccd->positions);
    g_array_set_size(sccd->data, 0);
}

void syx_cow_cache_flush_highest_layer(SyxCowCache* scc)
{
    SyxCowCacheLayer* highest_layer = QTAILQ_FIRST(&scc->layers);

    // highest_layer->cow_cache_devices
    g_hash_table_foreach(highest_layer->cow_cache_devices, flush_device_layer, NULL);
}

void syx_cow_cache_move(SyxCowCache* lhs, SyxCowCache** rhs)
{
    lhs->layers = (*rhs)->layers;
    g_free(*rhs);
    *rhs = NULL;
}

static bool read_chunk_from_cache_layer_device(SyxCowCacheDevice* sccd, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset)
{
    gpointer data_position = NULL;
    bool found = g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset), NULL, &data_position);

    // cache hit
    if (found) {
        void* data_position_ptr = g_array_element_ptr(sccd->data, GPOINTER_TO_UINT(data_position));
        assert(qemu_iovec_from_buf(qiov, qiov_offset, data_position_ptr, g_array_get_element_size(sccd->data)) == g_array_get_element_size(sccd->data));
    }

    return found;
}

// len must be smaller than nb bytes to next aligned to chunk of blk_offset.
// static void write_to_cache_layer_device_unaligned(SyxCowCacheDevice* sccd, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset, uint64_t len)
// {
//     const uint64_t chunk_size = g_array_get_element_size(sccd->data);
//
//     assert(ROUND_UP(blk_offset, chunk_size) - blk_offset <= len);
//     assert(IS_POWER_OF_TWO(chunk_size));
//
//     uint64_t blk_offset_aligned = ROUND_DOWN(blk_offset, chunk_size);
//
//     gpointer data_position = NULL;
//     bool found = g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset_aligned), NULL, &data_position);
//
//     if (!found) {
//         data_position = GUINT_TO_POINTER(sccd->data->len);
//         sccd->data = g_array_set_size(sccd->data, sccd->data->len + 1);
//         g_hash_table_insert(sccd->positions, GUINT_TO_POINTER(blk_offset), data_position);
//     }
//
//     void* data_position_ptr = g_array_element_ptr(sccd->data, GPOINTER_TO_UINT(data_position));
//
//     assert(qemu_iovec_to_buf(qiov, qiov_offset, data_position_ptr, g_array_get_element_size(sccd->data)) ==
//            g_array_get_element_size(sccd->data));
// }

// cache layer is allocated and all the basic checks are already done.
static void write_chunk_to_cache_layer_device(SyxCowCacheDevice* sccd, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset)
{
    const uint64_t chunk_size = g_array_get_element_size(sccd->data);

    gpointer data_position = NULL;
    bool found = g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset), NULL, &data_position);

    if (!found) {
        data_position = GUINT_TO_POINTER(sccd->data->len);
        sccd->data = g_array_set_size(sccd->data, sccd->data->len + 1);
        g_hash_table_insert(sccd->positions, GUINT_TO_POINTER(blk_offset), data_position);
    }

    void* data_position_ptr = g_array_element_ptr(sccd->data, GPOINTER_TO_UINT(data_position));

    assert(qemu_iovec_to_buf(qiov, qiov_offset, data_position_ptr, chunk_size) ==
           chunk_size);
}

static bool read_chunk_from_cache_layer(SyxCowCacheLayer* sccl, BlockBackend* blk, QEMUIOVector* qiov, size_t qiov_offset, uint64_t blk_offset)
{
    assert(!(qiov->size % sccl->chunk_size));

    SyxCowCacheDevice* cache_entry = g_hash_table_lookup(sccl->cow_cache_devices, GINT_TO_POINTER(blk_name_hash(blk)));

    // return early if nothing is registered
    if (!cache_entry) {
        return false;
    }

    assert(cache_entry && cache_entry->data);

    // try to read cached pages in current layer if something is registered.
    return read_chunk_from_cache_layer_device(cache_entry, qiov, qiov_offset, blk_offset);
}

// Returns false if could not write to current layer.
static bool write_to_cache_layer(SyxCowCacheLayer* sccl, BlockBackend* blk, int64_t offset, int64_t bytes, QEMUIOVector* qiov)
{
    if (qiov->size % sccl->chunk_size) {
        // todo: determine if it is worth developing an unaligned access version.
        printf("error: 0x%zx %% 0x%lx == 0x%lx\n", qiov->size, sccl->chunk_size, qiov->size % sccl->chunk_size);
        exit(1);
    }

    SyxCowCacheDevice* cache_entry = g_hash_table_lookup(sccl->cow_cache_devices, GINT_TO_POINTER(blk_name_hash(blk)));

    if (unlikely(!cache_entry)) {
        cache_entry = g_new0(SyxCowCacheDevice, 1);
        cache_entry->data = g_array_sized_new(false, false, sccl->chunk_size, INITIAL_NB_CHUNKS_PER_DEVICE);
        cache_entry->positions = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);
        g_hash_table_insert(sccl->cow_cache_devices, GINT_TO_POINTER(blk_name_hash(blk)), cache_entry);
    }

    assert(cache_entry && cache_entry->data);

    if (cache_entry->data->len + (qiov->size / sccl->chunk_size) > sccl->max_nb_chunks) {
        return false;
    }

    // write cached page
    uint64_t blk_offset = offset;
    size_t qiov_offset = 0;
    for (; qiov_offset < qiov->size; blk_offset += sccl->chunk_size, qiov_offset += sccl->chunk_size) {
        write_chunk_to_cache_layer_device(cache_entry, qiov, qiov_offset, blk_offset);
    }

    return true;
}

void syx_cow_cache_read_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t _qiov_offset,
                              BdrvRequestFlags flags)
{
    SyxCowCacheLayer* layer;
    uint64_t blk_offset = offset;
    size_t qiov_offset = 0;
    uint64_t chunk_size = 0;

    // printf("[%s] Read 0x%zx bytes @addr %lx\n", blk_name(blk), qiov->size, offset);

    // First read the backing block device normally.
    assert(blk_co_preadv(blk, offset, bytes, qiov, flags) >= 0);

    // Then fix the chunks that have been read from before.
    if (!QTAILQ_EMPTY(&scc->layers)) {
        for (;qiov_offset < qiov->size; blk_offset += chunk_size, qiov_offset += chunk_size) {
            QTAILQ_FOREACH(layer, &scc->layers, next) {
                chunk_size = layer->chunk_size;
                if (read_chunk_from_cache_layer(layer, blk, qiov, qiov_offset, blk_offset)) {
                    break;
                }
            }
        }
    }
}

bool syx_cow_cache_write_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
                               BdrvRequestFlags flags)
{
    SyxCowCacheLayer* layer;

    // printf("[%s] Write 0x%zx bytes @addr %lx\n", blk_name(blk), qiov->size, offset);

    layer = QTAILQ_FIRST(&scc->layers);
    if (layer) {
        assert(write_to_cache_layer(layer, blk, offset, bytes, qiov));
        return true;
    } else {
        return false;
    }
}
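
A note on the bookkeeping above: each SyxCowCacheDevice stores cached chunks back-to-back in a GArray whose element size is the layer's chunk_size, and the positions hash table maps a chunk-aligned block offset to the index of its slot in that array. A small illustrative fragment (not part of the commit; sccd is assumed to be a SyxCowCacheDevice of a 64-byte-chunk layer, the default chunk size defined later in syx-snapshot.h):

    /* Illustrative only: how a cached chunk is located on a read. */
    uint64_t blk_offset = 0x1000;   /* chunk-aligned offset within the block device */
    gpointer slot;                  /* GArray index, stored as a pointer-sized value */
    if (g_hash_table_lookup_extended(sccd->positions, GUINT_TO_POINTER(blk_offset), NULL, &slot)) {
        /* the cached copy lives at sccd->data->data + GPOINTER_TO_UINT(slot) * 64,
         * and is copied over the freshly-read data in syx_cow_cache_read_entry() */
    }
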
libafl_extras/syx-snapshot/syx-cow-cache.h (new file, 45 lines)
@@ -0,0 +1,45 @@
#pragma once

// Rewritten COW cache for block devices, heavily inspired by kAFL/NYX implementation.

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "block/block.h"

#define INITIAL_NB_CHUNKS_PER_DEVICE (1024 * 64)

typedef struct SyxCowCacheDevice {
    GArray* data;
    GHashTable* positions; // blk_offset -> data_position
} SyxCowCacheDevice;

typedef struct SyxCowCacheLayer SyxCowCacheLayer;

typedef struct SyxCowCacheLayer {
    GHashTable *cow_cache_devices; // H(device) -> SyxCowCacheDevice
    uint64_t chunk_size;
    uint64_t max_nb_chunks;

    QTAILQ_ENTRY(SyxCowCacheLayer) next;
} SyxCowCacheLayer;

typedef struct SyxCowCache {
    QTAILQ_HEAD(, SyxCowCacheLayer) layers;
} SyxCowCache;

SyxCowCache* syx_cow_cache_new(void);

// lhs <- rhs
// rhs is freed and nulled.
void syx_cow_cache_move(SyxCowCache* lhs, SyxCowCache** rhs);

void syx_cow_cache_push_layer(SyxCowCache* scc, uint64_t chunk_size, uint64_t max_size);
void syx_cow_cache_pop_layer(SyxCowCache* scc);

void syx_cow_cache_flush_highest_layer(SyxCowCache* scc);

void syx_cow_cache_read_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
                              BdrvRequestFlags flags);

bool syx_cow_cache_write_entry(SyxCowCache* scc, BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
                               BdrvRequestFlags flags);
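
A minimal usage sketch of this API, using the same chunk size and block count the snapshot code later in this commit uses as defaults (blk, offset, bytes, qiov and flags stand for an intercepted block request and are placeholders here):

    SyxCowCache *scc = syx_cow_cache_new();
    syx_cow_cache_push_layer(scc, 64, 1024 * 1024);   /* chunk size must be a power of two,
                                                         max size a multiple of it */

    if (!syx_cow_cache_write_entry(scc, blk, offset, bytes, qiov, 0, flags)) {
        /* no layer could absorb the write: let it reach the real device */
    }

    syx_cow_cache_read_entry(scc, blk, offset, bytes, qiov, 0, flags);  /* read, then overlay cached chunks */

    syx_cow_cache_flush_highest_layer(scc);   /* drop everything written since the layer was pushed */
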
@@ -16,6 +16,48 @@
 #define TARGET_NEXT_PAGE_ADDR(p) \
     ((typeof(p))(((uintptr_t) p + TARGET_PAGE_SIZE) & TARGET_PAGE_MASK))
 
+/**
+ * Saved ramblock
+ */
+typedef struct SyxSnapshotRAMBlock {
+    uint8_t *ram;         // RAM block
+    uint64_t used_length; // Length of the ram block
+} SyxSnapshotRAMBlock;
+
+/**
+ * A root snapshot representation.
+ */
+typedef struct SyxSnapshotRoot {
+    GHashTable* rbs_snapshot; // hash map: H(rb) -> SyxSnapshotRAMBlock
+    DeviceSaveState* dss;
+} SyxSnapshotRoot;
+
+/**
+ * A list of dirty pages with their old data.
+ */
+typedef struct SyxSnapshotDirtyPage {
+    ram_addr_t offset_within_rb;
+    uint8_t *data;
+} SyxSnapshotDirtyPage;
+
+typedef struct SyxSnapshotDirtyPageList {
+    SyxSnapshotDirtyPage *dirty_pages;
+    uint64_t length;
+} SyxSnapshotDirtyPageList;
+
+/**
+ * A snapshot increment. It is used to quickly
+ * save a VM state.
+ */
+typedef struct SyxSnapshotIncrement {
+    // Back to root snapshot if NULL
+    struct SyxSnapshotIncrement *parent;
+
+    DeviceSaveState *dss;
+
+    GHashTable *rbs_dirty_pages; // hash map: H(rb) -> SyxSnapshotDirtyPageList
+} SyxSnapshotIncrement;
+
+
 SyxSnapshotState syx_snapshot_state = {0};
 static MemoryRegion* mr_to_enable = NULL;
@@ -48,7 +90,7 @@ static RAMBlock* ramblock_lookup(gpointer rb_idstr_hash)
 }
 
 // Root snapshot API
-static SyxSnapshotRoot syx_snapshot_root_new(DeviceSnapshotKind kind, char** devices);
+static SyxSnapshotRoot* syx_snapshot_root_new(DeviceSnapshotKind kind, char** devices);
 static void syx_snapshot_root_free(SyxSnapshotRoot* root);
 
 struct rb_dirty_list_to_page_args {
@@ -78,8 +120,7 @@ struct rb_check_memory_args {
     uint64_t nb_inconsistent_pages; // OUT
 };
 
-void syx_snapshot_init(void)
-{
+void syx_snapshot_init(bool cached_bdrvs) {
     uint64_t page_size = TARGET_PAGE_SIZE;
 
     syx_snapshot_state.page_size = page_size;
@@ -87,16 +128,29 @@ void syx_snapshot_init(void)
 
     syx_snapshot_state.tracked_snapshots = syx_snapshot_tracker_init();
 
+    if (cached_bdrvs) {
+        syx_snapshot_state.before_fuzz_cache = syx_cow_cache_new();
+        syx_cow_cache_push_layer(syx_snapshot_state.before_fuzz_cache, SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE, SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS);
+    }
+
     syx_snapshot_state.is_enabled = false;
 }
 
-SyxSnapshot* syx_snapshot_new(bool track, DeviceSnapshotKind kind, char** devices)
-{
+SyxSnapshot *syx_snapshot_new(bool track, bool is_active_bdrv_cache, DeviceSnapshotKind kind, char **devices) {
     SyxSnapshot *snapshot = g_new0(SyxSnapshot, 1);
 
     snapshot->root_snapshot = syx_snapshot_root_new(kind, devices);
     snapshot->last_incremental_snapshot = NULL;
-    snapshot->rbs_dirty_list = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, (GDestroyNotify) g_hash_table_remove_all);
+    snapshot->rbs_dirty_list = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
+                                                     (GDestroyNotify) g_hash_table_remove_all);
+    snapshot->bdrvs_cow_cache = syx_cow_cache_new();
+
+    if (is_active_bdrv_cache) {
+        syx_cow_cache_move(snapshot->bdrvs_cow_cache, &syx_snapshot_state.before_fuzz_cache);
+        syx_snapshot_state.active_bdrv_cache_snapshot = snapshot;
+    } else {
+        syx_cow_cache_push_layer(snapshot->bdrvs_cow_cache, SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE, SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS);
+    }
+
     if (track) {
         syx_snapshot_track(&syx_snapshot_state.tracked_snapshots, snapshot);
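
How these two entry points are meant to be combined, as a sketch only (the kind/devices arguments are placeholders; everything else is taken from this commit):

    /* At startup, before the fuzzing snapshot exists: buffer all block writes. */
    syx_snapshot_init(true);                      /* cached_bdrvs: allocates before_fuzz_cache */

    /* Later, when the fuzzing snapshot is taken, the pre-fuzz cache is moved into the
     * snapshot's own bdrvs_cow_cache and this snapshot becomes the
     * active_bdrv_cache_snapshot consulted by the block read/write hooks. */
    SyxSnapshot *snap = syx_snapshot_new(true /* track */, true /* is_active_bdrv_cache */,
                                         kind, devices);
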
@@ -123,7 +177,7 @@ void syx_snapshot_free(SyxSnapshot* snapshot)
 
     g_hash_table_remove_all(snapshot->rbs_dirty_list);
 
-    syx_snapshot_root_free(&snapshot->root_snapshot);
+    syx_snapshot_root_free(snapshot->root_snapshot);
 
     g_free(snapshot);
 }
@@ -136,21 +190,21 @@ static void destroy_ramblock_snapshot(gpointer root_snapshot)
     g_free(snapshot_rb);
 }
 
-static SyxSnapshotRoot syx_snapshot_root_new(DeviceSnapshotKind kind, char** devices)
-{
-    SyxSnapshotRoot root = {0};
+static SyxSnapshotRoot* syx_snapshot_root_new(DeviceSnapshotKind kind, char **devices) {
+    SyxSnapshotRoot* root = g_new0(SyxSnapshotRoot, 1);
 
     RAMBlock *block;
     RAMBlock *inner_block;
     DeviceSaveState *dss = device_save_kind(kind, devices);
 
-    root.rbs_snapshot = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, destroy_ramblock_snapshot);
-    root.dss = dss;
+    root->rbs_snapshot = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, destroy_ramblock_snapshot);
+    root->dss = dss;
 
     RAMBLOCK_FOREACH(block) {
         RAMBLOCK_FOREACH(inner_block) {
             if (block != inner_block && inner_block->idstr_hash == block->idstr_hash) {
-                SYX_ERROR("Hash collision detected on RAMBlocks %s and %s, snapshotting will not work correctly.", inner_block->idstr, block->idstr);
+                SYX_ERROR("Hash collision detected on RAMBlocks %s and %s, snapshotting will not work correctly.",
+                          inner_block->idstr, block->idstr);
                 exit(1);
             }
         }
@@ -160,19 +214,18 @@ static SyxSnapshotRoot syx_snapshot_root_new(DeviceSnapshotKind kind, char** devices)
         snapshot_rb->ram = g_new(uint8_t, block->used_length);
         memcpy(snapshot_rb->ram, block->host, block->used_length);
 
-        g_hash_table_insert(root.rbs_snapshot, GINT_TO_POINTER(block->idstr_hash), snapshot_rb);
+        g_hash_table_insert(root->rbs_snapshot, GINT_TO_POINTER(block->idstr_hash), snapshot_rb);
     }
 
     return root;
 }
 
-static void syx_snapshot_root_free(SyxSnapshotRoot* root)
-{
+static void syx_snapshot_root_free(SyxSnapshotRoot *root) {
     g_hash_table_destroy(root->rbs_snapshot);
+    g_free(root);
 }
 
-SyxSnapshotTracker syx_snapshot_tracker_init(void)
-{
+SyxSnapshotTracker syx_snapshot_tracker_init(void) {
     SyxSnapshotTracker tracker = {
         .length = 0,
         .capacity = SYX_SNAPSHOT_LIST_INIT_SIZE,
@@ -182,8 +235,7 @@ SyxSnapshotTracker syx_snapshot_tracker_init(void)
     return tracker;
 }
 
-void syx_snapshot_track(SyxSnapshotTracker* tracker, SyxSnapshot* snapshot)
-{
+void syx_snapshot_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot) {
     if (tracker->length == tracker->capacity) {
         tracker->capacity *= SYX_SNAPSHOT_LIST_GROW_FACTOR;
         tracker->tracked_snapshots = g_realloc(tracker->tracked_snapshots, tracker->capacity * sizeof(SyxSnapshot *));
@@ -195,8 +247,7 @@ void syx_snapshot_track(SyxSnapshotTracker* tracker, SyxSnapshot* snapshot)
     tracker->length++;
 }
 
-void syx_snapshot_stop_track(SyxSnapshotTracker* tracker, SyxSnapshot* snapshot)
-{
+void syx_snapshot_stop_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot) {
     for (uint64_t i = 0; i < tracker->length; ++i) {
         if (tracker->tracked_snapshots[i] == snapshot) {
             for (uint64_t j = i + i; j < tracker->length; ++j) {
@@ -211,8 +262,8 @@ void syx_snapshot_stop_track(SyxSnapshotTracker* tracker, SyxSnapshot* snapshot)
     abort();
 }
 
-static void rb_save_dirty_addr_to_table(gpointer offset_within_rb, gpointer unused, gpointer rb_dirty_list_to_page_args_ptr)
-{
+static void
+rb_save_dirty_addr_to_table(gpointer offset_within_rb, gpointer unused, gpointer rb_dirty_list_to_page_args_ptr) {
     struct rb_dirty_list_to_page_args *args = rb_dirty_list_to_page_args_ptr;
     RAMBlock *rb = args->rb;
     SyxSnapshotDirtyPage *dirty_page = &args->dirty_page_list->dirty_pages[*args->table_idx];
@@ -223,8 +274,8 @@ static void rb_save_dirty_addr_to_table(gpointer offset_within_rb, gpointer unused, gpointer rb_dirty_list_to_page_args_ptr)
     *args->table_idx += 1;
 }
 
-static void rb_dirty_list_to_dirty_pages(gpointer rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr, gpointer rbs_dirty_pages_ptr)
-{
+static void rb_dirty_list_to_dirty_pages(gpointer rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr,
+                                         gpointer rbs_dirty_pages_ptr) {
     GHashTable *rbs_dirty_pages = rbs_dirty_pages_ptr;
     GHashTable *rb_dirty_list = rb_dirty_list_hash_table_ptr;
 
@@ -251,8 +302,7 @@ static void rb_dirty_list_to_dirty_pages(gpointer rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr, gpointer rbs_dirty_pages_ptr)
     }
 }
 
-static void destroy_snapshot_dirty_page_list(gpointer snapshot_dirty_page_list_ptr)
-{
+static void destroy_snapshot_dirty_page_list(gpointer snapshot_dirty_page_list_ptr) {
     SyxSnapshotDirtyPageList *snapshot_dirty_page_list = snapshot_dirty_page_list_ptr;
 
     for (uint64_t i = 0; i < snapshot_dirty_page_list->length; ++i) {
@@ -263,21 +313,21 @@ static void destroy_snapshot_dirty_page_list(gpointer snapshot_dirty_page_list_ptr)
     g_free(snapshot_dirty_page_list);
 }
 
-void syx_snapshot_increment_push(SyxSnapshot* snapshot, DeviceSnapshotKind kind, char** devices)
-{
+void syx_snapshot_increment_push(SyxSnapshot *snapshot, DeviceSnapshotKind kind, char **devices) {
     SyxSnapshotIncrement *increment = g_new0(SyxSnapshotIncrement, 1);
     increment->parent = snapshot->last_incremental_snapshot;
     snapshot->last_incremental_snapshot = increment;
 
-    increment->rbs_dirty_pages = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, destroy_snapshot_dirty_page_list);
+    increment->rbs_dirty_pages = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
+                                                       destroy_snapshot_dirty_page_list);
     g_hash_table_foreach(snapshot->rbs_dirty_list, rb_dirty_list_to_dirty_pages, increment->rbs_dirty_pages);
     increment->dss = device_save_kind(kind, devices);
 
     g_hash_table_remove_all(snapshot->rbs_dirty_list);
 }
 
-static SyxSnapshotDirtyPage* get_dirty_page_from_addr_rec(SyxSnapshotIncrement* increment, RAMBlock* rb, ram_addr_t offset_within_rb)
-{
+static SyxSnapshotDirtyPage *
+get_dirty_page_from_addr_rec(SyxSnapshotIncrement *increment, RAMBlock *rb, ram_addr_t offset_within_rb) {
     if (increment == NULL) {
         return NULL;
     }
@@ -308,7 +358,8 @@ static void restore_dirty_page_to_increment(gpointer offset_within_rb, gpointer
     if (dp) {
         memcpy(rb->host + offset, dp->data, syx_snapshot_state.page_size);
     } else {
-        SyxSnapshotRAMBlock* rrb = g_hash_table_lookup(snapshot->root_snapshot.rbs_snapshot, GINT_TO_POINTER(rb->idstr_hash));
+        SyxSnapshotRAMBlock *rrb = g_hash_table_lookup(snapshot->root_snapshot->rbs_snapshot,
+                                                       GINT_TO_POINTER(rb->idstr_hash));
         assert(rrb);
 
         memcpy(rb->host + offset, rrb->ram, syx_snapshot_state.page_size);
@@ -329,8 +380,7 @@ static void restore_rb_to_increment(gpointer rb_idstr_hash, gpointer rb_dirty_pa
     g_hash_table_foreach(rb_dirty_pages_hash_table, restore_dirty_page_to_increment, &page_args);
 }
 
-static void restore_to_increment(SyxSnapshot* snapshot, SyxSnapshotIncrement* increment)
-{
+static void restore_to_increment(SyxSnapshot *snapshot, SyxSnapshotIncrement *increment) {
     struct rb_increment_restore_args args = {
         .snapshot = snapshot,
         .increment = increment
@@ -339,8 +389,7 @@ static void restore_to_increment(SyxSnapshot* snapshot, SyxSnapshotIncrement* increment)
     g_hash_table_foreach(snapshot->rbs_dirty_list, restore_rb_to_increment, &args);
 }
 
-void syx_snapshot_increment_pop(SyxSnapshot* snapshot)
-{
+void syx_snapshot_increment_pop(SyxSnapshot *snapshot) {
     SyxSnapshotIncrement *last_increment = snapshot->last_incremental_snapshot;
 
     device_restore_all(last_increment->dss);
@@ -352,8 +401,7 @@ void syx_snapshot_increment_pop(SyxSnapshot* snapshot)
     syx_snapshot_dirty_list_flush(snapshot);
 }
 
-void syx_snapshot_increment_restore_last(SyxSnapshot* snapshot)
-{
+void syx_snapshot_increment_restore_last(SyxSnapshot *snapshot) {
     SyxSnapshotIncrement *last_increment = snapshot->last_incremental_snapshot;
 
     device_restore_all(last_increment->dss);
@@ -362,8 +410,7 @@ void syx_snapshot_increment_restore_last(SyxSnapshot* snapshot)
     syx_snapshot_dirty_list_flush(snapshot);
 }
 
-static SyxSnapshotIncrement* syx_snapshot_increment_free(SyxSnapshotIncrement* increment)
-{
+static SyxSnapshotIncrement *syx_snapshot_increment_free(SyxSnapshotIncrement *increment) {
     SyxSnapshotIncrement *parent_increment = increment->parent;
     g_hash_table_destroy(increment->rbs_dirty_pages);
     device_free_all(increment->dss);
@@ -371,13 +418,11 @@ static SyxSnapshotIncrement* syx_snapshot_increment_free(SyxSnapshotIncrement* increment)
     return parent_increment;
 }
 
-static void syx_snapshot_dirty_list_flush(SyxSnapshot* snapshot)
-{
+static void syx_snapshot_dirty_list_flush(SyxSnapshot *snapshot) {
     g_hash_table_foreach(snapshot->rbs_dirty_list, empty_rb_dirty_list, (gpointer) snapshot);
 }
 
-static inline void syx_snapshot_dirty_list_add_internal(RAMBlock* rb, ram_addr_t offset)
-{
+static inline void syx_snapshot_dirty_list_add_internal(RAMBlock *rb, ram_addr_t offset) {
     assert((offset & syx_snapshot_state.page_mask) == offset); // offsets should always be page-aligned.
 
     for (uint64_t i = 0; i < syx_snapshot_state.tracked_snapshots.length; ++i) {
@@ -401,8 +446,7 @@ static inline void syx_snapshot_dirty_list_add_internal(RAMBlock* rb, ram_addr_t offset)
     }
 }
 
-bool syx_snapshot_is_enabled(void)
-{
+bool syx_snapshot_is_enabled(void) {
     return syx_snapshot_state.is_enabled;
 }
 
@@ -428,8 +472,7 @@ __attribute__((target("no-3dnow,no-sse,no-mmx"),no_caller_saved_registers)) void
 */
 
 // host_addr should be page-aligned.
-void syx_snapshot_dirty_list_add_hostaddr(void* host_addr)
-{
+void syx_snapshot_dirty_list_add_hostaddr(void *host_addr) {
     // early check to know whether we should log the page access or not
     if (!syx_snapshot_is_enabled()) {
         return;
@@ -449,8 +492,7 @@ void syx_snapshot_dirty_list_add_hostaddr(void* host_addr)
     syx_snapshot_dirty_list_add_internal(rb, offset);
 }
 
-void syx_snapshot_dirty_list_add_hostaddr_range(void* host_addr, uint64_t len)
-{
+void syx_snapshot_dirty_list_add_hostaddr_range(void *host_addr, uint64_t len) {
     // early check to know whether we should log the page access or not
    if (!syx_snapshot_is_enabled()) {
        return;
@@ -477,14 +519,12 @@ void syx_snapshot_dirty_list_add_hostaddr_range(void* host_addr, uint64_t len)
     }
 }
 
-static void empty_rb_dirty_list(gpointer _rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr, gpointer _user_data)
-{
+static void empty_rb_dirty_list(gpointer _rb_idstr_hash, gpointer rb_dirty_list_hash_table_ptr, gpointer _user_data) {
     GHashTable *rb_dirty_hash_table = rb_dirty_list_hash_table_ptr;
     g_hash_table_remove_all(rb_dirty_hash_table);
 }
 
-static void root_restore_rb_page(gpointer offset_within_rb, gpointer _unused, gpointer root_restore_args_ptr)
-{
+static void root_restore_rb_page(gpointer offset_within_rb, gpointer _unused, gpointer root_restore_args_ptr) {
     struct rb_page_root_restore_args *args = root_restore_args_ptr;
     RAMBlock *rb = args->rb;
     SyxSnapshotRAMBlock *snapshot_rb = args->snapshot_rb;
@@ -502,14 +542,14 @@ static void root_restore_rb_page(gpointer offset_within_rb, gpointer _unused, gpointer root_restore_args_ptr)
     //TODO: manage special case of TSEG.
 }
 
-static void root_restore_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer snapshot_ptr)
-{
+static void root_restore_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer snapshot_ptr) {
     SyxSnapshot *snapshot = snapshot_ptr;
     GHashTable *rb_dirty_pages_hash_table = rb_dirty_pages_hash_table_ptr;
     RAMBlock *rb = ramblock_lookup(rb_idstr_hash);
 
     if (rb) {
-        SyxSnapshotRAMBlock* snapshot_ramblock = g_hash_table_lookup(snapshot->root_snapshot.rbs_snapshot, rb_idstr_hash);
+        SyxSnapshotRAMBlock *snapshot_ramblock = g_hash_table_lookup(snapshot->root_snapshot->rbs_snapshot,
+                                                                     rb_idstr_hash);
 
         struct rb_page_root_restore_args root_restore_args = {
             .rb = rb,
@@ -531,8 +571,8 @@ static void root_restore_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer snapshot_ptr)
     }
 }
 
-static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer check_memory_args_ptr)
-{
+static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr,
+                                         gpointer check_memory_args_ptr) {
     struct rb_check_memory_args *args = check_memory_args_ptr;
     SyxSnapshot *snapshot = args->snapshot;
     RAMBlock *rb = ramblock_lookup(rb_idstr_hash);
@@ -540,7 +580,7 @@ static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer check_memory_args_ptr)
     args->nb_inconsistent_pages = 0;
     if (rb) {
         SYX_PRINTF("Checking memory consistency of %s... ", rb->idstr);
-        SyxSnapshotRAMBlock* rb_snapshot = g_hash_table_lookup(snapshot->root_snapshot.rbs_snapshot, rb_idstr_hash);
+        SyxSnapshotRAMBlock *rb_snapshot = g_hash_table_lookup(snapshot->root_snapshot->rbs_snapshot, rb_idstr_hash);
         assert(rb_snapshot);
 
         assert(rb->used_length == rb_snapshot->used_length);
@@ -559,7 +599,8 @@ static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer check_memory_args_ptr)
     }
 
     if (args->nb_inconsistent_pages > 0) {
-        SYX_ERROR("[%s] Found %lu page %s.\n", rb->idstr, args->nb_inconsistent_pages, args->nb_inconsistent_pages > 1 ? "inconsistencies" : "inconsistency");
+        SYX_ERROR("[%s] Found %lu page %s.\n", rb->idstr, args->nb_inconsistent_pages,
+                  args->nb_inconsistent_pages > 1 ? "inconsistencies" : "inconsistency");
     } else {
         SYX_PRINTF("OK.\n");
     }
@@ -569,8 +610,7 @@ static void root_restore_check_memory_rb(gpointer rb_idstr_hash, gpointer rb_dirty_pages_hash_table_ptr, gpointer check_memory_args_ptr)
     }
 }
 
-uint64_t syx_snapshot_check_memory_consistency(SyxSnapshot* snapshot)
-{
+uint64_t syx_snapshot_check_memory_consistency(SyxSnapshot *snapshot) {
     struct rb_check_memory_args args = {
         .snapshot = snapshot
     };
@@ -578,8 +618,7 @@ uint64_t syx_snapshot_check_memory_consistency(SyxSnapshot* snapshot)
     return args.nb_inconsistent_pages;
 }
 
-void syx_snapshot_root_restore(SyxSnapshot* snapshot)
-{
+void syx_snapshot_root_restore(SyxSnapshot *snapshot) {
     // health check.
     CPUState *cpu;
     CPU_FOREACH(cpu) {
@@ -594,10 +633,12 @@ void syx_snapshot_root_restore(SyxSnapshot* snapshot)
     }
 
     // In case, we first restore devices if there is a modification of memory layout
-    device_restore_all(snapshot->root_snapshot.dss);
+    device_restore_all(snapshot->root_snapshot->dss);
 
     g_hash_table_foreach(snapshot->rbs_dirty_list, root_restore_rb, snapshot);
 
+    syx_cow_cache_flush_highest_layer(snapshot->bdrvs_cow_cache);
+
     if (mr_to_enable) {
         memory_region_set_enabled(mr_to_enable, true);
         mr_to_enable = NULL;
@@ -609,3 +650,34 @@ void syx_snapshot_root_restore(SyxSnapshot* snapshot)
         qemu_mutex_unlock_iothread();
     }
 }
+
+bool syx_snapshot_cow_cache_read_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
+                                       BdrvRequestFlags flags)
+{
+    if (!syx_snapshot_state.active_bdrv_cache_snapshot) {
+        if (syx_snapshot_state.before_fuzz_cache) {
+            syx_cow_cache_read_entry(syx_snapshot_state.before_fuzz_cache, blk, offset, bytes, qiov, qiov_offset, flags);
+            return true;
+        }
+
+        return false;
+    } else {
+        syx_cow_cache_read_entry(syx_snapshot_state.active_bdrv_cache_snapshot->bdrvs_cow_cache, blk, offset, bytes, qiov, qiov_offset, flags);
+        return true;
+    }
+}
+
+bool syx_snapshot_cow_cache_write_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
+                                        BdrvRequestFlags flags)
+{
+    if (!syx_snapshot_state.active_bdrv_cache_snapshot) {
+        if (syx_snapshot_state.before_fuzz_cache) {
+            assert(syx_cow_cache_write_entry(syx_snapshot_state.before_fuzz_cache, blk, offset, bytes, qiov, qiov_offset, flags));
+            return true;
+        }
+
+        return false;
+    } else {
+        assert(syx_cow_cache_write_entry(syx_snapshot_state.active_bdrv_cache_snapshot->bdrvs_cow_cache, blk, offset, bytes, qiov, qiov_offset, flags));
+        return true;
+    }
+}
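
Taken together with the syx_cow_cache_flush_highest_layer() call added to syx_snapshot_root_restore() above, the intended per-iteration life cycle is roughly the following sketch (the run_one_testcase() driver and the kind/devices arguments are placeholders, not part of this commit):

    syx_snapshot_init(true);                                          /* buffer block writes from boot on */
    SyxSnapshot *snap = syx_snapshot_new(true, true, kind, devices);  /* adopt the pre-fuzz COW cache      */

    for (;;) {
        run_one_testcase();              /* guest runs, dirtying RAM, devices and the virtual disk        */
        syx_snapshot_root_restore(snap); /* restore RAM/devices, then flush the highest COW layer so
                                            every block write made during the run is discarded            */
    }
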
@@ -7,63 +7,30 @@
 */
 
 #pragma once
 
 #include "qemu/osdep.h"
 #include "qom/object.h"
 #include "sysemu/sysemu.h"
 
 #include "device-save.h"
+#include "syx-cow-cache.h"
 #include "../syx-misc.h"
 
-/**
- * Saved ramblock
- */
-typedef struct SyxSnapshotRAMBlock {
-    uint8_t* ram; // RAM block
-    uint64_t used_length; // Length of the ram block
-} SyxSnapshotRAMBlock;
-
-/**
- * A root snapshot representation.
- */
-typedef struct SyxSnapshotRoot {
-    GHashTable* rbs_snapshot; // hash map: H(rb) -> SyxSnapshotRAMBlock
-    DeviceSaveState* dss;
-} SyxSnapshotRoot;
-
-/**
- * A list of dirty pages with their old data.
- */
-typedef struct SyxSnapshotDirtyPage {
-    ram_addr_t offset_within_rb;
-    uint8_t* data;
-} SyxSnapshotDirtyPage;
-
-typedef struct SyxSnapshotDirtyPageList {
-    SyxSnapshotDirtyPage* dirty_pages;
-    uint64_t length;
-} SyxSnapshotDirtyPageList;
-
-/**
- * A snapshot increment. It is used to quickly
- * save a VM state.
- */
-typedef struct SyxSnapshotIncrement {
-    // Back to root snapshot if NULL
-    struct SyxSnapshotIncrement* parent;
-
-    DeviceSaveState* dss;
-
-    GHashTable* rbs_dirty_pages; // hash map: H(rb) -> SyxSnapshotDirtyPageList
-} SyxSnapshotIncrement;
-
+#define SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE 64
+#define SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS (1024 * 1024)
+
+typedef struct SyxSnapshotRoot SyxSnapshotRoot;
+typedef struct SyxSnapshotIncrement SyxSnapshotIncrement;
+
 /**
  * A snapshot. It is the main object used in this API to
  * handle snapshotting.
  */
 typedef struct SyxSnapshot {
-    SyxSnapshotRoot root_snapshot;
+    SyxSnapshotRoot* root_snapshot;
     SyxSnapshotIncrement* last_incremental_snapshot;
 
+    SyxCowCache* bdrvs_cow_cache;
     GHashTable *rbs_dirty_list; // hash map: H(rb) -> GHashTable(offset_within_ramblock). Filled lazily.
 } SyxSnapshot;
 
@@ -82,19 +49,28 @@ typedef struct SyxSnapshotState {
     // Actively tracked snapshots. Their dirty lists will
     // be updated at each dirty access
     SyxSnapshotTracker tracked_snapshots;
 
+    // In use iff syx is initialized with the cached_bdrvs flag on.
+    // It is not updated anymore when an active bdrv cache snapshot is set.
+    SyxCowCache* before_fuzz_cache;
+    // snapshot used to restore bdrv cache if enabled.
+    SyxSnapshot* active_bdrv_cache_snapshot;
+
+    // Root
 } SyxSnapshotState;
 
-void syx_snapshot_init(void);
+void syx_snapshot_init(bool cached_bdrvs);
 
 //
 // Snapshot API
 //
 
-SyxSnapshot* syx_snapshot_new(bool track, DeviceSnapshotKind kind, char** devices);
+SyxSnapshot *syx_snapshot_new(bool track, bool is_active_bdrv_cache, DeviceSnapshotKind kind, char **devices);
 
 void syx_snapshot_free(SyxSnapshot *snapshot);
 
 void syx_snapshot_root_restore(SyxSnapshot *snapshot);
 
 uint64_t syx_snapshot_check_memory_consistency(SyxSnapshot *snapshot);
 
 // Push the current RAM state and saves it
@@ -111,7 +87,9 @@ void syx_snapshot_increment_restore_last(SyxSnapshot* snapshot);
 //
 
 SyxSnapshotTracker syx_snapshot_tracker_init(void);
+
 void syx_snapshot_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot);
+
 void syx_snapshot_stop_track(SyxSnapshotTracker *tracker, SyxSnapshot *snapshot);
 
 
@@ -127,14 +105,8 @@ bool syx_snapshot_is_enabled(void);
 //
 
 void syx_snapshot_dirty_list_add_hostaddr(void *host_addr);
-void syx_snapshot_dirty_list_add_hostaddr_range(void* host_addr, uint64_t len);
-
-/**
- * @brief Add a dirty physical address to the list
- *
- * @param paddr The physical address to add
- */
-void syx_snapshot_dirty_list_add_paddr(hwaddr paddr);
+void syx_snapshot_dirty_list_add_hostaddr_range(void *host_addr, uint64_t len);
 
 /**
  * @brief Same as syx_snapshot_dirty_list_add. The difference
@@ -149,3 +121,9 @@ void syx_snapshot_dirty_list_add_paddr(hwaddr paddr);
 * @param host_addr The host address where the dirty page is located.
 */
 void syx_snapshot_dirty_list_add_tcg_target(uint64_t dummy, void *host_addr);
+
+bool syx_snapshot_cow_cache_read_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
+                                       BdrvRequestFlags flags);
+
+bool syx_snapshot_cow_cache_write_entry(BlockBackend *blk, int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
+                                        BdrvRequestFlags flags);