migration: Move ram_flush_compressed_data() to ram-compress.c

As we export it, rename it to compress_flush_data().

Reviewed-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231019110724.15324-10-quintela@redhat.com>
Author: Juan Quintela <quintela@redhat.com>
Date:   2023-10-19 13:07:22 +02:00
commit 8020bc9a77 (parent 742ec5f338)
3 changed files with 14 additions and 13 deletions
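
For context, the three hunks below amount to this: the flush helper becomes part of the ram-compress API as a thin guard around flush_compressed_data(), and the call sites in ram.c switch to the exported name. A minimal sketch, condensed from the diff (surrounding migration code elided):

/* Implemented in ram-compress.c and declared in its header;
 * a no-op unless compression is enabled. */
void compress_flush_data(void)
{
    if (!migrate_compress()) {
        return;
    }

    flush_compressed_data(compress_send_queued_data);
}

/* Former callers of the file-local ram_flush_compressed_data() in ram.c,
 * e.g. ram_save_complete(), now simply do: */
compress_flush_data();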

migration/ram-compress.c

@@ -558,3 +558,12 @@ void compress_update_rates(uint64_t page_count)
             compression_counters.compressed_size;
     }
 }
+
+void compress_flush_data(void)
+{
+    if (!migrate_compress()) {
+        return;
+    }
+
+    flush_compressed_data(compress_send_queued_data);
+}

migration/ram-compress.h

@@ -73,5 +73,6 @@ uint64_t ram_compressed_pages(void);
 void update_compress_thread_counts(const CompressParam *param, int bytes_xmit);
 void compress_update_rates(uint64_t page_count);
 int compress_send_queued_data(CompressParam *param);
+void compress_flush_data(void);
 
 #endif

migration/ram.c

@@ -1300,15 +1300,6 @@ int compress_send_queued_data(CompressParam *param)
     return len;
 }
 
-static void ram_flush_compressed_data(void)
-{
-    if (!migrate_compress()) {
-        return;
-    }
-
-    flush_compressed_data(compress_send_queued_data);
-}
-
 #define PAGE_ALL_CLEAN 0
 #define PAGE_TRY_AGAIN 1
 #define PAGE_DIRTY_FOUND 2
@@ -1364,7 +1355,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
              * Also If xbzrle is on, stop using the data compression at this
              * point. In theory, xbzrle can do better than compression.
              */
-            ram_flush_compressed_data();
+            compress_flush_data();
 
             /* Hit the end of the list */
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
@@ -2036,7 +2027,7 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
      * much CPU resource.
      */
     if (pss->block != pss->last_sent_block) {
-        ram_flush_compressed_data();
+        compress_flush_data();
         return false;
     }
 
@@ -3083,7 +3074,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             * page is sent in one chunk.
             */
            if (migrate_postcopy_ram()) {
-               ram_flush_compressed_data();
+               compress_flush_data();
            }
 
            /*
@@ -3184,7 +3175,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         }
         qemu_mutex_unlock(&rs->bitmap_mutex);
 
-        ram_flush_compressed_data();
+        compress_flush_data();
 
         int ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
         if (ret < 0) {