From 224f364a49ec88f9710908574393818d964d0593 Mon Sep 17 00:00:00 2001
From: Li Zhijian
Date: Thu, 8 Jul 2021 22:45:21 +0800
Subject: [PATCH 1/6] migration/rdma: prevent from double free the same mr

backtrace:
'0x00007ffff5f44ec2 in __ibv_dereg_mr_1_1 (mr=0x7fff1007d390) at /home/lizhijian/rdma-core/libibverbs/verbs.c:478
478             void *addr = mr->addr;
(gdb) bt
 #0  0x00007ffff5f44ec2 in __ibv_dereg_mr_1_1 (mr=0x7fff1007d390) at /home/lizhijian/rdma-core/libibverbs/verbs.c:478
 #1  0x0000555555891fcc in rdma_delete_block (block=<optimized out>, rdma=0x7fff38176010) at ../migration/rdma.c:691
 #2  qemu_rdma_cleanup (rdma=0x7fff38176010) at ../migration/rdma.c:2365
 #3  0x00005555558925b0 in qio_channel_rdma_close_rcu (rcu=0x555556b8b6c0) at ../migration/rdma.c:3073
 #4  0x0000555555d652a3 in call_rcu_thread (opaque=opaque@entry=0x0) at ../util/rcu.c:281
 #5  0x0000555555d5edf9 in qemu_thread_start (args=0x7fffe88bb4d0) at ../util/qemu-thread-posix.c:541
 #6  0x00007ffff54c73f9 in start_thread () at /lib64/libpthread.so.0
 #7  0x00007ffff53f3b03 in clone () at /lib64/libc.so.6'

Signed-off-by: Li Zhijian
Message-Id: <20210708144521.1959614-1-lizhijian@cn.fujitsu.com>
Reviewed-by: Dr. David Alan Gilbert
Signed-off-by: Dr. David Alan Gilbert
---
 migration/rdma.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/migration/rdma.c b/migration/rdma.c
index 38a099f7ee..5c2d113aa9 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1143,6 +1143,7 @@ static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
 
     for (i--; i >= 0; i--) {
         ibv_dereg_mr(local->block[i].mr);
+        local->block[i].mr = NULL;
         rdma->total_registrations--;
     }
 

From a51dcef08ba574c129ae347f6f47b61ccb10cf07 Mon Sep 17 00:00:00 2001
From: Laurent Vivier
Date: Thu, 1 Jul 2021 15:14:58 +0200
Subject: [PATCH 2/6] migration: failover: emit a warning when the card is not fully unplugged

When the migration fails or is canceled, we wait for the end of the unplug
operation so that the card can be plugged back. But if the unplug operation
never finishes, we stop waiting and QEMU emits a warning to inform the user.

Based-on: 20210629155007.629086-1-lvivier@redhat.com
Signed-off-by: Laurent Vivier
Message-Id: <20210701131458.112036-1-lvivier@redhat.com>
Reviewed-by: Juan Quintela
Signed-off-by: Dr. David Alan Gilbert
---
 migration/migration.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/migration/migration.c b/migration/migration.c
index 5ff7ba9d5c..d717cd089a 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3701,6 +3701,10 @@ static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
         while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
             qemu_sem_timedwait(&s->wait_unplug_sem, 250);
         }
+        if (qemu_savevm_state_guest_unplug_pending()) {
+            warn_report("migration: partially unplugged device on "
+                        "failure");
+        }
     }
 
     migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);

From 2e3e3da3c2ad559d1255a9a3bf3df0782c2cf231 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Thu, 8 Jul 2021 15:06:51 -0400
Subject: [PATCH 3/6] migration: Release return path early for paused postcopy

When a postcopy pause is triggered, we rely on the migration thread to clean
up the to_dst_file handle, and on the return path thread to clean up the
from_dst_file handle (which is stored in the local variable "rp").

Within that process, the from_dst_file cleanup (qemu_fclose) is postponed
until the handle is set up again for a postcopy recovery.
It used to work before yank was born; after yank was introduced we rely on the
refcount of the IOC to correctly unregister the yank function in
channel_close(). Without an early and timely release of the from_dst_file
handle, the yank function is left registered during paused postcopy.

Without this patch, the steps below (quoted from Xiaohui) could crash qemu on
the source host:

1. Boot vm on src host
2. Boot vm on dst host
3. Enable postcopy on src&dst host
4. Load stressapptest in vm and set postcopy speed to 50M
5. Start migration from src to dst host, change into postcopy mode when
   migration is active.
6. When postcopy is active, down the network card (the migration runs over
   this network) on dst host.
7. Wait until postcopy is paused on src&dst host.
8. Before bringing the network card back up, recover migration on dst host;
   this will report an error.
9. Ignore the error of step 8, go on recovering migration on src host:

After step 9, qemu on src host will core dump after some seconds:
qemu-kvm: ../util/yank.c:107: yank_unregister_instance: Assertion `QLIST_EMPTY(&entry->yankfns)' failed.
1.sh: line 38: 44662 Aborted (core dumped)

Reported-by: Li Xiaohui
Signed-off-by: Peter Xu
Message-Id: <20210708190653.252961-2-peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert
Signed-off-by: Dr. David Alan Gilbert
---
 migration/migration.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index d717cd089a..38ebc6c1ab 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2818,12 +2818,12 @@ out:
              * Maybe there is something we can do: it looks like a
              * network down issue, and we pause for a recovery.
              */
+            qemu_fclose(rp);
+            ms->rp_state.from_dst_file = NULL;
+            rp = NULL;
             if (postcopy_pause_return_path_thread(ms)) {
                 /* Reload rp, reset the rest */
-                if (rp != ms->rp_state.from_dst_file) {
-                    qemu_fclose(rp);
-                    rp = ms->rp_state.from_dst_file;
-                }
+                rp = ms->rp_state.from_dst_file;
                 ms->rp_state.error = false;
                 goto retry;
             }

From ca30f24d12c9ba1fc0654e6e983f950f7792a217 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Thu, 8 Jul 2021 15:06:52 -0400
Subject: [PATCH 4/6] migration: Don't do migrate cleanup if during postcopy resume

The process below could crash qemu with postcopy recovery:

1. (hmp) migrate -d ..
2. (hmp) migrate_start_postcopy
3. [network down, postcopy paused]
4. (hmp) migrate -r $WRONG_PORT
   when trying to recover on an invalid $WRONG_PORT, cleanup_bh will be cleared
5. (hmp) migrate -r $RIGHT_PORT
   [qemu crash on assert(cleanup_bh)]

The thing is we shouldn't clean up if it's a postcopy resume; the error is set
mostly because the channel is wrong, so we return directly and wait for the
user to retry.

migrate_fd_cleanup() should only be called when migration is cancelled or
completed.

Signed-off-by: Peter Xu
Message-Id: <20210708190653.252961-3-peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert
Signed-off-by: Dr. David Alan Gilbert
---
 migration/migration.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/migration/migration.c b/migration/migration.c
index 38ebc6c1ab..20c48cfff1 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3979,7 +3979,18 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     }
     if (error_in) {
         migrate_fd_error(s, error_in);
-        migrate_fd_cleanup(s);
+        if (resume) {
+            /*
+             * Don't do cleanup for resume if channel is invalid, but only dump
+             * the error. We wait for another channel connect from the user.
+             * The error_report still gives HMP user a hint on what failed.
+             * It's normally done in migrate_fd_cleanup(), but call it here
+             * explicitly.
+             */
+            error_report_err(error_copy(s->error));
+        } else {
+            migrate_fd_cleanup(s);
+        }
         return;
     }
 

From ca7bd0821bb62a1561dd409507039558c0e1f5ac Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Thu, 8 Jul 2021 15:06:53 -0400
Subject: [PATCH 5/6] migration: Clear error at entry of migrate_fd_connect()

For each "migrate" command, remember to clear s->error before going on. For
one thing, that way a new error will still be remembered; see
migrate_set_error(), which only sets the error if error==NULL.

Meanwhile, if a failed migration finally completes (e.g., postcopy recovered
and finished), we shouldn't dump a stale error when calling
migrate_fd_cleanup() at the end.

Signed-off-by: Peter Xu
Message-Id: <20210708190653.252961-4-peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert
Signed-off-by: Dr. David Alan Gilbert
---
 migration/migration.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/migration/migration.c b/migration/migration.c
index 20c48cfff1..2d306582eb 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1855,6 +1855,15 @@ void migrate_set_error(MigrationState *s, const Error *error)
     }
 }
 
+static void migrate_error_free(MigrationState *s)
+{
+    QEMU_LOCK_GUARD(&s->error_mutex);
+    if (s->error) {
+        error_free(s->error);
+        s->error = NULL;
+    }
+}
+
 void migrate_fd_error(MigrationState *s, const Error *error)
 {
     trace_migrate_fd_error(error_get_pretty(error));
@@ -3970,6 +3979,13 @@ void migrate_fd_connect(MigrationState *s, Error *error_in)
     int64_t rate_limit;
     bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
 
+    /*
+     * If there's a previous error, free it and prepare for another one.
+     * Meanwhile if migration completes successfully, there won't have an error
+     * dumped when calling migrate_fd_cleanup().
+     */
+    migrate_error_free(s);
+
     s->expected_downtime = s->parameters.downtime_limit;
     if (resume) {
         assert(s->cleanup_bh);

From 63268c4970a5f126cc9af75f3ccb8057abef5ec0 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Wed, 30 Jun 2021 16:08:05 -0400
Subject: [PATCH 6/6] migration: Move bitmap_mutex out of migration_bitmap_clear_dirty()

Taking the mutex for every single dirty bit to clear is too slow, especially
since we take/release it even when the dirty bit is already cleared. So far
it's only used to sync with special cases of qemu_guest_free_page_hint()
against the migration thread, nothing really that serious yet. Let's move the
lock up to the callers.

There're two callers of migration_bitmap_clear_dirty().

For migration, move it into ram_save_iterate(). With the help of the MAX_WAIT
logic, we'll only run ram_save_iterate() for roughly 50ms at a time, so we
take the lock once at its entry. It also means any call site of
qemu_guest_free_page_hint() can be delayed; but that should be very rare, only
during migration, and I don't see a problem with it.

For COLO, move it up to colo_flush_ram_cache(). I think COLO forgot to take
that lock even when calling ramblock_sync_dirty_bitmap(); compare with
migration_bitmap_sync(), which takes it correctly. So let the mutex cover both
the ramblock_sync_dirty_bitmap() and migration_bitmap_clear_dirty() calls.

It's even possible to drop the lock and use atomic operations on rb->bmap and
the variable migration_dirty_pages. I didn't do that, just to stay safe; it's
also not predictable whether the frequent atomic ops could bring overhead too,
e.g. on huge VMs when it happens very often.
When that really comes, we can keep a local counter and periodically call
atomic ops. Keep it simple for now.

Cc: Wei Wang
Cc: David Hildenbrand
Cc: Hailiang Zhang
Cc: Dr. David Alan Gilbert
Cc: Juan Quintela
Cc: Leonardo Bras Soares Passos
Signed-off-by: Peter Xu
Message-Id: <20210630200805.280905-1-peterx@redhat.com>
Reviewed-by: Wei Wang
Signed-off-by: Dr. David Alan Gilbert
---
 migration/ram.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 88ff34f574..b5fc454b2f 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -795,8 +795,6 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
 {
     bool ret;
 
-    QEMU_LOCK_GUARD(&rs->bitmap_mutex);
-
     /*
      * Clear dirty bitmap if needed. This _must_ be called before we
      * send any of the page in the chunk because we need to make sure
@@ -2834,6 +2832,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         goto out;
     }
 
+    /*
+     * We'll take this lock a little bit long, but it's okay for two reasons.
+     * Firstly, the only possible other thread to take it is who calls
+     * qemu_guest_free_page_hint(), which should be rare; secondly, see
+     * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
+     * guarantees that we'll at least released it in a regular basis.
+     */
+    qemu_mutex_lock(&rs->bitmap_mutex);
     WITH_RCU_READ_LOCK_GUARD() {
         if (ram_list.version != rs->last_version) {
             ram_state_reset(rs);
@@ -2893,6 +2899,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             i++;
         }
     }
+    qemu_mutex_unlock(&rs->bitmap_mutex);
 
     /*
      * Must occur before EOS (or any QEMUFile operation)
@@ -3682,6 +3689,7 @@ void colo_flush_ram_cache(void)
     unsigned long offset = 0;
 
     memory_global_dirty_log_sync();
+    qemu_mutex_lock(&ram_state->bitmap_mutex);
     WITH_RCU_READ_LOCK_GUARD() {
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
@@ -3710,6 +3718,7 @@ void colo_flush_ram_cache(void)
         }
     }
     trace_colo_flush_ram_cache_end();
+    qemu_mutex_unlock(&ram_state->bitmap_mutex);
 }
 
 /**
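
The core change in patch 6 is the classic optimization of hoisting a per-item
mutex acquisition out of a bounded loop: lock once around the whole scan
instead of once per page. The standalone C sketch below illustrates only that
pattern; it uses plain pthreads and made-up names (bitmap_mutex,
clear_dirty_per_page, save_iteration), not QEMU's qemu_mutex_* API or data
structures.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Before: one lock/unlock per page, even when the bit is already clear. */
static bool clear_dirty_per_page(uint64_t *bitmap, size_t page)
{
    uint64_t mask = UINT64_C(1) << (page % 64);

    pthread_mutex_lock(&bitmap_mutex);
    bool was_dirty = bitmap[page / 64] & mask;
    bitmap[page / 64] &= ~mask;
    pthread_mutex_unlock(&bitmap_mutex);
    return was_dirty;
}

/* After: the caller holds the lock across the whole time-bounded scan. */
static void save_iteration(uint64_t *bitmap, size_t npages)
{
    pthread_mutex_lock(&bitmap_mutex);
    for (size_t page = 0; page < npages; page++) {
        uint64_t mask = UINT64_C(1) << (page % 64);
        if (bitmap[page / 64] & mask) {
            bitmap[page / 64] &= ~mask;
            /* ...send the dirty page while still holding the lock... */
        }
    }
    pthread_mutex_unlock(&bitmap_mutex);
}

int main(void)
{
    uint64_t bitmap[4] = { UINT64_MAX, 0, UINT64_MAX, 0 };

    (void)clear_dirty_per_page(bitmap, 3); /* old style: lock per page */
    save_iteration(bitmap, 4 * 64);        /* new style: one lock per scan */
    return 0;
}

The trade-off is the one the commit message calls out: the lock is held for
the whole scan, so another thread that wants it (in QEMU, a caller of
qemu_guest_free_page_hint()) can be delayed for up to one iteration.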