Merge tag 'pull-vfio-20241226' of https://github.com/legoater/qemu into staging

vfio queue:

* Add support for IGD passthrough on all Intel Gen 11 and 12 devices
* Refactor dirty tracking engine to include VFIO state in calc-dirty-rate
* Drop usage of migration_is_device() and migration_is_active()

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEoPZlSPBIlev+awtgUaNDx8/77KEFAmdtFXUACgkQUaNDx8/7
# 7KGDgQ//bjcz061VL+8pFv5eOSPKXa7m0hTFIjGswk8y6i3izs8c6WXX2RWwfOXn
# 0vLE87XpEoTr494RC6qT/QIhuzfIm+mFb91U/jpjn7TSIrVzvWzI9qVUqKAjvVES
# M0BWNi4oCvZMAoADPJ7wvXbQO5eDSUauF5AeHGRUpy34DFwnHLmOCLe+Cj5L732H
# EOL+QCNf2y/iR36Anh2VyDaaFDPCx7BBF+SApWR93jAnpe3kIXSQczn0wLeXoELB
# Q7FhLSOEicuZUF6pgTYMJ7hpGdZMv9AopTDt4owoDgxYXr0PQ0YWy+fsG5mlavnd
# DHo9qmHKjkbzPHSV5tlim2zDbqu4lRnC6NzJTtVzzFfyrrXTQYTNZh7usVRiG9VN
# JQNNmT5L14tso0YSCgc+KeqjYnV12ZktYsZosoJHKQ2pkpoZRUFQUtXfnRrQGmNt
# RnfNv60Mez1PcWvt17Gq4S5JM+XUgsB6Jpm8tLj1eGowurCerFwLNRK5U09cBKLa
# WprF+b5KmSDQuqiWpmssmuKbvfSyeC8NVgrpRXEkDyivnJYkELki9H6Ec7ATUNyI
# 4ZiX1GlvofKqgiDX8ZUafnz3z4++lgLvOkMb5e/n/oktzUM6gzAds/4mGXLm6hxk
# 8gZb/Hrfjhv0PLIVzphMxv+N3U0nu2CVNJzMcmzFGkqlsnLqgO0=
# =F4P6
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 26 Dec 2024 03:36:05 EST
# gpg:                using RSA key A0F66548F04895EBFE6B0B6051A343C7CFFBECA1
# gpg: Good signature from "Cédric Le Goater <clg@redhat.com>" [full]
# gpg:                 aka "Cédric Le Goater <clg@kaod.org>" [full]
# Primary key fingerprint: A0F6 6548 F048 95EB FE6B  0B60 51A3 43C7 CFFB ECA1

* tag 'pull-vfio-20241226' of https://github.com/legoater/qemu:
  migration: Unexport migration_is_active()
  migration: Drop migration_is_device()
  system/dirtylimit: Don't use migration_is_active()
  vfio/migration: Rename vfio_devices_all_dirty_tracking()
  vfio/migration: Refactor vfio_devices_all_running_and_mig_active() logic
  vfio/migration: Refactor vfio_devices_all_dirty_tracking() logic
  vfio/container: Add dirty tracking started flag
  vfio/igd: add x-igd-gms option back to set DSM region size for guest
  vfio/igd: emulate BDSM in mmio bar0 for gen 6-10 devices
  vfio/igd: emulate GGC register in mmio bar0
  vfio/igd: add macro for declaring mirrored registers
  vfio/igd: add Alder/Raptor/Rocket/Ice/Jasper Lake device ids
  vfio/igd: add Gemini Lake and Comet Lake device ids
  vfio/igd: canonicalize memory size calculations
  vfio/igd: align generation with i915 kernel driver
  vfio/igd: remove unsupported device ids
  vfio/igd: fix GTT stolen memory size calculation for gen 8+

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2024-12-26 04:38:38 -05:00
commit 38d0939b86
9 changed files with 216 additions and 156 deletions


@@ -170,11 +170,32 @@ bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
}
static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer)
static bool vfio_devices_all_device_dirty_tracking_started(
const VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
if (!migration_is_active() && !migration_is_device()) {
QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
if (!vbasedev->dirty_tracking) {
return false;
}
}
return true;
}
bool vfio_devices_all_dirty_tracking_started(
const VFIOContainerBase *bcontainer)
{
return vfio_devices_all_device_dirty_tracking_started(bcontainer) ||
bcontainer->dirty_pages_started;
}
static bool vfio_log_sync_needed(const VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
if (!vfio_devices_all_dirty_tracking_started(bcontainer)) {
return false;
}
@@ -210,36 +231,6 @@ bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer)
return true;
}
/*
* Check if all VFIO devices are running and migration is active, which is
* essentially equivalent to the migration being in pre-copy phase.
*/
bool
vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
if (!migration_is_active()) {
return false;
}
QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
VFIOMigration *migration = vbasedev->migration;
if (!migration) {
return false;
}
if (vfio_device_state_is_running(vbasedev) ||
vfio_device_state_is_precopy(vbasedev)) {
continue;
} else {
return false;
}
}
return true;
}
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
return (!memory_region_is_ram(section->mr) &&
@@ -1373,7 +1364,7 @@ static void vfio_listener_log_sync(MemoryListener *listener,
return;
}
if (vfio_devices_all_dirty_tracking(bcontainer)) {
if (vfio_log_sync_needed(bcontainer)) {
ret = vfio_sync_dirty_bitmap(bcontainer, section, &local_err);
if (ret) {
error_report_err(local_err);
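
Editor's note: the net effect of this hunk pair is that log sync is now gated on whether dirty tracking was actually started (per device or at container level) rather than on migration state, which is what lets a QMP calc-dirty-rate run reach the VFIO sync path. A trivial stand-in sketch of the new predicate (all names here are illustrative, not QEMU's):

#include <stdbool.h>
#include <stdio.h>

static bool log_sync_needed(bool all_devices_tracking,
                            bool container_tracking_started)
{
    return all_devices_tracking || container_tracking_started;
}

int main(void)
{
    /* calc-dirty-rate: container tracking on, no migration running */
    printf("%d\n", log_sync_needed(false, true));   /* 1: sync happens */
    /* no tracking anywhere: the sync path is skipped */
    printf("%d\n", log_sync_needed(false, false));  /* 0 */
    return 0;
}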


@@ -64,13 +64,23 @@ int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
bool start, Error **errp)
{
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
int ret;
if (!bcontainer->dirty_pages_supported) {
return 0;
}
g_assert(vioc->set_dirty_page_tracking);
return vioc->set_dirty_page_tracking(bcontainer, start, errp);
if (bcontainer->dirty_pages_started == start) {
return 0;
}
ret = vioc->set_dirty_page_tracking(bcontainer, start, errp);
if (!ret) {
bcontainer->dirty_pages_started = start;
}
return ret;
}
int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
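
Editor's note: a minimal sketch of the idempotent-toggle pattern introduced above, using stand-in types rather than QEMU's (the struct, the backend stub, and all names are illustrative). The backend callback is skipped whenever the requested state matches the recorded one, and the state is recorded only on success:

#include <stdbool.h>
#include <stdio.h>

struct tracker {
    bool supported;
    bool started;                 /* mirrors dirty_pages_started above */
};

static int backend_set_tracking(struct tracker *t, bool start)
{
    printf("backend: tracking %s\n", start ? "start" : "stop");
    return 0;                     /* pretend the backend call succeeded */
}

static int set_tracking(struct tracker *t, bool start)
{
    if (!t->supported) {
        return 0;
    }
    if (t->started == start) {    /* no state change: skip the backend */
        return 0;
    }
    int ret = backend_set_tracking(t, start);
    if (!ret) {
        t->started = start;       /* record only on success */
    }
    return ret;
}

int main(void)
{
    struct tracker t = { .supported = true, .started = false };
    set_tracking(&t, true);       /* reaches the backend */
    set_tracking(&t, true);       /* now a no-op */
    set_tracking(&t, false);      /* reaches the backend */
    return 0;
}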


@@ -131,7 +131,7 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
int ret;
Error *local_err = NULL;
if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
if (iotlb && vfio_devices_all_dirty_tracking_started(bcontainer)) {
if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
bcontainer->dirty_pages_supported) {
return vfio_dma_unmap_bitmap(container, iova, size, iotlb);


@@ -14,6 +14,7 @@
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "hw/hw.h"
#include "hw/nvram/fw_cfg.h"
#include "pci.h"
@@ -59,43 +60,41 @@
*/
static int igd_gen(VFIOPCIDevice *vdev)
{
if ((vdev->device_id & 0xfff) == 0xa84) {
return 8; /* Broxton */
/*
* Device IDs for Broxton/Apollo Lake are 0x0a84, 0x1a84, 0x1a85, 0x5a84
* and 0x5a85, match bit 11:1 here
* Prefix 0x0a is taken by Haswell, this rule should be matched first.
*/
if ((vdev->device_id & 0xffe) == 0xa84) {
return 9;
}
switch (vdev->device_id & 0xff00) {
/* Old, untested, unavailable, unknown */
case 0x0000:
case 0x2500:
case 0x2700:
case 0x2900:
case 0x2a00:
case 0x2e00:
case 0x3500:
case 0xa000:
return -1;
/* SandyBridge, IvyBridge, ValleyView, Haswell */
case 0x0100:
case 0x0400:
case 0x0a00:
case 0x0c00:
case 0x0d00:
case 0x0f00:
case 0x0100: /* SandyBridge, IvyBridge */
return 6;
/* BroadWell, CherryView, SkyLake, KabyLake */
case 0x1600:
case 0x1900:
case 0x2200:
case 0x5900:
case 0x0400: /* Haswell */
case 0x0a00: /* Haswell */
case 0x0c00: /* Haswell */
case 0x0d00: /* Haswell */
case 0x0f00: /* Valleyview/Bay Trail */
return 7;
case 0x1600: /* Broadwell */
case 0x2200: /* Cherryview */
return 8;
/* CoffeeLake */
case 0x3e00:
case 0x1900: /* Skylake */
case 0x3100: /* Gemini Lake */
case 0x5900: /* Kaby Lake */
case 0x3e00: /* Coffee Lake */
case 0x9B00: /* Comet Lake */
return 9;
/* ElkhartLake */
case 0x4500:
case 0x8A00: /* Ice Lake */
case 0x4500: /* Elkhart Lake */
case 0x4E00: /* Jasper Lake */
return 11;
/* TigerLake */
case 0x9A00:
case 0x9A00: /* Tiger Lake */
case 0x4C00: /* Rocket Lake */
case 0x4600: /* Alder Lake */
case 0xA700: /* Raptor Lake */
return 12;
}
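
Editor's note: as a quick sanity check on the Broxton/Apollo Lake rule above, a hypothetical standalone sketch (the five device IDs come from the comment in the patch; 0x0a16 is a Haswell GT2 ID added here for contrast):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint16_t broxton[] = { 0x0a84, 0x1a84, 0x1a85, 0x5a84, 0x5a85 };

    for (unsigned i = 0; i < 5; i++) {
        /* All five share bits 11:1, so the 0xffe mask matches them all. */
        printf("0x%04x -> %s\n", broxton[i],
               (broxton[i] & 0xffe) == 0xa84 ? "gen 9" : "fall through");
    }

    /* A Haswell ID in the 0x0axx range must not match the Broxton rule. */
    uint16_t haswell = 0x0a16;
    printf("0x%04x -> %s\n", haswell,
           (haswell & 0xffe) == 0xa84 ? "gen 9" : "fall through");
    return 0;
}
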
@@ -116,6 +115,53 @@ typedef struct VFIOIGDQuirk {
#define IGD_BDSM 0x5c /* Base Data of Stolen Memory */
#define IGD_BDSM_GEN11 0xc0 /* Base Data of Stolen Memory of gen 11 and later */
#define IGD_GMCH_GEN6_GMS_SHIFT 3 /* SNB_GMCH in i915 */
#define IGD_GMCH_GEN6_GMS_MASK 0x1f
#define IGD_GMCH_GEN6_GGMS_SHIFT 8
#define IGD_GMCH_GEN6_GGMS_MASK 0x3
#define IGD_GMCH_GEN8_GMS_SHIFT 8 /* BDW_GMCH in i915 */
#define IGD_GMCH_GEN8_GMS_MASK 0xff
#define IGD_GMCH_GEN8_GGMS_SHIFT 6
#define IGD_GMCH_GEN8_GGMS_MASK 0x3
static uint64_t igd_gtt_memory_size(int gen, uint16_t gmch)
{
uint64_t ggms;
if (gen < 8) {
ggms = (gmch >> IGD_GMCH_GEN6_GGMS_SHIFT) & IGD_GMCH_GEN6_GGMS_MASK;
} else {
ggms = (gmch >> IGD_GMCH_GEN8_GGMS_SHIFT) & IGD_GMCH_GEN8_GGMS_MASK;
if (ggms != 0) {
ggms = 1 << ggms;
}
}
return ggms * MiB;
}
static uint64_t igd_stolen_memory_size(int gen, uint32_t gmch)
{
uint64_t gms;
if (gen < 8) {
gms = (gmch >> IGD_GMCH_GEN6_GMS_SHIFT) & IGD_GMCH_GEN6_GMS_MASK;
} else {
gms = (gmch >> IGD_GMCH_GEN8_GMS_SHIFT) & IGD_GMCH_GEN8_GMS_MASK;
}
if (gen < 9) {
return gms * 32 * MiB;
} else {
if (gms < 0xf0) {
return gms * 32 * MiB;
} else {
return (gms - 0xf0 + 1) * 4 * MiB;
}
}
return 0;
}
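
Editor's note: to make the bit-field arithmetic concrete, a worked example for a hypothetical gen 9+ GMCH value, with the constants inlined from the #defines above:

#include <stdint.h>
#include <stdio.h>

#define MiB (1024ULL * 1024ULL)

int main(void)
{
    /* Sample GMCH: GMS (bits 15:8) = 0xf0, GGMS (bits 7:6) = 0x2 */
    uint32_t gmch = (0xf0 << 8) | (0x2 << 6);

    uint64_t ggms = (gmch >> 6) & 0x3;          /* gen 8+ GGMS field */
    if (ggms != 0) {
        ggms = 1 << ggms;                       /* 2 -> 4MiB of GTT */
    }

    uint64_t gms = (gmch >> 8) & 0xff;          /* gen 8+ GMS field */
    uint64_t dsm = gms < 0xf0 ? gms * 32 * MiB  /* 32MiB units */
                              : (gms - 0xf0 + 1) * 4 * MiB; /* 4MiB units */

    printf("GTT: %llu MiB, DSM: %llu MiB\n",    /* GTT: 4 MiB, DSM: 4 MiB */
           (unsigned long long)ggms,
           (unsigned long long)(dsm / MiB));
    return 0;
}
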
/*
* The rather short list of registers that we copy from the host devices.
@@ -264,17 +310,10 @@ static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev,
static int vfio_igd_gtt_max(VFIOPCIDevice *vdev)
{
uint32_t gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
int ggms, gen = igd_gen(vdev);
int gen = igd_gen(vdev);
uint64_t ggms_size = igd_gtt_memory_size(gen, gmch);
gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, sizeof(gmch));
ggms = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
if (gen > 6) {
ggms = 1 << ggms;
}
ggms *= MiB;
return (ggms / (4 * KiB)) * (gen < 8 ? 4 : 8);
return (ggms_size / (4 * KiB)) * (gen < 8 ? 4 : 8);
}
/*
@@ -383,16 +422,9 @@ static const MemoryRegionOps vfio_igd_index_quirk = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
#define IGD_BDSM_MMIO_OFFSET 0x1080C0
static uint64_t vfio_igd_quirk_bdsm_read(void *opaque,
hwaddr addr, unsigned size)
static uint64_t vfio_igd_pci_config_read(VFIOPCIDevice *vdev, uint64_t offset,
unsigned size)
{
VFIOPCIDevice *vdev = opaque;
uint64_t offset;
offset = IGD_BDSM_GEN11 + addr;
switch (size) {
case 1:
return pci_get_byte(vdev->pdev.config + offset);
@@ -403,21 +435,17 @@ static uint64_t vfio_igd_quirk_bdsm_read(void *opaque,
case 8:
return pci_get_quad(vdev->pdev.config + offset);
default:
hw_error("igd: unsupported read size, %u bytes", size);
hw_error("igd: unsupported pci config read at %"PRIx64", size %u",
offset, size);
break;
}
return 0;
}
static void vfio_igd_quirk_bdsm_write(void *opaque, hwaddr addr,
static void vfio_igd_pci_config_write(VFIOPCIDevice *vdev, uint64_t offset,
uint64_t data, unsigned size)
{
VFIOPCIDevice *vdev = opaque;
uint64_t offset;
offset = IGD_BDSM_GEN11 + addr;
switch (size) {
case 1:
pci_set_byte(vdev->pdev.config + offset, data);
@@ -432,17 +460,42 @@ static void vfio_igd_quirk_bdsm_write(void *opaque, hwaddr addr,
pci_set_quad(vdev->pdev.config + offset, data);
break;
default:
hw_error("igd: unsupported read size, %u bytes", size);
hw_error("igd: unsupported pci config write at %"PRIx64", size %u",
offset, size);
break;
}
}
static const MemoryRegionOps vfio_igd_bdsm_quirk = {
.read = vfio_igd_quirk_bdsm_read,
.write = vfio_igd_quirk_bdsm_write,
.endianness = DEVICE_LITTLE_ENDIAN,
#define VFIO_IGD_QUIRK_MIRROR_REG(reg, name) \
static uint64_t vfio_igd_quirk_read_##name(void *opaque, \
hwaddr addr, unsigned size) \
{ \
VFIOPCIDevice *vdev = opaque; \
\
return vfio_igd_pci_config_read(vdev, reg + addr, size); \
} \
\
static void vfio_igd_quirk_write_##name(void *opaque, hwaddr addr, \
uint64_t data, unsigned size) \
{ \
VFIOPCIDevice *vdev = opaque; \
\
vfio_igd_pci_config_write(vdev, reg + addr, data, size); \
} \
\
static const MemoryRegionOps vfio_igd_quirk_mirror_##name = { \
.read = vfio_igd_quirk_read_##name, \
.write = vfio_igd_quirk_write_##name, \
.endianness = DEVICE_LITTLE_ENDIAN, \
};
VFIO_IGD_QUIRK_MIRROR_REG(IGD_GMCH, ggc)
VFIO_IGD_QUIRK_MIRROR_REG(IGD_BDSM, bdsm)
VFIO_IGD_QUIRK_MIRROR_REG(IGD_BDSM_GEN11, bdsm64)
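
Editor's note: for reference, hand-expanding the first invocation shows the accessors and MemoryRegionOps the macro generates (an illustrative expansion, not extra code in the patch):

static uint64_t vfio_igd_quirk_read_ggc(void *opaque,
                                        hwaddr addr, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;

    return vfio_igd_pci_config_read(vdev, IGD_GMCH + addr, size);
}

static void vfio_igd_quirk_write_ggc(void *opaque, hwaddr addr,
                                     uint64_t data, unsigned size)
{
    VFIOPCIDevice *vdev = opaque;

    vfio_igd_pci_config_write(vdev, IGD_GMCH + addr, data, size);
}

static const MemoryRegionOps vfio_igd_quirk_mirror_ggc = {
    .read = vfio_igd_quirk_read_ggc,
    .write = vfio_igd_quirk_write_ggc,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
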
#define IGD_GGC_MMIO_OFFSET 0x108040
#define IGD_BDSM_MMIO_OFFSET 0x1080C0
void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr)
{
VFIOQuirk *quirk;
@@ -465,46 +518,39 @@ void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr)
* into MMIO space and read from MMIO space by the Windows driver.
*/
gen = igd_gen(vdev);
if (gen < 11) {
if (gen < 6) {
return;
}
quirk = vfio_quirk_alloc(1);
quirk = vfio_quirk_alloc(2);
quirk->data = vdev;
memory_region_init_io(&quirk->mem[0], OBJECT(vdev), &vfio_igd_bdsm_quirk,
vdev, "vfio-igd-bdsm-quirk", 8);
memory_region_init_io(&quirk->mem[0], OBJECT(vdev),
&vfio_igd_quirk_mirror_ggc, vdev,
"vfio-igd-ggc-quirk", 2);
memory_region_add_subregion_overlap(vdev->bars[0].region.mem,
IGD_BDSM_MMIO_OFFSET, &quirk->mem[0],
IGD_GGC_MMIO_OFFSET, &quirk->mem[0],
1);
if (gen < 11) {
memory_region_init_io(&quirk->mem[1], OBJECT(vdev),
&vfio_igd_quirk_mirror_bdsm, vdev,
"vfio-igd-bdsm-quirk", 4);
memory_region_add_subregion_overlap(vdev->bars[0].region.mem,
IGD_BDSM_MMIO_OFFSET,
&quirk->mem[1], 1);
} else {
memory_region_init_io(&quirk->mem[1], OBJECT(vdev),
&vfio_igd_quirk_mirror_bdsm64, vdev,
"vfio-igd-bdsm-quirk", 8);
memory_region_add_subregion_overlap(vdev->bars[0].region.mem,
IGD_BDSM_MMIO_OFFSET,
&quirk->mem[1], 1);
}
QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
}
static int igd_get_stolen_mb(int gen, uint32_t gmch)
{
int gms;
if (gen < 8) {
gms = (gmch >> 3) & 0x1f;
} else {
gms = (gmch >> 8) & 0xff;
}
if (gen < 9) {
if (gms > 0x10) {
error_report("Unsupported IGD GMS value 0x%x", gms);
return 0;
}
return gms * 32;
} else {
if (gms < 0xf0)
return gms * 32;
else
return (gms - 0xf0) * 4 + 4;
}
}
void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
{
g_autofree struct vfio_region_info *rom = NULL;
@@ -514,7 +560,8 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
VFIOQuirk *quirk;
VFIOIGDQuirk *igd;
PCIDevice *lpc_bridge;
int i, ret, ggms_mb, gms_mb = 0, gen;
int i, ret, gen;
uint64_t ggms_size, gms_size;
uint64_t *bdsm_size;
uint32_t gmch;
uint16_t cmd_orig, cmd;
@@ -676,13 +723,33 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
/* Determine the size of stolen memory needed for GTT */
ggms_mb = (gmch >> (gen < 8 ? 8 : 6)) & 0x3;
if (gen > 6) {
ggms_mb = 1 << ggms_mb;
/*
* Allow user to override dsm size using x-igd-gms option, in multiples of
* 32MiB. This option should only be used when the desired size cannot be
* set from DVMT Pre-Allocated option in host BIOS.
*/
if (vdev->igd_gms) {
if (gen < 8) {
if (vdev->igd_gms <= 0x10) {
gmch &= ~(IGD_GMCH_GEN6_GMS_MASK << IGD_GMCH_GEN6_GMS_SHIFT);
gmch |= vdev->igd_gms << IGD_GMCH_GEN6_GMS_SHIFT;
} else {
error_report(QERR_INVALID_PARAMETER_VALUE,
"x-igd-gms", "0~0x10");
}
} else {
if (vdev->igd_gms <= 0x40) {
gmch &= ~(IGD_GMCH_GEN8_GMS_MASK << IGD_GMCH_GEN8_GMS_SHIFT);
gmch |= vdev->igd_gms << IGD_GMCH_GEN8_GMS_SHIFT;
} else {
error_report(QERR_INVALID_PARAMETER_VALUE,
"x-igd-gms", "0~0x40");
}
}
}
gms_mb = igd_get_stolen_mb(gen, gmch);
ggms_size = igd_gtt_memory_size(gen, gmch);
gms_size = igd_stolen_memory_size(gen, gmch);
/*
* Request reserved memory for stolen memory via fw_cfg. VM firmware
@@ -693,7 +760,7 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
* config offset 0x5C.
*/
bdsm_size = g_malloc(sizeof(*bdsm_size));
*bdsm_size = cpu_to_le64((ggms_mb + gms_mb) * MiB);
*bdsm_size = cpu_to_le64(ggms_size + gms_size);
fw_cfg_add_file(fw_cfg_find(), "etc/igd-bdsm-size",
bdsm_size, sizeof(*bdsm_size));
@@ -744,5 +811,6 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr)
vdev->vbasedev.name);
}
trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name, ggms_mb + gms_mb);
trace_vfio_pci_igd_bdsm_enabled(vdev->vbasedev.name,
(ggms_size + gms_size) / MiB);
}
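
Editor's note: a worked example of the x-igd-gms override on a gen 8+ device, with a hypothetical host GMCH value. The option value is written into GMCH bits 15:8 and then decoded in 32MiB units, so x-igd-gms=2 requests a 64MiB DSM region:

#include <stdint.h>
#include <stdio.h>

#define MiB                     (1024ULL * 1024ULL)
#define IGD_GMCH_GEN8_GMS_SHIFT 8
#define IGD_GMCH_GEN8_GMS_MASK  0xff

int main(void)
{
    uint32_t gmch = 0x0341;     /* sample host value: GMS field = 3 */
    uint32_t igd_gms = 0x2;     /* user override: 2 * 32MiB */

    gmch &= ~(IGD_GMCH_GEN8_GMS_MASK << IGD_GMCH_GEN8_GMS_SHIFT);
    gmch |= igd_gms << IGD_GMCH_GEN8_GMS_SHIFT;

    uint64_t gms = (gmch >> IGD_GMCH_GEN8_GMS_SHIFT) & IGD_GMCH_GEN8_GMS_MASK;
    printf("DSM: %llu MiB\n", (unsigned long long)(gms * 32)); /* 64 MiB */
    return 0;
}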


@@ -143,7 +143,7 @@ typedef struct VFIODevice {
OnOffAuto pre_copy_dirty_page_tracking;
OnOffAuto device_dirty_page_tracking;
bool dirty_pages_supported;
bool dirty_tracking;
bool dirty_tracking; /* Protected by BQL */
bool iommu_dirty_tracking;
HostIOMMUDevice *hiod;
int devid;
@@ -296,8 +296,8 @@ bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp);
void vfio_migration_exit(VFIODevice *vbasedev);
int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size);
bool
vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer);
bool vfio_devices_all_dirty_tracking_started(
const VFIOContainerBase *bcontainer);
bool
vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer);
int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,


@@ -44,6 +44,7 @@ typedef struct VFIOContainerBase {
unsigned long pgsizes;
unsigned int dma_max_mappings;
bool dirty_pages_supported;
bool dirty_pages_started; /* Protected by BQL */
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
QLIST_ENTRY(VFIOContainerBase) next;


@@ -53,8 +53,6 @@ void dump_vmstate_json_to_file(FILE *out_fp);
void migration_object_init(void);
void migration_shutdown(void);
bool migration_is_active(void);
bool migration_is_device(void);
bool migration_is_running(void);
bool migration_thread_is_self(void);


@@ -1139,6 +1139,14 @@ bool migration_is_running(void)
}
}
static bool migration_is_active(void)
{
MigrationState *s = current_migration;
return (s->state == MIGRATION_STATUS_ACTIVE ||
s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}
static bool migrate_show_downtime(MigrationState *s)
{
return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
@@ -1637,21 +1645,6 @@ bool migration_in_bg_snapshot(void)
return migrate_background_snapshot() && migration_is_running();
}
bool migration_is_active(void)
{
MigrationState *s = current_migration;
return (s->state == MIGRATION_STATUS_ACTIVE ||
s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}
bool migration_is_device(void)
{
MigrationState *s = current_migration;
return s->state == MIGRATION_STATUS_DEVICE;
}
bool migration_thread_is_self(void)
{
MigrationState *s = current_migration;


@@ -80,8 +80,7 @@ static void vcpu_dirty_rate_stat_collect(void)
int i = 0;
int64_t period = DIRTYLIMIT_CALC_TIME_MS;
if (migrate_dirty_limit() &&
migration_is_active()) {
if (migrate_dirty_limit() && migration_is_running()) {
period = migrate_vcpu_dirty_limit_period();
}
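
Editor's note: a stand-in sketch contrasting the two predicates. The enum below is a simplified subset of QEMU's migration states; the real migration_is_running() covers additional in-flight states such as CANCELLING and PRE_SWITCHOVER. The broader check means the dirty-limit sampling period now also follows migrate_vcpu_dirty_limit_period() during setup and device-transfer phases, not only while RAM or postcopy transfer is active:

#include <stdbool.h>
#include <stdio.h>

/* Simplified subset of migration states, for illustration only. */
enum mig_state { SETUP, ACTIVE, POSTCOPY_ACTIVE, DEVICE, COMPLETED };

static bool is_active(enum mig_state s)      /* old gate, now internal */
{
    return s == ACTIVE || s == POSTCOPY_ACTIVE;
}

static bool is_running(enum mig_state s)     /* new, broader gate */
{
    return s != COMPLETED;  /* every in-flight state in this subset */
}

int main(void)
{
    const char *name[] = { "SETUP", "ACTIVE", "POSTCOPY_ACTIVE",
                           "DEVICE", "COMPLETED" };

    for (int s = SETUP; s <= COMPLETED; s++) {
        printf("%-16s active=%d running=%d\n",
               name[s], is_active(s), is_running(s));
    }
    return 0;
}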