kvm: require KVM_CAP_IOEVENTFD and KVM_CAP_IOEVENTFD_ANY_LENGTH
KVM_CAP_IOEVENTFD_ANY_LENGTH was added in Linux 4.4, released in 2016. Assume that it is present.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5d9ec1f4c7
commit 126e7f7803
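As background for the message above (not part of the patch): a kernel's support for these two capabilities can be probed from userspace with the KVM_CHECK_EXTENSION ioctl on /dev/kvm. A minimal standalone sketch follows; the constants come from <linux/kvm.h> and error handling is reduced to the bare minimum.

/* Minimal sketch: probe the two capabilities this commit now requires.
 * Standalone example, not QEMU code; assumes <linux/kvm.h> defines both
 * KVM_CAP_IOEVENTFD and KVM_CAP_IOEVENTFD_ANY_LENGTH (Linux >= 4.4). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    /* KVM_CHECK_EXTENSION returns 0 if the capability is absent,
     * a positive value if it is present. */
    int ioeventfd = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD);
    int any_len   = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOEVENTFD_ANY_LENGTH);
    printf("KVM_CAP_IOEVENTFD: %d, KVM_CAP_IOEVENTFD_ANY_LENGTH: %d\n",
           ioeventfd, any_len);
    return 0;
}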
@@ -90,7 +90,6 @@ bool kvm_kernel_irqchip;
 bool kvm_split_irqchip;
 bool kvm_async_interrupts_allowed;
 bool kvm_halt_in_kernel_allowed;
-bool kvm_eventfds_allowed;
 bool kvm_resamplefds_allowed;
 bool kvm_msi_via_irqfd_allowed;
 bool kvm_gsi_routing_allowed;
@@ -98,7 +97,6 @@ bool kvm_gsi_direct_mapping;
 bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
 bool kvm_vm_attributes_allowed;
-bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;
 bool kvm_has_guest_debug;
 static int kvm_sstep_flags;
@@ -110,6 +108,8 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
     KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
     KVM_CAP_INFO(INTERNAL_ERROR_DATA),
+    KVM_CAP_INFO(IOEVENTFD),
+    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
     KVM_CAP_LAST_INFO
 };

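The two new KVM_CAP_INFO entries turn the capabilities into hard requirements: the table above is walked early in kvm_init() with kvm_check_extension(), and a missing entry aborts initialization, which is what lets the per-feature booleans go away. A rough, illustrative sketch of that table-walk pattern (the struct and function names below are invented for the example, not QEMU's):

/* Hedged sketch of the "required capability table" pattern: each entry
 * pairs a human-readable name with a KVM_CAP_* number, and init fails
 * hard if KVM_CHECK_EXTENSION reports 0 for any of them. */
#include <stdio.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

struct required_cap {
    const char *name;
    int cap;                          /* KVM_CAP_* constant */
};

static const struct required_cap required_caps[] = {
    { "KVM_CAP_IOEVENTFD", KVM_CAP_IOEVENTFD },
    { "KVM_CAP_IOEVENTFD_ANY_LENGTH", KVM_CAP_IOEVENTFD_ANY_LENGTH },
    { NULL, 0 }                       /* terminator, like KVM_CAP_LAST_INFO */
};

/* Returns 0 if all required capabilities are present, -1 otherwise. */
static int check_required_caps(int kvm_fd)
{
    for (const struct required_cap *c = required_caps; c->name; c++) {
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, c->cap) <= 0) {
            fprintf(stderr, "kvm: missing required capability %s\n", c->name);
            return -1;
        }
    }
    return 0;
}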
@@ -2547,18 +2547,12 @@ static int kvm_init(MachineState *ms)
     kvm_readonly_mem_allowed =
         (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

-    kvm_eventfds_allowed =
-        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
-
     kvm_resamplefds_allowed =
         (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

     kvm_vm_attributes_allowed =
         (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

-    kvm_ioeventfd_any_length_allowed =
-        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
-
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     kvm_has_guest_debug =
         (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
@@ -2595,19 +2589,15 @@ static int kvm_init(MachineState *ms)
         kvm_irqchip_create(s);
     }

-    if (kvm_eventfds_allowed) {
-        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
-        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
-    }
+    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
+    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
     s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
     s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

     kvm_memory_listener_register(s, &s->memory_listener,
                                  &address_space_memory, 0, "kvm-memory");
-    if (kvm_eventfds_allowed) {
-        memory_listener_register(&kvm_io_listener,
-                                 &address_space_io);
-    }
+    memory_listener_register(&kvm_io_listener,
+                             &address_space_io);
     memory_listener_register(&kvm_coalesced_pio_listener,
                              &address_space_io);

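With the checks gone, kvm_init() wires the eventfd_add/eventfd_del listener callbacks unconditionally. At the kernel ABI level, those callbacks come down to a KVM_IOEVENTFD ioctl on the VM file descriptor. The sketch below shows that ioctl in isolation; it is not QEMU code, assign_ioeventfd() is an invented helper name, and len is set to 0 for the any-length ("fast MMIO") case that KVM_CAP_IOEVENTFD_ANY_LENGTH guarantees.

/* Standalone sketch of the KVM_IOEVENTFD assignment an eventfd_add
 * callback ultimately performs.  len == 0 means "any access length",
 * which is exactly what KVM_CAP_IOEVENTFD_ANY_LENGTH advertises. */
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assign_ioeventfd(int vm_fd, __u64 gpa, __u32 len)
{
    int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    if (efd < 0) {
        return -1;
    }

    struct kvm_ioeventfd args;
    memset(&args, 0, sizeof(args));
    args.addr  = gpa;   /* guest-physical address being watched */
    args.len   = len;   /* 0 = any length; 1/2/4/8 = exact width */
    args.fd    = efd;
    args.flags = 0;     /* no datamatch; KVM_IOEVENTFD_FLAG_PIO would select PIO */

    if (ioctl(vm_fd, KVM_IOEVENTFD, &args) < 0) {
        close(efd);
        return -1;
    }
    return efd;         /* read/poll this fd to receive guest notifications */
}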
@@ -17,14 +17,12 @@
 KVMState *kvm_state;
 bool kvm_kernel_irqchip;
 bool kvm_async_interrupts_allowed;
-bool kvm_eventfds_allowed;
 bool kvm_resamplefds_allowed;
 bool kvm_msi_via_irqfd_allowed;
 bool kvm_gsi_routing_allowed;
 bool kvm_gsi_direct_mapping;
 bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
-bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;

 void kvm_flush_coalesced_mmio_buffer(void)
@@ -245,7 +245,6 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp)
     uint8_t *pci_conf;
     char *name;
     int r, i;
-    bool fastmmio = kvm_ioeventfd_any_length_enabled();

     pci_conf = pci_dev->config;

@@ -279,7 +278,7 @@ static void pci_testdev_realize(PCIDevice *pci_dev, Error **errp)
         g_free(name);
         test->hdr->offset = cpu_to_le32(IOTEST_SIZE(i) + i * IOTEST_ACCESS_WIDTH);
         test->match_data = strcmp(IOTEST_TEST(i), "wildcard-eventfd");
-        if (fastmmio && IOTEST_IS_MEM(i) && !test->match_data) {
+        if (IOTEST_IS_MEM(i) && !test->match_data) {
             test->size = 0;
         } else {
             test->size = IOTEST_ACCESS_WIDTH;
@@ -768,10 +768,6 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
                       sch->cssid, sch->ssid, sch->schid, sch->devno,
                       ccw_dev->devno.valid ? "user-configured" : "auto-configured");

-    if (kvm_enabled() && !kvm_eventfds_enabled()) {
-        dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
-    }
-
     /* fd-based ioevents can't be synchronized in record/replay */
     if (replay_mode != REPLAY_MODE_NONE) {
         dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
@@ -264,11 +264,6 @@ struct scrub_regions {
     int fd_idx;
 };

-static bool ioeventfd_enabled(void)
-{
-    return !kvm_enabled() || kvm_eventfds_enabled();
-}
-
 static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
 {
     struct vhost_user *u = dev->opaque;
@@ -1318,7 +1313,7 @@ static int vhost_set_vring_file(struct vhost_dev *dev,
         .hdr.size = sizeof(msg.payload.u64),
     };

-    if (ioeventfd_enabled() && file->fd > 0) {
+    if (file->fd > 0) {
         fds[fd_num++] = file->fd;
     } else {
         msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
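For reference, the convention behind VHOST_USER_VRING_NOFD_MASK in the hunk above, as described by the vhost-user protocol: the request payload is a u64 whose low byte carries the vring index, a "no fd" flag bit is set when no descriptor accompanies the message, and the descriptor itself travels as SCM_RIGHTS ancillary data. A small sketch of composing that payload; the constant values are restated here from memory rather than quoted from QEMU's headers.

/* Hedged sketch of the vhost-user vring-file payload layout.
 * Mask values below are assumptions mirroring QEMU's vhost-user.c. */
#include <stdint.h>
#include <stdbool.h>

#define VRING_IDX_MASK   0xffu
#define VRING_NOFD_MASK  (0x1u << 8)   /* assumed VHOST_USER_VRING_NOFD_MASK */

static uint64_t vring_file_payload(unsigned vring_idx, bool have_fd)
{
    uint64_t u64 = vring_idx & VRING_IDX_MASK;
    if (!have_fd) {
        u64 |= VRING_NOFD_MASK;        /* tell the backend no fd follows */
    }
    return u64;                        /* the fd itself travels as SCM_RIGHTS */
}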
@@ -761,10 +761,6 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
     qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL);
     sysbus_init_irq(sbd, &proxy->irq);

-    if (!kvm_eventfds_enabled()) {
-        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
-    }
-
     /* fd-based ioevents can't be synchronized in record/replay */
     if (replay_mode != REPLAY_MODE_NONE) {
         proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
@@ -332,7 +332,6 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
     VirtQueue *vq = virtio_get_queue(vdev, n);
     bool legacy = virtio_pci_legacy(proxy);
     bool modern = virtio_pci_modern(proxy);
-    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
     bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
     MemoryRegion *modern_mr = &proxy->notify.mr;
     MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
@@ -343,13 +342,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,

     if (assign) {
         if (modern) {
-            if (fast_mmio) {
-                memory_region_add_eventfd(modern_mr, modern_addr, 0,
-                                          false, n, notifier);
-            } else {
-                memory_region_add_eventfd(modern_mr, modern_addr, 2,
-                                          false, n, notifier);
-            }
+            memory_region_add_eventfd(modern_mr, modern_addr, 0,
+                                      false, n, notifier);
             if (modern_pio) {
                 memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                           true, n, notifier);
@@ -361,13 +355,8 @@ static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
         }
     } else {
         if (modern) {
-            if (fast_mmio) {
-                memory_region_del_eventfd(modern_mr, modern_addr, 0,
-                                          false, n, notifier);
-            } else {
-                memory_region_del_eventfd(modern_mr, modern_addr, 2,
-                                          false, n, notifier);
-            }
+            memory_region_del_eventfd(modern_mr, modern_addr, 0,
+                                      false, n, notifier);
             if (modern_pio) {
                 memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                           true, n, notifier);
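Since IOEVENTFD_ANY_LENGTH can now be assumed, virtio-pci always registers the modern notify eventfd with size 0, meaning a guest write of any width to the notify address kicks the notifier; the pre-patch fallback used size 2 with no datamatch. A QEMU-flavored fragment of that registration follows; it assumes QEMU's memory API headers, and register_notify_eventfd() is an invented wrapper name, not a QEMU function.

/* Hedged fragment: size 0 registers an any-length ("fast MMIO") ioeventfd,
 * so any write width at notify_addr fires the notifier. */
#include "qemu/osdep.h"
#include "exec/memory.h"
#include "qemu/event_notifier.h"

static void register_notify_eventfd(MemoryRegion *notify_mr, hwaddr notify_addr,
                                    uint16_t queue_index, EventNotifier *notifier)
{
    memory_region_add_eventfd(notify_mr, notify_addr, 0 /* any length */,
                              false /* no datamatch */, queue_index, notifier);
}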
@@ -36,13 +36,11 @@ extern bool kvm_kernel_irqchip;
 extern bool kvm_split_irqchip;
 extern bool kvm_async_interrupts_allowed;
 extern bool kvm_halt_in_kernel_allowed;
-extern bool kvm_eventfds_allowed;
 extern bool kvm_resamplefds_allowed;
 extern bool kvm_msi_via_irqfd_allowed;
 extern bool kvm_gsi_routing_allowed;
 extern bool kvm_gsi_direct_mapping;
 extern bool kvm_readonly_mem_allowed;
-extern bool kvm_ioeventfd_any_length_allowed;
 extern bool kvm_msi_use_devid;

 #define kvm_enabled() (kvm_allowed)
@@ -86,15 +84,6 @@ extern bool kvm_msi_use_devid;
  */
 #define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

-/**
- * kvm_eventfds_enabled:
- *
- * Returns: true if we can use eventfds to receive notifications
- * from a KVM CPU (ie the kernel supports eventds and we are running
- * with a configuration where it is meaningful to use them).
- */
-#define kvm_eventfds_enabled() (kvm_eventfds_allowed)
-
 /**
  * kvm_irqfds_enabled:
  *
@@ -147,12 +136,6 @@ extern bool kvm_msi_use_devid;
  */
 #define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

-/**
- * kvm_ioeventfd_any_length_enabled:
- * Returns: true if KVM allows any length io eventfd.
- */
-#define kvm_ioeventfd_any_length_enabled() (kvm_ioeventfd_any_length_allowed)
-
 /**
  * kvm_msi_devid_required:
  * Returns: true if KVM requires a device id to be provided while
@@ -168,13 +151,11 @@ extern bool kvm_msi_use_devid;
 #define kvm_async_interrupts_enabled() (false)
 #define kvm_halt_in_kernel() (false)
 #define kvm_irqfds_enabled() (false)
-#define kvm_eventfds_enabled() (false)
 #define kvm_resamplefds_enabled() (false)
 #define kvm_msi_via_irqfd_enabled() (false)
 #define kvm_gsi_routing_allowed() (false)
 #define kvm_gsi_direct_mapping() (false)
 #define kvm_readonly_mem_enabled() (false)
-#define kvm_ioeventfd_any_length_enabled() (false)
 #define kvm_msi_devid_required() (false)

 #endif /* CONFIG_KVM_IS_POSSIBLE */
@@ -1535,7 +1535,12 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,

     adjust_endianness(mr, &data, op);

-    if ((!kvm_eventfds_enabled()) &&
+    /*
+     * FIXME: it's not clear why under KVM the write would be processed
+     * directly, instead of going through eventfd.  This probably should
+     * test "tcg_enabled() || qtest_enabled()", or should just go away.
+     */
+    if (!kvm_enabled() &&
         memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
         return MEMTX_OK;
     }
@@ -2550,8 +2555,6 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr)
     }
 }

-static bool userspace_eventfd_warning;
-
 void memory_region_add_eventfd(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
@@ -2568,13 +2571,6 @@ void memory_region_add_eventfd(MemoryRegion *mr,
     };
     unsigned i;

-    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
-                            userspace_eventfd_warning))) {
-        userspace_eventfd_warning = true;
-        error_report("Using eventfd without MMIO binding in KVM. "
-                     "Suboptimal performance expected");
-    }
-
     if (size) {
         adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
     }