 c471ad0e9b
			
		
	
	
		c471ad0e9b
		
	
	
	
	
		
			
			This patch implements Device IOTLB support for vhost kernel. This is done through: 1) switch to use dma helpers when map/unmap vrings from vhost codes; 2) introduce a set of VhostOps to: - set up the device IOTLB request callback - process device IOTLB requests - process device IOTLB invalidations; 3) kernel support for the Device IOTLB API: - allow vhost-net to query the IOMMU IOTLB entry through eventfd - enable the ability for qemu to update a specified mapping of vhost through ioctl - enable the ability to invalidate a specified range of iova for the device IOTLB of vhost through ioctl. In the x86/intel_iommu case this is triggered through the iommu memory region notifier from the device IOTLB invalidation descriptor processing routine. With all the above, kernel vhost_net can co-operate with a userspace IOMMU. For vhost-user, the support could be easily done on top by implementing the VhostOps. Cc: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Reviewed-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
		
			
				
	
	
		
			99 lines
		
	
	
		
			2.9 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			99 lines
		
	
	
		
			2.9 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
| #ifndef VHOST_H
 | |
| #define VHOST_H
 | |
| 
 | |
| #include "hw/hw.h"
 | |
| #include "hw/virtio/vhost-backend.h"
 | |
| #include "hw/virtio/virtio.h"
 | |
| #include "exec/memory.h"
 | |
| 
 | |
| /* Generic structures common for any vhost based device. */
 | |
| struct vhost_virtqueue {
 | |
|     int kick;
 | |
|     int call;
 | |
|     void *desc;
 | |
|     void *avail;
 | |
|     void *used;
 | |
|     int num;
 | |
|     unsigned long long desc_phys;
 | |
|     unsigned desc_size;
 | |
|     unsigned long long avail_phys;
 | |
|     unsigned avail_size;
 | |
|     unsigned long long used_phys;
 | |
|     unsigned used_size;
 | |
|     EventNotifier masked_notifier;
 | |
|     struct vhost_dev *dev;
 | |
| };
 | |
| 
 | |
| typedef unsigned long vhost_log_chunk_t;
 | |
| #define VHOST_LOG_PAGE 0x1000
 | |
| #define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
 | |
| #define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
 | |
| #define VHOST_INVALID_FEATURE_BIT   (0xff)
 | |
| 
 | |
| struct vhost_log {
 | |
|     unsigned long long size;
 | |
|     int refcnt;
 | |
|     int fd;
 | |
|     vhost_log_chunk_t *log;
 | |
| };
 | |
| 
 | |
| struct vhost_memory;
 | |
| struct vhost_dev {
 | |
|     VirtIODevice *vdev;
 | |
|     MemoryListener memory_listener;
 | |
|     struct vhost_memory *mem;
 | |
|     int n_mem_sections;
 | |
|     MemoryRegionSection *mem_sections;
 | |
|     struct vhost_virtqueue *vqs;
 | |
|     int nvqs;
 | |
|     /* the first virtqueue which would be used by this vhost dev */
 | |
|     int vq_index;
 | |
|     uint64_t features;
 | |
|     uint64_t acked_features;
 | |
|     uint64_t backend_features;
 | |
|     uint64_t protocol_features;
 | |
|     uint64_t max_queues;
 | |
|     bool started;
 | |
|     bool log_enabled;
 | |
|     uint64_t log_size;
 | |
|     Error *migration_blocker;
 | |
|     bool memory_changed;
 | |
|     hwaddr mem_changed_start_addr;
 | |
|     hwaddr mem_changed_end_addr;
 | |
|     const VhostOps *vhost_ops;
 | |
|     void *opaque;
 | |
|     struct vhost_log *log;
 | |
|     QLIST_ENTRY(vhost_dev) entry;
 | |
|     IOMMUNotifier n;
 | |
| };
 | |
| 
 | |
| int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
 | |
|                    VhostBackendType backend_type,
 | |
|                    uint32_t busyloop_timeout);
 | |
| void vhost_dev_cleanup(struct vhost_dev *hdev);
 | |
| int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
 | |
| void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
 | |
| int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 | |
| void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 | |
| 
 | |
| /* Test and clear masked event pending status.
 | |
|  * Should be called after unmask to avoid losing events.
 | |
|  */
 | |
| bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
 | |
| 
 | |
| /* Mask/unmask events from this vq.
 | |
|  */
 | |
| void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
 | |
|                           bool mask);
 | |
| uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
 | |
|                             uint64_t features);
 | |
| void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
 | |
|                         uint64_t features);
 | |
| bool vhost_has_free_slot(void);
 | |
| 
 | |
| int vhost_net_set_backend(struct vhost_dev *hdev,
 | |
|                           struct vhost_vring_file *file);
 | |
| 
 | |
| void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
 | |
| #endif
 |