vdpa: use VhostVDPAShared in vdpa_dma_map and unmap
The callers only have the shared information by the end of this series.
Start converting these functions.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20231221174322.3130442-12-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 6f03d9ef8a
parent 3c6d12a3b1
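The change is mechanical: vhost_vdpa_dma_map(), vhost_vdpa_dma_unmap() and the batch helpers take the VhostVDPAShared pointer directly instead of reaching it through struct vhost_vdpa, and existing callers pass v->shared. A minimal sketch of the old and new call pattern, taken from the signatures and call sites in the diff below (only the juxtaposition is editorial):

    /* Before: the full device struct was required. */
    int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                           hwaddr size, void *vaddr, bool readonly);

    /* After: the shared state is enough; the fd comes from s->device_fd. */
    int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                           hwaddr size, void *vaddr, bool readonly);

    /* Callers that still hold a struct vhost_vdpa simply pass v->shared,
     * e.g. in vhost_vdpa_listener_region_add(): */
    ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);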
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -86,11 +86,11 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
  * The caller must set asid = 0 if the device does not support asid.
  * This is not an ABI break since it is set to 0 by the initializer anyway.
  */
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                        hwaddr size, void *vaddr, bool readonly)
 {
     struct vhost_msg_v2 msg = {};
-    int fd = v->shared->device_fd;
+    int fd = s->device_fd;
     int ret = 0;
 
     msg.type = VHOST_IOTLB_MSG_V2;
@@ -101,7 +101,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
     msg.iotlb.type = VHOST_IOTLB_UPDATE;
 
-    trace_vhost_vdpa_dma_map(v->shared, fd, msg.type, msg.asid, msg.iotlb.iova,
+    trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                              msg.iotlb.type);
 
@@ -118,11 +118,11 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
  * The caller must set asid = 0 if the device does not support asid.
  * This is not an ABI break since it is set to 0 by the initializer anyway.
  */
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                          hwaddr size)
 {
     struct vhost_msg_v2 msg = {};
-    int fd = v->shared->device_fd;
+    int fd = s->device_fd;
     int ret = 0;
 
     msg.type = VHOST_IOTLB_MSG_V2;
@@ -131,8 +131,8 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
     msg.iotlb.size = size;
     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
 
-    trace_vhost_vdpa_dma_unmap(v->shared, fd, msg.type, msg.asid,
-                               msg.iotlb.iova, msg.iotlb.size, msg.iotlb.type);
+    trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
+                               msg.iotlb.size, msg.iotlb.type);
 
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
@@ -143,30 +143,29 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
     return ret;
 }
 
-static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
+static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
 {
-    int fd = v->shared->device_fd;
+    int fd = s->device_fd;
     struct vhost_msg_v2 msg = {
         .type = VHOST_IOTLB_MSG_V2,
         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
     };
 
-    trace_vhost_vdpa_listener_begin_batch(v->shared, fd, msg.type,
-                                          msg.iotlb.type);
+    trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
     }
 }
 
-static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
+static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
 {
-    if (v->shared->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
-        !v->shared->iotlb_batch_begin_sent) {
-        vhost_vdpa_listener_begin_batch(v);
+    if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
+        !s->iotlb_batch_begin_sent) {
+        vhost_vdpa_listener_begin_batch(s);
     }
 
-    v->shared->iotlb_batch_begin_sent = true;
+    s->iotlb_batch_begin_sent = true;
 }
 
 static void vhost_vdpa_listener_commit(MemoryListener *listener)
@@ -226,7 +225,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
             return;
         }
-        ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+        ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                  iotlb->addr_mask + 1, vaddr, read_only);
         if (ret) {
             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
@@ -234,7 +233,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
                          v, iova, iotlb->addr_mask + 1, vaddr, ret);
         }
     } else {
-        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+        ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                    iotlb->addr_mask + 1);
         if (ret) {
             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
@@ -370,8 +369,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener,
         iova = mem_region.iova;
     }
 
-    vhost_vdpa_iotlb_batch_begin_once(v);
-    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+    vhost_vdpa_iotlb_batch_begin_once(v->shared);
+    ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                              int128_get64(llsize), vaddr, section->readonly);
     if (ret) {
         error_report("vhost vdpa map fail!");
@@ -455,13 +454,13 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
         iova = result->iova;
         vhost_iova_tree_remove(v->shared->iova_tree, *result);
     }
-    vhost_vdpa_iotlb_batch_begin_once(v);
+    vhost_vdpa_iotlb_batch_begin_once(v->shared);
     /*
      * The unmap ioctl doesn't accept a full 64-bit. need to check it
      */
     if (int128_eq(llsize, int128_2_64())) {
         llsize = int128_rshift(llsize, 1);
-        ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+        ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                    int128_get64(llsize));
 
         if (ret) {
@@ -471,7 +470,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener,
         }
         iova += int128_get64(llsize);
     }
-    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
+    ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                int128_get64(llsize));
 
     if (ret) {
@@ -1081,7 +1080,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
     }
 
     size = ROUND_UP(result->size, qemu_real_host_page_size());
-    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
+    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
+                             size);
     if (unlikely(r < 0)) {
         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
         return;
@@ -1121,7 +1121,7 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
         return false;
     }
 
-    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
+    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
                            needle->size + 1,
                            (void *)(uintptr_t)needle->translated_addr,
                            needle->perm == IOMMU_RO);
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -69,9 +69,9 @@ typedef struct vhost_vdpa {
 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx);
 
-int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                        hwaddr size, void *vaddr, bool readonly);
-int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
+int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                          hwaddr size);
 
 typedef struct vdpa_iommu {
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -471,7 +471,8 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
         return;
     }
 
-    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
+    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
+                             map->size + 1);
     if (unlikely(r != 0)) {
         error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
     }
@@ -495,7 +496,7 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
         return r;
     }
 
-    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
+    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                            vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
     if (unlikely(r < 0)) {
         goto dma_map_err;
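For reference, once the listener state lives entirely in VhostVDPAShared (the goal of this series), a caller no longer needs a struct vhost_vdpa at all. A hypothetical sketch under that assumption; the helper name and error handling are illustrative and not part of this patch, and per the comment above the functions, asid must be 0 when the device does not support ASIDs:

    static void example_unmap_region(VhostVDPAShared *s, hwaddr iova,
                                     hwaddr size)
    {
        /* asid 0: the default address space; devices without ASID
         * support must be called with asid = 0. */
        int r = vhost_vdpa_dma_unmap(s, 0, iova, size);
        if (unlikely(r < 0)) {
            error_report("Cannot unmap 0x%" HWADDR_PRIx ": %s (%d)",
                         iova, g_strerror(-r), -r);
        }
    }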