vfio/pci: Implement return_page_response page response callback
This patch implements the page response path. The response is written into the page response ring buffer and then the header's head index is updated. This path is not used by this series. It is introduced here as a POC for vSVA/ARM integration.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
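As a reading aid, here is a minimal C sketch of the ring-buffer bookkeeping described above: the response record is copied into the slot selected by the current head index, and the head then advances modulo the number of entries. The struct and helper names below are illustrative stand-ins inferred from the fields the patch touches (head, nb_entries, entry_size); they are not the actual QEMU or kernel definitions.

    #include <stdint.h>
    #include <string.h>

    /* Illustrative stand-in for the response ring header; the real header is
     * struct vfio_region_dma_fault_response from the VFIO uapi headers this
     * series builds against. */
    struct resp_ring_header {
        uint32_t head;        /* next slot the producer (QEMU) will fill */
        uint32_t nb_entries;  /* ring capacity */
        uint32_t entry_size;  /* bytes per response record */
    };

    /* Copy one response record into the slot at 'head' and advance the head,
     * wrapping at nb_entries, mirroring the wrap-around arithmetic used in
     * the patch below. */
    static uint32_t push_page_response(struct resp_ring_header *hdr,
                                       uint8_t *ring, const void *record)
    {
        memcpy(ring + (size_t)hdr->head * hdr->entry_size, record,
               hdr->entry_size);
        hdr->head = (hdr->head + 1) % hdr->nb_entries;
        return hdr->head;  /* new head index, written back to the region */
    }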
This commit is contained in:
parent 2117b42cb1
commit 518ab37de3

vfio-pci-Implement-return_page_response-page-respons.patch (normal file, 199 lines added)
@@ -0,0 +1,199 @@
From dab7c3ad6d51e9f0c65d864d6128f62697db4604 Mon Sep 17 00:00:00 2001
From: Eric Auger <eric.auger@redhat.com>
Date: Fri, 6 Nov 2020 12:03:29 -0500
Subject: [PATCH] vfio/pci: Implement return_page_response page response
 callback

This patch implements the page response path. The
response is written into the page response ring buffer and then
the header's head index is updated. This path is not used
by this series. It is introduced here as a POC for vSVA/ARM
integration.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
 hw/vfio/pci.c | 123 ++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/pci.h |   2 +
 2 files changed, 125 insertions(+)

diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index d1198c8a23..6f4083aec8 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2662,6 +2662,61 @@ out:
     g_free(fault_region_info);
 }
 
+static void vfio_init_fault_response_regions(VFIOPCIDevice *vdev, Error **errp)
+{
+    struct vfio_region_info *fault_region_info = NULL;
+    struct vfio_region_info_cap_fault *cap_fault;
+    VFIODevice *vbasedev = &vdev->vbasedev;
+    struct vfio_info_cap_header *hdr;
+    char *fault_region_name;
+    int ret;
+
+    ret = vfio_get_dev_region_info(&vdev->vbasedev,
+                                   VFIO_REGION_TYPE_NESTED,
+                                   VFIO_REGION_SUBTYPE_NESTED_DMA_FAULT_RESPONSE,
+                                   &fault_region_info);
+    if (ret) {
+        goto out;
+    }
+
+    hdr = vfio_get_region_info_cap(fault_region_info,
+                                   VFIO_REGION_INFO_CAP_DMA_FAULT_RESPONSE);
+    if (!hdr) {
+        error_setg(errp, "failed to retrieve DMA FAULT RESPONSE capability");
+        goto out;
+    }
+    cap_fault = container_of(hdr, struct vfio_region_info_cap_fault,
+                             header);
+    if (cap_fault->version != 1) {
+        error_setg(errp, "Unsupported DMA FAULT RESPONSE API version %d",
+                   cap_fault->version);
+        goto out;
+    }
+
+    fault_region_name = g_strdup_printf("%s DMA FAULT RESPONSE %d",
+                                        vbasedev->name,
+                                        fault_region_info->index);
+
+    ret = vfio_region_setup(OBJECT(vdev), vbasedev,
+                            &vdev->dma_fault_response_region,
+                            fault_region_info->index,
+                            fault_region_name);
+    g_free(fault_region_name);
+    if (ret) {
+        error_setg_errno(errp, -ret,
+                         "failed to set up the DMA FAULT RESPONSE region %d",
+                         fault_region_info->index);
+        goto out;
+    }
+
+    ret = vfio_region_mmap(&vdev->dma_fault_response_region);
+    if (ret) {
+        error_setg_errno(errp, -ret, "Failed to mmap the DMA FAULT RESPONSE queue");
+    }
+
+out:
+    g_free(fault_region_info);
+}
+
 static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
 {
     VFIODevice *vbasedev = &vdev->vbasedev;
@@ -2737,6 +2792,12 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
         return;
     }
 
+    vfio_init_fault_response_regions(vdev, &err);
+    if (err) {
+        error_propagate(errp, err);
+        return;
+    }
+
     irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
 
     ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
@@ -2915,8 +2976,68 @@ static int vfio_iommu_set_pasid_table(PCIBus *bus, int32_t devfn,
     return ioctl(container->fd, VFIO_IOMMU_SET_PASID_TABLE, &info);
 }
 
+static int vfio_iommu_return_page_response(PCIBus *bus, int32_t devfn,
+                                           IOMMUPageResponse *resp)
+{
+    PCIDevice *pdev = bus->devices[devfn];
+    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
+    struct iommu_page_response *response = &resp->resp;
+    struct vfio_region_dma_fault_response header;
+    struct iommu_page_response *queue;
+    char *queue_buffer = NULL;
+    ssize_t bytes;
+
+    if (!vdev->dma_fault_response_region.mem) {
+        return -EINVAL;
+    }
+
+    /* read the header */
+    bytes = pread(vdev->vbasedev.fd, &header, sizeof(header),
+                  vdev->dma_fault_response_region.fd_offset);
+    if (bytes != sizeof(header)) {
+        error_report("%s unable to read the fault region header (0x%lx)",
+                     __func__, bytes);
+        return -1;
+    }
+
+    /* Normally the fault queue is mmapped */
+    queue = (struct iommu_page_response *)vdev->dma_fault_response_region.mmaps[0].mmap;
+    if (!queue) {
+        size_t queue_size = header.nb_entries * header.entry_size;
+
+        error_report("%s: fault queue not mmapped: slower fault handling",
+                     vdev->vbasedev.name);
+
+        queue_buffer = g_malloc(queue_size);
+        bytes = pread(vdev->vbasedev.fd, queue_buffer, queue_size,
+                      vdev->dma_fault_response_region.fd_offset + header.offset);
+        if (bytes != queue_size) {
+            error_report("%s unable to read the fault queue (0x%lx)",
+                         __func__, bytes);
+            return -1;
+        }
+
+        queue = (struct iommu_page_response *)queue_buffer;
+    }
+    /* deposit the new response in the queue and increment the head */
+    memcpy(queue + header.head, response, header.entry_size);
+
+    vdev->fault_response_head_index =
+        (vdev->fault_response_head_index + 1) % header.nb_entries;
+    bytes = pwrite(vdev->vbasedev.fd, &vdev->fault_response_head_index, 4,
+                   vdev->dma_fault_response_region.fd_offset);
+    if (bytes != 4) {
+        error_report("%s unable to write the fault response region head index (0x%lx)",
+                     __func__, bytes);
+    }
+    g_free(queue_buffer);
+
+    return 0;
+}
+
 static PCIPASIDOps vfio_pci_pasid_ops = {
     .set_pasid_table = vfio_iommu_set_pasid_table,
+    .return_page_response = vfio_iommu_return_page_response,
 };
 
 static void vfio_dma_fault_notifier_handler(void *opaque)
@@ -3373,6 +3494,7 @@ static void vfio_instance_finalize(Object *obj)
     vfio_display_finalize(vdev);
     vfio_bars_finalize(vdev);
     vfio_region_finalize(&vdev->dma_fault_region);
+    vfio_region_finalize(&vdev->dma_fault_response_region);
     g_free(vdev->emulated_config_bits);
     g_free(vdev->rom);
     /*
@@ -3394,6 +3516,7 @@ static void vfio_exitfn(PCIDevice *pdev)
     vfio_unregister_err_notifier(vdev);
     vfio_unregister_ext_irq_notifiers(vdev);
     vfio_region_exit(&vdev->dma_fault_region);
+    vfio_region_exit(&vdev->dma_fault_response_region);
     pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
     vfio_disable_interrupts(vdev);
     if (vdev->intx.mmap_timer) {
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index e31bc0173a..7fdcfa0dc8 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -143,6 +143,8 @@ typedef struct VFIOPCIDevice {
     VFIOPCIExtIRQ *ext_irqs;
     VFIORegion dma_fault_region;
     uint32_t fault_tail_index;
+    VFIORegion dma_fault_response_region;
+    uint32_t fault_response_head_index;
     int (*resetfn)(struct VFIOPCIDevice *);
     uint32_t vendor_id;
     uint32_t device_id;
--
2.27.0
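For reference, the region layout the page response path relies on can be summarized as follows. This is a hypothetical sketch reconstructed from the accesses above (the header pread at fd_offset, the 4-byte head-index pwrite at the same offset, and the queue pread at fd_offset + header.offset); the authoritative definition is struct vfio_region_dma_fault_response in the VFIO uapi headers this series builds against.

    #include <stdint.h>

    /*
     * Hypothetical mirror of the DMA FAULT RESPONSE region layout, inferred
     * from the field accesses in vfio_iommu_return_page_response(); not the
     * kernel's actual struct vfio_region_dma_fault_response.
     *
     *   fd_offset                     fd_offset + offset
     *   |                             |
     *   v                             v
     *   +-----------------------------+---------+---------+-----+-----------+
     *   | header (head, nb_entries,   | entry 0 | entry 1 | ... | entry N-1 |
     *   |         entry_size, offset) |         |         |     |           |
     *   +-----------------------------+---------+---------+-----+-----------+
     */
    struct dma_fault_response_layout {
        uint32_t head;        /* producer index; the only field QEMU writes back */
        uint32_t nb_entries;  /* number of iommu_page_response slots in the ring */
        uint32_t entry_size;  /* size of one slot, in bytes */
        uint32_t offset;      /* start of the ring, relative to the region base */
    };

When the region is mmapped, QEMU deposits the response directly into the mapped ring; when the mmap is unavailable, the fallback branch above performs the same update with pread/pwrite on the region file descriptor.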