qemu/memory-Add-new-fields-in-IOTLBEntry.patch
Chen Qun 14aa645c91 memory: Add new fields in IOTLBEntry
The current IOTLBEntry is too simple to interact with
some physical IOMMUs. IOTLBs can be invalidated at different
granularities: domain, PASID, or address. The current IOTLB entry
only supports page-selective invalidation. Let's add a granularity
field that conveys this information.

TLB entries are usually tagged with IDs such as the ASID
or PASID. When propagating an invalidation command from the
guest to the host, we need to pass those IDs along.

We also add a leaf field which indicates, in the case of an
invalidation notification, whether only cache entries for the
last level of translation need to be invalidated.

A flags field is introduced to indicate which of those new fields are set.

To ensure existing users do not consume the new fields
uninitialized, zero-initialize the IOMMUTLBEvents where needed.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Signed-off-by: imxcc <xingchaochao@huawei.com>
(cherry picked from commit 2fa8c817167816f22c951308e3edbd0bc7370ecd)
2022-03-19 14:42:31 +08:00
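For illustration, here is a minimal, hypothetical sketch (not part of the patch) of how an emulated IOMMU might fill the extended IOTLBEntry when propagating an ASID-tagged, leaf-only, page-selective invalidation to a registered notifier. The helper name and its parameters are invented; the struct fields, flag bits, and memory_region_notify_iommu_one() are QEMU memory API as extended by this patch.

#include "qemu/osdep.h"
#include "exec/memory.h"

/*
 * Hypothetical helper, for illustration only: propagate an
 * ASID-tagged, leaf-only, page-selective invalidation through the
 * notifier framework using the fields added by this patch.
 */
static void notify_asid_leaf_inval(IOMMUNotifier *n, uint32_t asid,
                                   hwaddr iova, hwaddr mask, bool leaf)
{
    /* zero-initialize so consumers never see stale values */
    IOMMUTLBEvent event = {};

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = mask;       /* 2^n - 1, e.g. 0xfff for 4K */
    event.entry.perm = IOMMU_NONE;      /* NONE marks an invalidation */
    event.entry.granularity = IOMMU_INV_GRAN_ADDR;
    event.entry.arch_id = asid;
    event.entry.leaf = leaf;
    /* tell consumers which of the optional fields carry valid data */
    event.entry.flags = IOMMU_INV_FLAGS_ARCHID | IOMMU_INV_FLAGS_LEAF;

    memory_region_notify_iommu_one(n, &event);
}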

From da97cef20d4ee5a8f3942953836b35e7f7dd974f Mon Sep 17 00:00:00 2001
From: Eric Auger <eric.auger@redhat.com>
Date: Tue, 4 Sep 2018 08:43:05 -0400
Subject: [PATCH] memory: Add new fields in IOTLBEntry

The current IOTLBEntry is too simple to interact with
some physical IOMMUs. IOTLBs can be invalidated at different
granularities: domain, PASID, or address. The current IOTLB entry
only supports page-selective invalidation. Let's add a granularity
field that conveys this information.

TLB entries are usually tagged with IDs such as the ASID
or PASID. When propagating an invalidation command from the
guest to the host, we need to pass those IDs along.

We also add a leaf field which indicates, in the case of an
invalidation notification, whether only cache entries for the
last level of translation need to be invalidated.

A flags field is introduced to indicate which of those new
fields are set.

To ensure existing users do not consume the new fields
uninitialized, zero-initialize the IOMMUTLBEvents where needed.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
hw/arm/smmu-common.c | 2 +-
hw/arm/smmuv3.c | 2 +-
hw/i386/intel_iommu.c | 6 +++---
hw/ppc/spapr_iommu.c | 2 +-
hw/virtio/virtio-iommu.c | 4 ++--
include/exec/memory.h | 36 +++++++++++++++++++++++++++++++++++-
6 files changed, 43 insertions(+), 9 deletions(-)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 0459850a93..3a1ecf81d6 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -470,7 +470,7 @@ IOMMUMemoryRegion *smmu_iommu_mr(SMMUState *s, uint32_t sid)
/* Unmap the whole notifier's range */
static void smmu_unmap_notifier_range(IOMMUNotifier *n)
{
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
event.type = IOMMU_NOTIFIER_UNMAP;
event.entry.target_as = &address_space_memory;
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 01b60bee49..94e2c658f8 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -802,7 +802,7 @@ static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
uint8_t tg, uint64_t num_pages)
{
SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
uint8_t granule;
if (!tg) {
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index f584449d8d..fae282ef5e 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -1193,7 +1193,7 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
uint32_t offset;
uint64_t slpte;
uint64_t subpage_size, subpage_mask;
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
uint64_t iova = start;
uint64_t iova_next;
int ret = 0;
@@ -2425,7 +2425,7 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
VTDAddressSpace *vtd_dev_as;
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
struct VTDBus *vtd_bus;
hwaddr addr;
uint64_t sz;
@@ -3481,7 +3481,7 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
size = remain = end - start + 1;
while (remain >= VTD_PAGE_SIZE) {
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
uint64_t mask = dma_aligned_pow2_mask(start, end, s->aw_bits);
uint64_t size = mask + 1;
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index db01071858..454df25d44 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -449,7 +449,7 @@ static void spapr_tce_reset(DeviceState *dev)
static target_ulong put_tce_emu(SpaprTceTable *tcet, target_ulong ioba,
target_ulong tce)
{
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 1b23e8e18c..83ed2b82e6 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -129,7 +129,7 @@ static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
hwaddr virt_end, hwaddr paddr,
uint32_t flags)
{
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
flags & VIRTIO_IOMMU_MAP_F_WRITE);
@@ -154,7 +154,7 @@ static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
hwaddr virt_end)
{
- IOMMUTLBEvent event;
+ IOMMUTLBEvent event = {};
uint64_t delta = virt_end - virt_start;
if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 20f1b27377..c3180075e1 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -113,14 +113,48 @@ typedef enum {
IOMMU_RW = 3,
} IOMMUAccessFlags;
+/* Granularity of the cache invalidation */
+typedef enum {
+ IOMMU_INV_GRAN_ADDR = 0,
+ IOMMU_INV_GRAN_PASID,
+ IOMMU_INV_GRAN_DOMAIN,
+} IOMMUInvGranularity;
+
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
+/**
+ * struct IOMMUTLBEntry - IOMMU TLB entry
+ *
+ * Structure used when performing a translation or when notifying MAP or
+ * UNMAP (invalidation) events
+ *
+ * @target_as: target address space
+ * @iova: IO virtual address (input)
+ * @translated_addr: translated address (output)
+ * @addr_mask: address mask (0xfff means a 4K mapping), must be of the form 2^n - 1
+ * @perm: permission flag of the mapping (NONE encodes no mapping or an
+ * invalidation notification)
+ * @granularity: granularity of the invalidation
+ * @flags: informs whether the following fields are set
+ * @arch_id: architecture specific ID tagging the TLB
+ * @pasid: PASID tagging the TLB
+ * @leaf: when @perm is NONE, indicates whether only caches for the last
+ * level of translation need to be invalidated.
+ */
struct IOMMUTLBEntry {
AddressSpace *target_as;
hwaddr iova;
hwaddr translated_addr;
- hwaddr addr_mask; /* 0xfff = 4k translation */
+ hwaddr addr_mask;
IOMMUAccessFlags perm;
+ IOMMUInvGranularity granularity;
+#define IOMMU_INV_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_FLAGS_ARCHID (1 << 1)
+#define IOMMU_INV_FLAGS_LEAF (1 << 2)
+ uint32_t flags;
+ uint32_t arch_id;
+ uint32_t pasid;
+ bool leaf;
};
/*
--
2.27.0
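
On the receiving side, a notifier that wants to exploit the new information has to test @flags first, since pre-existing producers leave the fields zeroed (hence the zero-initialization of IOMMUTLBEvent throughout this patch). A hypothetical consumer sketch follows; the callback name is invented, but its signature matches the IOMMUNotifier notify hook.

/* Hypothetical notifier callback sketch; the function name is invented. */
static void backend_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    switch (entry->granularity) {
    case IOMMU_INV_GRAN_DOMAIN:
        /* drop every cached translation for the domain */
        break;
    case IOMMU_INV_GRAN_PASID:
        if (entry->flags & IOMMU_INV_FLAGS_PASID) {
            /* drop only the entries tagged with entry->pasid */
        }
        break;
    case IOMMU_INV_GRAN_ADDR:
        /*
         * Page-selective: entry->iova .. entry->iova + entry->addr_mask.
         * entry->leaf is meaningful only if IOMMU_INV_FLAGS_LEAF is set;
         * a legacy producer that zero-initialized the event lands here
         * with flags == 0, i.e. a plain page invalidation.
         */
        break;
    }
}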