stratovirt/vfio-fix-the-problem-of-dma-mapping-failed.patch


From 023dde42b55a58c7a41293566e95a0fc94efa2c6 Mon Sep 17 00:00:00 2001
From: "Xinle.Guo" <guoxinle1@huawei.com>
Date: Thu, 19 Aug 2021 20:48:25 +0800
Subject: [PATCH 7/8] vfio: fix the problem of dma mapping failed

Using libvirt to manage a VM with more than one vfio device fails.
The reason is that a container was created for every vfio device, and
each container pins all of guest memory, so the stratovirt process
exceeds its locked-memory limit. Fix this by lazily creating a single
container and sharing it among all vfio devices.

Signed-off-by: Xinle.Guo <guoxinle1@huawei.com>
---
machine/src/lib.rs | 24 +++++++++++-------------
vfio/src/vfio_dev.rs | 25 +++++++++++++++----------
vfio/src/vfio_pci.rs | 31 ++++++++++++++++++-------------
3 files changed, 44 insertions(+), 36 deletions(-)
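
Before the diff: the core of the fix is that all vfio-pci devices now share one
VfioContainer instead of each creating their own, because every container pins
the whole of guest RAM against the process's locked-memory limit
(RLIMIT_MEMLOCK). A minimal sketch of the arithmetic, not part of the patch;
the guest size, limit, and device count are made-up assumptions:

```rust
// Why per-device containers exceed the locked-memory budget: every VFIO
// container pins all guest RAM, so pinned pages scale with container count.
fn main() {
    let guest_ram: u64 = 4 << 30;     // assume a 4 GiB guest
    let memlock_limit: u64 = 6 << 30; // assume a 6 GiB RLIMIT_MEMLOCK
    let vfio_devices: u64 = 2;

    // Before the patch: one container per device, guest RAM pinned per container.
    let locked_before = guest_ram * vfio_devices;
    // After the patch: one shared container, guest RAM pinned exactly once.
    let locked_after = guest_ram;

    assert!(locked_before > memlock_limit); // 8 GiB > 6 GiB: DMA mapping fails
    assert!(locked_after <= memlock_limit); // 4 GiB fits
}
```
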
diff --git a/machine/src/lib.rs b/machine/src/lib.rs
index 9eb3039..8ba0ba3 100644
--- a/machine/src/lib.rs
+++ b/machine/src/lib.rs
@@ -121,7 +121,7 @@ use devices::legacy::FwCfgOps;
#[cfg(target_arch = "aarch64")]
use devices::InterruptController;
use hypervisor::KVM_FDS;
-use kvm_ioctls::{DeviceFd, VcpuFd};
+use kvm_ioctls::VcpuFd;
use machine_manager::config::{
get_pci_bdf, parse_balloon, parse_blk, parse_net, parse_rng_dev, parse_root_port, parse_vfio,
parse_virtconsole, parse_virtio_serial, parse_vsock, MachineMemConfig, PFlashConfig, PciBdf,
@@ -132,7 +132,7 @@ use machine_manager::machine::{KvmVmState, MachineInterface};
use migration::MigrationManager;
use util::loop_context::{EventNotifier, NotifierCallback, NotifierOperation};
use util::seccomp::{BpfRule, SeccompOpt, SyscallFilter};
-use vfio::vfio_pci::create_vfio_device;
+use vfio::vfio_pci::create_vfio_container;
use vfio::{VfioContainer, VfioPciDevice};
use virtio::{balloon_allow_list, Balloon, Block, Console, Rng, VirtioMmioDevice, VirtioPciDevice};
use vmm_sys_util::epoll::EventSet;
@@ -478,13 +478,8 @@ pub trait MachineOps {
&mut self,
vm_config: &VmConfig,
cfg_args: &str,
- dev_fd: Arc<DeviceFd>,
+ container: Arc<VfioContainer>,
) -> Result<()> {
- let sys_mem = self.get_sys_mem().clone();
- let container = Arc::new(
- VfioContainer::new(dev_fd, &sys_mem).chain_err(|| "Failed to create vfio container")?,
- );
-
let device_cfg: VfioConfig = parse_vfio(vm_config, cfg_args)?;
let path = "/sys/bus/pci/devices/".to_string() + &device_cfg.host;
let name = device_cfg.id;
@@ -543,10 +538,7 @@ pub trait MachineOps {
.chain_err(|| ErrorKind::AddDevErr("pflash".to_string()))?;
}
- // Create an emulated kvm device that is used for VFIO. It should be created only once.
- // See the kernel docs for `KVM_CREATE_DEVICE` to get more info.
- let vfio_dev = create_vfio_device().chain_err(|| "Failed to create kvm device for VFIO")?;
-
+ let mut container: Option<Arc<VfioContainer>> = None;
for dev in &cloned_vm_config.devices {
let cfg_args = dev.1.as_str();
match dev.0.as_str() {
@@ -581,7 +573,13 @@ pub trait MachineOps {
self.add_virtio_rng(vm_config, cfg_args)?;
}
"vfio-pci" => {
- self.add_vfio_device(&vm_config, cfg_args, vfio_dev.clone())?;
+ if container.is_none() {
+ container = Some(
+ create_vfio_container(self.get_sys_mem().clone())
+ .chain_err(|| "Failed to create vfio container")?,
+ );
+ }
+ self.add_vfio_device(&vm_config, cfg_args, container.clone().unwrap())?;
}
_ => {
bail!("Unsupported device: {:?}", dev.0.as_str());
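
The hunk above creates the container lazily: nothing is allocated until the
first `vfio-pci` device appears, and every later device clones the same `Arc`.
A minimal standalone sketch of that pattern, with `Container`,
`create_container`, and `realize_devices` as hypothetical stand-ins for the
stratovirt types:

```rust
use std::sync::Arc;

struct Container; // stand-in for VfioContainer

fn create_container() -> Result<Arc<Container>, String> {
    Ok(Arc::new(Container)) // the real constructor is fallible, hence Result
}

// Mirrors the patched device loop: the container is created on the first
// "vfio-pci" entry and the same Arc is cloned for every later one.
fn realize_devices(devices: &[&str]) -> Result<(), String> {
    let mut container: Option<Arc<Container>> = None;
    for dev in devices {
        if *dev == "vfio-pci" {
            if container.is_none() {
                container = Some(create_container()?);
            }
            let _shared = container.clone().unwrap(); // passed to add_vfio_device
        }
    }
    Ok(())
}

fn main() {
    realize_devices(&["virtio-blk", "vfio-pci", "vfio-pci"]).unwrap();
}
```
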
diff --git a/vfio/src/vfio_dev.rs b/vfio/src/vfio_dev.rs
index 30f7d5a..93922ba 100644
--- a/vfio/src/vfio_dev.rs
+++ b/vfio/src/vfio_dev.rs
@@ -96,8 +96,8 @@ pub struct VfioMemoryRegion {
pub memory_size: u64,
// Host virtual address.
pub userspace_addr: u64,
- // No flags specified for now.
- flags_padding: u64,
+ // IOMMU mapped flag.
+ pub iommu_mapped: bool,
}
/// `VfioMemInfo` structure contains pinning pages information. If any pages need to be zapped from
@@ -131,7 +131,7 @@ impl VfioMemInfo {
guest_phys_addr,
memory_size,
userspace_addr,
- flags_padding: 0_u64,
+ iommu_mapped: false,
});
Ok(())
@@ -150,7 +150,7 @@ impl VfioMemInfo {
guest_phys_addr: fr.addr_range.base.raw_value(),
memory_size: fr.addr_range.size,
userspace_addr: hva + fr.offset_in_region,
- flags_padding: 0_u64,
+ iommu_mapped: false,
};
let mut mem_regions = self.regions.lock().unwrap();
for (index, mr) in mem_regions.iter().enumerate() {
@@ -378,9 +378,11 @@ impl VfioGroup {
return Err(ErrorKind::VfioIoctl("VFIO_GROUP_SET_CONTAINER".to_string(), ret).into());
}
- if let Err(e) = container.set_iommu(vfio::VFIO_TYPE1v2_IOMMU) {
- unsafe { ioctl_with_ref(&self.group, VFIO_GROUP_UNSET_CONTAINER(), &raw_fd) };
- return Err(e).chain_err(|| "Failed to set IOMMU");
+ if container.groups.lock().unwrap().is_empty() {
+ if let Err(e) = container.set_iommu(vfio::VFIO_TYPE1v2_IOMMU) {
+ unsafe { ioctl_with_ref(&self.group, VFIO_GROUP_UNSET_CONTAINER(), &raw_fd) };
+ return Err(e).chain_err(|| "Failed to set IOMMU");
+ }
}
if let Err(e) = container.kvm_device_add_group(&self.group.as_raw_fd()) {
@@ -473,15 +475,18 @@ impl VfioDevice {
group_id = n.parse::<u32>().chain_err(|| "Invalid iommu group id")?;
}
- let mut groups = container.groups.lock().unwrap();
- if let Some(g) = groups.get(&group_id) {
+ if let Some(g) = container.groups.lock().unwrap().get(&group_id) {
return Ok(g.clone());
}
let group = Arc::new(VfioGroup::new(group_id)?);
group
.connect_container(&container)
.chain_err(|| "Fail to connect container")?;
- groups.insert(group_id, group.clone());
+ container
+ .groups
+ .lock()
+ .unwrap()
+ .insert(group_id, group.clone());
Ok(group)
}
diff --git a/vfio/src/vfio_pci.rs b/vfio/src/vfio_pci.rs
index 5cc674a..7bec445 100644
--- a/vfio/src/vfio_pci.rs
+++ b/vfio/src/vfio_pci.rs
@@ -19,13 +19,12 @@ use std::sync::{Arc, Mutex, Weak};
use byteorder::{ByteOrder, LittleEndian};
use error_chain::ChainedError;
use kvm_bindings::{kvm_create_device, kvm_device_type_KVM_DEV_TYPE_VFIO};
-use kvm_ioctls::DeviceFd;
use vfio_bindings::bindings::vfio;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::ioctl::ioctl_with_mut_ref;
use super::errors::{ErrorKind, Result, ResultExt};
-use address_space::{FileBackend, GuestAddress, HostMemMapping, Region, RegionOps};
+use address_space::{AddressSpace, FileBackend, GuestAddress, HostMemMapping, Region, RegionOps};
use hypervisor::{MsiVector, KVM_FDS};
#[cfg(target_arch = "aarch64")]
use pci::config::SECONDARY_BUS_NUM;
@@ -380,7 +379,7 @@ impl VfioPciDevice {
.register_bar(i as usize, bar_region, vfio_bar.region_type, false, size);
}
- self.map_guest_memory()?;
+ self.do_dma_map()?;
Ok(())
}
@@ -569,14 +568,17 @@ impl VfioPciDevice {
}
/// Add all guest memory regions into IOMMU table.
- fn map_guest_memory(&mut self) -> Result<()> {
+ fn do_dma_map(&mut self) -> Result<()> {
let container = &self.vfio_device.container;
- let regions = container.vfio_mem_info.regions.lock().unwrap();
-
- for r in regions.iter() {
- container
- .vfio_dma_map(r.guest_phys_addr, r.memory_size, r.userspace_addr)
- .chain_err(|| "Failed to add guest memory region map into IOMMU table")?;
+ let mut regions = container.vfio_mem_info.regions.lock().unwrap();
+
+ for r in regions.iter_mut() {
+ if !r.iommu_mapped {
+ container
+ .vfio_dma_map(r.guest_phys_addr, r.memory_size, r.userspace_addr)
+ .chain_err(|| "Failed to add guest memory region map into IOMMU table")?;
+ r.iommu_mapped = true;
+ }
}
Ok(())
}
@@ -872,7 +874,7 @@ fn get_irq_rawfds(gsi_msi_routes: &[GsiMsiRoute]) -> Vec<RawFd> {
rawfds
}
-pub fn create_vfio_device() -> Result<Arc<DeviceFd>> {
+pub fn create_vfio_container(sys_mem: Arc<AddressSpace>) -> Result<Arc<VfioContainer>> {
let mut vfio_device = kvm_create_device {
type_: kvm_device_type_KVM_DEV_TYPE_VFIO,
fd: 0,
@@ -884,7 +886,10 @@ pub fn create_vfio_device() -> Result<Arc<DeviceFd>> {
.as_ref()
.unwrap()
.create_device(&mut vfio_device)
- .chain_err(|| "Failed to create VFIO type KVM device")?;
+ .chain_err(|| "Failed to create kvm device for VFIO")?;
- Ok(Arc::new(dev_fd))
+ Ok(Arc::new(
+ VfioContainer::new(Arc::new(dev_fd), &sys_mem)
+ .chain_err(|| "Failed to create vfio container")?,
+ ))
}
--
2.25.1
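
With one container shared by several devices, `do_dma_map` runs once per
device over the same region list, so the new `iommu_mapped` flag makes the
mapping idempotent (the VFIO type1 driver rejects an overlapping
VFIO_IOMMU_MAP_DMA with EEXIST). A minimal standalone sketch of that guard,
with `Region`, `vfio_dma_map`, and `do_dma_map` as simplified stand-ins for
the patched code:

```rust
struct Region {
    gpa: u64,  // guest physical address
    size: u64,
    hva: u64,  // host virtual address
    iommu_mapped: bool,
}

fn vfio_dma_map(_gpa: u64, _size: u64, _hva: u64) -> Result<(), String> {
    Ok(()) // stand-in for the real VFIO_IOMMU_MAP_DMA ioctl
}

// Mirrors the patched do_dma_map(): map each region at most once, however
// many devices walk the shared list.
fn do_dma_map(regions: &mut [Region]) -> Result<(), String> {
    for r in regions.iter_mut() {
        if !r.iommu_mapped {
            vfio_dma_map(r.gpa, r.size, r.hva)?;
            r.iommu_mapped = true;
        }
    }
    Ok(())
}

fn main() {
    let mut regions = vec![Region {
        gpa: 0,
        size: 1 << 30,
        hva: 0x7f00_0000_0000,
        iommu_mapped: false,
    }];
    do_dma_map(&mut regions).unwrap(); // first device: maps the region
    do_dma_map(&mut regions).unwrap(); // second device: nothing to do
}
```
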