author	Alex Williamson <alex.williamson@redhat.com>	2024-06-06 21:52:07 -0600
committer	Alex Williamson <alex.williamson@redhat.com>	2024-06-12 15:40:39 -0600
commit	d71a989cf5d961989c273093cdff2550acdde314 (patch)
tree	549d8e81b566d5cdfa3d50ce7bae886a2d183cd9
parent	vfio/pci: Use unmap_mapping_range() (diff)
vfio/pci: Insert full vma on mmap'd MMIO fault
To improve performance in typical scenarios, try to insert the entire vma on fault
rather than only the faulting page.  This accelerates common cases such as the MMIO
region being DMA mapped by QEMU, where the vfio_iommu_type1 driver faults in the
entire DMA mapped range through fixup_user_fault().

In synthetic testing, this improves the time required to walk a PCI BAR mapping from
userspace by roughly 1/3rd.

This is likely an interim solution until vmf_insert_pfn_{pmd,pud}() gain support for
pfnmaps.

Suggested-by: Yan Zhao <yan.y.zhao@intel.com>
Link: https://lore.kernel.org/all/Zl6XdUkt%2FzMMGOLF@yzhao56-desk.sh.intel.com/
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Link: https://lore.kernel.org/r/20240607035213.2054226-1-alex.williamson@redhat.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
 drivers/vfio/pci/vfio_pci_core.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
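The "synthetic testing" referenced in the commit message is not part of the patch.
Purely as an illustration of what walking a PCI BAR mapping from userspace looks
like, a sketch along the following lines could be used.  It assumes a vfio device
fd that has already been set up (group/iommufd plumbing, error reporting, and the
walk_bar0() helper name are placeholders, not taken from this series); the vfio
ioctls and flags used are standard UAPI from <linux/vfio.h>.

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/vfio.h>

/*
 * Walk every page of BAR0 through its mmap.  'device' is an already
 * configured vfio device fd; setup is omitted for brevity.
 */
static int walk_bar0(int device)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_PCI_BAR0_REGION_INDEX,
	};
	volatile unsigned char *bar;
	unsigned long long sum = 0;
	long page = sysconf(_SC_PAGESIZE);
	size_t off;

	if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &info) ||
	    !(info.flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -1;

	bar = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   device, info.offset);
	if (bar == MAP_FAILED)
		return -1;

	/*
	 * Touch one byte per page.  Each untouched page goes through
	 * vfio_pci_mmap_fault(); with this patch the first fault
	 * populates the whole vma, so the remainder of the walk hits
	 * already-present PTEs.
	 */
	for (off = 0; off < info.size; off += page)
		sum += bar[off];

	munmap((void *)bar, info.size);
	return (int)(sum & 0x7f);
}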
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index db31c27bf78b..987c7921affa 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1662,6 +1662,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct vfio_pci_core_device *vdev = vma->vm_private_data;
 	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
+	unsigned long addr = vma->vm_start;
 	vm_fault_t ret = VM_FAULT_SIGBUS;
 
 	pfn = vma_to_pfn(vma);
@@ -1669,11 +1670,25 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
 	down_read(&vdev->memory_lock);
 
 	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
-		goto out_disabled;
+		goto out_unlock;
 
 	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
+	if (ret & VM_FAULT_ERROR)
+		goto out_unlock;
 
-out_disabled:
+	/*
+	 * Pre-fault the remainder of the vma, abort further insertions and
+	 * suppress the error if a fault is encountered during pre-fault.
+	 */
+	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
+		if (addr == vmf->address)
+			continue;
+
+		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
+			break;
+	}
+
+out_unlock:
 	up_read(&vdev->memory_lock);
 
 	return ret;
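For reference, this is how the fault handler reads with the patch applied,
reassembled from the hunks above (nothing outside the diff context is altered;
the closing brace is implied by the hunk headers):

static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct vfio_pci_core_device *vdev = vma->vm_private_data;
	unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff;
	unsigned long addr = vma->vm_start;
	vm_fault_t ret = VM_FAULT_SIGBUS;

	pfn = vma_to_pfn(vma);

	down_read(&vdev->memory_lock);

	if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev))
		goto out_unlock;

	/* Insert the faulting page first; its status determines the return value. */
	ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff);
	if (ret & VM_FAULT_ERROR)
		goto out_unlock;

	/*
	 * Pre-fault the remainder of the vma, abort further insertions and
	 * suppress the error if a fault is encountered during pre-fault.
	 */
	for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) {
		if (addr == vmf->address)
			continue;

		if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR)
			break;
	}

out_unlock:
	up_read(&vdev->memory_lock);

	return ret;
}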