 drivers/iommu/virtio-iommu.c | 115 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 95 insertions(+), 20 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 80930ce04a16..1f793219cac6 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -71,6 +71,7 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
+ bool bypass;
};
struct viommu_endpoint {
@@ -310,8 +311,8 @@ out_unlock:
*
* On success, return 0. Otherwise return -ENOMEM.
*/
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
- phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+ phys_addr_t paddr, u32 flags)
{
unsigned long irqflags;
struct viommu_mapping *mapping;
@@ -322,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
mapping->paddr = paddr;
mapping->iova.start = iova;
- mapping->iova.last = iova + size - 1;
+ mapping->iova.last = end;
mapping->flags = flags;
spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
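
[Note: the switch from an (iova, size) pair to an inclusive (iova, end) pair lets a single mapping cover the whole 64-bit address space, whose size does not fit in a u64; the old code had to treat size == 0 as a special case for that. A minimal userspace sketch of the pitfall, illustrative only and not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t iova = 0;
		uint64_t size = 0;		 /* old convention: 0 meant "whole space" */
		uint64_t last = iova + size - 1; /* wraps to UINT64_MAX, by accident */

		/* An inclusive end needs no special case: 0..UINT64_MAX is exact. */
		printf("computed last: %#llx\n", (unsigned long long)last);
		printf("inclusive end: %#llx\n", (unsigned long long)UINT64_MAX);
		return 0;
	}
]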
@@ -337,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
*
* @vdomain: the domain
* @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- * space.
+ * @end: end of the range
*
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
*/
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
- unsigned long iova, size_t size)
+ u64 iova, u64 end)
{
size_t unmapped = 0;
unsigned long flags;
- unsigned long last = iova + size - 1;
struct viommu_mapping *mapping = NULL;
struct interval_tree_node *node, *next;
spin_lock_irqsave(&vdomain->mappings_lock, flags);
- next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+ next = interval_tree_iter_first(&vdomain->mappings, iova, end);
while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
- next = interval_tree_iter_next(node, iova, last);
+ next = interval_tree_iter_next(node, iova, end);
/* Trying to split a mapping? */
if (mapping->iova.start < iova)
@@ -377,6 +376,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
}
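
[Note: the interval tree stores inclusive [start, last] ranges, which is why the lookups above now take end directly instead of recomputing iova + size - 1. A sketch of the overlap test implied by that convention (assumed semantics of interval_tree_iter_first(), illustrative only):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Two inclusive ranges [a_start, a_last] and [b_start, b_last]
	 * overlap iff each one starts no later than the other ends. */
	static bool ranges_overlap(uint64_t a_start, uint64_t a_last,
				   uint64_t b_start, uint64_t b_last)
	{
		return a_start <= b_last && b_start <= a_last;
	}

	int main(void)
	{
		assert(ranges_overlap(0, 0xfff, 0xfff, 0x1fff));   /* touch at one byte */
		assert(!ranges_overlap(0, 0xfff, 0x1000, 0x1fff)); /* adjacent, no overlap */
		return 0;
	}
]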
/*
+ * Fill the domain with identity mappings, skipping the device's reserved
+ * regions.
+ */
+static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
+ struct viommu_domain *vdomain)
+{
+ int ret;
+ struct iommu_resv_region *resv;
+ u64 iova = vdomain->domain.geometry.aperture_start;
+ u64 limit = vdomain->domain.geometry.aperture_end;
+ u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+ unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);
+
+ iova = ALIGN(iova, granule);
+ limit = ALIGN_DOWN(limit + 1, granule) - 1;
+
+ list_for_each_entry(resv, &vdev->resv_regions, list) {
+ u64 resv_start = ALIGN_DOWN(resv->start, granule);
+ u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;
+
+ if (resv_end < iova || resv_start > limit)
+ /* No overlap */
+ continue;
+
+ if (resv_start > iova) {
+ ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
+ (phys_addr_t)iova, flags);
+ if (ret)
+ goto err_unmap;
+ }
+
+ if (resv_end >= limit)
+ return 0;
+
+ iova = resv_end + 1;
+ }
+
+ ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
+ flags);
+ if (ret)
+ goto err_unmap;
+ return 0;
+
+err_unmap:
+ viommu_del_mappings(vdomain, 0, iova);
+ return ret;
+}
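
[Note: to make the carving logic concrete, here is a small userspace model of the loop above. It walks sorted, granule-aligned reserved regions and prints the identity-mapped ranges that would be created; the aperture and reserved region are made-up example values:

	#include <stdint.h>
	#include <stdio.h>

	struct resv { uint64_t start, end; };	/* inclusive, granule-aligned */

	int main(void)
	{
		uint64_t iova = 0;		/* aperture_start, aligned up */
		uint64_t limit = 0xffffffff;	/* aperture_end, aligned down */
		struct resv regions[] = {	/* must be sorted by start */
			{ 0xfee00000, 0xfeefffff }, /* e.g. an MSI doorbell */
		};

		for (unsigned i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
			struct resv *r = &regions[i];

			if (r->end < iova || r->start > limit)
				continue;	/* no overlap with the aperture */
			if (r->start > iova)	/* map up to the reserved region */
				printf("map %#llx-%#llx\n",
				       (unsigned long long)iova,
				       (unsigned long long)(r->start - 1));
			if (r->end >= limit)
				return 0;	/* reserved region covers the tail */
			iova = r->end + 1;	/* resume past the reserved region */
		}
		printf("map %#llx-%#llx\n",	/* map the remaining tail */
		       (unsigned long long)iova, (unsigned long long)limit);
		return 0;
	}
]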
+
+/*
* viommu_replay_mappings - re-send MAP requests
*
* When reattaching a domain that was previously detached from all endpoints,
@@ -422,7 +470,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
size_t size;
u64 start64, end64;
phys_addr_t start, end;
- struct iommu_resv_region *region = NULL;
+ struct iommu_resv_region *region = NULL, *next;
unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
start = start64 = le64_to_cpu(mem->start);
@@ -453,7 +501,12 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
if (!region)
return -ENOMEM;
- list_add(&region->list, &vdev->resv_regions);
+ /* Keep the list sorted */
+ list_for_each_entry(next, &vdev->resv_regions, list) {
+ if (next->start > region->start)
+ break;
+ }
+ list_add_tail(&region->list, &next->list);
return 0;
}
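
[Note: the sort matters because viommu_domain_map_identity() walks resv_regions in ascending order. The list_for_each_entry()/list_add_tail() pair above inserts before the first entry that starts later; when the loop runs off the end, next is the head sentinel and list_add_tail() before it appends at the tail. The same pattern with a plain singly linked list, for illustration (names are made up):

	#include <stdint.h>

	struct region {
		uint64_t start;
		struct region *next;
	};

	/* Insert 'new' so the list stays sorted by ascending start address. */
	static void insert_sorted(struct region **head, struct region *new)
	{
		struct region **p = head;

		while (*p && (*p)->start <= new->start)
			p = &(*p)->next;	/* skip entries that start earlier */
		new->next = *p;			/* link before the first later entry, */
		*p = new;			/* or at the tail if none is later */
	}

	int main(void)
	{
		struct region a = { 0x2000, 0 }, b = { 0x1000, 0 };
		struct region *head = 0;

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);	/* b now precedes a */
		return head->start == 0x1000 ? 0 : 1;
	}
]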
@@ -587,7 +640,9 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
struct viommu_domain *vdomain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
@@ -630,6 +685,21 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
vdomain->map_flags = viommu->map_flags;
vdomain->viommu = viommu;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ if (virtio_has_feature(viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ vdomain->bypass = true;
+ return 0;
+ }
+
+ ret = viommu_domain_map_identity(vdev, vdomain);
+ if (ret) {
+ ida_free(&viommu->domain_ids, vdomain->id);
+ vdomain->viommu = NULL;
+ return -EOPNOTSUPP;
+ }
+ }
+
return 0;
}
@@ -637,8 +707,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
- /* Free all remaining mappings (size 2^64) */
- viommu_del_mappings(vdomain, 0, 0);
+ /* Free all remaining mappings */
+ viommu_del_mappings(vdomain, 0, ULLONG_MAX);
if (vdomain->viommu)
ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -673,7 +743,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
/*
* In the virtio-iommu device, when attaching the endpoint to a new
- * domain, it is detached from the old one and, if as as a result the
+ * domain, it is detached from the old one and, if as a result the
* old domain isn't attached to any endpoint, all mappings are removed
* from the old domain and it is freed.
*
@@ -691,6 +761,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
+ if (vdomain->bypass)
+ req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
+
for (i = 0; i < fwspec->num_ids; i++) {
req.endpoint = cpu_to_le32(fwspec->ids[i]);
@@ -720,6 +793,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
u32 flags;
+ u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -730,7 +804,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
if (flags & ~vdomain->map_flags)
return -EINVAL;
- ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+ ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
if (ret)
return ret;
@@ -739,7 +813,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
.domain = cpu_to_le32(vdomain->id),
.virt_start = cpu_to_le64(iova),
.phys_start = cpu_to_le64(paddr),
- .virt_end = cpu_to_le64(iova + size - 1),
+ .virt_end = cpu_to_le64(end),
.flags = cpu_to_le32(flags),
};
@@ -748,7 +822,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
- viommu_del_mappings(vdomain, iova, size);
+ viommu_del_mappings(vdomain, iova, end);
return ret;
}
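
[Note: the map path keeps a shadow of the device's mappings: record locally, send the request, and undo the local record if the device refuses, so the interval tree never disagrees with the device. The general shape of that pattern, with hypothetical helper names (shadow_add/shadow_del/send_map_request are stand-ins, not real driver functions):

	#include <stdint.h>

	/* Hypothetical stand-ins for the driver's interval tree and virtqueue. */
	static int shadow_add(uint64_t iova, uint64_t end) { (void)iova; (void)end; return 0; }
	static void shadow_del(uint64_t iova, uint64_t end) { (void)iova; (void)end; }
	static int send_map_request(uint64_t iova, uint64_t end) { (void)iova; (void)end; return 0; }

	static int map_with_rollback(uint64_t iova, uint64_t end)
	{
		int ret = shadow_add(iova, end);	/* record locally first */
		if (ret)
			return ret;
		ret = send_map_request(iova, end);	/* then tell the device */
		if (ret)
			shadow_del(iova, end);		/* undo so shadow matches device */
		return ret;
	}

	int main(void) { return map_with_rollback(0x1000, 0x1fff); }
]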
@@ -761,7 +835,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unmapped = viommu_del_mappings(vdomain, iova, size);
+ unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
return 0;
@@ -1132,6 +1206,7 @@ static unsigned int features[] = {
VIRTIO_IOMMU_F_DOMAIN_RANGE,
VIRTIO_IOMMU_F_PROBE,
VIRTIO_IOMMU_F_MMIO,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG,
};
static struct virtio_device_id id_table[] = {