author Ralph Campbell <rcampbell@nvidia.com> 2020-07-23 15:30:02 -0700
committer Jason Gunthorpe <jgg@nvidia.com> 2020-07-28 16:20:33 -0300
commit f8477ce6b5923ef9829e32f2165ea9c2b4b754d2
tree f5f62e123b88987cec782eaf9f55cb9a0b52bda6
parent mm/notifier: add migration invalidation type
nouveau/svm: use the new migration invalidation
Use the new MMU_NOTIFY_MIGRATE event to skip GPU MMU invalidations of
device private memory and handle the invalidation in the driver as part
of migrating device private memory.

Link: https://lore.kernel.org/r/20200723223004.9586-5-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
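[Editor's note] The skip itself happens on the notifier side (in nouveau_svm.c, outside this diffstat-limited view): the callback compares the range's event type and owner against the driver's own device before tearing down GPU mappings. Below is a minimal sketch of that check, assuming the driver passed its struct drm_device pointer as pgmap_owner; the helper name is illustrative, not upstream code. MMU_NOTIFY_MIGRATE and migrate_pgmap_owner come from the parent commit ("mm/notifier: add migration invalidation type").

#include <linux/mmu_notifier.h>

/*
 * Illustrative helper, not the upstream function: an invalidation can
 * be skipped when the driver itself initiated the migration, because
 * it flushes the GPU MMU under svmm->mutex as part of the copy (see
 * the nouveau_dmem_fault_copy_one() hunks below).  `owner` is whatever
 * pointer the driver passed as pgmap_owner to migrate_vma_setup() --
 * in this patch, drm->dev.
 */
static bool skip_own_migration(const struct mmu_notifier_range *range,
			       void *owner)
{
	return range->event == MMU_NOTIFY_MIGRATE &&
	       range->migrate_pgmap_owner == owner;
}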
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dmem.c')
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dmem.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 78b9e3c2a5b3..511fe04cd680 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -140,6 +140,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 {
 	struct device *dev = drm->dev->dev;
 	struct page *dpage, *spage;
+	struct nouveau_svmm *svmm;
 
 	spage = migrate_pfn_to_page(args->src[0]);
 	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
@@ -154,14 +155,19 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 	if (dma_mapping_error(dev, *dma_addr))
 		goto error_free_page;
 
+	svmm = spage->zone_device_data;
+	mutex_lock(&svmm->mutex);
+	nouveau_svmm_invalidate(svmm, args->start, args->end);
 	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
 			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
 		goto error_dma_unmap;
+	mutex_unlock(&svmm->mutex);
 
 	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 	return 0;
 
 error_dma_unmap:
+	mutex_unlock(&svmm->mutex);
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 error_free_page:
 	__free_page(dpage);
@@ -531,7 +537,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 }
 
 static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
-		unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
+		struct nouveau_svmm *svmm, unsigned long src,
+		dma_addr_t *dma_addr, u64 *pfn)
 {
 	struct device *dev = drm->dev->dev;
 	struct page *dpage, *spage;
@@ -561,6 +568,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		goto out_free_page;
 	}
 
+	dpage->zone_device_data = svmm;
 	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
@@ -584,8 +592,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
 	unsigned long addr = args->start, nr_dma = 0, i;
 
 	for (i = 0; addr < args->end; i++) {
-		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
-				dma_addrs + nr_dma, pfns + i);
+		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
+				args->src[i], dma_addrs + nr_dma, pfns + i);
 		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
 			nr_dma++;
 		addr += PAGE_SIZE;
@@ -616,6 +624,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
 	struct migrate_vma args = {
 		.vma = vma,
 		.start = start,
+		.pgmap_owner = drm->dev,
 		.flags = MIGRATE_VMA_SELECT_SYSTEM,
 	};
 	unsigned long i;
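
[Editor's note] For context on the last hunk: the pgmap_owner set in struct migrate_vma is what the core mm copies into mmu_notifier_range.migrate_pgmap_owner when it raises MMU_NOTIFY_MIGRATE, closing the loop with the notifier check sketched above. Below is a condensed sketch of that flow for a single page, with error handling and the actual copy elided; it is not the full nouveau_dmem_migrate_vma(), just the migrate_vma calling convention this patch hooks into.

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Condensed sketch of the migrate_vma flow.  The owner stored in
 * args.pgmap_owner is delivered back to the driver's mmu notifier as
 * migrate_pgmap_owner, so the notifier can recognize -- and skip --
 * invalidations for migrations the driver started itself.
 */
static int migrate_one_page_sketch(struct vm_area_struct *vma,
				   unsigned long start, void *owner)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= start + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= owner,	/* matched by the notifier */
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	ret = migrate_vma_setup(&args);	/* raises MMU_NOTIFY_MIGRATE */
	if (ret)
		return ret;
	/* ... allocate a device page, copy the data, fill dst ... */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}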