author     Duoming Zhou <duoming@zju.edu.cn>    2024-03-06 13:01:04 +0800
committer  Danilo Krummrich <dakr@redhat.com>   2024-03-08 17:36:28 +0100
commit     16e87fe23d4af6df920406494ced5c0f4354567b (patch)
tree       ff62bf6eff02a0fe30d45baa9e448117284ac876 /drivers/gpu/drm/nouveau/nouveau_dmem.c
parent     drm/tests/buddy: fix print format (diff)
download   linux-16e87fe23d4af6df920406494ced5c0f4354567b.tar.gz
           linux-16e87fe23d4af6df920406494ced5c0f4354567b.tar.bz2
           linux-16e87fe23d4af6df920406494ced5c0f4354567b.zip
nouveau/dmem: handle kcalloc() allocation failure
The kcalloc() in nouveau_dmem_evict_chunk() will return NULL if physical
memory has run out; dereferencing src_pfns, dst_pfns or dma_addrs would
then be a NULL pointer dereference.

Moreover, the GPU is going away: if the allocation fails, we cannot
evict all pages mapping the chunk, so the allocation must not fail.
Hence, add the __GFP_NOFAIL flag. Finally, since there is no need for
physically contiguous memory here, switch from kcalloc() to kvcalloc()
so the vmalloc fallback can satisfy the allocation.

CC: <stable@vger.kernel.org> # v6.1
Fixes: 249881232e14 ("nouveau/dmem: evict device private memory during release")
Suggested-by: Danilo Krummrich <dakr@redhat.com>
Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240306050104.11259-1-duoming@zju.edu.cn
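For background, here is a minimal sketch of the allocation pattern the
patch adopts. It is not nouveau code; the function name and variables
are hypothetical. The point is that a teardown path with no error
recovery pairs kvcalloc() with __GFP_NOFAIL so the call blocks until it
succeeds instead of returning NULL, and releases the buffer with
kvfree(), which handles both kmalloc- and vmalloc-backed memory.

/*
 * Hypothetical illustration of the pattern applied by this patch, not
 * driver code: an eviction path that runs while the device is being
 * removed cannot fail gracefully, so the allocation must not fail.
 */
#include <linux/mm.h>
#include <linux/slab.h>

static void demo_evict_chunk(unsigned long npages)
{
	unsigned long *pfns;

	/*
	 * kvcalloc() tries kmalloc() first and falls back to vmalloc(),
	 * so physically contiguous memory is not required. With
	 * __GFP_NOFAIL the allocator retries until it succeeds, so the
	 * result is never NULL and no error path is needed here.
	 */
	pfns = kvcalloc(npages, sizeof(*pfns), GFP_KERNEL | __GFP_NOFAIL);

	/* ... migrate the chunk's pages out using pfns ... */

	/* kvfree() releases memory from either allocator. */
	kvfree(pfns);
}

Note the trade-off: __GFP_NOFAIL can block indefinitely under memory
pressure, so it is reserved for paths, like this one, where failure is
strictly worse than waiting.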
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dmem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e75..6fb65b01d778 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 	dma_addr_t *dma_addrs;
 	struct nouveau_fence *fence;
 
-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
-	dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
 
 	migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
 			npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
 	migrate_device_pages(src_pfns, dst_pfns, npages);
 	nouveau_dmem_fence_done(&fence);
 	migrate_device_finalize(src_pfns, dst_pfns, npages);
-	kfree(src_pfns);
-	kfree(dst_pfns);
+	kvfree(src_pfns);
+	kvfree(dst_pfns);
 	for (i = 0; i < npages; i++)
 		dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
-	kfree(dma_addrs);
+	kvfree(dma_addrs);
 }
 
 void