author	Dmitry Osipenko <dmitry.osipenko@collabora.com>	2022-04-12 01:15:36 +0300
committer	Andrey Grodzovsky <andrey.grodzovsky@amd.com>	2022-07-15 10:09:15 -0400
commit	9b04369b060fd4885f728b7a4ab4851ffb1abb64 (patch)
tree	f45ce0b2dd07c2da0efa6f018b747f1d384717cf /drivers/gpu
parent	Revert "drm/amdgpu: add drm buddy support to amdgpu" (diff)
drm/scheduler: Don't kill jobs in interrupt context
Interrupt context can't sleep. Drivers like Panfrost and MSM take a mutex when a job is released, so that code can sleep. This results in "BUG: scheduling while atomic" if the locks are contended while a job is freed. There is no good reason to release the scheduler's jobs in IRQ context, hence use normal (process) context to fix the trouble.

Cc: stable@vger.kernel.org
Fixes: 542cff7893a3 ("drm/sched: Avoid lockdep spalt on killing a processes")
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220411221536.283312-1-dmitry.osipenko@collabora.com
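For context, here is a minimal sketch (not the scheduler's actual code) of the pattern this patch switches to: an irq_work handler runs in hard-IRQ context and must not sleep, whereas a work_struct handler queued with schedule_work() runs on a kworker thread in process context, where taking a mutex is legal. The struct and function names below (my_job, my_job_cleanup_work, my_job_release) are illustrative only.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Illustrative job structure; not the real struct drm_sched_job. */
struct my_job {
	struct work_struct work;
	struct mutex *release_lock;	/* lock the driver takes on release */
};

/* Runs on a kworker in process context, so sleeping is allowed. */
static void my_job_cleanup_work(struct work_struct *wrk)
{
	struct my_job *job = container_of(wrk, struct my_job, work);

	mutex_lock(job->release_lock);	/* may sleep: fine here */
	/* ... driver-specific teardown ... */
	mutex_unlock(job->release_lock);
	kfree(job);
}

/* May be called from IRQ context (e.g. a fence callback): only queue work. */
static void my_job_release(struct my_job *job)
{
	INIT_WORK(&job->work, my_job_cleanup_work);
	schedule_work(&job->work);	/* defers the cleanup to process context */
}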
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/scheduler/sched_entity.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 191c56064f19..6b25b2f4f5a3 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
- init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
- irq_work_queue(&job->work);
+ INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+ schedule_work(&job->work);
}
static struct dma_fence *