Diffstat (limited to 'drivers/dma-buf/dma-resv.c')
 drivers/dma-buf/dma-resv.c | 142
 1 file changed, 122 insertions(+), 20 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index b51416405e86..8c650b96357a 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
*/
#include <linux/dma-resv.h>
+#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
@@ -56,6 +57,12 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
+struct dma_resv_list {
+ struct rcu_head rcu;
+ u32 shared_count, shared_max;
+ struct dma_fence __rcu *shared[];
+};
+
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
@@ -133,8 +140,19 @@ void dma_resv_fini(struct dma_resv *obj)
}
EXPORT_SYMBOL(dma_resv_fini);
+static inline struct dma_fence *
+dma_resv_excl_fence(struct dma_resv *obj)
+{
+ return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
+}
+
+static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
+{
+ return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
/**
- * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * dma_resv_reserve_fences - Reserve space to add shared fences to
* a dma_resv.
* @obj: reservation object
* @num_fences: number of fences we want to add
@@ -149,7 +167,7 @@ EXPORT_SYMBOL(dma_resv_fini);
* RETURNS
* Zero for success, or -errno
*/
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
struct dma_resv_list *old, *new;
unsigned int i, j, k, max;
@@ -212,7 +230,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
return 0;
}
-EXPORT_SYMBOL(dma_resv_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_fences);
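As a rough usage sketch (the helper name and the single reserved slot are illustrative, not part of this patch), a driver reserves its slots while holding the reservation lock and keeps the lock held until the fences have actually been added:

#include <linux/dma-resv.h>

/* Hypothetical helper: reserve one shared slot before attaching fences. */
static int drv_prepare_fence_slot(struct dma_resv *resv)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* May allocate and therefore fail; check before adding any fence. */
        ret = dma_resv_reserve_fences(resv, 1);
        if (ret) {
                dma_resv_unlock(resv);
                return ret;
        }

        /* ... create jobs and add their fences here ... */

        dma_resv_unlock(resv);
        return 0;
}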
#ifdef CONFIG_DEBUG_MUTEXES
/**
@@ -220,7 +238,7 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
* @obj: the dma_resv object to reset
*
* Reset the number of pre-reserved shared slots to test that drivers do
- * correct slot allocation using dma_resv_reserve_shared(). See also
+ * correct slot allocation using dma_resv_reserve_fences(). See also
* &dma_resv_list.shared_max.
*/
void dma_resv_reset_shared_max(struct dma_resv *obj)
@@ -242,7 +260,7 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
* @fence: the shared fence to add
*
* Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
- * dma_resv_reserve_shared() has been called.
+ * dma_resv_reserve_fences() has been called.
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
@@ -290,40 +308,71 @@ replace:
EXPORT_SYMBOL(dma_resv_add_shared_fence);
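A minimal sketch of the add side, assuming the lock is still held and a slot was reserved as shown above; drv_attach_read_fence is a hypothetical name:

/* Hypothetical helper: publish a read fence into a reserved shared slot. */
static void drv_attach_read_fence(struct dma_resv *resv,
                                  struct dma_fence *fence)
{
        dma_resv_assert_held(resv);

        /* dma_resv_add_shared_fence() takes its own reference to @fence. */
        dma_resv_add_shared_fence(resv, fence);
}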
/**
+ * dma_resv_replace_fences - replace fences in the dma_resv obj
+ * @obj: the reservation object
+ * @context: the context of the fences to replace
+ * @replacement: the new fence to use instead
+ *
+ * Replace all fences from the specified context with a new fence. Only valid
+ * if the operation represented by the original fence no longer has access to
+ * the resources represented by the dma_resv object when the new fence completes.
+ *
+ * An example of using this is replacing a preemption fence with a page table
+ * update fence which makes the resource inaccessible.
+ */
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *replacement)
+{
+ struct dma_resv_list *list;
+ struct dma_fence *old;
+ unsigned int i;
+
+ dma_resv_assert_held(obj);
+
+ write_seqcount_begin(&obj->seq);
+
+ old = dma_resv_excl_fence(obj);
+ if (old->context == context) {
+ RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
+ dma_fence_put(old);
+ }
+
+ list = dma_resv_shared_list(obj);
+ for (i = 0; list && i < list->shared_count; ++i) {
+ old = rcu_dereference_protected(list->shared[i],
+ dma_resv_held(obj));
+ if (old->context != context)
+ continue;
+
+ rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+ dma_fence_put(old);
+ }
+
+ write_seqcount_end(&obj->seq);
+}
+EXPORT_SYMBOL(dma_resv_replace_fences);
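A sketch of the use case named in the comment above, with preempt_ctx and pt_fence as hypothetical driver state; once the page table update fence signals the resource is no longer accessible, so replacing the preemption fences is safe:

/* Hypothetical helper: swap preemption fences for a page table update fence. */
static void drv_swap_preempt_fences(struct dma_resv *resv, u64 preempt_ctx,
                                    struct dma_fence *pt_fence)
{
        dma_resv_assert_held(resv);
        dma_resv_replace_fences(resv, preempt_ctx, pt_fence);
}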
+
+/**
* dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
* @fence: the exclusive fence to add
*
* Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
- * Note that this function replaces all fences attached to @obj, see also
- * &dma_resv.fence_excl for a discussion of the semantics.
+ * See also &dma_resv.fence_excl for a discussion of the semantics.
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
struct dma_fence *old_fence = dma_resv_excl_fence(obj);
- struct dma_resv_list *old;
- u32 i = 0;
dma_resv_assert_held(obj);
- old = dma_resv_shared_list(obj);
- if (old)
- i = old->shared_count;
-
dma_fence_get(fence);
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(obj->fence_excl, fence);
- if (old)
- old->shared_count = 0;
write_seqcount_end(&obj->seq);
- /* inplace update, no shared fences */
- while (i--)
- dma_fence_put(rcu_dereference_protected(old->shared[i],
- dma_resv_held(obj)));
-
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
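A minimal sketch for the exclusive slot, assuming a write fence from a hypothetical driver job; note that after this patch the shared fences stay in place instead of being dropped:

/* Hypothetical helper: publish a write fence in the exclusive slot. */
static void drv_attach_write_fence(struct dma_resv *resv,
                                   struct dma_fence *fence)
{
        dma_resv_assert_held(resv);

        /*
         * Only the old exclusive fence reference is dropped here;
         * the shared fences are no longer cleared.
         */
        dma_resv_add_excl_fence(resv, fence);
}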
@@ -595,6 +644,59 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
+ * dma_resv_get_singleton - Get a single fence for all the fences
+ * @obj: the reservation object
+ * @write: true if we should return all fences
+ * @fence: the resulting fence
+ *
+ * Get a single fence representing all the fences inside the resv object.
+ *
+ * Warning: The returned fence must not be added back to the same resv object,
+ * since that can lead to stack corruption when finalizing the
+ * dma_fence_array.
+ *
+ * Returns 0 on success and negative error values on failure.
+ */
+int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+ struct dma_fence **fence)
+{
+ struct dma_fence_array *array;
+ struct dma_fence **fences;
+ unsigned count;
+ int r;
+
+ r = dma_resv_get_fences(obj, write, &count, &fences);
+ if (r)
+ return r;
+
+ if (count == 0) {
+ *fence = NULL;
+ return 0;
+ }
+
+ if (count == 1) {
+ *fence = fences[0];
+ kfree(fences);
+ return 0;
+ }
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array) {
+ while (count--)
+ dma_fence_put(fences[count]);
+ kfree(fences);
+ return -ENOMEM;
+ }
+
+ *fence = &array->base;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
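A sketch of how a caller might use this to wait for everything attached to the object; drv_wait_idle is a hypothetical name and error handling is kept minimal:

/* Hypothetical helper: collapse all fences into one and wait for it. */
static int drv_wait_idle(struct dma_resv *resv)
{
        struct dma_fence *fence;
        int ret;

        ret = dma_resv_get_singleton(resv, true, &fence);
        if (ret)
                return ret;

        if (!fence)
                return 0;       /* nothing to wait for */

        ret = dma_fence_wait(fence, true);
        dma_fence_put(fence);
        return ret;
}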
+
+/**
* dma_resv_wait_timeout - Wait on reservation's objects
* shared and/or exclusive fences.
* @obj: the reservation object