drm_sync_helper.c 7.4 KB

/*
 * drm_sync_helper.c: software fence and helper functions for fences and
 * reservations used for dma buffer access synchronization between drivers.
 *
 * Copyright 2014 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <drm/drm_sync_helper.h>
#include <linux/slab.h>
#include <linux/reservation.h>

static DEFINE_SPINLOCK(sw_fence_lock);
/*
 * drm_add_reservation - add a reservation object to an array of reservations
 *
 * Appends @resv to @resvs unless it is already present. When @exclusive
 * access is requested, the corresponding bit in @excl_resvs_bitmap is set.
 */
void drm_add_reservation(struct reservation_object *resv,
			 struct reservation_object **resvs,
			 unsigned long *excl_resvs_bitmap,
			 unsigned int *num_resvs, bool exclusive)
{
	unsigned int r;

	for (r = 0; r < *num_resvs; r++) {
		if (resvs[r] == resv)
			return;
	}
	resvs[*num_resvs] = resv;
	if (exclusive)
		set_bit(*num_resvs, excl_resvs_bitmap);
	(*num_resvs)++;
}
EXPORT_SYMBOL(drm_add_reservation);
/*
 * drm_lock_reservations - acquire the ww_mutex of every reservation in @resvs
 *
 * Uses the wait/wound backoff protocol to avoid deadlock when several callers
 * lock overlapping sets of reservations. Returns 0 on success or a negative
 * error code; on success the locks are held until drm_unlock_reservations().
 */
int drm_lock_reservations(struct reservation_object **resvs,
			  unsigned int num_resvs, struct ww_acquire_ctx *ctx)
{
	unsigned int r;
	struct reservation_object *slow_res = NULL;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (r = 0; r < num_resvs; r++) {
		int ret;

		/* skip the resv we locked with the slow lock */
		if (resvs[r] == slow_res) {
			slow_res = NULL;
			continue;
		}
		ret = ww_mutex_lock(&resvs[r]->lock, ctx);
		if (ret < 0) {
			unsigned int slow_r = r;

			/*
			 * undo all the locks we have already taken,
			 * in reverse order
			 */
			while (r > 0) {
				r--;
				ww_mutex_unlock(&resvs[r]->lock);
			}
			if (slow_res)
				ww_mutex_unlock(&slow_res->lock);
			if (ret == -EDEADLK) {
				slow_res = resvs[slow_r];
				ww_mutex_lock_slow(&slow_res->lock, ctx);
				goto retry;
			}
			ww_acquire_fini(ctx);
			return ret;
		}
	}
	ww_acquire_done(ctx);
	return 0;
}
EXPORT_SYMBOL(drm_lock_reservations);
void drm_unlock_reservations(struct reservation_object **resvs,
			     unsigned int num_resvs,
			     struct ww_acquire_ctx *ctx)
{
	unsigned int r;

	for (r = 0; r < num_resvs; r++)
		ww_mutex_unlock(&resvs[r]->lock);
	ww_acquire_fini(ctx);
}
EXPORT_SYMBOL(drm_unlock_reservations);
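
/*
 * Illustrative usage sketch: collecting, locking and unlocking the
 * reservation objects of the buffers touched by a command submission.
 * MAX_BUFS, bufs[], the per-buffer "written" flag and submit_to_hw() are
 * hypothetical placeholders; only the drm_*_reservation* helpers above are
 * part of this file.
 *
 *	struct reservation_object *resvs[MAX_BUFS];
 *	DECLARE_BITMAP(excl_bitmap, MAX_BUFS);
 *	unsigned int num_resvs = 0;
 *	struct ww_acquire_ctx ctx;
 *	unsigned int i;
 *	int ret;
 *
 *	bitmap_zero(excl_bitmap, MAX_BUFS);
 *	for (i = 0; i < num_bufs; i++)
 *		drm_add_reservation(bufs[i]->resv, resvs, excl_bitmap,
 *				    &num_resvs, bufs[i]->written);
 *
 *	ret = drm_lock_reservations(resvs, num_resvs, &ctx);
 *	if (ret)
 *		return ret;
 *	submit_to_hw(bufs, num_bufs);
 *	drm_unlock_reservations(resvs, num_resvs, &ctx);
 */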
static void reservation_cb_fence_cb(struct fence *fence, struct fence_cb *cb)
{
	struct drm_reservation_fence_cb *rfcb =
		container_of(cb, struct drm_reservation_fence_cb, base);
	struct drm_reservation_cb *rcb = rfcb->parent;

	if (atomic_dec_and_test(&rcb->count))
		schedule_work(&rcb->work);
}

static void
reservation_cb_cleanup(struct drm_reservation_cb *rcb)
{
	unsigned cb;

	for (cb = 0; cb < rcb->num_fence_cbs; cb++) {
		if (rcb->fence_cbs[cb]) {
			fence_remove_callback(rcb->fence_cbs[cb]->fence,
					      &rcb->fence_cbs[cb]->base);
			fence_put(rcb->fence_cbs[cb]->fence);
			kfree(rcb->fence_cbs[cb]);
			rcb->fence_cbs[cb] = NULL;
		}
	}
	kfree(rcb->fence_cbs);
	rcb->fence_cbs = NULL;
	rcb->num_fence_cbs = 0;
}
static void reservation_cb_work(struct work_struct *pwork)
{
	struct drm_reservation_cb *rcb =
		container_of(pwork, struct drm_reservation_cb, work);

	/*
	 * clean up everything before calling the callback, because the
	 * callback may free the structure that contains rcb and its
	 * work_struct
	 */
	reservation_cb_cleanup(rcb);
	rcb->func(rcb, rcb->context);
}
static int
reservation_cb_add_fence_cb(struct drm_reservation_cb *rcb, struct fence *fence)
{
	int ret = 0;
	struct drm_reservation_fence_cb *fence_cb;
	struct drm_reservation_fence_cb **new_fence_cbs;

	new_fence_cbs = krealloc(rcb->fence_cbs,
				 (rcb->num_fence_cbs + 1)
				 * sizeof(struct drm_reservation_fence_cb *),
				 GFP_KERNEL);
	if (!new_fence_cbs)
		return -ENOMEM;
	rcb->fence_cbs = new_fence_cbs;

	fence_cb = kzalloc(sizeof(struct drm_reservation_fence_cb), GFP_KERNEL);
	if (!fence_cb)
		return -ENOMEM;

	/*
	 * take a reference so the fence cannot disappear while we are waiting
	 * for its callback; we also need it in case we have to remove the
	 * callback later
	 */
	fence_get(fence);
	fence_cb->fence = fence;
	fence_cb->parent = rcb;
	rcb->fence_cbs[rcb->num_fence_cbs] = fence_cb;
	atomic_inc(&rcb->count);
	ret = fence_add_callback(fence, &fence_cb->base,
				 reservation_cb_fence_cb);
	if (ret == -ENOENT) {
		/* the fence has already signaled */
		atomic_dec(&rcb->count);
		fence_put(fence_cb->fence);
		kfree(fence_cb);
		ret = 0;
	} else if (ret < 0) {
		atomic_dec(&rcb->count);
		fence_put(fence_cb->fence);
		kfree(fence_cb);
		return ret;
	} else {
		rcb->num_fence_cbs++;
	}
	return ret;
}
/*
 * drm_reservation_cb_init - initialize a reservation callback
 *
 * Once every fence added with drm_reservation_cb_add() has signaled and
 * drm_reservation_cb_done() has been called, @func is invoked with @context
 * from a worker.
 */
void
drm_reservation_cb_init(struct drm_reservation_cb *rcb,
			drm_reservation_cb_func_t func, void *context)
{
	INIT_WORK(&rcb->work, reservation_cb_work);
	atomic_set(&rcb->count, 1);
	rcb->num_fence_cbs = 0;
	rcb->fence_cbs = NULL;
	rcb->func = func;
	rcb->context = context;
}
EXPORT_SYMBOL(drm_reservation_cb_init);
/*
 * drm_reservation_cb_add - track the fences of a reservation object
 *
 * Adds a callback for every relevant fence in @resv: always the exclusive
 * fence, and the shared fences as well when @exclusive access is requested.
 */
int
drm_reservation_cb_add(struct drm_reservation_cb *rcb,
		       struct reservation_object *resv, bool exclusive)
{
	int ret = 0;
	struct fence *fence;
	unsigned shared_count = 0, f;
	struct fence **shared_fences = NULL;

	/* enumerate all the fences in the reservation and add callbacks */
	ret = reservation_object_get_fences_rcu(resv, &fence,
						&shared_count, &shared_fences);
	if (ret < 0)
		return ret;

	if (fence) {
		ret = reservation_cb_add_fence_cb(rcb, fence);
		if (ret < 0) {
			reservation_cb_cleanup(rcb);
			goto error;
		}
	}
	if (exclusive) {
		for (f = 0; f < shared_count; f++) {
			ret = reservation_cb_add_fence_cb(rcb,
							  shared_fences[f]);
			if (ret < 0) {
				reservation_cb_cleanup(rcb);
				goto error;
			}
		}
	}

error:
	if (fence)
		fence_put(fence);
	if (shared_fences) {
		for (f = 0; f < shared_count; f++)
			fence_put(shared_fences[f]);
		kfree(shared_fences);
	}
	return ret;
}
EXPORT_SYMBOL(drm_reservation_cb_add);
/*
 * drm_reservation_cb_done - signal that no more fences will be added
 */
void
drm_reservation_cb_done(struct drm_reservation_cb *rcb)
{
	/*
	 * drop the initial reference taken in drm_reservation_cb_init() and
	 * trigger the callback in case all the fences have already signaled
	 */
	if (atomic_dec_and_test(&rcb->count)) {
		/*
		 * we could call the callback here directly, but if the
		 * callback function needed to take the same mutex as our
		 * caller that would deadlock, so it is safer to call it
		 * from a worker
		 */
		schedule_work(&rcb->work);
	}
}
EXPORT_SYMBOL(drm_reservation_cb_done);
/*
 * drm_reservation_cb_fini - cancel a pending reservation callback and drop
 * all fence references it still holds
 */
void
drm_reservation_cb_fini(struct drm_reservation_cb *rcb)
{
	/* make sure no work will be triggered */
	atomic_set(&rcb->count, 0);
	cancel_work_sync(&rcb->work);
	reservation_cb_cleanup(rcb);
}
EXPORT_SYMBOL(drm_reservation_cb_fini);
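
/*
 * Illustrative lifecycle sketch: waiting for all fences in a set of
 * reservation objects without blocking the caller. struct my_job, its
 * fences_signaled completion, resvs[]/num_resvs and the enclosing function
 * are hypothetical placeholders; the callback runs from a worker once every
 * tracked fence has signaled and drm_reservation_cb_done() has been called.
 *
 *	static void my_done(struct drm_reservation_cb *rcb, void *context)
 *	{
 *		struct my_job *job = context;
 *
 *		complete(&job->fences_signaled);
 *	}
 *
 *	drm_reservation_cb_init(&job->rcb, my_done, job);
 *	for (i = 0; i < num_resvs; i++) {
 *		ret = drm_reservation_cb_add(&job->rcb, resvs[i], true);
 *		if (ret) {
 *			drm_reservation_cb_fini(&job->rcb);
 *			return ret;
 *		}
 *	}
 *	drm_reservation_cb_done(&job->rcb);
 */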
static bool sw_fence_enable_signaling(struct fence *f)
{
	return true;
}

static const char *sw_fence_get_driver_name(struct fence *fence)
{
	return "drm_sync_helper";
}

static const char *sw_fence_get_timeline_name(struct fence *f)
{
	return "drm_sync.sw";
}

static const struct fence_ops sw_fence_ops = {
	.get_driver_name = sw_fence_get_driver_name,
	.get_timeline_name = sw_fence_get_timeline_name,
	.enable_signaling = sw_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = NULL
};
/*
 * drm_sw_fence_new - create a software-only fence
 *
 * The returned fence is signaled explicitly by its owner rather than by
 * hardware.
 */
struct fence *drm_sw_fence_new(unsigned int context, unsigned seqno)
{
	struct fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);
	fence_init(fence,
		   &sw_fence_ops,
		   &sw_fence_lock,
		   context, seqno);
	return fence;
}
EXPORT_SYMBOL(drm_sw_fence_new);
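
/*
 * Illustrative usage sketch: creating and later signaling a software fence.
 * Obtaining the context from fence_context_alloc() and using a seqno of 1
 * are assumptions about the caller; in between, the fence would typically be
 * attached to a reservation object or handed to another driver.
 *
 *	struct fence *f;
 *
 *	f = drm_sw_fence_new(fence_context_alloc(1), 1);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *
 *	... make the fence visible to waiters ...
 *
 *	fence_signal(f);
 *	fence_put(f);
 */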