mtk_sync.c 8.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345
  1. /*
  2. * Copyright (C) 2017 MediaTek Inc.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/debugfs.h>
  14. #include <linux/export.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/file.h>
  17. #include <linux/kthread.h>
  18. #include <linux/slab.h>
  19. #include <linux/delay.h>
  20. #include <linux/sync_file.h>
  21. #include "mtk_sync.h"
  22. /* ---------------------------------------------------------------- */
  23. static struct dma_fence_ops mtk_sync_timeline_fence_ops;
  24. static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
  25. {
  26. return container_of(fence->lock, struct sync_timeline, lock);
  27. }
  28. static LIST_HEAD(sync_timeline_list_head);
  29. static DEFINE_SPINLOCK(sync_timeline_list_lock);
  30. static inline struct sync_pt *fence_to_sync_pt(struct dma_fence *fence)
  31. {
  32. if (fence->ops != &mtk_sync_timeline_fence_ops)
  33. return NULL;
  34. return container_of(fence, struct sync_pt, base);
  35. }
/* Register @obj on the global timeline list (debug/accounting only). */
static void mt_sync_timeline_debug_add(struct sync_timeline *obj)
{
	unsigned long flags;

	/* irqsave: may be called from contexts with IRQs disabled. */
	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
/* Unregister @obj from the global timeline list; counterpart of
 * mt_sync_timeline_debug_add().
 */
static void mt_sync_timeline_debug_remove(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
/* kref release callback: drop the timeline from the debug list and
 * free it.  Runs when the last reference is put.
 */
static void mtk_sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	mt_sync_timeline_debug_remove(obj);
	kfree(obj);
}
/* Take an additional reference on @obj. */
static void mtk_sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}
/* Drop a reference on @obj; frees it via mtk_sync_timeline_free()
 * when the count reaches zero.
 */
static void mtk_sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, mtk_sync_timeline_free);
}
/**
 * mtk_sync_pt_create() - creates a sync pt
 * @obj: fence's parent sync_timeline
 * @value: timeline value at which the new fence signals
 *
 * Creates a new sync_pt (fence) as a child of @obj and, unless it is
 * already signalled, inserts it into the timeline's rb-tree and sorted
 * pt_list keyed by @value.  If an unsignalled point with the same value
 * already exists, a reference to that point is returned instead of
 * keeping a duplicate.  Returns the sync_pt object or NULL in case of
 * allocation error.
 */
static struct sync_pt *mtk_sync_pt_create(struct sync_timeline *obj,
	unsigned int value)
{
	struct sync_pt *pt;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return NULL;

	/* The fence pins its timeline; dropped in fence_release(). */
	mtk_sync_timeline_get(obj);
	dma_fence_init(&pt->base, &mtk_sync_timeline_fence_ops, &obj->lock,
		obj->context, value);
	INIT_LIST_HEAD(&pt->link);

	spin_lock_irq(&obj->lock);
	if (!dma_fence_is_signaled_locked(&pt->base)) {
		struct rb_node **p = &obj->pt_tree.rb_node;
		struct rb_node *parent = NULL;

		/* Find the insertion slot for @value in the rb-tree. */
		while (*p) {
			struct sync_pt *other;
			int cmp;

			parent = *p;
			other = rb_entry(parent, typeof(*pt), node);
			cmp = value - other->base.seqno;
			if (cmp > 0) {
				p = &parent->rb_right;
			} else if (cmp < 0) {
				p = &parent->rb_left;
			} else {
				/* Duplicate value: reuse the existing point
				 * if we can still get a reference to it.
				 */
				if (dma_fence_get_rcu(&other->base)) {
					dma_fence_put(&pt->base);
					pt = other;
					goto unlock;
				}
				/* Existing point is mid-teardown; keep the
				 * new one and descend left past it.
				 */
				p = &parent->rb_left;
			}
		}
		rb_link_node(&pt->node, parent, p);
		rb_insert_color(&pt->node, &obj->pt_tree);

		/* Keep pt_list sorted by value: link the new point just
		 * before its in-order tree successor (or at the tail).
		 */
		parent = rb_next(&pt->node);
		list_add_tail(
			&pt->link,
			parent ? &rb_entry(parent, typeof(*pt), node)->link
			: &obj->pt_list);
	}
unlock:
	spin_unlock_irq(&obj->lock);
	return pt;
}
  122. static const char *
  123. mtk_sync_timeline_fence_get_driver_name(struct dma_fence *fence)
  124. {
  125. return "mtk_sync";
  126. }
  127. static const char *
  128. mtk_sync_timeline_fence_get_timeline_name(struct dma_fence *fence)
  129. {
  130. struct sync_timeline *parent = dma_fence_parent(fence);
  131. return parent->name;
  132. }
/* dma_fence_ops release: unlink the point from its timeline (if it is
 * still queued) and free the fence, dropping the timeline reference
 * taken in mtk_sync_pt_create().
 */
static void mtk_sync_timeline_fence_release(struct dma_fence *fence)
{
	struct sync_pt *pt = fence_to_sync_pt(fence);
	struct sync_timeline *parent = dma_fence_parent(fence);

	if (pt) {
		/* Unlocked peek first, then re-check under the timeline
		 * lock: mtk_sync_timeline_signal() may unlink this point
		 * concurrently.
		 */
		if (!list_empty(&pt->link)) {
			unsigned long flags;

			spin_lock_irqsave(fence->lock, flags);
			if (!list_empty(&pt->link)) {
				list_del(&pt->link);
				rb_erase(&pt->node, &parent->pt_tree);
			}
			spin_unlock_irqrestore(fence->lock, flags);
		}
	}
	mtk_sync_timeline_put(parent);
	dma_fence_free(fence);
}
  151. static bool mtk_sync_timeline_fence_signaled(struct dma_fence *fence)
  152. {
  153. struct sync_timeline *parent = dma_fence_parent(fence);
  154. return !__dma_fence_is_later(fence->seqno, parent->value);
  155. }
/* dma_fence_ops: signalling needs no extra setup on this timeline;
 * every pending point is signalled from mtk_sync_timeline_signal().
 */
static bool mtk_sync_timeline_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
  160. static void mtk_sync_timeline_fence_value_str(
  161. struct dma_fence *fence,
  162. char *str,
  163. int size)
  164. {
  165. int r;
  166. r = snprintf(str, size, "%d", fence->seqno);
  167. if (r < 0) {
  168. /* Handle snprintf() error */
  169. pr_debug("snprintf error\n");
  170. }
  171. }
  172. static void mtk_sync_timeline_fence_timeline_value_str(struct dma_fence *fence,
  173. char *str, int size)
  174. {
  175. struct sync_timeline *parent = dma_fence_parent(fence);
  176. snprintf(str, size, "%d", parent->value);
  177. }
/* dma_fence_ops vtable shared by all fences created on mtk timelines
 * (forward-declared at the top of this file).
 */
static struct dma_fence_ops mtk_sync_timeline_fence_ops = {
	.get_driver_name = mtk_sync_timeline_fence_get_driver_name,
	.get_timeline_name = mtk_sync_timeline_fence_get_timeline_name,
	.enable_signaling = mtk_sync_timeline_fence_enable_signaling,
	.signaled = mtk_sync_timeline_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = mtk_sync_timeline_fence_release,
	.fence_value_str = mtk_sync_timeline_fence_value_str,
	.timeline_value_str = mtk_sync_timeline_fence_timeline_value_str,
};
/* ---------------------------------------------------------------- */
/**
 * mtk_sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj: sync_timeline to signal
 * @inc: number to increment on timeline->value
 *
 * Advances the timeline value by @inc and signals every pending point
 * that the new value satisfies.  A sync implementation should call this
 * any time one of its fences has signaled or has an error condition.
 */
static void mtk_sync_timeline_signal(struct sync_timeline *obj,
	unsigned int inc)
{
	struct sync_pt *pt, *next;

	spin_lock_irq(&obj->lock);
	obj->value += inc;

	/* pt_list is sorted by value, so stop at the first point that is
	 * still in the future.
	 */
	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
		if (!mtk_sync_timeline_fence_signaled(&pt->base))
			break;
		list_del_init(&pt->link);
		rb_erase(&pt->node, &obj->pt_tree);
		/*
		 * A signal callback may release the last reference to this
		 * fence, causing it to be freed. That operation has to be
		 * last to avoid a use after free inside this loop, and must
		 * be after we remove the fence from the timeline in order to
		 * prevent deadlocking on timeline->lock inside
		 * timeline_fence_release().
		 */
		dma_fence_signal_locked(&pt->base);
	}
	spin_unlock_irq(&obj->lock);
}
  220. /**
  221. * sync_timeline_create() - creates a sync object
  222. * @name: sync_timeline name
  223. *
  224. * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
  225. * case of error.
  226. */
  227. static struct sync_timeline *sync_timeline_create(const char *name)
  228. {
  229. struct sync_timeline *obj;
  230. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  231. if (!obj)
  232. return NULL;
  233. kref_init(&obj->kref);
  234. obj->context = dma_fence_context_alloc(1);
  235. strlcpy(obj->name, name, sizeof(obj->name));
  236. obj->pt_tree = RB_ROOT;
  237. INIT_LIST_HEAD(&obj->pt_list);
  238. spin_lock_init(&obj->lock);
  239. mt_sync_timeline_debug_add(obj);
  240. return obj;
  241. }
/* Public wrapper: create a named sync timeline (NULL on failure). */
struct sync_timeline *mtk_sync_timeline_create(const char *name)
{
	return sync_timeline_create(name);
}
/* Public wrapper: drop the creator's reference on @obj; the timeline
 * is freed once all outstanding fences have released it too.
 */
void mtk_sync_timeline_destroy(struct sync_timeline *obj)
{
	mtk_sync_timeline_put(obj);
}
/* Public wrapper: advance @obj's value by @value, signalling every
 * fence the new value satisfies.
 */
void mtk_sync_timeline_inc(struct sync_timeline *obj, u32 value)
{
	mtk_sync_timeline_signal(obj, value);
}
  254. int mtk_sync_fence_create(struct sync_timeline *obj, struct fence_data *data)
  255. {
  256. int fd = get_unused_fd_flags(O_CLOEXEC);
  257. int err;
  258. struct sync_pt *pt;
  259. struct sync_file *sync_file;
  260. if (fd < 0)
  261. return fd;
  262. pt = mtk_sync_pt_create(obj, data->value);
  263. if (!pt) {
  264. err = -ENOMEM;
  265. goto err;
  266. }
  267. sync_file = sync_file_create(&pt->base);
  268. dma_fence_put(&pt->base);
  269. if (!sync_file) {
  270. err = -ENOMEM;
  271. goto err;
  272. }
  273. data->fence = fd;
  274. fd_install(fd, sync_file->file);
  275. return 0;
  276. err:
  277. put_unused_fd(fd);
  278. return err;
  279. }