/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

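/*
 * Example (illustrative sketch, not part of this driver): a caller
 * typically embeds struct sync_timeline at the start of its own timeline
 * struct and passes the enlarged size here.  All "foo" names below are
 * hypothetical.
 *
 *	struct foo_timeline {
 *		struct sync_timeline obj;	// must come first
 *		u32 value;			// driver's progress counter
 *	};
 *
 *	static struct foo_timeline *foo_timeline_create(void)
 *	{
 *		return (struct foo_timeline *)
 *			sync_timeline_create(&foo_timeline_ops,
 *					     sizeof(struct foo_timeline),
 *					     "foo");
 *	}
 */
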
static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;
        smp_wmb();

        /*
         * signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);

        kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);

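/*
 * Example (sketch, hypothetical "foo" driver): when the hardware makes
 * progress, the driver advances its counter and asks the timeline to
 * re-check its active points, e.g. from an interrupt handler:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_timeline *tl = data;
 *
 *		tl->value++;
 *		sync_timeline_signal(&tl->obj);
 *		return IRQ_HANDLED;
 *	}
 */
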
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

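/*
 * Status convention: ops->has_signaled() returns 0 while the point is
 * still pending, 1 once it has signaled, and a negative errno on error.
 * A minimal, wraparound-safe sketch for the hypothetical "foo" driver
 * above (struct foo_pt embeds struct sync_pt first):
 *
 *	static int foo_has_signaled(struct sync_pt *pt)
 *	{
 *		struct foo_pt *fpt = (struct foo_pt *)pt;
 *		struct foo_timeline *tl = (struct foo_timeline *)pt->parent;
 *
 *		return ((s32)(tl->value - fpt->value) >= 0) ? 1 : 0;
 *	}
 */
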
static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0)
                goto out;

        list_add_tail(&pt->active_list, &obj->active_list_head);

out:
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
#ifdef CONFIG_SYNC_DEBUG
        strlcpy(fence->name, name, sizeof(fence->name));
#endif
        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);
        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt had already signaled before
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);

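/*
 * Example (sketch, hypothetical "foo" names): the usual pattern is to
 * create a point, wrap it in a fence and hand userspace an fd for it:
 *
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *	int fd = get_unused_fd();
 *
 *	if (fd < 0)
 *		return fd;
 *	pt = sync_pt_create(&tl->obj, sizeof(struct foo_pt));
 *	if (pt == NULL) {
 *		put_unused_fd(fd);
 *		return -ENOMEM;
 *	}
 *	fence = sync_fence_create("foo_fence", pt);
 *	if (fence == NULL) {
 *		sync_pt_free(pt);
 *		put_unused_fd(fd);
 *		return -ENOMEM;
 *	}
 *	sync_fence_install(fence, fd);
 */
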
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
        }

        return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                int cmp_val;
                                int (*cmp_fn)
                                        (struct sync_pt *, struct sync_pt *);

                                cmp_fn = dst_pt->parent->ops->compare;
                                cmp_val = cmp_fn(dst_pt, src_pt);

                                /*
                                 * Out-of-order users like oneshot don't
                                 * follow a timeline ordering.
                                 */
                                if (cmp_val != -cmp_fn(src_pt, dst_pt))
                                        break;

                                if (cmp_val == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);

                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                }
        }

        return 0;
}

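/*
 * The compare convention assumed above: ops->compare(a, b) returns -1 if
 * a signals before b, 0 if they signal together, and 1 if a signals
 * after b.  A wraparound-safe sketch for the hypothetical "foo" driver:
 *
 *	static int foo_compare(struct sync_pt *a, struct sync_pt *b)
 *	{
 *		s32 diff = ((struct foo_pt *)a)->value -
 *			   ((struct foo_pt *)b)->value;
 *
 *		if (diff == 0)
 *			return 0;
 *		return diff < 0 ? -1 : 1;
 *	}
 */
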
static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_pt_free(pt);
        }
}

struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

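/*
 * Example (sketch): a driver that receives a fence fd from userspace
 * (e.g. in an ioctl) must balance sync_fence_fdget() with
 * sync_fence_put() once it is done with the fence:
 *
 *	struct sync_fence *fence = sync_fence_fdget(fd);
 *
 *	if (fence == NULL)
 *		return -EINVAL;	// not a sync fence fd
 *	...use the fence...
 *	sync_fence_put(fence);
 */
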
static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}

struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        struct list_head *pos;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_activate(pt);
        }

        /*
         * signal the fence in case one of its pts had already signaled
         * before it was activated above
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

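/*
 * Example (sketch; acquire_fence and own_fence are hypothetical):
 * combining an acquire fence from userspace with a driver's own fence so
 * that one wait covers both:
 *
 *	struct sync_fence *merged;
 *
 *	merged = sync_fence_merge("foo_merged", acquire_fence, own_fence);
 *	if (merged == NULL)
 *		return -ENOMEM;
 *	// merged holds duplicated pts, so the two input fences can be
 *	// put independently of the merged fence's lifetime
 */
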
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}

int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

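/*
 * Example (sketch; assumes sync_fence_waiter_init() from linux/sync.h,
 * and the "foo" names are hypothetical): registering a callback that may
 * run in atomic context when the fence signals.  A nonzero return means
 * the fence already signaled (or errored) and the callback will not run:
 *
 *	static void foo_fence_cb(struct sync_fence *fence,
 *				 struct sync_fence_waiter *waiter)
 *	{
 *		struct foo_job *job = container_of(waiter, struct foo_job,
 *						   waiter);
 *
 *		queue_work(foo_wq, &job->work);
 *	}
 *
 *	sync_fence_waiter_init(&job->waiter, foo_fence_cb);
 *	err = sync_fence_wait_async(fence, &job->waiter);
 *	if (err != 0)
 *		queue_work(foo_wq, &job->work);	// already signaled/error
 */
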
int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);
                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}

static void sync_pt_log(struct sync_pt *pt)
{
        int status = pt->status;

        pr_cont(" %s_pt %s",
                pt->parent->name,
                sync_status_str(status));

        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                pr_cont("@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                pr_cont(": %s", value);
                pt->parent->ops->timeline_value_str(pt->parent, value,
                                                    sizeof(value));
                pr_cont(" / %s", value);
        }

        pr_cont("\n");

        /* Show additional details for active fences */
        if (pt->status == 0 && pt->parent->ops->pt_log)
                pt->parent->ops->pt_log(pt);
}

void sync_fence_log(struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        pr_info("[%pK] %s: %s\n", fence, fence->name,
                sync_status_str(fence->status));

        pr_info("waiters:\n");

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                pr_info(" %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        pr_info("syncpoints:\n");
        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_log(pt);
        }
}
EXPORT_SYMBOL(sync_fence_log);

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;
        struct sync_pt *pt;

        trace_sync_wait(fence, 1);
        list_for_each_entry(pt, &fence->pt_list_head, pt_list)
                trace_sync_pt(pt);

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }
        trace_sync_wait(fence, 0);

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%pK]\n", fence->status, fence);
                sync_fence_log(fence);
                return fence->status;
        }

        if (fence->status == 0) {
                if (timeout > 0) {
                        pr_info("fence timeout on [%pK] after %dms\n", fence,
                                jiffies_to_msecs(timeout));
                        sync_fence_log(fence);
                }
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

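/*
 * Example (sketch): the timeout is in milliseconds; a negative timeout
 * waits indefinitely, and a zero timeout just polls the current status:
 *
 *	err = sync_fence_wait(fence, 1000);
 *	if (err == -ETIME)
 *		...fence still pending after one second...
 *	else if (err < 0)
 *		...interrupted, or the fence signaled in error...
 */
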
static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

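/*
 * Example (userspace sketch; SYNC_IOC_WAIT, SYNC_IOC_MERGE and
 * struct sync_merge_data come from the sync uapi header):
 *
 *	__s32 timeout_ms = 1000;
 *	struct sync_merge_data merge = { .fd2 = other_fd };
 *
 *	strncpy(merge.name, "merged", sizeof(merge.name));
 *	if (ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms) < 0)
 *		...errno == ETIME means the fence did not signal in time...
 *	if (ioctl(fence_fd, SYNC_IOC_MERGE, &merge) == 0)
 *		...merge.fence now holds the new fence's fd...
 */
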
#ifdef CONFIG_DEBUG_FS
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, " %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));

        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
                        pt->parent->ops->timeline_value_str(pt->parent, value,
                                                            sizeof(value));
                        seq_printf(s, " / %s", value);
                }
        } else if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->timeline_value_str) {
                char value[64];

                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);

                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%pK] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
        .open = sync_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);
#endif