/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

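/*
 * Allocate and register a timeline.  @size lets a driver embed
 * struct sync_timeline at the head of a larger, driver-private
 * structure; it must be at least sizeof(struct sync_timeline).
 */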
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

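/*
 * kref release callback: drop the timeline from the global list, give
 * the driver a chance to clean up via ->release_obj, then free it.
 */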
static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}

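/*
 * Mark the timeline destroyed, force any still-active sync_pts to
 * signal (they report -ENOENT once ->destroyed is set), and drop the
 * creator's reference.
 */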
void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;
        smp_wmb();

        /*
         * signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);

        kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

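/*
 * Called by the timeline implementation when it advances: collect every
 * active pt that now reports signaled, then dispatch fence callbacks
 * outside active_list_lock.  A fence ref is held across the dispatch so
 * the fence can't be freed under us.
 */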
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);

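/*
 * Allocate a sync_pt on @parent.  As with sync_timeline_create(), @size
 * lets the driver embed the pt in a larger structure.  Takes a ref on
 * the parent timeline for the pt's lifetime.
 */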
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0)
                goto out;

        list_add_tail(&pt->active_list, &obj->active_list_head);

out:
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};

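/*
 * Allocate a fence and back it with an anonymous inode so it can be
 * handed to userspace as a file descriptor.
 */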
static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);
        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt had already signaled before
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);

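/*
 * Copy every pt in @src into @dst via the driver's ->dup op.  Used as
 * the first half of a merge.
 */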
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
        }

        return 0;
}

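/*
 * Merge the pts of @src into @dst.  Two pts on the same, totally
 * ordered timeline are collapsed into a single pt that signals at the
 * later of the two; pts on out-of-order timelines (whose compare op is
 * not antisymmetric) are kept side by side.
 */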
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                int cmp_val;
                                int (*cmp_fn)
                                        (struct sync_pt *, struct sync_pt *);

                                cmp_fn = dst_pt->parent->ops->compare;
                                cmp_val = cmp_fn(dst_pt, src_pt);

                                /*
                                 * Out-of-order users like oneshot don't follow
                                 * a timeline ordering.
                                 */
                                if (cmp_val != -cmp_fn(src_pt, dst_pt))
                                        break;

                                if (cmp_val == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);

                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                }
        }

        return 0;
}

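/*
 * Detach vs. free: detach only unlinks the pts from their parent
 * timelines (so sync_timeline_signal() can no longer reach the fence);
 * free below actually releases them.
 */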
static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_pt_free(pt);
        }
}

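/*
 * Look up a fence from a file descriptor.  Takes a file reference that
 * the caller must drop with sync_fence_put().
 */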
struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

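/*
 * Compute the fence's aggregate status from its pts: any negative pt
 * status (an error) wins; otherwise the fence is signaled (1) only if
 * every pt is signaled, and a still-active pt (0) keeps it active.
 */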
static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}

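/*
 * Create a new fence that signals once both @a and @b have signaled.
 * The inputs are left untouched; the result holds duplicates of their
 * pts, collapsed per timeline by sync_fence_merge_pts().
 */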
struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        struct list_head *pos;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_activate(pt);
        }

        /*
         * signal the fence in case one of its pts had already signaled
         * before it was activated
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

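/*
 * Called when @pt signals.  Recomputes the fence status and, on the
 * active -> signaled (or error) transition, moves the async waiters off
 * the fence under waiter_list_lock, then runs their callbacks and wakes
 * synchronous waiters with the lock dropped.
 */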
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}

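/*
 * Register @waiter for an async callback when the fence signals.
 * Returns 0 if the waiter was queued, or the (non-zero) fence status if
 * the fence has already signaled, in which case the callback will never
 * be invoked.
 */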
int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

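/*
 * Cancel a pending async waiter.  Returns 0 if the waiter was removed
 * before its callback ran, or -ENOENT if the callback has already been
 * (or is being) dispatched.
 */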
int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads of fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}

static void sync_pt_log(struct sync_pt *pt)
{
        int status = pt->status;

        pr_cont(" %s_pt %s",
                pt->parent->name,
                sync_status_str(status));

        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                pr_cont("@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                pr_cont(": %s", value);
                pt->parent->ops->timeline_value_str(pt->parent, value,
                                                    sizeof(value));
                pr_cont(" / %s", value);
        }

        pr_cont("\n");

        /* Show additional details for active fences */
        if (pt->status == 0 && pt->parent->ops->pt_log)
                pt->parent->ops->pt_log(pt);
}

void sync_fence_log(struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        pr_info("[%pK] %s: %s\n", fence, fence->name,
                sync_status_str(fence->status));

        pr_info("waiters:\n");

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                pr_info(" %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        pr_info("syncpoints:\n");
        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_log(pt);
        }
}
EXPORT_SYMBOL(sync_fence_log);

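/*
 * Block until the fence signals.  @timeout is in milliseconds: a
 * negative value waits indefinitely, zero just polls the current state.
 * Returns 0 once signaled, -ETIME on timeout, the fence's error status
 * if it signaled in error, or -ERESTARTSYS if interrupted.
 */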
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;
        struct sync_pt *pt;

        trace_sync_wait(fence, 1);
        list_for_each_entry(pt, &fence->pt_list_head, pt_list)
                trace_sync_pt(pt);

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }
        trace_sync_wait(fence, 0);

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%pK]\n", fence->status, fence);
                sync_fence_log(fence);
                return fence->status;
        }

        if (fence->status == 0) {
                if (timeout > 0) {
                        pr_info("fence timeout on [%pK] after %dms\n",
                                fence, jiffies_to_msecs(timeout));
                        sync_fence_log(fence);
                }
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

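/*
 * Typical driver-side lifecycle (illustrative sketch only: the
 * my_timeline_ops variable and the surrounding flow are hypothetical,
 * modeled on drivers such as sw_sync, and are not defined in this file):
 *
 *      tl = sync_timeline_create(&my_timeline_ops,
 *                                sizeof(struct sync_timeline), "my_tl");
 *      pt = sync_pt_create(tl, sizeof(struct sync_pt));
 *      fence = sync_fence_create("my_fence", pt);
 *      fd = get_unused_fd();
 *      sync_fence_install(fence, fd);      (hand fd to userspace)
 *
 * When the hardware work backing pt completes, the driver advances its
 * timeline and calls sync_timeline_signal(tl), which wakes sleepers in
 * sync_fence_wait() and fires sync_fence_wait_async() callbacks.
 */
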
static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        /*
         * Make sure that reads of fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}

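/* SYNC_IOC_WAIT: arg points to a __s32 timeout in milliseconds */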
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

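/*
 * Serialize one pt into the userspace info buffer as a sync_pt_info
 * record, including any driver-specific payload.  Returns the number of
 * bytes written, or -ENOMEM if @size can't hold the record.
 */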
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}

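/*
 * SYNC_IOC_FENCE_INFO: userspace passes a buffer headed by a
 * sync_fence_info_data whose len field holds the buffer size (capped
 * here at 4096 bytes); it is filled with the fence summary followed by
 * one sync_pt_info record per pt.
 */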
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

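/*
 * debugfs support: the "sync" file (typically /sys/kernel/debug/sync)
 * dumps every registered timeline with its child pts, followed by every
 * live fence.
 */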
#ifdef CONFIG_DEBUG_FS
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, " %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));

        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
                        pt->parent->ops->timeline_value_str(pt->parent, value,
                                                            sizeof(value));
                        seq_printf(s, " / %s", value);
                }
        } else if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->timeline_value_str) {
                char value[64];

                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);

                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%pK] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
        .open = sync_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);
#endif