/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
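
/*
 * sync_timeline_create() - creates a new sync timeline
 * @ops:  driver-specific sync_timeline_ops
 * @size: size of the driver's timeline struct; must be at least
 *        sizeof(struct sync_timeline), which is assumed to be embedded
 *        at the start of the driver's struct
 * @name: name for the timeline, copied into obj->name
 *
 * Returns the new timeline (with its initial kref held by the caller),
 * or NULL if @size is too small or allocation fails.
 */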
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;
        smp_wmb();

        /*
         * signal any children that their parent is going away.
         */
        sync_timeline_signal(obj);

        kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
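
/*
 * sync_timeline_signal() - re-checks the timeline's active points
 *
 * Two-phase design: every active pt that now reports signaled is
 * collected onto a local list while active_list_lock is held, then the
 * owning fences are signaled with the lock dropped, so fence waiter
 * callbacks never run under the timeline lock. The extra kref taken on
 * each pt's fence keeps the fence alive between the two phases.
 */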
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);
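
/*
 * sync_pt_create() - creates a new sync point on @parent
 * @parent: timeline the new pt will belong to
 * @size:   size of the driver's pt struct; must be at least
 *          sizeof(struct sync_pt)
 *
 * Takes a ref on @parent, which sync_pt_free() drops again.
 */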
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0)
                goto out;

        list_add_tail(&pt->active_list, &obj->active_list_head);

out:
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        /* anon_inode_getfile() returns an ERR_PTR on failure, never NULL */
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);
        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
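
/*
 * sync_fence_create() - creates a fence containing @pt
 * @name: name for the new fence
 * @pt:   sync_pt to wrap; must not already belong to a fence
 *
 * The fence takes ownership of @pt. Returns NULL if @pt is already
 * owned or if allocation fails.
 */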
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt had already signaled before
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);
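
/*
 * Typical driver flow (a sketch, not taken from this file; my_ops,
 * struct my_timeline and struct my_pt stand for hypothetical
 * driver-specific types, along the lines of the sw_sync example
 * driver):
 *
 *      tl = sync_timeline_create(&my_ops, sizeof(struct my_timeline),
 *                                "my_timeline");
 *      pt = sync_pt_create(tl, sizeof(struct my_pt));
 *      fence = sync_fence_create("my_fence", pt);
 *      fd = get_unused_fd();
 *      sync_fence_install(fence, fd);      and hand the fd to userspace
 *
 * and later, when the underlying work completes:
 *
 *      sync_timeline_signal(tl);
 */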

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
        }

        return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);

                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt)
                                    == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);

                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                }
        }

        return 0;
}
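
/*
 * sync_fence_detach_pts() unhooks a fence's pts from their timelines
 * (so sync_timeline_signal() can no longer reach the fence) without
 * freeing them; sync_fence_free_pts() actually destroys them.
 */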
static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

                sync_pt_free(pt);
        }
}

struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
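
/*
 * Status convention (see sync_status_str()): 1 means signaled, 0 means
 * still active, and a negative errno means the pt ended in an error
 * state. A fence's status is the error of any errored pt, else 0 while
 * any pt is still active, else 1.
 */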
static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}
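
/*
 * sync_fence_merge() - creates a new fence containing copies of all the
 * pts in @a and @b, with pts on the same timeline collapsed to whichever
 * signals later. Returns the new fence, or NULL on allocation failure.
 */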
struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        struct list_head *pos;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_activate(pt);
        }

        /*
         * signal the fence in case one of its pts had already signaled
         * before it was activated
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;

err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}
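
/*
 * sync_fence_wait_async() - registers @waiter to be called once @fence
 * signals. Returns 0 if the waiter was queued, or the fence's nonzero
 * status if it has already signaled (or errored), in which case the
 * callback will not be invoked.
 */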
int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}

static void sync_pt_log(struct sync_pt *pt)
{
        int status = pt->status;

        pr_cont(" %s_pt %s",
                pt->parent->name,
                sync_status_str(status));

        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                pr_cont("@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                pr_cont(": %s", value);
                pt->parent->ops->timeline_value_str(pt->parent, value,
                                                    sizeof(value));
                pr_cont(" / %s", value);
        }

        pr_cont("\n");

        /* Show additional details for active fences */
        if (pt->status == 0 && pt->parent->ops->pt_log)
                pt->parent->ops->pt_log(pt);
}

void sync_fence_log(struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        pr_info("[%p] %s: %s\n", fence, fence->name,
                sync_status_str(fence->status));

        pr_info("waiters:\n");

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                pr_info(" %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        pr_info("syncpoints:\n");
        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_pt_log(pt);
        }
}
EXPORT_SYMBOL(sync_fence_log);
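
/*
 * sync_fence_wait() - waits for @fence to signal
 * @timeout: wait time in milliseconds; < 0 waits indefinitely, and 0
 *           just checks the current status without blocking
 *
 * Returns 0 once signaled, -ETIME on timeout (or if still active with
 * @timeout == 0), the fence's error status, or -ERESTARTSYS if the
 * wait was interrupted.
 */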
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;
        struct sync_pt *pt;

        trace_sync_wait(fence, 1);
        list_for_each_entry(pt, &fence->pt_list_head, pt_list)
                trace_sync_pt(pt);

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }
        trace_sync_wait(fence, 0);

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%p]\n", fence->status, fence);
                sync_fence_log(fence);
                return fence->status;
        }

        if (fence->status == 0) {
                if (timeout > 0) {
                        pr_info("fence timeout on [%p] after %dms\n", fence,
                                jiffies_to_msecs(timeout));
                        sync_fence_log(fence);
                }
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}
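
/*
 * sync_fill_pt_info() - serializes one pt into a userspace-visible
 * struct sync_pt_info at @data, including any driver_data produced by
 * the timeline's fill_driver_data() hook. Returns the number of bytes
 * written, or a negative errno if @size is too small.
 */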
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

#ifdef CONFIG_DEBUG_FS

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, " %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));

        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);

                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];

                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
                        pt->parent->ops->timeline_value_str(pt->parent, value,
                                                            sizeof(value));
                        seq_printf(s, " / %s", value);
                }
        } else if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->timeline_value_str) {
                char value[64];

                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);

                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
        .open           = sync_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);

#endif