/* vmwgfx_marker.c — marker queue used to measure command-stream (FIFO) lag. */
  1. /**************************************************************************
  2. *
  3. * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include "vmwgfx_drv.h"
/*
 * struct vmw_marker - A single in-flight marker on a vmw_marker_queue.
 * @head:      List link onto the owning queue's list.
 * @seqno:     Sequence number this marker is associated with.
 * @submitted: Raw-monotonic timestamp taken when the marker was pushed.
 */
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	struct timespec submitted;
};
  33. void vmw_marker_queue_init(struct vmw_marker_queue *queue)
  34. {
  35. INIT_LIST_HEAD(&queue->head);
  36. queue->lag = ns_to_timespec(0);
  37. getrawmonotonic(&queue->lag_time);
  38. spin_lock_init(&queue->lock);
  39. }
  40. void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
  41. {
  42. struct vmw_marker *marker, *next;
  43. spin_lock(&queue->lock);
  44. list_for_each_entry_safe(marker, next, &queue->head, head) {
  45. kfree(marker);
  46. }
  47. spin_unlock(&queue->lock);
  48. }
  49. int vmw_marker_push(struct vmw_marker_queue *queue,
  50. uint32_t seqno)
  51. {
  52. struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
  53. if (unlikely(!marker))
  54. return -ENOMEM;
  55. marker->seqno = seqno;
  56. getrawmonotonic(&marker->submitted);
  57. spin_lock(&queue->lock);
  58. list_add_tail(&marker->head, &queue->head);
  59. spin_unlock(&queue->lock);
  60. return 0;
  61. }
  62. int vmw_marker_pull(struct vmw_marker_queue *queue,
  63. uint32_t signaled_seqno)
  64. {
  65. struct vmw_marker *marker, *next;
  66. struct timespec now;
  67. bool updated = false;
  68. spin_lock(&queue->lock);
  69. getrawmonotonic(&now);
  70. if (list_empty(&queue->head)) {
  71. queue->lag = ns_to_timespec(0);
  72. queue->lag_time = now;
  73. updated = true;
  74. goto out_unlock;
  75. }
  76. list_for_each_entry_safe(marker, next, &queue->head, head) {
  77. if (signaled_seqno - marker->seqno > (1 << 30))
  78. continue;
  79. queue->lag = timespec_sub(now, marker->submitted);
  80. queue->lag_time = now;
  81. updated = true;
  82. list_del(&marker->head);
  83. kfree(marker);
  84. }
  85. out_unlock:
  86. spin_unlock(&queue->lock);
  87. return (updated) ? 0 : -EBUSY;
  88. }
  89. static struct timespec vmw_timespec_add(struct timespec t1,
  90. struct timespec t2)
  91. {
  92. t1.tv_sec += t2.tv_sec;
  93. t1.tv_nsec += t2.tv_nsec;
  94. if (t1.tv_nsec >= 1000000000L) {
  95. t1.tv_sec += 1;
  96. t1.tv_nsec -= 1000000000L;
  97. }
  98. return t1;
  99. }
  100. static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
  101. {
  102. struct timespec now;
  103. spin_lock(&queue->lock);
  104. getrawmonotonic(&now);
  105. queue->lag = vmw_timespec_add(queue->lag,
  106. timespec_sub(now, queue->lag_time));
  107. queue->lag_time = now;
  108. spin_unlock(&queue->lock);
  109. return queue->lag;
  110. }
  111. static bool vmw_lag_lt(struct vmw_marker_queue *queue,
  112. uint32_t us)
  113. {
  114. struct timespec lag, cond;
  115. cond = ns_to_timespec((s64) us * 1000);
  116. lag = vmw_fifo_lag(queue);
  117. return (timespec_compare(&lag, &cond) < 1);
  118. }
  119. int vmw_wait_lag(struct vmw_private *dev_priv,
  120. struct vmw_marker_queue *queue, uint32_t us)
  121. {
  122. struct vmw_marker *marker;
  123. uint32_t seqno;
  124. int ret;
  125. while (!vmw_lag_lt(queue, us)) {
  126. spin_lock(&queue->lock);
  127. if (list_empty(&queue->head))
  128. seqno = atomic_read(&dev_priv->marker_seq);
  129. else {
  130. marker = list_first_entry(&queue->head,
  131. struct vmw_marker, head);
  132. seqno = marker->seqno;
  133. }
  134. spin_unlock(&queue->lock);
  135. ret = vmw_wait_seqno(dev_priv, false, seqno, true,
  136. 3*HZ);
  137. if (unlikely(ret != 0))
  138. return ret;
  139. (void) vmw_marker_pull(queue, seqno);
  140. }
  141. return 0;
  142. }