vmwgfx_marker.c

/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
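
/*
 * Marker bookkeeping: each marker records a fence sequence number and its
 * submission time, so the driver can estimate how far the GPU lags behind
 * command submission and throttle accordingly.
 */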

#include "vmwgfx_drv.h"
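
/*
 * One queued marker: @seqno identifies the fence, @submitted is the
 * ktime_get_raw_ns() timestamp taken when the seqno was pushed.
 */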
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	u64 submitted;
};
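
/*
 * Initialize an empty queue with no accumulated lag.
 */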
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = 0;
	queue->lag_time = ktime_get_raw_ns();
	spin_lock_init(&queue->lock);
}
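
/*
 * Free every marker still queued.  list_for_each_entry_safe() caches the
 * next entry before the body runs, so freeing the current one mid-walk
 * is safe.
 */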
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_unlock(&queue->lock);
}
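
/*
 * Queue a marker for a newly submitted seqno, timestamping it for later
 * lag measurement.  Returns -ENOMEM if the marker cannot be allocated.
 */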
int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

	if (unlikely(!marker))
		return -ENOMEM;

	marker->seqno = seqno;
	marker->submitted = ktime_get_raw_ns();
	spin_lock(&queue->lock);
	list_add_tail(&marker->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}
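
/*
 * Retire every marker whose seqno has signaled, taking the lag from the
 * newest one retired.  The unsigned difference test treats seqnos more
 * than 1 << 30 ahead of @signaled_seqno as sequence-number wrap-around,
 * i.e. markers that have not signaled yet.  Returns -EBUSY if the queue
 * holds only unsignaled markers and the lag was left untouched.
 */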
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	bool updated = false;
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();

	if (list_empty(&queue->head)) {
		queue->lag = 0;
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = now - marker->submitted;
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
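
/*
 * Return the current lag estimate, grown by the wall-clock time elapsed
 * since it was last updated.
 */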
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();
	queue->lag += now - queue->lag_time;
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}
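
/*
 * True if the current lag is at most @us microseconds (despite the "lt"
 * in the name, the comparison is <=).
 */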
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	u64 cond = (u64) us * NSEC_PER_USEC;

	return vmw_fifo_lag(queue) <= cond;
}
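
/*
 * Block until the fifo lag is at most @us microseconds: repeatedly wait
 * on the oldest queued seqno (or on the most recently issued one if the
 * queue is empty) and retire markers as they signal.
 */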
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_marker_pull(queue, seqno);
	}

	return 0;
}
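
/*
 * Illustrative sketch, not part of the original file: one way a submission
 * path could combine vmw_wait_lag() and vmw_marker_push() to throttle
 * itself to roughly 100 ms of outstanding work.  The helper name and the
 * 100000 us budget are made up for the example; only the two calls it
 * wraps come from this file.  Guarded out so it cannot affect the build.
 */
#if 0
static int vmw_example_throttled_submit(struct vmw_private *dev_priv,
					struct vmw_marker_queue *queue,
					uint32_t seqno)
{
	int ret;

	/* Wait until less than ~100 ms of submitted work is outstanding. */
	ret = vmw_wait_lag(dev_priv, queue, 100000);
	if (ret)
		return ret;

	/* Timestamp this seqno so its turnaround feeds the lag estimate. */
	return vmw_marker_push(queue, seqno);
}
#endif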