vmwgfx_gmr.c

/**************************************************************************
 *
 * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"

#define VMW_PPN_SIZE (sizeof(unsigned long))
/* A future safe maximum remap size. */
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
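
/*
 * With 8-byte PPN entries (64-bit kernels) this works out to
 * (31 * 1024) / 8 = 3968 pages per SVGA_CMD_REMAP_GMR2 command, or
 * about 15.5 MiB of guest memory per command with 4 KiB pages.
 */

/*
 * vmw_gmr2_bind - Bind a page array to a GMR id using the GMR2 commands.
 *
 * Reserves FIFO space for one SVGA_CMD_DEFINE_GMR2 command plus as many
 * SVGA_CMD_REMAP_GMR2 commands as needed to stay within the
 * VMW_PPN_PER_REMAP limit, then writes the page frame number of each of
 * the @num_pages pages into the command buffer.
 *
 * Returns 0 on success, or -ENOMEM if FIFO space cannot be reserved.
 */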
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct page *pages[],
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP +
		((num_pages % VMW_PPN_PER_REMAP) > 0);
	uint32_t remap_size = VMW_PPN_SIZE * num_pages +
		(sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;

	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);
	/*
	 * Need to split the command if there are too many
	 * pages that go into the GMR.
	 */
	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);

		for (i = 0; i < nr; ++i) {
			if (VMW_PPN_SIZE <= 4)
				*cmd = page_to_pfn(*pages++);
			else
				*((uint64_t *)cmd) = page_to_pfn(*pages++);

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
		}

		num_pages -= nr;
		remap_pos += nr;
	}

	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
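
/*
 * vmw_gmr2_unbind - Release a GMR2 binding by redefining the GMR
 * with zero pages.
 */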
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + 4;
	uint32_t *cmd;

	cmd = vmw_fifo_reserve(dev_priv, define_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("GMR2 unbind failed.\n");
		return;
	}
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_fifo_commit(dev_priv, define_size);
}

/**
 * vmw_gmr_build_descriptors - Build a chain of SVGA guest memory
 * descriptor pages for the legacy (pre-GMR2) bind path.
 *
 * Each freshly allocated page holds an array of struct
 * svga_guest_mem_descriptor entries; the last slot of each page is
 * reserved as a link. Runs of physically contiguous pages are folded
 * into a single descriptor by bumping its num_pages count. The link
 * slot points to the next descriptor page (num_pages == 0), and the
 * chain is terminated by a descriptor with both ppn and num_pages
 * set to zero.
 *
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

	desc_per_page = PAGE_SIZE /
		sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point the previous page's terminating descriptor to
		 * this page before unmapping it.
		 */
		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual);
		}

		page_virtual = kmap_atomic(page);
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {

				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				uint32_t tmp =
					le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return ret;
}
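
/*
 * vmw_gmr_free_descriptors - Free all descriptor pages built by
 * vmw_gmr_build_descriptors().
 */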
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
}
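
/*
 * vmw_gmr_fire_descriptors - Point the device at a descriptor chain.
 *
 * Writes the GMR id followed by the page frame number of the first
 * descriptor page to the SVGA registers, with memory barriers keeping
 * the two register writes ordered.
 */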
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, struct list_head *desc_pages)
{
	struct page *page;

	if (unlikely(list_empty(desc_pages)))
		return;

	page = list_entry(desc_pages->next, struct page, lru);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
	mb();

	mutex_unlock(&dev_priv->hw_mutex);
}

/**
 * vmw_gmr_count_descriptors - Count the descriptors needed to describe
 * a page array, folding each run of physically contiguous pages into a
 * single descriptor the same way vmw_gmr_build_descriptors() does.
 *
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 */
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
					       unsigned long num_pages)
{
	unsigned long prev_pfn = ~(0UL);
	unsigned long pfn;
	unsigned long descriptors = 0;

	while (num_pages--) {
		pfn = page_to_pfn(*pages++);
		if (prev_pfn + 1 != pfn)
			++descriptors;
		prev_pfn = pfn;
	}

	return descriptors;
}
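
/*
 * vmw_gmr_bind - Bind a page array to a GMR id.
 *
 * Uses the GMR2 FIFO commands when the device advertises SVGA_CAP_GMR2,
 * and falls back to building and firing a legacy descriptor chain when
 * only SVGA_CAP_GMR is available. Returns -EINVAL if the device supports
 * neither, or if the page array would need more descriptors than the
 * device allows. (Typically called from the driver's TTM backend bind
 * hook; exact call sites may vary between kernel versions.)
 */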
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 struct page *pages[],
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	int ret;

	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	if (vmw_gmr_count_descriptors(pages, num_pages) >
	    dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
	vmw_gmr_free_descriptors(&desc_pages);

	return 0;
}
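
/*
 * vmw_gmr_unbind - Release a GMR binding.
 *
 * On GMR2-capable devices this redefines the GMR with zero pages;
 * otherwise the legacy GMR_DESCRIPTOR register is written with 0.
 */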
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}