amdgpu_queue_mgr.c

/*
 * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andres Rodriguez
 */

#include "amdgpu.h"
#include "amdgpu_ring.h"
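
/*
 * Set up a single per-IP mapper: record which HW IP block it serves and
 * clear its userspace-ring to kernel-ring cache.
 */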
static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
                                    int hw_ip)
{
        if (!mapper)
                return -EINVAL;

        if (hw_ip > AMDGPU_MAX_IP_NUM)
                return -EINVAL;

        mapper->hw_ip = hw_ip;
        mutex_init(&mapper->lock);

        memset(mapper->queue_map, 0, sizeof(mapper->queue_map));

        return 0;
}
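
/* Return the cached kernel ring for a user ring id, or NULL if unmapped. */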
static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
                                                 int ring)
{
        return mapper->queue_map[ring];
}
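
/* Record a new user ring to kernel ring mapping; remapping is not allowed. */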
static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
                                    int ring, struct amdgpu_ring *pring)
{
        if (WARN_ON(mapper->queue_map[ring])) {
                DRM_ERROR("Unexpected ring re-map\n");
                return -EINVAL;
        }

        mapper->queue_map[ring] = pring;

        return 0;
}
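
/*
 * Identity mapping policy: user ring i is mapped straight to kernel ring i
 * of the mapper's HW IP block, then cached.
 */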
static int amdgpu_identity_map(struct amdgpu_device *adev,
                               struct amdgpu_queue_mapper *mapper,
                               u32 ring,
                               struct amdgpu_ring **out_ring)
{
        switch (mapper->hw_ip) {
        case AMDGPU_HW_IP_GFX:
                *out_ring = &adev->gfx.gfx_ring[ring];
                break;
        case AMDGPU_HW_IP_COMPUTE:
                *out_ring = &adev->gfx.compute_ring[ring];
                break;
        case AMDGPU_HW_IP_DMA:
                *out_ring = &adev->sdma.instance[ring].ring;
                break;
        case AMDGPU_HW_IP_UVD:
                *out_ring = &adev->uvd.ring;
                break;
        case AMDGPU_HW_IP_VCE:
                *out_ring = &adev->vce.ring[ring];
                break;
        case AMDGPU_HW_IP_UVD_ENC:
                *out_ring = &adev->uvd.ring_enc[ring];
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                *out_ring = &adev->vcn.ring_dec;
                break;
        case AMDGPU_HW_IP_VCN_ENC:
                *out_ring = &adev->vcn.ring_enc[ring];
                break;
        default:
                *out_ring = NULL;
                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
                return -EINVAL;
        }

        return amdgpu_update_cached_map(mapper, ring, *out_ring);
}
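
/* Translate a HW IP enum into the corresponding ring type. */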
static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
{
        switch (hw_ip) {
        case AMDGPU_HW_IP_GFX:
                return AMDGPU_RING_TYPE_GFX;
        case AMDGPU_HW_IP_COMPUTE:
                return AMDGPU_RING_TYPE_COMPUTE;
        case AMDGPU_HW_IP_DMA:
                return AMDGPU_RING_TYPE_SDMA;
        case AMDGPU_HW_IP_UVD:
                return AMDGPU_RING_TYPE_UVD;
        case AMDGPU_HW_IP_VCE:
                return AMDGPU_RING_TYPE_VCE;
        default:
                DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
                return -1;
        }
}
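
/*
 * LRU mapping policy: ask the ring LRU for the least recently used ring of
 * the right type, excluding rings this mapper has already handed out, then
 * cache the result.
 */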
static int amdgpu_lru_map(struct amdgpu_device *adev,
                          struct amdgpu_queue_mapper *mapper,
                          u32 user_ring,
                          struct amdgpu_ring **out_ring)
{
        int r, i, j;
        int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
        int ring_blacklist[AMDGPU_MAX_RINGS];
        struct amdgpu_ring *ring;

        /* 0 is a valid ring index, so initialize to -1 */
        memset(ring_blacklist, 0xff, sizeof(ring_blacklist));

        for (i = 0, j = 0; i < AMDGPU_MAX_RINGS; i++) {
                ring = mapper->queue_map[i];
                if (ring)
                        ring_blacklist[j++] = ring->idx;
        }

        r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
                                j, out_ring);
        if (r)
                return r;

        return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
}

/**
 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * Initialize the selected @mgr (all ASICs).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr)
{
        int i, r;

        if (!adev || !mgr)
                return -EINVAL;

        memset(mgr, 0, sizeof(*mgr));

        for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
                r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * De-initialize the selected @mgr (all ASICs).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
                          struct amdgpu_queue_mgr *mgr)
{
        return 0;
}

/**
 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 * @hw_ip: HW IP enum
 * @instance: HW instance
 * @ring: user ring id
 * @out_ring: pointer to the mapped amdgpu_ring
 *
 * Map a userspace ring id to an appropriate kernel ring. The mapping
 * policy is chosen per HW IP block: identity mapping for most IPs,
 * LRU mapping for compute and DMA.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         struct amdgpu_queue_mgr *mgr,
                         u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring)
{
        int r, ip_num_rings;
        struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];

        if (!adev || !mgr || !out_ring)
                return -EINVAL;

        if (hw_ip >= AMDGPU_MAX_IP_NUM)
                return -EINVAL;

        if (ring >= AMDGPU_MAX_RINGS)
                return -EINVAL;

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_ERROR("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        switch (hw_ip) {
        case AMDGPU_HW_IP_GFX:
                ip_num_rings = adev->gfx.num_gfx_rings;
                break;
        case AMDGPU_HW_IP_COMPUTE:
                ip_num_rings = adev->gfx.num_compute_rings;
                break;
        case AMDGPU_HW_IP_DMA:
                ip_num_rings = adev->sdma.num_instances;
                break;
        case AMDGPU_HW_IP_UVD:
                ip_num_rings = 1;
                break;
        case AMDGPU_HW_IP_VCE:
                ip_num_rings = adev->vce.num_rings;
                break;
        case AMDGPU_HW_IP_UVD_ENC:
                ip_num_rings = adev->uvd.num_enc_rings;
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                ip_num_rings = 1;
                break;
        case AMDGPU_HW_IP_VCN_ENC:
                ip_num_rings = adev->vcn.num_enc_rings;
                break;
        default:
                DRM_ERROR("unknown ip type: %d\n", hw_ip);
                return -EINVAL;
        }

        if (ring >= ip_num_rings) {
                DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
                          ring, ip_num_rings, hw_ip);
                return -EINVAL;
        }

        mutex_lock(&mapper->lock);

        *out_ring = amdgpu_get_cached_map(mapper, ring);
        if (*out_ring) {
                /* cache hit */
                r = 0;
                goto out_unlock;
        }

        switch (mapper->hw_ip) {
        case AMDGPU_HW_IP_GFX:
        case AMDGPU_HW_IP_UVD:
        case AMDGPU_HW_IP_VCE:
        case AMDGPU_HW_IP_UVD_ENC:
        case AMDGPU_HW_IP_VCN_DEC:
        case AMDGPU_HW_IP_VCN_ENC:
                r = amdgpu_identity_map(adev, mapper, ring, out_ring);
                break;
        case AMDGPU_HW_IP_DMA:
        case AMDGPU_HW_IP_COMPUTE:
                r = amdgpu_lru_map(adev, mapper, ring, out_ring);
                break;
        default:
                *out_ring = NULL;
                r = -EINVAL;
                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
        }

out_unlock:
        mutex_unlock(&mapper->lock);
        return r;
}
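
/*
 * Example call site (illustrative sketch only, assuming a context object
 * "ctx" that embeds a struct amdgpu_queue_mgr as "queue_mgr" and a
 * userspace-supplied ring id "user_ring"):
 *
 *     struct amdgpu_ring *ring;
 *     int r;
 *
 *     r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, AMDGPU_HW_IP_COMPUTE,
 *                              0, user_ring, &ring);
 *     if (r)
 *             return r;
 *
 * On success, "ring" points at the kernel ring backing the user's ring id.
 */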