  1. /*
  2. * Copyright 2017 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Xiangliang.Yu@amd.com
  23. */
  24. #include "amdgpu.h"
  25. #include "vi.h"
  26. #include "bif/bif_5_0_d.h"
  27. #include "bif/bif_5_0_sh_mask.h"
  28. #include "vid.h"
  29. #include "gca/gfx_8_0_d.h"
  30. #include "gca/gfx_8_0_sh_mask.h"
  31. #include "gmc_v8_0.h"
  32. #include "gfx_v8_0.h"
  33. #include "sdma_v3_0.h"
  34. #include "tonga_ih.h"
  35. #include "gmc/gmc_8_2_d.h"
  36. #include "gmc/gmc_8_2_sh_mask.h"
  37. #include "oss/oss_3_0_d.h"
  38. #include "oss/oss_3_0_sh_mask.h"
  39. #include "gca/gfx_8_0_sh_mask.h"
  40. #include "dce/dce_10_0_d.h"
  41. #include "dce/dce_10_0_sh_mask.h"
  42. #include "smu/smu_7_1_3_d.h"
  43. #include "mxgpu_vi.h"
  44. /* VI golden setting */
/*
 * Fiji VF MGCG/CGCG golden init sequence.
 * Entries are {register offset, AND mask, OR value} triplets consumed by
 * amdgpu_program_register_sequence() in xgpu_vi_init_golden_registers().
 */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
/*
 * Fiji VF golden settings (A10 revision).
 * {register, AND mask, OR value} triplets applied at VF init time.
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
/*
 * Fiji VF common golden settings applied to all revisions.
 * {register, AND mask, OR value} triplets.
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
/*
 * Tonga VF MGCG/CGCG golden init sequence.
 * {register, AND mask, OR value} triplets; includes per-CU (CU0-CU7)
 * clock gating setup in addition to the block-level CGTT registers.
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
/*
 * Tonga VF golden settings (A11 revision).
 * {register, AND mask, OR value} triplets applied at VF init time.
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
/*
 * Tonga VF common golden settings applied to all revisions.
 * {register, AND mask, OR value} triplets.
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
  270. void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
  271. {
  272. switch (adev->asic_type) {
  273. case CHIP_FIJI:
  274. amdgpu_program_register_sequence(adev,
  275. xgpu_fiji_mgcg_cgcg_init,
  276. (const u32)ARRAY_SIZE(
  277. xgpu_fiji_mgcg_cgcg_init));
  278. amdgpu_program_register_sequence(adev,
  279. xgpu_fiji_golden_settings_a10,
  280. (const u32)ARRAY_SIZE(
  281. xgpu_fiji_golden_settings_a10));
  282. amdgpu_program_register_sequence(adev,
  283. xgpu_fiji_golden_common_all,
  284. (const u32)ARRAY_SIZE(
  285. xgpu_fiji_golden_common_all));
  286. break;
  287. case CHIP_TONGA:
  288. amdgpu_program_register_sequence(adev,
  289. xgpu_tonga_mgcg_cgcg_init,
  290. (const u32)ARRAY_SIZE(
  291. xgpu_tonga_mgcg_cgcg_init));
  292. amdgpu_program_register_sequence(adev,
  293. xgpu_tonga_golden_settings_a11,
  294. (const u32)ARRAY_SIZE(
  295. xgpu_tonga_golden_settings_a11));
  296. amdgpu_program_register_sequence(adev,
  297. xgpu_tonga_golden_common_all,
  298. (const u32)ARRAY_SIZE(
  299. xgpu_tonga_golden_common_all));
  300. break;
  301. default:
  302. BUG_ON("Doesn't support chip type.\n");
  303. break;
  304. }
  305. }
  306. /*
  307. * Mailbox communication between GPU hypervisor and VFs
  308. */
  309. static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
  310. {
  311. u32 reg;
  312. int timeout = VI_MAILBOX_TIMEDOUT;
  313. u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
  314. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  315. reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
  316. WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
  317. /*Wait for RCV_MSG_VALID to be 0*/
  318. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  319. while (reg & mask) {
  320. if (timeout <= 0) {
  321. pr_err("RCV_MSG_VALID is not cleared\n");
  322. break;
  323. }
  324. mdelay(1);
  325. timeout -=1;
  326. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  327. }
  328. }
  329. static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
  330. {
  331. u32 reg;
  332. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  333. reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
  334. TRN_MSG_VALID, val ? 1 : 0);
  335. WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
  336. }
  337. static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
  338. enum idh_request req)
  339. {
  340. u32 reg;
  341. reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
  342. reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
  343. MSGBUF_DATA, req);
  344. WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
  345. xgpu_vi_mailbox_set_valid(adev, true);
  346. }
  347. static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
  348. enum idh_event event)
  349. {
  350. u32 reg;
  351. u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
  352. /* workaround: host driver doesn't set VALID for CMPL now */
  353. if (event != IDH_FLR_NOTIFICATION_CMPL) {
  354. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  355. if (!(reg & mask))
  356. return -ENOENT;
  357. }
  358. reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
  359. if (reg != event)
  360. return -ENOENT;
  361. /* send ack to PF */
  362. xgpu_vi_mailbox_send_ack(adev);
  363. return 0;
  364. }
  365. static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
  366. {
  367. int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
  368. u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
  369. u32 reg;
  370. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  371. while (!(reg & mask)) {
  372. if (timeout <= 0) {
  373. pr_err("Doesn't get ack from pf.\n");
  374. r = -ETIME;
  375. break;
  376. }
  377. mdelay(5);
  378. timeout -= 5;
  379. reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
  380. }
  381. return r;
  382. }
  383. static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
  384. {
  385. int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
  386. r = xgpu_vi_mailbox_rcv_msg(adev, event);
  387. while (r) {
  388. if (timeout <= 0) {
  389. pr_err("Doesn't get ack from pf.\n");
  390. r = -ETIME;
  391. break;
  392. }
  393. mdelay(5);
  394. timeout -= 5;
  395. r = xgpu_vi_mailbox_rcv_msg(adev, event);
  396. }
  397. return r;
  398. }
  399. static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
  400. enum idh_request request)
  401. {
  402. int r;
  403. xgpu_vi_mailbox_trans_msg(adev, request);
  404. /* start to poll ack */
  405. r = xgpu_vi_poll_ack(adev);
  406. if (r)
  407. return r;
  408. xgpu_vi_mailbox_set_valid(adev, false);
  409. /* start to check msg if request is idh_req_gpu_init_access */
  410. if (request == IDH_REQ_GPU_INIT_ACCESS ||
  411. request == IDH_REQ_GPU_FINI_ACCESS ||
  412. request == IDH_REQ_GPU_RESET_ACCESS) {
  413. r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
  414. if (r)
  415. pr_err("Doesn't get ack from pf, continue\n");
  416. }
  417. return 0;
  418. }
/*
 * Ask the hypervisor for a GPU reset via the mailbox handshake.
 * Returns 0 on success or a negative errno from the handshake.
 */
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
  423. static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
  424. bool init)
  425. {
  426. enum idh_request req;
  427. req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
  428. return xgpu_vi_send_access_requests(adev, req);
  429. }
  430. static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
  431. bool init)
  432. {
  433. enum idh_request req;
  434. int r = 0;
  435. req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
  436. r = xgpu_vi_send_access_requests(adev, req);
  437. return r;
  438. }
/* Mailbox interrupt support */
/*
 * Process handler for the mailbox ack interrupt.  Acks are consumed by
 * synchronous polling (xgpu_vi_poll_ack), so the interrupt itself needs
 * no action beyond a debug trace.
 */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
  447. static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
  448. struct amdgpu_irq_src *src,
  449. unsigned type,
  450. enum amdgpu_interrupt_state state)
  451. {
  452. u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
  453. tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
  454. (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
  455. WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
  456. return 0;
  457. }
  458. static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
  459. {
  460. struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
  461. struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
  462. /* wait until RCV_MSG become 3 */
  463. if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
  464. pr_err("failed to recieve FLR_CMPL\n");
  465. return;
  466. }
  467. /* Trigger recovery due to world switch failure */
  468. amdgpu_sriov_gpu_reset(adev, NULL);
  469. }
  470. static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
  471. struct amdgpu_irq_src *src,
  472. unsigned type,
  473. enum amdgpu_interrupt_state state)
  474. {
  475. u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
  476. tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
  477. (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
  478. WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
  479. return 0;
  480. }
/*
 * Process handler for the mailbox message-received interrupt.  If the
 * pending message is an FLR notification (and the driver-side TDR is
 * disabled), schedule the deferred FLR work.
 */
static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR disabled */
	if (amdgpu_lockup_timeout == 0) {
		/* see what event we get */
		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* only handle FLR_NOTIFY now */
		if (!r)
			schedule_work(&adev->virt.flr_work);
	}

	return 0;
}
/* IRQ callbacks for the mailbox "message acked by PF" interrupt source. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};

/* IRQ callbacks for the mailbox "message received from PF" interrupt source. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
/* Wire the mailbox ack/receive IRQ sources to their callback tables. */
void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
}
/*
 * Register the two mailbox interrupt sources with the legacy IH client:
 * IV source id 135 for message-received and 138 for ack.  If the second
 * registration fails, the first source's reference is dropped again.
 * Returns 0 on success or the amdgpu_irq_add_id() error.
 */
int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}
  524. int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
  525. {
  526. int r;
  527. r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
  528. if (r)
  529. return r;
  530. r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
  531. if (r) {
  532. amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
  533. return r;
  534. }
  535. INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
  536. return 0;
  537. }
/* Drop the references taken in xgpu_vi_mailbox_get_irq(). */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
/* SR-IOV virtualization ops for VI virtual functions, consumed by the
 * amdgpu virt layer. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu = xgpu_vi_request_full_gpu_access,
	.rel_full_gpu = xgpu_vi_release_full_gpu_access,
	.reset_gpu = xgpu_vi_request_reset,
	.trans_msg = NULL, /* Does not need to trans VF errors to host. */
};