sdma_v3_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
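
/**
 * sdma_v3_0_init_golden_registers - program golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Apply the per-ASIC clockgating init values and "golden" register
 * overrides before the SDMA engines are started (VI).
 */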
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
				fiji_mgcg_cgcg_init,
				(const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
				golden_settings_fiji_a10,
				(const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
				tonga_mgcg_cgcg_init,
				(const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
				golden_settings_tonga_a11,
				(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_program_register_sequence(adev,
				golden_settings_polaris11_a11,
				(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
				golden_settings_polaris10_a11,
				(const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
				cz_mgcg_cgcg_init,
				(const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
				cz_golden_settings_a11,
				(const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
				stoney_mgcg_cgcg_init,
				(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
				stoney_golden_settings_a11,
				(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}
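
/**
 * sdma_v3_0_free_microcode - release the sDMA firmware images
 *
 * @adev: amdgpu_device pointer
 *
 * Drop the firmware references requested in sdma_v3_0_init_microcode().
 */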
static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
	}
}
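
/**
 * sdma_v3_0_ring_insert_nop - insert NOP packets on the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the ring with NOPs, folding them into a single burst NOP
 * packet when the firmware supports it.
 */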
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vm_id, bool ctx_switch)
{
	u32 vmid = vm_id & 0xf;

	/* IB packet must end on a 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
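
/**
 * sdma_v3_0_ring_emit_hdp_invalidate - invalidate HDP on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an SRBM write of 1 to mmHDP_DEBUG0, which the driver uses to
 * invalidate the HDP cache.
 */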
static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmHDP_DEBUG0);
	amdgpu_ring_write(ring, 1);
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @fence: amdgpu fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
				  "clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable) {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
			if (amdgpu_sdma_phase_quantum) {
				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
				       phase_quantum);
				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
				       phase_quantum);
			}
		} else {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
		}

		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		amdgpu_ring_clear_ring(ring);
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		sdma_v3_0_ring_set_wptr(ring);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r, i;

	if (!adev->pp_enabled) {
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
			r = sdma_v3_0_load_microcode(adev);
			if (r)
				return r;
		} else {
			for (i = 0; i < adev->sdma.num_instances; i++) {
				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
						(i == 0) ?
						AMDGPU_UCODE_ID_SDMA0 :
						AMDGPU_UCODE_ID_SDMA1);
				if (r)
					return -EINVAL;
			}
		}
	}

	/* disable sdma engine before programming it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VI).
 */
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm: amdgpu_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
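
/**
 * sdma_v3_0_early_init - set up SDMA instance count and callbacks
 *
 * @handle: amdgpu_device pointer
 *
 * Pick the number of SDMA instances for the ASIC and hook up the
 * ring, buffer, VM PTE and interrupt callbacks.
 */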
static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}
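
/**
 * sdma_v3_0_sw_init - register IRQ sources, load firmware and init rings
 *
 * @handle: amdgpu_device pointer
 *
 * Add the SDMA trap and illegal-instruction interrupt sources, load the
 * microcode and initialize one doorbell-backed ring per SDMA instance.
 */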
static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (i == 0) ?
			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v3_0_free_microcode(adev);
	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
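
/**
 * sdma_v3_0_check_soft_reset - check whether SDMA needs a soft reset
 *
 * @handle: amdgpu_device pointer
 *
 * Check SRBM_STATUS2 for busy SDMA engines and record the required
 * SRBM soft reset bits; returns true if a reset is needed.
 */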
static bool sdma_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		adev->sdma.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->sdma.srbm_soft_reset = 0;
		return false;
	}
}

static int sdma_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_ctx_switch_enable(adev, false);
		sdma_v3_0_enable(adev, false);
	}

	return 0;
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_gfx_resume(adev);
		sdma_v3_0_rlc_resume(adev);
	}

	return 0;
}

static int sdma_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
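
/**
 * sdma_v3_0_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source structure
 * @type: which SDMA instance's trap to configure
 * @state: interrupt state (enable or disable)
 *
 * Toggle TRAP_ENABLE in SDMA0_CNTL for the requested instance.
 */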
static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
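
/**
 * sdma_v3_0_process_trap_irq - handle an SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source structure
 * @entry: interrupt vector entry
 *
 * Decode the instance and queue from the IV entry and run fence
 * processing on the matching gfx ring.
 */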
static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
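
/**
 * sdma_v3_0_update_sdma_medium_grain_clock_gating - toggle SDMA MGCG
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable medium grain clock gating
 *
 * When enabling MGCG, clear the SOFT_OVERRIDE bits in SDMA0_CLK_CTRL
 * for each instance so the clocks can be gated; otherwise set them.
 */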
  1307. static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
  1308. struct amdgpu_device *adev,
  1309. bool enable)
  1310. {
  1311. uint32_t temp, data;
  1312. int i;
  1313. if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
  1314. for (i = 0; i < adev->sdma.num_instances; i++) {
  1315. temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
  1316. data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
  1317. SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
  1318. SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
  1319. SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
  1320. SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
  1321. SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
  1322. SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
  1323. SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
  1324. if (data != temp)
  1325. WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
  1326. }
  1327. } else {
  1328. for (i = 0; i < adev->sdma.num_instances; i++) {
  1329. temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
  1330. data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
  1331. SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
  1332. SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
  1333. SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
  1334. SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
  1335. SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
  1336. SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
  1337. SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
  1338. if (data != temp)
  1339. WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
  1340. }
  1341. }
  1342. }
static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	}
}
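
/*
 * Clockgating is left untouched under SR-IOV; otherwise MGCG and light
 * sleep are applied together on the ASICs that support them.
 */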
static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
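
/*
 * Report the current clockgating state from the instance 0 registers:
 * MGCG is active while SOFT_OVERRIDE0 is clear, light sleep while
 * MEM_POWER_OVERRIDE is set.
 */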
static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.name = "sdma_v3_0",
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.check_soft_reset = sdma_v3_0_check_soft_reset,
	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
	.post_soft_reset = sdma_v3_0_post_soft_reset,
	.soft_reset = sdma_v3_0_soft_reset,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
	.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v3_0_ring_emit_hdp_flush */
		3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
		12 + /* sdma_v3_0_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
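
/*
 * Register SDMA instance 0 as the buffer move engine for TTM unless
 * another engine has already been installed.
 */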
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte = sdma_v3_0_vm_copy_pte,
	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};
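
/*
 * Register the SDMA rings for GPUVM page table updates unless another
 * engine has already been installed.
 */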
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};