gmc_v7_0.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
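
/**
 * gmc_v7_0_init_golden_registers - apply golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Program the per-ASIC golden register sequences; currently only
 * Topaz/Iceland has entries here.
 */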
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}
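
/**
 * gmc_v7_0_mc_stop - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, then blackout the MC and block CPU
 * access to the framebuffer so the MC can be reprogrammed safely.
 */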
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}
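
/**
 * gmc_v7_0_mc_resume - start the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU access
 * to the framebuffer.
 */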
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		BUG();
	}

	if (adev->asic_type == CHIP_TOPAZ)
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);

	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}
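
/**
 * gmc_v7_0_vram_gtt_location - place VRAM and GTT in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller info structure
 *
 * Read the VRAM base from the hardware, clamp oversized VRAM so that
 * at least 1GB of address space remains for the GTT, and let the core
 * helpers pick the final VRAM and GART locations.
 */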
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, base);
	amdgpu_gart_location(adev, mc);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);

	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->mc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->mc.vram_width = numchan * chansize;
	}
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB on CIK */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->mc.aper_size = adev->mc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->mc.visible_vram_size = adev->mc.aper_size;
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ:    /* no MM engines */
		default:
			adev->mc.gart_size = 256ULL << 20;
			break;
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
		case CHIP_HAWAII:  /* UVD, VCE do not support GPUVM */
		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
			adev->mc.gart_size = 1024ULL << 20;
			break;
#endif
		}
	} else {
		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint64_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
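
/**
 * gmc_v7_0_get_vm_pte_flags - translate generic VM flags to hw PTE flags
 *
 * @adev: amdgpu_device pointer
 * @flags: AMDGPU_VM_PAGE_* mapping flags
 *
 * Convert the generic mapping flags into the CIK PTE
 * READABLE/WRITEABLE/PRT bits.
 */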
static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
					  uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	return pte_flag;
}
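
/**
 * gmc_v7_0_get_vm_pde - get the PDE value for a page table base address
 *
 * @adev: amdgpu_device pointer
 * @addr: page table base address
 *
 * On CIK the PDE is simply the 40-bit, 4K-aligned address itself;
 * the BUG_ON catches addresses that violate that constraint.
 */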
static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
{
	BUG_ON(addr & 0xFFFFFF0000000FFFULL);
	return addr;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->mc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->mc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn;

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp, field;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);

	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);

	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
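
/**
 * gmc_v7_0_gart_init - allocate the GART table
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common gart structure and allocate the GART
 * page table in VRAM (8 bytes per GPU page).
 * Returns 0 for success, error for failure.
 */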
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	/* the fault client name is packed as four ASCII bytes */
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}
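
/*
 * Clock gating register tables: mc_cg_registers lists the registers
 * touched, mc_cg_ls_en holds the matching memory light sleep enable
 * masks and mc_cg_en the matching clock gating enable masks.
 */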
static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};
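
/*
 * The helpers below toggle memory light sleep and medium grain clock
 * gating for the MC, BIF and HDP blocks, honoring the corresponding
 * AMD_CG_SUPPORT_* bits in adev->cg_flags and only writing back
 * registers whose value actually changed.
 */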
static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
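
/**
 * gmc_v7_0_convert_vram_type - map MC_SEQ_MISC0 memory type to VRAM type
 *
 * @mc_seq_vram_type: memory type field read from MC_SEQ_MISC0
 *
 * Translate the hardware memory type encoding into the driver's
 * AMDGPU_VRAM_TYPE_* enumeration.
 */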
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
	adev->mc.shared_aperture_end =
		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
	adev->mc.private_aperture_start =
		adev->mc.shared_aperture_end + 1;
	adev->mc.private_aperture_end =
		adev->mc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
	else
		return 0;
}
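
/*
 * sw_init sets up everything that does not yet touch the hardware
 * configuration: VRAM type detection, the VM fault interrupt sources,
 * VM size, DMA masks, MC firmware, the memory manager and the GART
 * table.
 */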
static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 64GB ((1 << 24) 4k pages).
	 * Max GPUVM size for CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 4);
	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	adev->mc.stolen_size = 256 * 1024;

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		pr_warn("amdgpu: No suitable DMA available\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		pr_warn("amdgpu: No coherent DMA available\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.num_level = 1;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v7_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
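
/*
 * soft_reset checks SRBM_STATUS for busy VM/MC blocks and, if any are
 * stuck, stops the MC, pulses the matching SRBM_SOFT_RESET bits and
 * resumes the MC afterwards.
 */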
static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}
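
/*
 * vm_fault_interrupt_state enables or disables the protection fault
 * interrupt bits in VM_CONTEXT0_CNTL (system context) and
 * VM_CONTEXT1_CNTL (user VMs).
 */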
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v7_0_get_vm_pde
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};