nbio_v6_1.c

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_default.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/vega10_enum.h"
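
/*
 * SMN addresses of the BIF/PCIE registers used below; they are not part of
 * the regular MMIO register space and are accessed through the PCIE
 * index/data pair via RREG32_PCIE()/WREG32_PCIE().
 */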
#define smnCPM_CONTROL		0x11180460
#define smnPCIE_CNTL2		0x11180070
#define smnPCIE_CONFIG_CNTL	0x11180044

u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
					uint32_t idx)
{
	return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
}

void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
					 uint32_t idx, uint32_t val)
{
	WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
}

void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			     BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
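
/* A write to HDP_MEM_COHERENCY_FLUSH_CNTL kicks off an HDP memory coherency flush. */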
void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
	WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}

u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}

static const u32 nbio_sdma_doorbell_range_reg[] = {
	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
};

void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
				   bool use_doorbell, int doorbell_index)
{
	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
}

void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
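
/*
 * The self-ring aperture maps a guest physical address range onto the
 * doorbell BAR so the GPU can ring its own doorbells; its base is programmed
 * with the doorbell aperture address (adev->doorbell.base).
 */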
void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
						 bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
				 bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}

void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/*
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}

void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
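
/*
 * Fill in the GPU_HDP_FLUSH request/done register offsets, the per-client
 * done masks and the PCIE index/data register offsets that the rest of the
 * driver uses to talk to this NBIO block.
 */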
int nbio_v6_1_init(struct amdgpu_device *adev)
{
	nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
	nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
	nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);

	return 0;
}
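
/*
 * RCC_IOV_FUNC_IDENTIFIER: bit 0 is set when this function is a VF, bit 31
 * when SR-IOV is enabled.  A value of zero while still running inside a VM
 * is taken to mean passthrough mode.
 */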
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
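
/* Force the CI_SWUS max read request size mode and priv fields in PCIE_CONFIG_CNTL. */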
void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
	data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);

	if (def != data)
		WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}