/* mtk_disp_pmqos.c */
  1. /*
  2. * Copyright (C) 2019 MediaTek Inc.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include "mtk_layering_rule.h"
  14. #include "mtk_drm_crtc.h"
  15. #include "mtk_disp_pmqos.h"
  16. #include "mtk_drm_mmp.h"
  17. #include "mtk_drm_drv.h"
  18. static struct drm_crtc *dev_crtc;
  19. /* add for mm qos */
  20. static struct pm_qos_request mm_freq_request;
  21. static u64 g_freq_steps[MAX_FREQ_STEP];
  22. static int g_freq_level = -1;
  23. static int step_size = 1;
  24. #ifdef MTK_FB_MMDVFS_SUPPORT
  25. int __mtk_disp_pmqos_slot_look_up(int comp_id, int mode)
  26. {
  27. switch (comp_id) {
  28. case DDP_COMPONENT_OVL0:
  29. if (mode == DISP_BW_FBDC_MODE)
  30. return DISP_PMQOS_OVL0_FBDC_BW;
  31. else
  32. return DISP_PMQOS_OVL0_BW;
  33. case DDP_COMPONENT_OVL1:
  34. if (mode == DISP_BW_FBDC_MODE)
  35. return DISP_PMQOS_OVL1_FBDC_BW;
  36. else
  37. return DISP_PMQOS_OVL1_BW;
  38. case DDP_COMPONENT_OVL0_2L:
  39. if (mode == DISP_BW_FBDC_MODE)
  40. return DISP_PMQOS_OVL0_2L_FBDC_BW;
  41. else
  42. return DISP_PMQOS_OVL0_2L_BW;
  43. case DDP_COMPONENT_OVL1_2L:
  44. if (mode == DISP_BW_FBDC_MODE)
  45. return DISP_PMQOS_OVL1_2L_FBDC_BW;
  46. else
  47. return DISP_PMQOS_OVL1_2L_BW;
  48. case DDP_COMPONENT_OVL2_2L:
  49. if (mode == DISP_BW_FBDC_MODE)
  50. return DISP_PMQOS_OVL2_2L_FBDC_BW;
  51. else
  52. return DISP_PMQOS_OVL2_2L_BW;
  53. case DDP_COMPONENT_OVL3_2L:
  54. if (mode == DISP_BW_FBDC_MODE)
  55. return DISP_PMQOS_OVL3_2L_FBDC_BW;
  56. else
  57. return DISP_PMQOS_OVL3_2L_BW;
  58. case DDP_COMPONENT_RDMA0:
  59. return DISP_PMQOS_RDMA0_BW;
  60. case DDP_COMPONENT_RDMA1:
  61. return DISP_PMQOS_RDMA1_BW;
  62. case DDP_COMPONENT_RDMA2:
  63. return DISP_PMQOS_RDMA2_BW;
  64. case DDP_COMPONENT_WDMA0:
  65. return DISP_PMQOS_WDMA0_BW;
  66. case DDP_COMPONENT_WDMA1:
  67. return DISP_PMQOS_WDMA1_BW;
  68. default:
  69. DDPPR_ERR("%s, unknown comp %d\n", __func__, comp_id);
  70. break;
  71. }
  72. return -EINVAL;
  73. }
/*
 * __mtk_disp_pmqos_port_look_up - translate a DDP component id into the
 * SMI/M4U (larb) port used for bandwidth accounting on the current SoC.
 * @comp_id: DDP_COMPONENT_* identifier of the engine.
 *
 * The mapping is SoC-specific; each CONFIG_MACH_* block below provides
 * the port table for that platform. Returns the port id, or -EINVAL if
 * the component has no port on this SoC.
 */
int __mtk_disp_pmqos_port_look_up(int comp_id)
{
	switch (comp_id) {
	/* MT6779: SMI port numbering */
#if defined(CONFIG_MACH_MT6779)
	case DDP_COMPONENT_OVL0:
		return SMI_PORT_DISP_OVL0;
	case DDP_COMPONENT_OVL1:
		return SMI_PORT_DISP_OVL1;
	case DDP_COMPONENT_OVL0_2L:
		return SMI_PORT_DISP_OVL0_2L;
	case DDP_COMPONENT_OVL1_2L:
		return SMI_PORT_DISP_OVL1_2L;
	case DDP_COMPONENT_OVL2_2L:
		return SMI_PORT_DISP_OVL2;
	case DDP_COMPONENT_OVL3_2L:
		return SMI_PORT_DISP_OVL3;
	case DDP_COMPONENT_RDMA0:
		return SMI_PORT_DISP_RDMA0;
	case DDP_COMPONENT_RDMA1:
		return SMI_PORT_DISP_RDMA1;
	case DDP_COMPONENT_WDMA0:
		return SMI_PORT_DISP_WDMA0;
#endif
	/* MT6885/MT6893: M4U larb port numbering */
#if defined(CONFIG_MACH_MT6885) || defined(CONFIG_MACH_MT6893)
	case DDP_COMPONENT_OVL0:
		return M4U_PORT_L0_OVL_RDMA0;
	case DDP_COMPONENT_OVL0_2L:
		return M4U_PORT_L1_OVL_2L_RDMA0;
	case DDP_COMPONENT_OVL1_2L:
		return M4U_PORT_L0_OVL_2L_RDMA1;
	case DDP_COMPONENT_OVL2_2L:
		return M4U_PORT_L1_OVL_2L_RDMA2;
	case DDP_COMPONENT_OVL3_2L:
		return M4U_PORT_L0_OVL_2L_RDMA3;
	case DDP_COMPONENT_RDMA0:
		return M4U_PORT_L0_DISP_RDMA0;
	case DDP_COMPONENT_RDMA1:
		return M4U_PORT_L1_DISP_RDMA1;
	case DDP_COMPONENT_WDMA0:
		return M4U_PORT_L0_DISP_WDMA0;
	case DDP_COMPONENT_WDMA1:
		return M4U_PORT_L1_DISP_WDMA1;
#endif
	/* MT6873: fewer engines than MT6885 */
#if defined(CONFIG_MACH_MT6873)
	case DDP_COMPONENT_OVL0:
		return M4U_PORT_L0_OVL_RDMA0;
	case DDP_COMPONENT_OVL0_2L:
		return M4U_PORT_L1_OVL_2L_RDMA0;
	case DDP_COMPONENT_OVL2_2L:
		return M4U_PORT_L1_OVL_2L_RDMA2;
	case DDP_COMPONENT_RDMA0:
		return M4U_PORT_L0_DISP_RDMA0;
	case DDP_COMPONENT_RDMA4:
		return M4U_PORT_L1_DISP_RDMA4;
	case DDP_COMPONENT_WDMA0:
		return M4U_PORT_L0_DISP_WDMA0;
#endif
	/* MT6853/MT6833: single-pipe configuration */
#if defined(CONFIG_MACH_MT6853) || defined(CONFIG_MACH_MT6833)
	case DDP_COMPONENT_OVL0:
		return M4U_PORT_L0_OVL_RDMA0;
	case DDP_COMPONENT_OVL0_2L:
		return M4U_PORT_L1_OVL_2L_RDMA0;
	case DDP_COMPONENT_RDMA0:
		return M4U_PORT_L1_DISP_RDMA0;
	case DDP_COMPONENT_WDMA0:
		return M4U_PORT_L1_DISP_WDMA0;
#endif
	default:
		DDPPR_ERR("%s, unknown comp %d\n", __func__, comp_id);
		break;
	}
	return -EINVAL;
}
  147. int __mtk_disp_set_module_bw(struct mm_qos_request *request, int comp_id,
  148. unsigned int bandwidth, unsigned int bw_mode)
  149. {
  150. int mode;
  151. if (bw_mode == DISP_BW_FBDC_MODE)
  152. mode = BW_COMP_DEFAULT;
  153. else
  154. mode = BW_COMP_NONE;
  155. DDPINFO("set module %d, bw %u\n", comp_id, bandwidth);
  156. bandwidth = bandwidth * 133 / 100;
  157. mm_qos_set_bw_request(request, bandwidth, mode);
  158. DRM_MMP_MARK(pmqos, comp_id, bandwidth);
  159. return 0;
  160. }
/*
 * __mtk_disp_set_module_hrt - thin wrapper forwarding one engine's HRT
 * (hard real-time) bandwidth request to the mm_qos framework.
 * @request: the mm_qos request slot owned by this engine.
 * @bandwidth: HRT bandwidth to request (no margin is applied here).
 */
void __mtk_disp_set_module_hrt(struct mm_qos_request *request,
	unsigned int bandwidth)
{
	mm_qos_set_hrt_request(request, bandwidth);
}
/*
 * mtk_disp_set_hrt_bw - broadcast a new HRT bandwidth to every component
 * on the CRTC's active HRT paths, then commit it to mm_qos.
 * @mtk_crtc: the CRTC whose display paths are updated.
 * @bw: requested HRT bandwidth.
 *
 * Each component may adjust @bw in place via the PMQOS_SET_HRT_BW io_cmd.
 * Returns the OR of all component return codes.
 */
int mtk_disp_set_hrt_bw(struct mtk_drm_crtc *mtk_crtc, unsigned int bw)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_ddp_comp *comp;
	unsigned int tmp;
	int i, j, ret = 0;

	tmp = bw;
	/* walk only the paths flagged as requiring HRT in the current mode */
	for (i = 0; i < DDP_PATH_NR; i++) {
		if (!(mtk_crtc->ddp_ctx[mtk_crtc->ddp_mode].req_hrt[i]))
			continue;
		for_each_comp_in_crtc_target_path(comp, mtk_crtc, j, i) {
			ret |= mtk_ddp_comp_io_cmd(comp, NULL, PMQOS_SET_HRT_BW,
					&tmp);
		}
	}
	/*
	 * NOTE(review): ret is OR-accumulated across components but then
	 * compared for exact equality with RDMA_REQ_HRT — this only works
	 * if no other component sets a conflicting bit; confirm intended.
	 */
	if (ret == RDMA_REQ_HRT)
		tmp = mtk_drm_primary_frame_bw(crtc);
	mm_qos_set_hrt_request(&priv->hrt_bw_request, tmp);
	DRM_MMP_MARK(hrt_bw, 0, tmp);
	DDPINFO("set HRT bw %u\n", tmp);
	mm_qos_update_all_request(&priv->hrt_request_list);
	return ret;
}
  190. void mtk_drm_pan_disp_set_hrt_bw(struct drm_crtc *crtc, const char *caller)
  191. {
  192. struct mtk_drm_crtc *mtk_crtc;
  193. struct drm_display_mode *mode;
  194. unsigned int bw = 0;
  195. dev_crtc = crtc;
  196. mtk_crtc = to_mtk_crtc(dev_crtc);
  197. mode = &crtc->state->adjusted_mode;
  198. bw = _layering_get_frame_bw(crtc, mode);
  199. mtk_disp_set_hrt_bw(mtk_crtc, bw);
  200. DDPINFO("%s:pan_disp_set_hrt_bw: %u\n", caller, bw);
  201. }
/*
 * mtk_disp_hrt_cond_change_cb - MMDVFS bandwidth-throttle notifier.
 * @nb: notifier block (unused).
 * @value: BW_THROTTLE_START when the camera turns on and display
 *         bandwidth must shrink, BW_THROTTLE_END when it turns off.
 * @v: notifier payload (unused).
 *
 * On throttle start, triggers a repaint and waits (up to 5 x 200ms) for
 * the layering rule to reach a new HRT index, so the display has actually
 * dropped its bandwidth before the notifier chain continues.
 * Always returns 0.
 */
int mtk_disp_hrt_cond_change_cb(struct notifier_block *nb, unsigned long value,
	void *v)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(dev_crtc);
	int i, ret;
	unsigned int hrt_idx;

	DDP_MUTEX_LOCK(&mtk_crtc->lock, __func__, __LINE__);
	switch (value) {
	case BW_THROTTLE_START: /* CAM on */
		DDPMSG("DISP BW Throttle start\n");
		/* TODO: concider memory session */
		DDPINFO("CAM trigger repaint\n");
		/* target index: one past the current layering HRT index */
		hrt_idx = _layering_rule_get_hrt_idx();
		hrt_idx++;
		/*
		 * Drop the CRTC lock while waiting: the repaint path needs
		 * it to make progress and signal hrt_cond_sig.
		 */
		DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
		drm_trigger_repaint(DRM_REPAINT_FOR_IDLE, dev_crtc->dev);
		for (i = 0; i < 5; ++i) {
			ret = wait_event_timeout(
				mtk_crtc->qos_ctx->hrt_cond_wq,
				atomic_read(&mtk_crtc->qos_ctx->hrt_cond_sig),
				HZ / 5);
			if (ret == 0)
				DDPINFO("wait repaint timeout %d\n", i);
			atomic_set(&mtk_crtc->qos_ctx->hrt_cond_sig, 0);
			/* done once the repaint advanced past our target */
			if (atomic_read(&mtk_crtc->qos_ctx->last_hrt_idx) >=
				hrt_idx)
				break;
		}
		/* re-acquire before the common unlock below */
		DDP_MUTEX_LOCK(&mtk_crtc->lock, __func__, __LINE__);
		break;
	case BW_THROTTLE_END: /* CAM off */
		DDPMSG("DISP BW Throttle end\n");
		/* TODO: switch DC */
		break;
	default:
		break;
	}
	DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
	return 0;
}
/* Notifier registered with MMDVFS to receive BW_THROTTLE_* events. */
struct notifier_block pmqos_hrt_notifier = {
	.notifier_call = mtk_disp_hrt_cond_change_cb,
};
/*
 * mtk_disp_hrt_bw_dbg - debug hook: simulate a camera-on bandwidth
 * throttle by invoking the notifier callback directly.
 * Always returns 0.
 */
int mtk_disp_hrt_bw_dbg(void)
{
	mtk_disp_hrt_cond_change_cb(NULL, BW_THROTTLE_START, NULL);
	return 0;
}
  250. #endif
  251. int mtk_disp_hrt_cond_init(struct drm_crtc *crtc)
  252. {
  253. struct mtk_drm_crtc *mtk_crtc;
  254. dev_crtc = crtc;
  255. mtk_crtc = to_mtk_crtc(dev_crtc);
  256. mtk_crtc->qos_ctx = vmalloc(sizeof(struct mtk_drm_qos_ctx));
  257. if (mtk_crtc->qos_ctx == NULL) {
  258. DDPPR_ERR("%s:allocate qos_ctx failed\n", __func__);
  259. return -ENOMEM;
  260. }
  261. return 0;
  262. }
#ifdef MTK_FB_MMDVFS_SUPPORT
/*
 * mtk_drm_mmdvfs_init - register the display frequency PM QoS request and
 * fetch the SoC's MM clock step table into g_freq_steps/step_size.
 */
void mtk_drm_mmdvfs_init(void)
{
	pm_qos_add_request(&mm_freq_request, PM_QOS_DISP_FREQ,
		PM_QOS_MM_FREQ_DEFAULT_VALUE);
	/* fills g_freq_steps (descending clock steps) and step_size */
	mmdvfs_qos_get_freq_steps(PM_QOS_DISP_FREQ, g_freq_steps, &step_size);
}
#endif
  271. static void mtk_drm_set_mmclk(struct drm_crtc *crtc, int level,
  272. const char *caller)
  273. {
  274. if (drm_crtc_index(crtc) != 0)
  275. return;
  276. if (level < 0 || level > MAX_FREQ_STEP)
  277. level = -1;
  278. if (level == g_freq_level)
  279. return;
  280. g_freq_level = level;
  281. DDPINFO("%s set mmclk level: %d\n", caller, g_freq_level);
  282. if (g_freq_level >= 0)
  283. pm_qos_update_request(&mm_freq_request,
  284. g_freq_steps[g_freq_level]);
  285. else
  286. pm_qos_update_request(&mm_freq_request, 0);
  287. }
  288. void mtk_drm_set_mmclk_by_pixclk(struct drm_crtc *crtc,
  289. unsigned int pixclk, const char *caller)
  290. {
  291. int i;
  292. if (pixclk >= g_freq_steps[0]) {
  293. DDPMSG("%s:error:pixleclk (%d) is to big for mmclk (%llu)\n",
  294. caller, pixclk, g_freq_steps[0]);
  295. mtk_drm_set_mmclk(crtc, 0, caller);
  296. return;
  297. }
  298. if (!pixclk) {
  299. mtk_drm_set_mmclk(crtc, -1, caller);
  300. return;
  301. }
  302. for (i = 1; i < step_size; i++) {
  303. if (pixclk >= g_freq_steps[i]) {
  304. mtk_drm_set_mmclk(crtc, i-1, caller);
  305. break;
  306. }
  307. if (i == step_size - 1)
  308. mtk_drm_set_mmclk(crtc, -1, caller);
  309. }
  310. }