/*
 * Copyright (c) 2019 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <drm/mediatek_drm.h>
#include "mtk_drm_lowpower.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_mmp.h"
  25. #define MAX_ENTER_IDLE_RSZ_RATIO 250
  26. static void mtk_drm_idlemgr_enable_crtc(struct drm_crtc *crtc);
  27. static void mtk_drm_idlemgr_disable_crtc(struct drm_crtc *crtc);
/*
 * Enter idle for a video-mode panel: build one GCE command packet that
 * reduces display activity without fully stopping the pipe, then turn
 * vblank delivery off.
 */
static void mtk_drm_vdo_mode_enter_idle(struct drm_crtc *crtc)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i, j;
	struct cmdq_pkt *handle;
	struct cmdq_client *client = mtk_crtc->gce_obj.client[CLIENT_CFG];
	struct mtk_ddp_comp *comp;

	/* NOTE(review): return value of pkt create is not checked here;
	 * presumably it cannot fail in this path — confirm.
	 */
	mtk_crtc_pkt_create(&handle, crtc, client);
	/* With more than one active plane, ask userspace for a repaint so
	 * layers can be merged down before idling (repaint-based idle).
	 */
	if (mtk_drm_helper_get_opt(priv->helper_opt,
				   MTK_DRM_OPT_IDLEMGR_BY_REPAINT) &&
	    atomic_read(&state->plane_enabled_num) > 1) {
		atomic_set(&priv->idle_need_repaint, 1);
		drm_trigger_repaint(DRM_REPAINT_FOR_IDLE, crtc->dev);
	}
	/* Optionally quiesce routine interrupts: mask mutex IRQs and drop
	 * every component in the current path to idle-level IRQs.
	 */
	if (mtk_drm_helper_get_opt(priv->helper_opt,
				   MTK_DRM_OPT_IDLEMGR_DISABLE_ROUTINE_IRQ)) {
		mtk_disp_mutex_inten_disable_cmdq(mtk_crtc->mutex[0], handle);
		for_each_comp_in_cur_crtc_path(comp, mtk_crtc, i, j)
			mtk_ddp_comp_io_cmd(comp, handle, IRQ_LEVEL_IDLE, NULL);
	}
	/* Switch the DSI output to its idle VFP (vertical front porch)
	 * setting — presumably lowers the effective refresh rate; confirm
	 * against the DSI driver.
	 */
	comp = mtk_ddp_comp_request_output(mtk_crtc);
	if (comp)
		mtk_ddp_comp_io_cmd(comp, handle, DSI_VFP_IDLE_MODE, NULL);
	/* Synchronously flush the packet to hardware, then release it. */
	cmdq_pkt_flush(handle);
	cmdq_pkt_destroy(handle);
	drm_crtc_vblank_off(crtc);
}
/*
 * Enter idle for a command-mode panel: fully power down the CRTC path
 * (command-mode panels self-refresh), then reset the LCM FPS context so
 * stale frame-time samples do not skew the FPS estimate after resume.
 */
static void mtk_drm_cmd_mode_enter_idle(struct drm_crtc *crtc)
{
	mtk_drm_idlemgr_disable_crtc(crtc);
	lcm_fps_ctx_reset(crtc);
}
/*
 * Leave idle for a video-mode panel: mirror of mtk_drm_vdo_mode_enter_idle.
 * One GCE packet restores routine IRQ levels and the default DSI VFP, then
 * vblank delivery is switched back on.
 */
static void mtk_drm_vdo_mode_leave_idle(struct drm_crtc *crtc)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i, j;
	struct cmdq_pkt *handle;
	struct cmdq_client *client = mtk_crtc->gce_obj.client[CLIENT_CFG];
	struct mtk_ddp_comp *comp;

	mtk_crtc_pkt_create(&handle, crtc, client);
	/* Re-enable mutex interrupts and restore full IRQ level on every
	 * component, undoing the idle-time masking.
	 */
	if (mtk_drm_helper_get_opt(priv->helper_opt,
				   MTK_DRM_OPT_IDLEMGR_DISABLE_ROUTINE_IRQ)) {
		mtk_disp_mutex_inten_enable_cmdq(mtk_crtc->mutex[0], handle);
		for_each_comp_in_cur_crtc_path(comp, mtk_crtc, i, j)
			mtk_ddp_comp_io_cmd(comp, handle, IRQ_LEVEL_ALL, NULL);
	}
	/* Restore the normal (default) DSI vertical front porch. */
	comp = mtk_ddp_comp_request_output(mtk_crtc);
	if (comp)
		mtk_ddp_comp_io_cmd(comp, handle, DSI_VFP_DEFAULT_MODE, NULL);
	cmdq_pkt_flush(handle);
	cmdq_pkt_destroy(handle);
	drm_crtc_vblank_on(crtc);
}
/*
 * Leave idle for a command-mode panel: power the CRTC path back up and
 * reset the LCM FPS context (frame-time history is meaningless across the
 * idle period).
 */
static void mtk_drm_cmd_mode_leave_idle(struct drm_crtc *crtc)
{
	mtk_drm_idlemgr_enable_crtc(crtc);
	lcm_fps_ctx_reset(crtc);
}
  89. static void mtk_drm_idlemgr_enter_idle_nolock(struct drm_crtc *crtc)
  90. {
  91. struct mtk_drm_private *priv = crtc->dev->dev_private;
  92. struct mtk_ddp_comp *output_comp;
  93. int index = drm_crtc_index(crtc);
  94. bool mode;
  95. output_comp = priv->ddp_comp[DDP_COMPONENT_DSI0];
  96. if (!output_comp)
  97. return;
  98. mode = mtk_dsi_is_cmd_mode(output_comp);
  99. CRTC_MMP_EVENT_START(index, enter_idle, mode, 0);
  100. if (mode)
  101. mtk_drm_cmd_mode_enter_idle(crtc);
  102. else
  103. mtk_drm_vdo_mode_enter_idle(crtc);
  104. CRTC_MMP_EVENT_END(index, enter_idle, mode, 0);
  105. }
  106. static void mtk_drm_idlemgr_leave_idle_nolock(struct drm_crtc *crtc)
  107. {
  108. struct mtk_drm_private *priv = crtc->dev->dev_private;
  109. struct mtk_ddp_comp *output_comp;
  110. int index = drm_crtc_index(crtc);
  111. bool mode;
  112. output_comp = priv->ddp_comp[DDP_COMPONENT_DSI0];
  113. if (!output_comp)
  114. return;
  115. mode = mtk_dsi_is_cmd_mode(output_comp);
  116. CRTC_MMP_EVENT_START(index, leave_idle, mode, 0);
  117. if (mode)
  118. mtk_drm_cmd_mode_leave_idle(crtc);
  119. else
  120. mtk_drm_vdo_mode_leave_idle(crtc);
  121. CRTC_MMP_EVENT_END(index, leave_idle, mode, 0);
  122. }
  123. bool mtk_drm_is_idle(struct drm_crtc *crtc)
  124. {
  125. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  126. struct mtk_drm_idlemgr *idlemgr = mtk_crtc->idlemgr;
  127. if (!idlemgr)
  128. return false;
  129. return idlemgr->idlemgr_ctx->is_idle;
  130. }
/*
 * Kick the idle manager: record display activity and, if the CRTC is
 * currently idle, bring it back out of idle.
 *
 * @source:    caller tag used only for the log message
 * @crtc:      the CRTC being kicked
 * @need_lock: nonzero if this function must take mtk_crtc->lock itself;
 *             pass 0 when the caller already holds it (lock is NOT
 *             recursive, so getting this wrong deadlocks or races)
 */
void mtk_drm_idlemgr_kick(const char *source, struct drm_crtc *crtc,
			  int need_lock)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_idlemgr *idlemgr;
	struct mtk_drm_idlemgr_context *idlemgr_ctx;

	/* CRTCs without an idle manager (e.g. secondary paths) are no-ops. */
	if (!mtk_crtc->idlemgr)
		return;
	idlemgr = mtk_crtc->idlemgr;
	idlemgr_ctx = idlemgr->idlemgr_ctx;
	/* get lock to protect idlemgr_last_kick_time and is_idle */
	if (need_lock)
		DDP_MUTEX_LOCK(&mtk_crtc->lock, __func__, __LINE__);
	/* update kick timestamp */
	idlemgr_ctx->idlemgr_last_kick_time = sched_clock();
	if (idlemgr_ctx->is_idle) {
		DDPINFO("[LP] kick idle from [%s]\n", source);
		/* Clear any pending ESD check deadline before waking up. */
		if (mtk_crtc->esd_ctx)
			atomic_set(&mtk_crtc->esd_ctx->target_time, 0);
		mtk_drm_idlemgr_leave_idle_nolock(crtc);
		idlemgr_ctx->is_idle = 0;
		/* wake up idlemgr process to monitor next idle state */
		wake_up_interruptible(&idlemgr->idlemgr_wq);
	}
	if (need_lock)
		DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
}
  158. unsigned int mtk_drm_set_idlemgr(struct drm_crtc *crtc, unsigned int flag,
  159. bool need_lock)
  160. {
  161. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  162. struct mtk_drm_idlemgr *idlemgr = mtk_crtc->idlemgr;
  163. unsigned int old_flag;
  164. if (!idlemgr)
  165. return 0;
  166. old_flag = atomic_read(&idlemgr->idlemgr_task_active);
  167. if (flag) {
  168. DDPINFO("[LP] enable idlemgr\n");
  169. atomic_set(&idlemgr->idlemgr_task_active, 1);
  170. wake_up_interruptible(&idlemgr->idlemgr_wq);
  171. } else {
  172. DDPINFO("[LP] disable idlemgr\n");
  173. atomic_set(&idlemgr->idlemgr_task_active, 0);
  174. mtk_drm_idlemgr_kick(__func__, crtc, need_lock);
  175. }
  176. return old_flag;
  177. }
  178. unsigned long long
  179. mtk_drm_set_idle_check_interval(struct drm_crtc *crtc,
  180. unsigned long long new_interval)
  181. {
  182. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  183. unsigned long long old_interval = 0;
  184. if (!(mtk_crtc && mtk_crtc->idlemgr && mtk_crtc->idlemgr->idlemgr_ctx))
  185. return 0;
  186. old_interval = mtk_crtc->idlemgr->idlemgr_ctx->idle_check_interval;
  187. mtk_crtc->idlemgr->idlemgr_ctx->idle_check_interval = new_interval;
  188. return old_interval;
  189. }
  190. unsigned long long
  191. mtk_drm_get_idle_check_interval(struct drm_crtc *crtc)
  192. {
  193. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  194. if (!(mtk_crtc && mtk_crtc->idlemgr && mtk_crtc->idlemgr->idlemgr_ctx))
  195. return 0;
  196. return mtk_crtc->idlemgr->idlemgr_ctx->idle_check_interval;
  197. }
  198. static int mtk_drm_idlemgr_get_rsz_ratio(struct mtk_crtc_state *state)
  199. {
  200. int src_w = state->rsz_src_roi.width;
  201. int src_h = state->rsz_src_roi.height;
  202. int dst_w = state->rsz_dst_roi.width;
  203. int dst_h = state->rsz_dst_roi.height;
  204. int ratio_w, ratio_h;
  205. if (src_w == 0 || src_h == 0)
  206. return 100;
  207. ratio_w = dst_w * 100 / src_w;
  208. ratio_h = dst_h * 100 / src_h;
  209. return ((ratio_w > ratio_h) ? ratio_w : ratio_h);
  210. }
  211. static bool is_yuv(uint32_t format)
  212. {
  213. switch (format) {
  214. case DRM_FORMAT_YUV420:
  215. case DRM_FORMAT_YVU420:
  216. case DRM_FORMAT_NV12:
  217. case DRM_FORMAT_NV21:
  218. case DRM_FORMAT_YUYV:
  219. case DRM_FORMAT_YVYU:
  220. case DRM_FORMAT_UYVY:
  221. case DRM_FORMAT_VYUY:
  222. return true;
  223. default:
  224. break;
  225. }
  226. return false;
  227. }
  228. static bool mtk_planes_is_yuv_fmt(struct drm_crtc *crtc)
  229. {
  230. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  231. int i;
  232. for (i = 0; i < mtk_crtc->layer_nr; i++) {
  233. struct drm_plane *plane = &mtk_crtc->planes[i].base;
  234. struct mtk_plane_state *plane_state =
  235. to_mtk_plane_state(plane->state);
  236. struct mtk_plane_pending_state *pending = &plane_state->pending;
  237. unsigned int fmt = pending->format;
  238. if (pending->enable && is_yuv(fmt))
  239. return true;
  240. }
  241. return false;
  242. }
/*
 * Idle-manager monitor kthread (one per managed CRTC).
 *
 * Loop: sleep until roughly idle_check_interval ms have passed since the
 * last kick; if no activity occurred in that window and no blocker is
 * present (CRTC disabled, doze, DC mode, secure session, heavy resize,
 * YUV layer in video mode, vblank refcount held), enter idle and then
 * block until something kicks us back out.
 *
 * @data: the struct drm_crtc this thread monitors. Always returns 0.
 */
static int mtk_drm_idlemgr_monitor_thread(void *data)
{
	int ret = 0;
	long long t_to_check = 0;
	unsigned long long t_idle;
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_idlemgr *idlemgr = mtk_crtc->idlemgr;
	struct mtk_drm_idlemgr_context *idlemgr_ctx = idlemgr->idlemgr_ctx;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_crtc_state *mtk_state = NULL;
	struct drm_vblank_crtc *vblank = NULL;
	int crtc_id = drm_crtc_index(crtc);
	/* Shortened re-check interval (ms) used while a vblank client holds
	 * a reference; 0 means use the normal idle_check_interval.
	 * NOTE(review): "internal" looks like a typo for "interval"; also,
	 * being function-static it is shared across all CRTC threads —
	 * presumably only one CRTC runs this, confirm.
	 */
	static unsigned long long idlemgr_vblank_check_internal;

	/* Grace period after boot before idle management starts. */
	msleep(16000);
	while (1) {
		/* Park here whenever the idle manager is disabled. */
		ret = wait_event_interruptible(
			idlemgr->idlemgr_wq,
			atomic_read(&idlemgr->idlemgr_task_active));
		/* Time since last kick, in ns (clocks mix local_clock()
		 * here with sched_clock() at the kick site — the two are
		 * closely related; assumed equivalent for this purpose).
		 */
		t_idle = local_clock() - idlemgr_ctx->idlemgr_last_kick_time;
		/* Remaining wait = check interval (ms -> ns) minus time
		 * already idle.
		 */
		if (idlemgr_vblank_check_internal)
			t_to_check = idlemgr_vblank_check_internal *
				1000 * 1000 - t_idle;
		else
			t_to_check = idlemgr_ctx->idle_check_interval *
				1000 * 1000 - t_idle;
		/* ns -> ms. NOTE(review): do_div() treats the dividend as
		 * unsigned, so a negative t_to_check (already overdue)
		 * becomes a huge value; the min() below caps the sleep to
		 * 1000 ms, which bounds the damage — verify intent.
		 */
		do_div(t_to_check, 1000000);
		t_to_check = min(t_to_check, 1000LL);
		/* when starting up before the first time kick */
		if (idlemgr_ctx->idlemgr_last_kick_time == 0)
			msleep_interruptible(idlemgr_ctx->idle_check_interval);
		else if (t_to_check > 0)
			msleep_interruptible(t_to_check);
		/* All state checks below run under the CRTC lock. */
		DDP_MUTEX_LOCK(&mtk_crtc->lock, __func__, __LINE__);
		if (!mtk_crtc->enabled) {
			/* CRTC is off: drop the lock and sleep until it is
			 * enabled again rather than spinning.
			 */
			DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
			mtk_crtc_wait_status(crtc, 1, MAX_SCHEDULE_TIMEOUT);
			continue;
		}
		if (crtc->state) {
			mtk_state = to_mtk_crtc_state(crtc->state);
			/* Never idle while dozing (AOD). */
			if (mtk_state->prop_val[CRTC_PROP_DOZE_ACTIVE]) {
				DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__,
						 __LINE__);
				continue;
			}
			/* do not enter VDO idle when rsz ratio >= 2.5;
			 * And When layer fmt is YUV in VP scenario, it
			 * will flicker into idle repaint, so let it not
			 * into idle repaint as workaround.
			 */
			if (mtk_crtc_is_frame_trigger_mode(crtc) == 0 &&
			    ((mtk_drm_idlemgr_get_rsz_ratio(mtk_state) >=
			      MAX_ENTER_IDLE_RSZ_RATIO) ||
			     mtk_planes_is_yuv_fmt(crtc))) {
				DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__,
						 __LINE__);
				continue;
			}
		}
		/* Skip when already idle or in a configuration that cannot
		 * idle (decouple mode, non-direct-link session, secure).
		 */
		if (idlemgr_ctx->is_idle
			|| mtk_crtc_is_dc_mode(crtc)
			|| priv->session_mode != MTK_DRM_SESSION_DL
			|| mtk_crtc->sec_on) {
			DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
			continue;
		}
		/* Re-sample idle time under the lock: a kick may have
		 * arrived while we slept.
		 */
		t_idle = local_clock() - idlemgr_ctx->idlemgr_last_kick_time;
		if ((idlemgr_vblank_check_internal &&
		     t_idle < idlemgr_vblank_check_internal * 1000 * 1000) ||
		    (!idlemgr_vblank_check_internal &&
		     t_idle < idlemgr_ctx->idle_check_interval * 1000 * 1000)) {
			/* kicked in idle_check_interval msec, it's not idle */
			DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
			continue;
		}
		/* double check if dynamic switch on/off */
		if (atomic_read(&idlemgr->idlemgr_task_active)) {
			DDPINFO("[LP] enter idle\n");
			crtc_id = drm_crtc_index(crtc);
			vblank = &crtc->dev->vblank[crtc_id];
			/* enter idle state */
			if (!vblank || atomic_read(&vblank->refcount) == 0) {
				/* No vblank client: safe to enter idle. */
				mtk_drm_idlemgr_enter_idle_nolock(crtc);
				idlemgr_ctx->is_idle = 1;
				idlemgr_vblank_check_internal = 0;
			} else {
				/* A vblank client is active: postpone, and
				 * retry on a short 10 ms cadence.
				 */
				idlemgr_ctx->idlemgr_last_kick_time =
					sched_clock();
				idlemgr_vblank_check_internal = 10;
			}
		}
		DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
		/* Once idle, block until a kick clears is_idle. */
		wait_event_interruptible(idlemgr->idlemgr_wq,
					 !idlemgr_ctx->is_idle);
		if (kthread_should_stop())
			break;
	}
	return 0;
}
  343. int mtk_drm_idlemgr_init(struct drm_crtc *crtc, int index)
  344. {
  345. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  346. struct mtk_drm_idlemgr *idlemgr =
  347. kzalloc(sizeof(struct mtk_drm_idlemgr), GFP_KERNEL);
  348. struct mtk_drm_idlemgr_context *idlemgr_ctx =
  349. kzalloc(sizeof(struct mtk_drm_idlemgr_context), GFP_KERNEL);
  350. const int len = 50;
  351. char name[len];
  352. if (!idlemgr) {
  353. DDPPR_ERR("struct mtk_drm_idlemgr allocate fail\n");
  354. return -ENOMEM;
  355. ;
  356. }
  357. if (!idlemgr_ctx) {
  358. DDPPR_ERR("struct mtk_drm_idlemgr_context allocate fail\n");
  359. return -ENOMEM;
  360. }
  361. idlemgr->idlemgr_ctx = idlemgr_ctx;
  362. mtk_crtc->idlemgr = idlemgr;
  363. idlemgr_ctx->session_mode_before_enter_idle = MTK_DRM_SESSION_INVALID;
  364. idlemgr_ctx->is_idle = 0;
  365. idlemgr_ctx->enterulps = 0;
  366. idlemgr_ctx->idlemgr_last_kick_time = ~(0ULL);
  367. idlemgr_ctx->cur_lp_cust_mode = 0;
  368. idlemgr_ctx->idle_check_interval = 50;
  369. snprintf(name, len, "mtk_drm_disp_idlemgr-%d", index);
  370. idlemgr->idlemgr_task =
  371. kthread_create(mtk_drm_idlemgr_monitor_thread, crtc, name);
  372. init_waitqueue_head(&idlemgr->idlemgr_wq);
  373. atomic_set(&idlemgr->idlemgr_task_active, 1);
  374. wake_up_process(idlemgr->idlemgr_task);
  375. return 0;
  376. }
  377. static void mtk_drm_idlemgr_disable_connector(struct drm_crtc *crtc)
  378. {
  379. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  380. struct mtk_ddp_comp *output_comp;
  381. output_comp = mtk_ddp_comp_request_output(mtk_crtc);
  382. if (output_comp)
  383. mtk_ddp_comp_io_cmd(output_comp, NULL, CONNECTOR_DISABLE, NULL);
  384. }
  385. static void mtk_drm_idlemgr_enable_connector(struct drm_crtc *crtc)
  386. {
  387. struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
  388. struct mtk_ddp_comp *output_comp;
  389. output_comp = mtk_ddp_comp_request_output(mtk_crtc);
  390. if (output_comp)
  391. mtk_ddp_comp_io_cmd(output_comp, NULL, CONNECTOR_ENABLE, NULL);
  392. }
/*
 * Full power-down of a CRTC for idle (command-mode path). The step order
 * matters: stop the pipe before tearing down its path, drop bandwidth
 * votes before clock/power gating, and gate the MTCMOS power domain last.
 * Skipped entirely in decouple (DC) mode.
 */
static void mtk_drm_idlemgr_disable_crtc(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	unsigned int crtc_id = drm_crtc_index(&mtk_crtc->base);
	bool mode = mtk_crtc_is_dc_mode(crtc);

	DDPINFO("%s, crtc%d+\n", __func__, crtc_id);
	if (mode) {
		DDPINFO("crtc%d mode:%d bypass enter idle\n", crtc_id, mode);
		DDPINFO("crtc%d do %s-\n", crtc_id, __func__);
		return;
	}
	/* 1. stop CRTC */
	mtk_crtc_stop(mtk_crtc, true);
	/* 2. disconnect addon module and recover config */
	mtk_crtc_disconnect_addon_module(crtc);
	/* 3. set HRT BW to 0 */
#ifdef MTK_FB_MMDVFS_SUPPORT
	mtk_disp_set_hrt_bw(mtk_crtc, 0);
#endif
	/* 4. disconnect path */
	mtk_crtc_disconnect_default_path(mtk_crtc);
	/* 5. power off all modules in this CRTC */
	mtk_crtc_ddp_unprepare(mtk_crtc);
	mtk_drm_idlemgr_disable_connector(crtc);
	drm_crtc_vblank_off(crtc);
	/* Fire one final vblank so waiters are not left hanging. */
	mtk_crtc_vblank_irq(&mtk_crtc->base);
	/* 6. power off MTCMOS */
	mtk_drm_top_clk_disable_unprepare(crtc->dev);
	/* 7. disable fake vsync if need */
	mtk_drm_fake_vsync_switch(crtc, false);
	DDPINFO("crtc%d do %s-\n", crtc_id, __func__);
}
/* TODO: we should restore the current setting rather than default setting */
/*
 * Full power-up of a CRTC when leaving idle; exact reverse order of
 * mtk_drm_idlemgr_disable_crtc: power domain first, then module prepare,
 * GCE trigger loops, path connect/config, plane restore, and finally
 * bandwidth votes and vblank. Skipped entirely in decouple (DC) mode.
 */
static void mtk_drm_idlemgr_enable_crtc(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	unsigned int crtc_id = drm_crtc_index(crtc);
	bool mode = mtk_crtc_is_dc_mode(crtc);
	struct mtk_ddp_comp *comp;
	unsigned int i, j;

	DDPINFO("crtc%d do %s+\n", crtc_id, __func__);
	if (mode) {
		DDPINFO("crtc%d mode:%d bypass exit idle\n", crtc_id, mode);
		DDPINFO("crtc%d do %s-\n", crtc_id, __func__);
		return;
	}
	/* 1. power on mtcmos */
	mtk_drm_top_clk_prepare_enable(crtc->dev);
	/* 2. prepare modules would be used in this CRTC */
	mtk_drm_idlemgr_enable_connector(crtc);
	mtk_crtc_ddp_prepare(mtk_crtc);
	/* 3. start trigger loop first to keep gce alive */
	if (crtc_id == 0) {
		/* SODI loop is only needed for video-mode panels on these
		 * SoCs (SODI = screen-on display idle power state).
		 */
#if defined(CONFIG_MACH_MT6873) || defined(CONFIG_MACH_MT6853) \
	|| defined(CONFIG_MACH_MT6833)
		if (!mtk_crtc_is_frame_trigger_mode(crtc))
			mtk_crtc_start_sodi_loop(crtc);
#endif
		mtk_crtc_start_trig_loop(crtc);
		mtk_crtc_hw_block_ready(crtc);
	}
	/* 4. connect path */
	mtk_crtc_connect_default_path(mtk_crtc);
	/* 5. config ddp engine & set dirty for cmd mode */
	mtk_crtc_config_default_path(mtk_crtc);
	/* 6. conect addon module and config */
	mtk_crtc_connect_addon_module(crtc);
	/* 7. restore OVL setting */
	mtk_crtc_restore_plane_setting(mtk_crtc);
	/* 8. Set QOS BW */
	for_each_comp_in_cur_crtc_path(comp, mtk_crtc, i, j)
		mtk_ddp_comp_io_cmd(comp, NULL, PMQOS_SET_BW, NULL);
	/* 9. restore HRT BW */
#ifdef MTK_FB_MMDVFS_SUPPORT
	mtk_disp_set_hrt_bw(mtk_crtc, mtk_crtc->qos_ctx->last_hrt_req);
#endif
	/* 10. set vblank */
	drm_crtc_vblank_on(crtc);
	/* 11. enable fake vsync if need */
	mtk_drm_fake_vsync_switch(crtc, true);
	DDPINFO("crtc%d do %s-\n", crtc_id, __func__);
}