mtk_disp_recovery.c

/*
 * Copyright (C) 2019 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_helper.h"
#include "mtk_drm_assert.h"
#include "mtk_drm_mmp.h"
#include "mtk_drm_fbdev.h"
#include "mtk_drm_trace.h"

#define ESD_TRY_CNT 5
#define ESD_CHECK_PERIOD 2000 /* ms */

/* pinctrl implementation */
long _set_state(struct drm_crtc *crtc, const char *name)
{
#ifndef CONFIG_FPGA_EARLY_PORTING
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct pinctrl_state *pState = NULL;
	long ret = 0;

	/* TODO: race condition issue for pctrl handle */
	/* So far _set_state() is only processed once */
	if (!priv->pctrl) {
		DDPPR_ERR("this pctrl is null\n");
		return -1;
	}

	pState = pinctrl_lookup_state(priv->pctrl, name);
	if (IS_ERR(pState)) {
		DDPPR_ERR("lookup state '%s' failed\n", name);
		ret = PTR_ERR(pState);
		goto exit;
	}

	/* select state */
	pinctrl_select_state(priv->pctrl, pState);

exit:
	return ret;
#else
	return 0;
#endif
}

long disp_dts_gpio_init(struct device *dev, struct mtk_drm_private *private)
{
#ifndef CONFIG_FPGA_EARLY_PORTING
	long ret = 0;
	struct pinctrl *pctrl;

	/* retrieve */
	pctrl = devm_pinctrl_get(dev);
	if (IS_ERR(pctrl)) {
		DDPPR_ERR("Cannot find disp pinctrl!");
		ret = PTR_ERR(pctrl);
		goto exit;
	}

	private->pctrl = pctrl;

exit:
	return ret;
#else
	return 0;
#endif
}

static inline int _can_switch_check_mode(struct drm_crtc *crtc,
					 struct mtk_panel_ext *panel_ext)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	int ret = 0;

	if (panel_ext->params->cust_esd_check == 0 &&
	    panel_ext->params->lcm_esd_check_table[0].cmd != 0 &&
	    mtk_drm_helper_get_opt(priv->helper_opt,
				   MTK_DRM_OPT_ESD_CHECK_SWITCH))
		ret = 1;

	return ret;
}

static inline int _lcm_need_esd_check(struct mtk_panel_ext *panel_ext)
{
	int ret = 0;

	if (panel_ext->params->esd_check_enable == 1 &&
	    mtk_drm_lcm_is_connect()) {
		ret = 1;
	}

	return ret;
}

static inline int need_wait_esd_eof(struct drm_crtc *crtc,
				    struct mtk_panel_ext *panel_ext)
{
	int ret = 1;

	/*
	 * Waiting for the ESD_EOF event is not needed in:
	 * 1. video mode
	 * 2. command mode with TE-based (non-custom) ESD check
	 */
	if (!mtk_crtc_is_frame_trigger_mode(crtc))
		ret = 0;
	if (panel_ext->params->cust_esd_check == 0)
		ret = 0;

	return ret;
}
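
/*
 * CMDQ error callback for the ESD read packet: if the GCE flush fails or
 * times out, mark the check as failed (chk_sta = 0xff) and dump the CRTC
 * state so the failure can be analyzed.
 */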
static void esd_cmdq_timeout_cb(struct cmdq_cb_data data)
{
	struct drm_crtc *crtc = data.data;
	struct mtk_drm_crtc *mtk_crtc;
	struct mtk_drm_esd_ctx *esd_ctx;

	if (!crtc) {
		DDPMSG("%s find crtc fail\n", __func__);
		return;
	}

	mtk_crtc = to_mtk_crtc(crtc);
	esd_ctx = mtk_crtc->esd_ctx;

	DDPMSG("read flush fail\n");
	esd_ctx->chk_sta = 0xff;
	mtk_drm_crtc_analysis(crtc);
	mtk_drm_crtc_dump(crtc);
}
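
/*
 * Register-read ESD check. A CMDQ packet is built that reads the panel's ESD
 * registers over DSI into the GCE slot at DISP_SLOT_ESD_READ_BASE. In command
 * mode the read is bracketed by the ESD_EOF event; in video mode the DSI
 * stream is stopped, the read is issued, and video mode is restarted. The
 * read-back values are then compared against the expected table
 * (ESD_CHECK_CMP).
 */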
int _mtk_esd_check_read(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *output_comp;
	struct mtk_panel_ext *panel_ext;
	struct cmdq_pkt *cmdq_handle, *cmdq_handle2;
	struct mtk_drm_esd_ctx *esd_ctx;
	int ret = 0;

	DDPINFO("[ESD]ESD read panel\n");

	output_comp = mtk_ddp_comp_request_output(mtk_crtc);
	if (unlikely(!output_comp)) {
		DDPPR_ERR("%s:invalid output comp\n", __func__);
		return -EINVAL;
	}

	if (mtk_drm_is_idle(crtc) && mtk_dsi_is_cmd_mode(output_comp))
		return 0;

	mtk_ddp_comp_io_cmd(output_comp, NULL, REQ_PANEL_EXT, &panel_ext);
	if (unlikely(!(panel_ext && panel_ext->params))) {
		DDPPR_ERR("%s:can't find panel_ext handle\n", __func__);
		return -EINVAL;
	}

	cmdq_handle = cmdq_pkt_create(mtk_crtc->gce_obj.client[CLIENT_DSI_CFG]);
	cmdq_handle->err_cb.cb = esd_cmdq_timeout_cb;
	cmdq_handle->err_cb.data = crtc;

	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 2, 1);

	if (mtk_dsi_is_cmd_mode(output_comp)) {
		if (mtk_crtc_with_sub_path(crtc, mtk_crtc->ddp_mode))
			mtk_crtc_wait_frame_done(mtk_crtc, cmdq_handle,
						 DDP_SECOND_PATH, 0);
		else
			mtk_crtc_wait_frame_done(mtk_crtc, cmdq_handle,
						 DDP_FIRST_PATH, 0);

		cmdq_pkt_clear_event(cmdq_handle,
				     mtk_crtc->gce_obj.event[EVENT_ESD_EOF]);
		mtk_ddp_comp_io_cmd(output_comp, cmdq_handle, ESD_CHECK_READ,
				    (void *)mtk_crtc->gce_obj.buf.pa_base +
					    DISP_SLOT_ESD_READ_BASE);
		cmdq_pkt_set_event(cmdq_handle,
				   mtk_crtc->gce_obj.event[EVENT_ESD_EOF]);
	} else { /* VDO mode */
		if (mtk_crtc_with_sub_path(crtc, mtk_crtc->ddp_mode))
			mtk_crtc_wait_frame_done(mtk_crtc, cmdq_handle,
						 DDP_SECOND_PATH, 1);
		else
			mtk_crtc_wait_frame_done(mtk_crtc, cmdq_handle,
						 DDP_FIRST_PATH, 1);

		CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 2, 2);

		mtk_ddp_comp_io_cmd(output_comp, cmdq_handle, DSI_STOP_VDO_MODE,
				    NULL);
		CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 2, 3);

		mtk_ddp_comp_io_cmd(output_comp, cmdq_handle, ESD_CHECK_READ,
				    (void *)mtk_crtc->gce_obj.buf.pa_base +
					    DISP_SLOT_ESD_READ_BASE);
		mtk_ddp_comp_io_cmd(output_comp, cmdq_handle,
				    DSI_START_VDO_MODE, NULL);
		mtk_disp_mutex_trigger(mtk_crtc->mutex[0], cmdq_handle);
		mtk_ddp_comp_io_cmd(output_comp, cmdq_handle, COMP_REG_START,
				    NULL);
	}

	esd_ctx = mtk_crtc->esd_ctx;
	esd_ctx->chk_sta = 0;
	cmdq_pkt_flush(cmdq_handle);

	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 2, 4);

	mtk_ddp_comp_io_cmd(output_comp, NULL, CONNECTOR_READ_EPILOG, NULL);

	if (esd_ctx->chk_sta == 0xff) {
		ret = -1;
		if (need_wait_esd_eof(crtc, panel_ext)) {
			/* TODO: set ESD_EOF event through CPU is better */
			mtk_crtc_pkt_create(&cmdq_handle2, crtc,
					    mtk_crtc->gce_obj.client[CLIENT_CFG]);
			cmdq_pkt_set_event(cmdq_handle2,
					   mtk_crtc->gce_obj.event[EVENT_ESD_EOF]);
			cmdq_pkt_flush(cmdq_handle2);
			cmdq_pkt_destroy(cmdq_handle2);
		}
		goto done;
	}

	ret = mtk_ddp_comp_io_cmd(output_comp, NULL, ESD_CHECK_CMP,
				  (void *)mtk_crtc->gce_obj.buf.va_base +
					  DISP_SLOT_ESD_READ_BASE);
done:
	cmdq_pkt_destroy(cmdq_handle);
	return ret;
}
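
/*
 * EINT-based ESD check. The IRQ handler below records an external TE edge and
 * wakes the waiter; _mtk_esd_check_eint() treats a TE arriving within the
 * wait window as proof that the panel is still alive.
 */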
static irqreturn_t _esd_check_ext_te_irq_handler(int irq, void *data)
{
	struct mtk_drm_esd_ctx *esd_ctx = (struct mtk_drm_esd_ctx *)data;

	atomic_set(&esd_ctx->ext_te_event, 1);
	wake_up_interruptible(&esd_ctx->ext_te_wq);

	return IRQ_HANDLED;
}

static int _mtk_esd_check_eint(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_esd_ctx *esd_ctx = mtk_crtc->esd_ctx;
	int ret = 1;

	DDPINFO("[ESD]ESD check eint\n");

	if (unlikely(!esd_ctx)) {
		DDPPR_ERR("%s:invalid ESD context\n", __func__);
		return -EINVAL;
	}

	enable_irq(esd_ctx->eint_irq);

	/* if a TE arrives within the wait window (HZ/2), the ESD check passes */
	if (wait_event_interruptible_timeout(esd_ctx->ext_te_wq,
					     atomic_read(&esd_ctx->ext_te_event),
					     HZ / 2) > 0)
		ret = 0;

	disable_irq(esd_ctx->eint_irq);
	atomic_set(&esd_ctx->ext_te_event, 0);

	return ret;
}
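
/*
 * Look up the panel's TE pin in the device tree using the compatible string
 * reported by the output component, map and request its interrupt (kept
 * disabled until a check runs), and switch the pin to TE mode via pinctrl.
 */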
static int mtk_drm_request_eint(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_esd_ctx *esd_ctx = mtk_crtc->esd_ctx;
	struct mtk_ddp_comp *output_comp;
	struct device_node *node;
	u32 ints[2] = {0, 0};
	char *compat_str;
	int ret = 0;

	if (unlikely(!esd_ctx)) {
		DDPPR_ERR("%s:invalid ESD context\n", __func__);
		return -EINVAL;
	}

	output_comp = mtk_ddp_comp_request_output(mtk_crtc);
	if (unlikely(!output_comp)) {
		DDPPR_ERR("%s:invalid output comp\n", __func__);
		return -EINVAL;
	}

	mtk_ddp_comp_io_cmd(output_comp, NULL, REQ_ESD_EINT_COMPAT,
			    &compat_str);
	if (unlikely(!compat_str)) {
		DDPPR_ERR("%s: invalid compat string\n", __func__);
		return -EINVAL;
	}

	node = of_find_compatible_node(NULL, NULL, compat_str);
	if (unlikely(!node)) {
		DDPPR_ERR("can't find ESD TE eint compatible node\n");
		return -EINVAL;
	}

	of_property_read_u32_array(node, "debounce", ints, ARRAY_SIZE(ints));
	esd_ctx->eint_irq = irq_of_parse_and_map(node, 0);

	ret = request_irq(esd_ctx->eint_irq, _esd_check_ext_te_irq_handler,
			  IRQF_TRIGGER_RISING, "ESD_TE-eint", esd_ctx);
	if (ret) {
		DDPPR_ERR("eint irq line not available!\n");
		return ret;
	}
	disable_irq(esd_ctx->eint_irq);

	_set_state(crtc, "mode_te_te");

	return ret;
}
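
/*
 * Top-level ESD check: skip disabled CRTCs, pick the EINT or register-read
 * method based on the panel parameters and the current chk_mode, and
 * optionally toggle the method when the panel supports both.
 */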
static int mtk_drm_esd_check(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_panel_ext *panel_ext;
	struct mtk_drm_esd_ctx *esd_ctx = mtk_crtc->esd_ctx;
	int ret = 0;

	CRTC_MMP_EVENT_START(drm_crtc_index(crtc), esd_check, 0, 0);

	if (mtk_crtc->enabled == 0) {
		CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 0, 99);
		DDPINFO("[ESD] CRTC %d disabled, skip esd check\n",
			drm_crtc_index(crtc));
		goto done;
	}

	panel_ext = mtk_crtc->panel_ext;
	if (unlikely(!(panel_ext && panel_ext->params))) {
		DDPPR_ERR("can't find panel_ext handle\n");
		ret = -EINVAL;
		goto done;
	}

	/* Check panel EINT */
	if (panel_ext->params->cust_esd_check == 0 &&
	    esd_ctx->chk_mode == READ_EINT) {
		CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 1, 0);
		ret = _mtk_esd_check_eint(crtc);
	} else { /* READ LCM CMD */
		CRTC_MMP_MARK(drm_crtc_index(crtc), esd_check, 2, 0);
		ret = _mtk_esd_check_read(crtc);
	}

	/* switch ESD check mode */
	if (_can_switch_check_mode(crtc, panel_ext) &&
	    !mtk_crtc_is_frame_trigger_mode(crtc))
		esd_ctx->chk_mode =
			(esd_ctx->chk_mode == READ_EINT) ? READ_LCM : READ_EINT;

done:
	CRTC_MMP_EVENT_END(drm_crtc_index(crtc), esd_check, 0, ret);
	return ret;
}
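
/*
 * ESD recovery: power-cycle the panel and the display path (panel disable,
 * CRTC disable/enable, panel enable), then re-arm the GCE stream, CABC and
 * ESD end-of-frame events for frame-trigger (command) mode.
 */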
static int mtk_drm_esd_recover(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *output_comp;
	int ret = 0;

	CRTC_MMP_EVENT_START(drm_crtc_index(crtc), esd_recovery, 0, 0);

	if (crtc->state && !crtc->state->active) {
		DDPMSG("%s: crtc is inactive\n", __func__);
		goto done;
	}
	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_recovery, 0, 1);

	output_comp = mtk_ddp_comp_request_output(mtk_crtc);
	if (unlikely(!output_comp)) {
		DDPPR_ERR("%s: invalid output comp\n", __func__);
		ret = -EINVAL;
		goto done;
	}

	mtk_drm_idlemgr_kick(__func__, &mtk_crtc->base, 0);
	mtk_ddp_comp_io_cmd(output_comp, NULL, CONNECTOR_PANEL_DISABLE, NULL);

	mtk_drm_crtc_disable(crtc, true);
	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_recovery, 0, 2);

	mtk_drm_crtc_enable(crtc);
	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_recovery, 0, 3);

	mtk_ddp_comp_io_cmd(output_comp, NULL, CONNECTOR_PANEL_ENABLE, NULL);
	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_recovery, 0, 4);

	mtk_crtc_hw_block_ready(crtc);
	if (mtk_crtc_is_frame_trigger_mode(crtc)) {
		struct cmdq_pkt *cmdq_handle;

		mtk_crtc_pkt_create(&cmdq_handle, &mtk_crtc->base,
				    mtk_crtc->gce_obj.client[CLIENT_CFG]);
		cmdq_pkt_set_event(cmdq_handle,
				   mtk_crtc->gce_obj.event[EVENT_STREAM_DIRTY]);
		cmdq_pkt_set_event(cmdq_handle,
				   mtk_crtc->gce_obj.event[EVENT_CABC_EOF]);
		cmdq_pkt_set_event(cmdq_handle,
				   mtk_crtc->gce_obj.event[EVENT_ESD_EOF]);
		cmdq_pkt_flush(cmdq_handle);
		cmdq_pkt_destroy(cmdq_handle);
	}

	mtk_drm_idlemgr_kick(__func__, &mtk_crtc->base, 0);
	CRTC_MMP_MARK(drm_crtc_index(crtc), esd_recovery, 0, 5);

done:
	CRTC_MMP_EVENT_END(drm_crtc_index(crtc), esd_recovery, 0, ret);
	return 0;
}
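
/*
 * ESD worker thread: wakes up every ESD_CHECK_PERIOD ms, runs the check under
 * the commit and CRTC locks, and on failure retries recovery up to
 * ESD_TRY_CNT times before giving up and disabling ESD checking.
 */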
static int mtk_drm_esd_check_worker_kthread(void *data)
{
	struct sched_param param = {.sched_priority = 87};
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct mtk_drm_private *private;
	struct mtk_drm_crtc *mtk_crtc;
	struct mtk_drm_esd_ctx *esd_ctx;
	int ret = 0;
	int i = 0;
	int recovery_flg = 0;

	sched_setscheduler(current, SCHED_RR, &param);

	if (!crtc) {
		DDPPR_ERR("%s invalid CRTC context, stop thread\n", __func__);
		return -EINVAL;
	}

	private = crtc->dev->dev_private;
	mtk_crtc = to_mtk_crtc(crtc);
	esd_ctx = mtk_crtc->esd_ctx;

	while (1) {
		msleep(ESD_CHECK_PERIOD);
		ret = wait_event_interruptible(
			esd_ctx->check_task_wq,
			atomic_read(&esd_ctx->check_wakeup) &&
				(atomic_read(&esd_ctx->target_time) ||
				 esd_ctx->chk_mode == READ_EINT));
		if (ret < 0) {
			DDPINFO("[ESD]check thread woken up unexpectedly\n");
			continue;
		}

		mutex_lock(&private->commit.lock);
		DDP_MUTEX_LOCK(&mtk_crtc->lock, __func__, __LINE__);
		mtk_drm_trace_begin("esd");

		if (!mtk_drm_is_idle(crtc))
			atomic_set(&esd_ctx->target_time, 0);

		/* 1. esd check & recovery */
		if (!esd_ctx->chk_active) {
			DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
			mutex_unlock(&private->commit.lock);
			continue;
		}

		i = 0; /* repeat */
		do {
			ret = mtk_drm_esd_check(crtc);
			if (!ret) /* success */
				break;

			DDPPR_ERR("[ESD]esd check fail, will do esd recovery. try=%d\n",
				  i);
			mtk_drm_esd_recover(crtc);
			recovery_flg = 1;
		} while (++i < ESD_TRY_CNT);

		if (ret != 0) {
			DDPPR_ERR("[ESD]after esd recovery %d times, still fail, disable esd check\n",
				  ESD_TRY_CNT);
			mtk_disp_esd_check_switch(crtc, false);
			DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
			mutex_unlock(&private->commit.lock);
			break;
		} else if (recovery_flg) {
			DDPINFO("[ESD] esd recovery success\n");
			recovery_flg = 0;
		}
		mtk_drm_trace_end();
		DDP_MUTEX_UNLOCK(&mtk_crtc->lock, __func__, __LINE__);
		mutex_unlock(&private->commit.lock);

		/* 2. other check & recovery */
		if (kthread_should_stop())
			break;
	}
	return 0;
}
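
/*
 * Enable or disable ESD checking at runtime; enabling also wakes the worker
 * thread so a check can run immediately.
 */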
void mtk_disp_esd_check_switch(struct drm_crtc *crtc, bool enable)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_esd_ctx *esd_ctx = mtk_crtc->esd_ctx;

	if (!mtk_drm_helper_get_opt(priv->helper_opt,
				    MTK_DRM_OPT_ESD_CHECK_RECOVERY))
		return;

	if (unlikely(!esd_ctx)) {
		DDPINFO("%s:invalid ESD context, crtc id:%d\n",
			__func__, drm_crtc_index(crtc));
		return;
	}

	esd_ctx->chk_active = enable;
	atomic_set(&esd_ctx->check_wakeup, enable);
	if (enable)
		wake_up_interruptible(&esd_ctx->check_task_wq);
}

static void mtk_disp_esd_chk_deinit(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_esd_ctx *esd_ctx = mtk_crtc->esd_ctx;

	if (unlikely(!esd_ctx)) {
		DDPPR_ERR("%s:invalid ESD context\n", __func__);
		return;
	}

	/* Stop ESD task */
	mtk_disp_esd_check_switch(crtc, false);

	/* Stop ESD kthread */
	kthread_stop(esd_ctx->disp_esd_chk_task);

	kfree(esd_ctx);
}

static void mtk_disp_esd_chk_init(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_panel_ext *panel_ext;
	struct mtk_drm_esd_ctx *esd_ctx;

	panel_ext = mtk_crtc->panel_ext;
	if (!(panel_ext && panel_ext->params)) {
		DDPMSG("can't find panel_ext handle\n");
		return;
	}

	if (_lcm_need_esd_check(panel_ext) == 0)
		return;

	DDPINFO("create ESD thread\n");

	/* primary display check thread init */
	esd_ctx = kzalloc(sizeof(*esd_ctx), GFP_KERNEL);
	if (!esd_ctx) {
		DDPPR_ERR("allocate ESD context failed!\n");
		return;
	}
	mtk_crtc->esd_ctx = esd_ctx;

	esd_ctx->disp_esd_chk_task = kthread_create(
		mtk_drm_esd_check_worker_kthread, crtc, "disp_echk");

	init_waitqueue_head(&esd_ctx->check_task_wq);
	init_waitqueue_head(&esd_ctx->ext_te_wq);
	atomic_set(&esd_ctx->check_wakeup, 0);
	atomic_set(&esd_ctx->ext_te_event, 0);
	atomic_set(&esd_ctx->target_time, 0);
	esd_ctx->chk_mode = READ_EINT;
	mtk_drm_request_eint(crtc);

	wake_up_process(esd_ctx->disp_esd_chk_task);
}

void mtk_disp_chk_recover_deinit(struct drm_crtc *crtc)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	/* TODO: check function work in other CRTC & other connector */
	if (mtk_drm_helper_get_opt(priv->helper_opt,
				   MTK_DRM_OPT_ESD_CHECK_RECOVERY) &&
	    drm_crtc_index(&mtk_crtc->base) == 0)
		mtk_disp_esd_chk_deinit(crtc);
}

void mtk_disp_chk_recover_init(struct drm_crtc *crtc)
{
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	/* TODO: check function work in other CRTC & other connector */
	if (mtk_drm_helper_get_opt(priv->helper_opt,
				   MTK_DRM_OPT_ESD_CHECK_RECOVERY) &&
	    drm_crtc_index(&mtk_crtc->base) == 0)
		mtk_disp_esd_chk_init(crtc);
}