mtk_layering_rule.c

/*
 * Copyright (C) 2016 MediaTek Inc.
 * Copyright (C) 2021 XiaoMi, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
 */

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#if defined(CONFIG_MTK_DRAMC)
#include "mtk_dramc.h"
#endif
#include "mtk_layering_rule.h"
#ifdef MTK_FB_MMDVFS_SUPPORT
#include "mmdvfs_mgr.h"
#include "mmdvfs_pmqos.h"
#endif
#include "mtk_log.h"
#include "mtk_rect.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_graphics_base.h"

static struct layering_rule_ops l_rule_ops;
static struct layering_rule_info_t l_rule_info;
static DEFINE_SPINLOCK(hrt_table_lock);

/* Backup of the primary display drm_mtk_layer_config */
static struct drm_mtk_layer_config *g_input_config;

static int emi_bound_table[HRT_BOUND_NUM][HRT_LEVEL_NUM] = {
	/* HRT_BOUND_TYPE_LP4 */
	{100, 300, 500, 600},
};

static int larb_bound_table[HRT_BOUND_NUM][HRT_LEVEL_NUM] = {
	/* HRT_BOUND_TYPE_LP4 */
	{100, 300, 500, 600},
};
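/*
 * Note on units: judging from layering_get_valid_hrt() below (which clamps
 * its result to 200 and describes that as "less than 2 layers") and from
 * copy_hrt_bound_table() (which caps the value at ovl_bound * 100), the
 * bound-table entries are HRT weights in steps of 100 per full-screen layer.
 */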
/**
 * The layer mapping tables define the OVL layer dispatch rule for both the
 * primary and secondary display. Each table is indexed by HRT table type,
 * and each entry is a 16-bit mask of OVL layer positions.
 */
static uint16_t layer_mapping_table[HRT_TB_NUM] = {
	0x0003, 0x007E, 0x007A, 0x0001
};

static uint16_t layer_mapping_table_vds_switch[HRT_TB_NUM] = {
	0x0078, 0x0078, 0x0078, 0x0078
};
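/*
 * Reading the masks (based on how get_mapping_table() walks them for
 * DISP_HW_LAYER_TB): bit i set means OVL layer position i may be used.
 * For example, 0x007E (binary 0111 1110) selects positions 1..6, while
 * 0x0003 selects positions 0..1.
 */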
/**
 * The LARB mapping table represents the relation between LARB and OVL.
 */
static uint16_t larb_mapping_table[HRT_TB_NUM] = {
	0x0001, 0x0010, 0x0010, 0x0001
};

static uint16_t larb_mapping_tb_vds_switch[HRT_TB_NUM] = {
	0x0010, 0x0010, 0x0010, 0x0001
};

/**
 * The OVL mapping table is used to get the OVL index of the corresponding
 * layer. A set bit marks the position of the last layer of an OVL engine.
 */
static uint16_t ovl_mapping_table[HRT_TB_NUM] = {
	0x0002, 0x0045, 0x0045, 0x0001
};

static uint16_t ovl_mapping_tb_vds_switch[HRT_TB_NUM] = {
	0x0045, 0x0045, 0x0045, 0x0045
};

#define GET_SYS_STATE(sys_state) \
	((l_rule_info.hrt_sys_state >> sys_state) & 0x1)
static void layering_rule_senario_decision(unsigned int scn_decision_flag,
					   unsigned int scale_num)
{
	/* TODO: need MMP support */
#if 0
	mmprofile_log_ex(ddp_mmp_get_events()->hrt, MMPROFILE_FLAG_START,
			 l_rule_info.addon_scn[0], l_rule_info.layer_tb_idx |
			 (l_rule_info.bound_tb_idx << 16));
#endif

	l_rule_info.primary_fps = 60;
	l_rule_info.bound_tb_idx = HRT_BOUND_TYPE_LP4;

	if (scn_decision_flag & SCN_NEED_GAME_PQ)
		l_rule_info.addon_scn[HRT_PRIMARY] = GAME_PQ;
	else if (scn_decision_flag & SCN_NEED_VP_PQ)
		l_rule_info.addon_scn[HRT_PRIMARY] = VP_PQ;
	else if (scale_num == 1)
		l_rule_info.addon_scn[HRT_PRIMARY] = ONE_SCALING;
	else if (scale_num == 2)
		l_rule_info.addon_scn[HRT_PRIMARY] = TWO_SCALING;
	else
		l_rule_info.addon_scn[HRT_PRIMARY] = NONE;

	if (scn_decision_flag & SCN_TRIPLE_DISP) {
		l_rule_info.addon_scn[HRT_SECONDARY] = TRIPLE_DISP;
		l_rule_info.addon_scn[HRT_THIRD] = TRIPLE_DISP;
	} else {
		l_rule_info.addon_scn[HRT_SECONDARY] = NONE;
		l_rule_info.addon_scn[HRT_THIRD] = NONE;
	}

	/* TODO: need MMP support */
#if 0
	mmprofile_log_ex(ddp_mmp_get_events()->hrt, MMPROFILE_FLAG_END,
			 l_rule_info.addon_scn[0], l_rule_info.layer_tb_idx |
			 (l_rule_info.bound_tb_idx << 16));
#endif
}
/* An OVL engine supports at most one YUV layer */
static void filter_by_yuv_layers(struct drm_mtk_layering_info *disp_info)
{
	unsigned int disp_idx = 0, i = 0;
	struct drm_mtk_layer_config *info;
	unsigned int yuv_gpu_cnt;
	unsigned int yuv_layer_gpu[12];
	int yuv_layer_ovl = -1;

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++) {
		yuv_layer_ovl = -1;
		yuv_gpu_cnt = 0;
		/* count the YUV layers that are candidates for GPU rollback */
		for (i = 0; i < disp_info->layer_num[disp_idx]; i++) {
			info = &(disp_info->input_config[disp_idx][i]);
			if (mtk_is_gles_layer(disp_info, disp_idx, i))
				continue;
			if (mtk_is_yuv(info->src_fmt)) {
				if (info->secure == 1 &&
				    yuv_layer_ovl < 0) {
					yuv_layer_ovl = i;
				} else {
					yuv_layer_gpu[yuv_gpu_cnt] = i;
					yuv_gpu_cnt++;
				}
			}
		}

		if (yuv_gpu_cnt == 0)
			continue;

		if (yuv_layer_ovl >= 0) {
			/* a secure layer exists, roll back the others to GPU */
			for (i = 0; i < yuv_gpu_cnt; i++)
				mtk_rollback_layer_to_GPU(disp_info,
					disp_idx, yuv_layer_gpu[i]);
		} else {
			/* keep the first normal YUV layer,
			 * roll back the others to GPU
			 */
			for (i = 1; i < yuv_gpu_cnt; i++)
				mtk_rollback_layer_to_GPU(disp_info,
					disp_idx, yuv_layer_gpu[i]);
		}
	}
}
static void filter_2nd_display(struct drm_mtk_layering_info *disp_info)
{
	unsigned int i, j, layer_cnt = 0;

	for (i = HRT_SECONDARY; i < HRT_TYPE_NUM; i++) {
		unsigned int max_layer_cnt = SECONDARY_OVL_LAYER_NUM;

		if (is_triple_disp(disp_info) && i == HRT_SECONDARY)
			max_layer_cnt = 1;

		for (j = 0; j < disp_info->layer_num[i]; j++) {
			if (mtk_is_gles_layer(disp_info, i, j))
				continue;
			layer_cnt++;
			if (layer_cnt > max_layer_cnt)
				mtk_rollback_layer_to_GPU(disp_info, i, j);
		}
	}
}
static bool is_ovl_wcg(enum mtk_drm_dataspace ds)
{
	bool ret = false;

	switch (ds) {
	case MTK_DRM_DATASPACE_V0_SCRGB:
	case MTK_DRM_DATASPACE_V0_SCRGB_LINEAR:
	case MTK_DRM_DATASPACE_DISPLAY_P3:
		ret = true;
		break;
	default:
		ret = false;
		break;
	}

	return ret;
}

static bool is_ovl_standard(struct drm_device *dev, enum mtk_drm_dataspace ds)
{
	struct mtk_drm_private *priv = dev->dev_private;
	enum mtk_drm_dataspace std = ds & MTK_DRM_DATASPACE_STANDARD_MASK;
	bool ret = false;

	if (!mtk_drm_helper_get_opt(priv->helper_opt, MTK_DRM_OPT_OVL_WCG) &&
	    is_ovl_wcg(ds))
		return ret;

	switch (std) {
	case MTK_DRM_DATASPACE_STANDARD_BT2020:
	case MTK_DRM_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE:
		ret = false;
		break;
	default:
		ret = true;
		break;
	}

	return ret;
}
static void filter_by_wcg(struct drm_device *dev,
			  struct drm_mtk_layering_info *disp_info)
{
	unsigned int i, j;
	struct drm_mtk_layer_config *c;

	for (i = 0; i < disp_info->layer_num[HRT_PRIMARY]; i++) {
		c = &disp_info->input_config[HRT_PRIMARY][i];
		if (is_ovl_standard(dev, c->dataspace) ||
		    mtk_has_layer_cap(c, MTK_MDP_HDR_LAYER))
			continue;

		mtk_rollback_layer_to_GPU(disp_info, HRT_PRIMARY, i);
	}

	for (i = HRT_SECONDARY; i < HRT_TYPE_NUM; i++)
		for (j = 0; j < disp_info->layer_num[i]; j++) {
			c = &disp_info->input_config[i][j];
			if (!is_ovl_wcg(c->dataspace) &&
			    (is_ovl_standard(dev, c->dataspace) ||
			     mtk_has_layer_cap(c, MTK_MDP_HDR_LAYER)))
				continue;

			mtk_rollback_layer_to_GPU(disp_info, i, j);
		}
}
static bool can_be_compress(uint32_t format)
{
#if defined(CONFIG_MACH_MT6873) || defined(CONFIG_MACH_MT6853)
	if (mtk_is_yuv(format))
		return false;
#else
	if (mtk_is_yuv(format) || format == DRM_FORMAT_RGB565)
		return false;
#endif

	return true;
}
static void filter_by_fbdc(struct drm_mtk_layering_info *disp_info)
{
	unsigned int i, j;
	struct drm_mtk_layer_config *c;

	/* primary: check fmt */
	for (i = 0; i < disp_info->layer_num[HRT_PRIMARY]; i++) {
		c = &(disp_info->input_config[HRT_PRIMARY][i]);
		if (!c->compress)
			continue;
		if (can_be_compress(c->src_fmt) == 0)
			mtk_rollback_compress_layer_to_GPU(disp_info,
							   HRT_PRIMARY, i);
	}

	/* secondary: roll back all compressed layers */
	for (i = HRT_SECONDARY; i < HRT_TYPE_NUM; i++)
		for (j = 0; j < disp_info->layer_num[i]; j++) {
			c = &(disp_info->input_config[i][j]);
			/* if the layer is already a GLES layer,
			 * do not set NO_FBDC, to reduce BW access
			 */
			if (!c->compress ||
			    mtk_is_gles_layer(disp_info, i, j))
				continue;

			mtk_rollback_compress_layer_to_GPU(disp_info, i, j);
		}
}
static bool filter_by_hw_limitation(struct drm_device *dev,
				    struct drm_mtk_layering_info *disp_info)
{
	bool flag = false;

	filter_by_wcg(dev, disp_info);
	filter_by_yuv_layers(disp_info);
	/* Is this necessary? */
	filter_2nd_display(disp_info);

	return flag;
}
static uint16_t get_mapping_table(struct drm_device *dev, int disp_idx,
				  enum DISP_HW_MAPPING_TB_TYPE tb_type,
				  int param);
static int layering_get_valid_hrt(struct drm_crtc *crtc,
				  struct drm_display_mode *mode);

static void copy_hrt_bound_table(struct drm_mtk_layering_info *disp_info,
				 int is_larb, int *hrt_table,
				 struct drm_device *dev)
{
	unsigned long flags = 0;
	int valid_num, ovl_bound, i;
	struct drm_crtc *crtc;
	struct drm_display_mode *mode;

	/* Not used in 6779 */
	if (is_larb)
		return;

	drm_for_each_crtc(crtc, dev) {
		if (drm_crtc_index(crtc) == 0)
			break;
	}

	mode = mtk_drm_crtc_avail_disp_mode(crtc,
					    disp_info->disp_mode_idx[0]);

	/* update table if hrt bw is enabled */
	spin_lock_irqsave(&hrt_table_lock, flags);
	valid_num = layering_get_valid_hrt(crtc, mode);
	ovl_bound = mtk_get_phy_layer_limit(
		get_mapping_table(dev, 0, DISP_HW_LAYER_TB, MAX_PHY_OVL_CNT));
	valid_num = min(valid_num, ovl_bound * 100);
	for (i = 0; i < HRT_LEVEL_NUM; i++)
		emi_bound_table[l_rule_info.bound_tb_idx][i] = valid_num;
	spin_unlock_irqrestore(&hrt_table_lock, flags);

	for (i = 0; i < HRT_LEVEL_NUM; i++)
		hrt_table[i] = emi_bound_table[l_rule_info.bound_tb_idx][i];
}
static int *get_bound_table(enum DISP_HW_MAPPING_TB_TYPE tb_type)
{
	switch (tb_type) {
	case DISP_HW_EMI_BOUND_TB:
		return emi_bound_table[l_rule_info.bound_tb_idx];
	case DISP_HW_LARB_BOUND_TB:
		return larb_bound_table[l_rule_info.bound_tb_idx];
	default:
		break;
	}

	return NULL;
}
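/*
 * Look up the HW mapping for @disp_idx according to the add-on scenario
 * currently selected for that display:
 *   DISP_HW_OVL_TB   - OVL mapping mask
 *   DISP_HW_LARB_TB  - LARB mapping mask
 *   DISP_HW_LAYER_TB - mask of the first @param usable layer positions
 * The *_vds_switch tables are used instead when the virtual-display path
 * switch option is enabled and a path switch is pending.
 */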
static uint16_t get_mapping_table(struct drm_device *dev, int disp_idx,
				  enum DISP_HW_MAPPING_TB_TYPE tb_type,
				  int param)
{
	uint16_t map = 0;
	uint16_t tmp_map = 0;
	int i;
	int cnt = 0;
	struct drm_crtc *crtc;
	const struct mtk_addon_scenario_data *addon_data = NULL;
	struct mtk_drm_private *priv = dev->dev_private;

	drm_for_each_crtc(crtc, dev) {
		if (drm_crtc_index(crtc) == disp_idx) {
			addon_data = mtk_addon_get_scenario_data(__func__,
					crtc,
					l_rule_info.addon_scn[disp_idx]);
			break;
		}
	}

	if (!addon_data) {
		DDPPR_ERR("disp_idx:%d cannot get addon data\n", disp_idx);
		return 0;
	}

	switch (tb_type) {
	case DISP_HW_OVL_TB:
		map = ovl_mapping_table[addon_data->hrt_type];
		if (mtk_drm_helper_get_opt(priv->helper_opt,
					   MTK_DRM_OPT_VDS_PATH_SWITCH) &&
		    priv->need_vds_path_switch)
			map = ovl_mapping_tb_vds_switch[addon_data->hrt_type];
		break;
	case DISP_HW_LARB_TB:
		map = larb_mapping_table[addon_data->hrt_type];
		if (mtk_drm_helper_get_opt(priv->helper_opt,
					   MTK_DRM_OPT_VDS_PATH_SWITCH) &&
		    priv->need_vds_path_switch)
			map = larb_mapping_tb_vds_switch[addon_data->hrt_type];
		break;
	case DISP_HW_LAYER_TB:
		if (param <= MAX_PHY_OVL_CNT && param >= 0) {
			tmp_map = layer_mapping_table[addon_data->hrt_type];
			if (mtk_drm_helper_get_opt(priv->helper_opt,
						   MTK_DRM_OPT_VDS_PATH_SWITCH) &&
			    priv->need_vds_path_switch)
				tmp_map = layer_mapping_table_vds_switch[
						addon_data->hrt_type];

			/* keep only the first @param set bits of the mask */
			for (i = 0, map = 0; i < 16; i++) {
				if (cnt == param)
					break;
				if (tmp_map & 0x1) {
					map |= (0x1 << i);
					cnt++;
				}
				tmp_map >>= 1;
			}
		}
		break;
	default:
		break;
	}

	return map;
}
void mtk_layering_rule_init(struct drm_device *dev)
{
	struct mtk_drm_private *private = dev->dev_private;

	l_rule_info.primary_fps = 60;
	l_rule_info.hrt_idx = 0;
	mtk_register_layering_rule_ops(&l_rule_ops, &l_rule_info);

	mtk_set_layering_opt(
		LYE_OPT_RPO,
		mtk_drm_helper_get_opt(private->helper_opt, MTK_DRM_OPT_RPO));
	mtk_set_layering_opt(LYE_OPT_EXT_LAYER,
			     mtk_drm_helper_get_opt(private->helper_opt,
						    MTK_DRM_OPT_OVL_EXT_LAYER));
	mtk_set_layering_opt(LYE_OPT_CLEAR_LAYER,
			     mtk_drm_helper_get_opt(private->helper_opt,
						    MTK_DRM_OPT_CLEAR_LAYER));
}
static bool _rollback_all_to_GPU_for_idle(struct drm_device *dev)
{
	struct mtk_drm_private *priv = dev->dev_private;

	/* Slightly modify this function for TUI */
	if (atomic_read(&priv->rollback_all))
		return true;

	if (!mtk_drm_helper_get_opt(priv->helper_opt,
				    MTK_DRM_OPT_IDLEMGR_BY_REPAINT) ||
	    !atomic_read(&priv->idle_need_repaint)) {
		atomic_set(&priv->idle_need_repaint, 0);
		return false;
	}

	atomic_set(&priv->idle_need_repaint, 0);
	return true;
}
unsigned long long _layering_get_frame_bw(struct drm_crtc *crtc,
					  struct drm_display_mode *mode)
{
	static unsigned long long bw_base;
	static int fps;
	unsigned int vact_fps;
	int width = mode->hdisplay, height = mode->vdisplay;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->panel_ext && mtk_crtc->panel_ext->params) {
		struct mtk_panel_params *params;

		params = mtk_crtc->panel_ext->params;
		if (params->dyn_fps.switch_en == 1 &&
		    params->dyn_fps.vact_timing_fps != 0)
			vact_fps = params->dyn_fps.vact_timing_fps;
		else
			vact_fps = mode->vrefresh;
	} else
		vact_fps = mode->vrefresh;
	DDPINFO("%s,vrefresh = %d", __func__, vact_fps);

	if (fps == vact_fps)
		return bw_base;

	fps = vact_fps;
	bw_base = (unsigned long long)width * height * fps * 125 * 4;
	bw_base /= 100 * 1024 * 1024;

	return bw_base;
}
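/*
 * Illustrative example for _layering_get_frame_bw() (assumed mode, not from
 * the source): a 1080x2400 panel at 60 fps gives
 *   bw_base = 1080 * 2400 * 60 * 125 * 4 / (100 * 1024 * 1024) ~= 741,
 * i.e. roughly 741 MB/s assuming 4 bytes per pixel and the built-in
 * 125/100 = 1.25x overhead factor.
 */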
static int layering_get_valid_hrt(struct drm_crtc *crtc,
				  struct drm_display_mode *mode)
{
	unsigned long long dvfs_bw = 0;

#ifdef MTK_FB_MMDVFS_SUPPORT
	unsigned long long tmp = 0;
	struct mtk_ddp_comp *output_comp;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	dvfs_bw = mm_hrt_get_available_hrt_bw(get_virtual_port(VIRTUAL_DISP));
	if (dvfs_bw == 0xffffffffffffffff) {
		DDPPR_ERR("mm_hrt_get_available_hrt_bw=-1\n");
		return 600;
	}

	dvfs_bw *= 10000;

	output_comp = mtk_ddp_comp_request_output(mtk_crtc);
	if (output_comp)
		mtk_ddp_comp_io_cmd(output_comp, NULL,
				    GET_FRAME_HRT_BW_BY_DATARATE, &tmp);
	if (!tmp) {
		DDPPR_ERR("Get frame hrt bw by datarate is zero\n");
		return 600;
	}

	dvfs_bw /= tmp * 100;

	/* error handling when requested BW is less than 2 layers */
	if (dvfs_bw < 200) {
		// disp_aee_print("avail BW less than 2 layers, BW: %llu\n",
		// dvfs_bw);
		DDPPR_ERR("avail BW less than 2 layers, BW: %llu\n", dvfs_bw);
		dvfs_bw = 200;
	}

	DDPINFO("get avail HRT BW:%u : %llu %llu\n",
		mm_hrt_get_available_hrt_bw(get_virtual_port(VIRTUAL_DISP)),
		dvfs_bw, tmp);
#else
	dvfs_bw = 600;
#endif

	return dvfs_bw;
}
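/*
 * layering_get_valid_hrt() above scales the available HRT BW by 10000 and
 * divides by (per-frame BW * 100), so the result is roughly
 * 100 * available_BW / one_layer_frame_BW, clamped to at least 200
 * ("2 layers"); 600 is returned when MMDVFS is unavailable or the BW query
 * fails.
 */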
void mtk_update_layering_opt_by_disp_opt(enum MTK_DRM_HELPER_OPT opt, int value)
{
	switch (opt) {
	case MTK_DRM_OPT_OVL_EXT_LAYER:
		mtk_set_layering_opt(LYE_OPT_EXT_LAYER, value);
		break;
	case MTK_DRM_OPT_RPO:
		mtk_set_layering_opt(LYE_OPT_RPO, value);
		break;
	case MTK_DRM_OPT_CLEAR_LAYER:
		mtk_set_layering_opt(LYE_OPT_CLEAR_LAYER, value);
		break;
	default:
		break;
	}
}

unsigned int _layering_rule_get_hrt_idx(void)
{
	return l_rule_info.hrt_idx;
}

#define SET_CLIP_R(clip, clip_r) (clip |= ((clip_r & 0xFF) << 0))
#define SET_CLIP_B(clip, clip_b) (clip |= ((clip_b & 0xFF) << 8))
#define SET_CLIP_L(clip, clip_l) (clip |= ((clip_l & 0xFF) << 16))
#define SET_CLIP_T(clip, clip_t) (clip |= ((clip_t & 0xFF) << 24))

#define GET_CLIP_R(clip) ((clip >> 0) & 0xFF)
#define GET_CLIP_B(clip) ((clip >> 8) & 0xFF)
#define GET_CLIP_L(clip) ((clip >> 16) & 0xFF)
#define GET_CLIP_T(clip) ((clip >> 24) & 0xFF)
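/*
 * As the macros above show, the clip value packs the four clip amounts into
 * one 32-bit word: right in bits 0-7, bottom in bits 8-15, left in bits
 * 16-23 and top in bits 24-31.
 */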
static void calc_clip_x(struct drm_mtk_layer_config *cfg)
{
	unsigned int tile_w = 16;
	unsigned int src_x_s, src_x_e; /* aligned */
	unsigned int clip_l = 0, clip_r = 0;

	src_x_s = (cfg->src_offset_x) & ~(tile_w - 1);
	src_x_e = (cfg->src_offset_x + cfg->src_width + tile_w - 1) &
		  ~(tile_w - 1);

	clip_l = cfg->src_offset_x - src_x_s;
	clip_r = src_x_e - cfg->src_offset_x - cfg->src_width;

	SET_CLIP_R(cfg->clip, clip_r);
	SET_CLIP_L(cfg->clip, clip_l);
}

static void calc_clip_y(struct drm_mtk_layer_config *cfg)
{
	unsigned int tile_h = 4;
	unsigned int src_y_s, src_y_e; /* aligned */
	unsigned int clip_t = 0, clip_b = 0;

	src_y_s = (cfg->src_offset_y) & ~(tile_h - 1);
	src_y_e = (cfg->src_offset_y + cfg->src_height + tile_h - 1) &
		  ~(tile_h - 1);

	clip_t = cfg->src_offset_y - src_y_s;
	clip_b = src_y_e - cfg->src_offset_y - cfg->src_height;

	SET_CLIP_T(cfg->clip, clip_t);
	SET_CLIP_B(cfg->clip, clip_b);
}
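/*
 * Worked example for calc_clip_x() (illustrative numbers): with
 * src_offset_x = 21 and src_width = 100 and a 16-pixel tile, the aligned
 * span is [16, 128), so clip_l = 21 - 16 = 5 and clip_r = 128 - 21 - 100 = 7.
 * calc_clip_y() does the same with a 4-line tile height.
 */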
static void backup_input_config(struct drm_mtk_layering_info *disp_info)
{
	unsigned int size = 0;

	/* free the previous backup before use */
	if (g_input_config != 0) {
		kfree(g_input_config);
		g_input_config = 0;
	}

	if (disp_info->layer_num[HRT_PRIMARY] <= 0 ||
	    disp_info->input_config[HRT_PRIMARY] == NULL)
		return;

	/* allocate memory */
	size = sizeof(struct drm_mtk_layer_config) *
	       disp_info->layer_num[HRT_PRIMARY];
	g_input_config = kzalloc(size, GFP_KERNEL);
	if (g_input_config == 0) {
		DDPPR_ERR("%s: allocate memory fail\n", __func__);
		return;
	}

	/* copy the primary display layer configs */
	memcpy(g_input_config, disp_info->input_config[HRT_PRIMARY], size);
}
static void fbdc_pre_calculate(struct drm_mtk_layering_info *disp_info)
{
	unsigned int i = 0;
	struct drm_mtk_layer_config *cfg = NULL;

	/* backup g_input_config */
	backup_input_config(disp_info);

	for (i = 0; i < disp_info->layer_num[HRT_PRIMARY]; i++) {
		cfg = &(disp_info->input_config[HRT_PRIMARY][i]);
		cfg->clip = 0;

		if (!cfg->compress)
			continue;
		if (mtk_is_gles_layer(disp_info, HRT_PRIMARY, i))
			continue;
		if (cfg->src_height != cfg->dst_height ||
		    cfg->src_width != cfg->dst_width)
			continue;

		calc_clip_x(cfg);
		calc_clip_y(cfg);
	}
}
static void
fbdc_adjust_layout_for_ext_grouping(struct drm_mtk_layering_info *disp_info)
{
	int i = 0;
	struct drm_mtk_layer_config *c;
	unsigned int dst_offset_x, dst_offset_y;
	unsigned int clip_r, clip_b, clip_l, clip_t;

	for (i = 0; i < disp_info->layer_num[HRT_PRIMARY]; i++) {
		c = &(disp_info->input_config[HRT_PRIMARY][i]);

		/* skip uncompressed, GLES and resized layers */
		if (!c->compress ||
		    mtk_is_gles_layer(disp_info, HRT_PRIMARY, i) ||
		    (c->src_height != c->dst_height) ||
		    (c->src_width != c->dst_width))
			continue;

		dst_offset_x = c->dst_offset_x;
		dst_offset_y = c->dst_offset_y;
		clip_r = GET_CLIP_R(c->clip);
		clip_b = GET_CLIP_B(c->clip);
		clip_l = GET_CLIP_L(c->clip);
		clip_t = GET_CLIP_T(c->clip);

		/* boundary handling */
		if (dst_offset_x < clip_l)
			c->dst_offset_x = 0;
		else
			c->dst_offset_x -= clip_l;
		if (dst_offset_y < clip_t)
			c->dst_offset_y = 0;
		else
			c->dst_offset_y -= clip_t;

		c->dst_width += (clip_r + dst_offset_x - c->dst_offset_x);
		c->dst_height += (clip_b + dst_offset_y - c->dst_offset_y);
	}
}
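/*
 * Helper for fbdc_adjust_layout_for_overlap_calc(): within the ext-layer
 * group of layer @cur, find the layer whose dst_offset_y is the closest one
 * greater than cur's, so pre-fetch lines can be accounted for it. Returns
 * the layer index, or -1 when there is no such layer.
 */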
static int get_below_ext_layer(struct drm_mtk_layering_info *disp_info,
			       int disp_idx, int cur)
{
	struct drm_mtk_layer_config *c, *tmp_c;
	int phy_id = -1, ext_id = -1, l_dst_offset_y = -1, i;

	if (disp_idx < 0)
		return -1;

	c = &(disp_info->input_config[disp_idx][cur]);

	/* search for the phy layer */
	if (c->ext_sel_layer != -1) {
		for (i = cur - 1; i >= 0; i--) {
			tmp_c = &(disp_info->input_config[disp_idx][i]);
			if (tmp_c->ext_sel_layer == -1)
				phy_id = i;
		}
		if (phy_id == -1) /* error handling */
			return -1;
	} else
		phy_id = cur;

	/* traverse the ext layers below cur */
	tmp_c = &(disp_info->input_config[disp_idx][phy_id]);
	if (tmp_c->dst_offset_y > c->dst_offset_y) {
		ext_id = phy_id;
		l_dst_offset_y = tmp_c->dst_offset_y;
	}

	for (i = phy_id + 1; i <= phy_id + 3; i++) {
		/* skip itself */
		if (i == cur)
			continue;
		/* hit max num, stop */
		if (i >= disp_info->layer_num[disp_idx])
			break;
		/* hit gles, stop */
		if (mtk_is_gles_layer(disp_info, disp_idx, i))
			break;
		tmp_c = &(disp_info->input_config[disp_idx][i]);
		/* hit phy layer, stop */
		if (tmp_c->ext_sel_layer == -1)
			break;
		if (tmp_c->dst_offset_y > c->dst_offset_y) {
			if (l_dst_offset_y == -1 ||
			    l_dst_offset_y > tmp_c->dst_offset_y) {
				ext_id = i;
				l_dst_offset_y = tmp_c->dst_offset_y;
			}
		}
	}

	return ext_id;
}
static void
fbdc_adjust_layout_for_overlap_calc(struct drm_mtk_layering_info *disp_info)
{
	int i = 0, ext_id = 0;
	struct drm_mtk_layer_config *c, *ext_c;

	/* adjust the dst layout because of the src clip */
	fbdc_adjust_layout_for_ext_grouping(disp_info);

	/* adjust the dst layout because of buffer pre-fetch */
	for (i = 0; i < disp_info->layer_num[HRT_PRIMARY]; i++) {
		/* skip gles layer */
		if (mtk_is_gles_layer(disp_info, HRT_PRIMARY, i))
			continue;

		c = &(disp_info->input_config[HRT_PRIMARY][i]);

		/* skip resize layer */
		if ((c->src_height != c->dst_height) ||
		    (c->src_width != c->dst_width))
			continue;

		/* if compressed, shift up 4 lines because of pre-fetching */
		if (c->compress) {
			if (c->dst_height > 4)
				c->dst_height -= 4;
			else
				c->dst_height = 1;
		}

		/* if there is a compressed ext layer below this layer,
		 * add pre-fetch lines behind it
		 */
		ext_id = get_below_ext_layer(disp_info, HRT_PRIMARY, i);
		if (mtk_is_layer_id_valid(disp_info, HRT_PRIMARY, ext_id) ==
		    true) {
			ext_c = &(disp_info->input_config[HRT_PRIMARY][ext_id]);
			if (ext_c->compress)
				c->dst_height += (GET_CLIP_T(ext_c->clip) + 4);
		}
	}
}

static void fbdc_adjust_layout(struct drm_mtk_layering_info *disp_info,
			       enum ADJUST_LAYOUT_PURPOSE p)
{
	if (p == ADJUST_LAYOUT_EXT_GROUPING)
		fbdc_adjust_layout_for_ext_grouping(disp_info);
	else
		fbdc_adjust_layout_for_overlap_calc(disp_info);
}
static void fbdc_restore_layout(struct drm_mtk_layering_info *dst_info,
				enum ADJUST_LAYOUT_PURPOSE p)
{
	int i = 0;
	struct drm_mtk_layer_config *layer_info_s, *layer_info_d;

	if (g_input_config == 0)
		return;

	for (i = 0; i < dst_info->layer_num[HRT_PRIMARY]; i++) {
		layer_info_d = &(dst_info->input_config[HRT_PRIMARY][i]);
		layer_info_s = &(g_input_config[i]);
		layer_info_d->dst_offset_x = layer_info_s->dst_offset_x;
		layer_info_d->dst_offset_y = layer_info_s->dst_offset_y;
		layer_info_d->dst_width = layer_info_s->dst_width;
		layer_info_d->dst_height = layer_info_s->dst_height;
	}
}
static struct layering_rule_ops l_rule_ops = {
	.scenario_decision = layering_rule_senario_decision,
	.get_bound_table = get_bound_table,
	/* The HRT table can change at runtime, so do not use get_hrt_bound
	 * in layering_rule_base. Instead, copy the HRT table before the
	 * calculation.
	 */
	.get_hrt_bound = NULL,
	.copy_hrt_bound_table = copy_hrt_bound_table,
	.get_mapping_table = get_mapping_table,
	.rollback_to_gpu_by_hw_limitation = filter_by_hw_limitation,
	.rollback_all_to_GPU_for_idle = _rollback_all_to_GPU_for_idle,
	.fbdc_pre_calculate = fbdc_pre_calculate,
	.fbdc_adjust_layout = fbdc_adjust_layout,
	.fbdc_restore_layout = fbdc_restore_layout,
	.fbdc_rule = filter_by_fbdc,
};