mtk_layering_rule_base.c

/*
 * Copyright (C) 2016 MediaTek Inc.
 * Copyright (C) 2021 XiaoMi, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
 */

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <mt-plat/mtk_chip.h>
#include <drm/drm_modes.h>
#include <drm/drm_property.h>
#ifdef CONFIG_MTK_DCS
#include <mt-plat/mtk_meminfo.h>
#endif
#include "mtk_layering_rule.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_assert.h"
#include "mtk_log.h"
#include "mtk_drm_mmp.h"
#include "mtk_drm_fbdev.h"
#define CREATE_TRACE_POINTS
#include "mtk_layer_layout_trace.h"
#include "mtk_drm_gem.h"

static struct drm_mtk_layering_info layering_info;
#ifdef HRT_UT_DEBUG
static int debug_resolution_level;
#endif
static struct layering_rule_info_t *l_rule_info;
static struct layering_rule_ops *l_rule_ops;
static int ext_id_tuning(struct drm_device *dev,
			 struct drm_mtk_layering_info *disp_info,
			 int disp_idx);
static unsigned int roll_gpu_for_idle;
static int g_emi_bound_table[HRT_LEVEL_NUM];

#define RSZ_TILE_LENGTH 1440
#define RSZ_IN_MAX_HEIGHT 4096
#define DISP_RSZ_LAYER_NUM 2

static struct {
	enum LYE_HELPER_OPT opt;
	unsigned int val;
	const char *desc;
} help_info[] = {
	{LYE_OPT_DUAL_PIPE, 0, "LYE_OPT_DUAL_PIPE"},
	{LYE_OPT_EXT_LAYER, 0, "LYE_OPT_EXTENDED_LAYER"},
	{LYE_OPT_RPO, 0, "LYE_OPT_RPO"},
	{LYE_OPT_CLEAR_LAYER, 0, "LYE_OPT_CLEAR_LAYER"},
};

void mtk_set_layering_opt(enum LYE_HELPER_OPT opt, int value)
{
	if (opt >= LYE_OPT_NUM) {
		DDPMSG("%s invalid layering opt:%d\n", __func__, opt);
		return;
	}
	if (value < 0) {
		DDPPR_ERR("%s invalid opt value:%d\n", __func__, value);
		return;
	}
	help_info[opt].val = !!value;
}

static int get_layering_opt(enum LYE_HELPER_OPT opt)
{
	if (opt >= LYE_OPT_NUM) {
		DDPMSG("%s invalid layering opt:%d\n", __func__, opt);
		return -1;
	}
	return help_info[opt].val;
}
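
/*
 * Usage sketch (not part of the driver): each LYE_HELPER_OPT is a boolean
 * feature switch stored in help_info[]. A platform setup path could enable
 * extended layers and RPO roughly like this; the call site below is
 * hypothetical, only the two functions above are real:
 *
 *	mtk_set_layering_opt(LYE_OPT_EXT_LAYER, 1);
 *	mtk_set_layering_opt(LYE_OPT_RPO, 1);
 *	if (get_layering_opt(LYE_OPT_RPO))
 *		enable_rpo_path();	(hypothetical helper)
 */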

/*
 * bool is_decouple_path(struct drm_mtk_layering_info *disp_info) {
 *	if (disp_info->disp_mode[HRT_PRIMARY] != 1)
 *		return true;
 *	else
 *		return false;
 * }
 */

/*
 * static bool is_argb_fmt(uint32_t format) {
 *	switch (format) {
 *	case DRM_FORMAT_ARGB8888:
 *	case DRM_FORMAT_ABGR8888:
 *	case DRM_FORMAT_RGBA8888:
 *	case DRM_FORMAT_BGRA8888:
 *		return true;
 *	default:
 *		return false;
 *	}
 * }
 */

bool mtk_is_yuv(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
		return true;
	default:
		return false;
	}
}

bool mtk_is_layer_id_valid(struct drm_mtk_layering_info *disp_info,
			   int disp_idx, int i)
{
	if (i < 0 || i >= disp_info->layer_num[disp_idx])
		return false;
	else
		return true;
}

bool mtk_is_gles_layer(struct drm_mtk_layering_info *disp_info, int disp_idx,
		       int layer_idx)
{
	if (layer_idx >= disp_info->gles_head[disp_idx] &&
	    layer_idx <= disp_info->gles_tail[disp_idx])
		return true;
	else
		return false;
}

inline bool mtk_has_layer_cap(struct drm_mtk_layer_config *layer_info,
			      enum MTK_LAYERING_CAPS l_caps)
{
	if (layer_info->layer_caps & l_caps)
		return true;
	return false;
}

static int is_overlap_on_yaxis(struct drm_mtk_layer_config *lhs,
			       struct drm_mtk_layer_config *rhs)
{
	/*
	 * HWC may adjust the offset of a yuv layer due to alignment
	 * limitations after querying the layering rule, so a yuv layer
	 * has a chance to overlap with other ext layers.
	 * Add a workaround here to avoid using a yuv layer as the base
	 * layer of an extended layer; remove it once HWC corrects the
	 * problem.
	 */
	if (mtk_is_yuv(lhs->src_fmt))
		return 1;
	if ((lhs->dst_offset_y + lhs->dst_height <= rhs->dst_offset_y) ||
	    (rhs->dst_offset_y + rhs->dst_height <= lhs->dst_offset_y))
		return 0;
	return 1;
}
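
/*
 * A worked example of the y-axis test above (illustrative values only):
 * lhs spans rows [dst_offset_y, dst_offset_y + dst_height) = [0, 100) and
 * rhs spans [100, 200). lhs ends exactly where rhs begins, so the first
 * condition holds and the layers do NOT overlap. For rhs at [50, 150) the
 * ranges intersect and the function returns 1, so rhs cannot become an
 * extended layer on top of lhs.
 */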

static bool is_layer_across_each_pipe(struct drm_mtk_layer_config *layer_info)
{
	/* TODO: support dual pipe */
#if 0
	int dst_x, dst_w;

	if (!get_layering_opt(LYE_OPT_DUAL_PIPE))
		return true;
	dst_x = layer_info->dst_offset_x;
	dst_w = layer_info->dst_width;
	if ((dst_x + dst_w <= primary_display_get_width() / 2) ||
	    (dst_x > primary_display_get_width() / 2))
		return false;
#endif
	return true;
}

static inline bool is_extended_layer(struct drm_mtk_layer_config *layer_info)
{
	return (layer_info->ext_sel_layer != -1);
}

static bool is_extended_base_layer_valid(struct drm_mtk_layer_config *configs,
					 int layer_idx)
{
	if ((layer_idx == 0) ||
	    (configs->src_fmt == MTK_DRM_FORMAT_DIM) ||
	    mtk_has_layer_cap(configs, MTK_DISP_RSZ_LAYER))
		return false;

	/*
	 * Under dual pipe, if a layer does not span both pipes, it cannot
	 * be used as the base layer of an extended layer, because the
	 * extended layer would not find its base layer in one of the
	 * display pipes. Always mark such a layer as overlapped to avoid
	 * the failure case.
	 */
	if (!is_layer_across_each_pipe(configs))
		return false;
	return true;
}

static inline bool is_extended_over_limit(int ext_cnt)
{
	if (ext_cnt > 3)
		return true;
	return false;
}

/**
 * Check whether continuous ext layers overlap each other; the nearest phy
 * layer below, to which these ext layers will be attached, must be checked
 * as well:
 * 1. check all ext layers; if one overlaps any other, change it to a phy layer
 * 2. if more than one ext layer exists, the phy layer also needs checking
 */
static int is_continuous_ext_layer_overlap(struct drm_mtk_layer_config *configs,
					   int curr)
{
	int overlapped;
	struct drm_mtk_layer_config *src_info, *dst_info;
	int i;

	overlapped = 0;
	dst_info = &configs[curr];
	for (i = curr - 1; i >= 0; i--) {
		src_info = &configs[i];
		if (is_extended_layer(src_info)) {
			overlapped |= is_overlap_on_yaxis(src_info, dst_info);
			if (overlapped)
				break;
		} else {
			overlapped |= is_overlap_on_yaxis(src_info, dst_info);
			if (!is_extended_base_layer_valid(src_info, i))
				overlapped |= 1;
			break;
		}
	}
	return overlapped;
}

bool is_triple_disp(struct drm_mtk_layering_info *disp_info)
{
	if (disp_info->layer_num[HRT_PRIMARY] &&
	    disp_info->layer_num[HRT_SECONDARY] &&
	    disp_info->layer_num[HRT_THIRD])
		return true;
	return false;
}

static int get_phy_ovl_layer_cnt(struct drm_mtk_layering_info *info,
				 int disp_idx)
{
	int total_cnt = 0;
	int i;
	struct drm_mtk_layer_config *layer_info;

	if (info->layer_num[disp_idx] > 0) {
		total_cnt = info->layer_num[disp_idx];
		if (info->gles_head[disp_idx] >= 0) {
			total_cnt -= info->gles_tail[disp_idx] -
				     info->gles_head[disp_idx];
		}
		if (get_layering_opt(LYE_OPT_EXT_LAYER)) {
			for (i = 0; i < info->layer_num[disp_idx]; i++) {
				layer_info = &info->input_config[disp_idx][i];
				if (is_extended_layer(layer_info) &&
				    !mtk_is_gles_layer(info, disp_idx, i))
					total_cnt--;
			}
		}
	}
	return total_cnt;
}

int mtk_get_phy_layer_limit(uint16_t layer_map_tb)
{
	int total_cnt = 0;
	int i;

	for (i = 0; i < 16; i++) {
		if (layer_map_tb & 0x1)
			total_cnt++;
		layer_map_tb >>= 1;
	}
	return total_cnt;
}
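
/*
 * mtk_get_phy_layer_limit() is a plain 16-bit popcount: each set bit in the
 * layer mapping table is one usable phy layer slot. For example, a table of
 * 0x002F (0b0000000000101111) yields a limit of 5. The value 0x002F is
 * illustrative, not a real platform table.
 */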

static int get_ovl_by_phy(struct drm_device *dev, int disp_idx,
			  uint16_t layer_map_tb, int phy_layer_idx)
{
	uint16_t ovl_mapping_tb;
	int i, ovl_idx = 0, layer_idx = 0;

	ovl_mapping_tb =
		l_rule_ops->get_mapping_table(dev, disp_idx, DISP_HW_OVL_TB, 0);
	for (layer_idx = 0; layer_idx < MAX_PHY_OVL_CNT; layer_idx++) {
		if (layer_map_tb & 0x1) {
			if (phy_layer_idx == 0)
				break;
			phy_layer_idx--;
		}
		layer_map_tb >>= 1;
	}
	if (layer_idx == MAX_PHY_OVL_CNT) {
		DDPPR_ERR("%s fail, phy_layer_idx:%d\n", __func__,
			  phy_layer_idx);
		return -1;
	}
	for (i = 0; i < layer_idx; i++) {
		if (ovl_mapping_tb & 0x1)
			ovl_idx++;
		ovl_mapping_tb >>= 1;
	}
#ifdef HRT_DEBUG_LEVEL2
	DDPMSG("%s,phy:%d,layer_tb:0x%x,L_idx:%d ovl_idx:%d, ov_tb:0x%x\n",
	       __func__, phy_layer_idx, layer_map_tb, layer_idx, ovl_idx,
	       ovl_mapping_tb);
#endif
	return ovl_idx;
}

static int get_phy_ovl_index(struct drm_device *dev, int disp_idx,
			     int layer_idx)
{
	uint16_t ovl_mapping_tb =
		l_rule_ops->get_mapping_table(dev, disp_idx, DISP_HW_OVL_TB, 0);
	int phy_layer_cnt, layer_flag;

	phy_layer_cnt = 0;
	layer_flag = 1 << layer_idx;
	while (layer_idx) {
		layer_idx--;
		layer_flag >>= 1;
		if (ovl_mapping_tb & layer_flag)
			break;
		phy_layer_cnt++;
	}
	return phy_layer_cnt;
}

static int get_larb_by_ovl(struct drm_device *dev, int ovl_idx, int disp_idx)
{
	uint16_t larb_mapping_tb;
	int larb_idx;

	larb_mapping_tb = l_rule_ops->get_mapping_table(dev, disp_idx,
							DISP_HW_LARB_TB, 0);
	larb_idx = (larb_mapping_tb >> ovl_idx * 4) & 0xF;
	return larb_idx;
}
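
/*
 * The larb mapping table packs one 4-bit larb index per OVL engine, with
 * OVL 0 in the lowest nibble. With an illustrative table of 0x0010, OVL 0
 * sits behind larb 0 and OVL 1 behind larb 1:
 *
 *	(0x0010 >> 0 * 4) & 0xF == 0	(larb of OVL 0)
 *	(0x0010 >> 1 * 4) & 0xF == 1	(larb of OVL 1)
 */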

static void dump_disp_info(struct drm_mtk_layering_info *disp_info,
			   enum DISP_DEBUG_LEVEL debug_level)
{
	int i, j;
	struct drm_mtk_layer_config *layer_info;

#define _HRT_FMT \
	"HRT hrt_num:0x%x/mod:%d/dal:%d/addon_scn:(%d, %d, %d)/bd_tb:%d/i:%d\n"
#define _L_FMT \
	"L%d->%d/(%d,%d,%d,%d)/(%d,%d,%d,%d)/f0x%x/ds%d/e%d/cap0x%x/compr%d/secure%d\n"

	if (debug_level < DISP_DEBUG_LEVEL_INFO) {
		DDPMSG(_HRT_FMT,
		       disp_info->hrt_num,
		       disp_info->disp_mode_idx[0],
		       l_rule_info->dal_enable,
		       l_rule_info->addon_scn[HRT_PRIMARY],
		       l_rule_info->addon_scn[HRT_SECONDARY],
		       l_rule_info->addon_scn[HRT_THIRD],
		       l_rule_info->bound_tb_idx,
		       roll_gpu_for_idle);
		for (i = 0; i < HRT_TYPE_NUM; i++) {
			if (disp_info->layer_num[i] <= 0)
				continue;
			DDPMSG("HRT D%d/M%d/LN%d/hrt:0x%x/G(%d,%d)/id%u\n", i,
			       disp_info->disp_mode[i], disp_info->layer_num[i],
			       disp_info->hrt_num, disp_info->gles_head[i],
			       disp_info->gles_tail[i], disp_info->hrt_idx);
			for (j = 0; j < disp_info->layer_num[i]; j++) {
				layer_info = &disp_info->input_config[i][j];
				DDPMSG(_L_FMT, j, layer_info->ovl_id,
				       layer_info->src_offset_x,
				       layer_info->src_offset_y,
				       layer_info->src_width,
				       layer_info->src_height,
				       layer_info->dst_offset_x,
				       layer_info->dst_offset_y,
				       layer_info->dst_width,
				       layer_info->dst_height,
				       layer_info->src_fmt,
				       layer_info->dataspace,
				       layer_info->ext_sel_layer,
				       layer_info->layer_caps,
				       layer_info->compress,
				       layer_info->secure);
			}
		}
	} else {
		DDPINFO(_HRT_FMT, disp_info->hrt_num,
			disp_info->disp_mode_idx[0],
			l_rule_info->dal_enable,
			l_rule_info->addon_scn[HRT_PRIMARY],
			l_rule_info->addon_scn[HRT_SECONDARY],
			l_rule_info->addon_scn[HRT_THIRD],
			l_rule_info->bound_tb_idx,
			roll_gpu_for_idle);
		for (i = 0; i < HRT_TYPE_NUM; i++) {
			if (disp_info->layer_num[i] <= 0)
				continue;
			DDPINFO("HRT D%d/M%d/LN%d/hrt:0x%x/G(%d,%d)/id%u\n", i,
				disp_info->disp_mode[i],
				disp_info->layer_num[i], disp_info->hrt_num,
				disp_info->gles_head[i],
				disp_info->gles_tail[i], disp_info->hrt_idx);
			for (j = 0; j < disp_info->layer_num[i]; j++) {
				layer_info = &disp_info->input_config[i][j];
				DDPINFO(_L_FMT, j, layer_info->ovl_id,
					layer_info->src_offset_x,
					layer_info->src_offset_y,
					layer_info->src_width,
					layer_info->src_height,
					layer_info->dst_offset_x,
					layer_info->dst_offset_y,
					layer_info->dst_width,
					layer_info->dst_height,
					layer_info->src_fmt,
					layer_info->dataspace,
					layer_info->ext_sel_layer,
					layer_info->layer_caps,
					layer_info->compress,
					layer_info->secure);
			}
		}
	}
}

static void dump_disp_trace(struct drm_mtk_layering_info *disp_info)
{
	int i, j;
	struct drm_mtk_layer_config *c;
	const int len = 1000;
	char msg[len];
	int n = 0;

	for (i = 0; i < HRT_TYPE_NUM; i++) {
		if (disp_info->layer_num[i] <= 0)
			continue;
		n = snprintf(msg, len, "D%d,ovp:%d,dal:%d,LN:%d,G(%d,%d)",
			     i, disp_info->hrt_weight, l_rule_info->dal_enable,
			     disp_info->layer_num[i], disp_info->gles_head[i],
			     disp_info->gles_tail[i]);
		for (j = 0; j < disp_info->layer_num[i]; j++) {
			c = &disp_info->input_config[i][j];
			/* stop before n grows past len and len - n wraps */
			if (n >= len)
				break;
			n += snprintf(msg + n, len - n,
				      "|L%d->%d(%u,%u,%ux%u),f:0x%x,c:%d",
				      j, c->ovl_id, c->dst_offset_x,
				      c->dst_offset_y, c->dst_width,
				      c->dst_height, c->src_fmt, c->compress);
		}
		trace_layer_layout(msg);
	}
}

static void
print_disp_info_to_log_buffer(struct drm_mtk_layering_info *disp_info)
{
	/* TODO: support fixed log buffer */
#if 0
	char *status_buf;
	int i, j, n;
	struct drm_mtk_layer_config *layer_info;

	status_buf = get_dprec_status_ptr(0);
	if (status_buf == NULL)
		return;
	n = 0;
	n += snprintf(status_buf + n, LOGGER_BUFFER_SIZE - n,
		      "Last hrt query data[start]\n");
	for (i = 0; i < 2; i++) {
		n += snprintf(status_buf + n, LOGGER_BUFFER_SIZE - n,
			      "HRT D%d/M%d/LN%d/hrt_num:%d/G(%d,%d)/fps:%d\n",
			      i, disp_info->disp_mode[i], disp_info->layer_num[i],
			      disp_info->hrt_num, disp_info->gles_head[i],
			      disp_info->gles_tail[i], l_rule_info->primary_fps);
		for (j = 0; j < disp_info->layer_num[i]; j++) {
			layer_info = &disp_info->input_config[i][j];
			n += snprintf(status_buf + n, LOGGER_BUFFER_SIZE - n,
				      "L%d->%d/of(%d,%d)/wh(%d,%d)/fmt:0x%x/compr:%u\n",
				      j, layer_info->ovl_id,
				      layer_info->dst_offset_x,
				      layer_info->dst_offset_y,
				      layer_info->dst_width,
				      layer_info->dst_height,
				      layer_info->src_fmt,
				      layer_info->compress);
		}
	}
	n += snprintf(status_buf + n, LOGGER_BUFFER_SIZE - n,
		      "Last hrt query data[end]\n");
#endif
}

void mtk_rollback_layer_to_GPU(struct drm_mtk_layering_info *disp_info,
			       int disp_idx, int i)
{
	if (mtk_is_layer_id_valid(disp_info, disp_idx, i) == false)
		return;

	if (disp_info->gles_head[disp_idx] == -1 ||
	    disp_info->gles_head[disp_idx] > i)
		disp_info->gles_head[disp_idx] = i;
	if (disp_info->gles_tail[disp_idx] == -1 ||
	    disp_info->gles_tail[disp_idx] < i)
		disp_info->gles_tail[disp_idx] = i;
	disp_info->input_config[disp_idx][i].ext_sel_layer = -1;
}

/* rollback and set NO_FBDC flag */
void mtk_rollback_compress_layer_to_GPU(struct drm_mtk_layering_info *disp_info,
					int disp_idx, int i)
{
	if (mtk_is_layer_id_valid(disp_info, disp_idx, i) == false)
		return;
	if (disp_info->input_config[disp_idx][i].compress == 0)
		return;

	mtk_rollback_layer_to_GPU(disp_info, disp_idx, i);
	disp_info->input_config[disp_idx][i].layer_caps |= MTK_NO_FBDC;
}

int mtk_rollback_resize_layer_to_GPU_range(
	struct drm_mtk_layering_info *disp_info, int disp_idx, int start_idx,
	int end_idx)
{
	int i;
	struct drm_mtk_layer_config *lc;

	if (disp_info->layer_num[disp_idx] <= 0) {
		/* direct skip */
		return 0;
	}
	if (start_idx < 0 || end_idx >= disp_info->layer_num[disp_idx])
		return -EINVAL;

	for (i = start_idx; i <= end_idx; i++) {
		lc = &disp_info->input_config[disp_idx][i];
		if ((lc->src_height != lc->dst_height) ||
		    (lc->src_width != lc->dst_width)) {
			if (mtk_has_layer_cap(lc, MTK_MDP_RSZ_LAYER))
				continue;
			if (disp_info->gles_head[disp_idx] == -1 ||
			    disp_info->gles_head[disp_idx] > i)
				disp_info->gles_head[disp_idx] = i;
			if (disp_info->gles_tail[disp_idx] == -1 ||
			    disp_info->gles_tail[disp_idx] < i)
				disp_info->gles_tail[disp_idx] = i;
		}
	}
	if (disp_info->gles_head[disp_idx] != -1) {
		for (i = disp_info->gles_head[disp_idx];
		     i <= disp_info->gles_tail[disp_idx]; i++) {
			lc = &disp_info->input_config[disp_idx][i];
			lc->ext_sel_layer = -1;
		}
	}
	return 0;
}

int mtk_rollback_all_resize_layer_to_GPU(
	struct drm_mtk_layering_info *disp_info, int disp_idx)
{
	mtk_rollback_resize_layer_to_GPU_range(
		disp_info, disp_idx, 0, disp_info->layer_num[disp_idx] - 1);
	return 0;
}

static int _rollback_to_GPU_bottom_up(struct drm_mtk_layering_info *info,
				      int disp, int ovl_limit)
{
	int available_ovl_num, i, j;
	struct drm_mtk_layer_config *l_info;

	available_ovl_num = ovl_limit;
	for (i = 0; i < info->layer_num[disp]; i++) {
		l_info = &info->input_config[disp][i];
		if (is_extended_layer(l_info))
			continue;
		available_ovl_num--;

		if (mtk_is_gles_layer(info, disp, i)) {
			info->gles_head[disp] = i;
			if (info->gles_tail[disp] == -1) {
				info->gles_tail[disp] = i;
				for (j = i + 1; j < info->layer_num[disp];
				     j++) {
					l_info = &info->input_config[disp][j];
					if (is_extended_layer(l_info))
						info->gles_tail[disp] = j;
					else
						break;
				}
			}
			break;
		} else if (available_ovl_num <= 0) {
			available_ovl_num = 0;
			info->gles_head[disp] = i;
			info->gles_tail[disp] = info->layer_num[disp] - 1;
			break;
		}
	}
	if (available_ovl_num < 0)
		DDPPR_ERR("%s available_ovl_num invalid:%d\n", __func__,
			  available_ovl_num);
	return available_ovl_num;
}

static int _rollback_to_GPU_top_down(struct drm_mtk_layering_info *disp_info,
				     int disp, int ovl_limit)
{
	int available_ovl_num, i;
	int tmp_ext = -1;
	struct drm_mtk_layer_config *layer_info;

	available_ovl_num = ovl_limit;
	for (i = disp_info->layer_num[disp] - 1; i > disp_info->gles_tail[disp];
	     i--) {
		layer_info = &disp_info->input_config[disp][i];
		if (!is_extended_layer(layer_info)) {
			if (mtk_is_gles_layer(disp_info, disp, i))
				break;
			if (available_ovl_num <= 0) {
				available_ovl_num = 0;
				if (tmp_ext == -1)
					disp_info->gles_tail[disp] = i;
				else
					disp_info->gles_tail[disp] = tmp_ext;
				break;
			}
			tmp_ext = -1;
			available_ovl_num--;
		} else {
			if (tmp_ext == -1)
				tmp_ext = i;
		}
	}
	if (available_ovl_num < 0)
		DDPPR_ERR("%s available_ovl_num invalid:%d\n", __func__,
			  available_ovl_num);
	return available_ovl_num;
}

static int rollback_to_GPU(struct drm_mtk_layering_info *info, int disp,
			   int available)
{
	int available_ovl_num, i;
	bool has_gles_layer = false;
	struct drm_mtk_layer_config *l_info;

	available_ovl_num = available;
	if (info->gles_head[disp] != -1)
		has_gles_layer = true;

	available_ovl_num =
		_rollback_to_GPU_bottom_up(info, disp, available_ovl_num);
	if (has_gles_layer)
		available_ovl_num = _rollback_to_GPU_top_down(
			info, disp, available_ovl_num);

	if (info->gles_head[disp] == -1 && info->gles_tail[disp] == -1)
		goto out;

	if (mtk_is_layer_id_valid(info, disp, info->gles_head[disp]) == false) {
		dump_disp_info(info, DISP_DEBUG_LEVEL_CRITICAL);
		DDPAEE("invalid gles_head:%d, aval:%d\n",
		       info->gles_head[disp], available);
		goto out;
	}
	if (mtk_is_layer_id_valid(info, disp, info->gles_tail[disp]) == false) {
		dump_disp_info(info, DISP_DEBUG_LEVEL_CRITICAL);
		DDPAEE("invalid gles_tail:%d, aval:%d\n",
		       info->gles_tail[disp], available);
		goto out;
	}

	/* Clear extended layer for all GLES layer */
	for (i = info->gles_head[disp]; i <= info->gles_tail[disp]; i++) {
		l_info = &info->input_config[disp][i];
		l_info->ext_sel_layer = -1;
	}
	if (info->gles_tail[disp] + 1 < info->layer_num[disp]) {
		l_info = &info->input_config[disp][info->gles_tail[disp] + 1];
		if (is_extended_layer(l_info))
			l_info->ext_sel_layer = -1;
	}
out:
	return available_ovl_num;
}
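
/*
 * Rollback contract, illustrated (values are made up): gles_head/gles_tail
 * delimit one contiguous run of layers handed back to GPU (GLES)
 * composition. With 6 input layers and only 4 phy OVL inputs available,
 * the bottom-up pass stops at the first layer that no longer fits and
 * marks, e.g., gles_head = 3 and gles_tail = 5; every layer inside the
 * range then loses its ext_sel_layer binding, so no extended layer can
 * reference a GPU-composed base.
 */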

static bool unset_disp_rsz_attr(struct drm_mtk_layering_info *disp_info,
				int disp_idx)
{
	struct drm_mtk_layer_config *c =
		&disp_info->input_config[disp_idx][HRT_PRIMARY];

	if (l_rule_info->addon_scn[HRT_PRIMARY] == ONE_SCALING &&
	    mtk_has_layer_cap(c, MTK_MDP_RSZ_LAYER) &&
	    mtk_has_layer_cap(c, MTK_DISP_RSZ_LAYER)) {
		c->layer_caps &= ~MTK_DISP_RSZ_LAYER;
		l_rule_info->addon_scn[HRT_PRIMARY] = NONE;
		return true;
	}
	return false;
}

static int _filter_by_ovl_cnt(struct drm_device *dev,
			      struct drm_mtk_layering_info *disp_info,
			      int disp_idx)
{
	int ovl_num_limit, phy_ovl_cnt;
	uint16_t l_tb;

	if (disp_info->layer_num[disp_idx] <= 0) {
		/* direct skip */
		return 0;
	}

retry:
	phy_ovl_cnt = get_phy_ovl_layer_cnt(disp_info, disp_idx);
	l_tb = l_rule_ops->get_mapping_table(dev, disp_idx, DISP_HW_LAYER_TB,
					     MAX_PHY_OVL_CNT);
	ovl_num_limit = mtk_get_phy_layer_limit(l_tb);
	if (disp_idx == 0 && l_rule_info->dal_enable)
		ovl_num_limit--;
#ifdef HRT_DEBUG_LEVEL2
	DDPMSG("phy_ovl_cnt:%d,ovl_n_limit:%d\n", phy_ovl_cnt, ovl_num_limit);
#endif
	if (phy_ovl_cnt <= ovl_num_limit)
		return 0;

	if (unset_disp_rsz_attr(disp_info, disp_idx))
		goto retry;

	rollback_to_GPU(disp_info, disp_idx, ovl_num_limit);
	return 0;
}

static void ext_id_adjustment_and_retry(struct drm_device *dev,
					struct drm_mtk_layering_info *info,
					int disp_idx, int layer_idx)
{
	int j, ext_idx;
	struct drm_mtk_layer_config *layer_info;

	ext_idx = -1;
	for (j = layer_idx; j < layer_idx + 3; j++) {
		layer_info = &info->input_config[disp_idx][j];
		if (ext_idx == -1) {
			layer_info->ext_sel_layer = -1;
			if (is_extended_base_layer_valid(layer_info, j))
				ext_idx = j;
		} else {
			layer_info->ext_sel_layer = ext_idx;
		}
		if (j == (info->layer_num[disp_idx] - 1) ||
		    !is_extended_layer(&info->input_config[disp_idx][j + 1]))
			break;
	}
#ifdef HRT_DEBUG_LEVEL2
	DDPMSG("[%s]cannot fit current layer layout\n", __func__);
	dump_disp_info(info, DISP_DEBUG_LEVEL_ERR);
#endif
	ext_id_tuning(dev, info, disp_idx);
}

static int ext_id_tuning(struct drm_device *dev,
			 struct drm_mtk_layering_info *info, int disp)
{
	uint16_t ovl_tb, l_tb;
	int phy_ovl_cnt, i;
	int ext_cnt = 0, cur_phy_cnt = 0;
	struct drm_mtk_layer_config *layer_info;

	if (info->layer_num[disp] <= 0) {
		/* direct skip */
		return 0;
	}

	_filter_by_ovl_cnt(dev, info, disp);
	phy_ovl_cnt = get_phy_ovl_layer_cnt(info, disp);
	if (phy_ovl_cnt > MAX_PHY_OVL_CNT) {
		DDPPR_ERR("phy_ovl_cnt(%d) over OVL count limit\n",
			  phy_ovl_cnt);
		phy_ovl_cnt = MAX_PHY_OVL_CNT;
	}

	ovl_tb = l_rule_ops->get_mapping_table(dev, disp, DISP_HW_OVL_TB, 0);
	l_tb = l_rule_ops->get_mapping_table(dev, disp, DISP_HW_LAYER_TB,
					     phy_ovl_cnt);
	if (l_rule_info->dal_enable) {
		l_tb = l_rule_ops->get_mapping_table(
			dev, disp, DISP_HW_LAYER_TB, MAX_PHY_OVL_CNT);
		l_tb &= HRT_AEE_LAYER_MASK;
	}

	for (i = 0; i < info->layer_num[disp]; i++) {
		layer_info = &info->input_config[disp][i];
		if (is_extended_layer(layer_info)) {
			ext_cnt++;
			if (is_extended_over_limit(ext_cnt)) {
				ext_id_adjustment_and_retry(dev, info, disp, i);
				break;
			}
		} else {
#ifdef HRT_DEBUG_LEVEL2
			DDPMSG("i:%d, cur_phy_cnt:%d\n", i, cur_phy_cnt);
#endif
			if (mtk_is_gles_layer(info, disp, i) &&
			    (i != info->gles_head[disp])) {
#ifdef HRT_DEBUG_LEVEL2
				DDPMSG("is gles layer, continue\n");
#endif
				continue;
			}
			if (cur_phy_cnt > 0) {
				int cur_ovl, pre_ovl;

				cur_ovl = get_ovl_by_phy(dev, disp, l_tb,
							 cur_phy_cnt);
				pre_ovl = get_ovl_by_phy(dev, disp, l_tb,
							 cur_phy_cnt - 1);
				if (cur_ovl != pre_ovl)
					ext_cnt = 0;
			}
			cur_phy_cnt++;
		}
	}
	return 0;
}

static int rollback_all_to_GPU(struct drm_mtk_layering_info *disp_info,
			       int disp_idx)
{
	if (disp_info->layer_num[disp_idx] <= 0) {
		/* direct skip */
		return 0;
	}

	disp_info->gles_head[disp_idx] = 0;
	disp_info->gles_tail[disp_idx] = disp_info->layer_num[disp_idx] - 1;
	return 0;
}

static int filter_by_ovl_cnt(struct drm_device *dev,
			     struct drm_mtk_layering_info *disp_info)
{
	int ret = 0, disp_idx;

	/* 0->primary display, 1->secondary display */
	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++) {
		if (get_layering_opt(LYE_OPT_EXT_LAYER))
			ret = ext_id_tuning(dev, disp_info, disp_idx);
		else
			ret = _filter_by_ovl_cnt(dev, disp_info, disp_idx);
	}
#ifdef HRT_DEBUG_LEVEL2
	DDPMSG("[%s result]\n", __func__);
	dump_disp_info(disp_info, DISP_DEBUG_LEVEL_INFO);
#endif
	return ret;
}

static struct hrt_sort_entry *x_entry_list, *y_entry_list;

#ifdef HRT_DEBUG_LEVEL2
static int dump_entry_list(bool sort_by_y)
{
	struct hrt_sort_entry *temp;
	struct drm_mtk_layer_config *layer_info;

	if (sort_by_y)
		temp = y_entry_list;
	else
		temp = x_entry_list;
	DDPMSG("%s, sort_by_y:%d, addr:0x%p\n", __func__, sort_by_y, temp);
	while (temp) {
		layer_info = temp->layer_info;
		DDPMSG("key:%d, offset(%d, %d), w/h(%d, %d), overlap_w:%d\n",
		       temp->key, layer_info->dst_offset_x,
		       layer_info->dst_offset_y, layer_info->dst_width,
		       layer_info->dst_height, temp->overlap_w);
		temp = temp->tail;
	}
	DDPMSG("%s end\n", __func__);
	return 0;
}
#endif

static int insert_entry(struct hrt_sort_entry **head,
			struct hrt_sort_entry *sort_entry)
{
	struct hrt_sort_entry *temp;

	temp = *head;
	while (temp) {
		if (sort_entry->key < temp->key ||
		    ((sort_entry->key == temp->key) &&
		     (sort_entry->overlap_w > 0))) {
			sort_entry->head = temp->head;
			sort_entry->tail = temp;
			if (temp->head != NULL)
				temp->head->tail = sort_entry;
			else
				*head = sort_entry;
			temp->head = sort_entry;
			break;
		}
		if (temp->tail == NULL) {
			temp->tail = sort_entry;
			sort_entry->head = temp;
			sort_entry->tail = NULL;
			break;
		}
		temp = temp->tail;
	}
	return 0;
}

static int add_layer_entry(struct drm_mtk_layer_config *l_info, bool sort_by_y,
			   int overlap_w)
{
	struct hrt_sort_entry *begin_t, *end_t;
	struct hrt_sort_entry **p_entry;

	begin_t = kzalloc(sizeof(struct hrt_sort_entry), GFP_KERNEL);
	end_t = kzalloc(sizeof(struct hrt_sort_entry), GFP_KERNEL);
	if (!begin_t || !end_t) {
		kfree(begin_t);
		kfree(end_t);
		return -ENOMEM;
	}
	begin_t->head = NULL;
	begin_t->tail = NULL;
	end_t->head = NULL;
	end_t->tail = NULL;

	if (sort_by_y) {
		begin_t->key = l_info->dst_offset_y;
		end_t->key = l_info->dst_offset_y + l_info->dst_height - 1;
		p_entry = &y_entry_list;
	} else {
		begin_t->key = l_info->dst_offset_x;
		end_t->key = l_info->dst_offset_x + l_info->dst_width - 1;
		p_entry = &x_entry_list;
	}
	begin_t->overlap_w = overlap_w;
	begin_t->layer_info = l_info;
	end_t->overlap_w = -overlap_w;
	end_t->layer_info = l_info;

	if (*p_entry == NULL) {
		*p_entry = begin_t;
		begin_t->head = NULL;
		begin_t->tail = end_t;
		end_t->head = begin_t;
		end_t->tail = NULL;
	} else {
		/* Insert begin entry */
		insert_entry(p_entry, begin_t);
#ifdef HRT_DEBUG_LEVEL2
		DDPMSG("Insert key:%d\n", begin_t->key);
		dump_entry_list(sort_by_y);
#endif
		/* Insert end entry */
		insert_entry(p_entry, end_t);
#ifdef HRT_DEBUG_LEVEL2
		DDPMSG("Insert key:%d\n", end_t->key);
		dump_entry_list(sort_by_y);
#endif
	}
	return 0;
}
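
/*
 * The entry lists implement a classic sweep line: every layer contributes a
 * begin entry carrying +overlap_w at its first row (or column) and an end
 * entry carrying -overlap_w at its last one. Scanning the list in key order
 * while accumulating overlap_w therefore yields the instantaneous weighted
 * overlap at each position, e.g. (made-up weights):
 *
 *	layer A, rows [0, 99], w = 2   ->  (key 0, +2), (key 99, -2)
 *	layer B, rows [50, 149], w = 2 ->  (key 50, +2), (key 149, -2)
 *	running sum: 2, 4, 2, 0        ->  peak overlap 4 across rows 50..99
 */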

static int remove_layer_entry(struct drm_mtk_layer_config *layer_info,
			      bool sort_by_y)
{
	struct hrt_sort_entry *temp, *free_entry;

	if (sort_by_y)
		temp = y_entry_list;
	else
		temp = x_entry_list;

	while (temp) {
		if (temp->layer_info == layer_info) {
			free_entry = temp;
			temp = temp->tail;
			if (free_entry->head == NULL) {
				/* Free head entry */
				if (temp != NULL)
					temp->head = NULL;
				if (sort_by_y)
					y_entry_list = temp;
				else
					x_entry_list = temp;
				kfree(free_entry);
			} else {
				free_entry->head->tail = free_entry->tail;
				if (temp)
					temp->head = free_entry->head;
				kfree(free_entry);
			}
		} else {
			temp = temp->tail;
		}
	}
	return 0;
}

static int free_all_layer_entry(bool sort_by_y)
{
	struct hrt_sort_entry *cur_entry, *next_entry;

	if (sort_by_y)
		cur_entry = y_entry_list;
	else
		cur_entry = x_entry_list;

	while (cur_entry) {
		next_entry = cur_entry->tail;
		kfree(cur_entry);
		cur_entry = next_entry;
	}
	if (sort_by_y)
		y_entry_list = NULL;
	else
		x_entry_list = NULL;
	return 0;
}

static int scan_x_overlap(struct drm_mtk_layering_info *disp_info,
			  int disp_index, int ovl_overlap_limit_w)
{
	struct hrt_sort_entry *tmp_entry;
	int overlap_w_sum, max_overlap;

	overlap_w_sum = 0;
	max_overlap = 0;
	tmp_entry = x_entry_list;
	while (tmp_entry) {
		overlap_w_sum += tmp_entry->overlap_w;
		max_overlap = (overlap_w_sum > max_overlap) ? overlap_w_sum
							    : max_overlap;
		tmp_entry = tmp_entry->tail;
	}
	return max_overlap;
}

static int scan_y_overlap(struct drm_mtk_layering_info *disp_info,
			  int disp_index, int ovl_overlap_limit_w)
{
	struct hrt_sort_entry *tmp_entry;
	int overlap_w_sum, tmp_overlap, max_overlap;

	overlap_w_sum = 0;
	tmp_overlap = 0;
	max_overlap = 0;
	tmp_entry = y_entry_list;
	while (tmp_entry) {
		overlap_w_sum += tmp_entry->overlap_w;
		if (tmp_entry->overlap_w > 0) {
			add_layer_entry(tmp_entry->layer_info, false,
					tmp_entry->overlap_w);
		} else {
			remove_layer_entry(tmp_entry->layer_info, false);
		}
		if (overlap_w_sum > ovl_overlap_limit_w &&
		    overlap_w_sum > max_overlap) {
			tmp_overlap = scan_x_overlap(disp_info, disp_index,
						     ovl_overlap_limit_w);
		} else {
			tmp_overlap = overlap_w_sum;
		}
		max_overlap =
			(tmp_overlap > max_overlap) ? tmp_overlap : max_overlap;
		tmp_entry = tmp_entry->tail;
	}
	return max_overlap;
}
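
/*
 * scan_y_overlap() walks the y-sorted sweep list while mirroring the set of
 * currently "open" layers into the x-sorted list; only when the running
 * y-overlap exceeds both the limit and the best value so far does it pay
 * for a full scan_x_overlap() pass, which measures how much of that
 * y-overlap really coincides horizontally. Example with made-up numbers:
 * two layers stacked in y but side by side in x keep the x scan at the
 * weight of a single layer, so the worst case is not overcounted.
 */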

static int get_hrt_level(int sum_w, int is_larb)
{
	int hrt_level;
	int *bound_table;
	enum DISP_HW_MAPPING_TB_TYPE type;

	if (is_larb)
		type = DISP_HW_LARB_BOUND_TB;
	else
		type = DISP_HW_EMI_BOUND_TB;
	bound_table = l_rule_ops->get_bound_table(type);
	for (hrt_level = 0; hrt_level < HRT_LEVEL_NUM; hrt_level++) {
		if (bound_table[hrt_level] != -1 &&
		    sum_w <= bound_table[hrt_level] * HRT_UINT_BOUND_BPP)
			return hrt_level;
	}
	return hrt_level;
}
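
/*
 * get_hrt_level() returns the first bound the weighted overlap fits under;
 * entries of -1 are treated as disabled. With a hypothetical bound table of
 * {4, 6, -1, 8} and HRT_UINT_BOUND_BPP == 4, sum_w = 20 exceeds 4 * 4 = 16
 * but fits under 6 * 4 = 24 and maps to level 1; a sum_w above every bound
 * falls through and returns HRT_LEVEL_NUM.
 */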

static bool has_hrt_limit(struct drm_mtk_layering_info *disp_info, int disp_idx)
{
	if (disp_info->layer_num[disp_idx] <= 0)
		return false;
	if (disp_info->disp_mode[disp_idx] == MTK_DRM_SESSION_DC_MIRROR)
		return false;

	return true;
}

static int get_hrt_disp_num(struct drm_mtk_layering_info *disp_info)
{
	int cnt = 0, i;

	for (i = 0; i < HRT_TYPE_NUM; i++)
		if (has_hrt_limit(disp_info, i))
			cnt++;
	return cnt;
}

/**
 * Return the HRT layer weight.
 * If layer_info is NULL, return the GLES layer weight.
 */
static int get_layer_weight(int disp_idx,
			    struct drm_mtk_layer_config *layer_info)
{
	int bpp, weight;

	if (layer_info)
		bpp = mtk_get_format_bpp(layer_info->src_fmt);
	else
		bpp = HRT_UINT_BOUND_BPP;
#ifdef CONFIG_MTK_HDMI_SUPPORT
	if (disp_idx == HRT_SECONDARY) {
		/* To Be Impl */
#if 0
		struct disp_session_info dispif_info;

		/* For the secondary display, weight 4K@30 the same as 2K@60. */
		hdmi_get_dev_info(true, &dispif_info);
		if (dispif_info.displayWidth > 2560)
			weight = HRT_UINT_WEIGHT * 2;
		else if (dispif_info.displayWidth > 1920)
			weight = HRT_UINT_WEIGHT;
		else
			weight = HRT_UINT_WEIGHT / 2;
		if (dispif_info.vsyncFPS <= 30)
			weight /= 2;
		return weight * bpp;
#endif
	}
#endif
	weight = HRT_UINT_WEIGHT;
	return weight * bpp;
}
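
/*
 * Weight sketch: a layer's HRT cost is HRT_UINT_WEIGHT scaled by the bpp of
 * its format, so (for illustration) an RGBA8888 layer with bpp 4 costs
 * 4 * HRT_UINT_WEIGHT while an RGB565 layer with bpp 2 costs half of that.
 * A GLES (GPU-composed) range is charged once at the worst-case
 * HRT_UINT_BOUND_BPP instead of once per layer.
 */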

static int _calc_hrt_num(struct drm_device *dev,
			 struct drm_mtk_layering_info *disp_info, int disp,
			 int hrt_type, bool force_scan_y, bool has_dal_layer)
{
	int i, sum_overlap_w, overlap_l_bound;
	uint16_t layer_map;
	int overlap_w, layer_idx, phy_layer_idx, ovl_cnt;
	bool has_gles = false;
	struct drm_mtk_layer_config *layer_info;

	if (!has_hrt_limit(disp_info, disp))
		return 0;

	/* 1. Initialize the overlap conditions. */
	sum_overlap_w = 0;

	/*
	 * The parameters of the hrt table are based on the ARGB color
	 * format, so multiply by its bpp.
	 */
	overlap_l_bound = g_emi_bound_table[0] * HRT_UINT_BOUND_BPP;

	/*
	 * 2. Add each layer to the layer list, sorted by y offset, and
	 * accumulate each layer's overlap weight.
	 */
	layer_idx = -1;
	ovl_cnt = get_phy_ovl_layer_cnt(disp_info, disp);
	layer_map = l_rule_ops->get_mapping_table(dev, disp, DISP_HW_LAYER_TB,
						  ovl_cnt);
	if (l_rule_info->dal_enable) {
		layer_map = l_rule_ops->get_mapping_table(
			dev, disp, DISP_HW_LAYER_TB, MAX_PHY_OVL_CNT);
		layer_map &= HRT_AEE_LAYER_MASK;
	}

	for (i = 0; i < disp_info->layer_num[disp]; i++) {
		int ovl_idx;

		layer_info = &disp_info->input_config[disp][i];
		if (disp_info->gles_head[disp] == -1 ||
		    (i < disp_info->gles_head[disp] ||
		     i > disp_info->gles_tail[disp])) {
			if (hrt_type != HRT_TYPE_EMI) {
				if (layer_idx == -1)
					layer_idx = 0;
				else if (!is_extended_layer(layer_info))
					layer_idx++;
				phy_layer_idx =
					get_phy_ovl_index(dev, disp, layer_idx);
				ovl_idx = get_ovl_by_phy(dev, disp, layer_map,
							 layer_idx);
				if (get_larb_by_ovl(dev, ovl_idx, disp) !=
				    hrt_type)
					continue;
			}
			overlap_w = get_layer_weight(disp, layer_info);
			sum_overlap_w += overlap_w;
			add_layer_entry(layer_info, true, overlap_w);
		} else if (i == disp_info->gles_head[disp]) {
			/* Add the GLES layer */
			if (hrt_type != HRT_TYPE_EMI) {
				if (layer_idx == -1)
					layer_idx = 0;
				else if (!is_extended_layer(layer_info))
					layer_idx++;
				phy_layer_idx =
					get_phy_ovl_index(dev, disp, layer_idx);
				ovl_idx = get_ovl_by_phy(dev, disp, layer_map,
							 layer_idx);
				if (get_larb_by_ovl(dev, ovl_idx, disp) !=
				    hrt_type)
					continue;
			}
			has_gles = true;
		}
	}

	/* Add the overlap weight of the GLES layer and the assert layer. */
	if (has_gles)
		sum_overlap_w += get_layer_weight(disp, NULL);
	if (has_dal_layer)
		sum_overlap_w += HRT_AEE_WEIGHT;

	/*
	 * 3. Calculate the HRT bound if the total layer weight exceeds the
	 * lower bound or a secondary display is present.
	 */
	if (sum_overlap_w > overlap_l_bound ||
	    has_hrt_limit(disp_info, HRT_SECONDARY) || force_scan_y) {
		sum_overlap_w =
			scan_y_overlap(disp_info, disp, overlap_l_bound);
		/* Add the overlap weight of the GLES layer and the assert layer. */
		if (has_gles)
			sum_overlap_w += get_layer_weight(disp, NULL);
		if (has_dal_layer)
			sum_overlap_w += HRT_AEE_WEIGHT;
	}

#ifdef HRT_DEBUG_LEVEL1
	DDPMSG("%s disp:%d, hrt_type:%d, sum_overlap_w:%d\n", __func__,
	       disp, hrt_type, sum_overlap_w);
#endif
	free_all_layer_entry(true);
	return sum_overlap_w;
}

#ifdef HAS_LARB_HRT
static int calc_larb_hrt_level(struct drm_device *dev,
			       struct drm_mtk_layering_info *disp_info)
{
	int larb_hrt_level, i, sum_overlap_w;

	larb_hrt_level = 0;
	for (i = HRT_TYPE_LARB0; i <= HRT_TYPE_LARB1; i++) {
		int tmp_hrt_level;

		sum_overlap_w = _calc_hrt_num(dev, disp_info, HRT_PRIMARY, i,
					      true, l_rule_info->dal_enable);
		sum_overlap_w += _calc_hrt_num(dev, disp_info, HRT_SECONDARY, i,
					       true, false);
		sum_overlap_w += _calc_hrt_num(dev, disp_info, HRT_THIRD, i,
					       true, false);
		tmp_hrt_level = get_hrt_level(sum_overlap_w, true);
		if (tmp_hrt_level > larb_hrt_level)
			larb_hrt_level = tmp_hrt_level;
	}
	return larb_hrt_level;
}
#endif

static int calc_hrt_num(struct drm_device *dev,
			struct drm_mtk_layering_info *disp_info)
{
	int emi_hrt_level;
	int sum_overlap_w = 0;
#ifdef HAS_LARB_HRT
	int larb_hrt_level;
#endif
	int overlap_num;
	/* TODO: support display helper */
	/* bool scan_overlap = !!disp_helper_get_option(DISP_OPT_HRT_MODE); */
	bool scan_overlap = true;

	/* Calculate HRT for the EMI level */
	if (has_hrt_limit(disp_info, HRT_PRIMARY)) {
		sum_overlap_w =
			_calc_hrt_num(dev, disp_info, HRT_PRIMARY, HRT_TYPE_EMI,
				      scan_overlap, l_rule_info->dal_enable);
	}
	if (has_hrt_limit(disp_info, HRT_SECONDARY)) {
		sum_overlap_w +=
			_calc_hrt_num(dev, disp_info, HRT_SECONDARY,
				      HRT_TYPE_EMI, scan_overlap, false);
	}
	if (has_hrt_limit(disp_info, HRT_THIRD)) {
		sum_overlap_w += _calc_hrt_num(dev, disp_info, HRT_THIRD,
					       HRT_TYPE_EMI, scan_overlap,
					       false);
	}
	emi_hrt_level = get_hrt_level(sum_overlap_w, false);
	overlap_num = sum_overlap_w / 200;

	/*
	 * The larb bound always meets the limit for HRT_LEVEL2 in the
	 * 8+4 ovl architecture, so calculate the larb bound only for
	 * HRT_LEVEL2.
	 */
	disp_info->hrt_num = emi_hrt_level;
#ifdef HRT_DEBUG_LEVEL1
	DDPMSG("EMI hrt lv2:%d,overlap_w:%d\n", emi_hrt_level, sum_overlap_w);
#endif

#ifdef HAS_LARB_HRT
	/* Need to calculate the larb hrt for the HRT_LEVEL_LOW level. */
	/* TODO: Should revise the larb calculation statement here */
	/* if (hrt_level != HRT_LEVEL_NUM - 2) */
	/*	return hrt_level; */
	/* Check the larb bound here */
	larb_hrt_level = calc_larb_hrt_level(dev, disp_info);
#ifdef HRT_DEBUG_LEVEL1
	DDPMSG("Larb hrt level:%d\n", larb_hrt_level);
#endif
	if (emi_hrt_level < larb_hrt_level)
		disp_info->hrt_num = larb_hrt_level;
	else
#endif
		disp_info->hrt_num = emi_hrt_level;

	return overlap_num;
}

/**
 * Decide which layers can be dispatched as ext layers.
 */
static int ext_layer_grouping(struct drm_device *dev,
			      struct drm_mtk_layering_info *disp_info)
{
	int cont_ext_layer_cnt = 0, ext_idx = 0;
	int is_ext_layer, disp_idx, i;
	struct drm_mtk_layer_config *src_info, *dst_info;
	int available_layers = 0, phy_layer_cnt = 0;

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++) {
		/* initialize ext layer info */
		for (i = 0; i < disp_info->layer_num[disp_idx]; i++)
			disp_info->input_config[disp_idx][i].ext_sel_layer = -1;

		if (!get_layering_opt(LYE_OPT_EXT_LAYER))
			continue;
#ifndef LAYERING_SUPPORT_EXT_LAYER_ON_2ND_DISP
		if (disp_idx != HRT_PRIMARY)
			continue;
#endif
		/*
		 * If there are more physical layers than input layers,
		 * skip using extended layers.
		 */
		phy_layer_cnt =
			mtk_get_phy_layer_limit(l_rule_ops->get_mapping_table(
				dev, disp_idx, DISP_HW_LAYER_TB,
				MAX_PHY_OVL_CNT));
		/* Remove the rule here so that we have more opportunity to
		 * test extended layers
		 * if (phy_layer_cnt > disp_info->layer_num[disp_idx])
		 *	continue;
		 */
		for (i = 1; i < disp_info->layer_num[disp_idx]; i++) {
			dst_info = &disp_info->input_config[disp_idx][i];
			src_info = &disp_info->input_config[disp_idx][i - 1];
			/* skip other GPU layers */
			if (mtk_is_gles_layer(disp_info, disp_idx, i) ||
			    mtk_is_gles_layer(disp_info, disp_idx, i - 1)) {
				cont_ext_layer_cnt = 0;
				if (i > disp_info->gles_tail[disp_idx]) {
					int tmp;

					tmp = disp_info->gles_tail[disp_idx] -
					      disp_info->gles_head[disp_idx];
					ext_idx = i - tmp;
				}
				continue;
			}
			is_ext_layer = !is_continuous_ext_layer_overlap(
				disp_info->input_config[disp_idx], i);

			/*
			 * A yuv layer is not supported as an extended layer,
			 * as the HWC has special handling for yuv content.
			 */
			if (mtk_is_yuv(dst_info->src_fmt))
				is_ext_layer = false;

			if (is_ext_layer && cont_ext_layer_cnt < 3) {
				++cont_ext_layer_cnt;
				dst_info->ext_sel_layer = ext_idx;
			} else {
				cont_ext_layer_cnt = 0;
				ext_idx = i;
				if (i > disp_info->gles_tail[disp_idx]) {
					ext_idx -=
						disp_info->gles_tail[disp_idx] -
						disp_info->gles_head[disp_idx];
				}
			}
		}
	}
#ifdef HRT_DEBUG_LEVEL1
	DDPMSG("[ext layer grouping]\n");
	dump_disp_info(disp_info, DISP_DEBUG_LEVEL_INFO);
#endif
	return available_layers;
}
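
/*
 * ext_sel_layer semantics, by example (indices are illustrative): with
 * layers 0..4 where layers 1..3 do not overlap layer 0 on the y axis,
 * grouping sets ext_sel_layer = 0 for layers 1..3 and they ride on phy
 * layer 0's OVL input; cont_ext_layer_cnt caps a chain at 3 extended
 * layers per base, matching is_extended_over_limit() above.
 */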
  1324. void lye_add_lye_priv_blob(struct mtk_plane_comp_state *comp_state,
  1325. struct mtk_drm_lyeblob_ids *lyeblob_ids,
  1326. int plane_idx, int disp_idx,
  1327. struct drm_device *drm_dev)
  1328. {
  1329. struct drm_property_blob *blob;
  1330. blob = drm_property_create_blob(
  1331. drm_dev, sizeof(struct mtk_plane_comp_state), comp_state);
  1332. lyeblob_ids->lye_plane_blob_id[disp_idx][plane_idx] = blob->base.id;
  1333. }
static int mtk_lye_get_comp_id(int disp_idx, struct drm_device *drm_dev,
			       int layer_map_idx)
{
	uint16_t ovl_mapping_tb = l_rule_ops->get_mapping_table(
		drm_dev, disp_idx, DISP_HW_OVL_TB, 0);
	struct mtk_drm_private *priv = drm_dev->dev_private;

	/* TODO: the component ID should depend on the ddp path and platform */
	if (disp_idx == 0) {
		if (HRT_GET_FIRST_SET_BIT(ovl_mapping_tb) >= layer_map_idx)
			return DDP_COMPONENT_DMDP_RDMA0;
		/* with the VDS path switch feature on, primary has OVL0 only */
		else if (mtk_drm_helper_get_opt(priv->helper_opt,
						MTK_DRM_OPT_VDS_PATH_SWITCH) &&
			 priv->need_vds_path_switch)
			return DDP_COMPONENT_OVL0;
		else if (HRT_GET_FIRST_SET_BIT(
				 ovl_mapping_tb -
				 HRT_GET_FIRST_SET_BIT(ovl_mapping_tb)) >=
			 layer_map_idx)
			return DDP_COMPONENT_OVL0_2L;
		else
			return DDP_COMPONENT_OVL0;
	}
#if defined(CONFIG_MACH_MT6885)
	else if (disp_idx == 1)
		return DDP_COMPONENT_OVL2_2L;
	else
		return DDP_COMPONENT_OVL1_2L;
#else
	/* with the VDS path switch feature on, the vds OVL is OVL0_2L */
	else if (mtk_drm_helper_get_opt(priv->helper_opt,
					MTK_DRM_OPT_VDS_PATH_SWITCH))
		return DDP_COMPONENT_OVL0_2L;
	else
		return DDP_COMPONENT_OVL2_2L;
#endif
}
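/*
 * mtk_lye_get_lye_id() below converts a single-bit layer mask into the
 * index of that bit before looking up the physical OVL index. Worked
 * example (illustrative): layer_map_idx = 0b01000 -> cnt = 3, so layer
 * slot 3 of the mapping table is passed to get_phy_ovl_index().
 */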
static int mtk_lye_get_lye_id(int disp_idx, struct drm_device *drm_dev,
			      int layer_map_idx)
{
	int cnt = 0;

	if (layer_map_idx != 0)
		while (!(layer_map_idx & 0x1)) {
			cnt++;
			layer_map_idx >>= 1;
		}
	layer_map_idx = cnt;

	return get_phy_ovl_index(drm_dev, disp_idx, layer_map_idx);
}
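/*
 * Illustrative note (an assumption, not from the original sources): a
 * "clear layer" punches through everything beneath it, so when HWC marks
 * the topmost candidate with both MTK_LAYERING_OVL_ONLY and
 * MTK_CLIENT_CLEAR_LAYER, clear_layer() below rolls every other layer
 * back to GPU (gles_head..gles_tail spans the whole list) and only the
 * clear layer keeps a dedicated OVL input.
 */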
static void clear_layer(struct drm_mtk_layering_info *disp_info)
{
	int di = 0;
	int i = 0;
	struct drm_mtk_layer_config *c;

	if (!get_layering_opt(LYE_OPT_CLEAR_LAYER))
		return;

	for (di = 0; di < HRT_TYPE_NUM; di++) {
		int g_head = disp_info->gles_head[di];
		int top = -1;

		if (disp_info->layer_num[di] <= 0)
			continue;
		if (g_head == -1)
			continue;

		for (i = disp_info->layer_num[di] - 1; i >= g_head; i--) {
			c = &disp_info->input_config[di][i];
			if (mtk_has_layer_cap(c, MTK_LAYERING_OVL_ONLY) &&
			    mtk_has_layer_cap(c, MTK_CLIENT_CLEAR_LAYER)) {
				top = i;
				break;
			}
		}
		if (top == -1)
			continue;
		if (!mtk_is_gles_layer(disp_info, di, top))
			continue;

		DDPMSG("%s:D%d:L%d\n", __func__, di, top);
		disp_info->gles_head[di] = 0;
		disp_info->gles_tail[di] = disp_info->layer_num[di] - 1;

		c = &disp_info->input_config[di][top];
		if (top == disp_info->gles_head[di])
			disp_info->gles_head[di]++;
		else if (top == disp_info->gles_tail[di])
			disp_info->gles_tail[di]--;
		else
			c->layer_caps |= MTK_DISP_CLIENT_CLEAR_LAYER;

		if ((c->src_width < c->dst_width &&
		     c->src_height < c->dst_height) &&
		    get_layering_opt(LYE_OPT_RPO) &&
		    top < disp_info->gles_tail[di]) {
			c->layer_caps |= MTK_DISP_RSZ_LAYER;
			l_rule_info->addon_scn[di] = ONE_SCALING;
		} else {
			c->layer_caps &= ~MTK_DISP_RSZ_LAYER;
			l_rule_info->addon_scn[di] = NONE;
			if ((c->src_width != c->dst_width ||
			     c->src_height != c->dst_height) &&
			    !mtk_has_layer_cap(c, MTK_MDP_RSZ_LAYER)) {
				c->layer_caps &= ~MTK_DISP_CLIENT_CLEAR_LAYER;
				DDPMSG("%s:remove clear(rsz), caps:0x%08x\n",
				       __func__, c->layer_caps);
			}
		}

		for (i = 0; i < disp_info->layer_num[di]; i++) {
			c = &disp_info->input_config[di][i];
			c->ext_sel_layer = -1;
			if (i != top)
				c->layer_caps &= ~MTK_DISP_RSZ_LAYER;
		}
	}
}
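/*
 * Illustrative note on the dispatch loop below: if a layer carries
 * MTK_DISP_CLIENT_CLEAR_LAYER at index clear_idx, the loop first skips
 * up to it, dispatches it, and then resets i to -1 (and clear_idx to -1)
 * so the whole list is walked again with the remaining clear-capable
 * layers skipped. The clear layer therefore lands on plane 0 while the
 * other layers follow in their original order.
 */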
static int _dispatch_lye_blob_idx(struct drm_mtk_layering_info *disp_info,
				  int layer_map, int disp_idx,
				  struct mtk_drm_lyeblob_ids *lyeblob_ids,
				  struct drm_device *drm_dev)
{
	struct drm_mtk_layer_config *layer_info;
	int ext_cnt = 0, plane_idx = 0, layer_map_idx = 0;
	struct mtk_plane_comp_state comp_state;
	int prev_comp_id = -1;
	int i;
	int clear_idx = -1;
#if defined(CONFIG_MACH_MT6853) || defined(CONFIG_MACH_MT6833)
	int no_compress_layer_num = 0;
#endif

	for (i = 0; i < disp_info->layer_num[disp_idx]; i++) {
		layer_info = &disp_info->input_config[disp_idx][i];
		if (mtk_has_layer_cap(layer_info,
				      MTK_DISP_CLIENT_CLEAR_LAYER)) {
			clear_idx = i;
			break;
		}
	}

	for (i = 0; i < disp_info->layer_num[disp_idx]; i++) {
		layer_info = &disp_info->input_config[disp_idx][i];
		if (clear_idx < 0 &&
		    mtk_has_layer_cap(layer_info, MTK_DISP_CLIENT_CLEAR_LAYER))
			continue;
		if (i < clear_idx) {
			continue;
		} else if (i == clear_idx) {
			i = -1;
			clear_idx = -1;
		}

		comp_state.layer_caps = layer_info->layer_caps;
		if (mtk_is_gles_layer(disp_info, disp_idx, i) &&
		    i != disp_info->gles_head[disp_idx]) {
			layer_info->ovl_id = plane_idx - 1;
			continue;
		}
		if (!is_extended_layer(layer_info))
			layer_map &= ~layer_map_idx;
		layer_map_idx = HRT_GET_FIRST_SET_BIT(layer_map);
		comp_state.comp_id =
			mtk_lye_get_comp_id(disp_idx, drm_dev, layer_map_idx);
		comp_state.lye_id =
			mtk_lye_get_lye_id(disp_idx, drm_dev, layer_map_idx);
		if (is_extended_layer(layer_info)) {
			comp_state.ext_lye_id = LYE_EXT0 + ext_cnt;
			ext_cnt++;
		} else {
			if (comp_state.comp_id != prev_comp_id)
				ext_cnt = 0;
			comp_state.ext_lye_id = LYE_NORMAL;
		}
#if defined(CONFIG_MACH_MT6853) || defined(CONFIG_MACH_MT6833)
		if (disp_idx == 0 &&
		    (comp_state.comp_id == DDP_COMPONENT_OVL0_2L) &&
		    !is_extended_layer(layer_info) &&
		    layer_info->compress != 1) {
			DDPINFO("%s layer_id %d no compress phy layer\n",
				__func__, i);
			no_compress_layer_num++;
		}
#endif
		lye_add_lye_priv_blob(&comp_state, lyeblob_ids, plane_idx,
				      disp_idx, drm_dev);
		layer_info->ovl_id = plane_idx;
		plane_idx++;
		prev_comp_id = comp_state.comp_id;
	}

#if defined(CONFIG_MACH_MT6853) || defined(CONFIG_MACH_MT6833)
	if (disp_idx == 0) {
		HRT_SET_NO_COMPRESS_FLAG(disp_info->hrt_num,
					 no_compress_layer_num);
		DDPINFO("%s disp_info->hrt_num=0x%x,no_comp_layer_num=%d\n",
			__func__, disp_info->hrt_num,
			no_compress_layer_num);
	}
#endif

	return 0;
}
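/*
 * Illustrative budget sketch for the GPU rollback in dispatch_ovl_id()
 * below (assumed numbers, not from the original sources): with a top HRT
 * bound of max_ovl_cnt = 8 and the assert layer active, HRT_AEE_WEIGHT /
 * HRT_UINT_BOUND_BPP units are reserved first and the remainder is
 * normalized by HRT_UINT_WEIGHT. Walking displays back to front, each
 * pass holds back hrt_disp_num slots for the displays not yet processed
 * and rolls any excess layers of the current display back to GPU.
 */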
static int dispatch_ovl_id(struct drm_mtk_layering_info *disp_info,
			   struct mtk_drm_lyeblob_ids *lyeblob_ids,
			   struct drm_device *drm_dev)
{
	int disp_idx;
	bool no_disp = true;

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++)
		if (disp_info->layer_num[disp_idx] > 0) {
			no_disp = false;
			break;
		}
	if (no_disp) {
		DDPINFO("There is no display that needs dispatch\n");
		return 0;
	}

	/* Dispatch gles range if necessary */
	if (disp_info->hrt_num > HRT_LEVEL_NUM - 1) {
		int max_ovl_cnt = g_emi_bound_table[HRT_LEVEL_NUM - 1];
		int valid_ovl_cnt = max_ovl_cnt;
		int hrt_disp_num = get_hrt_disp_num(disp_info);

		if (l_rule_info->dal_enable)
			valid_ovl_cnt -= (HRT_AEE_WEIGHT / HRT_UINT_BOUND_BPP);
		valid_ovl_cnt /= HRT_UINT_WEIGHT;
		hrt_disp_num--;

		for (disp_idx = HRT_TYPE_NUM - 1; disp_idx >= 0; disp_idx--) {
			if (!has_hrt_limit(disp_info, disp_idx))
				continue;
			valid_ovl_cnt =
				rollback_to_GPU(disp_info, disp_idx,
						valid_ovl_cnt - hrt_disp_num);
			valid_ovl_cnt += hrt_disp_num;
			hrt_disp_num--;
		}
		/* adjust hrt_num */
		disp_info->hrt_num =
			get_hrt_level(max_ovl_cnt * HRT_UINT_BOUND_BPP, 0);
		disp_info->hrt_weight = max_ovl_cnt * 2 / HRT_UINT_WEIGHT;
	}

	clear_layer(disp_info);

	/* Dispatch OVL id */
	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++) {
		int ovl_cnt;
		uint16_t layer_map;
		struct drm_mtk_layer_config *c;
		bool clear = false;
		int i = 0;

		if (disp_info->layer_num[disp_idx] <= 0)
			continue;

		for (i = 0; i < disp_info->layer_num[disp_idx]; i++) {
			c = &disp_info->input_config[disp_idx][i];
			if (mtk_has_layer_cap(c, MTK_DISP_CLIENT_CLEAR_LAYER)) {
				clear = true;
				break;
			}
		}
		ovl_cnt = get_phy_ovl_layer_cnt(disp_info, disp_idx);
		if (clear)
			ovl_cnt++;
		layer_map = l_rule_ops->get_mapping_table(
			drm_dev, disp_idx, DISP_HW_LAYER_TB, ovl_cnt);
		if (l_rule_info->dal_enable) {
			layer_map = l_rule_ops->get_mapping_table(
				drm_dev, disp_idx, DISP_HW_LAYER_TB,
				MAX_PHY_OVL_CNT);
			layer_map &= HRT_AEE_LAYER_MASK;
		}
		_dispatch_lye_blob_idx(disp_info, layer_map,
				       disp_idx, lyeblob_ids, drm_dev);
	}

	return 0;
}
static int check_layering_result(struct drm_mtk_layering_info *info)
{
	int disp_idx;
	bool no_disp = true;

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++)
		if (info->layer_num[disp_idx] > 0) {
			no_disp = false;
			break;
		}
	if (no_disp) {
		DDPINFO("There is no display that needs checking\n");
		return 0;
	}

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++) {
		int layer_num, max_ovl_id, ovl_layer_num;

		if (info->layer_num[disp_idx] <= 0)
			continue;
		if (disp_idx == HRT_PRIMARY)
			ovl_layer_num = PRIMARY_OVL_LAYER_NUM;
		else
			ovl_layer_num = SECONDARY_OVL_LAYER_NUM;
		layer_num = info->layer_num[disp_idx];
		max_ovl_id = info->input_config[disp_idx][layer_num - 1].ovl_id;
		if (max_ovl_id >= ovl_layer_num)
			DDPAEE("Inv ovl:%d,disp:%d\n", max_ovl_id, disp_idx);
	}
	return 0;
}
static int check_disp_info(struct drm_mtk_layering_info *disp_info)
{
	int disp_idx, ghead, gtail;
	int i;

	if (disp_info == NULL) {
		DDPPR_ERR("[HRT]disp_info is empty\n");
		return -1;
	}

	for (i = 0; i < 3; i++) {
		int mode = disp_info->disp_mode[i];
		int layer_num = disp_info->layer_num[i];

		if (mode < 0 || mode >= MTK_DRM_SESSION_NUM) {
			DDPPR_ERR("[HRT] i %d, invalid mode %d\n", i, mode);
			return -1;
		}
		if (layer_num < 0) {
			DDPPR_ERR("[HRT] i %d, invalid layer num %d\n",
				  i, layer_num);
			return -1;
		}
	}

	/* these are set by the kernel, so they should still be 0 here */
	if (disp_info->res_idx || disp_info->hrt_weight || disp_info->hrt_idx) {
		DDPPR_ERR("[HRT] fail, res_idx %d, hrt_weight %u, hrt_idx %u\n",
			  disp_info->res_idx,
			  disp_info->hrt_weight,
			  disp_info->hrt_idx);
		return -1;
	}

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++) {
		if (disp_info->layer_num[disp_idx] > 0 &&
		    disp_info->input_config[disp_idx] == NULL) {
			DDPPR_ERR(
				"[HRT]input config is empty,disp:%d,l_num:%d\n",
				disp_idx, disp_info->layer_num[disp_idx]);
			return -1;
		}

		ghead = disp_info->gles_head[disp_idx];
		gtail = disp_info->gles_tail[disp_idx];
		if ((ghead < 0 && gtail >= 0) || (gtail < 0 && ghead >= 0)) {
			dump_disp_info(disp_info, DISP_DEBUG_LEVEL_ERR);
			DDPPR_ERR("[HRT]gles invalid,disp:%d,head:%d,tail:%d\n",
				  disp_idx, disp_info->gles_head[disp_idx],
				  disp_info->gles_tail[disp_idx]);
			return -1;
		}
	}
	return 0;
}
static int
_copy_layer_info_from_disp(struct drm_mtk_layering_info *disp_info_user,
			   int debug_mode, int disp_idx)
{
	struct drm_mtk_layering_info *l_info = &layering_info;
	unsigned long layer_size = 0;
	int ret = 0, layer_num = 0;

	if (l_info->layer_num[disp_idx] <= 0) {
		/* nothing to copy for this display */
		return 0;
	}

	layer_num = l_info->layer_num[disp_idx];
	layer_size = sizeof(struct drm_mtk_layer_config) * layer_num;
	l_info->input_config[disp_idx] = kzalloc(layer_size, GFP_KERNEL);
	if (l_info->input_config[disp_idx] == NULL) {
		DDPPR_ERR("%s:%d invalid input_config[%d]:0x%p\n",
			  __func__, __LINE__,
			  disp_idx, l_info->input_config[disp_idx]);
		return -ENOMEM;
	}

	if (debug_mode) {
		memcpy(l_info->input_config[disp_idx],
		       disp_info_user->input_config[disp_idx], layer_size);
	} else {
		if (copy_from_user(l_info->input_config[disp_idx],
				   disp_info_user->input_config[disp_idx],
				   layer_size)) {
			DDPPR_ERR("%s:%d copy failed:(0x%p,0x%p), size:%ld\n",
				  __func__, __LINE__,
				  l_info->input_config[disp_idx],
				  disp_info_user->input_config[disp_idx],
				  layer_size);
			return -EFAULT;
		}
	}
	return ret;
}
static int set_disp_info(struct drm_mtk_layering_info *disp_info_user,
			 int debug_mode)
{
	int i;

	memcpy(&layering_info, disp_info_user,
	       sizeof(struct drm_mtk_layering_info));
	for (i = 0; i < HRT_TYPE_NUM; i++)
		_copy_layer_info_from_disp(disp_info_user, debug_mode, i);

	memset(l_rule_info->addon_scn, 0x0, sizeof(l_rule_info->addon_scn));
	return 0;
}
static int
_copy_layer_info_by_disp(struct drm_mtk_layering_info *disp_info_user,
			 int debug_mode, int disp_idx)
{
	struct drm_mtk_layering_info *l_info = &layering_info;
	unsigned long layer_size = 0;
	int ret = 0;

	if (l_info->layer_num[disp_idx] <= 0) {
		/* nothing to copy back for this display */
		return -EFAULT;
	}

	disp_info_user->gles_head[disp_idx] = l_info->gles_head[disp_idx];
	disp_info_user->gles_tail[disp_idx] = l_info->gles_tail[disp_idx];
	layer_size = sizeof(struct drm_mtk_layer_config) *
		     disp_info_user->layer_num[disp_idx];

	if (debug_mode) {
		memcpy(disp_info_user->input_config[disp_idx],
		       l_info->input_config[disp_idx], layer_size);
	} else {
		if (copy_to_user(disp_info_user->input_config[disp_idx],
				 l_info->input_config[disp_idx], layer_size)) {
			DDPINFO("[DISP][FB]: copy_to_user failed! line:%d\n",
				__LINE__);
			ret = -EFAULT;
		}
		kfree(l_info->input_config[disp_idx]);
	}
	return ret;
}
static int copy_layer_info_to_user(struct drm_mtk_layering_info *disp_info_user,
				   int debug_mode)
{
	int ret = 0, i;
	struct drm_mtk_layering_info *l_info = &layering_info;

	disp_info_user->hrt_num = l_info->hrt_num;
	disp_info_user->hrt_idx = l_info->hrt_idx;
	disp_info_user->hrt_weight = l_info->hrt_weight;
	for (i = 0; i < HRT_TYPE_NUM; i++)
		_copy_layer_info_by_disp(disp_info_user, debug_mode, i);

	return ret;
}
#ifdef HRT_UT_DEBUG
static int set_hrt_state(enum HRT_SYS_STATE sys_state, int en)
{
	switch (sys_state) {
	/* the three known states share the same bit handling */
	case DISP_HRT_MJC_ON:
	case DISP_HRT_FORCE_DUAL_OFF:
	case DISP_HRT_MULTI_TUI_ON:
		if (en)
			l_rule_info->hrt_sys_state |= (1 << sys_state);
		else
			l_rule_info->hrt_sys_state &= ~(1 << sys_state);
		break;
	default:
		DDPPR_ERR("unknown hrt scenario\n");
		break;
	}
	DDPMSG("Set hrt sys_state:%d, en:%d\n", sys_state, en);
	return 0;
}
#endif
void mtk_register_layering_rule_ops(struct layering_rule_ops *ops,
				    struct layering_rule_info_t *info)
{
	l_rule_ops = ops;
	l_rule_info = info;
}
void lye_add_blob_ids(struct drm_mtk_layering_info *l_info,
		      struct mtk_drm_lyeblob_ids *lyeblob_ids,
		      struct drm_device *drm_dev,
		      int crtc_num,
		      int crtc_mask)
{
	struct drm_property_blob *blob;
	struct mtk_lye_ddp_state lye_state;
	struct mtk_drm_private *mtk_drm = drm_dev->dev_private;
	unsigned int i;

	memcpy(lye_state.scn, l_rule_info->addon_scn, sizeof(lye_state.scn));
	for (i = 0; i < HRT_TYPE_NUM; i++) {
		if (lye_state.scn[i] < NONE ||
		    lye_state.scn[i] >= ADDON_SCN_NR) {
			DDPPR_ERR("[%s]abnormal scn[%u]:%d,set scn to 0\n",
				  __func__, i, lye_state.scn[i]);
			lye_state.scn[i] = NONE;
		}
	}
	lye_state.lc_tgt_layer = 0;

	blob = drm_property_create_blob(
		drm_dev, sizeof(struct mtk_lye_ddp_state), &lye_state);
	lyeblob_ids->lye_idx = l_rule_info->hrt_idx;
	lyeblob_ids->frame_weight = l_info->hrt_weight;
	lyeblob_ids->hrt_num = l_info->hrt_num;
	/* drm_property_create_blob() returns an ERR_PTR on failure */
	if (!IS_ERR(blob))
		lyeblob_ids->ddp_blob_id = blob->base.id;
	lyeblob_ids->ref_cnt = crtc_num;
	lyeblob_ids->ref_cnt_mask = crtc_mask;
	lyeblob_ids->free_cnt_mask = crtc_mask;
	INIT_LIST_HEAD(&lyeblob_ids->list);
	mutex_lock(&mtk_drm->lyeblob_list_mutex);
	list_add_tail(&lyeblob_ids->list, &mtk_drm->lyeblob_head);
	mutex_unlock(&mtk_drm->lyeblob_list_mutex);
}
static bool is_rsz_valid(struct drm_mtk_layer_config *c)
{
	if (c->src_width == c->dst_width && c->src_height == c->dst_height)
		return false;
	if (c->src_width > c->dst_width || c->src_height > c->dst_height)
		return false;
	/*
	 * HWC used to adjust MDP layer alignment after query_valid_layer,
	 * which made the layering rule decision unreliable, so a constraint
	 * was added to keep frame_cfg from becoming a scale-down. Now that
	 * HWC adjusts MDP layer alignment before query_valid_layer, the
	 * check below is disabled.
	 */
#if 0
	if ((mtk_has_layer_cap(c, MTK_MDP_RSZ_LAYER) ||
	     mtk_has_layer_cap(c, MTK_DISP_RSZ_LAYER)) &&
	    (c->dst_width - c->src_width <= MDP_ALIGNMENT_MARGIN ||
	     c->dst_height - c->src_height <= MDP_ALIGNMENT_MARGIN))
		return false;
#endif
	return true;
}
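/*
 * Illustrative example for is_same_ratio() below (assumed numbers): with
 * ref scaling 640x360 -> 1280x720 and c scaling 320x180 -> 640x360, the
 * rounded-up back-projection (640 * 640 + 1279) / 1280 = 320 matches
 * c->src_width exactly, so diff_w = 0 (and likewise diff_h = 0) and the
 * two layers are treated as sharing one resize ratio.
 */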
static int is_same_ratio(struct drm_mtk_layer_config *ref,
			 struct drm_mtk_layer_config *c)
{
	int diff_w, diff_h;

	if (!ref->dst_width || !ref->dst_height) {
		DDPPR_ERR("%s:ref dst(%dx%d)\n", __func__, ref->dst_width,
			  ref->dst_height);
		return -EINVAL;
	}

	diff_w = (c->dst_width * ref->src_width + (ref->dst_width - 1)) /
			 ref->dst_width -
		 c->src_width;
	diff_h = (c->dst_height * ref->src_height + (ref->dst_height - 1)) /
			 ref->dst_height -
		 c->src_height;
	if (abs(diff_w) > 1 || abs(diff_h) > 1)
		return false;

	return true;
}
#define RATIO_LIMIT 2
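/*
 * Illustrative example for same_ratio_limitation() below (assumed
 * numbers): on a 1080x1920 panel with RATIO_LIMIT = 2, a layer upscaled
 * by only 10 px in width gives 100 * 10 / 1080 = 0 < 2, so the near-1:1
 * upscale is rejected; a 50 px difference gives 100 * 50 / 1080 = 4 and
 * passes.
 */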
static bool same_ratio_limitation(struct drm_crtc *crtc,
				  struct drm_mtk_layer_config *tgt,
				  int limitation)
{
	int panel_w = 0, panel_h = 0;
	int diff_w = 0, diff_h = 0;

	panel_w = crtc->mode.hdisplay;
	panel_h = crtc->mode.vdisplay;
	diff_w = tgt->dst_width - tgt->src_width;
	diff_h = tgt->dst_height - tgt->src_height;
	if (panel_w <= 0 || panel_h <= 0)
		return false;

	if (((100 * diff_w / panel_w < limitation) && (diff_w > 0)) ||
	    ((100 * diff_h / panel_h < limitation) && (diff_h > 0)))
		return true;
	else
		return false;
}
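/*
 * Illustrative note for the ROI math in RPO_rule() below: the source
 * offset is the destination offset mapped back through the scale ratio
 * and rounded to nearest via the "* 10 ... + 5 ... / 10" idiom. E.g.
 * (assumed numbers) dst_offset_x = 100 with scaling 720 -> 1080 gives
 * ((100 * 720 * 10) / 1080 + 5) / 10 = (666 + 5) / 10 = 67, i.e.
 * round(100 * 720 / 1080) = round(66.7).
 */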
static int RPO_rule(struct drm_crtc *crtc,
		    struct drm_mtk_layering_info *disp_info, int disp_idx,
		    bool has_pq)
{
	struct drm_mtk_layer_config *c = NULL;
	struct drm_mtk_layer_config *ref_layer = NULL;
	struct mtk_rect src_layer_roi = {0};
	struct mtk_rect dst_layer_roi = {0};
	struct mtk_rect src_roi = {0};
	struct mtk_rect dst_roi = {0};
	int rsz_idx = -1;
	int i = 0;

	/* if PQ is needed, only one resize layer is supported, on DMDP */
	if (has_pq) {
		c = &disp_info->input_config[disp_idx][i];
		if (c->src_width == c->dst_width &&
		    c->src_height == c->dst_height)
			return 0;
		if (c->src_width > c->dst_width ||
		    c->src_height > c->dst_height)
			return 0;
		c->layer_caps |= MTK_DMDP_RSZ_LAYER;
		rsz_idx = i;
		goto done;
	}

	for (i = 0; i < disp_info->layer_num[disp_idx] &&
	     i < DISP_RSZ_LAYER_NUM; i++) {
		c = &disp_info->input_config[disp_idx][i];
		/*if (i == 0 && c->src_fmt == MTK_DRM_FORMAT_DIM)
		 *	continue;
		 */
		if (disp_info->gles_head[disp_idx] >= 0 &&
		    disp_info->gles_head[disp_idx] <= i)
			break;

		/* RSZ HW limitation: input resolution must exceed 4x4 */
		if ((c->src_width <= 4) || (c->src_height <= 4))
			break;
		if (!is_rsz_valid(c))
			break;

		if (!ref_layer)
			ref_layer = c;
		else if (is_same_ratio(ref_layer, c) <= 0 &&
			 is_same_ratio(c, ref_layer) <= 0)
			break;
		else if (same_ratio_limitation(crtc, c, RATIO_LIMIT))
			break;

		mtk_rect_make(&src_layer_roi,
			      ((c->dst_offset_x * c->src_width * 10)
				/ c->dst_width + 5) / 10,
			      ((c->dst_offset_y * c->src_height * 10)
				/ c->dst_height + 5) / 10,
			      c->src_width, c->src_height);
		mtk_rect_make(&dst_layer_roi,
			      c->dst_offset_x, c->dst_offset_y,
			      c->dst_width, c->dst_height);
		mtk_rect_join(&src_layer_roi, &src_roi, &src_roi);
		mtk_rect_join(&dst_layer_roi, &dst_roi, &dst_roi);
		if (src_roi.width > dst_roi.width ||
		    src_roi.height > dst_roi.height) {
			DDPPR_ERR(
				"L%d:scale down(%d,%d,%dx%d)->(%d,%d,%dx%d)\n",
				i, src_roi.x, src_roi.y, src_roi.width,
				src_roi.height, dst_roi.x, dst_roi.y,
				dst_roi.width, dst_roi.height);
			break;
		}
		if (src_roi.width > RSZ_TILE_LENGTH ||
		    src_roi.height > RSZ_IN_MAX_HEIGHT)
			break;

		c->layer_caps |= MTK_DISP_RSZ_LAYER;
		rsz_idx = i;
	}

done:
	return rsz_idx + 1;
}
/* resizing_rule - layering rule for the resize layer layout */
static unsigned int resizing_rule(struct drm_device *dev,
				  struct drm_mtk_layering_info *disp_info,
				  bool has_pq)
{
	unsigned int scale_num = 0;
	struct drm_crtc *crtc;

	/* RPO only supports the primary display */
	if (disp_info->layer_num[HRT_SECONDARY] > 0)
		mtk_rollback_all_resize_layer_to_GPU(disp_info, HRT_SECONDARY);
	if (disp_info->layer_num[HRT_THIRD] > 0)
		mtk_rollback_all_resize_layer_to_GPU(disp_info, HRT_THIRD);

	if (disp_info->layer_num[HRT_PRIMARY] > 0) {
		drm_for_each_crtc(crtc, dev)
			if (drm_crtc_index(crtc) == 0)
				break;

		if (crtc)
			scale_num = RPO_rule(crtc, disp_info, HRT_PRIMARY,
					     has_pq);
		mtk_rollback_resize_layer_to_GPU_range(disp_info, HRT_PRIMARY,
			scale_num, disp_info->layer_num[HRT_PRIMARY] - 1);
	}
	return scale_num;
}
static unsigned int get_scn_decision_flag(
	struct drm_mtk_layering_info *disp_info)
{
	unsigned int scn_decision_flag = 0;

	if (is_triple_disp(disp_info))
		scn_decision_flag |= SCN_TRIPLE_DISP;
	return scn_decision_flag;
}
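/*
 * Illustrative note for get_crtc_num() below: the session mode of CRTC 0
 * implies the CRTC count (DL / DC_MIRROR -> 1, DOUBLE_DL -> 2,
 * TRIPLE_DL -> 3), and the mask is built from the non-NULL input_config
 * slots. E.g. (assumed numbers) configs present on displays 0 and 2
 * yield crtc_mask = 0b101; a mismatch against the mode-implied count is
 * reported and the smaller of the two values wins.
 */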
static int get_crtc_num(
	struct drm_mtk_layering_info *disp_info_user,
	int *crtc_mask)
{
	int i;
	int crtc_num;
	int input_config_num;

	if (!crtc_mask) {
		DDPPR_ERR("%s:%d null crtc_mask\n",
			  __func__, __LINE__);
		return 0;
	}

	switch (disp_info_user->disp_mode[0]) {
	case MTK_DRM_SESSION_DL:
	case MTK_DRM_SESSION_DC_MIRROR:
		crtc_num = 1;
		break;
	case MTK_DRM_SESSION_DOUBLE_DL:
		crtc_num = 2;
		break;
	case MTK_DRM_SESSION_TRIPLE_DL:
		crtc_num = 3;
		break;
	default:
		crtc_num = 0;
		break;
	}

	/*
	 * When CRTC 0 is disabled, disp_mode[0] is 0, but other displays
	 * might still exist. Thus traverse each CRTC's disp_mode to count
	 * the active CRTCs.
	 */
	if (crtc_num == 0) {
		for (i = 0; i < 3; i++)
			crtc_num += !!disp_info_user->disp_mode[i];
	}

	/* check input config number */
	input_config_num = 0;
	*crtc_mask = 0;
	for (i = 0; i < 3; i++) {
		if (disp_info_user->input_config[i]) {
			*crtc_mask |= (1 << i);
			input_config_num++;
		}
	}
	if (input_config_num != crtc_num) {
		DDPPR_ERR("%s:%d mode[%d] num:%d not matched config num:%d\n",
			  __func__, __LINE__,
			  disp_info_user->disp_mode[0],
			  crtc_num, input_config_num);
		crtc_num = min(crtc_num, input_config_num);
	}
	return crtc_num;
}
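/*
 * layering_rule_start() runs the full HRT flow in three phases, matching
 * the numbered comments inside: (1) pre-distribution, rolling layers
 * back to GPU for HW limitations and picking the resize scenario,
 * (2) overlapping, deriving the HRT level from the weighted overlap
 * count, and (3) dispatching, assigning an OVL id to every input layer
 * (all GLES layers share one id).
 */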
static int layering_rule_start(struct drm_mtk_layering_info *disp_info_user,
			       int debug_mode, struct drm_device *dev)
{
	int ret;
	int overlap_num;
	struct mtk_drm_lyeblob_ids *lyeblob_ids;
	unsigned int scale_num = 0;
	unsigned int scn_decision_flag = 0;
	int crtc_num, crtc_mask;
	int disp_idx;

	DRM_MMP_EVENT_START(layering, (unsigned long)disp_info_user,
			    (unsigned long)dev);
	roll_gpu_for_idle = 0;

	if (l_rule_ops == NULL || l_rule_info == NULL) {
		DRM_MMP_MARK(layering, 0, 0);
		DRM_MMP_EVENT_END(layering, 0, 0);
		DDPPR_ERR("Layering rule has not been initialized:(%p,%p)\n",
			  l_rule_ops, l_rule_info);
		return -EFAULT;
	}
	if (check_disp_info(disp_info_user) < 0) {
		DRM_MMP_MARK(layering, 0, 1);
		DRM_MMP_EVENT_END(layering, 0, 0);
		DDPPR_ERR("check_disp_info fail\n");
		return -EFAULT;
	}
	if (set_disp_info(disp_info_user, debug_mode)) {
		DRM_MMP_MARK(layering, 0, 2);
		DRM_MMP_EVENT_END(layering, 0, 0);
		return -EFAULT;
	}

	print_disp_info_to_log_buffer(&layering_info);
#ifdef HRT_DEBUG_LEVEL1
	DDPMSG("[Input data]\n");
	dump_disp_info(&layering_info, DISP_DEBUG_LEVEL_INFO);
#endif
	l_rule_info->hrt_idx++;
	if (l_rule_info->hrt_idx == 0xffffffff)
		l_rule_info->hrt_idx = 0;

	l_rule_ops->copy_hrt_bound_table(&layering_info,
					 0, g_emi_bound_table, dev);

	/* 1.Pre-distribution */
	l_rule_info->dal_enable = mtk_drm_dal_enable();

	if (l_rule_ops->rollback_to_gpu_by_hw_limitation)
		ret = l_rule_ops->rollback_to_gpu_by_hw_limitation(
			dev, &layering_info);

	scn_decision_flag = get_scn_decision_flag(&layering_info);

	/* Check and choose the resize scenario */
	if (get_layering_opt(LYE_OPT_RPO)) {
		bool has_pq = scn_decision_flag &
			      (SCN_NEED_VP_PQ | SCN_NEED_GAME_PQ);

		scale_num = resizing_rule(dev, &layering_info, has_pq);
	} else {
		mtk_rollback_all_resize_layer_to_GPU(&layering_info,
						     HRT_PRIMARY);
		mtk_rollback_all_resize_layer_to_GPU(&layering_info,
						     HRT_SECONDARY);
		mtk_rollback_all_resize_layer_to_GPU(&layering_info,
						     HRT_THIRD);
	}

	/* fbdc_rule should run after resizing_rule
	 * to optimize secondary display BW
	 */
	if (l_rule_ops->fbdc_rule)
		l_rule_ops->fbdc_rule(&layering_info);

	/* Add for FBDC */
	if (l_rule_ops->fbdc_pre_calculate)
		l_rule_ops->fbdc_pre_calculate(&layering_info);

	/* Initial HRT conditions */
	l_rule_ops->scenario_decision(scn_decision_flag, scale_num);

	/* Layer Grouping */
	if (l_rule_ops->fbdc_adjust_layout)
		l_rule_ops->fbdc_adjust_layout(&layering_info,
					       ADJUST_LAYOUT_EXT_GROUPING);
	ret = ext_layer_grouping(dev, &layering_info);
	if (l_rule_ops->fbdc_restore_layout)
		l_rule_ops->fbdc_restore_layout(&layering_info,
						ADJUST_LAYOUT_EXT_GROUPING);

	/* GLES adjustment and ext layer checking */
	ret = filter_by_ovl_cnt(dev, &layering_info);

	/*
	 * 2.Overlapping
	 * Calculate the overlap number of the available input layers.
	 * If the overlap number is out of bound, decrease the number of
	 * available layers down to the overlap number.
	 */
	/* [PVRIC] change dst layout before calculating overlap */
	if (l_rule_ops->fbdc_adjust_layout)
		l_rule_ops->fbdc_adjust_layout(&layering_info,
					       ADJUST_LAYOUT_OVERLAP_CAL);

	overlap_num = calc_hrt_num(dev, &layering_info);
	layering_info.hrt_weight = overlap_num;
	DDPINFO("overlap_num %u\n", layering_info.hrt_weight);

	if (l_rule_ops->fbdc_restore_layout)
		l_rule_ops->fbdc_restore_layout(&layering_info,
						ADJUST_LAYOUT_OVERLAP_CAL);

	/*
	 * 3.Dispatching
	 * Fill in the layer id for each input layer.
	 * All the GLES layers share the same layer id.
	 */
	if (l_rule_ops->rollback_all_to_GPU_for_idle != NULL &&
	    l_rule_ops->rollback_all_to_GPU_for_idle(dev)) {
		int i;

		roll_gpu_for_idle = 1;
		rollback_all_to_GPU(&layering_info, HRT_PRIMARY);

		/* TODO: assume there would be 2 resize layers */
		for (i = 0; i < layering_info.layer_num[HRT_PRIMARY]; i++)
			layering_info.input_config[HRT_PRIMARY][i].layer_caps &=
				~MTK_DISP_RSZ_LAYER;
		l_rule_info->addon_scn[HRT_PRIMARY] = NONE;

		layering_info.hrt_num = HRT_LEVEL_LEVEL0;
		layering_info.hrt_weight = 2;
	}

	lyeblob_ids = kzalloc(sizeof(struct mtk_drm_lyeblob_ids), GFP_KERNEL);
	ret = dispatch_ovl_id(&layering_info, lyeblob_ids, dev);
	check_layering_result(&layering_info);

	layering_info.hrt_idx = l_rule_info->hrt_idx;
	HRT_SET_AEE_FLAG(layering_info.hrt_num, l_rule_info->dal_enable);
	HRT_SET_WROT_SRAM_FLAG(layering_info.hrt_num, l_rule_info->wrot_sram);
	dump_disp_info(&layering_info, DISP_DEBUG_LEVEL_INFO);
	dump_disp_trace(&layering_info);
	/* Remove MMP */
	/* mmprofile_log_ex(ddp_mmp_get_events()->hrt, MMPROFILE_FLAG_PULSE,
	 *		layering_info.hrt_num,
	 *		(layering_info.gles_head[0] << 24) |
	 *		(layering_info.gles_tail[0] << 16) |
	 *		(layering_info.layer_num[0] << 8) |
	 *		layering_info.layer_num[1]);
	 */
	crtc_num = get_crtc_num(disp_info_user, &crtc_mask);
	lye_add_blob_ids(&layering_info, lyeblob_ids, dev, crtc_num, crtc_mask);

	for (disp_idx = 0; disp_idx < HRT_TYPE_NUM; disp_idx++)
		DRM_MMP_MARK(layering, layering_info.hrt_num,
			     (layering_info.gles_head[disp_idx] << 24) |
			     (layering_info.gles_tail[disp_idx] << 16) |
			     (layering_info.layer_num[disp_idx] << 8) |
			     disp_idx);

	ret = copy_layer_info_to_user(disp_info_user, debug_mode);
	DRM_MMP_EVENT_END(layering, (unsigned long)disp_info_user,
			  (unsigned long)dev);
	return ret;
}
/**** UT Program ****/
#ifdef HRT_UT_DEBUG
static void debug_set_layer_data(struct drm_mtk_layering_info *disp_info,
				 int disp_id, int data_type, int value)
{
	static int layer_id = -1;
	struct drm_mtk_layer_config *layer_info = NULL;

	if (data_type != HRT_LAYER_DATA_ID && layer_id == -1)
		return;

	/* handle the layer id first, before indexing input_config with it */
	if (data_type == HRT_LAYER_DATA_ID) {
		layer_id = value;
		return;
	}

	layer_info = &disp_info->input_config[disp_id][layer_id];
	switch (data_type) {
	case HRT_LAYER_DATA_SRC_FMT:
		layer_info->src_fmt = value;
		break;
	case HRT_LAYER_DATA_DST_OFFSET_X:
		layer_info->dst_offset_x = value;
		break;
	case HRT_LAYER_DATA_DST_OFFSET_Y:
		layer_info->dst_offset_y = value;
		break;
	case HRT_LAYER_DATA_DST_WIDTH:
		layer_info->dst_width = value;
		break;
	case HRT_LAYER_DATA_DST_HEIGHT:
		layer_info->dst_height = value;
		break;
	case HRT_LAYER_DATA_SRC_WIDTH:
		layer_info->src_width = value;
		break;
	case HRT_LAYER_DATA_SRC_HEIGHT:
		layer_info->src_height = value;
		break;
	case HRT_LAYER_DATA_SRC_OFFSET_X:
		layer_info->src_offset_x = value;
		break;
	case HRT_LAYER_DATA_SRC_OFFSET_Y:
		layer_info->src_offset_y = value;
		break;
	case HRT_LAYER_DATA_COMPRESS:
		layer_info->compress = value;
		break;
	case HRT_LAYER_DATA_CAPS:
		layer_info->layer_caps = value;
		break;
	default:
		break;
	}
}
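/*
 * parse_hrt_data_value() below walks tokens of the form "[key]value",
 * returning a pointer to the next '[' (or NULL at end of line).
 * Illustrative example: given "[layer_num]5[0]", the first call yields
 * value = 5 and returns a pointer at "[0]", so a second call can parse
 * the display id.
 */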
static char *parse_hrt_data_value(char *start, long int *value)
{
	char *tok_start = NULL, *tok_end = NULL;
	int ret;

	tok_start = strchr(start + 1, ']');
	if (!tok_start)
		return NULL;
	tok_end = strchr(tok_start + 1, '[');
	if (tok_end)
		*tok_end = 0;
	ret = kstrtol(tok_start + 1, 10, value);
	if (ret)
		DDPINFO("Parsing error gles_num:%d, p:%s, ret:%d\n",
			(int)*value, tok_start + 1, ret);

	return tok_end;
}
static void print_drm_mtk_layer_config(struct drm_mtk_layer_config *c)
{
	DDPMSG("L%u/(%u,%u,%u,%u)/(%u,%u,%u,%u)/cpr%d/e%d/cap0x%x/cl%x\n",
	       c->ovl_id, c->src_offset_x, c->src_offset_y, c->src_width,
	       c->src_height, c->dst_offset_x, c->dst_offset_y, c->dst_width,
	       c->dst_height, c->compress, c->ext_sel_layer, c->layer_caps,
	       c->clip);
}

static void print_hrt_result(struct drm_mtk_layering_info *disp_info)
{
	unsigned int i = 0, j = 0;

	for (i = 0; i < HRT_TYPE_NUM; i++) {
		DDPMSG("### DISP%d ###\n", i);
		DDPMSG("[head]%d[tail]%d\n", disp_info->gles_head[i],
		       disp_info->gles_tail[i]);
		DDPMSG("[hrt_num]%d\n", disp_info->hrt_num);
		for (j = 0; j < disp_info->layer_num[i]; j++)
			print_drm_mtk_layer_config(
				&(disp_info->input_config[i][j]));
	}
}
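/*
 * load_hrt_test_data() below reads /sdcard/hrt_data.txt line by line.
 * Hypothetical sample matching the keys the parser accepts (the exact
 * field counts are an illustration; [set_layer] feeds one value per
 * HRT_LAYER_DATA_* entry in order):
 *
 *	# comment
 *	[layer_num]5[0]
 *	[set_layer]0[0][0][0][0][1080][1920][1080][1920][0][0][0][0]
 *	[set_gles]0[3][5]
 *	[test_start]1
 *	[layer_result]0[0][0][-1]
 *	[gles_result]0[3][5]
 *	[print_out_test_result]
 *	[test_end]
 */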
static int load_hrt_test_data(struct drm_mtk_layering_info *disp_info,
			      struct drm_device *dev)
{
	char filename[] = "/sdcard/hrt_data.txt";
	char line_buf[512];
	char *tok;
	struct file *filp;
	mm_segment_t oldfs;
	int ret, pos, i;
	long int disp_id, test_case;
	bool is_end = false, is_test_pass = false;
	struct drm_mtk_layer_config *input_config;

	pos = 0;
	test_case = -1;
	oldfs = get_fs();
	set_fs(KERNEL_DS);

	filp = filp_open(filename, O_RDONLY, 0777);
	if (IS_ERR(filp)) {
		DDPINFO("File open error:%s\n", filename);
		set_fs(oldfs);
		return -1;
	}
	if (!filp->f_op) {
		DDPINFO("File Operation Method Error!!\n");
		filp_close(filp, NULL);
		set_fs(oldfs);
		return -1;
	}

	while (1) {
		ret = filp->f_op->llseek(filp, filp->f_pos, pos);
		memset(line_buf, 0x0, sizeof(line_buf));
		ret = filp->f_op->read(filp, line_buf, sizeof(line_buf),
				       &filp->f_pos);
		tok = strchr(line_buf, '\n');
		if (tok != NULL)
			*tok = '\0';
		else
			is_end = true;
		pos += strlen(line_buf) + 1;
		filp->f_pos = pos;

		if (strncmp(line_buf, "#", 1) == 0) {
			continue;
		} else if (strncmp(line_buf, "[layer_num]", 11) == 0) {
			long layer_num = 0;
			unsigned long layer_size = 0;

			tok = parse_hrt_data_value(line_buf, &layer_num);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			if (disp_id >= HRT_TYPE_NUM)
				goto end;
			if (layer_num != 0) {
				layer_size =
					sizeof(struct drm_mtk_layer_config) *
					layer_num;
				disp_info->input_config[disp_id] =
					kzalloc(layer_size, GFP_KERNEL);
			}
			disp_info->layer_num[disp_id] = layer_num;
			if (disp_info->input_config[disp_id] == NULL)
				goto end;
		} else if (strncmp(line_buf, "[set_layer]", 11) == 0) {
			long tmp_info;

			tok = strchr(line_buf, ']');
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			for (i = 0; i < HRT_LAYER_DATA_NUM; i++) {
				if (!tok)
					goto end;
				tok = parse_hrt_data_value(tok, &tmp_info);
				debug_set_layer_data(disp_info, disp_id, i,
						     tmp_info);
			}
		} else if (strncmp(line_buf, "[test_start]", 12) == 0) {
			tok = parse_hrt_data_value(line_buf, &test_case);
			layering_rule_start(disp_info, 1, dev);
			is_test_pass = true;
		} else if (strncmp(line_buf, "[test_end]", 10) == 0) {
			kfree(disp_info->input_config[0]);
			kfree(disp_info->input_config[1]);
			memset(disp_info, 0x0,
			       sizeof(struct drm_mtk_layering_info));
			is_end = true;
		} else if (strncmp(line_buf, "[print_out_test_result]", 23) ==
			   0) {
			DDPINFO("Test case %d is %s\n", (int)test_case,
				is_test_pass ? "Pass" : "Fail");
		} else if (strncmp(line_buf, "[layer_result]", 14) == 0) {
			long layer_result = 0, layer_id;

			tok = strchr(line_buf, ']');
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &layer_id);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &layer_result);
			input_config =
				&disp_info->input_config[disp_id][layer_id];
			if (layer_result != input_config->ovl_id) {
				DDPINFO("case:%d,ovl_id incorrect,%d/%d\n",
					(int)test_case, input_config->ovl_id,
					(int)layer_result);
				is_test_pass = false;
			}
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &layer_result);
			if (layer_result != input_config->ext_sel_layer) {
				DDPINFO("case:%d,ext_sel_layer wrong,%d/%d\n",
					(int)test_case,
					input_config->ext_sel_layer,
					(int)layer_result);
				is_test_pass = false;
			}
		} else if (strncmp(line_buf, "[gles_result]", 13) == 0) {
			long gles_num = 0;

			tok = strchr(line_buf, ']');
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &gles_num);
			if (gles_num != disp_info->gles_head[disp_id]) {
				DDPINFO("case:%d,gles head err,%d/%d\n",
					(int)test_case,
					disp_info->gles_head[disp_id],
					(int)gles_num);
				is_test_pass = false;
			}
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &gles_num);
			if (gles_num != disp_info->gles_tail[disp_id]) {
				DDPINFO("case:%d,gles tail err,%d/%d\n",
					(int)test_case,
					disp_info->gles_tail[disp_id],
					(int)gles_num);
				is_test_pass = false;
			}
		} else if (strncmp(line_buf, "[hrt_result]", 12) == 0) {
			long hrt_num = 0;
			int path_scen;

			tok = parse_hrt_data_value(line_buf, &hrt_num);
			if (hrt_num != HRT_GET_DVFS_LEVEL(disp_info->hrt_num))
				DDPINFO("case:%d,hrt num err,%d/%d\n",
					(int)test_case,
					HRT_GET_DVFS_LEVEL(disp_info->hrt_num),
					(int)hrt_num);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &hrt_num);
			path_scen = HRT_GET_PATH_SCENARIO(disp_info->hrt_num) &
				    0x1F;
			if (hrt_num != path_scen) {
				DDPINFO("case:%d,hrt path err,%d/%d\n",
					(int)test_case, path_scen,
					(int)hrt_num);
				is_test_pass = false;
			}
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &hrt_num);
			if (hrt_num !=
			    HRT_GET_SCALE_SCENARIO(disp_info->hrt_num)) {
				DDPINFO("case:%d, hrt scale err,%d/%d\n",
					(int)test_case,
					HRT_GET_SCALE_SCENARIO(
						disp_info->hrt_num),
					(int)hrt_num);
				is_test_pass = false;
			}
		} else if (strncmp(line_buf, "[change_layer_num]", 18) == 0) {
			long layer_num = 0;

			tok = parse_hrt_data_value(line_buf, &layer_num);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			disp_info->layer_num[disp_id] = layer_num;
		} else if (!strncmp(line_buf, "[force_dual_pipe_off]", 21)) {
			long force_off = 0;

			tok = parse_hrt_data_value(line_buf, &force_off);
			set_hrt_state(DISP_HRT_FORCE_DUAL_OFF, force_off);
		} else if (!strncmp(line_buf, "[resolution_level]", 18)) {
			long resolution_level = 0;

			tok = parse_hrt_data_value(line_buf, &resolution_level);
			debug_resolution_level = resolution_level;
		} else if (!strncmp(line_buf, "[set_gles]", 10)) {
			long gles_num = 0;

			tok = strchr(line_buf, ']');
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &gles_num);
			disp_info->gles_head[disp_id] = gles_num;
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &gles_num);
			disp_info->gles_tail[disp_id] = gles_num;
		} else if (!strncmp(line_buf, "[disp_mode]", 11)) {
			long disp_mode = 0;

			tok = parse_hrt_data_value(line_buf, &disp_mode);
			if (!tok)
				goto end;
			tok = parse_hrt_data_value(tok, &disp_id);
			disp_info->disp_mode[disp_id] = disp_mode;
		} else if (!strncmp(line_buf, "[print_out_hrt_result]", 22))
			print_hrt_result(disp_info);

		if (is_end)
			break;
	}

end:
	filp_close(filp, NULL);
	set_fs(oldfs);
	DDPINFO("end set_fs\n");
	return 0;
}
static int gen_hrt_pattern(struct drm_device *dev)
{
#ifdef HRT_UT_DEBUG
	struct drm_mtk_layering_info disp_info;
	struct drm_mtk_layer_config *layer_info;
	int i;

	memset(&disp_info, 0x0, sizeof(struct drm_mtk_layering_info));
	disp_info.gles_head[0] = -1;
	disp_info.gles_head[1] = -1;
	disp_info.gles_tail[0] = -1;
	disp_info.gles_tail[1] = -1;
	if (!load_hrt_test_data(&disp_info, dev))
		return 0;

	/* Primary Display */
	disp_info.disp_mode[0] = DRM_DISP_SESSION_DIRECT_LINK_MODE;
	disp_info.layer_num[0] = 5;
	disp_info.gles_head[0] = 3;
	disp_info.gles_tail[0] = 5;
	disp_info.input_config[0] =
		kzalloc(sizeof(struct drm_mtk_layer_config) * 5, GFP_KERNEL);
	layer_info = disp_info.input_config[0];
	if (!layer_info)
		return 0;
	for (i = 0; i < disp_info.layer_num[0]; i++)
		layer_info[i].src_fmt = DRM_FORMAT_ARGB8888;

	layer_info[0].dst_offset_x = 0;
	layer_info[0].dst_offset_y = 0;
	layer_info[0].dst_width = 1080;
	layer_info[0].dst_height = 1920;
	layer_info[1].dst_offset_x = 0;
	layer_info[1].dst_offset_y = 0;
	layer_info[1].dst_width = 1080;
	layer_info[1].dst_height = 1920;
	layer_info[2].dst_offset_x = 269;
	layer_info[2].dst_offset_y = 72;
	layer_info[2].dst_width = 657;
	layer_info[2].dst_height = 612;
	layer_info[3].dst_offset_x = 0;
	layer_info[3].dst_offset_y = 0;
	layer_info[3].dst_width = 1080;
	layer_info[3].dst_height = 72;
	layer_info[4].dst_offset_x = 1079;
	layer_info[4].dst_offset_y = 72;
	layer_info[4].dst_width = 1;
	layer_info[4].dst_height = 1704;

	/* Secondary Display */
	disp_info.disp_mode[1] = DRM_DISP_SESSION_DIRECT_LINK_MODE;
	disp_info.layer_num[1] = 0;
	disp_info.gles_head[1] = -1;
	disp_info.gles_tail[1] = -1;

	DDPMSG("free test pattern\n");
	kfree(disp_info.input_config[0]);
	msleep(50);
#endif
	return 0;
}
#endif
/**** UT Program end ****/

int mtk_layering_rule_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_mtk_layering_info *disp_info_user = data;
	int ret;

	ret = layering_rule_start(disp_info_user, 0, dev);
	if (ret < 0)
		DDPPR_ERR("layering_rule_start error:%d\n", ret);

	return 0;
}