mdss_mdp_ctl.c

  1. /* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #define pr_fmt(fmt) "%s: " fmt, __func__
  14. #include <linux/errno.h>
  15. #include <linux/mutex.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/dma-mapping.h>
  18. #include <linux/delay.h>
  19. #include <linux/sort.h>
  20. #include "mdss_fb.h"
  21. #include "mdss_mdp.h"
  22. #include "mdss_debug.h"
  23. #include "mdss_mdp_trace.h"
  24. #include "mdss_debug.h"
  25. static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl);
  26. static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
  27. {
  28. u64 result = (val * (u64)numer);
  29. do_div(result, denom);
  30. return result;
  31. }
  32. static inline u64 apply_fudge_factor(u64 val,
  33. struct mdss_fudge_factor *factor)
  34. {
  35. return fudge_factor(val, factor->numer, factor->denom);
  36. }
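/*
 * Illustrative example (not part of the original source): the fudge
 * factors come from mdss_res / device tree, so the values below are
 * assumptions. With clk_factor = { .numer = 105, .denom = 100 },
 *
 *   fudge_factor(200000000, 105, 100)
 *     = (200000000 * 105) / 100 = 210000000
 *
 * i.e. a 200 MHz request is padded to 210 MHz. do_div() is used so
 * the 64-bit division also works on 32-bit targets.
 */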
  37. #ifdef CONFIG_VIDEO_MHL_V2
  38. extern int hdmi_hpd_status(void);
  39. #endif
  40. static DEFINE_MUTEX(mdss_mdp_ctl_lock);
  41. static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
  42. static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer);
  43. static inline void mdp_mixer_write(struct mdss_mdp_mixer *mixer,
  44. u32 reg, u32 val)
  45. {
  46. writel_relaxed(val, mixer->base + reg);
  47. }
  48. static inline u32 mdp_mixer_read(struct mdss_mdp_mixer *mixer, u32 reg)
  49. {
  50. return readl_relaxed(mixer->base + reg);
  51. }
  52. static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
  53. {
  54. struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
  55. return (ctl->intf_type == MDSS_INTF_DSI) ?
  56. pinfo->mipi.dsi_pclk_rate :
  57. pinfo->clk_rate;
  58. }
  59. static inline u32 mdss_mdp_clk_fudge_factor(struct mdss_mdp_mixer *mixer,
  60. u32 rate)
  61. {
  62. struct mdss_panel_info *pinfo = &mixer->ctl->panel_data->panel_info;
  63. rate = apply_fudge_factor(rate, &mdss_res->clk_factor);
  64. /*
  65. * If the panel is video mode and its back porch period is
  66. * small, the workaround of increasing mdp clk is needed to
  67. * avoid underrun.
  68. */
  69. if (mixer->ctl->is_video_mode && pinfo &&
  70. (pinfo->lcdc.v_back_porch < MDP_MIN_VBP))
  71. rate = apply_fudge_factor(rate, &mdss_res->clk_factor);
  72. return rate;
  73. }
  74. struct mdss_mdp_prefill_params {
  75. u32 smp_bytes;
  76. u32 xres;
  77. u32 src_w;
  78. u32 dst_w;
  79. u32 src_h;
  80. u32 dst_h;
  81. u32 dst_y;
  82. u32 bpp;
  83. bool is_yuv;
  84. bool is_caf;
  85. bool is_fbc;
  86. bool is_bwc;
  87. bool is_tile;
  88. bool is_hflip;
  89. };
  90. static inline bool mdss_mdp_perf_is_caf(struct mdss_mdp_pipe *pipe)
  91. {
  92. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  93. /*
  94. * CAF mode filter is enabled when format is yuv and
  95. * upscaling. Post processing makes the decision to use CAF
  96. * under these conditions.
  97. */
  98. return ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
  99. pipe->src_fmt->is_yuv && ((pipe->src.h >> pipe->vert_deci) <=
  100. pipe->dst.h));
  101. }
  102. static inline u32 mdss_mdp_calc_y_scaler_bytes(struct mdss_mdp_prefill_params
  103. *params, struct mdss_prefill_data *prefill)
  104. {
  105. u32 y_scaler_bytes = 0, y_scaler_lines = 0;
  106. if (params->is_yuv) {
  107. if (params->src_h != params->dst_h) {
  108. y_scaler_lines = (params->is_caf) ?
  109. prefill->y_scaler_lines_caf :
  110. prefill->y_scaler_lines_bilinear;
  111. /*
  112. * y is src_width, u is src_width/2 and v is
  113. * src_width/2, so the total is scaler_lines *
  114. * src_w * 2
  115. */
  116. y_scaler_bytes = y_scaler_lines * params->src_w * 2;
  117. }
  118. } else {
  119. if (params->src_h != params->dst_h) {
  120. y_scaler_lines = prefill->y_scaler_lines_bilinear;
  121. y_scaler_bytes = y_scaler_lines * params->src_w *
  122. params->bpp;
  123. }
  124. }
  125. return y_scaler_bytes;
  126. }
  127. static inline u32 mdss_mdp_calc_latency_buf_bytes(struct mdss_mdp_prefill_params
  128. *params, struct mdss_prefill_data *prefill)
  129. {
  130. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  131. u32 latency_lines, latency_buf_bytes;
  132. if (params->is_yuv) {
  133. if (params->is_bwc) {
  134. latency_lines = 4;
  135. latency_buf_bytes = params->src_w * params->bpp *
  136. latency_lines;
  137. } else {
  138. latency_lines = 2;
  139. latency_buf_bytes = ALIGN(params->src_w * params->bpp *
  140. latency_lines, mdata->smp_mb_size) * 2;
  141. }
  142. } else {
  143. if (params->is_tile) {
  144. latency_lines = 8;
  145. latency_buf_bytes = params->src_w * params->bpp *
  146. latency_lines;
  147. } else if (params->is_bwc) {
  148. latency_lines = 4;
  149. latency_buf_bytes = params->src_w * params->bpp *
  150. latency_lines;
  151. } else {
  152. latency_lines = 2;
  153. latency_buf_bytes = ALIGN(params->src_w * params->bpp *
  154. latency_lines, mdata->smp_mb_size);
  155. }
  156. }
  157. return latency_buf_bytes;
  158. }
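/*
 * Worked example (illustrative values, not from the original source):
 * for a linear RGB pipe with src_w = 1080, bpp = 4 and an assumed
 * smp_mb_size of 4096 bytes, the non-BWC/non-tile branch gives
 *
 *   latency_lines     = 2
 *   latency_buf_bytes = ALIGN(1080 * 4 * 2, 4096) = ALIGN(8640, 4096)
 *                     = 12288
 *
 * BWC fetches need 4 lines and tiled formats 8 lines, without the SMP
 * macro-block alignment; for non-BWC YUV the aligned value is doubled.
 */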
  159. static inline u32 mdss_mdp_calc_scaling_w_h(u32 val, u32 src_h, u32 dst_h,
  160. u32 src_w, u32 dst_w)
  161. {
  162. if (dst_h)
  163. val = mult_frac(val, src_h, dst_h);
  164. if (dst_w)
  165. val = mult_frac(val, src_w, dst_w);
  166. return val;
  167. }
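/*
 * Example (illustrative numbers): scaling a 1920x1080 source down to
 * 960x720 multiplies val by src_h/dst_h and then by src_w/dst_w:
 *
 *   mdss_mdp_calc_scaling_w_h(1000, 1080, 720, 1920, 960)
 *     -> mult_frac(1000, 1080, 720) = 1500
 *     -> mult_frac(1500, 1920, 960) = 3000
 */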
  168. static u32 mdss_mdp_perf_calc_pipe_prefill_video(struct mdss_mdp_prefill_params
  169. *params)
  170. {
  171. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  172. struct mdss_prefill_data *prefill = &mdata->prefill_data;
  173. u32 prefill_bytes;
  174. u32 latency_buf_bytes;
  175. u32 y_buf_bytes = 0;
  176. u32 y_scaler_bytes;
  177. u32 pp_bytes = 0, pp_lines = 0;
  178. u32 post_scaler_bytes;
  179. u32 fbc_bytes = 0;
  180. prefill_bytes = prefill->ot_bytes;
  181. latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params, prefill);
  182. prefill_bytes += latency_buf_bytes;
  183. pr_debug("latency_buf_bytes bw_calc=%d actual=%d\n", latency_buf_bytes,
  184. params->smp_bytes);
  185. if (params->is_yuv)
  186. y_buf_bytes = prefill->y_buf_bytes;
  187. y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
  188. prefill_bytes += y_buf_bytes + y_scaler_bytes;
  189. post_scaler_bytes = prefill->post_scaler_pixels * params->bpp;
  190. post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes,
  191. params->src_h, params->dst_h, params->src_w, params->dst_w);
  192. prefill_bytes += post_scaler_bytes;
  193. if (params->xres)
  194. pp_lines = DIV_ROUND_UP(prefill->pp_pixels, params->xres);
  195. if (params->xres && params->dst_h && (params->dst_y <= pp_lines))
  196. pp_bytes = ((params->src_w * params->bpp * prefill->pp_pixels /
  197. params->xres) * params->src_h) / params->dst_h;
  198. prefill_bytes += pp_bytes;
  199. if (params->is_fbc) {
  200. fbc_bytes = prefill->fbc_lines * params->bpp;
  201. fbc_bytes = mdss_mdp_calc_scaling_w_h(fbc_bytes, params->src_h,
  202. params->dst_h, params->src_w, params->dst_w);
  203. }
  204. prefill_bytes += fbc_bytes;
  205. pr_debug("ot=%d y_buf=%d pp_lines=%d pp=%d post_sc=%d fbc_bytes=%d\n",
  206. prefill->ot_bytes, y_buf_bytes, pp_lines, pp_bytes,
  207. post_scaler_bytes, fbc_bytes);
  208. return prefill_bytes;
  209. }
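/*
 * Summary of the video-mode prefill budget computed above (all terms
 * in bytes, zero when not applicable):
 *
 *   prefill = ot_bytes + latency_buf + y_buf + y_scaler
 *           + post_scaler + pp + fbc
 *
 * where pp is only charged when the destination starts within the
 * ping-pong buffer region (dst_y <= pp_lines).
 */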
  210. static u32 mdss_mdp_perf_calc_pipe_prefill_cmd(struct mdss_mdp_prefill_params
  211. *params)
  212. {
  213. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  214. struct mdss_prefill_data *prefill = &mdata->prefill_data;
  215. u32 prefill_bytes;
  216. u32 ot_bytes = 0;
  217. u32 latency_lines, latency_buf_bytes;
  218. u32 y_buf_bytes = 0;
  219. u32 y_scaler_bytes;
  220. u32 fbc_cmd_lines = 0, fbc_cmd_bytes = 0;
  221. u32 post_scaler_bytes = 0;
  222. /* y_scaler_bytes is the same for the first and non-first lines */
  223. y_scaler_bytes = mdss_mdp_calc_y_scaler_bytes(params, prefill);
  224. prefill_bytes = y_scaler_bytes;
  225. /* 1st line if fbc is not enabled and 2nd line if fbc is enabled */
  226. if (((params->dst_y == 0) && !params->is_fbc) ||
  227. ((params->dst_y <= 1) && params->is_fbc)) {
  228. if (params->is_bwc || params->is_tile)
  229. latency_lines = 4;
  230. else if (!params->is_caf && params->is_hflip)
  231. latency_lines = 1;
  232. else
  233. latency_lines = 0;
  234. latency_buf_bytes = params->src_w * params->bpp * latency_lines;
  235. prefill_bytes += latency_buf_bytes;
  236. fbc_cmd_lines++;
  237. if (params->is_fbc)
  238. fbc_cmd_lines++;
  239. fbc_cmd_bytes = params->bpp * params->dst_w * fbc_cmd_lines;
  240. fbc_cmd_bytes = mdss_mdp_calc_scaling_w_h(fbc_cmd_bytes,
  241. params->src_h, params->dst_h, params->src_w,
  242. params->dst_w);
  243. prefill_bytes += fbc_cmd_bytes;
  244. } else {
  245. ot_bytes = prefill->ot_bytes;
  246. prefill_bytes += ot_bytes;
  247. latency_buf_bytes = mdss_mdp_calc_latency_buf_bytes(params,
  248. prefill);
  249. prefill_bytes += latency_buf_bytes;
  250. if (params->is_yuv)
  251. y_buf_bytes = prefill->y_buf_bytes;
  252. prefill_bytes += y_buf_bytes;
  253. post_scaler_bytes = prefill->post_scaler_pixels * params->bpp;
  254. post_scaler_bytes = mdss_mdp_calc_scaling_w_h(post_scaler_bytes,
  255. params->src_h, params->dst_h, params->src_w,
  256. params->dst_w);
  257. prefill_bytes += post_scaler_bytes;
  258. }
  259. pr_debug("ot=%d bwc=%d smp=%d y_buf=%d fbc=%d\n", ot_bytes,
  260. params->is_bwc, latency_buf_bytes, y_buf_bytes, fbc_cmd_bytes);
  261. return prefill_bytes;
  262. }
  263. /**
  264. * mdss_mdp_perf_calc_pipe() - calculate performance numbers required by pipe
  265. * @pipe: Source pipe struct containing updated pipe params
  266. * @perf: Structure containing values that should be updated for
  267. * performance tuning
  268. * @apply_fudge: Boolean to determine if mdp clock fudge is applicable
  269. *
  270. * Function calculates the minimum required performance calculations in order
  271. * to avoid MDP underflow. The calculations are based on the way MDP
  272. * fetches (bandwidth requirement) and processes data through MDP pipeline
  273. * (MDP clock requirement) based on frame size and scaling requirements.
  274. */
  275. int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
  276. struct mdss_mdp_perf_params *perf, struct mdss_mdp_img_rect *roi,
  277. bool apply_fudge)
  278. {
  279. struct mdss_mdp_mixer *mixer;
  280. int fps = DEFAULT_FRAME_RATE;
  281. u32 quota, rate, v_total, src_h, xres = 0;
  282. struct mdss_mdp_img_rect src, dst;
  283. bool is_fbc = false;
  284. struct mdss_mdp_prefill_params prefill_params;
  285. if (!pipe || !perf || !pipe->mixer)
  286. return -EINVAL;
  287. mixer = pipe->mixer;
  288. dst = pipe->dst;
  289. src = pipe->src;
  290. if (mixer->rotator_mode) {
  291. v_total = pipe->flags & MDP_ROT_90 ? pipe->dst.w : pipe->dst.h;
  292. } else if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
  293. struct mdss_panel_info *pinfo;
  294. pinfo = &mixer->ctl->panel_data->panel_info;
  295. if (pinfo->type == MIPI_VIDEO_PANEL) {
  296. fps = pinfo->panel_max_fps;
  297. v_total = pinfo->panel_max_vtotal;
  298. } else {
  299. fps = mdss_panel_get_framerate(pinfo);
  300. v_total = mdss_panel_get_vtotal(pinfo);
  301. }
  302. xres = pinfo->xres;
  303. is_fbc = pinfo->fbc.enabled;
  304. } else {
  305. v_total = mixer->height;
  306. xres = mixer->width;
  307. }
  308. if (roi)
  309. mdss_mdp_crop_rect(&src, &dst, roi);
  310. pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps);
  311. /*
  312. * when doing vertical decimation lines will be skipped, hence there is
  313. * no need to account for these lines in MDP clock or request bus
  314. * bandwidth to fetch them.
  315. */
  316. src_h = DECIMATED_DIMENSION(src.h, pipe->vert_deci);
  317. quota = fps * src.w * src_h;
  318. pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n",
  319. pipe->src.w, src_h, pipe->dst.w, pipe->dst.h, pipe->dst.y,
  320. pipe->src_fmt->bpp, pipe->src_fmt->is_yuv);
  321. if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
  322. /*
  323. * with decimation, chroma is not downsampled, this means we
  324. * need to allocate bw for extra lines that will be fetched
  325. */
  326. if (pipe->vert_deci)
  327. quota *= 2;
  328. else
  329. quota = (quota * 3) / 2;
  330. else
  331. quota *= pipe->src_fmt->bpp;
  332. rate = dst.w;
  333. if (src_h > dst.h)
  334. rate = (rate * src_h) / dst.h;
  335. rate *= v_total * fps;
  336. if (mixer->rotator_mode) {
  337. rate /= 4; /* block mode fetch at 4 pix/clk */
  338. quota *= 2; /* bus read + write */
  339. perf->bw_overlap = quota;
  340. } else {
  341. perf->bw_overlap = (quota / dst.h) * v_total;
  342. }
  343. #if !defined(CONFIG_MACH_VIENNA) && !defined(CONFIG_SEC_MILLET_PROJECT) && !defined(CONFIG_MACH_LT03) && !defined(CONFIG_SEC_K_PROJECT)
  344. /* The following change has been taken from CL 2767750. The bw has been increased as a fix
  345. * for underrun during UHD video play cases. */
  346. if ( ((pipe->src.h * pipe->src.w) / (pipe->dst.h * pipe->dst.w)) > 6) {
  347. perf->bw_overlap = perf->bw_overlap * 2;
  348. }
  349. #endif
  350. if (apply_fudge)
  351. perf->mdp_clk_rate = mdss_mdp_clk_fudge_factor(mixer, rate);
  352. else
  353. perf->mdp_clk_rate = rate;
  354. prefill_params.smp_bytes = mdss_mdp_smp_get_size(pipe);
  355. prefill_params.xres = xres;
  356. prefill_params.src_w = src.w;
  357. prefill_params.src_h = src_h;
  358. prefill_params.dst_w = dst.w;
  359. prefill_params.dst_h = dst.h;
  360. prefill_params.dst_y = dst.y;
  361. prefill_params.bpp = pipe->src_fmt->bpp;
  362. prefill_params.is_yuv = pipe->src_fmt->is_yuv;
  363. prefill_params.is_caf = mdss_mdp_perf_is_caf(pipe);
  364. prefill_params.is_fbc = is_fbc;
  365. prefill_params.is_bwc = pipe->bwc_mode;
  366. prefill_params.is_tile = pipe->src_fmt->tile;
  367. prefill_params.is_hflip = pipe->flags & MDP_FLIP_LR;
  368. if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
  369. perf->prefill_bytes = (mixer->ctl->is_video_mode) ?
  370. mdss_mdp_perf_calc_pipe_prefill_video(&prefill_params) :
  371. mdss_mdp_perf_calc_pipe_prefill_cmd(&prefill_params);
  372. }
  373. else
  374. perf->prefill_bytes = 0;
  375. pr_debug("mixer=%d pnum=%d clk_rate=%u bw_overlap=%llu prefill=%d\n",
  376. mixer->num, pipe->num, perf->mdp_clk_rate, perf->bw_overlap,
  377. perf->prefill_bytes);
  378. return 0;
  379. }
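/*
 * Worked example for the overlap bandwidth above (illustrative panel
 * and layer values): a 60 fps 1920x1080 RGBA layer (bpp = 4) shown
 * unscaled on a panel with v_total = 1125 gives
 *
 *   quota      = 60 * 1920 * 1080 * 4 = 497,664,000 B/s
 *   bw_overlap = (quota / dst_h) * v_total
 *              = (497,664,000 / 1080) * 1125 = 518,400,000 B/s
 *
 * For 4:2:0 content the bpp multiply is replaced by the 3/2 factor
 * (or 2x when vertical decimation is used).
 */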
  380. static inline int mdss_mdp_perf_is_overlap(u32 y00, u32 y01, u32 y10, u32 y11)
  381. {
  382. return (y10 < y00 && y11 >= y01) || (y10 >= y00 && y10 < y01);
  383. }
  384. static inline int cmpu32(const void *a, const void *b)
  385. {
  386. return (*(u32 *)a < *(u32 *)b) ? -1 : 0;
  387. }
  388. static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
  389. struct mdss_mdp_perf_params *perf,
  390. struct mdss_mdp_pipe **pipe_list, int num_pipes)
  391. {
  392. struct mdss_mdp_pipe *pipe;
  393. struct mdss_panel_info *pinfo = NULL;
  394. int fps = DEFAULT_FRAME_RATE;
  395. u32 v_total = 0;
  396. int i;
  397. u32 max_clk_rate = 0;
  398. u64 bw_overlap_max = 0;
  399. u64 bw_overlap[MDSS_MDP_MAX_STAGE] = { 0 };
  400. u32 v_region[MDSS_MDP_MAX_STAGE * 2] = { 0 };
  401. u32 prefill_bytes = 0;
  402. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  403. bool apply_fudge = true;
  404. BUG_ON(num_pipes > MDSS_MDP_MAX_STAGE);
  405. memset(perf, 0, sizeof(*perf));
  406. if (!mixer->rotator_mode) {
  407. if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
  408. pinfo = &mixer->ctl->panel_data->panel_info;
  409. if (pinfo->type == MIPI_VIDEO_PANEL) {
  410. fps = pinfo->panel_max_fps;
  411. v_total = pinfo->panel_max_vtotal;
  412. } else {
  413. fps = mdss_panel_get_framerate(pinfo);
  414. v_total = mdss_panel_get_vtotal(pinfo);
  415. }
  416. if (pinfo->type == WRITEBACK_PANEL)
  417. pinfo = NULL;
  418. } else {
  419. v_total = mixer->height;
  420. }
  421. perf->mdp_clk_rate = mixer->width * v_total * fps;
  422. perf->mdp_clk_rate =
  423. mdss_mdp_clk_fudge_factor(mixer, perf->mdp_clk_rate);
  424. if (!pinfo) /* perf for bus writeback */
  425. perf->bw_overlap =
  426. fps * mixer->width * mixer->height * 3;
  427. }
  428. memset(bw_overlap, 0, sizeof(u64) * MDSS_MDP_MAX_STAGE);
  429. memset(v_region, 0, sizeof(u32) * MDSS_MDP_MAX_STAGE * 2);
  430. /*
  431. * Apply this logic only for 8x26 to reduce clock rate
  432. * for single video playback use case
  433. */
  434. if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_101)
  435. && mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
  436. u32 npipes = 0;
  437. for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
  438. pipe = mixer->stage_pipe[i];
  439. if (pipe) {
  440. if (npipes) {
  441. apply_fudge = true;
  442. break;
  443. }
  444. npipes++;
  445. apply_fudge = !(pipe->src_fmt->is_yuv)
  446. || !(pipe->flags
  447. & MDP_SOURCE_ROTATED_90);
  448. }
  449. }
  450. }
  451. for (i = 0; i < num_pipes; i++) {
  452. struct mdss_mdp_perf_params tmp;
  453. pipe = pipe_list[i];
  454. if (pipe == NULL)
  455. continue;
  456. if (mdss_mdp_perf_calc_pipe(pipe, &tmp, &mixer->roi,
  457. apply_fudge))
  458. continue;
  459. prefill_bytes += tmp.prefill_bytes;
  460. bw_overlap[i] = tmp.bw_overlap;
  461. v_region[2*i] = pipe->dst.y;
  462. v_region[2*i + 1] = pipe->dst.y + pipe->dst.h;
  463. if (tmp.mdp_clk_rate > max_clk_rate)
  464. max_clk_rate = tmp.mdp_clk_rate;
  465. }
  466. /*
  467. * Sort the v_region array so the total display area can be
  468. * divided into individual regions. Check how many pipes fetch
  469. * data for each region and sum them up, then the worst case
  470. * of all regions is ib request.
  471. */
  472. sort(v_region, num_pipes * 2, sizeof(u32), cmpu32, NULL);
  473. for (i = 1; i < num_pipes * 2; i++) {
  474. int j;
  475. u64 bw_max_region = 0;
  476. u32 y0, y1;
  477. pr_debug("v_region[%d]%d\n", i, v_region[i]);
  478. if (v_region[i] == v_region[i-1])
  479. continue;
  480. y0 = v_region[i-1];
  481. y1 = v_region[i];
  482. for (j = 0; j < num_pipes; j++) {
  483. if (!bw_overlap[j])
  484. continue;
  485. pipe = pipe_list[j];
  486. if (mdss_mdp_perf_is_overlap(y0, y1, pipe->dst.y,
  487. (pipe->dst.y + pipe->dst.h)))
  488. bw_max_region += bw_overlap[j];
  489. pr_debug("v[%d](%d,%d)pipe[%d](%d,%d)bw(%llu %llu)\n",
  490. i, y0, y1, j, pipe->dst.y,
  491. pipe->dst.y + pipe->dst.h, bw_overlap[j],
  492. bw_max_region);
  493. }
  494. bw_overlap_max = max(bw_overlap_max, bw_max_region);
  495. }
  496. perf->bw_overlap += bw_overlap_max;
  497. perf->prefill_bytes += prefill_bytes;
  498. if (max_clk_rate > perf->mdp_clk_rate)
  499. perf->mdp_clk_rate = max_clk_rate;
  500. pr_debug("final mixer=%d video=%d clk_rate=%u bw=%llu prefill=%d\n",
  501. mixer->num, mixer->ctl->is_video_mode, perf->mdp_clk_rate,
  502. perf->bw_overlap, perf->prefill_bytes);
  503. }
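/*
 * Illustrative example of the ib calculation above: two pipes, one
 * covering lines 0..540 and one covering 0..1080, produce the sorted
 * v_region array { 0, 0, 540, 1080 }. Region (0, 540) is fetched by
 * both pipes, region (540, 1080) only by the second, so the worst
 * case region (and hence bw_overlap_max) is the sum of both pipes'
 * overlap bandwidth.
 */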
  504. static u32 mdss_mdp_get_vbp_factor(struct mdss_mdp_ctl *ctl)
  505. {
  506. u32 fps, v_total, vbp, vbp_fac;
  507. struct mdss_panel_info *pinfo;
  508. if (!ctl || !ctl->panel_data)
  509. return 0;
  510. pinfo = &ctl->panel_data->panel_info;
  511. fps = mdss_panel_get_framerate(pinfo);
  512. v_total = mdss_panel_get_vtotal(pinfo);
  513. vbp = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
  514. vbp_fac = (vbp) ? fps * v_total / vbp : 0;
  515. pr_debug("vbp_fac=%d vbp=%d v_total=%d\n", vbp_fac, vbp, v_total);
  516. return vbp_fac;
  517. }
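/*
 * Example (assumed panel timings): fps = 60, v_total = 1125 and
 * vbp = v_back_porch + v_pulse_width = 10 lines gives
 *
 *   vbp_fac = 60 * 1125 / 10 = 6750 (units of 1/s)
 *
 * i.e. the reciprocal of the vertical blanking time, used later to
 * turn prefill bytes into prefill bandwidth.
 */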
  518. static u32 mdss_mdp_get_vbp_factor_max(struct mdss_mdp_ctl *ctl)
  519. {
  520. u32 vbp_max = 0;
  521. int i;
  522. struct mdss_data_type *mdata;
  523. if (!ctl || !ctl->mdata)
  524. return 0;
  525. mdata = ctl->mdata;
  526. for (i = 0; i < mdata->nctl; i++) {
  527. struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
  528. u32 vbp_fac;
  529. if (ctl->power_on) {
  530. vbp_fac = mdss_mdp_get_vbp_factor(ctl);
  531. vbp_max = max(vbp_max, vbp_fac);
  532. }
  533. }
  534. return vbp_max;
  535. }
  536. static bool mdss_mdp_video_mode_intf_connected(struct mdss_mdp_ctl *ctl)
  537. {
  538. int i;
  539. struct mdss_data_type *mdata;
  540. if (!ctl || !ctl->mdata)
  541. return 0;
  542. mdata = ctl->mdata;
  543. for (i = 0; i < mdata->nctl; i++) {
  544. struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
  545. if (ctl->is_video_mode && ctl->power_on) {
  546. pr_debug("video interface connected ctl:%d\n",
  547. ctl->num);
  548. return true;
  549. }
  550. }
  551. return false;
  552. }
  553. static void __mdss_mdp_perf_calc_ctl_helper(struct mdss_mdp_ctl *ctl,
  554. struct mdss_mdp_perf_params *perf,
  555. struct mdss_mdp_pipe **left_plist, int left_cnt,
  556. struct mdss_mdp_pipe **right_plist, int right_cnt)
  557. {
  558. struct mdss_mdp_perf_params tmp;
  559. memset(perf, 0, sizeof(*perf));
  560. if (left_cnt && ctl->mixer_left) {
  561. mdss_mdp_perf_calc_mixer(ctl->mixer_left, &tmp,
  562. left_plist, left_cnt);
  563. perf->bw_overlap += tmp.bw_overlap;
  564. perf->prefill_bytes += tmp.prefill_bytes;
  565. perf->mdp_clk_rate = tmp.mdp_clk_rate;
  566. }
  567. if (right_cnt && ctl->mixer_right) {
  568. mdss_mdp_perf_calc_mixer(ctl->mixer_right, &tmp,
  569. right_plist, right_cnt);
  570. perf->bw_overlap += tmp.bw_overlap;
  571. perf->prefill_bytes += tmp.prefill_bytes;
  572. if (tmp.mdp_clk_rate > perf->mdp_clk_rate)
  573. perf->mdp_clk_rate = tmp.mdp_clk_rate;
  574. if (ctl->intf_type) {
  575. u32 clk_rate = mdss_mdp_get_pclk_rate(ctl);
  576. /* minimum clock rate due to inefficiency in 3dmux */
  577. clk_rate = mult_frac(clk_rate >> 1, 9, 8);
  578. if (clk_rate > perf->mdp_clk_rate)
  579. perf->mdp_clk_rate = clk_rate;
  580. }
  581. }
  582. /* request minimum bandwidth to have bus clock on when display is on */
  583. if (perf->bw_overlap == 0)
  584. perf->bw_overlap = SZ_16M;
  585. if (ctl->intf_type != MDSS_MDP_NO_INTF) {
  586. u32 vbp_fac = mdss_mdp_get_vbp_factor_max(ctl);
  587. perf->bw_prefill = perf->prefill_bytes;
  588. /*
  589. * Prefill bandwidth equals the amount of data (number
  590. * of prefill_bytes) divided by the amount of time
  591. * available (the blanking period). Equivalently, it is
  592. * the prefill bytes times a factor in units of Hz, which
  593. * is the reciprocal of that time.
  594. */
  595. perf->bw_prefill *= vbp_fac;
  596. }
  597. perf->bw_ctl = max(perf->bw_prefill, perf->bw_overlap);
  598. }
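/*
 * Illustrative continuation of the vbp example above: with
 * prefill_bytes = 100,000 and vbp_fac = 6750,
 *
 *   bw_prefill = 100,000 * 6750 = 675,000,000 B/s
 *   bw_ctl     = max(bw_prefill, bw_overlap)
 *
 * so whichever of the prefill or steady-state overlap requirement is
 * larger becomes the ctl bandwidth vote.
 */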
  599. int mdss_mdp_perf_bw_check(struct mdss_mdp_ctl *ctl,
  600. struct mdss_mdp_pipe **left_plist, int left_cnt,
  601. struct mdss_mdp_pipe **right_plist, int right_cnt)
  602. {
  603. struct mdss_data_type *mdata = ctl->mdata;
  604. struct mdss_mdp_perf_params perf;
  605. u32 bw, threshold, i;
  606. u64 bw_sum_of_intfs = 0;
  607. /* we only need bandwidth check on real-time clients (interfaces) */
  608. if (ctl->intf_type == MDSS_MDP_NO_INTF)
  609. return 0;
  610. __mdss_mdp_perf_calc_ctl_helper(ctl, &perf,
  611. left_plist, left_cnt, right_plist, right_cnt);
  612. ctl->bw_pending = perf.bw_ctl;
  613. for (i = 0; i < mdata->nctl; i++) {
  614. struct mdss_mdp_ctl *temp = mdata->ctl_off + i;
  615. if (temp->power_on && (temp->intf_type != MDSS_MDP_NO_INTF))
  616. bw_sum_of_intfs += temp->bw_pending;
  617. }
  618. /* convert bandwidth to kb */
  619. bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
  620. pr_debug("calculated bandwidth=%uk\n", bw);
  621. threshold = (ctl->is_video_mode ||
  622. mdss_mdp_video_mode_intf_connected(ctl)) ?
  623. mdata->max_bw_low : mdata->max_bw_high;
  624. if (bw > threshold) {
  625. ctl->bw_pending = 0;
  626. pr_debug("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
  627. return -E2BIG;
  628. }
  629. return 0;
  630. }
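/*
 * Example of the threshold check (values are illustrative; the real
 * limits come from max_bw_low/max_bw_high in the mdata): a summed
 * bw_pending of 518,400,000 B/s becomes
 *
 *   bw = DIV_ROUND_UP_ULL(518400000, 1000) = 518,400 kB/s
 *
 * which is compared against the low threshold when any video mode
 * interface is active, or the high threshold otherwise.
 */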
  631. static void mdss_mdp_perf_calc_ctl(struct mdss_mdp_ctl *ctl,
  632. struct mdss_mdp_perf_params *perf)
  633. {
  634. struct mdss_mdp_pipe **left_plist, **right_plist;
  635. left_plist = ctl->mixer_left ? ctl->mixer_left->stage_pipe : NULL;
  636. right_plist = ctl->mixer_right ? ctl->mixer_right->stage_pipe : NULL;
  637. __mdss_mdp_perf_calc_ctl_helper(ctl, perf,
  638. left_plist, (left_plist ? MDSS_MDP_MAX_STAGE : 0),
  639. right_plist, (right_plist ? MDSS_MDP_MAX_STAGE : 0));
  640. if (ctl->is_video_mode || ((ctl->intf_type != MDSS_MDP_NO_INTF) &&
  641. mdss_mdp_video_mode_intf_connected(ctl))) {
  642. perf->bw_ctl =
  643. max(apply_fudge_factor(perf->bw_overlap,
  644. &mdss_res->ib_factor_overlap),
  645. apply_fudge_factor(perf->bw_prefill,
  646. &mdss_res->ib_factor));
  647. }
  648. pr_debug("ctl=%d clk_rate=%u\n", ctl->num, perf->mdp_clk_rate);
  649. pr_debug("bw_overlap=%llu bw_prefill=%llu prefill_bytes=%d\n",
  650. perf->bw_overlap, perf->bw_prefill, perf->prefill_bytes);
  651. }
  652. static void set_status(u32 *value, bool status, u32 bit_num)
  653. {
  654. if (status)
  655. *value |= BIT(bit_num);
  656. else
  657. *value &= ~BIT(bit_num);
  658. }
  659. /**
  660. * @ mdss_mdp_ctl_perf_set_transaction_status() -
  661. * Set the status of the on-going operations
  662. * for the command mode panels.
  663. * @ctl - pointer to a ctl
  664. *
  665. * This function is called to set the status bit in the perf_transaction_status
  666. * according to the operation that is on-going for the command mode
  667. * panels, where:
  668. *
  669. * PERF_SW_COMMIT_STATE:
  670. * 1 - If SW operation has been committed and bw
  671. * has been requested (HW transaction has not started yet).
  672. * 0 - If there is no SW operation pending
  673. * PERF_HW_MDP_STATE:
  674. * 1 - If HW transaction is on-going
  675. * 0 - If there is no HW transaction on going (ping-pong interrupt
  676. * has finished)
  677. * Only if both states are zero there are no pending operations and
  678. * BW could be released.
  679. * State can be queried calling "mdss_mdp_ctl_perf_get_transaction_status"
  680. */
  681. void mdss_mdp_ctl_perf_set_transaction_status(struct mdss_mdp_ctl *ctl,
  682. enum mdss_mdp_perf_state_type component, bool new_status)
  683. {
  684. u32 previous_transaction;
  685. bool previous_status;
  686. unsigned long flags;
  687. if (!ctl || !ctl->panel_data ||
  688. (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
  689. return;
  690. spin_lock_irqsave(&ctl->spin_lock, flags);
  691. previous_transaction = ctl->perf_transaction_status;
  692. previous_status = previous_transaction & BIT(component) ?
  693. PERF_STATUS_BUSY : PERF_STATUS_DONE;
  694. /*
  695. * If we set "done" state when previous state was not "busy",
  696. * we want to print a warning since maybe there is a state
  697. * that we are not considering
  698. */
  699. WARN((PERF_STATUS_DONE == new_status) &&
  700. (PERF_STATUS_BUSY != previous_status),
  701. "unexpected previous state for component: %d\n", component);
  702. set_status(&ctl->perf_transaction_status, new_status,
  703. (u32)component);
  704. pr_debug("component:%d previous_transaction:%d transaction_status:%d\n",
  705. component, previous_transaction, ctl->perf_transaction_status);
  706. pr_debug("new_status:%d prev_status:%d\n",
  707. new_status, previous_status);
  708. spin_unlock_irqrestore(&ctl->spin_lock, flags);
  709. }
  710. /**
  711. * @ mdss_mdp_ctl_perf_get_transaction_status() -
  712. * Get the status of the on-going operations
  713. * for the command mode panels.
  714. * @ctl - pointer to a ctl
  715. *
  716. * Return:
  717. * The status of the transactions for the command mode panels,
  718. * note that the bandwidth can be released only if all transaction
  719. * status bits are zero.
  720. */
  721. u32 mdss_mdp_ctl_perf_get_transaction_status(struct mdss_mdp_ctl *ctl)
  722. {
  723. unsigned long flags;
  724. u32 transaction_status;
  725. if (!ctl)
  726. return PERF_STATUS_BUSY;
  727. /*
  728. * If Rotator mode and bandwidth has been released; return STATUS_DONE
  729. * so the bandwidth is re-calculated.
  730. */
  731. if (ctl->mixer_left && ctl->mixer_left->rotator_mode &&
  732. !ctl->perf_release_ctl_bw)
  733. return PERF_STATUS_DONE;
  734. /*
  735. * If Video Mode or not valid data to determine the status, return busy
  736. * status, so the bandwidth cannot be freed by the caller
  737. */
  738. if (!ctl || !ctl->panel_data ||
  739. (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL)) {
  740. return PERF_STATUS_BUSY;
  741. }
  742. spin_lock_irqsave(&ctl->spin_lock, flags);
  743. transaction_status = ctl->perf_transaction_status;
  744. spin_unlock_irqrestore(&ctl->spin_lock, flags);
  745. return transaction_status;
  746. }
  747. static inline void mdss_mdp_ctl_perf_update_bus(struct mdss_mdp_ctl *ctl)
  748. {
  749. u64 bw_sum_of_intfs = 0;
  750. u64 bus_ab_quota, bus_ib_quota;
  751. struct mdss_data_type *mdata;
  752. int i;
  753. if (!ctl || !ctl->mdata)
  754. return;
  755. ATRACE_BEGIN(__func__);
  756. mdata = ctl->mdata;
  757. for (i = 0; i < mdata->nctl; i++) {
  758. struct mdss_mdp_ctl *ctl;
  759. ctl = mdata->ctl_off + i;
  760. if (ctl->power_on) {
  761. bw_sum_of_intfs += ctl->cur_perf.bw_ctl;
  762. pr_debug("c=%d bw=%llu\n", ctl->num,
  763. ctl->cur_perf.bw_ctl);
  764. }
  765. }
  766. bus_ib_quota = max(bw_sum_of_intfs, mdata->perf_tune.min_bus_vote);
  767. bus_ab_quota = apply_fudge_factor(bus_ib_quota,
  768. &mdss_res->ab_factor);
  769. trace_mdp_perf_update_bus(bus_ab_quota, bus_ib_quota);
  770. ATRACE_INT("bus_quota", bus_ib_quota);
  771. mdss_bus_scale_set_quota(MDSS_HW_MDP, bus_ab_quota, bus_ib_quota);
  772. pr_debug("ab=%llu ib=%llu\n", bus_ab_quota, bus_ib_quota);
  773. ATRACE_END(__func__);
  774. }
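/*
 * Example of the bus vote (ab_factor and min_bus_vote are platform
 * data, so the numbers are assumptions): with bw_sum_of_intfs =
 * 518,400,000, min_bus_vote = 0 and ab_factor = { 115, 100 },
 *
 *   bus_ib_quota = 518,400,000
 *   bus_ab_quota = (518,400,000 * 115) / 100 = 596,160,000
 */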
  775. /**
  776. * @mdss_mdp_ctl_perf_release_bw() - request zero bandwidth
  777. * @ctl - pointer to a ctl
  778. *
  779. * Function checks a state variable for the ctl, if all pending commit
  780. * requests are done, meaning no more bandwidth is needed, release
  781. * bandwidth request.
  782. */
  783. void mdss_mdp_ctl_perf_release_bw(struct mdss_mdp_ctl *ctl)
  784. {
  785. int transaction_status;
  786. struct mdss_data_type *mdata;
  787. int i;
  788. /* only do this for command panel */
  789. if (!ctl || !ctl->mdata || !ctl->panel_data ||
  790. (ctl->panel_data->panel_info.type != MIPI_CMD_PANEL))
  791. return;
  792. mutex_lock(&mdss_mdp_ctl_lock);
  793. mdata = ctl->mdata;
  794. /*
  795. * If video interface present, cmd panel bandwidth cannot be
  796. * released.
  797. */
  798. for (i = 0; i < mdata->nctl; i++) {
  799. struct mdss_mdp_ctl *ctl = mdata->ctl_off + i;
  800. if (ctl->power_on && ctl->is_video_mode)
  801. goto exit;
  802. }
  803. transaction_status = mdss_mdp_ctl_perf_get_transaction_status(ctl);
  804. pr_debug("transaction_status=0x%x\n", transaction_status);
  805. /*Release the bandwidth only if there are no transactions pending*/
  806. if (!transaction_status && mdata->enable_bw_release) {
  807. trace_mdp_cmd_release_bw(ctl->num);
  808. ctl->cur_perf.bw_ctl = 0;
  809. ctl->new_perf.bw_ctl = 0;
  810. pr_debug("Release BW ctl=%d\n", ctl->num);
  811. mdss_mdp_ctl_perf_update_bus(ctl);
  812. }
  813. exit:
  814. mutex_unlock(&mdss_mdp_ctl_lock);
  815. }
  816. static int mdss_mdp_select_clk_lvl(struct mdss_mdp_ctl *ctl,
  817. u32 clk_rate)
  818. {
  819. int i;
  820. struct mdss_data_type *mdata;
  821. if (!ctl)
  822. return -ENODEV;
  823. mdata = ctl->mdata;
  824. for (i = 0; i < mdata->nclk_lvl; i++) {
  825. if (clk_rate > mdata->clock_levels[i]) {
  826. continue;
  827. } else {
  828. clk_rate = mdata->clock_levels[i];
  829. break;
  830. }
  831. }
  832. return clk_rate;
  833. }
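/*
 * Example (clock_levels come from device tree; the table below is an
 * assumption): with levels { 92310000, 177780000, 200000000,
 * 320000000 } a requested rate of 150000000 is rounded up to the
 * first level that is not below it, i.e. 177780000. A request above
 * the last level falls through and is returned unchanged.
 */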
  834. static void mdss_mdp_perf_release_ctl_bw(struct mdss_mdp_ctl *ctl,
  835. struct mdss_mdp_perf_params *perf)
  836. {
  837. /* Set controller bandwidth to zero. */
  838. memset(perf, 0, sizeof(*perf));
  839. ctl->perf_release_ctl_bw = false;
  840. }
  841. #define ADDING_BW_ROTATE_MODE 130
  842. #define ADDING_BW_LANDSCAPE_MODE 107
  843. static void mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl,
  844. int params_changed)
  845. {
  846. struct mdss_mdp_perf_params *new, *old;
  847. int update_bus = 0, update_clk = 0;
  848. struct mdss_data_type *mdata;
  849. bool is_bw_released;
  850. if (!ctl || !ctl->mdata)
  851. return;
  852. ATRACE_BEGIN(__func__);
  853. mutex_lock(&mdss_mdp_ctl_lock);
  854. mdata = ctl->mdata;
  855. old = &ctl->cur_perf;
  856. new = &ctl->new_perf;
  857. /*
  858. * We could have released the bandwidth if there were no transactions
  859. * pending, so we want to re-calculate the bandwidth in this situation
  860. */
  861. is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
  862. if (ctl->power_on) {
  863. if (ctl->perf_release_ctl_bw &&
  864. mdata->enable_rotator_bw_release)
  865. mdss_mdp_perf_release_ctl_bw(ctl, new);
  866. else if (is_bw_released || params_changed)
  867. mdss_mdp_perf_calc_ctl(ctl, new);
  868. /*
  869. * if params have just changed delay the update until
  870. * later once the hw configuration has been flushed to
  871. * MDP
  872. */
  873. if ((params_changed && (new->bw_ctl > old->bw_ctl)) ||
  874. (!params_changed && (new->bw_ctl < old->bw_ctl))) {
  875. pr_debug("c=%d p=%d new_bw=%llu,old_bw=%llu\n",
  876. ctl->num, params_changed, new->bw_ctl,
  877. old->bw_ctl);
  878. old->bw_ctl = new->bw_ctl;
  879. update_bus = 1;
  880. }
  881. if ((params_changed && (new->mdp_clk_rate > old->mdp_clk_rate))
  882. || (!params_changed && (new->mdp_clk_rate <
  883. old->mdp_clk_rate))) {
  884. old->mdp_clk_rate = new->mdp_clk_rate;
  885. update_clk = 1;
  886. }
  887. } else {
  888. memset(old, 0, sizeof(*old));
  889. memset(new, 0, sizeof(*new));
  890. update_bus = 1;
  891. update_clk = 1;
  892. }
  893. if (update_bus)
  894. mdss_mdp_ctl_perf_update_bus(ctl);
  895. if (update_clk) {
  896. u32 clk_rate = 0;
  897. int i;
  898. for (i = 0; i < mdata->nctl; i++) {
  899. struct mdss_mdp_ctl *ctl;
  900. ctl = mdata->ctl_off + i;
  901. if (ctl->power_on)
  902. clk_rate = max(ctl->cur_perf.mdp_clk_rate,
  903. clk_rate);
  904. }
  905. clk_rate = mdss_mdp_select_clk_lvl(ctl, clk_rate);
  906. ATRACE_INT("mdp_clk", clk_rate);
  907. mdss_mdp_set_clk_rate(clk_rate);
  908. pr_debug("update clk rate = %d HZ\n", clk_rate);
  909. }
  910. mutex_unlock(&mdss_mdp_ctl_lock);
  911. ATRACE_END(__func__);
  912. }
  913. static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
  914. u32 off)
  915. {
  916. struct mdss_mdp_ctl *ctl = NULL;
  917. u32 cnum;
  918. u32 nctl = mdata->nctl;
  919. mutex_lock(&mdss_mdp_ctl_lock);
  920. if (!mdata->has_wfd_blk)
  921. nctl++;
  922. for (cnum = off; cnum < nctl; cnum++) {
  923. ctl = mdata->ctl_off + cnum;
  924. if (ctl->ref_cnt == 0) {
  925. ctl->ref_cnt++;
  926. ctl->mdata = mdata;
  927. mutex_init(&ctl->lock);
  928. spin_lock_init(&ctl->spin_lock);
  929. BLOCKING_INIT_NOTIFIER_HEAD(&ctl->notifier_head);
  930. pr_debug("alloc ctl_num=%d\n", ctl->num);
  931. break;
  932. }
  933. ctl = NULL;
  934. }
  935. mutex_unlock(&mdss_mdp_ctl_lock);
  936. return ctl;
  937. }
  938. static int mdss_mdp_ctl_free(struct mdss_mdp_ctl *ctl)
  939. {
  940. if (!ctl)
  941. return -ENODEV;
  942. pr_debug("free ctl_num=%d ref_cnt=%d\n", ctl->num, ctl->ref_cnt);
  943. if (!ctl->ref_cnt) {
  944. pr_err("called with ref_cnt=0\n");
  945. return -EINVAL;
  946. }
  947. if (ctl->mixer_left) {
  948. mdss_mdp_mixer_free(ctl->mixer_left);
  949. ctl->mixer_left = NULL;
  950. }
  951. if (ctl->mixer_right) {
  952. mdss_mdp_mixer_free(ctl->mixer_right);
  953. ctl->mixer_right = NULL;
  954. }
  955. mutex_lock(&mdss_mdp_ctl_lock);
  956. ctl->ref_cnt--;
  957. ctl->intf_num = MDSS_MDP_NO_INTF;
  958. ctl->intf_type = MDSS_MDP_NO_INTF;
  959. ctl->is_secure = false;
  960. ctl->power_on = false;
  961. ctl->start_fnc = NULL;
  962. ctl->stop_fnc = NULL;
  963. ctl->prepare_fnc = NULL;
  964. ctl->display_fnc = NULL;
  965. ctl->wait_fnc = NULL;
  966. ctl->read_line_cnt_fnc = NULL;
  967. ctl->add_vsync_handler = NULL;
  968. ctl->remove_vsync_handler = NULL;
  969. ctl->panel_data = NULL;
  970. ctl->config_fps_fnc = NULL;
  971. mutex_unlock(&mdss_mdp_ctl_lock);
  972. return 0;
  973. }
  974. static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
  975. struct mdss_mdp_ctl *ctl, u32 type, int mux)
  976. {
  977. struct mdss_mdp_mixer *mixer = NULL, *alt_mixer = NULL;
  978. u32 nmixers_intf;
  979. u32 nmixers_wb;
  980. u32 i;
  981. u32 nmixers;
  982. struct mdss_mdp_mixer *mixer_pool = NULL;
  983. if (!ctl || !ctl->mdata)
  984. return NULL;
  985. mutex_lock(&mdss_mdp_ctl_lock);
  986. nmixers_intf = ctl->mdata->nmixers_intf;
  987. nmixers_wb = ctl->mdata->nmixers_wb;
  988. switch (type) {
  989. case MDSS_MDP_MIXER_TYPE_INTF:
  990. mixer_pool = ctl->mdata->mixer_intf;
  991. nmixers = nmixers_intf;
  992. /*
  993. * try to reserve first layer mixer for write back if
  994. * assertive display needs to be supported through wfd
  995. */
  996. if (ctl->mdata->has_wb_ad && ctl->intf_num) {
  997. alt_mixer = mixer_pool;
  998. mixer_pool++;
  999. nmixers--;
  1000. }
  1001. break;
  1002. case MDSS_MDP_MIXER_TYPE_WRITEBACK:
  1003. mixer_pool = ctl->mdata->mixer_wb;
  1004. nmixers = nmixers_wb;
  1005. break;
  1006. default:
  1007. nmixers = 0;
  1008. pr_err("invalid pipe type %d\n", type);
  1009. break;
  1010. }
  1011. /* early mdp revision only supports mux of dual pipe on mixers 0 and 1,
  1012. * need to ensure that these pipes are readily available by using
  1013. * mixer 2 if available and mux is not required */
  1014. if (!mux && (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_100) &&
  1015. (type == MDSS_MDP_MIXER_TYPE_INTF) &&
  1016. (nmixers >= MDSS_MDP_INTF_LAYERMIXER2) &&
  1017. (mixer_pool[MDSS_MDP_INTF_LAYERMIXER2].ref_cnt == 0))
  1018. mixer_pool += MDSS_MDP_INTF_LAYERMIXER2;
  1019. /*Allocate virtual wb mixer if no dedicated wfd wb blk is present*/
  1020. if (!ctl->mdata->has_wfd_blk && (type == MDSS_MDP_MIXER_TYPE_WRITEBACK))
  1021. nmixers += 1;
  1022. for (i = 0; i < nmixers; i++) {
  1023. mixer = mixer_pool + i;
  1024. if (mixer->ref_cnt == 0) {
  1025. mixer->ref_cnt++;
  1026. mixer->params_changed++;
  1027. mixer->ctl = ctl;
  1028. pr_debug("alloc mixer num %d for ctl=%d\n",
  1029. mixer->num, ctl->num);
  1030. break;
  1031. }
  1032. mixer = NULL;
  1033. }
  1034. if (!mixer && alt_mixer && (alt_mixer->ref_cnt == 0))
  1035. mixer = alt_mixer;
  1036. mutex_unlock(&mdss_mdp_ctl_lock);
  1037. return mixer;
  1038. }
  1039. static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer)
  1040. {
  1041. if (!mixer)
  1042. return -ENODEV;
  1043. pr_debug("free mixer_num=%d ref_cnt=%d\n", mixer->num, mixer->ref_cnt);
  1044. if (!mixer->ref_cnt) {
  1045. pr_err("called with ref_cnt=0\n");
  1046. return -EINVAL;
  1047. }
  1048. mutex_lock(&mdss_mdp_ctl_lock);
  1049. mixer->ref_cnt--;
  1050. mutex_unlock(&mdss_mdp_ctl_lock);
  1051. return 0;
  1052. }
  1053. struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator)
  1054. {
  1055. struct mdss_mdp_ctl *ctl = NULL;
  1056. struct mdss_mdp_mixer *mixer = NULL;
  1057. ctl = mdss_mdp_ctl_alloc(mdss_res, mdss_res->nmixers_intf);
  1058. if (!ctl) {
  1059. pr_debug("unable to allocate wb ctl\n");
  1060. return NULL;
  1061. }
  1062. mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK, false);
  1063. if (!mixer) {
  1064. pr_debug("unable to allocate wb mixer\n");
  1065. goto error;
  1066. }
  1067. mixer->rotator_mode = rotator;
  1068. switch (mixer->num) {
  1069. case MDSS_MDP_WB_LAYERMIXER0:
  1070. ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT0_MODE :
  1071. MDSS_MDP_CTL_OP_WB0_MODE);
  1072. break;
  1073. case MDSS_MDP_WB_LAYERMIXER1:
  1074. ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT1_MODE :
  1075. MDSS_MDP_CTL_OP_WB1_MODE);
  1076. break;
  1077. default:
  1078. pr_err("invalid layer mixer=%d\n", mixer->num);
  1079. goto error;
  1080. }
  1081. ctl->mixer_left = mixer;
  1082. ctl->start_fnc = mdss_mdp_writeback_start;
  1083. ctl->power_on = true;
  1084. ctl->wb_type = (rotator ? MDSS_MDP_WB_CTL_TYPE_BLOCK :
  1085. MDSS_MDP_WB_CTL_TYPE_LINE);
  1086. mixer->ctl = ctl;
  1087. if (ctl->start_fnc)
  1088. ctl->start_fnc(ctl);
  1089. return mixer;
  1090. error:
  1091. if (mixer)
  1092. mdss_mdp_mixer_free(mixer);
  1093. if (ctl)
  1094. mdss_mdp_ctl_free(ctl);
  1095. return NULL;
  1096. }
  1097. int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer)
  1098. {
  1099. struct mdss_mdp_ctl *ctl;
  1100. if (!mixer || !mixer->ctl) {
  1101. pr_err("invalid ctl handle\n");
  1102. return -ENODEV;
  1103. }
  1104. ctl = mixer->ctl;
  1105. mixer->rotator_mode = 0;
  1106. pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);
  1107. if (ctl->stop_fnc)
  1108. ctl->stop_fnc(ctl);
  1109. mdss_mdp_ctl_free(ctl);
  1110. mdss_mdp_ctl_perf_update(ctl, 0);
  1111. return 0;
  1112. }
  1113. int mdss_mdp_ctl_splash_finish(struct mdss_mdp_ctl *ctl, bool handoff)
  1114. {
  1115. struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
  1116. if (sctl)
  1117. sctl->panel_data->panel_info.cont_splash_enabled = 0;
  1118. switch (ctl->panel_data->panel_info.type) {
  1119. case MIPI_VIDEO_PANEL:
  1120. case EDP_PANEL:
  1121. return mdss_mdp_video_reconfigure_splash_done(ctl, handoff);
  1122. case MIPI_CMD_PANEL:
  1123. return mdss_mdp_cmd_reconfigure_splash_done(ctl, handoff);
  1124. default:
  1125. return 0;
  1126. }
  1127. }
  1128. #if defined(CONFIG_FB_MSM_EDP_SAMSUNG)
  1129. int mdss_mdp_scan_pipes(void)
  1130. {
  1131. unsigned long off;
  1132. u32 size;
  1133. int i, pnum = 0;
  1134. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  1135. for (i = 0; i < 6; i++) {
  1136. off = MDSS_MDP_REG_SSPP_OFFSET(i) + MDSS_MDP_REG_SSPP_SRC_SIZE;
  1137. size = MDSS_MDP_REG_READ(off);
  1138. pr_debug("%s: i=%d: addr=%x hw=%x\n",
  1139. __func__, i, (int)off, (int)size);
  1140. if (size)
  1141. pnum++;
  1142. }
  1143. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  1144. return pnum;
  1145. }
  1146. #endif
  1147. static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
  1148. struct mdss_mdp_ctl *split_ctl)
  1149. {
  1150. if (!ctl || !split_ctl)
  1151. return -ENODEV;
  1152. /* setup split ctl mixer as right mixer of original ctl so that
  1153. * original ctl can work the same way as dual pipe solution */
  1154. ctl->mixer_right = split_ctl->mixer_left;
  1155. return 0;
  1156. }
  1157. static int mdss_mdp_ctl_fbc_enable(int enable,
  1158. struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata)
  1159. {
  1160. struct fbc_panel_info *fbc;
  1161. u32 mode = 0, budget_ctl = 0, lossy_mode = 0;
  1162. if (!pdata) {
  1163. pr_err("Invalid pdata\n");
  1164. return -EINVAL;
  1165. }
  1166. fbc = &pdata->fbc;
  1167. if (!fbc || !fbc->enabled) {
  1168. pr_err("Invalid FBC structure\n");
  1169. return -EINVAL;
  1170. }
  1171. if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0)
  1172. pr_debug("Mixer supports FBC.\n");
  1173. else {
  1174. pr_debug("Mixer doesn't support FBC.\n");
  1175. return -EINVAL;
  1176. }
  1177. if (enable) {
  1178. mode = ((pdata->xres) << 16) | ((fbc->comp_mode) << 8) |
  1179. ((fbc->qerr_enable) << 7) | ((fbc->cd_bias) << 4) |
  1180. ((fbc->pat_enable) << 3) | ((fbc->vlc_enable) << 2) |
  1181. ((fbc->bflc_enable) << 1) | enable;
  1182. budget_ctl = ((fbc->line_x_budget) << 12) |
  1183. ((fbc->block_x_budget) << 8) | fbc->block_budget;
  1184. lossy_mode = ((fbc->lossless_mode_thd) << 16) |
  1185. ((fbc->lossy_mode_thd) << 8) |
  1186. ((fbc->lossy_rgb_thd) << 3) | fbc->lossy_mode_idx;
  1187. }
  1188. mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_MODE, mode);
  1189. mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_BUDGET_CTL,
  1190. budget_ctl);
  1191. mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_LOSSY_MODE,
  1192. lossy_mode);
  1193. return 0;
  1194. }
int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
{
	struct mdss_mdp_ctl *split_ctl;
	u32 width, height;
	int split_fb;

	if (!ctl || !ctl->panel_data) {
		pr_err("invalid ctl handle\n");
		return -ENODEV;
	}

	split_ctl = mdss_mdp_get_split_ctl(ctl);

	width = ctl->panel_data->panel_info.xres;
	height = ctl->panel_data->panel_info.yres;

	split_fb = (ctl->mfd->split_fb_left &&
		    ctl->mfd->split_fb_right &&
		    (ctl->mfd->split_fb_left <= MAX_MIXER_WIDTH) &&
		    (ctl->mfd->split_fb_right <= MAX_MIXER_WIDTH)) ? 1 : 0;
	pr_debug("max=%d xres=%d left=%d right=%d\n", MAX_MIXER_WIDTH,
		 width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);

	if ((split_ctl && (width > MAX_MIXER_WIDTH)) ||
			(width > (2 * MAX_MIXER_WIDTH))) {
		pr_err("Unsupported panel resolution: %dx%d\n", width, height);
		return -ENOTSUPP;
	}

	ctl->width = width;
	ctl->height = height;
	ctl->roi = (struct mdss_mdp_img_rect) {0, 0, width, height};

	if (!ctl->mixer_left) {
		ctl->mixer_left =
			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
					((width > MAX_MIXER_WIDTH) || split_fb));
		if (!ctl->mixer_left) {
			pr_err("unable to allocate layer mixer\n");
			return -ENOMEM;
		}
	}

	if (split_fb)
		width = ctl->mfd->split_fb_left;
	else if (width > MAX_MIXER_WIDTH)
		width /= 2;

	ctl->mixer_left->width = width;
	ctl->mixer_left->height = height;
	ctl->mixer_left->roi = (struct mdss_mdp_img_rect) {0, 0, width, height};

	if (split_ctl) {
		pr_debug("split display detected\n");
		return 0;
	}

	if (split_fb)
		width = ctl->mfd->split_fb_right;

	if (width < ctl->width) {
		if (ctl->mixer_right == NULL) {
			ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
					MDSS_MDP_MIXER_TYPE_INTF, true);
			if (!ctl->mixer_right) {
				pr_err("unable to allocate right mixer\n");
				if (ctl->mixer_left)
					mdss_mdp_mixer_free(ctl->mixer_left);
				return -ENOMEM;
			}
		}
		ctl->mixer_right->width = width;
		ctl->mixer_right->height = height;
		ctl->mixer_right->roi = (struct mdss_mdp_img_rect)
						{0, 0, width, height};
	} else if (ctl->mixer_right) {
		mdss_mdp_mixer_free(ctl->mixer_right);
		ctl->mixer_right = NULL;
	}

	if (ctl->mixer_right) {
		ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
			       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
	} else {
		ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
				 MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
	}

	return 0;
}

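/*
 * mdss_mdp_ctl_setup_wfd() - pick a mixer for a writeback (WFD) ctl
 *
 * Prefers an interface-type mixer when the target exposes enough interface
 * mixers (the "WB2" case noted below) and falls back to a dedicated
 * writeback mixer otherwise. The ctl opmode is set to the matching
 * WFD/WB0/WB1 mode, and dedicated writeback mixers are set up for line-mode
 * writeback.
 */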
static int mdss_mdp_ctl_setup_wfd(struct mdss_mdp_ctl *ctl)
{
	struct mdss_data_type *mdata = ctl->mdata;
	struct mdss_mdp_mixer *mixer;
	int mixer_type;

	/* if WB2 is supported, try to allocate it first */
	if (mdata->nmixers_intf >= MDSS_MDP_INTF_LAYERMIXER2)
		mixer_type = MDSS_MDP_MIXER_TYPE_INTF;
	else
		mixer_type = MDSS_MDP_MIXER_TYPE_WRITEBACK;

	mixer = mdss_mdp_mixer_alloc(ctl, mixer_type, false);
	if (!mixer && mixer_type == MDSS_MDP_MIXER_TYPE_INTF)
		mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
				false);

	if (!mixer) {
		pr_err("Unable to allocate writeback mixer\n");
		return -ENOMEM;
	}

	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
	} else {
		switch (mixer->num) {
		case MDSS_MDP_WB_LAYERMIXER0:
			ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE;
			break;
		case MDSS_MDP_WB_LAYERMIXER1:
			ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE;
			break;
		default:
			pr_err("Incorrect writeback config num=%d\n",
					mixer->num);
			mdss_mdp_mixer_free(mixer);
			return -EINVAL;
		}
		ctl->wb_type = MDSS_MDP_WB_CTL_TYPE_LINE;
	}
	ctl->mixer_left = mixer;

	return 0;
}

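/*
 * mdss_mdp_ctl_init() - allocate and pre-configure a ctl for a panel
 *
 * Allocates a ctl and fills in the interface number/type, opmode and start
 * function based on pdata->panel_info.type (EDP, DSI video, DSI command,
 * HDMI/DTV or writeback). For interface panels the destination format and,
 * for 18bpp panels, a dither configuration are also programmed.
 *
 * Returns a valid ctl pointer on success and an ERR_PTR() on failure, so
 * callers are expected to check the result with IS_ERR().
 */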
struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
				       struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_ctl *ctl;
	int ret = 0;
	struct mdss_data_type *mdata = mfd_to_mdata(mfd);

	ctl = mdss_mdp_ctl_alloc(mdata, MDSS_MDP_CTL0);
	if (!ctl) {
		pr_err("unable to allocate ctl\n");
		return ERR_PTR(-ENOMEM);
	}
	ctl->mfd = mfd;
	ctl->panel_data = pdata;
	ctl->is_video_mode = false;
	ctl->perf_release_ctl_bw = false;

	switch (pdata->panel_info.type) {
	case EDP_PANEL:
		ctl->is_video_mode = true;
		ctl->intf_num = MDSS_MDP_INTF0;
		ctl->intf_type = MDSS_INTF_EDP;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->start_fnc = mdss_mdp_video_start;
		break;
	case MIPI_VIDEO_PANEL:
		ctl->is_video_mode = true;
		if (pdata->panel_info.pdest == DISPLAY_1)
			ctl->intf_num = MDSS_MDP_INTF1;
		else
			ctl->intf_num = MDSS_MDP_INTF2;
		ctl->intf_type = MDSS_INTF_DSI;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->start_fnc = mdss_mdp_video_start;
		break;
	case MIPI_CMD_PANEL:
		if (pdata->panel_info.pdest == DISPLAY_1)
			ctl->intf_num = MDSS_MDP_INTF1;
		else
			ctl->intf_num = MDSS_MDP_INTF2;
		ctl->intf_type = MDSS_INTF_DSI;
		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
		ctl->start_fnc = mdss_mdp_cmd_start;
		break;
	case DTV_PANEL:
		ctl->is_video_mode = true;
		ctl->intf_num = MDSS_MDP_INTF3;
		ctl->intf_type = MDSS_INTF_HDMI;
		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
		ctl->start_fnc = mdss_mdp_video_start;
#ifndef CONFIG_VIDEO_MHL_V2
		/*
		 * mdss_mdp_limited_lut_igc_config() programs a limited-range
		 * IGC LUT. The MHL driver already applies limited range on
		 * its side, so skip this call when MHL v2 is enabled.
		 */
		ret = mdss_mdp_limited_lut_igc_config(ctl);
		if (ret)
			pr_err("Unable to config IGC LUT data\n");
#endif
		break;
	case WRITEBACK_PANEL:
		ctl->intf_num = MDSS_MDP_NO_INTF;
		ctl->start_fnc = mdss_mdp_writeback_start;
		ret = mdss_mdp_ctl_setup_wfd(ctl);
		if (ret)
			goto ctl_init_fail;
		break;
	default:
		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
		ret = -EINVAL;
		goto ctl_init_fail;
	}

	ctl->opmode |= (ctl->intf_num << 4);

	if (ctl->intf_num == MDSS_MDP_NO_INTF) {
		ctl->dst_format = pdata->panel_info.out_format;
	} else {
		struct mdp_dither_cfg_data dither = {
			.block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0,
			.flags = MDP_PP_OPS_DISABLE,
		};

		switch (pdata->panel_info.bpp) {
		case 18:
			if (ctl->intf_type == MDSS_INTF_DSI)
				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666 |
					MDSS_MDP_PANEL_FORMAT_PACK_ALIGN_MSB;
			else
				ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
			dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
			dither.g_y_depth = 2;
			dither.r_cr_depth = 2;
			dither.b_cb_depth = 2;
			break;
		case 24:
		default:
			ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888;
			break;
		}
		mdss_mdp_dither_config(&dither, NULL);
	}

	return ctl;
ctl_init_fail:
	mdss_mdp_ctl_free(ctl);
	return ERR_PTR(ret);
}

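/*
 * mdss_mdp_ctl_split_display_setup() - attach the second panel of a split
 * display
 *
 * Creates a secondary ctl for the second panel of a dual-DSI split display,
 * gives each ctl its own interface-type mixer, and links the two through
 * mdss_mdp_set_split_ctl() so the secondary mixer acts as the right mixer
 * of the primary ctl.
 */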
int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
		struct mdss_panel_data *pdata)
{
	struct mdss_mdp_ctl *sctl;
	struct mdss_mdp_mixer *mixer;

	if (!ctl || !pdata)
		return -ENODEV;

	if (pdata->panel_info.xres > MAX_MIXER_WIDTH) {
		pr_err("Unsupported second panel resolution: %dx%d\n",
				pdata->panel_info.xres, pdata->panel_info.yres);
		return -ENOTSUPP;
	}

	if (ctl->mixer_right) {
		pr_err("right mixer already setup for ctl=%d\n", ctl->num);
		return -EPERM;
	}

	sctl = mdss_mdp_ctl_init(pdata, ctl->mfd);
	/* mdss_mdp_ctl_init() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR_OR_NULL(sctl)) {
		pr_err("unable to setup split display\n");
		return -ENODEV;
	}

	sctl->width = pdata->panel_info.xres;
	sctl->height = pdata->panel_info.yres;

	ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
			false);
	if (!ctl->mixer_left) {
		pr_err("unable to allocate layer mixer\n");
		mdss_mdp_ctl_destroy(sctl);
		return -ENOMEM;
	}

	mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false);
	if (!mixer) {
		pr_err("unable to allocate layer mixer\n");
		mdss_mdp_ctl_destroy(sctl);
		return -ENOMEM;
	}

	mixer->width = sctl->width;
	mixer->height = sctl->height;
	mixer->roi = (struct mdss_mdp_img_rect)
				{0, 0, mixer->width, mixer->height};
	sctl->mixer_left = mixer;

	return mdss_mdp_set_split_ctl(ctl, sctl);
}

static void mdss_mdp_ctl_split_display_enable(int enable,
	struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl)
{
	u32 upper = 0, lower = 0;

	pr_debug("split main ctl=%d intf=%d slave ctl=%d intf=%d\n",
			main_ctl->num, main_ctl->intf_num,
			slave_ctl->num, slave_ctl->intf_num);
	if (enable) {
		if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) {
			upper |= BIT(1);
			lower |= BIT(1);

			/* interface controlling sw trigger */
			if (main_ctl->intf_num == MDSS_MDP_INTF2)
				upper |= BIT(4);
			else
				upper |= BIT(8);
		} else { /* video mode */
			if (main_ctl->intf_num == MDSS_MDP_INTF2)
				lower |= BIT(4);
			else
				lower |= BIT(8);
		}
	}
	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper);
	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower);
	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_EN, enable);
}

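/*
 * mdss_mdp_ctl_destroy() - tear down a ctl and everything attached to it
 *
 * Sends MDSS_EVENT_CLOSE to the panel, releases the split ctl (if any) and
 * its mixer, frees the left/right mixers of this ctl, and finally releases
 * the ctl itself.
 */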
int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl)
{
	struct mdss_mdp_ctl *sctl;
	int rc;

	rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
	WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num);

	sctl = mdss_mdp_get_split_ctl(ctl);
	if (sctl) {
		pr_debug("destroying split display ctl=%d\n", sctl->num);
		if (sctl->mixer_left)
			mdss_mdp_mixer_free(sctl->mixer_left);
		mdss_mdp_ctl_free(sctl);
	} else if (ctl->mixer_right) {
		mdss_mdp_mixer_free(ctl->mixer_right);
		ctl->mixer_right = NULL;
	}

	if (ctl->mixer_left) {
		mdss_mdp_mixer_free(ctl->mixer_left);
		ctl->mixer_left = NULL;
	}
	mdss_mdp_ctl_free(ctl);

	return 0;
}

int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
{
	struct mdss_panel_data *pdata;
	int rc = 0;

	if (!ctl || !ctl->panel_data)
		return -ENODEV;

	pdata = ctl->panel_data;

	pr_debug("sending ctl=%d event=%d\n", ctl->num, event);

	do {
		if (pdata->event_handler)
			rc = pdata->event_handler(pdata, event, arg);
		pdata = pdata->next;
	} while (rc == 0 && pdata);

	return rc;
}

/*
 * mdss_mdp_ctl_restore() - restore mdp ctl path
 * @ctl: mdp controller.
 *
 * This function is called whenever MDP comes out of a power collapse as
 * a result of a screen update when DSI ULPS mode is enabled. It restores
 * the MDP controller's software state to the hardware registers.
 */
void mdss_mdp_ctl_restore(struct mdss_mdp_ctl *ctl)
{
	u32 temp;

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	temp = readl_relaxed(ctl->mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
	writel_relaxed(temp, ctl->mdata->mdp_base +
		MDSS_MDP_REG_DISP_INTF_SEL);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
}

static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl, bool handoff)
{
	struct mdss_mdp_mixer *mixer;
	u32 outsize, temp;
	int ret = 0;
	int i, nmixers;

	pr_debug("ctl_num=%d\n", ctl->num);

	/*
	 * Need start_fnc in 2 cases:
	 * (1) handoff
	 * (2) continuous splash finished.
	 */
	if (handoff || !ctl->panel_data->panel_info.cont_splash_enabled) {
		if (ctl->start_fnc)
			ret = ctl->start_fnc(ctl);
		else
			pr_warn("no start function for ctl=%d type=%d\n",
					ctl->num,
					ctl->panel_data->panel_info.type);

		if (ret) {
			pr_err("unable to start intf\n");
			return ret;
		}
	}

	if (!ctl->panel_data->panel_info.cont_splash_enabled) {
		nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER +
			MDSS_MDP_WB_MAX_LAYERMIXER;
		for (i = 0; i < nmixers; i++)
			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0);
	}

	mixer = ctl->mixer_left;
	mdss_mdp_pp_resume(ctl, mixer->num);
	mixer->params_changed++;

	temp = MDSS_MDP_REG_READ(MDSS_MDP_REG_DISP_INTF_SEL);
	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_DISP_INTF_SEL, temp);

	outsize = (mixer->height << 16) | mixer->width;
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);

	if (ctl->panel_data->panel_info.fbc.enabled) {
		ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
				&ctl->panel_data->panel_info);
	}

	return ret;
}

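/*
 * mdss_mdp_ctl_start() - power on and program a ctl path
 * @ctl:	controller to start
 * @handoff:	true when taking over a pipeline left running by the bootloader
 *
 * Runs mdss_mdp_ctl_setup(), resets the panel via MDSS_EVENT_RESET and then
 * brings up the main ctl (and the split ctl or right mixer, when present)
 * through mdss_mdp_ctl_start_sub(). During handoff, power_on is deliberately
 * left false so overlay operations do not touch the inherited state.
 */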
int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff)
{
	struct mdss_mdp_ctl *sctl;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	int ret = 0;

	if (ctl->power_on) {
		pr_debug("%d: panel already on!\n", __LINE__);
		return 0;
	}

	ret = mdss_mdp_ctl_setup(ctl);
	if (ret)
		return ret;

	sctl = mdss_mdp_get_split_ctl(ctl);

	mutex_lock(&ctl->lock);

	/*
	 * keep power_on false during handoff to avoid unexpected
	 * operations to overlay.
	 */
	if (!handoff)
		ctl->power_on = true;

	memset(&ctl->cur_perf, 0, sizeof(ctl->cur_perf));

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);

	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
	if (ret) {
		pr_err("panel power on failed ctl=%d\n", ctl->num);
		goto error;
	}

	ret = mdss_mdp_ctl_start_sub(ctl, handoff);
	if (ret == 0) {
		if (sctl) { /* split display is available */
			ret = mdss_mdp_ctl_start_sub(sctl, handoff);
			if (!ret)
				mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
		} else if (ctl->mixer_right) {
			struct mdss_mdp_mixer *mixer = ctl->mixer_right;
			u32 out, off;

			mdss_mdp_pp_resume(ctl, mixer->num);
			mixer->params_changed++;
			out = (mixer->height << 16) | mixer->width;
			off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
			MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, out);
			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
		}
	}
	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_RESUME);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
error:
	mutex_unlock(&ctl->lock);

	return ret;
}

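/*
 * mdss_mdp_ctl_stop() - power off a ctl path
 *
 * Suspends histogram interrupts, calls the interface stop function for the
 * main and split ctl, and on success clears CTL_TOP and the layer mixer
 * registers before dropping power_on and releasing the allocated bandwidth
 * via mdss_mdp_ctl_perf_update(ctl, 0).
 */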
int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl)
{
	struct mdss_mdp_ctl *sctl;
	int ret = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
	u32 off;

	if (!ctl->power_on) {
		pr_debug("%s %d already off!\n", __func__, __LINE__);
		return 0;
	}

	sctl = mdss_mdp_get_split_ctl(ctl);

	pr_debug("ctl_num=%d\n", ctl->num);

	mutex_lock(&ctl->lock);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);

	mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);

	if (ctl->stop_fnc)
		ret = ctl->stop_fnc(ctl);
	else
		pr_warn("no stop func for ctl=%d\n", ctl->num);

	if (sctl && sctl->stop_fnc) {
		ret = sctl->stop_fnc(sctl);

		mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
	}

	if (ret) {
		pr_warn("error powering off intf ctl=%d\n", ctl->num);
	} else {
		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, 0);
		if (sctl)
			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, 0);

		if (ctl->mixer_left) {
			off = __mdss_mdp_ctl_get_mixer_off(ctl->mixer_left);
			mdss_mdp_ctl_write(ctl, off, 0);
		}

		if (ctl->mixer_right) {
			off = __mdss_mdp_ctl_get_mixer_off(ctl->mixer_right);
			mdss_mdp_ctl_write(ctl, off, 0);
		}

		ctl->power_on = false;
		ctl->play_cnt = 0;
		mdss_mdp_ctl_perf_update(ctl, 0);
	}

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);

	mutex_unlock(&ctl->lock);

	return ret;
}

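/*
 * mdss_mdp_set_roi() - apply the ROI requested with a commit
 *
 * Copies the ROI from the commit data and falls back to a full-screen ROI
 * whenever partial update cannot be used (empty rectangle, dual mixers,
 * non command-mode panel, or partial update disabled in the panel info).
 * roi_changed and the left mixer's params_changed are bumped only when the
 * rectangle actually differs from the current one.
 */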
void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
		struct mdp_display_commit *data)
{
	struct mdss_mdp_img_rect temp_roi, mixer_roi;

	temp_roi.x = data->roi.x;
	temp_roi.y = data->roi.y;
	temp_roi.w = data->roi.w;
	temp_roi.h = data->roi.h;

	/*
	 * No Partial Update for:
	 * 1) dual DSI panels
	 * 2) non-cmd mode panels
	 */
	if (!temp_roi.w || !temp_roi.h || ctl->mixer_right ||
			(ctl->panel_data->panel_info.type != MIPI_CMD_PANEL) ||
			!ctl->panel_data->panel_info.partial_update_enabled) {
		temp_roi = (struct mdss_mdp_img_rect)
				{0, 0, ctl->mixer_left->width,
					ctl->mixer_left->height};
	}

	ctl->roi_changed = 0;
	if (((temp_roi.x != ctl->roi.x) ||
			(temp_roi.y != ctl->roi.y)) ||
			((temp_roi.w != ctl->roi.w) ||
			(temp_roi.h != ctl->roi.h))) {
		ctl->roi = temp_roi;
		ctl->roi_changed++;

		mixer_roi = ctl->mixer_left->roi;
		if ((mixer_roi.w != temp_roi.w) ||
				(mixer_roi.h != temp_roi.h)) {
			ctl->mixer_left->roi = temp_roi;
			ctl->mixer_left->params_changed++;
		}
	}
	pr_debug("ROI requested: [%d, %d, %d, %d]\n",
			ctl->roi.x, ctl->roi.y, ctl->roi.w, ctl->roi.h);
}

/*
 * mdss_mdp_ctl_reset() - reset mdp ctl path.
 * @ctl: mdp controller.
 *
 * This function is called when an underflow happens; it resets the MDP
 * ctl path and polls until the reset completes.
 *
 * Note: called within atomic context.
 */
int mdss_mdp_ctl_reset(struct mdss_mdp_ctl *ctl)
{
	u32 status = 1;
	int cnt = 20;

	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_SW_RESET, 1);

	/*
	 * it takes around 30us for the MDP to finish resetting its ctl path;
	 * poll every 50us so the reset should already be done on the first
	 * poll
	 */
	do {
		udelay(50);
		status = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_SW_RESET);
		status &= 0x01;
		pr_debug("status=%x\n", status);
		cnt--;
		if (cnt == 0) {
			pr_err("timeout\n");
			return -EAGAIN;
		}
	} while (status);

	return 0;
}

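/*
 * mdss_mdp_mixer_setup() - program blend stages and the ctl layer register
 *
 * Writes the mixer output size, then walks the staged pipes and programs a
 * blend operation (OPAQUE, PREMULTIPLIED or COVERAGE) plus constant FG/BG
 * alpha for each stage. Each pipe occupies a 3-bit field in mixercfg, which
 * is written to the ctl layer register at the end; a forced-blank screen
 * state or an empty base stage results in border color only. The matching
 * mixer flush bit is also added to ctl->flush_bits here.
 */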
static int mdss_mdp_mixer_setup(struct mdss_mdp_ctl *ctl,
		struct mdss_mdp_mixer *mixer)
{
	struct mdss_mdp_pipe *pipe;
	u32 off, blend_op, blend_stage;
	u32 mixercfg = 0, blend_color_out = 0, bg_alpha_enable = 0;
	u32 fg_alpha = 0, bg_alpha = 0;
	int stage, secure = 0;
	int screen_state;
	int outsize = 0;
	u32 op_mode;

	screen_state = ctl->force_screen_state;

	if (!mixer)
		return -ENODEV;

	trace_mdp_mixer_update(mixer->num);
	pr_debug("setup mixer=%d\n", mixer->num);

	outsize = (mixer->roi.h << 16) | mixer->roi.w;
	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);

	if (screen_state == MDSS_SCREEN_FORCE_BLANK) {
		mixercfg = MDSS_MDP_LM_BORDER_COLOR;
		goto update_mixer;
	}

	pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE];
	if (pipe == NULL) {
		mixercfg = MDSS_MDP_LM_BORDER_COLOR;
	} else {
		if (pipe->num == MDSS_MDP_SSPP_VIG3 ||
				pipe->num == MDSS_MDP_SSPP_RGB3) {
			/* Add 2 to account for Cursor & Border bits */
			mixercfg = 1 << ((3 * pipe->num) + 2);
		} else {
			mixercfg = 1 << (3 * pipe->num);
		}
		if (pipe->src_fmt->alpha_enable)
			bg_alpha_enable = 1;
		secure = pipe->flags & MDP_SECURE_OVERLAY_SESSION;
	}

	for (stage = MDSS_MDP_STAGE_0; stage < MDSS_MDP_MAX_STAGE; stage++) {
		pipe = mixer->stage_pipe[stage];
		if (pipe == NULL)
			continue;

		if (stage != pipe->mixer_stage) {
			mixer->stage_pipe[stage] = NULL;
			continue;
		}

		blend_stage = stage - MDSS_MDP_STAGE_0;
		off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);

		blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
			    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
		fg_alpha = pipe->alpha;
		bg_alpha = 0xFF - pipe->alpha;
		/* keep fg alpha */
		blend_color_out |= 1 << (blend_stage + 1);

		switch (pipe->blend_op) {
		case BLEND_OP_OPAQUE:
			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
			pr_debug("pnum=%d stg=%d op=OPAQUE\n", pipe->num,
					stage);
			break;

		case BLEND_OP_PREMULTIPLIED:
			if (pipe->src_fmt->alpha_enable) {
				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
				if (fg_alpha != 0xff) {
					bg_alpha = fg_alpha;
					blend_op |=
						MDSS_MDP_BLEND_BG_MOD_ALPHA |
						MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
				} else {
					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
				}
			}
			pr_debug("pnum=%d stg=%d op=PREMULTIPLIED\n", pipe->num,
					stage);
			break;

		case BLEND_OP_COVERAGE:
			if (pipe->src_fmt->alpha_enable) {
				blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_PIXEL |
					    MDSS_MDP_BLEND_BG_ALPHA_FG_PIXEL);
				if (fg_alpha != 0xff) {
					bg_alpha = fg_alpha;
					blend_op |=
						MDSS_MDP_BLEND_FG_MOD_ALPHA |
						MDSS_MDP_BLEND_FG_INV_MOD_ALPHA |
						MDSS_MDP_BLEND_BG_MOD_ALPHA |
						MDSS_MDP_BLEND_BG_INV_MOD_ALPHA;
				} else {
					blend_op |= MDSS_MDP_BLEND_BG_INV_ALPHA;
				}
			}
			pr_debug("pnum=%d stg=%d op=COVERAGE\n", pipe->num,
					stage);
			break;

		default:
			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
			pr_debug("pnum=%d stg=%d op=NONE\n", pipe->num,
					stage);
			break;
		}

		if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
			blend_color_out = 0;

		mixercfg |= stage << (3 * pipe->num);

		trace_mdp_sspp_change(pipe);

		pr_debug("stg=%d op=%x fg_alpha=%x bg_alpha=%x\n", stage,
				blend_op, fg_alpha, bg_alpha);
		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA,
				fg_alpha);
		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA,
				bg_alpha);
	}

	if (mixer->cursor_enabled)
		mixercfg |= MDSS_MDP_LM_CURSOR_OUT;

update_mixer:
	pr_debug("mixer=%d mixer_cfg=%x\n", mixer->num, mixercfg);

	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
		ctl->flush_bits |= BIT(20);
	else if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK)
		ctl->flush_bits |= BIT(9) << mixer->num;
	else
		ctl->flush_bits |= BIT(6) << mixer->num;

	op_mode = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_OP_MODE);
	/* Read GC enable/disable status on LM */
	op_mode = (op_mode & BIT(0));
	blend_color_out |= op_mode;

	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OP_MODE, blend_color_out);
	off = __mdss_mdp_ctl_get_mixer_off(mixer);
	mdss_mdp_ctl_write(ctl, off, mixercfg);

	return 0;
}

int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata,
	u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets,
	u32 type, u32 len)
{
	struct mdss_mdp_mixer *head;
	u32 i;
	int rc = 0;
	u32 size = len;

	if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && !mdata->has_wfd_blk)
		size++;

	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_mixer) *
			size, GFP_KERNEL);

	if (!head) {
		pr_err("unable to setup mixer type=%d :kzalloc fail\n",
			type);
		return -ENOMEM;
	}

	for (i = 0; i < len; i++) {
		head[i].type = type;
		head[i].base = mdata->mdp_base + mixer_offsets[i];
		head[i].ref_cnt = 0;
		head[i].num = i;
		if (type == MDSS_MDP_MIXER_TYPE_INTF) {
			head[i].dspp_base = mdata->mdp_base + dspp_offsets[i];
			head[i].pingpong_base = mdata->mdp_base +
				pingpong_offsets[i];
		}
	}

	/*
	 * Duplicate the last writeback mixer for concurrent line and block
	 * mode operations
	 */
	if ((type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && !mdata->has_wfd_blk)
		head[len] = head[len - 1];

	switch (type) {
	case MDSS_MDP_MIXER_TYPE_INTF:
		mdata->mixer_intf = head;
		break;
	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
		mdata->mixer_wb = head;
		break;
	default:
		pr_err("Invalid mixer type=%d\n", type);
		rc = -EINVAL;
		break;
	}

	return rc;
}

int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata,
	u32 *ctl_offsets, u32 *wb_offsets, u32 len)
{
	struct mdss_mdp_ctl *head;
	struct mutex *shared_lock = NULL;
	struct mutex *wb_lock = NULL;
	u32 i;
	u32 size = len;

	if (!mdata->has_wfd_blk) {
		size++;
		shared_lock = devm_kzalloc(&mdata->pdev->dev,
					   sizeof(struct mutex),
					   GFP_KERNEL);
		if (!shared_lock) {
			pr_err("unable to allocate mem for mutex\n");
			return -ENOMEM;
		}
		mutex_init(shared_lock);
		wb_lock = devm_kzalloc(&mdata->pdev->dev,
					   sizeof(struct mutex),
					   GFP_KERNEL);
		if (!wb_lock) {
			pr_err("unable to allocate mem for mutex\n");
			return -ENOMEM;
		}
		mutex_init(wb_lock);
	}

	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_ctl) *
			size, GFP_KERNEL);

	if (!head) {
		pr_err("unable to setup ctl and wb: kzalloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < len; i++) {
		head[i].num = i;
		head[i].base = (mdata->mdp_base) + ctl_offsets[i];
		head[i].wb_base = (mdata->mdp_base) + wb_offsets[i];
		head[i].ref_cnt = 0;
	}

	if (!mdata->has_wfd_blk) {
		head[len - 1].shared_lock = shared_lock;
		head[len - 1].wb_lock = wb_lock;
		/*
		 * Allocate a virtual ctl to be able to perform simultaneous
		 * line mode and block mode operations on the same
		 * writeback block
		 */
		head[len] = head[len - 1];
		head[len].num = head[len - 1].num;
	}
	mdata->ctl_off = head;

	return 0;
}

struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
{
	struct mdss_mdp_mixer *mixer = NULL;
	struct mdss_overlay_private *mdp5_data = NULL;
	bool is_mixer_swapped = false;

	if (!ctl) {
		pr_err("ctl not initialized\n");
		return NULL;
	}

	if (ctl->mfd) {
		mdp5_data = mfd_to_mdp5_data(ctl->mfd);
		if (!mdp5_data) {
			pr_err("mdp5_data not initialized\n");
			return NULL;
		}
		is_mixer_swapped = mdp5_data->mixer_swap;
	}

	switch (mux) {
	case MDSS_MDP_MIXER_MUX_DEFAULT:
	case MDSS_MDP_MIXER_MUX_LEFT:
		mixer = is_mixer_swapped ?
			ctl->mixer_right : ctl->mixer_left;
		break;
	case MDSS_MDP_MIXER_MUX_RIGHT:
		mixer = is_mixer_swapped ?
			ctl->mixer_left : ctl->mixer_right;
		break;
	}

	return mixer;
}

struct mdss_mdp_pipe *mdss_mdp_mixer_stage_pipe(struct mdss_mdp_ctl *ctl,
						int mux, int stage)
{
	struct mdss_mdp_pipe *pipe = NULL;
	struct mdss_mdp_mixer *mixer;

	if (!ctl)
		return NULL;

	if (mutex_lock_interruptible(&ctl->lock))
		return NULL;

	mixer = mdss_mdp_mixer_get(ctl, mux);
	if (mixer)
		pipe = mixer->stage_pipe[stage];
	mutex_unlock(&ctl->lock);

	return pipe;
}

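/*
 * mdss_mdp_mixer_pipe_update() - (re)stage a pipe and mark it for flush
 *
 * When @params_changed is set, the pipe is placed in its mixer_stage slot
 * and removed from any other slot it previously occupied. The pipe's flush
 * bit is then added to ctl->flush_bits: DMA pipes are shifted by 5,
 * VIG3/RGB3 by 10, and the remaining VIG/RGB pipes use BIT(pipe->num)
 * directly.
 */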
int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe, int params_changed)
{
	struct mdss_mdp_ctl *ctl;
	struct mdss_mdp_mixer *mixer;
	int i;

	if (!pipe)
		return -EINVAL;
	mixer = pipe->mixer;
	if (!mixer)
		return -EINVAL;
	ctl = mixer->ctl;
	if (!ctl)
		return -EINVAL;

	if (pipe->mixer_stage >= MDSS_MDP_MAX_STAGE) {
		pr_err("invalid mixer stage\n");
		return -EINVAL;
	}

	pr_debug("pnum=%x mixer=%d stage=%d\n", pipe->num, mixer->num,
			pipe->mixer_stage);

	if (mutex_lock_interruptible(&ctl->lock))
		return -EINTR;

	if (params_changed) {
		mixer->params_changed++;
		for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
			if (i == pipe->mixer_stage)
				mixer->stage_pipe[i] = pipe;
			else if (mixer->stage_pipe[i] == pipe)
				mixer->stage_pipe[i] = NULL;
		}
	}

	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
		ctl->flush_bits |= BIT(pipe->num) << 5;
	else if (pipe->num == MDSS_MDP_SSPP_VIG3 ||
			pipe->num == MDSS_MDP_SSPP_RGB3)
		ctl->flush_bits |= BIT(pipe->num) << 10;
	else /* RGB/VIG 0-2 pipes */
		ctl->flush_bits |= BIT(pipe->num);

	mutex_unlock(&ctl->lock);

	return 0;
}

/**
 * mdss_mdp_mixer_unstage_all() - Unstage all pipes from mixer
 * @mixer:	Mixer from which to unstage all pipes
 *
 * Unstage any pipes that are currently attached to mixer.
 *
 * NOTE: this will not update the pipe structure, and thus a full
 * deinitialization or reconfiguration of all pipes is expected after this
 * call.
 */
void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer)
{
	struct mdss_mdp_pipe *tmp;
	int i;

	if (!mixer)
		return;

	for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
		tmp = mixer->stage_pipe[i];
		if (tmp) {
			mixer->stage_pipe[i] = NULL;
			mixer->params_changed++;
			tmp->params_changed++;
		}
	}
}

int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe)
{
	struct mdss_mdp_ctl *ctl;
	struct mdss_mdp_mixer *mixer;

	if (!pipe)
		return -EINVAL;
	mixer = pipe->mixer;
	if (!mixer)
		return -EINVAL;
	ctl = mixer->ctl;
	if (!ctl)
		return -EINVAL;

	pr_debug("unstage pnum=%d stage=%d mixer=%d\n", pipe->num,
			pipe->mixer_stage, mixer->num);

	if (mutex_lock_interruptible(&ctl->lock))
		return -EINTR;

	if (pipe == mixer->stage_pipe[pipe->mixer_stage]) {
		mixer->params_changed++;
		mixer->stage_pipe[pipe->mixer_stage] = NULL;
	}

	mutex_unlock(&ctl->lock);

	return 0;
}

static int mdss_mdp_mixer_update(struct mdss_mdp_mixer *mixer)
{
	u32 off = 0;

	if (!mixer)
		return -EINVAL;

	mixer->params_changed = 0;

	/* skip mixer setup for rotator */
	if (!mixer->rotator_mode) {
		mdss_mdp_mixer_setup(mixer->ctl, mixer);
	} else {
		off = __mdss_mdp_ctl_get_mixer_off(mixer);
		mdss_mdp_ctl_write(mixer->ctl, off, 0);
	}

	return 0;
}

int mdss_mdp_ctl_update_fps(struct mdss_mdp_ctl *ctl, int fps)
{
	int ret = 0;
	struct mdss_mdp_ctl *sctl = NULL;

	sctl = mdss_mdp_get_split_ctl(ctl);

	if (ctl->config_fps_fnc)
		ret = ctl->config_fps_fnc(ctl, sctl, fps);

	return ret;
}

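/*
 * mdss_mdp_display_wakeup_time() - estimate the time of the next vsync
 *
 * Uses the panel pixel clock and horizontal timing to compute the duration
 * of one line, reads the current line count through read_line_cnt_fnc, and
 * returns current_time + (total_line - current_line) * time_of_line in
 * @wakeup_time. Returns -ENOSYS when the interface has no line counter.
 */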
int mdss_mdp_display_wakeup_time(struct mdss_mdp_ctl *ctl,
				 ktime_t *wakeup_time)
{
	struct mdss_panel_info *pinfo;
	u32 clk_rate, clk_period;
	u32 current_line, total_line;
	u32 time_of_line, time_to_vsync;
	ktime_t current_time = ktime_get();

	if (!ctl) {
		pr_err("%s: invalid ctl\n", __func__);
		return -ENODEV;
	}

	if (!ctl->read_line_cnt_fnc)
		return -ENOSYS;

	pinfo = &ctl->panel_data->panel_info;
	if (!pinfo)
		return -ENODEV;

	clk_rate = mdss_mdp_get_pclk_rate(ctl);
	clk_rate /= 1000;	/* in kHz */
	if (!clk_rate)
		return -EINVAL;

	/*
	 * calculate clk_period in picoseconds to maintain good accuracy with
	 * a high pclk rate; this number stays within a 17-bit range.
	 */
	clk_period = 1000000000 / clk_rate;
	if (!clk_period)
		return -EINVAL;

	time_of_line = (pinfo->lcdc.h_back_porch +
		 pinfo->lcdc.h_front_porch +
		 pinfo->lcdc.h_pulse_width +
		 pinfo->xres) * clk_period;

	time_of_line /= 1000;	/* in nanoseconds */
	if (!time_of_line)
		return -EINVAL;

	current_line = ctl->read_line_cnt_fnc(ctl);

	total_line = pinfo->lcdc.v_back_porch +
		pinfo->lcdc.v_front_porch +
		pinfo->lcdc.v_pulse_width +
		pinfo->yres;

	if (current_line > total_line)
		return -EINVAL;

	time_to_vsync = time_of_line * (total_line - current_line);
	if (!time_to_vsync)
		return -EINVAL;

	*wakeup_time = ktime_add_ns(current_time, time_to_vsync);

	pr_debug("clk_rate=%dkHz clk_period=%d cur_line=%d tot_line=%d\n",
		clk_rate, clk_period, current_line, total_line);
	pr_debug("time_to_vsync=%d current_time=%d wakeup_time=%d\n",
		time_to_vsync, (int)ktime_to_ms(current_time),
		(int)ktime_to_ms(*wakeup_time));

	return 0;
}

int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
{
	int ret;

	if (!ctl) {
		pr_err("invalid ctl\n");
		return -ENODEV;
	}

	ret = mutex_lock_interruptible(&ctl->lock);
	if (ret)
		return ret;

	if (!ctl->power_on) {
		mutex_unlock(&ctl->lock);
		return 0;
	}

	ATRACE_BEGIN("wait_fnc");
	if (ctl->wait_fnc)
		ret = ctl->wait_fnc(ctl, NULL);
	ATRACE_END("wait_fnc");

	trace_mdp_commit(ctl);

	mdss_mdp_ctl_perf_update(ctl, 0);

	mutex_unlock(&ctl->lock);

	return ret;
}

int mdss_mdp_display_wait4pingpong(struct mdss_mdp_ctl *ctl)
{
	int ret;

	ret = mutex_lock_interruptible(&ctl->lock);
	if (ret)
		return ret;

	if (!ctl->power_on) {
		mutex_unlock(&ctl->lock);
		return 0;
	}

	if (ctl->wait_pingpong)
		ret = ctl->wait_pingpong(ctl, NULL);

	mutex_unlock(&ctl->lock);

	return ret;
}

#if defined(CONFIG_FB_MSM_MDSS_DSI_DBG)
struct mdss_mdp_ctl *commit_ctl;
#endif

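/*
 * mdss_mdp_display_commit() - program and kick off one frame
 *
 * Under ctl->lock: re-evaluate bandwidth if it was previously released, run
 * the interface prepare function, reprogram any mixer whose parameters
 * changed, wait for a pending pingpong done, apply post-processing, write
 * the accumulated flush bits and finally call display_fnc to start the
 * frame.
 */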
int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
{
	struct mdss_mdp_ctl *sctl = NULL;
	int mixer1_changed, mixer2_changed;
	int ret = 0;
	bool is_bw_released;
#if defined(CONFIG_FB_MSM_CAMERA_CSC)
	struct mdss_overlay_private *mdp5_data = NULL;
#endif

	if (!ctl) {
		pr_err("display function not set\n");
		return -ENODEV;
	}

#if defined(CONFIG_FB_MSM_MDSS_DSI_DBG)
	commit_ctl = ctl;
#endif

	ATRACE_BEGIN(__func__);
	mutex_lock(&ctl->lock);
	pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);

	if (!ctl->power_on) {
		mutex_unlock(&ctl->lock);
		return 0;
	}

	sctl = mdss_mdp_get_split_ctl(ctl);
	mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
	mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);

	/*
	 * We could have released the bandwidth if there were no transactions
	 * pending, so we want to re-calculate the bandwidth in this situation
	 */
	is_bw_released = !mdss_mdp_ctl_perf_get_transaction_status(ctl);
	mdss_mdp_ctl_perf_set_transaction_status(ctl, PERF_SW_COMMIT_STATE,
		PERF_STATUS_BUSY);

	if (is_bw_released || mixer1_changed || mixer2_changed
			|| ctl->force_screen_state) {
		ATRACE_BEGIN("prepare_fnc");
		if (ctl->prepare_fnc)
			ret = ctl->prepare_fnc(ctl, arg);
		ATRACE_END("prepare_fnc");
		if (ret) {
			pr_err("error preparing display\n");
			goto done;
		}

		ATRACE_BEGIN("mixer_programming");
		mdss_mdp_ctl_perf_update(ctl, 1);

		if (mixer1_changed)
			mdss_mdp_mixer_update(ctl->mixer_left);
		if (mixer2_changed)
			mdss_mdp_mixer_update(ctl->mixer_right);

		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode);
		ctl->flush_bits |= BIT(17);	/* CTL */

		if (sctl) {
			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP,
					sctl->opmode);
			sctl->flush_bits |= BIT(17);
		}
		ATRACE_END("mixer_programming");
	}

	ATRACE_BEGIN("frame_ready");
	if (!ctl->shared_lock)
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_READY);
	ATRACE_END("frame_ready");

	ATRACE_BEGIN("wait_pingpong");
	if (ctl->wait_pingpong)
		ctl->wait_pingpong(ctl, NULL);
	ATRACE_END("wait_pingpong");

	ctl->roi_bkup.w = ctl->roi.w;
	ctl->roi_bkup.h = ctl->roi.h;

	ATRACE_BEGIN("postproc_programming");
	if (ctl->mfd && ctl->mfd->dcm_state != DTM_ENTER)
		/* postprocessing setup, including dspp */
		mdss_mdp_pp_setup_locked(ctl);
	ATRACE_END("postproc_programming");

	ATRACE_BEGIN("flush_kickoff");
	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl->flush_bits);
	if (sctl) {
		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
			sctl->flush_bits);
	}
	wmb();
	ctl->flush_bits = 0;

#if defined(CONFIG_FB_MSM_CAMERA_CSC)
	if (ctl->mfd)
		mdp5_data = mfd_to_mdp5_data(ctl->mfd);

	if (mdp5_data) {
		mutex_lock(&mdp5_data->list_lock);
		if (csc_change == 1) {
			struct mdss_mdp_pipe *pipe, *next;

			if (ctl->wait_video_pingpong) {
				mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_COMP,
						ctl->num);
				ctl->wait_video_pingpong(ctl, NULL);
			}
			list_for_each_entry_safe(pipe, next,
					&mdp5_data->pipes_used, list) {
				if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
					pr_info(" mdss_mdp_csc_setup start\n");
					mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP,
							pipe->num, 1,
							MDSS_MDP_CSC_YUV2RGB);
					csc_change = 0;
				}
			}
		}
		mutex_unlock(&mdp5_data->list_lock);
	}
#endif
	mdss_mdp_xlog_mixer_reg(ctl);
	if (ctl->display_fnc)
		ret = ctl->display_fnc(ctl, arg); /* kickoff */
	if (ret)
		pr_warn("error displaying frame\n");

	ctl->play_cnt++;
	ATRACE_END("flush_kickoff");

done:
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
	mutex_unlock(&ctl->lock);

	ATRACE_END(__func__);

	return ret;
}

void mdss_mdp_ctl_notifier_register(struct mdss_mdp_ctl *ctl,
	struct notifier_block *notifier)
{
	blocking_notifier_chain_register(&ctl->notifier_head, notifier);
}

void mdss_mdp_ctl_notifier_unregister(struct mdss_mdp_ctl *ctl,
	struct notifier_block *notifier)
{
	blocking_notifier_chain_unregister(&ctl->notifier_head, notifier);
}

int mdss_mdp_ctl_notify(struct mdss_mdp_ctl *ctl, int event)
{
	return blocking_notifier_call_chain(&ctl->notifier_head, event, ctl);
}

int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id)
{
	int i;
	struct mdss_mdp_ctl *ctl;
	struct mdss_data_type *mdata;
	u32 mixer_cnt = 0;

	mutex_lock(&mdss_mdp_ctl_lock);
	mdata = mdss_mdp_get_mdata();
	for (i = 0; i < mdata->nctl; i++) {
		ctl = mdata->ctl_off + i;
		if ((ctl->power_on) && (ctl->mfd) &&
				(ctl->mfd->index == fb_num)) {
			if (ctl->mixer_left) {
				mixer_id[mixer_cnt] = ctl->mixer_left->num;
				mixer_cnt++;
			}
			if (mixer_cnt && ctl->mixer_right) {
				mixer_id[mixer_cnt] = ctl->mixer_right->num;
				mixer_cnt++;
			}
			if (mixer_cnt)
				break;
		}
	}
	mutex_unlock(&mdss_mdp_ctl_lock);
	return mixer_cnt;
}

/**
 * mdss_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
 * @ctl: Pointer to ctl structure to be switched.
 * @return_type: wb_type of the ctl to be switched to.
 *
 * Virtual mixer switch should be performed only when there is no
 * dedicated wfd block and writeback block is shared.
 */
struct mdss_mdp_ctl *mdss_mdp_ctl_mixer_switch(struct mdss_mdp_ctl *ctl,
					       u32 return_type)
{
	int i;
	struct mdss_data_type *mdata = ctl->mdata;

	if (ctl->wb_type == return_type) {
		mdata->mixer_switched = false;
		return ctl;
	}
	for (i = 0; i <= mdata->nctl; i++) {
		if (mdata->ctl_off[i].wb_type == return_type) {
			pr_debug("switching mixer from ctl=%d to ctl=%d\n",
					ctl->num, mdata->ctl_off[i].num);
			mdata->mixer_switched = true;
			return mdata->ctl_off + i;
		}
	}
	pr_err("unable to switch mixer to type=%d\n", return_type);
	return NULL;
}

static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer)
{
	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
		if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
			return MDSS_MDP_CTL_X_LAYER_5;
		else
			return MDSS_MDP_REG_CTL_LAYER(mixer->num);
	} else {
		return MDSS_MDP_REG_CTL_LAYER(mixer->num +
				MDSS_MDP_INTF_LAYERMIXER3);
	}
}

u32 mdss_mdp_get_mixercfg(struct mdss_mdp_mixer *mixer)
{
	/* both the mixer and its ctl must be valid before reading */
	if (!mixer || !mixer->ctl)
		return 0;

	return mdss_mdp_ctl_read(mixer->ctl,
			__mdss_mdp_ctl_get_mixer_off(mixer));
}

static int __mdss_mdp_mixer_handoff_helper(struct mdss_mdp_mixer *mixer,
	struct mdss_mdp_pipe *pipe)
{
	int rc = 0;

	if (!mixer) {
		rc = -EINVAL;
		goto error;
	}

	if (mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] != NULL) {
		pr_err("More than one pipe staged on mixer num %d\n",
			mixer->num);
		rc = -EINVAL;
		goto error;
	}

	pr_debug("Staging pipe num %d on mixer num %d\n",
		pipe->num, mixer->num);
	mixer->stage_pipe[MDSS_MDP_STAGE_UNUSED] = pipe;
	pipe->mixer = mixer;
	pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;

error:
	return rc;
}

/**
 * mdss_mdp_mixer_handoff() - Stages a given pipe on the appropriate mixer
 * @ctl:  pointer to the control structure associated with the overlay device.
 * @num:  the mixer number on which the pipe needs to be staged.
 * @pipe: pointer to the pipe to be staged.
 *
 * Function stages a given pipe on either the left mixer or the right mixer
 * for the control structure based on the mixer number. If the input mixer
 * number does not match either of the mixers then an error is returned.
 * This function is called during overlay handoff when certain pipes are
 * already staged by the bootloader.
 */
int mdss_mdp_mixer_handoff(struct mdss_mdp_ctl *ctl, u32 num,
	struct mdss_mdp_pipe *pipe)
{
	int rc = 0;
	struct mdss_mdp_mixer *mx_left = ctl->mixer_left;
	struct mdss_mdp_mixer *mx_right = ctl->mixer_right;

	/*
	 * For performance calculations, stage the handed off pipe
	 * as MDSS_MDP_STAGE_UNUSED
	 */
	if (mx_left && (mx_left->num == num)) {
		rc = __mdss_mdp_mixer_handoff_helper(mx_left, pipe);
	} else if (mx_right && (mx_right->num == num)) {
		rc = __mdss_mdp_mixer_handoff_helper(mx_right, pipe);
	} else {
		pr_err("pipe num %d staged on unallocated mixer num %d\n",
			pipe->num, num);
		rc = -EINVAL;
	}

	return rc;
}

static void mdss_mdp_xlog_mixer_reg(struct mdss_mdp_ctl *ctl)
{
	int i, off;
	u32 data[MDSS_MDP_INTF_MAX_LAYERMIXER];

	for (i = 0; i < MDSS_MDP_INTF_MAX_LAYERMIXER; i++) {
		off = MDSS_MDP_REG_CTL_LAYER(i);
		data[i] = mdss_mdp_ctl_read(ctl, off);
	}
	MDSS_XLOG(data[MDSS_MDP_INTF_LAYERMIXER0],
		data[MDSS_MDP_INTF_LAYERMIXER1],
		data[MDSS_MDP_INTF_LAYERMIXER2],
		data[MDSS_MDP_INTF_LAYERMIXER3], off);
}

#if defined(CONFIG_FB_MSM_MDSS_DSI_DBG)
void mdss_mdp_mixer_read(void)
{
	int i, off;
	u32 data[4];

	for (i = 0; i < 4; i++) {
		off = MDSS_MDP_REG_CTL_LAYER(i);
		data[i] = mdss_mdp_ctl_read(commit_ctl, off);
	}
	xlog(__func__, data[0], data[1], data[2], data[3], off, 0);
}
#endif