mdp.c 79 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279
  1. /* drivers/video/msm_fb/mdp.c
  2. *
  3. * MSM MDP Interface (used by framebuffer core)
  4. *
  5. * Copyright (c) 2007-2013, 2016 The Linux Foundation. All rights reserved.
  6. * Copyright (C) 2007 Google Incorporated
  7. *
  8. * This software is licensed under the terms of the GNU General Public
  9. * License version 2, as published by the Free Software Foundation, and
  10. * may be copied, distributed, and modified under those terms.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include <linux/module.h>
  18. #include <linux/kernel.h>
  19. #include <linux/sched.h>
  20. #include <linux/time.h>
  21. #include <linux/init.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/hrtimer.h>
  25. #include <linux/clk.h>
  26. #include <mach/hardware.h>
  27. #include <linux/io.h>
  28. #include <linux/debugfs.h>
  29. #include <linux/delay.h>
  30. #include <linux/mutex.h>
  31. #include <linux/pm_runtime.h>
  32. #include <linux/regulator/consumer.h>
  33. #include <asm/system.h>
  34. #include <asm/mach-types.h>
  35. #include <linux/semaphore.h>
  36. #include <linux/uaccess.h>
  37. #include <mach/event_timer.h>
  38. #include <mach/clk.h>
  39. #include "mdp.h"
  40. #include "msm_fb.h"
  41. #ifdef CONFIG_FB_MSM_MDP40
  42. #include "mdp4.h"
  43. #endif
  44. #include "mipi_dsi.h"
/* ---- File-scope driver state ---- */
uint32 mdp4_extn_disp;
/* MDP core / pixel / LUT clock handles, acquired at probe time */
static struct clk *mdp_clk;
static struct clk *mdp_pclk;
static struct clk *mdp_lut_clk;
int mdp_rev;			/* hardware revision (MDP_REV_xx), set at probe */
int mdp_iommu_split_domain;
u32 mdp_max_clk = 200000000;	/* Hz */
u64 mdp_max_bw = 2000000000;
static struct platform_device *mdp_init_pdev;
/* Power rails: MDP footswitch plus DSI PLL supplies */
static struct regulator *footswitch, *dsi_pll_vdda, *dsi_pll_vddio;
static unsigned int mdp_footswitch_on;
/* PPP (blit engine) completion/serialization */
struct completion mdp_ppp_comp;
struct semaphore mdp_ppp_mutex;
struct semaphore mdp_pipe_ctrl_mutex;
unsigned long mdp_timer_duration = (HZ/20); /* 50 msecond */
boolean mdp_ppp_waiting = FALSE;
/* Underflow counters (debug/statistics) */
uint32 mdp_tv_underflow_cnt;
uint32 mdp_lcdc_underflow_cnt;
boolean mdp_current_clk_on = FALSE;
boolean mdp_is_in_isr = FALSE;
struct vsync vsync_cntrl;
/*
 * legacy mdp_in_processing is only for DMA2-MDDI
 * this applies to DMA2 block only
 */
uint32 mdp_in_processing = FALSE;
#ifdef CONFIG_FB_MSM_MDP40
uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
#else
uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
#endif
MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
/* Per-block power reference counts, manipulated by mdp_pipe_ctrl() */
atomic_t mdp_block_power_cnt[MDP_MAX_BLOCK];
spinlock_t mdp_spin_lock;
struct workqueue_struct *mdp_dma_wq;	/*mdp dma wq */
struct workqueue_struct *mdp_vsync_wq;	/*mdp vsync wq */
struct workqueue_struct *mdp_hist_wq;	/*mdp histogram wq */
bool mdp_pp_initialized = FALSE;
static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp mdp pipe ctrl wq */
static struct delayed_work mdp_pipe_ctrl_worker;
static boolean mdp_suspended = FALSE;
ulong mdp4_display_intf;
DEFINE_MUTEX(mdp_suspend_mutex);
/* DMA channel bookkeeping; visibility depends on the MDP generation */
#ifdef CONFIG_FB_MSM_MDP40
struct mdp_dma_data dma2_data;
struct mdp_dma_data dma_s_data;
struct mdp_dma_data dma_e_data;
#else
static struct mdp_dma_data dma2_data;
static struct mdp_dma_data dma_s_data;
#ifndef CONFIG_FB_MSM_MDP303
static struct mdp_dma_data dma_e_data;
#endif
#endif
#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
struct mdp_dma_data dma_wb_data;
#endif
static struct mdp_dma_data dma3_data;
/* Timing state owned by the mdp_dma2 code (defined elsewhere) */
extern ktime_t mdp_dma2_last_update_time;
extern uint32 mdp_dma2_update_time_in_usec;
extern int mdp_lcd_rd_cnt_offset_slow;
extern int mdp_lcd_rd_cnt_offset_fast;
extern int mdp_usec_diff_threshold;
extern int first_pixel_start_x;
extern int first_pixel_start_y;
#ifdef MSM_FB_ENABLE_DBGFS
struct dentry *mdp_dir;
#endif
#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
#else
#define mdp_suspend NULL
#endif
struct timeval mdp_dma2_timeval;
struct timeval mdp_ppp_timeval;
#ifdef CONFIG_HAS_EARLYSUSPEND
static struct early_suspend early_suspend;
#endif
static u32 mdp_irq;
static uint32 mdp_prim_panel_type = NO_PANEL;
#ifndef CONFIG_FB_MSM_MDP22
/* Histogram-LUT management list; guarded by mdp_hist_lut_list_mutex */
struct list_head mdp_hist_lut_list;
DEFINE_MUTEX(mdp_hist_lut_list_mutex);
  128. uint32_t mdp_block2base(uint32_t block)
  129. {
  130. uint32_t base = 0x0;
  131. switch (block) {
  132. case MDP_BLOCK_DMA_P:
  133. base = 0x90000;
  134. break;
  135. case MDP_BLOCK_DMA_S:
  136. base = 0xA0000;
  137. break;
  138. case MDP_BLOCK_VG_1:
  139. base = 0x20000;
  140. break;
  141. case MDP_BLOCK_VG_2:
  142. base = 0x30000;
  143. break;
  144. case MDP_BLOCK_RGB_1:
  145. base = 0x40000;
  146. break;
  147. case MDP_BLOCK_RGB_2:
  148. base = 0x50000;
  149. break;
  150. case MDP_BLOCK_OVERLAY_0:
  151. base = 0x10000;
  152. break;
  153. case MDP_BLOCK_OVERLAY_1:
  154. base = 0x18000;
  155. break;
  156. case MDP_BLOCK_OVERLAY_2:
  157. base = (mdp_rev >= MDP_REV_43) ? 0x88000 : 0;
  158. break;
  159. default:
  160. break;
  161. }
  162. return base;
  163. }
  164. static uint32_t mdp_pp_block2hist_lut(uint32_t block)
  165. {
  166. uint32_t valid = 0;
  167. switch (block) {
  168. case MDP_BLOCK_DMA_P:
  169. valid = (mdp_rev >= MDP_REV_40) ? 1 : 0;
  170. break;
  171. case MDP_BLOCK_DMA_S:
  172. valid = (mdp_rev >= MDP_REV_40) ? 1 : 0;
  173. break;
  174. case MDP_BLOCK_VG_1:
  175. valid = (mdp_rev >= MDP_REV_40) ? 1 : 0;
  176. break;
  177. case MDP_BLOCK_VG_2:
  178. valid = (mdp_rev >= MDP_REV_40) ? 1 : 0;
  179. break;
  180. default:
  181. break;
  182. }
  183. return valid;
  184. }
  185. static void mdp_hist_lut_init_mgmt(struct mdp_hist_lut_mgmt *mgmt,
  186. uint32_t block)
  187. {
  188. mutex_init(&mgmt->lock);
  189. mgmt->block = block;
  190. mutex_lock(&mdp_hist_lut_list_mutex);
  191. list_add(&mgmt->list, &mdp_hist_lut_list);
  192. mutex_unlock(&mdp_hist_lut_list_mutex);
  193. }
  194. static int mdp_hist_lut_destroy(void)
  195. {
  196. struct mdp_hist_lut_mgmt *temp;
  197. struct list_head *pos, *q;
  198. mutex_lock(&mdp_hist_lut_list_mutex);
  199. list_for_each_safe(pos, q, &mdp_hist_lut_list) {
  200. temp = list_entry(pos, struct mdp_hist_lut_mgmt, list);
  201. list_del(pos);
  202. kfree(temp);
  203. }
  204. mutex_unlock(&mdp_hist_lut_list_mutex);
  205. return 0;
  206. }
  207. static int mdp_hist_lut_init(void)
  208. {
  209. struct mdp_hist_lut_mgmt *temp;
  210. if (mdp_pp_initialized)
  211. return -EEXIST;
  212. INIT_LIST_HEAD(&mdp_hist_lut_list);
  213. if (mdp_rev >= MDP_REV_30) {
  214. temp = kmalloc(sizeof(struct mdp_hist_lut_mgmt), GFP_KERNEL);
  215. if (!temp)
  216. goto exit;
  217. mdp_hist_lut_init_mgmt(temp, MDP_BLOCK_DMA_P);
  218. }
  219. if (mdp_rev >= MDP_REV_40) {
  220. temp = kmalloc(sizeof(struct mdp_hist_lut_mgmt), GFP_KERNEL);
  221. if (!temp)
  222. goto exit_list;
  223. mdp_hist_lut_init_mgmt(temp, MDP_BLOCK_VG_1);
  224. temp = kmalloc(sizeof(struct mdp_hist_lut_mgmt), GFP_KERNEL);
  225. if (!temp)
  226. goto exit_list;
  227. mdp_hist_lut_init_mgmt(temp, MDP_BLOCK_VG_2);
  228. }
  229. if (mdp_rev > MDP_REV_42) {
  230. temp = kmalloc(sizeof(struct mdp_hist_lut_mgmt), GFP_KERNEL);
  231. if (!temp)
  232. goto exit_list;
  233. mdp_hist_lut_init_mgmt(temp, MDP_BLOCK_DMA_S);
  234. }
  235. return 0;
  236. exit_list:
  237. mdp_hist_lut_destroy();
  238. exit:
  239. pr_err("Failed initializing histogram LUT memory\n");
  240. return -ENOMEM;
  241. }
  242. static int mdp_hist_lut_block2mgmt(uint32_t block,
  243. struct mdp_hist_lut_mgmt **mgmt)
  244. {
  245. struct mdp_hist_lut_mgmt *temp, *output;
  246. int ret = 0;
  247. output = NULL;
  248. mutex_lock(&mdp_hist_lut_list_mutex);
  249. list_for_each_entry(temp, &mdp_hist_lut_list, list) {
  250. if (temp->block == block)
  251. output = temp;
  252. }
  253. mutex_unlock(&mdp_hist_lut_list_mutex);
  254. if (output == NULL)
  255. ret = -EINVAL;
  256. else
  257. *mgmt = output;
  258. return ret;
  259. }
  260. #define MDP_HIST_LUT_SIZE (256)
  261. static int mdp_hist_lut_write_off(struct mdp_hist_lut_data *data,
  262. struct mdp_hist_lut_info *info, uint32_t offset)
  263. {
  264. int i;
  265. uint32_t element[MDP_HIST_LUT_SIZE];
  266. uint32_t base = mdp_block2base(info->block);
  267. uint32_t sel = info->bank_sel;
  268. if (data->len != MDP_HIST_LUT_SIZE) {
  269. pr_err("%s: data->len != %d", __func__, MDP_HIST_LUT_SIZE);
  270. return -EINVAL;
  271. }
  272. if (copy_from_user(&element, data->data,
  273. MDP_HIST_LUT_SIZE * sizeof(uint32_t))) {
  274. pr_err("%s: Error copying histogram data", __func__);
  275. return -ENOMEM;
  276. }
  277. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  278. for (i = 0; i < MDP_HIST_LUT_SIZE; i++)
  279. MDP_OUTP(MDP_BASE + base + offset + (0x400*(sel)) + (4*i),
  280. element[i]);
  281. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  282. return 0;
  283. }
  284. static int mdp_hist_lut_write(struct mdp_hist_lut_data *data,
  285. struct mdp_hist_lut_info *info)
  286. {
  287. int ret = 0;
  288. if (data->block != info->block) {
  289. ret = -1;
  290. pr_err("%s, data/info mdp_block mismatch! %d != %d\n",
  291. __func__, data->block, info->block);
  292. goto error;
  293. }
  294. switch (data->block) {
  295. case MDP_BLOCK_VG_1:
  296. case MDP_BLOCK_VG_2:
  297. ret = mdp_hist_lut_write_off(data, info, 0x3400);
  298. break;
  299. case MDP_BLOCK_DMA_P:
  300. case MDP_BLOCK_DMA_S:
  301. ret = mdp_hist_lut_write_off(data, info, 0x4800);
  302. break;
  303. default:
  304. ret = -EINVAL;
  305. goto error;
  306. }
  307. error:
  308. return ret;
  309. }
  310. #define MDP_HIST_LUT_VG_EN_MASK (0x20000)
  311. #define MDP_HIST_LUT_VG_EN_SHIFT (17)
  312. #define MDP_HIST_LUT_VG_EN_OFFSET (0x0058)
  313. #define MDP_HIST_LUT_VG_SEL_OFFSET (0x0064)
  314. static void mdp_hist_lut_commit_vg(struct mdp_hist_lut_info *info)
  315. {
  316. uint32_t out_en, temp_en;
  317. uint32_t base = mdp_block2base(info->block);
  318. temp_en = (info->is_enabled) ? (1 << MDP_HIST_LUT_VG_EN_SHIFT) : 0x0;
  319. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  320. out_en = inpdw(MDP_BASE + base + MDP_HIST_LUT_VG_EN_OFFSET) &
  321. ~MDP_HIST_LUT_VG_EN_MASK;
  322. MDP_OUTP(MDP_BASE + base + MDP_HIST_LUT_VG_EN_OFFSET, out_en | temp_en);
  323. if (info->has_sel_update)
  324. MDP_OUTP(MDP_BASE + base + MDP_HIST_LUT_VG_SEL_OFFSET,
  325. info->bank_sel);
  326. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  327. }
  328. #define MDP_HIST_LUT_DMA_EN_MASK (0x7)
  329. #define MDP_HIST_LUT_DMA_SEL_MASK (0x400)
  330. #define MDP_HIST_LUT_DMA_SEL_SHIFT (10)
  331. #define MDP_HIST_LUT_DMA_P_OFFSET (0x0070)
  332. #define MDP_HIST_LUT_DMA_S_OFFSET (0x0028)
  333. static void mdp_hist_lut_commit_dma(struct mdp_hist_lut_info *info)
  334. {
  335. uint32_t out, temp, mask;
  336. uint32_t base = mdp_block2base(info->block);
  337. uint32_t offset = (info->block == MDP_BLOCK_DMA_P) ?
  338. MDP_HIST_LUT_DMA_P_OFFSET : MDP_HIST_LUT_DMA_S_OFFSET;
  339. mask = MDP_HIST_LUT_DMA_EN_MASK;
  340. temp = (info->is_enabled) ? 0x7 : 0x0;
  341. if (info->has_sel_update) {
  342. mask |= MDP_HIST_LUT_DMA_SEL_MASK;
  343. temp |= ((info->bank_sel & 0x1) << MDP_HIST_LUT_DMA_SEL_SHIFT);
  344. }
  345. out = inpdw(MDP_BASE + base + offset) & ~mask;
  346. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  347. MDP_OUTP(MDP_BASE + base + offset, out | temp);
  348. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  349. }
  350. static void mdp_hist_lut_commit_info(struct mdp_hist_lut_info *info)
  351. {
  352. switch (info->block) {
  353. case MDP_BLOCK_VG_1:
  354. case MDP_BLOCK_VG_2:
  355. mdp_hist_lut_commit_vg(info);
  356. break;
  357. case MDP_BLOCK_DMA_P:
  358. case MDP_BLOCK_DMA_S:
  359. mdp_hist_lut_commit_dma(info);
  360. break;
  361. default:
  362. goto error;
  363. }
  364. error:
  365. return;
  366. }
  367. static void mdp_hist_lut_update_info(struct mdp_hist_lut_info *info, int ops)
  368. {
  369. info->bank_sel = (ops & 0x8) >> 3;
  370. info->is_enabled = (ops & 0x1) ? TRUE : FALSE;
  371. info->has_sel_update = (ops & 0x10) ? TRUE : FALSE;
  372. }
  373. int mdp_hist_lut_config(struct mdp_hist_lut_data *data)
  374. {
  375. struct mdp_hist_lut_mgmt *mgmt = NULL;
  376. struct mdp_hist_lut_info info;
  377. int ret = 0;
  378. if (!mdp_pp_block2hist_lut(data->block)) {
  379. ret = -ENOTTY;
  380. goto error;
  381. }
  382. ret = mdp_hist_lut_block2mgmt(data->block, &mgmt);
  383. if (ret)
  384. goto error;
  385. mutex_lock(&mgmt->lock);
  386. info.block = mgmt->block;
  387. mdp_hist_lut_update_info(&info, data->ops);
  388. switch ((data->ops & 0x6) >> 1) {
  389. case 0x1:
  390. pr_info("%s: histogram LUT read not supported\n", __func__);
  391. break;
  392. case 0x2:
  393. ret = mdp_hist_lut_write(data, &info);
  394. if (ret)
  395. goto error_lock;
  396. break;
  397. default:
  398. break;
  399. }
  400. mdp_hist_lut_commit_info(&info);
  401. error_lock:
  402. mutex_unlock(&mgmt->lock);
  403. error:
  404. return ret;
  405. }
  406. DEFINE_MUTEX(mdp_lut_push_sem);
  407. static int mdp_lut_i;
  408. static int mdp_lut_hw_update(struct fb_cmap *cmap)
  409. {
  410. int i;
  411. u16 *c[3];
  412. u16 r, g, b;
  413. c[0] = cmap->green;
  414. c[1] = cmap->blue;
  415. c[2] = cmap->red;
  416. if (cmap->start > MDP_HIST_LUT_SIZE || cmap->len > MDP_HIST_LUT_SIZE ||
  417. (cmap->start + cmap->len > MDP_HIST_LUT_SIZE)) {
  418. pr_err("mdp_lut_hw_update invalid arguments\n");
  419. return -EINVAL;
  420. }
  421. for (i = 0; i < cmap->len; i++) {
  422. if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
  423. copy_from_user(&g, cmap->green++, sizeof(g)) ||
  424. copy_from_user(&b, cmap->blue++, sizeof(b)))
  425. return -EFAULT;
  426. #ifdef CONFIG_FB_MSM_MDP40
  427. MDP_OUTP(MDP_BASE + 0x94800 +
  428. #else
  429. MDP_OUTP(MDP_BASE + 0x93800 +
  430. #endif
  431. (0x400*mdp_lut_i) + cmap->start*4 + i*4,
  432. ((g & 0xff) |
  433. ((b & 0xff) << 8) |
  434. ((r & 0xff) << 16)));
  435. }
  436. return 0;
  437. }
  438. static int mdp_lut_push;
  439. static int mdp_lut_push_i;
  440. static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
  441. {
  442. int ret;
  443. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  444. mdp_clk_ctrl(1);
  445. ret = mdp_lut_hw_update(cmap);
  446. mdp_clk_ctrl(0);
  447. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  448. if (ret)
  449. return ret;
  450. mutex_lock(&mdp_lut_push_sem);
  451. mdp_lut_push = 1;
  452. mdp_lut_push_i = mdp_lut_i;
  453. mutex_unlock(&mdp_lut_push_sem);
  454. mdp_lut_i = (mdp_lut_i + 1)%2;
  455. return 0;
  456. }
  457. static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
  458. {
  459. int ret;
  460. uint32_t out;
  461. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  462. mdp_clk_ctrl(1);
  463. ret = mdp_lut_hw_update(cmap);
  464. if (ret) {
  465. mdp_clk_ctrl(0);
  466. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  467. return ret;
  468. }
  469. /*mask off non LUT select bits*/
  470. out = inpdw(MDP_BASE + 0x90070);
  471. MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x7 | out);
  472. mdp_clk_ctrl(0);
  473. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  474. mdp_lut_i = (mdp_lut_i + 1)%2;
  475. return 0;
  476. }
  477. static void mdp_lut_enable(void)
  478. {
  479. uint32_t out;
  480. if (mdp_lut_push) {
  481. mutex_lock(&mdp_lut_push_sem);
  482. mdp_lut_push = 0;
  483. out = inpdw(MDP_BASE + 0x90070) & ~((0x1 << 10) | 0x7);
  484. MDP_OUTP(MDP_BASE + 0x90070,
  485. (mdp_lut_push_i << 10) | 0x7 | out);
  486. mutex_unlock(&mdp_lut_push_sem);
  487. }
  488. }
/* Histogram bin counts by hardware revision */
#define MDP_REV42_HIST_MAX_BIN 128
#define MDP_REV41_HIST_MAX_BIN 32
/* Per-channel histogram data RAM offsets: 32-bin layout ... */
#define MDP_HIST_DATA32_R_OFF 0x0100
#define MDP_HIST_DATA32_G_OFF 0x0200
#define MDP_HIST_DATA32_B_OFF 0x0300
/* ... and 128-bin layout */
#define MDP_HIST_DATA128_R_OFF 0x0400
#define MDP_HIST_DATA128_G_OFF 0x0800
#define MDP_HIST_DATA128_B_OFF 0x0C00
#define MDP_HIST_DATA_LUMA_OFF 0x0200
/* Extra info registers (min/max etc. — exact meaning is hardware-defined) */
#define MDP_HIST_EXTRA_DATA0_OFF 0x0028
#define MDP_HIST_EXTRA_DATA1_OFF 0x002C
/* One management node per histogram-capable block, indexed by MDP_HIST_MGMT_* */
struct mdp_hist_mgmt *mdp_hist_mgmt_array[MDP_HIST_MGMT_MAX];
  501. void __mdp_histogram_kickoff(struct mdp_hist_mgmt *mgmt)
  502. {
  503. char *mdp_hist_base = MDP_BASE + mgmt->base;
  504. if (mgmt->mdp_is_hist_data == TRUE) {
  505. MDP_OUTP(mdp_hist_base + 0x0004, mgmt->frame_cnt);
  506. MDP_OUTP(mdp_hist_base, 1);
  507. }
  508. }
  509. void __mdp_histogram_reset(struct mdp_hist_mgmt *mgmt)
  510. {
  511. char *mdp_hist_base = MDP_BASE + mgmt->base;
  512. MDP_OUTP(mdp_hist_base + 0x000C, 1);
  513. }
/* Deferred histogram reader, defined later in this file. */
static void mdp_hist_read_work(struct work_struct *data);
/*
 * mdp_hist_init_mgmt() - one-time setup of a per-block histogram manager.
 * Resolves the block's IRQ term, interrupt bit, bin count, register base
 * offset and slot in mdp_hist_mgmt_array[] (unknown blocks fall back to
 * the DMA_P configuration), then allocates the three per-channel bin
 * buffers plus the extra-info buffer and registers the node.
 * Returns 0 or -ENOMEM (with all partial allocations freed).
 * NOTE(review): mgmt->num_bins is zeroed here but never set to the
 * computed `bins` — presumably assigned elsewhere; verify at call sites.
 */
static int mdp_hist_init_mgmt(struct mdp_hist_mgmt *mgmt, uint32_t block)
{
	uint32_t bins, extra, index, intr = 0, term = 0;
	init_completion(&mgmt->mdp_hist_comp);
	mutex_init(&mgmt->mdp_hist_mutex);
	mutex_init(&mgmt->mdp_do_hist_mutex);
	mgmt->block = block;
	mgmt->base = mdp_block2base(block);
	/* start with collection fully idle/invalid */
	mgmt->mdp_is_hist_start = FALSE;
	mgmt->mdp_is_hist_data = FALSE;
	mgmt->mdp_is_hist_valid = FALSE;
	mgmt->mdp_is_hist_init = FALSE;
	mgmt->frame_cnt = 0;
	mgmt->bit_mask = 0;
	mgmt->num_bins = 0;
	/* Per-block parameters: IRQ term, interrupt bit, bin count, extra
	 * register count, histogram sub-block offset, array slot. Bin count
	 * and offsets vary with the hardware revision for DMA_P. */
	switch (block) {
	case MDP_BLOCK_DMA_P:
		term = MDP_HISTOGRAM_TERM_DMA_P;
		intr = (mdp_rev >= MDP_REV_40) ? INTR_DMA_P_HISTOGRAM :
								MDP_HIST_DONE;
		bins = (mdp_rev >= MDP_REV_42) ? MDP_REV42_HIST_MAX_BIN :
								MDP_REV41_HIST_MAX_BIN;
		extra = 2;
		mgmt->base += (mdp_rev >= MDP_REV_40) ? 0x5000 : 0x4000;
		index = MDP_HIST_MGMT_DMA_P;
		break;
	case MDP_BLOCK_DMA_S:
		term = MDP_HISTOGRAM_TERM_DMA_S;
		intr = INTR_DMA_S_HISTOGRAM;
		bins = MDP_REV42_HIST_MAX_BIN;
		extra = 2;
		mgmt->base += 0x5000;
		index = MDP_HIST_MGMT_DMA_S;
		break;
	case MDP_BLOCK_VG_1:
		term = MDP_HISTOGRAM_TERM_VG_1;
		intr = INTR_VG1_HISTOGRAM;
		bins = MDP_REV42_HIST_MAX_BIN;
		extra = 1;
		mgmt->base += 0x6000;
		index = MDP_HIST_MGMT_VG_1;
		break;
	case MDP_BLOCK_VG_2:
		term = MDP_HISTOGRAM_TERM_VG_2;
		intr = INTR_VG2_HISTOGRAM;
		bins = MDP_REV42_HIST_MAX_BIN;
		extra = 1;
		mgmt->base += 0x6000;
		index = MDP_HIST_MGMT_VG_2;
		break;
	default:
		/* unknown block: fall back to the DMA_P configuration */
		term = MDP_HISTOGRAM_TERM_DMA_P;
		intr = (mdp_rev >= MDP_REV_40) ? INTR_DMA_P_HISTOGRAM :
								MDP_HIST_DONE;
		bins = (mdp_rev >= MDP_REV_42) ? MDP_REV42_HIST_MAX_BIN :
								MDP_REV41_HIST_MAX_BIN;
		extra = 2;
		mgmt->base += (mdp_rev >= MDP_REV_40) ? 0x5000 : 0x4000;
		index = MDP_HIST_MGMT_DMA_P;
	}
	mgmt->irq_term = term;
	mgmt->intr = intr;
	/* per-channel bin buffers; freed in reverse order on failure */
	mgmt->c0 = kmalloc(bins * sizeof(uint32_t), GFP_KERNEL);
	if (mgmt->c0 == NULL)
		goto error;
	mgmt->c1 = kmalloc(bins * sizeof(uint32_t), GFP_KERNEL);
	if (mgmt->c1 == NULL)
		goto error_1;
	mgmt->c2 = kmalloc(bins * sizeof(uint32_t), GFP_KERNEL);
	if (mgmt->c2 == NULL)
		goto error_2;
	mgmt->extra_info = kmalloc(extra * sizeof(uint32_t), GFP_KERNEL);
	if (mgmt->extra_info == NULL)
		goto error_extra;
	INIT_WORK(&mgmt->mdp_histogram_worker, mdp_hist_read_work);
	mgmt->hist = NULL;
	mdp_hist_mgmt_array[index] = mgmt;
	return 0;
error_extra:
	kfree(mgmt->c2);
error_2:
	kfree(mgmt->c1);
error_1:
	kfree(mgmt->c0);
error:
	return -ENOMEM;
}
  602. static void mdp_hist_del_mgmt(struct mdp_hist_mgmt *mgmt)
  603. {
  604. kfree(mgmt->extra_info);
  605. kfree(mgmt->c2);
  606. kfree(mgmt->c1);
  607. kfree(mgmt->c0);
  608. }
  609. static int mdp_histogram_destroy(void)
  610. {
  611. struct mdp_hist_mgmt *temp;
  612. int i;
  613. for (i = 0; i < MDP_HIST_MGMT_MAX; i++) {
  614. temp = mdp_hist_mgmt_array[i];
  615. if (!temp)
  616. continue;
  617. mdp_hist_del_mgmt(temp);
  618. kfree(temp);
  619. mdp_hist_mgmt_array[i] = NULL;
  620. }
  621. return 0;
  622. }
  623. static int mdp_histogram_init(void)
  624. {
  625. struct mdp_hist_mgmt *temp;
  626. int i, ret;
  627. if (mdp_pp_initialized)
  628. return -EEXIST;
  629. mdp_hist_wq = alloc_workqueue("mdp_hist_wq",
  630. WQ_NON_REENTRANT | WQ_UNBOUND, 0);
  631. for (i = 0; i < MDP_HIST_MGMT_MAX; i++)
  632. mdp_hist_mgmt_array[i] = NULL;
  633. if (mdp_rev >= MDP_REV_30) {
  634. temp = kmalloc(sizeof(struct mdp_hist_mgmt), GFP_KERNEL);
  635. if (!temp)
  636. goto exit;
  637. ret = mdp_hist_init_mgmt(temp, MDP_BLOCK_DMA_P);
  638. if (ret) {
  639. kfree(temp);
  640. goto exit;
  641. }
  642. }
  643. if (mdp_rev >= MDP_REV_40) {
  644. temp = kmalloc(sizeof(struct mdp_hist_mgmt), GFP_KERNEL);
  645. if (!temp)
  646. goto exit_list;
  647. ret = mdp_hist_init_mgmt(temp, MDP_BLOCK_VG_1);
  648. if (ret)
  649. goto exit_list;
  650. temp = kmalloc(sizeof(struct mdp_hist_mgmt), GFP_KERNEL);
  651. if (!temp)
  652. goto exit_list;
  653. ret = mdp_hist_init_mgmt(temp, MDP_BLOCK_VG_2);
  654. if (ret)
  655. goto exit_list;
  656. }
  657. if (mdp_rev >= MDP_REV_42) {
  658. temp = kmalloc(sizeof(struct mdp_hist_mgmt), GFP_KERNEL);
  659. if (!temp)
  660. goto exit_list;
  661. ret = mdp_hist_init_mgmt(temp, MDP_BLOCK_DMA_S);
  662. if (ret)
  663. goto exit_list;
  664. }
  665. return 0;
  666. exit_list:
  667. mdp_histogram_destroy();
  668. exit:
  669. return -ENOMEM;
  670. }
  671. int mdp_histogram_block2mgmt(uint32_t block, struct mdp_hist_mgmt **mgmt)
  672. {
  673. struct mdp_hist_mgmt *temp, *output;
  674. int i, ret = 0;
  675. output = NULL;
  676. for (i = 0; i < MDP_HIST_MGMT_MAX; i++) {
  677. temp = mdp_hist_mgmt_array[i];
  678. if (!temp)
  679. continue;
  680. if (temp->block == block) {
  681. output = temp;
  682. break;
  683. }
  684. }
  685. if (output == NULL)
  686. ret = -EINVAL;
  687. else
  688. *mgmt = output;
  689. return ret;
  690. }
/*
 * Arm histogram collection for @mgmt's block.  Caller holds
 * mgmt->mdp_hist_mutex.  Sequence: quiesce any in-flight collection,
 * flush the reader worker (the mutex is dropped around
 * cancel_work_sync() because the worker takes the same mutex), then
 * re-program interrupts, frame count and bit mask, and start a reset
 * sequence via __mdp_histogram_reset().
 *
 * Returns 0 on success, -EINVAL if collection is already running.
 */
static int mdp_histogram_enable(struct mdp_hist_mgmt *mgmt)
{
	uint32_t base;
	unsigned long flag;

	if (mgmt->mdp_is_hist_data == TRUE) {
		pr_err("%s histogram already started\n", __func__);
		return -EINVAL;
	}

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	base = (uint32_t) (MDP_BASE + mgmt->base);

	/*First make sure that device is not collecting histogram*/
	mgmt->mdp_is_hist_data = FALSE;
	mgmt->mdp_is_hist_valid = FALSE;
	mgmt->mdp_is_hist_init = FALSE;
	spin_lock_irqsave(&mdp_spin_lock, flag);
	/* Ack and mask this block's histogram interrupt at the MDP level,
	 * then disable the block-local DONE/RESET_SEQ_DONE sources. */
	outp32(MDP_INTR_CLEAR, mgmt->intr);
	mdp_intr_mask &= ~mgmt->intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	MDP_OUTP(base + 0x001C, 0);
	MDP_OUTP(base + 0x0018, INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
	MDP_OUTP(base + 0x0024, 0);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* Worker takes mdp_hist_mutex; drop it to avoid deadlock. */
	mutex_unlock(&mgmt->mdp_hist_mutex);
	cancel_work_sync(&mgmt->mdp_histogram_worker);
	mutex_lock(&mgmt->mdp_hist_mutex);

	/*Then initialize histogram*/
	INIT_COMPLETION(mgmt->mdp_hist_comp);
	spin_lock_irqsave(&mdp_spin_lock, flag);
	MDP_OUTP(base + 0x0018, INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
	MDP_OUTP(base + 0x0010, 1);
	MDP_OUTP(base + 0x001C, INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
	/* Re-enable this block's interrupt at the MDP level. */
	outp32(MDP_INTR_CLEAR, mgmt->intr);
	mdp_intr_mask |= mgmt->intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(mgmt->irq_term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	MDP_OUTP(base + 0x0004, mgmt->frame_cnt);
	/* The bit-mask register is only programmed for non-VG blocks. */
	if (mgmt->block != MDP_BLOCK_VG_1 && mgmt->block != MDP_BLOCK_VG_2)
		MDP_OUTP(base + 0x0008, mgmt->bit_mask);
	mgmt->mdp_is_hist_data = TRUE;
	mgmt->mdp_is_hist_valid = TRUE;
	mgmt->mdp_is_hist_init = FALSE;
	__mdp_histogram_reset(mgmt);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	return 0;
}
/*
 * Stop histogram collection for @mgmt's block.  Caller holds
 * mgmt->mdp_hist_mutex.  The state flags are cleared first so the ISR
 * and worker stop processing; the interrupt is then masked and any
 * reader blocked in mdp_do_histogram() is released so it does not wait
 * for the timeout.
 *
 * Returns 0 on success, -EINVAL if collection was not running.
 */
static int mdp_histogram_disable(struct mdp_hist_mgmt *mgmt)
{
	uint32_t base, status;
	unsigned long flag;

	if (mgmt->mdp_is_hist_data == FALSE) {
		pr_err("%s histogram already stopped\n", __func__);
		return -EINVAL;
	}

	mgmt->mdp_is_hist_data = FALSE;
	mgmt->mdp_is_hist_valid = FALSE;
	mgmt->mdp_is_hist_init = FALSE;

	base = (uint32_t) (MDP_BASE + mgmt->base);

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	spin_lock_irqsave(&mdp_spin_lock, flag);
	/* Ack and mask this block's histogram interrupt. */
	outp32(MDP_INTR_CLEAR, mgmt->intr);
	mdp_intr_mask &= ~mgmt->intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(mgmt->irq_term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);

	/* rev >= 4.2 only: write to +0x20 — presumably a cancel/stop
	 * request; TODO confirm against the MDP register documentation. */
	if (mdp_rev >= MDP_REV_42)
		MDP_OUTP(base + 0x0020, 1);
	/* Disable and clear the block-local DONE/RESET_SEQ_DONE sources. */
	status = inpdw(base + 0x001C);
	status &= ~(INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
	MDP_OUTP(base + 0x001C, status);
	MDP_OUTP(base + 0x0018, INTR_HIST_DONE | INTR_HIST_RESET_SEQ_DONE);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	/* Wake any reader still waiting on a histogram. */
	if (mgmt->hist != NULL) {
		mgmt->hist = NULL;
		complete(&mgmt->mdp_hist_comp);
	}
	return 0;
}
  769. /*call when spanning mgmt_array only*/
  770. int _mdp_histogram_ctrl(boolean en, struct mdp_hist_mgmt *mgmt)
  771. {
  772. int ret = 0;
  773. mutex_lock(&mgmt->mdp_hist_mutex);
  774. if (mgmt->mdp_is_hist_start == TRUE) {
  775. if (en)
  776. ret = mdp_histogram_enable(mgmt);
  777. else
  778. ret = mdp_histogram_disable(mgmt);
  779. }
  780. mutex_unlock(&mgmt->mdp_hist_mutex);
  781. if (en == false)
  782. cancel_work_sync(&mgmt->mdp_histogram_worker);
  783. return ret;
  784. }
  785. int mdp_histogram_ctrl(boolean en, uint32_t block)
  786. {
  787. struct mdp_hist_mgmt *mgmt = NULL;
  788. int ret = 0;
  789. ret = mdp_histogram_block2mgmt(block, &mgmt);
  790. if (ret)
  791. goto error;
  792. ret = _mdp_histogram_ctrl(en, mgmt);
  793. error:
  794. return ret;
  795. }
  796. int mdp_histogram_ctrl_all(boolean en)
  797. {
  798. struct mdp_hist_mgmt *temp;
  799. int i, ret = 0, ret_temp = 0;
  800. for (i = 0; i < MDP_HIST_MGMT_MAX; i++) {
  801. temp = mdp_hist_mgmt_array[i];
  802. if (!temp)
  803. continue;
  804. ret_temp = _mdp_histogram_ctrl(en, temp);
  805. if (ret_temp)
  806. ret = ret_temp;
  807. }
  808. return ret;
  809. }
  810. int mdp_histogram_start(struct mdp_histogram_start_req *req)
  811. {
  812. struct mdp_hist_mgmt *mgmt = NULL;
  813. int ret;
  814. ret = mdp_histogram_block2mgmt(req->block, &mgmt);
  815. if (ret) {
  816. ret = -ENOTTY;
  817. goto error;
  818. }
  819. mutex_lock(&mgmt->mdp_do_hist_mutex);
  820. mutex_lock(&mgmt->mdp_hist_mutex);
  821. if (mgmt->mdp_is_hist_start == TRUE) {
  822. pr_err("%s histogram already started\n", __func__);
  823. ret = -EPERM;
  824. goto error_lock;
  825. }
  826. mgmt->block = req->block;
  827. mgmt->frame_cnt = req->frame_cnt;
  828. mgmt->bit_mask = req->bit_mask;
  829. mgmt->num_bins = req->num_bins;
  830. ret = mdp_histogram_enable(mgmt);
  831. mgmt->mdp_is_hist_start = TRUE;
  832. error_lock:
  833. mutex_unlock(&mgmt->mdp_hist_mutex);
  834. mutex_unlock(&mgmt->mdp_do_hist_mutex);
  835. error:
  836. return ret;
  837. }
/*
 * Userspace-facing stop.  Lock order: mdp_do_hist_mutex then
 * mdp_hist_mutex (same as mdp_histogram_start / mdp_do_histogram).
 * If the panel is already powered off only the software state is
 * cleared and any waiting reader is released.  On the normal path the
 * hist mutex is dropped before cancel_work_sync() because the reader
 * worker takes that mutex itself.
 *
 * Returns 0 on success, -ENOTTY for an unknown block, -EPERM if not
 * started, -EINVAL when stopping with the panel off.
 */
int mdp_histogram_stop(struct fb_info *info, uint32_t block)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
	struct mdp_hist_mgmt *mgmt = NULL;
	int ret;

	ret = mdp_histogram_block2mgmt(block, &mgmt);
	if (ret) {
		ret = -ENOTTY;
		goto error;
	}

	mutex_lock(&mgmt->mdp_do_hist_mutex);
	mutex_lock(&mgmt->mdp_hist_mutex);
	if (mgmt->mdp_is_hist_start == FALSE) {
		pr_err("%s histogram already stopped\n", __func__);
		ret = -EPERM;
		goto error_lock;
	}

	mgmt->mdp_is_hist_start = FALSE;

	if (!mfd->panel_power_on) {
		/* Hardware is off: tear down software state only. */
		mgmt->mdp_is_hist_data = FALSE;
		if (mgmt->hist != NULL) {
			mgmt->hist = NULL;
			complete(&mgmt->mdp_hist_comp);
		}
		ret = -EINVAL;
		goto error_lock;
	}

	ret = mdp_histogram_disable(mgmt);

	mutex_unlock(&mgmt->mdp_hist_mutex);
	cancel_work_sync(&mgmt->mdp_histogram_worker);
	mutex_unlock(&mgmt->mdp_do_hist_mutex);
	return ret;

error_lock:
	mutex_unlock(&mgmt->mdp_hist_mutex);
	mutex_unlock(&mgmt->mdp_do_hist_mutex);
error:
	return ret;
}
/*call from within mdp_hist_mutex context*/
/*
 * Copy one frame of DMA-block histogram data from the hardware bin
 * registers into mgmt->c0/c1/c2 (per-channel counts) and, on
 * rev >= 4.2, the two extra-info words.  The register offsets differ
 * between the 32-bin and 128-bin configurations.
 *
 * Returns 0 on success, -ENOMEM if a destination buffer is missing,
 * -EINVAL if no reader is attached or the bin count mismatches.
 */
static int _mdp_histogram_read_dma_data(struct mdp_hist_mgmt *mgmt)
{
	char *mdp_hist_base;
	uint32_t r_data_offset, g_data_offset, b_data_offset;
	int i, ret = 0;

	mdp_hist_base = MDP_BASE + mgmt->base;

	r_data_offset = (32 == mgmt->num_bins) ? MDP_HIST_DATA32_R_OFF :
		MDP_HIST_DATA128_R_OFF;
	g_data_offset = (32 == mgmt->num_bins) ? MDP_HIST_DATA32_G_OFF :
		MDP_HIST_DATA128_G_OFF;
	b_data_offset = (32 == mgmt->num_bins) ? MDP_HIST_DATA32_B_OFF :
		MDP_HIST_DATA128_B_OFF;

	if (mgmt->c0 == NULL || mgmt->c1 == NULL || mgmt->c2 == NULL) {
		ret = -ENOMEM;
		goto hist_err;
	}

	if (!mgmt->hist) {
		pr_err("%s: mgmt->hist not set, mgmt->hist = 0x%08x",
		__func__, (uint32_t) mgmt->hist);
		return -EINVAL;
	}

	if (mgmt->hist->bin_cnt != mgmt->num_bins) {
		pr_err("%s, bins config = %d, bin requested = %d", __func__,
		mgmt->num_bins, mgmt->hist->bin_cnt);
		return -EINVAL;
	}

	/* Clocks must be running while the bin registers are read. */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp_clk_ctrl(1);
	for (i = 0; i < mgmt->num_bins; i++) {
		mgmt->c0[i] = inpdw(mdp_hist_base + r_data_offset + (4*i));
		mgmt->c1[i] = inpdw(mdp_hist_base + g_data_offset + (4*i));
		mgmt->c2[i] = inpdw(mdp_hist_base + b_data_offset + (4*i));
	}

	if (mdp_rev >= MDP_REV_42) {
		if (mgmt->extra_info) {
			mgmt->extra_info[0] = inpdw(mdp_hist_base +
					MDP_HIST_EXTRA_DATA0_OFF);
			mgmt->extra_info[1] = inpdw(mdp_hist_base +
					MDP_HIST_EXTRA_DATA0_OFF + 4);
		} else
			ret = -ENOMEM;
	}
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	if (!ret)
		return ret;

hist_err:
	pr_err("%s: invalid hist buffer\n", __func__);
	return ret;
}
/*call from within mdp_hist_mutex context*/
/*
 * VG-pipe variant of the histogram read: a single luma channel is
 * copied into mgmt->c0, plus one extra-info word on rev >= 4.2.
 *
 * Returns 0 on success, -ENOMEM if a destination buffer is missing,
 * -EINVAL if no reader is attached or the bin count mismatches.
 */
static int _mdp_histogram_read_vg_data(struct mdp_hist_mgmt *mgmt)
{
	char *mdp_hist_base;
	int i, ret = 0;

	mdp_hist_base = MDP_BASE + mgmt->base;

	if (mgmt->c0 == NULL) {
		ret = -ENOMEM;
		goto hist_err;
	}

	if (!mgmt->hist) {
		pr_err("%s: mgmt->hist not set", __func__);
		return -EINVAL;
	}

	if (mgmt->hist->bin_cnt != mgmt->num_bins) {
		pr_err("%s, bins config = %d, bin requested = %d", __func__,
		mgmt->num_bins, mgmt->hist->bin_cnt);
		return -EINVAL;
	}

	/* Clocks must be running while the bin registers are read. */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp_clk_ctrl(1);
	for (i = 0; i < mgmt->num_bins; i++)
		mgmt->c0[i] = inpdw(mdp_hist_base + MDP_HIST_DATA_LUMA_OFF +
									(4*i));

	if (mdp_rev >= MDP_REV_42) {
		if (mgmt->extra_info) {
			mgmt->extra_info[0] = inpdw(mdp_hist_base +
						MDP_HIST_EXTRA_DATA0_OFF);
		} else
			ret = -ENOMEM;
	}
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	if (!ret)
		return ret;

hist_err:
	pr_err("%s: invalid hist buffer\n", __func__);
	return ret;
}
/*
 * Worker queued by the histogram ISR: pulls the collected bins out of
 * the hardware, wakes the reader blocked in mdp_do_histogram(), and
 * kicks off the next collection cycle.  If collection was disabled or
 * the hardware state is not ready (init/valid flags), the engine is
 * reset instead of kicked off.  Runs under mgmt->mdp_hist_mutex.
 */
static void mdp_hist_read_work(struct work_struct *data)
{
	struct mdp_hist_mgmt *mgmt = container_of(data, struct mdp_hist_mgmt,
							mdp_histogram_worker);
	int ret = 0;
	bool hist_ready;

	mutex_lock(&mgmt->mdp_hist_mutex);
	if (mgmt->mdp_is_hist_data == FALSE) {
		pr_debug("%s, Histogram disabled before read.\n", __func__);
		ret = -EINVAL;
		goto error;
	}

	if (mgmt->hist == NULL) {
		/* Only complain when a reader genuinely appears to be
		 * waiting on the completion with no buffer attached. */
		if ((mgmt->mdp_is_hist_init == TRUE) &&
			((!completion_done(&mgmt->mdp_hist_comp)) &&
			waitqueue_active(&mgmt->mdp_hist_comp.wait)))
			pr_err("mgmt->hist invalid NULL\n");
		ret = -EINVAL;
	}

	hist_ready = (mgmt->mdp_is_hist_init && mgmt->mdp_is_hist_valid);

	if (!ret && hist_ready) {
		switch (mgmt->block) {
		case MDP_BLOCK_DMA_P:
		case MDP_BLOCK_DMA_S:
			ret = _mdp_histogram_read_dma_data(mgmt);
			break;
		case MDP_BLOCK_VG_1:
		case MDP_BLOCK_VG_2:
			ret = _mdp_histogram_read_vg_data(mgmt);
			break;
		default:
			pr_err("%s, invalid MDP block = %d\n", __func__,
				mgmt->block);
			ret = -EINVAL;
			goto error;
		}
	}
	/*
	 * if read was triggered by an underrun or failed copying,
	 * don't wake up readers
	 */
	if (!ret && hist_ready) {
		mgmt->hist = NULL;
		if (waitqueue_active(&mgmt->mdp_hist_comp.wait))
			complete(&mgmt->mdp_hist_comp);
	}

	/* First pass after enable: promote the valid/init flags so the
	 * next cycle's data is considered usable. */
	if (mgmt->mdp_is_hist_valid == FALSE)
		mgmt->mdp_is_hist_valid = TRUE;
	if (mgmt->mdp_is_hist_init == FALSE)
		mgmt->mdp_is_hist_init = TRUE;

	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	if (!ret && hist_ready)
		__mdp_histogram_kickoff(mgmt);
	else
		__mdp_histogram_reset(mgmt);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

error:
	mutex_unlock(&mgmt->mdp_hist_mutex);
}
  1025. /*call from within mdp_hist_mutex*/
  1026. static int _mdp_copy_hist_data(struct mdp_histogram_data *hist,
  1027. struct mdp_hist_mgmt *mgmt)
  1028. {
  1029. int ret;
  1030. if (hist->c0) {
  1031. ret = copy_to_user(hist->c0, mgmt->c0,
  1032. sizeof(uint32_t) * (hist->bin_cnt));
  1033. if (ret)
  1034. goto err;
  1035. }
  1036. if (hist->c1) {
  1037. ret = copy_to_user(hist->c1, mgmt->c1,
  1038. sizeof(uint32_t) * (hist->bin_cnt));
  1039. if (ret)
  1040. goto err;
  1041. }
  1042. if (hist->c2) {
  1043. ret = copy_to_user(hist->c2, mgmt->c2,
  1044. sizeof(uint32_t) * (hist->bin_cnt));
  1045. if (ret)
  1046. goto err;
  1047. }
  1048. if (hist->extra_info) {
  1049. ret = copy_to_user(hist->extra_info, mgmt->extra_info,
  1050. sizeof(uint32_t) * ((hist->block > MDP_BLOCK_VG_2) ? 2 : 1));
  1051. if (ret)
  1052. goto err;
  1053. }
  1054. err:
  1055. return ret;
  1056. }
  1057. #define MDP_HISTOGRAM_TIMEOUT_MS 84 /*5 Frames*/
/*
 * Blocking read of one histogram frame: attach @hist as the reader's
 * buffer, wait (killable, ~5 frames) for the worker to fill it, then
 * copy the bins out to userspace.  Lock order: mdp_do_hist_mutex is
 * held across the whole operation; mdp_hist_mutex only around state
 * checks and the final copy, and is dropped while sleeping on the
 * completion.
 */
static int mdp_do_histogram(struct fb_info *info,
					struct mdp_histogram_data *hist)
{
	struct mdp_hist_mgmt *mgmt = NULL;
	int ret = 0;
	unsigned long timeout = (MDP_HISTOGRAM_TIMEOUT_MS * HZ) / 1000;

	ret = mdp_histogram_block2mgmt(hist->block, &mgmt);
	if (ret) {
		pr_info("%s - %d", __func__, __LINE__);
		ret = -EINVAL;
		return ret;
	}

	mutex_lock(&mgmt->mdp_do_hist_mutex);
	if (!mgmt->frame_cnt || (mgmt->num_bins == 0)) {
		pr_info("%s - frame_cnt = %d, num_bins = %d", __func__,
		mgmt->frame_cnt, mgmt->num_bins);
		ret = -EINVAL;
		goto error;
	}
	/* Reject bin counts the hardware revision cannot supply. */
	if ((mdp_rev <= MDP_REV_41 && hist->bin_cnt > MDP_REV41_HIST_MAX_BIN)
		|| (mdp_rev == MDP_REV_42 &&
				hist->bin_cnt > MDP_REV42_HIST_MAX_BIN)) {
		pr_info("%s - mdp_rev = %d, num_bins = %d", __func__, mdp_rev,
								hist->bin_cnt);
		ret = -EINVAL;
		goto error;
	}
	mutex_lock(&mgmt->mdp_hist_mutex);
	if (!mgmt->mdp_is_hist_data) {
		pr_info("%s - hist_data = false!", __func__);
		ret = -EINVAL;
		goto error_lock;
	}

	if (!mgmt->mdp_is_hist_start) {
		pr_err("%s histogram not started\n", __func__);
		ret = -EPERM;
		goto error_lock;
	}

	/* Only one outstanding reader buffer at a time. */
	if (mgmt->hist != NULL) {
		pr_err("%s; histogram attempted to be read twice\n", __func__);
		ret = -EPERM;
		goto error_lock;
	}
	INIT_COMPLETION(mgmt->mdp_hist_comp);
	mgmt->hist = hist;
	mutex_unlock(&mgmt->mdp_hist_mutex);

	ret = wait_for_completion_killable_timeout(&mgmt->mdp_hist_comp,
								timeout);
	if (ret <= 0) {
		/* NOTE(review): mgmt->hist is cleared here without
		 * mdp_hist_mutex; racy against the worker detaching the
		 * same pointer — verify intent upstream. */
		if (!ret) {
			mgmt->hist = NULL;
			ret = -ETIMEDOUT;
			pr_debug("%s: bin collection timedout", __func__);
		} else {
			mgmt->hist = NULL;
			pr_debug("%s: bin collection interrupted", __func__);
		}
		goto error;
	}

	mutex_lock(&mgmt->mdp_hist_mutex);
	if (mgmt->mdp_is_hist_data && mgmt->mdp_is_hist_init)
		ret = _mdp_copy_hist_data(hist, mgmt);
	else
		ret = -ENODATA;
error_lock:
	mutex_unlock(&mgmt->mdp_hist_mutex);
error:
	mutex_unlock(&mgmt->mdp_do_hist_mutex);
	return ret;
}
  1128. #endif
  1129. #ifdef CONFIG_FB_MSM_MDP303
/* vsync_isr_handler: Called from isr context*/
static void vsync_isr_handler(void)
{
	/* Timestamp the vsync so mdp_dma_show_event() can report it. */
	vsync_cntrl.vsync_time = ktime_get();
}
  1135. #endif
  1136. ssize_t mdp_dma_show_event(struct device *dev,
  1137. struct device_attribute *attr, char *buf)
  1138. {
  1139. ssize_t ret = 0;
  1140. if (atomic_read(&vsync_cntrl.suspend) > 0 ||
  1141. atomic_read(&vsync_cntrl.vsync_resume) == 0)
  1142. return 0;
  1143. INIT_COMPLETION(vsync_cntrl.vsync_wait);
  1144. wait_for_completion(&vsync_cntrl.vsync_wait);
  1145. ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu",
  1146. ktime_to_ns(vsync_cntrl.vsync_time));
  1147. buf[strlen(buf) + 1] = '\0';
  1148. return ret;
  1149. }
  1150. /* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
  1151. int mdp_ppp_pipe_wait(void)
  1152. {
  1153. int ret = 1;
  1154. boolean wait;
  1155. unsigned long flag;
  1156. /* wait 5 seconds for the operation to complete before declaring
  1157. the MDP hung */
  1158. spin_lock_irqsave(&mdp_spin_lock, flag);
  1159. wait = mdp_ppp_waiting;
  1160. spin_unlock_irqrestore(&mdp_spin_lock, flag);
  1161. if (wait == TRUE) {
  1162. ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
  1163. 5 * HZ);
  1164. if (!ret)
  1165. printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
  1166. __func__);
  1167. }
  1168. return ret;
  1169. }
  1170. #define MAX_VSYNC_GAP 4
  1171. #define DEFAULT_FRAME_RATE 60
  1172. u32 mdp_get_panel_framerate(struct msm_fb_data_type *mfd)
  1173. {
  1174. u32 frame_rate = 0, pixel_rate = 0, total_pixel;
  1175. struct msm_panel_info *panel_info = &mfd->panel_info;
  1176. pixel_rate =
  1177. (panel_info->type == MIPI_CMD_PANEL ||
  1178. panel_info->type == MIPI_VIDEO_PANEL) ?
  1179. panel_info->mipi.dsi_pclk_rate :
  1180. panel_info->clk_rate;
  1181. if (!pixel_rate)
  1182. pr_warn("%s pixel rate is zero\n", __func__);
  1183. total_pixel =
  1184. (panel_info->lcdc.h_back_porch +
  1185. panel_info->lcdc.h_front_porch +
  1186. panel_info->lcdc.h_pulse_width +
  1187. panel_info->xres) *
  1188. (panel_info->lcdc.v_back_porch +
  1189. panel_info->lcdc.v_front_porch +
  1190. panel_info->lcdc.v_pulse_width +
  1191. panel_info->yres);
  1192. if (total_pixel)
  1193. frame_rate = pixel_rate / total_pixel;
  1194. else
  1195. pr_warn("%s total pixels are zero\n", __func__);
  1196. if (mfd->dest == DISPLAY_LCD) {
  1197. if (panel_info->type == MDDI_PANEL && panel_info->mddi.is_type1)
  1198. frame_rate = panel_info->lcd.refx100 / (100 * 2);
  1199. else if (panel_info->type != MIPI_CMD_PANEL)
  1200. frame_rate = panel_info->lcd.refx100 / 100;
  1201. }
  1202. if (frame_rate == 0) {
  1203. frame_rate = DEFAULT_FRAME_RATE;
  1204. pr_warn("%s frame rate=%d is default\n", __func__, frame_rate);
  1205. }
  1206. pr_debug("%s frame rate=%d total_pixel=%d, pixel_rate=%d\n", __func__,
  1207. frame_rate, total_pixel, pixel_rate);
  1208. return frame_rate;
  1209. }
  1210. static int mdp_diff_to_next_vsync(ktime_t cur_time,
  1211. ktime_t last_vsync, u32 vsync_period)
  1212. {
  1213. int diff_from_last, diff_to_next;
  1214. /*
  1215. * Get interval beween last vsync and current time
  1216. * Current time = CPU programming MDP for next Vsync
  1217. */
  1218. diff_from_last =
  1219. (ktime_to_us(ktime_sub(cur_time, last_vsync)));
  1220. diff_from_last /= USEC_PER_MSEC;
  1221. /*
  1222. * If the last Vsync occurred too long ago, skip programming
  1223. * the timer
  1224. */
  1225. if (diff_from_last < (vsync_period * MAX_VSYNC_GAP)) {
  1226. if (diff_from_last > vsync_period)
  1227. diff_to_next =
  1228. (diff_from_last - vsync_period) % vsync_period;
  1229. else
  1230. diff_to_next = vsync_period - diff_from_last;
  1231. } else {
  1232. /* mark it out of range */
  1233. diff_to_next = vsync_period + 1;
  1234. }
  1235. return diff_to_next;
  1236. }
  1237. void mdp_update_pm(struct msm_fb_data_type *mfd, ktime_t pre_vsync)
  1238. {
  1239. u32 vsync_period;
  1240. int diff_to_next;
  1241. ktime_t cur_time, wakeup_time;
  1242. if (!mfd->cpu_pm_hdl)
  1243. return;
  1244. vsync_period = mfd->panel_info.frame_interval;
  1245. cur_time = ktime_get();
  1246. diff_to_next = mdp_diff_to_next_vsync(cur_time,
  1247. pre_vsync,
  1248. vsync_period);
  1249. if (diff_to_next > vsync_period)
  1250. return;
  1251. pr_debug("%s cur_time %d, pre_vsync %d, to_next %d\n",
  1252. __func__,
  1253. (int)ktime_to_ms(cur_time),
  1254. (int)ktime_to_ms(pre_vsync),
  1255. diff_to_next);
  1256. wakeup_time = ktime_add_ns(cur_time, diff_to_next * NSEC_PER_MSEC);
  1257. activate_event_timer(mfd->cpu_pm_hdl, wakeup_time);
  1258. }
/* Protects mdp_irq_mask and mdp_irq_enabled across enable/disable. */
static DEFINE_SPINLOCK(mdp_lock);
/* Bitmask of MDP IRQ "terms" (clients) currently requesting the IRQ. */
static int mdp_irq_mask;
/* Nonzero while the shared mdp_irq line is enabled at the controller. */
static int mdp_irq_enabled;
  1262. /*
  1263. * mdp_enable_irq: can not be called from isr
  1264. */
  1265. void mdp_enable_irq(uint32 term)
  1266. {
  1267. unsigned long irq_flags;
  1268. spin_lock_irqsave(&mdp_lock, irq_flags);
  1269. if (mdp_irq_mask & term) {
  1270. printk(KERN_ERR "%s: MDP IRQ term-0x%x is already set, mask=%x irq=%d\n",
  1271. __func__, term, mdp_irq_mask, mdp_irq_enabled);
  1272. } else {
  1273. mdp_irq_mask |= term;
  1274. if (mdp_irq_mask && !mdp_irq_enabled) {
  1275. mdp_irq_enabled = 1;
  1276. enable_irq(mdp_irq);
  1277. }
  1278. }
  1279. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  1280. }
  1281. /*
  1282. * mdp_disable_irq: can not be called from isr
  1283. */
  1284. void mdp_disable_irq(uint32 term)
  1285. {
  1286. unsigned long irq_flags;
  1287. spin_lock_irqsave(&mdp_lock, irq_flags);
  1288. if (!(mdp_irq_mask & term)) {
  1289. printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
  1290. __func__, term, mdp_irq_mask, mdp_irq_enabled);
  1291. } else {
  1292. mdp_irq_mask &= ~term;
  1293. if (!mdp_irq_mask && mdp_irq_enabled) {
  1294. mdp_irq_enabled = 0;
  1295. disable_irq(mdp_irq);
  1296. }
  1297. }
  1298. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  1299. }
  1300. void mdp_disable_irq_nosync(uint32 term)
  1301. {
  1302. spin_lock(&mdp_lock);
  1303. if (!(mdp_irq_mask & term)) {
  1304. printk(KERN_ERR "%s: MDP IRQ term-0x%x is NOT set, mask=%x irq=%d\n",
  1305. __func__, term, mdp_irq_mask, mdp_irq_enabled);
  1306. } else {
  1307. mdp_irq_mask &= ~term;
  1308. if (!mdp_irq_mask && mdp_irq_enabled) {
  1309. mdp_irq_enabled = 0;
  1310. disable_irq_nosync(mdp_irq);
  1311. }
  1312. }
  1313. spin_unlock(&mdp_lock);
  1314. }
/*
 * Start the hardware engine selected by @term.  PPP kickoffs are
 * synchronous (wait for mdp_ppp_comp); DMA/overlay kickoffs just poke
 * the start register after powering the corresponding block.  The
 * register offsets vary per MDP generation, hence the #ifdef tree.
 */
void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
{
	unsigned long flag;

	/* complete all the writes before starting */
	wmb();

	/* kick off PPP engine */
	if (term == MDP_PPP_TERM) {
		if (mdp_debug[MDP_PPP_BLOCK])
			jiffies_to_timeval(jiffies, &mdp_ppp_timeval);

		/* let's turn on PPP block */
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		mdp_enable_irq(term);
		INIT_COMPLETION(mdp_ppp_comp);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		mdp_ppp_waiting = TRUE;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		outpdw(MDP_BASE + 0x30, 0x1000);
		/* Synchronous: block until the PPP done interrupt fires. */
		wait_for_completion_killable(&mdp_ppp_comp);
		mdp_disable_irq(term);

		if (mdp_debug[MDP_PPP_BLOCK]) {
			struct timeval now;

			jiffies_to_timeval(jiffies, &now);
			mdp_ppp_timeval.tv_usec =
				now.tv_usec - mdp_ppp_timeval.tv_usec;
			MSM_FB_DEBUG("MDP-PPP: %d\n",
				(int)mdp_ppp_timeval.tv_usec);
		}
	} else if (term == MDP_DMA2_TERM) {
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			MSM_FB_DEBUG("MDP-DMA2: %d\n",
				(int)mdp_dma2_timeval.tv_usec);
			jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
		}
		/* DMA update timestamp */
		mdp_dma2_last_update_time = ktime_get_real();
		/* let's turn on DMA2 block */
#ifdef CONFIG_FB_MSM_MDP22
		outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
#else
		mdp_lut_enable();
#ifdef CONFIG_FB_MSM_MDP40
		outpdw(MDP_BASE + 0x000c, 0x0); /* start DMA */
#else
		outpdw(MDP_BASE + 0x0044, 0x0); /* start DMA */
#ifdef CONFIG_FB_MSM_MDP303
#ifdef CONFIG_FB_MSM_MIPI_DSI
		mipi_dsi_cmd_mdp_start();
#endif
#endif
#endif
#endif
#ifdef CONFIG_FB_MSM_MDP40
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0010, 0x0); /* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0014, 0x0); /* start DMA */
	} else if (term == MDP_OVERLAY0_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0004, 0);
	} else if (term == MDP_OVERLAY1_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0008, 0);
	} else if (term == MDP_OVERLAY2_TERM) {
		mdp_pipe_ctrl(MDP_OVERLAY2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x00D0, 0);
	}
#else
	} else if (term == MDP_DMA_S_TERM) {
		mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x0048, 0x0); /* start DMA */
	} else if (term == MDP_DMA_E_TERM) {
		mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		outpdw(MDP_BASE + 0x004C, 0x0);
	}
#endif
}
/* Registered MSM FB platform devices, used to toggle panel clocks. */
static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
static int pdev_list_cnt;

/* Deferred power-off: requests MDP_MASTER_BLOCK power-off after the
 * delayed work programmed in mdp_pipe_ctrl() expires. */
static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
{
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}

/* MDP core clock rate captured before the clock is gated. */
static int mdp_clk_rate;
  1400. #ifdef CONFIG_FB_MSM_NO_MDP_PIPE_CTRL
  1401. /*
  1402. * mdp_clk_disable_unprepare(void) called from thread context
  1403. */
  1404. static void mdp_clk_disable_unprepare(void)
  1405. {
  1406. mb();
  1407. vsync_clk_disable_unprepare();
  1408. if (mdp_clk != NULL)
  1409. clk_disable_unprepare(mdp_clk);
  1410. if (mdp_pclk != NULL)
  1411. clk_disable_unprepare(mdp_pclk);
  1412. if (mdp_lut_clk != NULL)
  1413. clk_disable_unprepare(mdp_lut_clk);
  1414. }
  1415. /*
  1416. * mdp_clk_prepare_enable(void) called from thread context
  1417. */
  1418. static void mdp_clk_prepare_enable(void)
  1419. {
  1420. if (mdp_clk != NULL)
  1421. clk_prepare_enable(mdp_clk);
  1422. if (mdp_pclk != NULL)
  1423. clk_prepare_enable(mdp_pclk);
  1424. if (mdp_lut_clk != NULL)
  1425. clk_prepare_enable(mdp_lut_clk);
  1426. vsync_clk_prepare_enable();
  1427. }
  1428. /*
  1429. * mdp_clk_ctrl: called from thread context
  1430. */
  1431. void mdp_clk_ctrl(int on)
  1432. {
  1433. static int mdp_clk_cnt;
  1434. mutex_lock(&mdp_suspend_mutex);
  1435. if (on) {
  1436. if (mdp_clk_cnt == 0)
  1437. mdp_clk_prepare_enable();
  1438. mdp_clk_cnt++;
  1439. } else {
  1440. if (mdp_clk_cnt) {
  1441. mdp_clk_cnt--;
  1442. if (mdp_clk_cnt == 0)
  1443. mdp_clk_disable_unprepare();
  1444. } else
  1445. pr_err("%s: %d: mdp clk off is invalid\n",
  1446. __func__, __LINE__);
  1447. }
  1448. pr_debug("%s: on=%d cnt=%d\n", __func__, on, mdp_clk_cnt);
  1449. mutex_unlock(&mdp_suspend_mutex);
  1450. }
/* CONFIG_FB_MSM_NO_MDP_PIPE_CTRL build: clock gating is handled
 * entirely by mdp_clk_ctrl(), so per-block power voting is a no-op. */
void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
		boolean isr)
{
	/* do nothing */
}
  1456. #else
  1457. void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
  1458. boolean isr)
  1459. {
  1460. boolean mdp_all_blocks_off = TRUE;
  1461. int i;
  1462. unsigned long flag;
  1463. struct msm_fb_panel_data *pdata;
  1464. /*
  1465. * It is assumed that if isr = TRUE then start = OFF
  1466. * if start = ON when isr = TRUE it could happen that the usercontext
  1467. * could turn off the clocks while the interrupt is updating the
  1468. * power to ON
  1469. */
  1470. WARN_ON(isr == TRUE && state == MDP_BLOCK_POWER_ON);
  1471. spin_lock_irqsave(&mdp_spin_lock, flag);
  1472. if (MDP_BLOCK_POWER_ON == state) {
  1473. atomic_inc(&mdp_block_power_cnt[block]);
  1474. if (MDP_DMA2_BLOCK == block)
  1475. mdp_in_processing = TRUE;
  1476. } else {
  1477. atomic_dec(&mdp_block_power_cnt[block]);
  1478. if (atomic_read(&mdp_block_power_cnt[block]) < 0) {
  1479. /*
  1480. * Master has to serve a request to power off MDP always
  1481. * It also has a timer to power off. So, in case of
  1482. * timer expires first and DMA2 finishes later,
  1483. * master has to power off two times
  1484. * There shouldn't be multiple power-off request for
  1485. * other blocks
  1486. */
  1487. if (block != MDP_MASTER_BLOCK) {
  1488. MSM_FB_INFO("mdp_block_power_cnt[block=%d] \
  1489. multiple power-off request\n", block);
  1490. }
  1491. atomic_set(&mdp_block_power_cnt[block], 0);
  1492. }
  1493. if (MDP_DMA2_BLOCK == block)
  1494. mdp_in_processing = FALSE;
  1495. }
  1496. spin_unlock_irqrestore(&mdp_spin_lock, flag);
  1497. /*
  1498. * If it's in isr, we send our request to workqueue.
  1499. * Otherwise, processing happens in the current context
  1500. */
  1501. if (isr) {
  1502. if (mdp_current_clk_on) {
  1503. /* checking all blocks power state */
  1504. for (i = 0; i < MDP_MAX_BLOCK; i++) {
  1505. if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
  1506. mdp_all_blocks_off = FALSE;
  1507. break;
  1508. }
  1509. }
  1510. if (mdp_all_blocks_off) {
  1511. /* send workqueue to turn off mdp power */
  1512. queue_delayed_work(mdp_pipe_ctrl_wq,
  1513. &mdp_pipe_ctrl_worker,
  1514. mdp_timer_duration);
  1515. }
  1516. }
  1517. } else {
  1518. down(&mdp_pipe_ctrl_mutex);
  1519. /* checking all blocks power state */
  1520. for (i = 0; i < MDP_MAX_BLOCK; i++) {
  1521. if (atomic_read(&mdp_block_power_cnt[i]) > 0) {
  1522. mdp_all_blocks_off = FALSE;
  1523. break;
  1524. }
  1525. }
  1526. /*
  1527. * find out whether a delayable work item is currently
  1528. * pending
  1529. */
  1530. if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
  1531. /*
  1532. * try to cancel the current work if it fails to
  1533. * stop (which means del_timer can't delete it
  1534. * from the list, it's about to expire and run),
  1535. * we have to let it run. queue_delayed_work won't
  1536. * accept the next job which is same as
  1537. * queue_delayed_work(mdp_timer_duration = 0)
  1538. */
  1539. cancel_delayed_work(&mdp_pipe_ctrl_worker);
  1540. }
  1541. if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
  1542. mutex_lock(&mdp_suspend_mutex);
  1543. if (block == MDP_MASTER_BLOCK || mdp_suspended) {
  1544. mdp_current_clk_on = FALSE;
  1545. mb();
  1546. /* turn off MDP clks */
  1547. mdp_vsync_clk_disable();
  1548. for (i = 0; i < pdev_list_cnt; i++) {
  1549. pdata = (struct msm_fb_panel_data *)
  1550. pdev_list[i]->dev.platform_data;
  1551. if (pdata && pdata->clk_func)
  1552. pdata->clk_func(0);
  1553. }
  1554. if (mdp_clk != NULL) {
  1555. mdp_clk_rate = clk_get_rate(mdp_clk);
  1556. clk_disable_unprepare(mdp_clk);
  1557. if (mdp_hw_revision <=
  1558. MDP4_REVISION_V2_1 &&
  1559. mdp_clk_rate > 122880000) {
  1560. clk_set_rate(mdp_clk,
  1561. 122880000);
  1562. }
  1563. MSM_FB_DEBUG("MDP CLK OFF\n");
  1564. }
  1565. if (mdp_pclk != NULL) {
  1566. clk_disable_unprepare(mdp_pclk);
  1567. MSM_FB_DEBUG("MDP PCLK OFF\n");
  1568. }
  1569. if (mdp_lut_clk != NULL)
  1570. clk_disable_unprepare(mdp_lut_clk);
  1571. } else {
  1572. /* send workqueue to turn off mdp power */
  1573. queue_delayed_work(mdp_pipe_ctrl_wq,
  1574. &mdp_pipe_ctrl_worker,
  1575. mdp_timer_duration);
  1576. }
  1577. mutex_unlock(&mdp_suspend_mutex);
  1578. } else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
  1579. mdp_current_clk_on = TRUE;
  1580. /* turn on MDP clks */
  1581. for (i = 0; i < pdev_list_cnt; i++) {
  1582. pdata = (struct msm_fb_panel_data *)
  1583. pdev_list[i]->dev.platform_data;
  1584. if (pdata && pdata->clk_func)
  1585. pdata->clk_func(1);
  1586. }
  1587. if (mdp_clk != NULL) {
  1588. if (mdp_hw_revision <=
  1589. MDP4_REVISION_V2_1 &&
  1590. mdp_clk_rate > 122880000) {
  1591. clk_set_rate(mdp_clk,
  1592. mdp_clk_rate);
  1593. }
  1594. clk_prepare_enable(mdp_clk);
  1595. MSM_FB_DEBUG("MDP CLK ON\n");
  1596. }
  1597. if (mdp_pclk != NULL) {
  1598. clk_prepare_enable(mdp_pclk);
  1599. MSM_FB_DEBUG("MDP PCLK ON\n");
  1600. }
  1601. if (mdp_lut_clk != NULL)
  1602. clk_prepare_enable(mdp_lut_clk);
  1603. mdp_vsync_clk_enable();
  1604. }
  1605. up(&mdp_pipe_ctrl_mutex);
  1606. }
  1607. }
/*
 * mdp_clk_ctrl() - explicit clock vote stub for this build variant.
 *
 * In this configuration MDP clock on/off is driven entirely through
 * mdp_pipe_ctrl() (see the block-power accounting above), so the
 * explicit vote is a no-op kept only to satisfy the interface shared
 * with newer MDP revisions.
 */
void mdp_clk_ctrl(int on)
{
	/* do nothing */
}
  1612. #endif
  1613. void mdp_histogram_handle_isr(struct mdp_hist_mgmt *mgmt)
  1614. {
  1615. uint32 isr, mask;
  1616. char *base_addr = MDP_BASE + mgmt->base;
  1617. isr = inpdw(base_addr + MDP_HIST_INTR_STATUS_OFF);
  1618. mask = inpdw(base_addr + MDP_HIST_INTR_ENABLE_OFF);
  1619. outpdw(base_addr + MDP_HIST_INTR_CLEAR_OFF, isr);
  1620. mb();
  1621. isr &= mask;
  1622. if (isr & INTR_HIST_RESET_SEQ_DONE)
  1623. __mdp_histogram_kickoff(mgmt);
  1624. else if (isr & INTR_HIST_DONE)
  1625. queue_work(mdp_hist_wq, &mgmt->mdp_histogram_worker);
  1626. }
  1627. #ifndef CONFIG_FB_MSM_MDP40
/*
 * mdp_isr() - top-level MDP interrupt handler for pre-MDP4 targets.
 *
 * Latches and acks the raw interrupt status, then dispatches each
 * enabled source: primary vsync read pointer, TV-out DMA3 start,
 * histogram done, LCDC underflow/frame start, DMA_S/DMA_E/DMA_P
 * completion and PPP completion.  Shared vsync/DMA bookkeeping is
 * protected by mdp_spin_lock; clock votes taken for one-shot events
 * are dropped here via mdp_pipe_ctrl(..., TRUE) (isr context).
 */
irqreturn_t mdp_isr(int irq, void *ptr)
{
	uint32 mdp_interrupt = 0;
	struct mdp_dma_data *dma;
	unsigned long flag;
	struct mdp_hist_mgmt *mgmt = NULL;
	int i, ret;
	int vsync_isr, disabled_clocks;

	/* Ensure all the register write are complete */
	mb();

	/* tell mdp_pipe_ctrl() we are running in interrupt context */
	mdp_is_in_isr = TRUE;

	/* latch and ack everything that fired, keep only enabled bits */
	mdp_interrupt = inp32(MDP_INTR_STATUS);
	outp32(MDP_INTR_CLEAR, mdp_interrupt);

	mdp_interrupt &= mdp_intr_mask;

	if (mdp_interrupt & TV_ENC_UNDERRUN) {
		mdp_interrupt &= ~(TV_ENC_UNDERRUN);
		mdp_tv_underflow_cnt++;
	}

	if (!mdp_interrupt)
		goto out;

	/*Primary Vsync interrupt*/
	if (mdp_interrupt & MDP_PRIM_RDPTR) {
		spin_lock_irqsave(&mdp_spin_lock, flag);
		vsync_isr = vsync_cntrl.vsync_irq_enabled;
		disabled_clocks = vsync_cntrl.disabled_clocks;
		/*
		 * No consumer left for vsync events: mask the source and
		 * mark its clocks as disabled; otherwise run the handler.
		 */
		if ((!vsync_isr && !vsync_cntrl.disabled_clocks)
			|| (!vsync_isr && vsync_cntrl.vsync_dma_enabled)) {
			mdp_intr_mask &= ~MDP_PRIM_RDPTR;
			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
			mdp_disable_irq_nosync(MDP_VSYNC_TERM);
			vsync_cntrl.disabled_clocks = 1;
		} else if (vsync_isr) {
			vsync_isr_handler();
		}
		vsync_cntrl.vsync_dma_enabled = 0;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);

		complete(&vsync_cntrl.vsync_comp);

		/* drop the clock vote held for this one-shot vsync */
		if (!vsync_isr && !disabled_clocks)
			mdp_pipe_ctrl(MDP_CMD_BLOCK,
				MDP_BLOCK_POWER_OFF, TRUE);

		complete_all(&vsync_cntrl.vsync_wait);
	}

	/* DMA3 TV-Out Start */
	if (mdp_interrupt & TV_OUT_DMA3_START) {
		/* let's disable TV out interrupt */
		mdp_intr_mask &= ~TV_OUT_DMA3_START;
		outp32(MDP_INTR_ENABLE, mdp_intr_mask);

		dma = &dma3_data;
		if (dma->waiting) {
			dma->waiting = FALSE;
			complete(&dma->comp);
		}
	}

	if (mdp_rev >= MDP_REV_30) {
		/* Only DMA_P histogram exists for this MDP rev*/
		if (mdp_interrupt & MDP_HIST_DONE) {
			ret = mdp_histogram_block2mgmt(MDP_BLOCK_DMA_P, &mgmt);
			if (!ret)
				mdp_histogram_handle_isr(mgmt);
			outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
		}

		/* LCDC UnderFlow */
		if (mdp_interrupt & LCDC_UNDERFLOW) {
			mdp_lcdc_underflow_cnt++;
			/*when underflow happens HW resets all the histogram
			  registers that were set before so restore them back
			  to normal.*/
			for (i = 0; i < MDP_HIST_MGMT_MAX; i++) {
				mgmt = mdp_hist_mgmt_array[i];
				if (!mgmt)
					continue;
				mgmt->mdp_is_hist_valid = FALSE;
			}
		}

		/* LCDC Frame Start */
		if (mdp_interrupt & LCDC_FRAME_START) {
			dma = &dma2_data;
			spin_lock_irqsave(&mdp_spin_lock, flag);
			vsync_isr = vsync_cntrl.vsync_irq_enabled;
			/* let's disable LCDC interrupt */
			if (dma->waiting) {
				dma->waiting = FALSE;
				complete(&dma->comp);
			}

			if (!vsync_isr) {
				mdp_intr_mask &= ~LCDC_FRAME_START;
				outp32(MDP_INTR_ENABLE, mdp_intr_mask);
				mdp_disable_irq_nosync(MDP_VSYNC_TERM);
				vsync_cntrl.disabled_clocks = 1;
			} else {
				vsync_isr_handler();
			}
			spin_unlock_irqrestore(&mdp_spin_lock, flag);

			/* drop the clock vote if nobody needs vsync now */
			if (!vsync_isr)
				mdp_pipe_ctrl(MDP_CMD_BLOCK,
					MDP_BLOCK_POWER_OFF, TRUE);

			complete_all(&vsync_cntrl.vsync_wait);
		}

		/* DMA2 LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_S_DONE) {
			dma = &dma_s_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
				TRUE);
			complete(&dma->comp);
		}

		/* DMA_E LCD-Out Complete */
		if (mdp_interrupt & MDP_DMA_E_DONE) {
			dma = &dma_e_data;
			dma->busy = FALSE;
			mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_OFF,
				TRUE);
			complete(&dma->comp);
		}
	}

	/* DMA2 LCD-Out Complete */
	if (mdp_interrupt & MDP_DMA_P_DONE) {
		struct timeval now;

		/* record time elapsed since the DMA2 update was kicked */
		mdp_dma2_last_update_time = ktime_sub(ktime_get_real(),
			mdp_dma2_last_update_time);
		if (mdp_debug[MDP_DMA2_BLOCK]) {
			jiffies_to_timeval(jiffies, &now);
			mdp_dma2_timeval.tv_usec =
			    now.tv_usec - mdp_dma2_timeval.tv_usec;
		}
#ifndef CONFIG_FB_MSM_MDP303
		dma = &dma2_data;
		spin_lock_irqsave(&mdp_spin_lock, flag);
		dma->busy = FALSE;
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
		complete(&dma->comp);
#else
		/* MDP303: DMA2 completion only matters for command panels */
		if (mdp_prim_panel_type == MIPI_CMD_PANEL) {
			dma = &dma2_data;
			spin_lock_irqsave(&mdp_spin_lock, flag);
			dma->busy = FALSE;
			spin_unlock_irqrestore(&mdp_spin_lock, flag);
			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
				TRUE);
			mdp_disable_irq_nosync(MDP_DMA2_TERM);
			complete(&dma->comp);
		}
#endif
	}

	/* PPP Complete */
	if (mdp_interrupt & MDP_PPP_DONE) {
#ifdef CONFIG_FB_MSM_MDP31
		MDP_OUTP(MDP_BASE + 0x00100, 0xFFFF);
#endif
		mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
		spin_lock_irqsave(&mdp_spin_lock, flag);
		if (mdp_ppp_waiting) {
			mdp_ppp_waiting = FALSE;
			complete(&mdp_ppp_comp);
		}
		spin_unlock_irqrestore(&mdp_spin_lock, flag);
	}

out:
	mdp_is_in_isr = FALSE;

	return IRQ_HANDLED;
}
  1790. #endif
  1791. static void mdp_drv_init(void)
  1792. {
  1793. int i;
  1794. for (i = 0; i < MDP_MAX_BLOCK; i++) {
  1795. mdp_debug[i] = 0;
  1796. }
  1797. /* initialize spin lock and workqueue */
  1798. spin_lock_init(&mdp_spin_lock);
  1799. mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
  1800. mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
  1801. mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
  1802. INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
  1803. mdp_pipe_ctrl_workqueue_handler);
  1804. /* initialize semaphore */
  1805. init_completion(&mdp_ppp_comp);
  1806. sema_init(&mdp_ppp_mutex, 1);
  1807. sema_init(&mdp_pipe_ctrl_mutex, 1);
  1808. dma2_data.busy = FALSE;
  1809. dma2_data.dmap_busy = FALSE;
  1810. dma2_data.waiting = FALSE;
  1811. init_completion(&dma2_data.comp);
  1812. init_completion(&vsync_cntrl.vsync_comp);
  1813. init_completion(&dma2_data.dmap_comp);
  1814. sema_init(&dma2_data.mutex, 1);
  1815. mutex_init(&dma2_data.ov_mutex);
  1816. dma3_data.busy = FALSE;
  1817. dma3_data.waiting = FALSE;
  1818. init_completion(&dma3_data.comp);
  1819. sema_init(&dma3_data.mutex, 1);
  1820. dma_s_data.busy = FALSE;
  1821. dma_s_data.waiting = FALSE;
  1822. dma_s_data.dmap_busy = FALSE;
  1823. init_completion(&dma_s_data.comp);
  1824. sema_init(&dma_s_data.mutex, 1);
  1825. mutex_init(&dma_s_data.ov_mutex);
  1826. #ifndef CONFIG_FB_MSM_MDP303
  1827. dma_e_data.busy = FALSE;
  1828. dma_e_data.waiting = FALSE;
  1829. init_completion(&dma_e_data.comp);
  1830. mutex_init(&dma_e_data.ov_mutex);
  1831. #endif
  1832. #ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
  1833. dma_wb_data.busy = FALSE;
  1834. dma_wb_data.waiting = FALSE;
  1835. init_completion(&dma_wb_data.comp);
  1836. mutex_init(&dma_wb_data.ov_mutex);
  1837. #endif
  1838. /* initializing mdp power block counter to 0 */
  1839. for (i = 0; i < MDP_MAX_BLOCK; i++) {
  1840. atomic_set(&mdp_block_power_cnt[i], 0);
  1841. }
  1842. vsync_cntrl.disabled_clocks = 1;
  1843. init_completion(&vsync_cntrl.vsync_wait);
  1844. atomic_set(&vsync_cntrl.vsync_resume, 1);
  1845. #ifdef MSM_FB_ENABLE_DBGFS
  1846. {
  1847. struct dentry *root;
  1848. char sub_name[] = "mdp";
  1849. root = msm_fb_get_debugfs_root();
  1850. if (root != NULL) {
  1851. mdp_dir = debugfs_create_dir(sub_name, root);
  1852. if (mdp_dir) {
  1853. msm_fb_debugfs_file_create(mdp_dir,
  1854. "dma2_update_time_in_usec",
  1855. (u32 *) &mdp_dma2_update_time_in_usec);
  1856. msm_fb_debugfs_file_create(mdp_dir,
  1857. "vs_rdcnt_slow",
  1858. (u32 *) &mdp_lcd_rd_cnt_offset_slow);
  1859. msm_fb_debugfs_file_create(mdp_dir,
  1860. "vs_rdcnt_fast",
  1861. (u32 *) &mdp_lcd_rd_cnt_offset_fast);
  1862. msm_fb_debugfs_file_create(mdp_dir,
  1863. "mdp_usec_diff_threshold",
  1864. (u32 *) &mdp_usec_diff_threshold);
  1865. msm_fb_debugfs_file_create(mdp_dir,
  1866. "mdp_current_clk_on",
  1867. (u32 *) &mdp_current_clk_on);
  1868. #ifdef CONFIG_FB_MSM_LCDC
  1869. msm_fb_debugfs_file_create(mdp_dir,
  1870. "lcdc_start_x",
  1871. (u32 *) &first_pixel_start_x);
  1872. msm_fb_debugfs_file_create(mdp_dir,
  1873. "lcdc_start_y",
  1874. (u32 *) &first_pixel_start_y);
  1875. #endif
  1876. }
  1877. }
  1878. }
  1879. #endif
  1880. }
  1881. static int mdp_probe(struct platform_device *pdev);
  1882. static int mdp_remove(struct platform_device *pdev);
/* Runtime-PM suspend hook: logging only; real power control is done
 * through mdp_pipe_ctrl()/clock voting elsewhere in this file. */
static int mdp_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}
/* Runtime-PM resume hook: logging only, mirror of mdp_runtime_suspend(). */
static int mdp_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}
  1893. static struct dev_pm_ops mdp_dev_pm_ops = {
  1894. .runtime_suspend = mdp_runtime_suspend,
  1895. .runtime_resume = mdp_runtime_resume,
  1896. };
/*
 * Platform driver hookup.  Legacy suspend is wired in only when the
 * early-suspend framework is not handling display power.
 */
static struct platform_driver mdp_driver = {
	.probe = mdp_probe,
	.remove = mdp_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
	.suspend = mdp_suspend,
	.resume = NULL,
#endif
	.shutdown = NULL,
	.driver = {
		/*
		 * Driver name must match the device name added in
		 * platform.c.
		 */
		.name = "mdp",
		.pm = &mdp_dev_pm_ops,
	},
};
  1914. static int mdp_fps_level_change(struct platform_device *pdev, u32 fps_level)
  1915. {
  1916. int ret = 0;
  1917. ret = panel_next_fps_level_change(pdev, fps_level);
  1918. return ret;
  1919. }
  1920. static int mdp_off(struct platform_device *pdev)
  1921. {
  1922. int ret = 0;
  1923. struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
  1924. pr_debug("%s:+\n", __func__);
  1925. mdp_histogram_ctrl_all(FALSE);
  1926. atomic_set(&vsync_cntrl.suspend, 1);
  1927. atomic_set(&vsync_cntrl.vsync_resume, 0);
  1928. complete_all(&vsync_cntrl.vsync_wait);
  1929. mdp_clk_ctrl(1);
  1930. if (mfd->panel.type == MIPI_CMD_PANEL)
  1931. mdp4_dsi_cmd_off(pdev);
  1932. else if (mfd->panel.type == MIPI_VIDEO_PANEL)
  1933. mdp4_dsi_video_off(pdev);
  1934. else if (mfd->panel.type == HDMI_PANEL ||
  1935. mfd->panel.type == LCDC_PANEL ||
  1936. mfd->panel.type == LVDS_PANEL)
  1937. mdp4_lcdc_off(pdev);
  1938. else if (mfd->panel.type == MDDI_PANEL)
  1939. mdp4_mddi_off(pdev);
  1940. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  1941. ret = panel_next_off(pdev);
  1942. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  1943. mdp_clk_ctrl(0);
  1944. #ifdef CONFIG_MSM_BUS_SCALING
  1945. mdp_bus_scale_update_request(0, 0);
  1946. #endif
  1947. if (mdp_rev >= MDP_REV_41 && mfd->panel.type == MIPI_CMD_PANEL)
  1948. mdp_dsi_cmd_overlay_suspend(mfd);
  1949. pr_debug("%s:-\n", __func__);
  1950. return ret;
  1951. }
  1952. #ifdef CONFIG_FB_MSM_MDP303
/*
 * MDP4 helper stubbed out for MDP3 (CONFIG_FB_MSM_MDP303) builds:
 * there is no MDP4 core, so it is never considered "in reset".
 */
unsigned is_mdp4_hw_reset(void)
{
	return 0;
}
/* MDP4 hardware init stub for MDP3 builds -- nothing to program. */
void mdp4_hw_init(void)
{
	/* empty */
}
  1961. #endif
  1962. static int mdp_on(struct platform_device *pdev)
  1963. {
  1964. int ret = 0;
  1965. struct msm_fb_data_type *mfd;
  1966. mfd = platform_get_drvdata(pdev);
  1967. pr_debug("%s:+\n", __func__);
  1968. if (mdp_rev >= MDP_REV_40) {
  1969. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  1970. mdp_clk_ctrl(1);
  1971. mdp4_hw_init();
  1972. outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
  1973. if (mfd->panel.type == MIPI_CMD_PANEL) {
  1974. mdp_vsync_cfg_regs(mfd, FALSE);
  1975. mdp4_dsi_cmd_on(pdev);
  1976. } else if (mfd->panel.type == MIPI_VIDEO_PANEL) {
  1977. mdp4_dsi_video_on(pdev);
  1978. } else if (mfd->panel.type == HDMI_PANEL ||
  1979. mfd->panel.type == LCDC_PANEL ||
  1980. mfd->panel.type == LVDS_PANEL) {
  1981. mdp4_lcdc_on(pdev);
  1982. } else if (mfd->panel.type == MDDI_PANEL) {
  1983. mdp_vsync_cfg_regs(mfd, FALSE);
  1984. mdp4_mddi_on(pdev);
  1985. }
  1986. mdp_clk_ctrl(0);
  1987. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  1988. }
  1989. if (mdp_rev == MDP_REV_303 && mfd->panel.type == MIPI_CMD_PANEL) {
  1990. vsync_cntrl.dev = mfd->fbi->dev;
  1991. atomic_set(&vsync_cntrl.suspend, 0);
  1992. atomic_set(&vsync_cntrl.vsync_resume, 1);
  1993. }
  1994. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  1995. ret = panel_next_on(pdev);
  1996. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  1997. mdp_histogram_ctrl_all(TRUE);
  1998. mdp_restore_rgb();
  1999. if (ret == 0)
  2000. ret = panel_next_late_init(pdev);
  2001. pr_debug("%s:-\n", __func__);
  2002. return ret;
  2003. }
  2004. static int mdp_resource_initialized;
  2005. static struct msm_panel_common_pdata *mdp_pdata;
  2006. uint32 mdp_hw_revision;
  2007. /*
  2008. * mdp_hw_revision:
  2009. * 0 == V1
  2010. * 1 == V2
  2011. * 2 == V2.1
  2012. *
  2013. */
  2014. void mdp_hw_version(void)
  2015. {
  2016. char *cp;
  2017. uint32 *hp;
  2018. if (mdp_pdata == NULL)
  2019. return;
  2020. mdp_hw_revision = MDP4_REVISION_NONE;
  2021. if (mdp_pdata->hw_revision_addr == 0)
  2022. return;
  2023. /* tlmmgpio2 shadow */
  2024. cp = (char *)ioremap(mdp_pdata->hw_revision_addr, 0x16);
  2025. if (cp == NULL)
  2026. return;
  2027. hp = (uint32 *)cp; /* HW_REVISION_NUMBER */
  2028. mdp_hw_revision = *hp;
  2029. iounmap(cp);
  2030. mdp_hw_revision >>= 28; /* bit 31:28 */
  2031. mdp_hw_revision &= 0x0f;
  2032. MSM_FB_DEBUG("%s: mdp_hw_revision=%x\n",
  2033. __func__, mdp_hw_revision);
  2034. }
  2035. #ifdef CONFIG_MSM_BUS_SCALING
#ifndef MDP_BUS_VECTOR_ENTRY
/* One MDP-port-0 -> EBI bandwidth vector (ab/ib values in bytes/sec). */
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
	{ \
		.src = MSM_BUS_MASTER_MDP_PORT0, \
		.dst = MSM_BUS_SLAVE_EBI_CH0, \
		.ab = (ab_val), \
		.ib = (ib_val), \
	}
#endif
/*
 * Entry 0 hold 0 request
 * Entry 1 and 2 do ping pong request
 */
static struct msm_bus_vectors mdp_bus_vectors[] = {
	MDP_BUS_VECTOR_ENTRY(0, 0),
	MDP_BUS_VECTOR_ENTRY( 128000000, 160000000),
	MDP_BUS_VECTOR_ENTRY( 128000000, 160000000),
};
/* One usecase per vector; paths are wired in mdp_bus_scale_register(). */
static struct msm_bus_paths mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
static struct msm_bus_scale_pdata mdp_bus_scale_table = {
	.usecase = mdp_bus_usecases,
	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
	.name = "mdp",
};
/* Client handle from msm_bus_scale_register_client(); 0 = unregistered. */
static uint32_t mdp_bus_scale_handle;
  2061. static int mdp_bus_scale_register(void)
  2062. {
  2063. struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
  2064. int i;
  2065. for (i = 0; i < bus_pdata->num_usecases; i++) {
  2066. mdp_bus_usecases[i].num_paths = 1;
  2067. mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
  2068. }
  2069. mdp_bus_scale_handle = msm_bus_scale_register_client(bus_pdata);
  2070. if (!mdp_bus_scale_handle) {
  2071. pr_err("%s: not able to get bus scale!\n", __func__);
  2072. return -ENOMEM;
  2073. }
  2074. return 0;
  2075. }
  2076. int mdp_bus_scale_update_request(u64 ab, u64 ib)
  2077. {
  2078. static int bus_index = 1;
  2079. if (mdp_bus_scale_handle < 1) {
  2080. pr_err("%s invalid bus handle\n", __func__);
  2081. return -EINVAL;
  2082. }
  2083. if (!ab)
  2084. return msm_bus_scale_client_update_request
  2085. (mdp_bus_scale_handle, 0);
  2086. /* ping pong bus_index between table entry 1 and 2 */
  2087. bus_index++;
  2088. bus_index = (bus_index > 2) ? 1 : bus_index;
  2089. mdp_bus_usecases[bus_index].vectors->ab = min(ab, mdp_max_bw);
  2090. ib = max(ib, ab);
  2091. mdp_bus_usecases[bus_index].vectors->ib = min(ib, mdp_max_bw);
  2092. pr_debug("%s: handle=%d index=%d ab=%llu ib=%llu\n", __func__,
  2093. (u32)mdp_bus_scale_handle, bus_index,
  2094. mdp_bus_usecases[bus_index].vectors->ab,
  2095. mdp_bus_usecases[bus_index].vectors->ib);
  2096. return msm_bus_scale_client_update_request
  2097. (mdp_bus_scale_handle, bus_index);
  2098. }
  2099. #endif
  2100. DEFINE_MUTEX(mdp_clk_lock);
  2101. int mdp_set_core_clk(u32 rate)
  2102. {
  2103. int ret = -EINVAL;
  2104. if (mdp_clk)
  2105. ret = clk_set_rate(mdp_clk, rate);
  2106. if (ret)
  2107. pr_err("%s unable to set mdp clk rate", __func__);
  2108. else
  2109. pr_debug("%s mdp clk rate to be set %d: actual rate %ld\n",
  2110. __func__, rate, clk_get_rate(mdp_clk));
  2111. return ret;
  2112. }
/*
 * mdp_clk_round_rate() - ask the clock framework for the closest
 * supported MDP core clock rate to @rate.
 *
 * NOTE(review): no NULL check on mdp_clk here, unlike
 * mdp_set_core_clk()/mdp_get_core_clk() -- presumably only called
 * after mdp_irq_clk_setup() succeeds; verify against callers.
 */
int mdp_clk_round_rate(u32 rate)
{
	return clk_round_rate(mdp_clk, rate);
}
  2117. unsigned long mdp_get_core_clk(void)
  2118. {
  2119. unsigned long clk_rate = 0;
  2120. if (mdp_clk) {
  2121. mutex_lock(&mdp_clk_lock);
  2122. clk_rate = clk_get_rate(mdp_clk);
  2123. mutex_unlock(&mdp_clk_lock);
  2124. }
  2125. return clk_rate;
  2126. }
/*
 * mdp_irq_clk_setup() - probe-time acquisition of the MDP IRQ,
 * regulators and clocks.
 *
 * @cont_splashScreen: non-zero when the bootloader left a splash image
 * on screen; then the current core clock rate is kept instead of being
 * reprogrammed, and the MDP pipeline reset is skipped.
 *
 * Returns 0 on success or a negative errno; on failure the IRQ (and,
 * where applicable, the core clock) acquired so far are released.
 */
static int mdp_irq_clk_setup(struct platform_device *pdev,
	char cont_splashScreen)
{
	int ret;

#ifdef CONFIG_FB_MSM_MDP40
	ret = request_irq(mdp_irq, mdp4_isr, IRQF_DISABLED, "MDP", 0);
#else
	ret = request_irq(mdp_irq, mdp_isr, IRQF_DISABLED, "MDP", 0);
#endif
	if (ret) {
		printk(KERN_ERR "mdp request_irq() failed!\n");
		return ret;
	}
	/* keep the line masked until the first real enable */
	disable_irq(mdp_irq);

	/* DSI PLL supplies are optional; absence is not treated as error */
	dsi_pll_vdda = regulator_get(&pdev->dev, "dsi_pll_vdda");
	if (IS_ERR(dsi_pll_vdda)) {
		dsi_pll_vdda = NULL;
	} else {
		if (mdp_rev == MDP_REV_42 || mdp_rev == MDP_REV_44) {
			ret = regulator_set_voltage(dsi_pll_vdda, 1200000,
				1200000);
			if (ret) {
				pr_err("set_voltage failed for dsi_pll_vdda, ret=%d\n",
					ret);
			}
		}
	}

	dsi_pll_vddio = regulator_get(&pdev->dev, "dsi_pll_vddio");
	if (IS_ERR(dsi_pll_vddio)) {
		dsi_pll_vddio = NULL;
	} else {
		if (mdp_rev == MDP_REV_42) {
			ret = regulator_set_voltage(dsi_pll_vddio, 1800000,
				1800000);
			if (ret) {
				pr_err("set_voltage failed for dsi_pll_vddio, ret=%d\n",
					ret);
			}
		}
	}

	/* MDP power footswitch; optional as well */
	footswitch = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(footswitch)) {
		footswitch = NULL;
	} else {
		regulator_enable(footswitch);
		mdp_footswitch_on = 1;
	}

	/* core clock is mandatory */
	mdp_clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp_clk)) {
		ret = PTR_ERR(mdp_clk);
		printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
		free_irq(mdp_irq, 0);
		return ret;
	}

	/* interface clock is optional */
	mdp_pclk = clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp_pclk))
		mdp_pclk = NULL;

	/* LUT clock exists only from MDP rev 4.2 on, and is then required */
	if (mdp_rev >= MDP_REV_42) {
		mdp_lut_clk = clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp_lut_clk)) {
			ret = PTR_ERR(mdp_lut_clk);
			pr_err("can't get mdp_clk error:%d!\n", ret);
			clk_put(mdp_clk);
			free_irq(mdp_irq, 0);
			return ret;
		}
	} else {
		mdp_lut_clk = NULL;
	}

#ifdef CONFIG_FB_MSM_MDP40
	if (mdp_pdata)
		mdp_max_clk = mdp_pdata->mdp_max_clk;
	else
		pr_err("%s cannot get mdp max clk!\n", __func__);

	if (!mdp_max_clk)
		pr_err("%s mdp max clk is zero!\n", __func__);

	/* with a live splash screen, keep whatever rate the bootloader set */
	if (cont_splashScreen)
		mdp_clk_rate = clk_get_rate(mdp_clk);
	else
		mdp_clk_rate = mdp_max_clk;

	mutex_lock(&mdp_clk_lock);
	clk_set_rate(mdp_clk, mdp_clk_rate);
	if (mdp_lut_clk != NULL)
		clk_set_rate(mdp_lut_clk, mdp_clk_rate);
	mutex_unlock(&mdp_clk_lock);

	MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
#endif

	/* no splash to preserve: put the MDP 4.2 pipeline in a known state */
	if (mdp_rev == MDP_REV_42 && !cont_splashScreen) {
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
		/* DSI Video Timing generator disable */
		outpdw(MDP_BASE + 0xE0000, 0x0);
		/* Clear MDP Interrupt Enable register */
		outpdw(MDP_BASE + 0x50, 0x0);
		/* Set Overlay Proc 0 to reset state */
		outpdw(MDP_BASE + 0x10004, 0x3);
		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
	}
	return 0;
}
  2226. static int mdp_probe(struct platform_device *pdev)
  2227. {
  2228. struct platform_device *msm_fb_dev = NULL;
  2229. struct msm_fb_data_type *mfd;
  2230. struct msm_fb_panel_data *pdata = NULL;
  2231. int rc;
  2232. resource_size_t size ;
  2233. unsigned long flag;
  2234. u32 frame_rate;
  2235. #ifdef CONFIG_FB_MSM_MDP40
  2236. int intf, if_no;
  2237. #endif
  2238. #if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
  2239. struct mipi_panel_info *mipi;
  2240. #endif
  2241. if ((pdev->id == 0) && (pdev->num_resources > 0)) {
  2242. mdp_init_pdev = pdev;
  2243. mdp_pdata = pdev->dev.platform_data;
  2244. size = resource_size(&pdev->resource[0]);
  2245. msm_mdp_base = ioremap(pdev->resource[0].start, size);
  2246. MSM_FB_DEBUG("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
  2247. (int)pdev->resource[0].start, (int)msm_mdp_base);
  2248. if (unlikely(!msm_mdp_base))
  2249. return -ENOMEM;
  2250. mdp_irq = platform_get_irq(pdev, 0);
  2251. if (mdp_irq < 0) {
  2252. pr_err("mdp: can not get mdp irq\n");
  2253. return -ENOMEM;
  2254. }
  2255. mdp_rev = mdp_pdata->mdp_rev;
  2256. mdp_iommu_split_domain = mdp_pdata->mdp_iommu_split_domain;
  2257. rc = mdp_irq_clk_setup(pdev, mdp_pdata->cont_splash_enabled);
  2258. if (rc)
  2259. return rc;
  2260. mdp_clk_ctrl(1);
  2261. mdp_hw_version();
  2262. /* initializing mdp hw */
  2263. #ifdef CONFIG_FB_MSM_MDP40
  2264. if (!(mdp_pdata->cont_splash_enabled))
  2265. mdp4_hw_init();
  2266. #else
  2267. mdp_hw_init(mdp_pdata->cont_splash_enabled);
  2268. #endif
  2269. #ifdef CONFIG_FB_MSM_OVERLAY
  2270. mdp_hw_cursor_init();
  2271. #endif
  2272. if (!(mdp_pdata->cont_splash_enabled))
  2273. mdp_clk_ctrl(0);
  2274. mdp_resource_initialized = 1;
  2275. return 0;
  2276. }
  2277. if (!mdp_resource_initialized)
  2278. return -EPERM;
  2279. mfd = platform_get_drvdata(pdev);
  2280. if (!mfd)
  2281. return -ENODEV;
  2282. if (mfd->key != MFD_KEY)
  2283. return -EINVAL;
  2284. if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
  2285. return -ENOMEM;
  2286. msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
  2287. if (!msm_fb_dev)
  2288. return -ENOMEM;
  2289. /* link to the latest pdev */
  2290. mfd->pdev = msm_fb_dev;
  2291. mfd->mdp_rev = mdp_rev;
  2292. mfd->vsync_init = NULL;
  2293. mfd->ov0_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
  2294. mfd->ov1_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
  2295. memset((void *)mfd->ov0_wb_buf, 0, sizeof(struct mdp_buf_type));
  2296. memset((void *)mfd->ov1_wb_buf, 0, sizeof(struct mdp_buf_type));
  2297. if (mdp_pdata) {
  2298. mfd->ov0_wb_buf->size = mdp_pdata->ov0_wb_size;
  2299. mfd->ov1_wb_buf->size = mdp_pdata->ov1_wb_size;
  2300. mfd->mem_hid = mdp_pdata->mem_hid;
  2301. mfd->avtimer_phy = mdp_pdata->avtimer_phy;
  2302. } else {
  2303. mfd->ov0_wb_buf->size = 0;
  2304. mfd->ov1_wb_buf->size = 0;
  2305. mfd->mem_hid = 0;
  2306. mfd->avtimer_phy = 0;
  2307. }
  2308. /* initialize Post Processing data*/
  2309. mdp_hist_lut_init();
  2310. mdp_histogram_init();
  2311. mdp_pp_initialized = TRUE;
  2312. /* add panel data */
  2313. if (platform_device_add_data
  2314. (msm_fb_dev, pdev->dev.platform_data,
  2315. sizeof(struct msm_fb_panel_data))) {
  2316. printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
  2317. rc = -ENOMEM;
  2318. goto mdp_probe_err;
  2319. }
  2320. if (mdp_pdata) {
  2321. if (mdp_pdata->cont_splash_enabled &&
  2322. mfd->panel_info.pdest == DISPLAY_1) {
  2323. char *cp;
  2324. uint32 bpp = 3;
  2325. /*read panel wxh and calculate splash screen
  2326. size*/
  2327. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  2328. mdp_pdata->splash_screen_size =
  2329. inpdw(MDP_BASE + 0x90004);
  2330. mdp_pdata->splash_screen_size =
  2331. (((mdp_pdata->splash_screen_size >> 16) &
  2332. 0x00000FFF) * (
  2333. mdp_pdata->splash_screen_size &
  2334. 0x00000FFF)) * bpp;
  2335. mdp_pdata->splash_screen_addr =
  2336. inpdw(MDP_BASE + 0x90008);
  2337. mfd->copy_splash_buf = dma_alloc_coherent(NULL,
  2338. mdp_pdata->splash_screen_size,
  2339. (dma_addr_t *) &(mfd->copy_splash_phys),
  2340. GFP_KERNEL);
  2341. if (!mfd->copy_splash_buf) {
  2342. pr_err("DMA ALLOC FAILED for SPLASH\n");
  2343. return -ENOMEM;
  2344. }
  2345. cp = (char *)ioremap(
  2346. mdp_pdata->splash_screen_addr,
  2347. mdp_pdata->splash_screen_size);
  2348. if (!cp) {
  2349. pr_err("IOREMAP FAILED for SPLASH\n");
  2350. return -ENOMEM;
  2351. }
  2352. memcpy(mfd->copy_splash_buf, cp,
  2353. mdp_pdata->splash_screen_size);
  2354. MDP_OUTP(MDP_BASE + 0x90008,
  2355. mfd->copy_splash_phys);
  2356. }
  2357. mfd->cont_splash_done = (1 - mdp_pdata->cont_splash_enabled);
  2358. }
  2359. /* data chain */
  2360. pdata = msm_fb_dev->dev.platform_data;
  2361. pdata->on = mdp_on;
  2362. pdata->off = mdp_off;
  2363. pdata->fps_level_change = mdp_fps_level_change;
  2364. pdata->late_init = NULL;
  2365. pdata->next = pdev;
  2366. mdp_clk_ctrl(1);
  2367. mdp_prim_panel_type = mfd->panel.type;
  2368. switch (mfd->panel.type) {
  2369. case EXT_MDDI_PANEL:
  2370. case MDDI_PANEL:
  2371. case EBI2_PANEL:
  2372. INIT_WORK(&mfd->dma_update_worker,
  2373. mdp_lcd_update_workqueue_handler);
  2374. INIT_WORK(&mfd->vsync_resync_worker,
  2375. mdp_vsync_resync_workqueue_handler);
  2376. mfd->hw_refresh = FALSE;
  2377. if (mfd->panel.type == MDDI_PANEL)
  2378. mdp4_mddi_rdptr_init(0);
  2379. if (mfd->panel.type == EXT_MDDI_PANEL) {
  2380. /* 15 fps -> 66 msec */
  2381. mfd->refresh_timer_duration = (66 * HZ / 1000);
  2382. } else {
  2383. /* 24 fps -> 42 msec */
  2384. mfd->refresh_timer_duration = (42 * HZ / 1000);
  2385. }
  2386. #ifdef CONFIG_FB_MSM_MDP22
  2387. mfd->dma_fnc = mdp_dma2_update;
  2388. mfd->dma = &dma2_data;
  2389. #else
  2390. if (mfd->panel_info.pdest == DISPLAY_1) {
  2391. #if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
  2392. mfd->dma_fnc = mdp4_mddi_overlay;
  2393. mfd->cursor_update = mdp4_mddi_overlay_cursor;
  2394. #else
  2395. mfd->dma_fnc = mdp_dma2_update;
  2396. #endif
  2397. mfd->dma = &dma2_data;
  2398. mfd->lut_update = mdp_lut_update_nonlcdc;
  2399. mfd->do_histogram = mdp_do_histogram;
  2400. mfd->start_histogram = mdp_histogram_start;
  2401. mfd->stop_histogram = mdp_histogram_stop;
  2402. } else {
  2403. mfd->dma_fnc = mdp_dma_s_update;
  2404. mfd->dma = &dma_s_data;
  2405. }
  2406. #endif
  2407. if (mdp_pdata)
  2408. mfd->vsync_gpio = mdp_pdata->gpio;
  2409. else
  2410. mfd->vsync_gpio = -1;
  2411. #ifdef CONFIG_FB_MSM_MDP40
  2412. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  2413. spin_lock_irqsave(&mdp_spin_lock, flag);
  2414. mdp_intr_mask |= INTR_OVERLAY0_DONE;
  2415. if (mdp_hw_revision < MDP4_REVISION_V2_1) {
  2416. /* dmas dmap switch */
  2417. mdp_intr_mask |= INTR_DMA_S_DONE;
  2418. }
  2419. outp32(MDP_INTR_ENABLE, mdp_intr_mask);
  2420. spin_unlock_irqrestore(&mdp_spin_lock, flag);
  2421. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  2422. if (mfd->panel.type == EBI2_PANEL)
  2423. intf = EBI2_INTF;
  2424. else
  2425. intf = MDDI_INTF;
  2426. if (mfd->panel_info.pdest == DISPLAY_1)
  2427. if_no = PRIMARY_INTF_SEL;
  2428. else
  2429. if_no = SECONDARY_INTF_SEL;
  2430. mdp4_display_intf_sel(if_no, intf);
  2431. #endif
  2432. mdp_config_vsync(mdp_init_pdev, mfd);
  2433. break;
  2434. #ifdef CONFIG_FB_MSM_MIPI_DSI
  2435. case MIPI_VIDEO_PANEL:
  2436. #ifndef CONFIG_FB_MSM_MDP303
  2437. mipi = &mfd->panel_info.mipi;
  2438. mfd->vsync_init = mdp4_dsi_vsync_init;
  2439. mfd->vsync_show = mdp4_dsi_video_show_event;
  2440. mfd->hw_refresh = TRUE;
  2441. mfd->dma_fnc = mdp4_dsi_video_overlay;
  2442. mfd->lut_update = mdp_lut_update_lcdc;
  2443. mfd->do_histogram = mdp_do_histogram;
  2444. mfd->start_histogram = mdp_histogram_start;
  2445. mfd->stop_histogram = mdp_histogram_stop;
  2446. if (mfd->panel_info.pdest == DISPLAY_1) {
  2447. if_no = PRIMARY_INTF_SEL;
  2448. mfd->dma = &dma2_data;
  2449. } else {
  2450. if_no = EXTERNAL_INTF_SEL;
  2451. mfd->dma = &dma_e_data;
  2452. }
  2453. mdp4_display_intf_sel(if_no, DSI_VIDEO_INTF);
  2454. #else
  2455. pdata->on = mdp_dsi_video_on;
  2456. pdata->off = mdp_dsi_video_off;
  2457. mfd->hw_refresh = TRUE;
  2458. mfd->dma_fnc = mdp_dsi_video_update;
  2459. mfd->do_histogram = mdp_do_histogram;
  2460. mfd->start_histogram = mdp_histogram_start;
  2461. mfd->stop_histogram = mdp_histogram_stop;
  2462. mfd->vsync_ctrl = mdp_dma_video_vsync_ctrl;
  2463. mfd->vsync_show = mdp_dma_video_show_event;
  2464. if (mfd->panel_info.pdest == DISPLAY_1)
  2465. mfd->dma = &dma2_data;
  2466. else {
  2467. printk(KERN_ERR "Invalid Selection of destination panel\n");
  2468. rc = -ENODEV;
  2469. mdp_clk_ctrl(0);
  2470. goto mdp_probe_err;
  2471. }
  2472. #endif
  2473. if (mdp_rev >= MDP_REV_40)
  2474. mfd->cursor_update = mdp_hw_cursor_sync_update;
  2475. else
  2476. mfd->cursor_update = mdp_hw_cursor_update;
  2477. break;
  2478. case MIPI_CMD_PANEL:
  2479. #ifndef CONFIG_FB_MSM_MDP303
  2480. mfd->dma_fnc = mdp4_dsi_cmd_overlay;
  2481. mipi = &mfd->panel_info.mipi;
  2482. mfd->vsync_init = mdp4_dsi_rdptr_init;
  2483. mfd->vsync_show = mdp4_dsi_cmd_show_event;
  2484. if (mfd->panel_info.pdest == DISPLAY_1) {
  2485. if_no = PRIMARY_INTF_SEL;
  2486. mfd->dma = &dma2_data;
  2487. } else {
  2488. if_no = SECONDARY_INTF_SEL;
  2489. mfd->dma = &dma_s_data;
  2490. }
  2491. mfd->lut_update = mdp_lut_update_nonlcdc;
  2492. mfd->do_histogram = mdp_do_histogram;
  2493. mfd->start_histogram = mdp_histogram_start;
  2494. mfd->stop_histogram = mdp_histogram_stop;
  2495. mdp4_display_intf_sel(if_no, DSI_CMD_INTF);
  2496. #else
  2497. mfd->dma_fnc = mdp_dma2_update;
  2498. mfd->do_histogram = mdp_do_histogram;
  2499. mfd->start_histogram = mdp_histogram_start;
  2500. mfd->stop_histogram = mdp_histogram_stop;
  2501. mfd->vsync_ctrl = mdp_dma_vsync_ctrl;
  2502. mfd->vsync_show = mdp_dma_show_event;
  2503. if (mfd->panel_info.pdest == DISPLAY_1)
  2504. mfd->dma = &dma2_data;
  2505. else {
  2506. printk(KERN_ERR "Invalid Selection of destination panel\n");
  2507. rc = -ENODEV;
  2508. mdp_clk_ctrl(0);
  2509. goto mdp_probe_err;
  2510. }
  2511. INIT_WORK(&mfd->dma_update_worker,
  2512. mdp_lcd_update_workqueue_handler);
  2513. #endif
  2514. mdp_config_vsync(mdp_init_pdev, mfd);
  2515. break;
  2516. #endif
  2517. #ifdef CONFIG_FB_MSM_DTV
  2518. case DTV_PANEL:
  2519. mfd->vsync_init = mdp4_dtv_vsync_init;
  2520. mfd->vsync_show = mdp4_dtv_show_event;
  2521. pdata->on = mdp4_dtv_on;
  2522. pdata->off = mdp4_dtv_off;
  2523. mfd->hw_refresh = TRUE;
  2524. mfd->cursor_update = mdp_hw_cursor_sync_update;
  2525. mfd->dma_fnc = mdp4_dtv_overlay;
  2526. mfd->dma = &dma_e_data;
  2527. mfd->do_histogram = mdp_do_histogram;
  2528. mfd->start_histogram = mdp_histogram_start;
  2529. mfd->stop_histogram = mdp_histogram_stop;
  2530. mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
  2531. break;
  2532. #endif
  2533. case HDMI_PANEL:
  2534. case LCDC_PANEL:
  2535. case LVDS_PANEL:
  2536. #ifdef CONFIG_FB_MSM_MDP303
  2537. pdata->on = mdp_lcdc_on;
  2538. pdata->off = mdp_lcdc_off;
  2539. #endif
  2540. mfd->hw_refresh = TRUE;
  2541. #if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
  2542. mfd->cursor_update = mdp_hw_cursor_sync_update;
  2543. #else
  2544. mfd->cursor_update = mdp_hw_cursor_update;
  2545. #endif
  2546. #ifndef CONFIG_FB_MSM_MDP22
  2547. mfd->lut_update = mdp_lut_update_lcdc;
  2548. mfd->do_histogram = mdp_do_histogram;
  2549. mfd->start_histogram = mdp_histogram_start;
  2550. mfd->stop_histogram = mdp_histogram_stop;
  2551. #endif
  2552. #ifdef CONFIG_FB_MSM_OVERLAY
  2553. mfd->dma_fnc = mdp4_lcdc_overlay;
  2554. #else
  2555. mfd->dma_fnc = mdp_lcdc_update;
  2556. #endif
  2557. #ifdef CONFIG_FB_MSM_MDP40
  2558. mfd->vsync_init = mdp4_lcdc_vsync_init;
  2559. mfd->vsync_show = mdp4_lcdc_show_event;
  2560. if (mfd->panel.type == HDMI_PANEL) {
  2561. mfd->dma = &dma_e_data;
  2562. mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
  2563. } else {
  2564. mfd->dma = &dma2_data;
  2565. mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
  2566. }
  2567. #else
  2568. mfd->dma = &dma2_data;
  2569. mfd->vsync_ctrl = mdp_dma_lcdc_vsync_ctrl;
  2570. mfd->vsync_show = mdp_dma_lcdc_show_event;
  2571. spin_lock_irqsave(&mdp_spin_lock, flag);
  2572. mdp_intr_mask &= ~MDP_DMA_P_DONE;
  2573. outp32(MDP_INTR_ENABLE, mdp_intr_mask);
  2574. spin_unlock_irqrestore(&mdp_spin_lock, flag);
  2575. #endif
  2576. break;
  2577. case TV_PANEL:
  2578. #if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_TVOUT)
  2579. pdata->on = mdp4_atv_on;
  2580. pdata->off = mdp4_atv_off;
  2581. mfd->dma_fnc = mdp4_atv_overlay;
  2582. mfd->dma = &dma_e_data;
  2583. mdp4_display_intf_sel(EXTERNAL_INTF_SEL, TV_INTF);
  2584. #else
  2585. pdata->on = mdp_dma3_on;
  2586. pdata->off = mdp_dma3_off;
  2587. mfd->hw_refresh = TRUE;
  2588. mfd->dma_fnc = mdp_dma3_update;
  2589. mfd->dma = &dma3_data;
  2590. #endif
  2591. break;
  2592. #ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
  2593. case WRITEBACK_PANEL:
  2594. {
  2595. unsigned int mdp_version;
  2596. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON,
  2597. FALSE);
  2598. mdp_version = inpdw(MDP_BASE + 0x0);
  2599. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
  2600. FALSE);
  2601. if (mdp_version < 0x04030303) {
  2602. pr_err("%s: writeback panel not supprted\n",
  2603. __func__);
  2604. platform_device_put(msm_fb_dev);
  2605. mdp_clk_ctrl(0);
  2606. return -ENODEV;
  2607. }
  2608. mdp4_wfd_init(0);
  2609. pdata->on = mdp4_overlay_writeback_on;
  2610. pdata->off = mdp4_overlay_writeback_off;
  2611. mfd->dma_fnc = mdp4_writeback_overlay;
  2612. mfd->dma = &dma_wb_data;
  2613. mdp4_display_intf_sel(EXTERNAL_INTF_SEL, DTV_INTF);
  2614. }
  2615. break;
  2616. #endif
  2617. default:
  2618. printk(KERN_ERR "mdp_probe: unknown device type!\n");
  2619. rc = -ENODEV;
  2620. mdp_clk_ctrl(0);
  2621. goto mdp_probe_err;
  2622. }
  2623. if (mdp_rev >= MDP_REV_40) {
  2624. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
  2625. mdp4_display_intf = inpdw(MDP_BASE + 0x0038);
  2626. mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
  2627. }
  2628. frame_rate = mdp_get_panel_framerate(mfd);
  2629. if (frame_rate) {
  2630. mfd->panel_info.frame_interval = 1000 / frame_rate;
  2631. mfd->cpu_pm_hdl = add_event_timer(NULL, (void *)mfd);
  2632. }
  2633. mdp_clk_ctrl(0);
  2634. #ifdef CONFIG_MSM_BUS_SCALING
  2635. if (mdp_bus_scale_register())
  2636. return -ENOMEM;
  2637. /* req bus bandwidth immediately */
  2638. if (!(mfd->cont_splash_done))
  2639. mdp_bus_scale_update_request
  2640. (MDP_BUS_SCALE_INIT, MDP_BUS_SCALE_INIT);
  2641. #endif
  2642. /* set driver data */
  2643. platform_set_drvdata(msm_fb_dev, mfd);
  2644. rc = platform_device_add(msm_fb_dev);
  2645. if (rc) {
  2646. goto mdp_probe_err;
  2647. }
  2648. pm_runtime_set_active(&pdev->dev);
  2649. pm_runtime_enable(&pdev->dev);
  2650. pdev_list[pdev_list_cnt++] = pdev;
  2651. mdp4_extn_disp = 0;
  2652. /*
  2653. * vsync_init call not required for mdp3.
  2654. * vsync_init call required for mdp4 targets.
  2655. */
  2656. if ((mfd->vsync_init != NULL) || (mdp_rev < MDP_REV_40)) {
  2657. if (mdp_rev >= MDP_REV_40)
  2658. mfd->vsync_init(0);
  2659. if (!mfd->vsync_sysfs_created) {
  2660. mfd->dev_attr.attr.name = "vsync_event";
  2661. mfd->dev_attr.attr.mode = S_IRUGO;
  2662. mfd->dev_attr.show = mfd->vsync_show;
  2663. sysfs_attr_init(&mfd->dev_attr.attr);
  2664. rc = sysfs_create_file(&mfd->fbi->dev->kobj,
  2665. &mfd->dev_attr.attr);
  2666. if (rc) {
  2667. pr_err("%s: sysfs creation failed, ret=%d\n",
  2668. __func__, rc);
  2669. return rc;
  2670. }
  2671. kobject_uevent(&mfd->fbi->dev->kobj, KOBJ_ADD);
  2672. pr_debug("%s: kobject_uevent(KOBJ_ADD)\n", __func__);
  2673. mfd->vsync_sysfs_created = 1;
  2674. }
  2675. }
  2676. return 0;
  2677. mdp_probe_err:
  2678. platform_device_put(msm_fb_dev);
  2679. #ifdef CONFIG_MSM_BUS_SCALING
  2680. if (mdp_bus_scale_handle > 0)
  2681. msm_bus_scale_unregister_client(mdp_bus_scale_handle);
  2682. #endif
  2683. return rc;
  2684. }
  2685. void mdp_footswitch_ctrl(boolean on)
  2686. {
  2687. mutex_lock(&mdp_suspend_mutex);
  2688. if (!mdp_suspended || mdp4_extn_disp || !footswitch ||
  2689. mdp_rev <= MDP_REV_41) {
  2690. mutex_unlock(&mdp_suspend_mutex);
  2691. return;
  2692. }
  2693. if (dsi_pll_vddio)
  2694. regulator_enable(dsi_pll_vddio);
  2695. if (dsi_pll_vdda)
  2696. regulator_enable(dsi_pll_vdda);
  2697. mipi_dsi_prepare_clocks();
  2698. mipi_dsi_ahb_ctrl(1);
  2699. mipi_dsi_phy_ctrl(1);
  2700. mipi_dsi_clk_enable();
  2701. if (on && !mdp_footswitch_on) {
  2702. pr_debug("Enable MDP FS\n");
  2703. regulator_enable(footswitch);
  2704. mdp_footswitch_on = 1;
  2705. } else if (!on && mdp_footswitch_on) {
  2706. pr_debug("Disable MDP FS\n");
  2707. regulator_disable(footswitch);
  2708. mdp_footswitch_on = 0;
  2709. }
  2710. mipi_dsi_clk_disable();
  2711. mipi_dsi_phy_ctrl(0);
  2712. mipi_dsi_ahb_ctrl(0);
  2713. mipi_dsi_unprepare_clocks();
  2714. if (dsi_pll_vdda)
  2715. regulator_disable(dsi_pll_vdda);
  2716. if (dsi_pll_vddio)
  2717. regulator_disable(dsi_pll_vddio);
  2718. mutex_unlock(&mdp_suspend_mutex);
  2719. }
  2720. void mdp_free_splash_buffer(struct msm_fb_data_type *mfd)
  2721. {
  2722. if (mfd->copy_splash_buf) {
  2723. dma_free_coherent(NULL, mdp_pdata->splash_screen_size,
  2724. mfd->copy_splash_buf,
  2725. (dma_addr_t) mfd->copy_splash_phys);
  2726. mfd->copy_splash_buf = NULL;
  2727. }
  2728. }
#ifdef CONFIG_PM
/*
 * mdp_suspend_sub() - quiesce the MDP before suspend.
 *
 * Stops the deferred power-down worker, waits for any in-flight PPP
 * (blit) operation to finish, forces the master block off, and marks
 * the driver suspended (which permits mdp_footswitch_ctrl() to act).
 * The ordering here is deliberate; do not reorder the steps.
 */
static void mdp_suspend_sub(void)
{
	/* cancel pipe ctrl worker */
	cancel_delayed_work(&mdp_pipe_ctrl_worker);

	/* for worker that can't be cancelled (already running), wait for it */
	flush_workqueue(mdp_pipe_ctrl_wq);

	/* let's wait for PPP completion */
	while (atomic_read(&mdp_block_power_cnt[MDP_PPP_BLOCK]) > 0)
		cpu_relax();

	/* try to power down */
	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	/* flag checked (under the same mutex) by mdp_footswitch_ctrl() */
	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = TRUE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif
  2746. #if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
  2747. static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
  2748. {
  2749. if (pdev->id == 0) {
  2750. mdp_suspend_sub();
  2751. if (mdp_current_clk_on) {
  2752. printk(KERN_WARNING"MDP suspend failed\n");
  2753. return -EBUSY;
  2754. }
  2755. }
  2756. return 0;
  2757. }
  2758. #endif
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * mdp_early_suspend() - early-suspend hook: quiesce the MDP, blank the
 * DTV path, then drop the footswitch. Order matters: the footswitch is
 * only released after mdp_suspend_sub() has set mdp_suspended.
 */
static void mdp_early_suspend(struct early_suspend *h)
{
	mdp_suspend_sub();
#ifdef CONFIG_FB_MSM_DTV
	/* presumably commits a black frame on mixer1 so the external
	 * display shows no stale content — NOTE(review): inferred from the
	 * function names, confirm against their definitions */
	mdp4_solidfill_commit(MDP4_MIXER1);
	mdp4_dtv_set_black_screen();
#endif
	mdp_footswitch_ctrl(FALSE);
}

/*
 * mdp_early_resume() - early-resume hook: restore footswitch power first,
 * then clear the suspended flag under the suspend mutex.
 */
static void mdp_early_resume(struct early_suspend *h)
{
	mdp_footswitch_ctrl(TRUE);

	mutex_lock(&mdp_suspend_mutex);
	mdp_suspended = FALSE;
	mutex_unlock(&mdp_suspend_mutex);
}
#endif
  2777. static int mdp_remove(struct platform_device *pdev)
  2778. {
  2779. if (footswitch != NULL)
  2780. regulator_put(footswitch);
  2781. /*free post processing memory*/
  2782. mdp_histogram_destroy();
  2783. mdp_hist_lut_destroy();
  2784. mdp_pp_initialized = FALSE;
  2785. iounmap(msm_mdp_base);
  2786. pm_runtime_disable(&pdev->dev);
  2787. #ifdef CONFIG_MSM_BUS_SCALING
  2788. if (mdp_bus_scale_handle > 0)
  2789. msm_bus_scale_unregister_client(mdp_bus_scale_handle);
  2790. #endif
  2791. return 0;
  2792. }
  2793. static int mdp_register_driver(void)
  2794. {
  2795. #ifdef CONFIG_HAS_EARLYSUSPEND
  2796. early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
  2797. early_suspend.suspend = mdp_early_suspend;
  2798. early_suspend.resume = mdp_early_resume;
  2799. register_early_suspend(&early_suspend);
  2800. #endif
  2801. return platform_driver_register(&mdp_driver);
  2802. }
  2803. static int __init mdp_driver_init(void)
  2804. {
  2805. int ret;
  2806. mdp_drv_init();
  2807. ret = mdp_register_driver();
  2808. if (ret) {
  2809. printk(KERN_ERR "mdp_register_driver() failed!\n");
  2810. return ret;
  2811. }
  2812. #if defined(CONFIG_MDP_DEBUG_FS)
  2813. mdp_debugfs_init();
  2814. #endif
  2815. return 0;
  2816. }
  2817. module_init(mdp_driver_init);