mdss_mdp.c

  1. /*
  2. * MDSS MDP Interface (used by framebuffer core)
  3. *
  4. * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
  5. * Copyright (C) 2007 Google Incorporated
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. */
  16. #define pr_fmt(fmt) "%s: " fmt, __func__
  17. #include <linux/clk.h>
  18. #include <linux/debugfs.h>
  19. #include <linux/delay.h>
  20. #include <linux/hrtimer.h>
  21. #include <linux/kernel.h>
  22. #include <linux/init.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/io.h>
  25. #include <linux/iommu.h>
  26. #include <linux/of.h>
  27. #include <linux/of_address.h>
  28. #include <linux/pm.h>
  29. #include <linux/pm_runtime.h>
  30. #include <linux/regulator/consumer.h>
  31. #include <linux/memory_alloc.h>
  32. #include <linux/module.h>
  33. #include <linux/mutex.h>
  34. #include <linux/sched.h>
  35. #include <linux/time.h>
  36. #include <linux/spinlock.h>
  37. #include <linux/semaphore.h>
  38. #include <linux/uaccess.h>
  39. #include <mach/board.h>
  40. #include <mach/clk.h>
  41. #include <mach/hardware.h>
  42. #include <mach/msm_bus.h>
  43. #include <mach/msm_bus_board.h>
  44. #include <mach/iommu.h>
  45. #include <mach/iommu_domains.h>
  46. #include <mach/memory.h>
  47. #include <mach/msm_memtypes.h>
  48. #include <mach/rpm-regulator-smd.h>
  49. #include <mach/scm.h>
  50. #include "mdss.h"
  51. #include "mdss_fb.h"
  52. #include "mdss_mdp.h"
  53. #include "mdss_panel.h"
  54. #include "mdss_debug.h"
  55. #define CREATE_TRACE_POINTS
  56. #include "mdss_mdp_trace.h"
  57. struct mdss_data_type *mdss_res;
  58. static int mdp_clk_cnt;
  59. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  60. void xlog(const char *name, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4, u32 data5);
  61. #endif
  62. static int mdss_fb_mem_get_iommu_domain(void)
  63. {
  64. return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
  65. }
  66. struct msm_mdp_interface mdp5 = {
  67. .init_fnc = mdss_mdp_overlay_init,
  68. .fb_mem_get_iommu_domain = mdss_fb_mem_get_iommu_domain,
  69. .panel_register_done = mdss_panel_register_done,
  70. .fb_stride = mdss_mdp_fb_stride,
  71. .check_dsi_status = mdss_check_dsi_ctrl_status,
  72. };
  73. #define DEFAULT_TOTAL_RGB_PIPES 3
  74. #define DEFAULT_TOTAL_VIG_PIPES 3
  75. #define DEFAULT_TOTAL_DMA_PIPES 2
  76. #define IB_QUOTA 800000000
  77. #define AB_QUOTA 800000000
  78. #define MEM_PROTECT_SD_CTRL 0xF
  79. static DEFINE_SPINLOCK(mdp_lock);
  80. static DEFINE_MUTEX(mdp_clk_lock);
  81. static DEFINE_MUTEX(bus_bw_lock);
  82. static DEFINE_MUTEX(mdp_iommu_lock);
  83. static DEFINE_MUTEX(mdp_iommu_ref_cnt_lock);
  84. static struct mdss_panel_intf pan_types[] = {
  85. {"dsi", MDSS_PANEL_INTF_DSI},
  86. {"edp", MDSS_PANEL_INTF_EDP},
  87. {"hdmi", MDSS_PANEL_INTF_HDMI},
  88. };
  89. struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
  90. [MDSS_IOMMU_DOMAIN_UNSECURE] = {
  91. .client_name = "mdp_ns",
  92. .ctx_name = "mdp_0",
  93. .partitions = {
  94. {
  95. .start = SZ_128K,
  96. .size = SZ_1G - SZ_128K,
  97. },
  98. },
  99. .npartitions = 1,
  100. },
  101. [MDSS_IOMMU_DOMAIN_SECURE] = {
  102. .client_name = "mdp_secure",
  103. .ctx_name = "mdp_1",
  104. .partitions = {
  105. {
  106. .start = SZ_1G,
  107. .size = SZ_1G,
  108. },
  109. },
  110. .npartitions = 1,
  111. },
  112. };
  113. struct mdss_hw mdss_mdp_hw = {
  114. .hw_ndx = MDSS_HW_MDP,
  115. .ptr = NULL,
  116. .irq_handler = mdss_mdp_isr,
  117. };
  118. static DEFINE_SPINLOCK(mdss_lock);
  119. struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
  120. static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
  121. static int mdss_mdp_parse_dt(struct platform_device *pdev);
  122. static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
  123. static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
  124. static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
  125. static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
  126. static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
  127. char *prop_name, u32 *offsets, int len);
  128. static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
  129. char *prop_name);
  130. static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
  131. static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev);
  132. static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
  133. static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev);
  134. static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev);
  135. u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
  136. {
  137. /* The adreno GPU hardware requires that the pitch be aligned to
  138. 32 pixels for color buffers, so for the cases where the GPU
  139. is writing directly to fb0, the framebuffer pitch
  140. also needs to be 32 pixel aligned */
  141. if (fb_index == 0)
  142. return ALIGN(xres, 32) * bpp;
  143. else
  144. return xres * bpp;
  145. }
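/*
 * Illustrative example (not part of the original source): for fb0 with
 * xres = 1080 and bpp = 4, ALIGN(1080, 32) = 1088, so the stride is
 * 1088 * 4 = 4352 bytes; for any other fb index it is simply
 * 1080 * 4 = 4320 bytes.
 */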
  146. static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
  147. {
  148. struct mdss_hw *hw;
  149. int rc = -ENODEV;
  150. spin_lock(&mdss_lock);
  151. hw = mdss_irq_handlers[hw_ndx];
  152. #if 0//defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  153. xlog(__func__, (u32)hw, hw_ndx, 0, 0, 0, 0xeeee);
  154. #endif
  155. spin_unlock(&mdss_lock);
  156. if (hw)
  157. rc = hw->irq_handler(irq, hw->ptr);
  158. return rc;
  159. }
  160. static irqreturn_t mdss_irq_handler(int irq, void *ptr)
  161. {
  162. struct mdss_data_type *mdata = ptr;
  163. u32 intr = MDSS_MDP_REG_READ(MDSS_REG_HW_INTR_STATUS);
  164. #if 0//defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  165. xlog(__func__, intr, (u32) mdata, 0, 0, 0, 0xffff);
  166. #endif
  167. if (!mdata)
  168. return IRQ_NONE;
  169. mdata->irq_buzy = true;
  170. if (intr & MDSS_INTR_MDP) {
  171. spin_lock(&mdp_lock);
  172. mdss_irq_dispatch(MDSS_HW_MDP, irq, ptr);
  173. spin_unlock(&mdp_lock);
  174. }
  175. if (intr & MDSS_INTR_DSI0)
  176. mdss_irq_dispatch(MDSS_HW_DSI0, irq, ptr);
  177. if (intr & MDSS_INTR_DSI1)
  178. mdss_irq_dispatch(MDSS_HW_DSI1, irq, ptr);
  179. if (intr & MDSS_INTR_EDP)
  180. mdss_irq_dispatch(MDSS_HW_EDP, irq, ptr);
  181. if (intr & MDSS_INTR_HDMI)
  182. mdss_irq_dispatch(MDSS_HW_HDMI, irq, ptr);
  183. mdata->irq_buzy = false;
  184. return IRQ_HANDLED;
  185. }
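/*
 * Note: mdss_irq_handler() is the top-level MDSS ISR. It reads the HW
 * interrupt status once and demultiplexes each pending source (MDP,
 * DSI0/1, EDP, HDMI) to the per-block handler registered through
 * mdss_register_irq(); blocks without a registered handler simply return
 * -ENODEV from mdss_irq_dispatch().
 */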
  186. int mdss_register_irq(struct mdss_hw *hw)
  187. {
  188. unsigned long irq_flags;
  189. u32 ndx_bit;
  190. if (!hw || hw->hw_ndx >= MDSS_MAX_HW_BLK)
  191. return -EINVAL;
  192. ndx_bit = BIT(hw->hw_ndx);
  193. spin_lock_irqsave(&mdss_lock, irq_flags);
  194. if (!mdss_irq_handlers[hw->hw_ndx])
  195. mdss_irq_handlers[hw->hw_ndx] = hw;
  196. else
  197. pr_err("panel %d's irq at %pK is already registered\n",
  198. hw->hw_ndx, hw->irq_handler);
  199. spin_unlock_irqrestore(&mdss_lock, irq_flags);
  200. return 0;
  201. } /* mdss_register_irq */
  202. EXPORT_SYMBOL(mdss_register_irq);
  203. void mdss_enable_irq(struct mdss_hw *hw)
  204. {
  205. unsigned long irq_flags;
  206. u32 ndx_bit;
  207. if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
  208. return;
  209. if (!mdss_irq_handlers[hw->hw_ndx]) {
  210. pr_err("failed. First register the irq then enable it.\n");
  211. return;
  212. }
  213. ndx_bit = BIT(hw->hw_ndx);
  214. pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
  215. mdss_res->irq_ena, mdss_res->irq_mask);
  216. spin_lock_irqsave(&mdss_lock, irq_flags);
  217. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  218. xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xB);
  219. #endif
  220. if (mdss_res->irq_mask & ndx_bit) {
  221. pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
  222. hw->hw_ndx, mdss_res->irq_mask);
  223. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  224. xlog(__func__, 0, 0, 0, 0, 0, 0xFF);
  225. #endif
  226. } else {
  227. mdss_res->irq_mask |= ndx_bit;
  228. if (!mdss_res->irq_ena) {
  229. mdss_res->irq_ena = true;
  230. enable_irq(mdss_res->irq);
  231. }
  232. }
  233. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  234. xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xE);
  235. #endif
  236. spin_unlock_irqrestore(&mdss_lock, irq_flags);
  237. }
  238. EXPORT_SYMBOL(mdss_enable_irq);
  239. void mdss_disable_irq(struct mdss_hw *hw)
  240. {
  241. unsigned long irq_flags;
  242. u32 ndx_bit;
  243. if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
  244. return;
  245. ndx_bit = BIT(hw->hw_ndx);
  246. pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
  247. mdss_res->irq_ena, mdss_res->irq_mask);
  248. spin_lock_irqsave(&mdss_lock, irq_flags);
  249. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  250. xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xB);
  251. #endif
  252. if (!(mdss_res->irq_mask & ndx_bit)) {
  253. pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n",
  254. hw->hw_ndx, mdss_res->mdp_irq_mask,
  255. mdss_res->mdp_hist_irq_mask);
  256. } else {
  257. mdss_res->irq_mask &= ~ndx_bit;
  258. if (mdss_res->irq_mask == 0) {
  259. mdss_res->irq_ena = false;
  260. disable_irq_nosync(mdss_res->irq);
  261. }
  262. }
  263. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  264. xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xE);
  265. #endif
  266. spin_unlock_irqrestore(&mdss_lock, irq_flags);
  267. }
  268. EXPORT_SYMBOL(mdss_disable_irq);
  269. /* called from interrupt context */
  270. void mdss_disable_irq_nosync(struct mdss_hw *hw)
  271. {
  272. u32 ndx_bit;
  273. if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
  274. return;
  275. ndx_bit = BIT(hw->hw_ndx);
  276. pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
  277. mdss_res->irq_ena, mdss_res->irq_mask);
  278. spin_lock(&mdss_lock);
  279. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  280. xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xB);
  281. #endif
  282. if (!(mdss_res->irq_mask & ndx_bit)) {
  283. pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n",
  284. hw->hw_ndx, mdss_res->mdp_irq_mask,
  285. mdss_res->mdp_hist_irq_mask);
  286. } else {
  287. mdss_res->irq_mask &= ~ndx_bit;
  288. if (mdss_res->irq_mask == 0) {
  289. mdss_res->irq_ena = false;
  290. disable_irq_nosync(mdss_res->irq);
  291. }
  292. }
  293. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  294. xlog(__func__, hw->hw_ndx, ndx_bit,mdss_res->irq_mask, mdss_res->irq_ena, mdss_res->irq, 0xE);
  295. #endif
  296. spin_unlock(&mdss_lock);
  297. }
  298. EXPORT_SYMBOL(mdss_disable_irq_nosync);
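/*
 * Summary of the helpers above: mdss_enable_irq()/mdss_disable_irq()
 * track which HW blocks currently need the shared MDSS interrupt line in
 * the irq_mask bitmap; the physical IRQ is enabled only when the first
 * bit is set and disabled only when the last bit is cleared. The _nosync
 * variant is intended for interrupt context and therefore takes mdss_lock
 * without saving/restoring IRQ flags.
 */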
  299. static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
  300. {
  301. if (!mdata->bus_hdl) {
  302. mdata->bus_hdl =
  303. msm_bus_scale_register_client(mdata->bus_scale_table);
  304. if (IS_ERR_VALUE(mdata->bus_hdl)) {
  305. pr_err("bus_client register failed\n");
  306. return -EINVAL;
  307. }
  308. pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
  309. }
  310. return mdss_bus_scale_set_quota(MDSS_HW_MDP, AB_QUOTA, IB_QUOTA);
  311. }
  312. static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
  313. {
  314. pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
  315. if (mdata->bus_hdl)
  316. msm_bus_scale_unregister_client(mdata->bus_hdl);
  317. }
  318. unsigned long clk_rate_dbg;
  319. u64 bus_ab_quota_dbg, bus_ib_quota_dbg;
  320. #if defined(CONFIG_MACH_MILLET3G_CHN_OPEN)
  321. #define MDSS_MDP_BUS_FUDGE_FACTOR_IB(val) (((val) * 5) / 4)
  322. #endif
  323. int mdss_mdp_bus_scale_set_quota(u64 ab_quota, u64 ib_quota)
  324. {
  325. int new_uc_idx;
  326. if (mdss_res->bus_hdl < 1) {
  327. pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
  328. return -EINVAL;
  329. }
  330. if ((ab_quota | ib_quota) == 0) {
  331. new_uc_idx = 0;
  332. } else {
  333. int i;
  334. struct msm_bus_vectors *vect = NULL;
  335. struct msm_bus_scale_pdata *bw_table =
  336. mdss_res->bus_scale_table;
  337. unsigned long size;
  338. if (!bw_table || !mdss_res->axi_port_cnt) {
  339. pr_err("invalid input\n");
  340. return -EINVAL;
  341. }
  342. size = SZ_64M / mdss_res->axi_port_cnt;
  343. ab_quota = div_u64(ab_quota, mdss_res->axi_port_cnt);
  344. ib_quota = div_u64(ib_quota, mdss_res->axi_port_cnt);
  345. new_uc_idx = (mdss_res->curr_bw_uc_idx %
  346. (bw_table->num_usecases - 1)) + 1;
  347. for (i = 0; i < mdss_res->axi_port_cnt; i++) {
  348. vect = &bw_table->usecase[mdss_res->curr_bw_uc_idx].
  349. vectors[i];
  350. /* avoid performing updates for small changes */
  351. if ((ALIGN(ab_quota, size) == ALIGN(vect->ab, size)) &&
  352. (ALIGN(ib_quota, size) == ALIGN(vect->ib, size))) {
  353. pr_debug("skip bus scaling, no changes\n");
  354. return 0;
  355. }
  356. vect = &bw_table->usecase[new_uc_idx].vectors[i];
  357. #if defined(CONFIG_MACH_MILLET3G_CHN_OPEN)
  358. vect->ab = ab_quota;
  359. vect->ib = MDSS_MDP_BUS_FUDGE_FACTOR_IB(ib_quota);
  360. bus_ab_quota_dbg = ab_quota;
  361. bus_ib_quota_dbg = MDSS_MDP_BUS_FUDGE_FACTOR_IB(ib_quota);
  362. #else
  363. vect->ab = ab_quota;
  364. vect->ib = ib_quota;
  365. bus_ab_quota_dbg = ab_quota;
  366. bus_ib_quota_dbg = ib_quota;
  367. #endif
  368. pr_debug("uc_idx=%d path_idx=%d ab=%llu ib=%llu\n",
  369. new_uc_idx, i, vect->ab, vect->ib);
  370. }
  371. }
  372. mdss_res->curr_bw_uc_idx = new_uc_idx;
  373. return msm_bus_scale_client_update_request(mdss_res->bus_hdl,
  374. new_uc_idx);
  375. }
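/*
 * Note: the requested ab/ib values are split evenly across the available
 * AXI ports, updates that fall within the same SZ_64M-per-port alignment
 * bucket as the current vote are skipped, and the usecase index cycles
 * through the non-zero entries of the bus table so that
 * msm_bus_scale_client_update_request() always sees a changed index.
 */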
  376. int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
  377. {
  378. int rc = 0;
  379. int i;
  380. u64 total_ab = 0;
  381. u64 total_ib = 0;
  382. mutex_lock(&bus_bw_lock);
  383. mdss_res->ab[client] = ab_quota;
  384. mdss_res->ib[client] = ib_quota;
  385. for (i = 0; i < MDSS_MAX_HW_BLK; i++) {
  386. total_ab += mdss_res->ab[i];
  387. total_ib = max(total_ib, mdss_res->ib[i]);
  388. }
  389. rc = mdss_mdp_bus_scale_set_quota(total_ab, total_ib);
  390. mutex_unlock(&bus_bw_lock);
  391. return rc;
  392. }
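/*
 * Note: per-client votes are aggregated before being applied: ab (average
 * bandwidth) values are summed while ib (instantaneous bandwidth) takes
 * the maximum across clients. For example, clients voting ab/ib of
 * 100/300 and 200/250 combine into ab=300, ib=300.
 */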
  393. static inline u32 mdss_mdp_irq_mask(u32 intr_type, u32 intf_num)
  394. {
  395. if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
  396. intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
  397. intf_num = (intf_num - MDSS_MDP_INTF0) * 2;
  398. return 1 << (intr_type + intf_num);
  399. }
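/*
 * Note: the MDP interrupt registers use one bit per (interrupt type,
 * interface) pair. For the per-interface vsync and underrun interrupts
 * the bit positions advance by two per interface, which is why intf_num
 * is rescaled to (intf_num - MDSS_MDP_INTF0) * 2 before being added to
 * the interrupt type to form the bit index.
 */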
  400. /* function assumes that mdp is clocked to access hw registers */
  401. void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
  402. u32 intr_type, u32 intf_num)
  403. {
  404. unsigned long irq_flags;
  405. u32 irq;
  406. irq = mdss_mdp_irq_mask(intr_type, intf_num);
  407. pr_debug("clearing mdp irq mask=%x\n", irq);
  408. spin_lock_irqsave(&mdp_lock, irq_flags);
  409. writel_relaxed(irq, mdata->mdp_base + MDSS_MDP_REG_INTR_CLEAR);
  410. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  411. }
  412. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  413. int mdss_mdp_debug_bus(void)
  414. {
  415. u32 status;
  416. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  417. MDSS_MDP_REG_WRITE(0x398, 0x7001);
  418. MDSS_MDP_REG_WRITE(0x448, 0x3f1);
  419. status = MDSS_MDP_REG_READ(0x44c);
  420. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  421. xlog(__func__, status, 0, 0, 0, 0, 0xDDDDDD);
  422. return 0;
  423. }
  424. EXPORT_SYMBOL(mdss_mdp_debug_bus);
  425. #endif
  426. int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
  427. {
  428. u32 irq;
  429. unsigned long irq_flags;
  430. int ret = 0;
  431. irq = mdss_mdp_irq_mask(intr_type, intf_num);
  432. spin_lock_irqsave(&mdp_lock, irq_flags);
  433. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  434. xlog(__func__,mdss_res->mdp_irq_mask, irq, intr_type, intf_num, 0, 0);
  435. #endif
  436. if (mdss_res->mdp_irq_mask & irq) {
  437. pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
  438. irq, mdss_res->mdp_irq_mask);
  439. ret = -EBUSY;
  440. } else {
  441. pr_debug("MDP IRQ mask old=%x new=%x\n",
  442. mdss_res->mdp_irq_mask, irq);
  443. mdss_res->mdp_irq_mask |= irq;
  444. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, irq);
  445. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
  446. mdss_res->mdp_irq_mask);
  447. mdss_enable_irq(&mdss_mdp_hw);
  448. }
  449. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  450. return ret;
  451. }
  452. int mdss_mdp_hist_irq_enable(u32 irq)
  453. {
  454. unsigned long irq_flags;
  455. int ret = 0;
  456. spin_lock_irqsave(&mdp_lock, irq_flags);
  457. if (mdss_res->mdp_hist_irq_mask & irq) {
  458. pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
  459. irq, mdss_res->mdp_hist_irq_mask);
  460. ret = -EBUSY;
  461. } else {
  462. pr_debug("MDP IRQ mask old=%x new=%x\n",
  463. mdss_res->mdp_hist_irq_mask, irq);
  464. mdss_res->mdp_hist_irq_mask |= irq;
  465. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_CLEAR, irq);
  466. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_EN,
  467. mdss_res->mdp_hist_irq_mask);
  468. mdss_enable_irq(&mdss_mdp_hw);
  469. }
  470. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  471. return ret;
  472. }
  473. void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
  474. {
  475. u32 irq;
  476. unsigned long irq_flags;
  477. irq = mdss_mdp_irq_mask(intr_type, intf_num);
  478. spin_lock_irqsave(&mdp_lock, irq_flags);
  479. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  480. xlog(__func__,mdss_res->mdp_irq_mask, irq, intr_type, intf_num, 0, 0);
  481. #endif
  482. if (!(mdss_res->mdp_irq_mask & irq)) {
  483. pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
  484. irq, mdss_res->mdp_irq_mask);
  485. } else {
  486. mdss_res->mdp_irq_mask &= ~irq;
  487. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
  488. mdss_res->mdp_irq_mask);
  489. if ((mdss_res->mdp_irq_mask == 0) &&
  490. (mdss_res->mdp_hist_irq_mask == 0))
  491. mdss_disable_irq(&mdss_mdp_hw);
  492. }
  493. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  494. }
  495. void mdss_mdp_hist_irq_disable(u32 irq)
  496. {
  497. unsigned long irq_flags;
  498. spin_lock_irqsave(&mdp_lock, irq_flags);
  499. if (!(mdss_res->mdp_hist_irq_mask & irq)) {
  500. pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
  501. irq, mdss_res->mdp_hist_irq_mask);
  502. } else {
  503. mdss_res->mdp_hist_irq_mask &= ~irq;
  504. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_EN,
  505. mdss_res->mdp_hist_irq_mask);
  506. if ((mdss_res->mdp_irq_mask == 0) &&
  507. (mdss_res->mdp_hist_irq_mask == 0))
  508. mdss_disable_irq(&mdss_mdp_hw);
  509. }
  510. spin_unlock_irqrestore(&mdp_lock, irq_flags);
  511. }
  512. /**
  513. * mdss_mdp_irq_disable_nosync() - disable mdp irq
  514. * @intr_type: mdp interrupt type
  515. * @intf_num: mdp interface num
  516. *
  517. * This function is called from interrupt context.
  518. * mdp_lock is already held upstream (in mdss_irq_handler),
  519. * therefore spin_lock(&mdp_lock) is not allowed here.
  520. *
  521. */
  522. void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
  523. {
  524. u32 irq;
  525. irq = mdss_mdp_irq_mask(intr_type, intf_num);
  526. if (!(mdss_res->mdp_irq_mask & irq)) {
  527. pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
  528. irq, mdss_res->mdp_irq_mask);
  529. } else {
  530. mdss_res->mdp_irq_mask &= ~irq;
  531. MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
  532. mdss_res->mdp_irq_mask);
  533. if ((mdss_res->mdp_irq_mask == 0) &&
  534. (mdss_res->mdp_hist_irq_mask == 0))
  535. mdss_disable_irq_nosync(&mdss_mdp_hw);
  536. }
  537. }
  538. static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
  539. {
  540. if (clk_idx < MDSS_MAX_CLK)
  541. return mdss_res->mdp_clk[clk_idx];
  542. return NULL;
  543. }
  544. static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
  545. {
  546. int ret = -ENODEV;
  547. struct clk *clk = mdss_mdp_get_clk(clk_idx);
  548. if (clk) {
  549. pr_debug("clk=%d en=%d\n", clk_idx, enable);
  550. if (enable) {
  551. if (clk_idx == MDSS_CLK_MDP_VSYNC)
  552. clk_set_rate(clk, 19200000);
  553. ret = clk_prepare_enable(clk);
  554. } else {
  555. clk_disable_unprepare(clk);
  556. ret = 0;
  557. }
  558. }
  559. return ret;
  560. }
  561. int mdss_mdp_vsync_clk_enable(int enable)
  562. {
  563. int ret = 0;
  564. pr_debug("clk enable=%d\n", enable);
  565. mutex_lock(&mdp_clk_lock);
  566. if (mdss_res->vsync_ena != enable) {
  567. mdss_res->vsync_ena = enable;
  568. ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
  569. }
  570. mutex_unlock(&mdp_clk_lock);
  571. return ret;
  572. }
  573. void mdss_mdp_set_clk_rate(unsigned long rate)
  574. {
  575. struct mdss_data_type *mdata = mdss_res;
  576. unsigned long clk_rate;
  577. struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_SRC);
  578. unsigned long min_clk_rate;
  579. min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
  580. if (clk) {
  581. mutex_lock(&mdp_clk_lock);
  582. if (min_clk_rate < mdata->max_mdp_clk_rate)
  583. clk_rate = clk_round_rate(clk, min_clk_rate);
  584. else
  585. clk_rate = mdata->max_mdp_clk_rate;
  586. if (IS_ERR_VALUE(clk_rate)) {
  587. pr_err("unable to round rate err=%ld\n", clk_rate);
  588. } else if (clk_rate != clk_get_rate(clk)) {
  589. clk_rate_dbg = clk_rate;
  590. if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
  591. pr_err("clk_set_rate failed\n");
  592. else
  593. pr_debug("mdp clk rate=%lu\n", clk_rate);
  594. }
  595. mutex_unlock(&mdp_clk_lock);
  596. } else {
  597. pr_err("mdp src clk not setup properly\n");
  598. }
  599. }
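/*
 * Note: the requested rate is first raised to perf_tune.min_mdp_clk,
 * clamped to the device-tree max_mdp_clk_rate, rounded by the clock
 * framework, and only programmed when it differs from the current rate.
 */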
  600. unsigned long mdss_mdp_get_clk_rate(u32 clk_idx)
  601. {
  602. unsigned long clk_rate = 0;
  603. struct clk *clk = mdss_mdp_get_clk(clk_idx);
  604. mutex_lock(&mdp_clk_lock);
  605. if (clk)
  606. clk_rate = clk_get_rate(clk);
  607. mutex_unlock(&mdp_clk_lock);
  608. return clk_rate;
  609. }
  610. void mdss_iommu_lock(void)
  611. {
  612. mutex_lock(&mdp_iommu_lock);
  613. }
  614. void mdss_iommu_unlock(void)
  615. {
  616. mutex_unlock(&mdp_iommu_lock);
  617. }
  618. int mdss_iommu_ctrl(int enable)
  619. {
  620. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  621. int rc = 0;
  622. //mutex_lock(&mdp_iommu_lock);
  623. mutex_lock(&mdp_iommu_ref_cnt_lock);
  624. pr_debug("%pS: enable %d mdata->iommu_ref_cnt %d\n",
  625. __builtin_return_address(0), enable, mdata->iommu_ref_cnt);
  626. if (enable) {
  627. if (mdata->iommu_ref_cnt == 0)
  628. rc = mdss_iommu_attach(mdata);
  629. mdata->iommu_ref_cnt++;
  630. } else {
  631. if (mdata->iommu_ref_cnt) {
  632. mdata->iommu_ref_cnt--;
  633. if (mdata->iommu_ref_cnt == 0)
  634. rc = mdss_iommu_dettach(mdata);
  635. } else {
  636. pr_err("unbalanced iommu ref\n");
  637. }
  638. }
  639. //mutex_unlock(&mdp_iommu_lock);
  640. mutex_unlock(&mdp_iommu_ref_cnt_lock);
  641. if (IS_ERR_VALUE(rc))
  642. return rc;
  643. else
  644. return mdata->iommu_ref_cnt;
  645. }
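/*
 * Note: mdss_iommu_ctrl() reference-counts IOMMU usage: the first enable
 * attaches the MDP context banks and the last disable detaches them. On
 * success it returns the updated reference count (not 0), so callers
 * should treat only negative return values as errors.
 */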
  646. /**
  647. * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
  648. * @enable: value of enable or disable
  649. *
  650. * The function places a bus bandwidth request to allocate the saved bandwidth
  651. * when enabled, or frees the bus bandwidth allocation when disabled.
  652. * Bus bandwidth is required by the mdp; for dsi it is only needed to send
  653. * dcs commands. An error is logged if a disable arrives without a matching enable.
  654. */
  655. void mdss_bus_bandwidth_ctrl(int enable)
  656. {
  657. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  658. static int bus_bw_cnt;
  659. int changed = 0;
  660. mutex_lock(&bus_bw_lock);
  661. if (enable) {
  662. if (bus_bw_cnt == 0)
  663. changed++;
  664. bus_bw_cnt++;
  665. } else {
  666. if (bus_bw_cnt) {
  667. bus_bw_cnt--;
  668. if (bus_bw_cnt == 0)
  669. changed++;
  670. } else {
  671. pr_err("Can not be turned off\n");
  672. }
  673. }
  674. pr_debug("bw_cnt=%d changed=%d enable=%d\n",
  675. bus_bw_cnt, changed, enable);
  676. if (changed) {
  677. if (!enable) {
  678. msm_bus_scale_client_update_request(
  679. mdata->bus_hdl, 0);
  680. pm_runtime_put(&mdata->pdev->dev);
  681. } else {
  682. pm_runtime_get_sync(&mdata->pdev->dev);
  683. msm_bus_scale_client_update_request(
  684. mdata->bus_hdl, mdata->curr_bw_uc_idx);
  685. }
  686. }
  687. mutex_unlock(&bus_bw_lock);
  688. }
  689. EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
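/*
 * Note: bandwidth voting is reference-counted through the static
 * bus_bw_cnt above. The first enable takes a pm_runtime reference and
 * restores the last bus vote (curr_bw_uc_idx); the last disable votes for
 * usecase 0 and drops the pm_runtime reference, so enable/disable calls
 * must be balanced.
 */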
  690. void mdss_mdp_clk_ctrl(int enable, int isr)
  691. {
  692. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  693. static int mdp_clk_cnt;
  694. int changed = 0;
  695. mutex_lock(&mdp_clk_lock);
  696. if (enable) {
  697. if (mdp_clk_cnt == 0)
  698. changed++;
  699. mdp_clk_cnt++;
  700. } else {
  701. if (mdp_clk_cnt) {
  702. mdp_clk_cnt--;
  703. if (mdp_clk_cnt == 0)
  704. changed++;
  705. } else {
  706. pr_err("Can not be turned off\n");
  707. }
  708. }
  709. MDSS_XLOG(mdp_clk_cnt, changed, enable, current->pid);
  710. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  711. xlog(__func__, mdp_clk_cnt, changed, enable, 0, 0, 0);
  712. #endif
  713. pr_debug("%s: clk_cnt=%d changed=%d enable=%d\n",
  714. __func__, mdp_clk_cnt, changed, enable);
  715. if (changed) {
  716. mdata->clk_ena = enable;
  717. if (enable)
  718. pm_runtime_get_sync(&mdata->pdev->dev);
  719. mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
  720. mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
  721. mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
  722. mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
  723. if (mdata->vsync_ena)
  724. mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
  725. if (!enable)
  726. pm_runtime_put(&mdata->pdev->dev);
  727. }
  728. mutex_unlock(&mdp_clk_lock);
  729. }
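/*
 * Note: the reference count used above is the static local mdp_clk_cnt
 * declared inside mdss_mdp_clk_ctrl(); it shadows the file-scope
 * mdp_clk_cnt declared near the top of this file, which is the one
 * printed by mdss_mdp_dump_power_clk(), so the two counters are not
 * kept in sync.
 */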
  730. static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
  731. char *clk_name, int clk_idx)
  732. {
  733. struct clk *tmp;
  734. if (clk_idx >= MDSS_MAX_CLK) {
  735. pr_err("invalid clk index %d\n", clk_idx);
  736. return -EINVAL;
  737. }
  738. tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
  739. if (IS_ERR(tmp)) {
  740. pr_err("unable to get clk: %s\n", clk_name);
  741. return PTR_ERR(tmp);
  742. }
  743. mdata->mdp_clk[clk_idx] = tmp;
  744. return 0;
  745. }
  746. static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
  747. {
  748. int ret;
  749. ret = of_property_read_u32(mdata->pdev->dev.of_node,
  750. "qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
  751. if (ret) {
  752. pr_err("failed to get max mdp clock rate\n");
  753. return ret;
  754. }
  755. pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);
  756. ret = devm_request_irq(&mdata->pdev->dev, mdata->irq, mdss_irq_handler,
  757. IRQF_DISABLED, "MDSS", mdata);
  758. if (ret) {
  759. pr_err("mdp request_irq() failed!\n");
  760. return ret;
  761. }
  762. disable_irq(mdata->irq);
  763. mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
  764. if (IS_ERR_OR_NULL(mdata->fs)) {
  765. mdata->fs = NULL;
  766. pr_err("unable to get gdsc regulator\n");
  767. return -EINVAL;
  768. }
  769. mdata->fs_ena = false;
  770. mdata->vdd_cx = devm_regulator_get(&mdata->pdev->dev,
  771. "vdd-cx");
  772. if (IS_ERR_OR_NULL(mdata->vdd_cx)) {
  773. pr_debug("unable to get CX reg. rc=%d\n",
  774. PTR_RET(mdata->vdd_cx));
  775. mdata->vdd_cx = NULL;
  776. }
  777. if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
  778. mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
  779. mdss_mdp_irq_clk_register(mdata, "core_clk_src",
  780. MDSS_CLK_MDP_SRC) ||
  781. mdss_mdp_irq_clk_register(mdata, "core_clk",
  782. MDSS_CLK_MDP_CORE) ||
  783. mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT) ||
  784. mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC))
  785. return -EINVAL;
  786. mdss_mdp_set_clk_rate(MDP_CLK_DEFAULT_RATE);
  787. pr_debug("mdp clk rate=%ld\n", mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC));
  788. return 0;
  789. }
  790. void mdss_mdp_dump_power_clk(void)
  791. {
  792. u8 clk_idx = 0;
  793. struct clk *clk;
  794. pr_info(" ============ dump power & mdss clk start ============\n");
  795. for(clk_idx = MDSS_CLK_AHB ; clk_idx < MDSS_MAX_CLK ;clk_idx++)
  796. {
  797. clk = mdss_mdp_get_clk(clk_idx);
  798. clock_debug_print_clock2(clk);
  799. }
  800. pr_info("%s: mdp_clk_cnt =%d \n", __func__, mdp_clk_cnt);
  801. pr_info(" ============ dump power & mdss clk end ============\n");
  802. }
  803. /*static int mdss_iommu_fault_handler(struct iommu_domain *domain,
  804. struct device *dev, unsigned long iova, int flags, void *token)
  805. {
  806. pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova);
  807. return 0;
  808. }*/
  809. int mdss_iommu_attach(struct mdss_data_type *mdata)
  810. {
  811. struct iommu_domain *domain;
  812. struct mdss_iommu_map_type *iomap;
  813. int i, rc = 0;
  814. MDSS_XLOG(mdata->iommu_attached);
  815. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  816. xlog(__func__, mdata->iommu_attached, 0, 0, 0, 0, 0);
  817. #endif
  818. mutex_lock(&mdp_iommu_lock);
  819. if (mdata->iommu_attached) {
  820. pr_debug("mdp iommu already attached\n");
  821. goto end;
  822. }
  823. for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
  824. iomap = mdata->iommu_map + i;
  825. domain = msm_get_iommu_domain(iomap->domain_idx);
  826. if (!domain) {
  827. WARN(1, "could not attach iommu client %s to ctx %s\n",
  828. iomap->client_name, iomap->ctx_name);
  829. continue;
  830. }
  831. rc = iommu_attach_device(domain, iomap->ctx);
  832. if (rc) {
  833. WARN(1, "mdp::iommu device attach failed rc:%d\n", rc);
  834. for (i--; i >= 0; i--) {
  835. iomap = mdata->iommu_map + i;
  836. iommu_detach_device(domain, iomap->ctx);
  837. }
  838. goto end;
  839. }
  840. }
  841. mdata->iommu_attached = true;
  842. end:
  843. mutex_unlock(&mdp_iommu_lock);
  844. return rc;
  845. }
  846. int mdss_iommu_dettach(struct mdss_data_type *mdata)
  847. {
  848. struct iommu_domain *domain;
  849. struct mdss_iommu_map_type *iomap;
  850. int i;
  851. MDSS_XLOG(mdata->iommu_attached);
  852. #if defined (CONFIG_FB_MSM_MDSS_DSI_DBG)
  853. xlog(__func__, mdata->iommu_attached, 0, 0, 0, 0, 0);
  854. #endif
  855. mutex_lock(&mdp_iommu_lock);
  856. if (!mdata->iommu_attached) {
  857. pr_debug("mdp iommu already detached\n");
  858. //return 0;
  859. goto end;
  860. }
  861. for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
  862. iomap = mdata->iommu_map + i;
  863. domain = msm_get_iommu_domain(iomap->domain_idx);
  864. if (!domain) {
  865. pr_err("unable to get iommu domain(%d)\n",
  866. iomap->domain_idx);
  867. continue;
  868. }
  869. iommu_detach_device(domain, iomap->ctx);
  870. }
  871. mdata->iommu_attached = false;
  872. MDSS_XLOG(mdata->iommu_attached);
  873. end:
  874. mutex_unlock(&mdp_iommu_lock);
  875. return 0;
  876. }
  877. int mdss_iommu_init(struct mdss_data_type *mdata)
  878. {
  879. struct msm_iova_layout layout;
  880. struct iommu_domain *domain;
  881. struct mdss_iommu_map_type *iomap;
  882. int i;
  883. if (mdata->iommu_map) {
  884. pr_warn("iommu already initialized\n");
  885. return 0;
  886. }
  887. for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
  888. iomap = &mdss_iommu_map[i];
  889. layout.client_name = iomap->client_name;
  890. layout.partitions = iomap->partitions;
  891. layout.npartitions = iomap->npartitions;
  892. layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
  893. //layout.domain_flags = 0;
  894. iomap->domain_idx = msm_register_domain(&layout);
  895. if (IS_ERR_VALUE(iomap->domain_idx))
  896. return -EINVAL;
  897. domain = msm_get_iommu_domain(iomap->domain_idx);
  898. if (!domain) {
  899. pr_err("unable to get iommu domain(%d)\n",
  900. iomap->domain_idx);
  901. return -EINVAL;
  902. }
  903. iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
  904. if (!iomap->ctx) {
  905. pr_warn("unable to get iommu ctx(%s)\n",
  906. iomap->ctx_name);
  907. return -EINVAL;
  908. }
  909. }
  910. mdata->iommu_map = mdss_iommu_map;
  911. return 0;
  912. }
  913. static int mdss_debug_stat_ctl_dump(struct mdss_mdp_ctl *ctl,
  914. char *bp, int len)
  915. {
  916. int total = 0;
  917. if (!ctl->ref_cnt)
  918. return 0;
  919. if (ctl->intf_num) {
  920. total = scnprintf(bp, len,
  921. "intf%d: play: %08u \tvsync: %08u \tunderrun: %08u\n",
  922. ctl->intf_num, ctl->play_cnt,
  923. ctl->vsync_cnt, ctl->underrun_cnt);
  924. } else {
  925. total = scnprintf(bp, len, "wb: \tmode=%x \tplay: %08u\n",
  926. ctl->opmode, ctl->play_cnt);
  927. }
  928. return total;
  929. }
  930. static int mdss_debug_dump_stats(void *data, char *buf, int len)
  931. {
  932. struct mdss_data_type *mdata = data;
  933. struct mdss_mdp_pipe *pipe;
  934. int i, total = 0;
  935. for (i = 0; i < mdata->nctl; i++)
  936. total += mdss_debug_stat_ctl_dump(mdata->ctl_off + i,
  937. buf + total, len - total);
  938. total += scnprintf(buf + total, len - total, "\n");
  939. for (i = 0; i < mdata->nvig_pipes; i++) {
  940. pipe = mdata->vig_pipes + i;
  941. total += scnprintf(buf + total, len - total,
  942. "VIG%d : %08u\t", i, pipe->play_cnt);
  943. }
  944. total += scnprintf(buf + total, len - total, "\n");
  945. for (i = 0; i < mdata->nrgb_pipes; i++) {
  946. pipe = mdata->rgb_pipes + i;
  947. total += scnprintf(buf + total, len - total,
  948. "RGB%d : %08u\t", i, pipe->play_cnt);
  949. }
  950. total += scnprintf(buf + total, len - total, "\n");
  951. for (i = 0; i < mdata->ndma_pipes; i++) {
  952. pipe = mdata->dma_pipes + i;
  953. total += scnprintf(buf + total, len - total,
  954. "DMA%d : %08u\t", i, pipe->play_cnt);
  955. }
  956. return total;
  957. }
  958. static void mdss_debug_enable_clock(int on)
  959. {
  960. if (on)
  961. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  962. else
  963. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  964. }
  965. static int mdss_mdp_debug_init(struct mdss_data_type *mdata)
  966. {
  967. int rc;
  968. mdata->debug_inf.debug_dump_stats = mdss_debug_dump_stats;
  969. mdata->debug_inf.debug_enable_clock = mdss_debug_enable_clock;
  970. rc = mdss_debugfs_init(mdata);
  971. if (rc)
  972. return rc;
  973. mdss_debug_register_base("mdp", mdata->mdp_base, mdata->mdp_reg_size);
  974. return 0;
  975. }
  976. int mdss_hw_init(struct mdss_data_type *mdata)
  977. {
  978. int i, j;
  979. char *offset;
  980. struct mdss_mdp_pipe *vig;
  981. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  982. mdata->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
  983. pr_info_once("MDP Rev=%x\n", mdata->mdp_rev);
  984. if (mdata->hw_settings) {
  985. struct mdss_hw_settings *hws = mdata->hw_settings;
  986. while (hws->reg) {
  987. writel_relaxed(hws->val, hws->reg);
  988. hws++;
  989. }
  990. }
  991. for (i = 0; i < mdata->nmixers_intf; i++) {
  992. offset = mdata->mixer_intf[i].dspp_base +
  993. MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
  994. for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
  995. writel_relaxed(j, offset);
  996. /* swap */
  997. writel_relaxed(1, offset + 4);
  998. }
  999. vig = mdata->vig_pipes;
  1000. for (i = 0; i < mdata->nvig_pipes; i++) {
  1001. offset = vig[i].base +
  1002. MDSS_MDP_REG_VIG_HIST_LUT_BASE;
  1003. for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
  1004. writel_relaxed(j, offset);
  1005. /* swap */
  1006. writel_relaxed(1, offset + 16);
  1007. }
  1008. mdata->nmax_concurrent_ad_hw =
  1009. (mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
  1010. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  1011. pr_debug("MDP hw init done\n");
  1012. return 0;
  1013. }
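/*
 * Note: mdss_hw_init() applies any register settings parsed from the
 * device tree and then programs an identity ramp (entry j = j) into the
 * DSPP and VIG histogram LUTs, writing 1 to the respective swap register
 * so the newly written table takes effect.
 */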
  1014. static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
  1015. {
  1016. u32 rc = 0;
  1017. if (mdata->res_init) {
  1018. pr_err("mdss resources already initialized\n");
  1019. return -EPERM;
  1020. }
  1021. mdata->res_init = true;
  1022. mdata->clk_ena = false;
  1023. mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
  1024. mdata->irq_ena = false;
  1025. rc = mdss_mdp_irq_clk_setup(mdata);
  1026. if (rc)
  1027. return rc;
  1028. mdata->hist_intr.req = 0;
  1029. mdata->hist_intr.curr = 0;
  1030. mdata->hist_intr.state = 0;
  1031. spin_lock_init(&mdata->hist_intr.lock);
  1032. mdata->iclient = msm_ion_client_create(-1, mdata->pdev->name);
  1033. if (IS_ERR_OR_NULL(mdata->iclient)) {
  1034. pr_err("msm_ion_client_create() return error (%pK)\n",
  1035. mdata->iclient);
  1036. mdata->iclient = NULL;
  1037. }
  1038. rc = mdss_iommu_init(mdata);
  1039. return rc;
  1040. }
  1041. /**
  1042. * mdss_mdp_footswitch_ctrl_splash() - clocks handoff for cont. splash screen
  1043. * @on: 1 to start handoff, 0 to complete the handoff after first frame update
  1044. *
  1045. * MDSS clocks and GDSC are already on during the continuous splash screen, but
  1046. * increasing the ref count will keep the clocks from being turned off until the
  1047. * handoff has properly happened after the first frame update.
  1048. */
  1049. void mdss_mdp_footswitch_ctrl_splash(int on)
  1050. {
  1051. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  1052. if (mdata != NULL) {
  1053. if (on) {
  1054. pr_debug("Enable MDP FS for splash.\n");
  1055. mdata->handoff_pending = true;
  1056. regulator_enable(mdata->fs);
  1057. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  1058. mdss_hw_init(mdata);
  1059. } else {
  1060. pr_debug("Disable MDP FS for splash.\n");
  1061. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
  1062. regulator_disable(mdata->fs);
  1063. mdata->handoff_pending = false;
  1064. }
  1065. } else {
  1066. pr_warn("mdss mdata not initialized\n");
  1067. }
  1068. }
  1069. static ssize_t mdss_mdp_show_capabilities(struct device *dev,
  1070. struct device_attribute *attr, char *buf)
  1071. {
  1072. struct mdss_data_type *mdata = dev_get_drvdata(dev);
  1073. size_t len = PAGE_SIZE;
  1074. int cnt = 0;
  1075. #define SPRINT(fmt, ...) \
  1076. (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
  1077. SPRINT("mdp_version=5\n");
  1078. SPRINT("hw_rev=%d\n", mdata->mdp_rev);
  1079. SPRINT("rgb_pipes=%d\n", mdata->nrgb_pipes);
  1080. SPRINT("vig_pipes=%d\n", mdata->nvig_pipes);
  1081. SPRINT("dma_pipes=%d\n", mdata->ndma_pipes);
  1082. SPRINT("smp_count=%d\n", mdata->smp_mb_cnt);
  1083. SPRINT("smp_size=%d\n", mdata->smp_mb_size);
  1084. SPRINT("smp_mb_per_pipe=%d\n", mdata->smp_mb_per_pipe);
  1085. SPRINT("max_downscale_ratio=%d\n", MAX_DOWNSCALE_RATIO);
  1086. SPRINT("max_upscale_ratio=%d\n", MAX_UPSCALE_RATIO);
  1087. if (mdata->max_bw_low)
  1088. SPRINT("max_bandwidth_low=%u\n", mdata->max_bw_low);
  1089. if (mdata->max_bw_high)
  1090. SPRINT("max_bandwidth_high=%u\n", mdata->max_bw_high);
  1091. SPRINT("features=");
  1092. if (mdata->has_bwc)
  1093. SPRINT(" bwc");
  1094. if (mdata->has_decimation)
  1095. SPRINT(" decimation");
  1096. if (mdata->highest_bank_bit)
  1097. SPRINT(" tile_format");
  1098. SPRINT("\n");
  1099. return cnt;
  1100. }
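/*
 * Illustrative output of the "caps" sysfs node (actual values depend on
 * the target): one key=value pair per line for mdp_version, hw_rev,
 * rgb_pipes, vig_pipes, dma_pipes, smp_count, smp_size, smp_mb_per_pipe,
 * max_downscale_ratio and max_upscale_ratio (plus max_bandwidth_low/high
 * when non-zero), followed by a single "features=" line listing bwc,
 * decimation and/or tile_format when supported.
 */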
  1101. static DEVICE_ATTR(caps, S_IRUGO, mdss_mdp_show_capabilities, NULL);
  1102. static struct attribute *mdp_fs_attrs[] = {
  1103. &dev_attr_caps.attr,
  1104. NULL
  1105. };
  1106. static struct attribute_group mdp_fs_attr_group = {
  1107. .attrs = mdp_fs_attrs
  1108. };
  1109. static int mdss_mdp_register_sysfs(struct mdss_data_type *mdata)
  1110. {
  1111. struct device *dev = &mdata->pdev->dev;
  1112. int rc;
  1113. rc = sysfs_create_group(&dev->kobj, &mdp_fs_attr_group);
  1114. return rc;
  1115. }
  1116. struct mdss_util_intf mdss_util = {
  1117. mdss_iommu_lock,
  1118. mdss_iommu_unlock
  1119. };
  1120. struct mdss_util_intf *mdss_get_util_intf()
  1121. {
  1122. return &mdss_util;
  1123. }
  1124. EXPORT_SYMBOL(mdss_get_util_intf);
  1125. static int mdss_mdp_probe(struct platform_device *pdev)
  1126. {
  1127. struct resource *res;
  1128. int rc;
  1129. struct mdss_data_type *mdata;
  1130. pr_info("%s : ++ \n",__func__);
  1131. if (!pdev->dev.of_node) {
  1132. pr_err("MDP driver only supports device tree probe\n");
  1133. return -ENOTSUPP;
  1134. }
  1135. if (mdss_res) {
  1136. pr_err("MDP already initialized\n");
  1137. return -EINVAL;
  1138. }
  1139. mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
  1140. if (mdata == NULL)
  1141. return -ENOMEM;
  1142. pdev->id = 0;
  1143. mdata->pdev = pdev;
  1144. platform_set_drvdata(pdev, mdata);
  1145. mdss_res = mdata;
  1146. mutex_init(&mdata->reg_lock);
  1147. atomic_set(&mdata->sd_client_count, 0);
  1148. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
  1149. if (!res) {
  1150. pr_err("unable to get MDP base address\n");
  1151. rc = -ENOMEM;
  1152. goto probe_done;
  1153. }
  1154. mdss_res->mdss_util = mdss_get_util_intf();
  1155. if (mdss_res->mdss_util == NULL) {
  1156. pr_err("Failed to get mdss utility functions\n");
  1157. rc = -ENODEV;
  1158. goto probe_done;
  1159. }
  1160. mdata->mdp_reg_size = resource_size(res);
  1161. mdata->mdp_base = devm_ioremap(&pdev->dev, res->start,
  1162. mdata->mdp_reg_size);
  1163. if (unlikely(!mdata->mdp_base)) {
  1164. pr_err("unable to map MDP base\n");
  1165. rc = -ENOMEM;
  1166. goto probe_done;
  1167. }
  1168. pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n",
  1169. (int) res->start,
  1170. (int) mdata->mdp_base);
  1171. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys");
  1172. if (!res) {
  1173. pr_err("unable to get MDSS VBIF base address\n");
  1174. rc = -ENOMEM;
  1175. goto probe_done;
  1176. }
  1177. mdata->vbif_base = devm_ioremap(&pdev->dev, res->start,
  1178. resource_size(res));
  1179. if (unlikely(!mdata->vbif_base)) {
  1180. pr_err("unable to map MDSS VBIF base\n");
  1181. rc = -ENOMEM;
  1182. goto probe_done;
  1183. }
  1184. pr_info("MDSS VBIF HW Base phy_Address=0x%x virt=0x%x\n",
  1185. (int) res->start,
  1186. (int) mdata->vbif_base);
  1187. res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  1188. if (!res) {
  1189. pr_err("unable to get MDSS irq\n");
  1190. rc = -ENOMEM;
  1191. goto probe_done;
  1192. }
  1193. mdata->irq = res->start;
  1194. mdss_mdp_hw.ptr = mdata;
  1195. /* populate hw iomem base info from device tree */
  1196. rc = mdss_mdp_parse_dt(pdev);
  1197. if (rc) {
  1198. pr_err("unable to parse device tree\n");
  1199. goto probe_done;
  1200. }
  1201. rc = mdss_mdp_res_init(mdata);
  1202. if (rc) {
  1203. pr_err("unable to initialize mdss mdp resources\n");
  1204. goto probe_done;
  1205. }
  1206. rc = mdss_mdp_pp_init(&pdev->dev);
  1207. if (rc) {
  1208. pr_err("unable to initialize mdss pp resources\n");
  1209. goto probe_done;
  1210. }
  1211. rc = mdss_mdp_bus_scale_register(mdata);
  1212. if (rc) {
  1213. pr_err("unable to register bus scaling\n");
  1214. goto probe_done;
  1215. }
  1216. rc = mdss_mdp_debug_init(mdata);
  1217. if (rc) {
  1218. pr_err("unable to initialize mdp debugging\n");
  1219. goto probe_done;
  1220. }
  1221. pm_runtime_set_suspended(&pdev->dev);
  1222. pm_runtime_enable(&pdev->dev);
  1223. if (!pm_runtime_enabled(&pdev->dev))
  1224. mdss_mdp_footswitch_ctrl(mdata, true);
  1225. rc = mdss_mdp_register_sysfs(mdata);
  1226. if (rc)
  1227. pr_err("unable to register mdp sysfs nodes\n");
  1228. rc = mdss_fb_register_mdp_instance(&mdp5);
  1229. if (rc)
  1230. pr_err("unable to register mdp instance\n");
  1231. rc = mdss_register_irq(&mdss_mdp_hw);
  1232. if (rc)
  1233. pr_err("mdss_register_irq failed.\n");
  1234. /* The code below is not required now because there is no call to turn off the mdp clock */
  1235. #if 0 // defined(CONFIG_FB_MSM_EDP_SAMSUNG)
  1236. if (mdss_mdp_scan_pipes()) {
  1237. mdss_mdp_bus_scale_set_quota(AB_QUOTA, IB_QUOTA);
  1238. /* keep clock on if continuous splash from lk */
  1239. mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
  1240. }
  1241. #endif
  1242. probe_done:
  1243. if (IS_ERR_VALUE(rc)) {
  1244. mdss_mdp_hw.ptr = NULL;
  1245. mdss_mdp_pp_term(&pdev->dev);
  1246. mutex_destroy(&mdata->reg_lock);
  1247. mdss_res = NULL;
  1248. }
  1249. pr_info("%s : -- \n",__func__);
  1250. return rc;
  1251. }
  1252. static void mdss_mdp_parse_dt_regs_array(const u32 *arr, char __iomem *hw_base,
  1253. struct mdss_hw_settings *hws, int count)
  1254. {
  1255. u32 len, reg;
  1256. int i;
  1257. if (!arr)
  1258. return;
  1259. for (i = 0, len = count * 2; i < len; i += 2) {
  1260. reg = be32_to_cpu(arr[i]);
  1261. hws->reg = hw_base + reg;
  1262. hws->val = be32_to_cpu(arr[i + 1]);
  1263. pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
  1264. hws++;
  1265. }
  1266. }
  1267. int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
  1268. {
  1269. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1270. struct mdss_hw_settings *hws;
  1271. const u32 *vbif_arr, *mdp_arr;
  1272. int vbif_len, mdp_len;
  1273. vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
  1274. &vbif_len);
  1275. if (!vbif_arr || (vbif_len & 1)) {
  1276. pr_warn("MDSS VBIF settings not found\n");
  1277. vbif_len = 0;
  1278. }
  1279. vbif_len /= 2 * sizeof(u32);
  1280. mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
  1281. &mdp_len);
  1282. if (!mdp_arr || (mdp_len & 1)) {
  1283. pr_warn("MDSS MDP settings not found\n");
  1284. mdp_len = 0;
  1285. }
  1286. mdp_len /= 2 * sizeof(u32);
  1287. if ((mdp_len + vbif_len) == 0)
  1288. return 0;
  1289. hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len + 1),
  1290. GFP_KERNEL);
  1291. if (!hws)
  1292. return -ENOMEM;
  1293. mdss_mdp_parse_dt_regs_array(vbif_arr, mdata->vbif_base, hws, vbif_len);
  1294. mdss_mdp_parse_dt_regs_array(mdp_arr, mdata->mdp_base,
  1295. hws + vbif_len, mdp_len);
  1296. mdata->hw_settings = hws;
  1297. return 0;
  1298. }
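/*
 * Illustrative device tree fragment only (placeholder offsets and values,
 * not taken from any shipping DT): each cell pair in these properties is a
 * <register offset, value> tuple, applied relative to the VBIF or MDP iomem
 * base as parsed above.
 *
 *	qcom,vbif-settings = <0x0004 0x00000001>,
 *			     <0x00ac 0x00000100>;
 *	qcom,mdp-settings  = <0x0100 0x00000000>;
 */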
  1299. static int mdss_mdp_get_pan_intf(const char *pan_intf)
  1300. {
  1301. int i, rc = MDSS_PANEL_INTF_INVALID;
  1302. if (!pan_intf)
  1303. return rc;
  1304. for (i = 0; i < ARRAY_SIZE(pan_types); i++) {
  1305. if (!strncmp(pan_intf, pan_types[i].name, MDSS_MAX_PANEL_LEN)) {
  1306. rc = pan_types[i].type;
  1307. break;
  1308. }
  1309. }
  1310. return rc;
  1311. }
  1312. static int mdss_mdp_get_pan_cfg(struct mdss_panel_cfg *pan_cfg)
  1313. {
  1314. char *t = NULL;
  1315. char pan_intf_str[MDSS_MAX_PANEL_LEN];
  1316. int rc, i, panel_len;
  1317. char pan_name[MDSS_MAX_PANEL_LEN];
  1318. if (!pan_cfg)
  1319. return -EINVAL;
  1320. strlcpy(pan_name, &pan_cfg->arg_cfg[0], sizeof(pan_cfg->arg_cfg));
  1321. #if defined (CONFIG_FB_MSM_MIPI_SAMSUNG_OCTA_CMD_WQHD_PT_PANEL) || defined (CONFIG_FB_MSM_MDSS_S6E8AA0A_HD_PANEL)\
  1322. || defined(CONFIG_FB_MSM_MDSS_SHARP_HD_PANEL)
  1323. if (pan_name[0] == '0') {
  1324. pan_cfg->lk_cfg = false;
  1325. } else if (pan_name[0] == '1') {
  1326. pan_cfg->lk_cfg = true;
  1327. } else {
  1328. /* read from dt */
  1329. pan_cfg->lk_cfg = true;
  1330. pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
  1331. return -EINVAL;
  1332. }
  1333. #else
  1334. pan_cfg->lk_cfg = false;
  1335. #endif
  1336. /* skip lk cfg and delimiter; ex: "0:" */
  1337. strlcpy(pan_name, &pan_name[2], MDSS_MAX_PANEL_LEN);
  1338. t = strnstr(pan_name, ":", MDSS_MAX_PANEL_LEN);
  1339. if (!t) {
  1340. pr_err("%s: pan_name=[%s] invalid\n",
  1341. __func__, pan_name);
  1342. pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
  1343. return -EINVAL;
  1344. }
  1345. for (i = 0; ((pan_name + i) < t) && (i < 4); i++)
  1346. pan_intf_str[i] = *(pan_name + i);
  1347. pan_intf_str[i] = 0;
  1348. pr_debug("%s:%d panel intf %s\n", __func__, __LINE__, pan_intf_str);
  1349. /* point to the start of panel name */
  1350. t = t + 1;
  1351. strlcpy(&pan_cfg->arg_cfg[0], t, sizeof(pan_cfg->arg_cfg));
  1352. pr_debug("%s:%d: t=[%s] panel name=[%s]\n", __func__, __LINE__,
  1353. t, pan_cfg->arg_cfg);
  1354. panel_len = strlen(pan_cfg->arg_cfg);
  1355. if (!panel_len) {
  1356. pr_err("%s: Panel name is invalid\n", __func__);
  1357. pan_cfg->pan_intf = MDSS_PANEL_INTF_INVALID;
  1358. return -EINVAL;
  1359. }
  1360. rc = mdss_mdp_get_pan_intf(pan_intf_str);
  1361. pan_cfg->pan_intf = (rc < 0) ? MDSS_PANEL_INTF_INVALID : rc;
  1362. return 0;
  1363. }
  1364. static int mdss_mdp_parse_dt_pan_intf(struct platform_device *pdev)
  1365. {
  1366. int rc;
  1367. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1368. const char *prim_intf = NULL;
  1369. rc = of_property_read_string(pdev->dev.of_node,
  1370. "qcom,mdss-pref-prim-intf", &prim_intf);
  1371. if (rc)
  1372. return -ENODEV;
  1373. rc = mdss_mdp_get_pan_intf(prim_intf);
  1374. if (rc < 0) {
  1375. mdata->pan_cfg.pan_intf = MDSS_PANEL_INTF_INVALID;
  1376. } else {
  1377. mdata->pan_cfg.pan_intf = rc;
  1378. rc = 0;
  1379. }
  1380. return rc;
  1381. }
  1382. static int mdss_mdp_parse_bootarg(struct platform_device *pdev)
  1383. {
  1384. struct device_node *chosen_node;
  1385. static const char *cmd_line;
  1386. char *disp_idx, *end_idx;
  1387. int rc, len = 0, name_len, cmd_len;
  1388. int *intf_type;
  1389. char *panel_name;
  1390. struct mdss_panel_cfg *pan_cfg;
  1391. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1392. mdata->pan_cfg.arg_cfg[MDSS_MAX_PANEL_LEN] = 0;
  1393. pan_cfg = &mdata->pan_cfg;
  1394. panel_name = &pan_cfg->arg_cfg[0];
  1395. intf_type = &pan_cfg->pan_intf;
  1396. /* reads from dt by default */
  1397. pan_cfg->lk_cfg = true;
  1398. chosen_node = of_find_node_by_name(NULL, "chosen");
  1399. if (!chosen_node) {
  1400. pr_err("%s: get chosen node failed\n", __func__);
  1401. rc = -ENODEV;
  1402. goto get_dt_pan;
  1403. }
  1404. cmd_line = of_get_property(chosen_node, "bootargs", &len);
  1405. if (!cmd_line || len <= 0) {
  1406. pr_err("%s: get bootargs failed\n", __func__);
  1407. rc = -ENODEV;
  1408. goto get_dt_pan;
  1409. }
  1410. name_len = strlen("mdss_mdp.panel=");
  1411. cmd_len = strlen(cmd_line);
  1412. disp_idx = strnstr(cmd_line, "mdss_mdp.panel=", cmd_len);
  1413. if (!disp_idx) {
  1414. pr_err("%s:%d:cmdline panel not set disp_idx=[%pK]\n",
  1415. __func__, __LINE__, disp_idx);
  1416. memset(panel_name, 0x00, MDSS_MAX_PANEL_LEN);
  1417. *intf_type = MDSS_PANEL_INTF_INVALID;
  1418. rc = MDSS_PANEL_INTF_INVALID;
  1419. goto get_dt_pan;
  1420. }
  1421. disp_idx += name_len;
  1422. end_idx = strnstr(disp_idx, " ", MDSS_MAX_PANEL_LEN);
  1423. pr_debug("%s:%d: pan_name=[%s] end=[%s]\n", __func__, __LINE__,
  1424. disp_idx, end_idx);
  1425. if (!end_idx) {
  1426. end_idx = disp_idx + strlen(disp_idx) + 1;
  1427. pr_warn("%s:%d: pan_name=[%s] end=[%s]\n", __func__,
  1428. __LINE__, disp_idx, end_idx);
  1429. }
  1430. if (end_idx <= disp_idx) {
  1431. pr_err("%s:%d:cmdline pan incorrect end=[%pK] disp=[%pK]\n",
  1432. __func__, __LINE__, end_idx, disp_idx);
  1433. memset(panel_name, 0x00, MDSS_MAX_PANEL_LEN);
  1434. *intf_type = MDSS_PANEL_INTF_INVALID;
  1435. rc = MDSS_PANEL_INTF_INVALID;
  1436. goto get_dt_pan;
  1437. }
  1438. *end_idx = 0;
  1439. len = end_idx - disp_idx + 1;
  1440. if (len <= 0) {
  1441. pr_warn("%s: panel name not rx", __func__);
  1442. rc = -EINVAL;
  1443. goto get_dt_pan;
  1444. }
  1445. strlcpy(panel_name, disp_idx, min(++len, MDSS_MAX_PANEL_LEN));
  1446. pr_debug("%s:%d panel:[%s]", __func__, __LINE__, panel_name);
  1447. of_node_put(chosen_node);
  1448. rc = mdss_mdp_get_pan_cfg(pan_cfg);
  1449. if (!rc) {
  1450. pan_cfg->init_done = true;
  1451. return rc;
  1452. }
  1453. get_dt_pan:
  1454. rc = mdss_mdp_parse_dt_pan_intf(pdev);
  1455. /* if pref pan intf is not present */
  1456. if (rc)
  1457. pr_err("%s:unable to parse device tree for pan intf\n",
  1458. __func__);
  1459. else
  1460. pan_cfg->init_done = true;
  1461. of_node_put(chosen_node);
  1462. return rc;
  1463. }
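/*
 * For reference, the command line token parsed above is expected to be of
 * the form "mdss_mdp.panel=<lk cfg>:<intf>:<panel name>", e.g. (with a
 * placeholder panel name):
 *
 *	mdss_mdp.panel=1:dsi:qcom,mdss_dsi_sample_panel
 *
 * '1'/'0' selects whether the bootloader (LK) configuration is honoured on
 * the builds that check it, the interface token (at most four characters,
 * e.g. "dsi") is matched against pan_types[], and everything after the
 * interface token's ':' is stored as the panel name string.
 */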
  1464. static int mdss_mdp_parse_dt(struct platform_device *pdev)
  1465. {
  1466. int rc;
  1467. rc = mdss_mdp_parse_dt_hw_settings(pdev);
  1468. if (rc) {
  1469. pr_err("Error in device tree : hw settings\n");
  1470. return rc;
  1471. }
  1472. rc = mdss_mdp_parse_dt_pipe(pdev);
  1473. if (rc) {
  1474. pr_err("Error in device tree : pipes\n");
  1475. return rc;
  1476. }
  1477. rc = mdss_mdp_parse_dt_mixer(pdev);
  1478. if (rc) {
  1479. pr_err("Error in device tree : mixers\n");
  1480. return rc;
  1481. }
  1482. rc = mdss_mdp_parse_dt_ctl(pdev);
  1483. if (rc) {
  1484. pr_err("Error in device tree : ctl\n");
  1485. return rc;
  1486. }
  1487. rc = mdss_mdp_parse_dt_video_intf(pdev);
  1488. if (rc) {
  1489. pr_err("Error in device tree : ctl\n");
  1490. return rc;
  1491. }
  1492. rc = mdss_mdp_parse_dt_smp(pdev);
  1493. if (rc) {
  1494. pr_err("Error in device tree : smp\n");
  1495. return rc;
  1496. }
  1497. rc = mdss_mdp_parse_dt_prefill(pdev);
  1498. if (rc) {
  1499. pr_err("Error in device tree : prefill\n");
  1500. return rc;
  1501. }
  1502. rc = mdss_mdp_parse_dt_misc(pdev);
  1503. if (rc) {
  1504. pr_err("Error in device tree : misc\n");
  1505. return rc;
  1506. }
  1507. rc = mdss_mdp_parse_dt_ad_cfg(pdev);
  1508. if (rc) {
  1509. pr_err("Error in device tree : ad\n");
  1510. return rc;
  1511. }
  1512. rc = mdss_mdp_parse_bootarg(pdev);
  1513. if (rc) {
  1514. pr_err("%s: Error in panel override:rc=[%d]\n",
  1515. __func__, rc);
  1516. return rc;
  1517. }
  1518. rc = mdss_mdp_parse_dt_bus_scale(pdev);
  1519. if (rc) {
  1520. pr_err("Error in device tree : bus scale\n");
  1521. return rc;
  1522. }
  1523. return 0;
  1524. }
  1525. static int mdss_mdp_parse_dt_pipe_clk_ctrl(struct platform_device *pdev,
  1526. char *prop_name, struct mdss_mdp_pipe *pipe_list, u32 npipes)
  1527. {
  1528. int rc = 0;
  1529. size_t len;
  1530. const u32 *arr;
  1531. arr = of_get_property(pdev->dev.of_node, prop_name, &len);
  1532. if (arr) {
  1533. int i, j;
  1534. len /= sizeof(u32);
  1535. for (i = 0, j = 0; i < len; j++) {
  1536. struct mdss_mdp_pipe *pipe = NULL;
  1537. if (j >= npipes) {
  1538. pr_err("invalid clk ctrl enries for prop: %s\n",
  1539. prop_name);
  1540. return -EINVAL;
  1541. }
  1542. pipe = &pipe_list[j];
  1543. pipe->clk_ctrl.reg_off = be32_to_cpu(arr[i++]);
  1544. pipe->clk_ctrl.bit_off = be32_to_cpu(arr[i++]);
  1545. /* status register is next in line to ctrl register */
  1546. pipe->clk_status.reg_off = pipe->clk_ctrl.reg_off + 4;
  1547. pipe->clk_status.bit_off = be32_to_cpu(arr[i++]);
  1548. pr_debug("%s[%d]: ctrl: reg_off: 0x%x bit_off: %d\n",
  1549. prop_name, j, pipe->clk_ctrl.reg_off,
  1550. pipe->clk_ctrl.bit_off);
  1551. pr_debug("%s[%d]: status: reg_off: 0x%x bit_off: %d\n",
  1552. prop_name, j, pipe->clk_status.reg_off,
  1553. pipe->clk_status.bit_off);
  1554. }
  1555. if (j != npipes) {
  1556. pr_err("%s: %d entries found. required %d\n",
  1557. prop_name, j, npipes);
  1558. for (i = 0; i < npipes; i++) {
  1559. memset(&pipe_list[i].clk_ctrl, 0,
  1560. sizeof(pipe_list[i].clk_ctrl));
  1561. memset(&pipe_list[i].clk_status, 0,
  1562. sizeof(pipe_list[i].clk_status));
  1563. }
  1564. rc = -EINVAL;
  1565. }
  1566. } else {
  1567. pr_err("error mandatory property '%s' not found\n", prop_name);
  1568. rc = -EINVAL;
  1569. }
  1570. return rc;
  1571. }
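/*
 * Illustrative layout for the *-clk-ctrl-offsets properties handled above:
 * one <ctrl reg offset, ctrl bit, status bit> triplet per pipe, with the
 * status register implicitly located at ctrl offset + 4. Example values
 * are placeholders:
 *
 *	qcom,mdss-pipe-vig-clk-ctrl-offsets = <0x3ac 0 0>,
 *					      <0x3b4 0 0>;
 */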
  1572. static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
  1573. {
  1574. u32 npipes, dma_off;
  1575. int rc = 0;
  1576. u32 nfids = 0, setup_cnt = 0, len, nxids = 0;
  1577. u32 *offsets = NULL, *ftch_id = NULL, *xin_id = NULL;
  1578. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1579. mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
  1580. "qcom,mdss-pipe-vig-off");
  1581. mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
  1582. "qcom,mdss-pipe-rgb-off");
  1583. mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
  1584. "qcom,mdss-pipe-dma-off");
  1585. npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;
  1586. nfids += mdss_mdp_parse_dt_prop_len(pdev,
  1587. "qcom,mdss-pipe-vig-fetch-id");
  1588. nfids += mdss_mdp_parse_dt_prop_len(pdev,
  1589. "qcom,mdss-pipe-rgb-fetch-id");
  1590. nfids += mdss_mdp_parse_dt_prop_len(pdev,
  1591. "qcom,mdss-pipe-dma-fetch-id");
  1592. if (npipes != nfids) {
  1593. pr_err("device tree err: unequal number of pipes and smp ids");
  1594. return -EINVAL;
  1595. }
  1596. nxids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-vig-xin-id");
  1597. nxids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-rgb-xin-id");
  1598. nxids += mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-pipe-dma-xin-id");
  1599. if (npipes != nxids) {
  1600. pr_err("device tree err: unequal number of pipes and xin ids");
  1601. return -EINVAL;
  1602. }
  1603. offsets = kzalloc(sizeof(u32) * npipes, GFP_KERNEL);
  1604. if (!offsets) {
  1605. pr_err("no mem assigned for offsets: kzalloc fail\n");
  1606. return -ENOMEM;
  1607. }
  1608. ftch_id = kzalloc(sizeof(u32) * nfids, GFP_KERNEL);
  1609. if (!ftch_id) {
  1610. pr_err("no mem assigned for ftch_id: kzalloc fail\n");
  1611. rc = -ENOMEM;
  1612. goto ftch_alloc_fail;
  1613. }
  1614. xin_id = kzalloc(sizeof(u32) * nxids, GFP_KERNEL);
  1615. if (!xin_id) {
  1616. pr_err("no mem assigned for xin_id: kzalloc fail\n");
  1617. rc = -ENOMEM;
  1618. goto xin_alloc_fail;
  1619. }
  1620. mdata->vig_pipes = devm_kzalloc(&mdata->pdev->dev,
  1621. sizeof(struct mdss_mdp_pipe) * mdata->nvig_pipes, GFP_KERNEL);
  1622. if (!mdata->vig_pipes) {
  1623. pr_err("no mem for vig_pipes: kzalloc fail\n");
  1624. rc = -ENOMEM;
  1625. goto vig_alloc_fail;
  1626. }
  1627. mdata->rgb_pipes = devm_kzalloc(&mdata->pdev->dev,
  1628. sizeof(struct mdss_mdp_pipe) * mdata->nrgb_pipes, GFP_KERNEL);
  1629. if (!mdata->rgb_pipes) {
  1630. pr_err("no mem for rgb_pipes: kzalloc fail\n");
  1631. rc = -ENOMEM;
  1632. goto rgb_alloc_fail;
  1633. }
  1634. mdata->dma_pipes = devm_kzalloc(&mdata->pdev->dev,
  1635. sizeof(struct mdss_mdp_pipe) * mdata->ndma_pipes, GFP_KERNEL);
  1636. if (!mdata->dma_pipes) {
  1637. pr_err("no mem for dma_pipes: kzalloc fail\n");
  1638. rc = -ENOMEM;
  1639. goto dma_alloc_fail;
  1640. }
  1641. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-fetch-id",
  1642. ftch_id, mdata->nvig_pipes);
  1643. if (rc)
  1644. goto parse_fail;
  1645. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-xin-id",
  1646. xin_id, mdata->nvig_pipes);
  1647. if (rc)
  1648. goto parse_fail;
  1649. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-off",
  1650. offsets, mdata->nvig_pipes);
  1651. if (rc)
  1652. goto parse_fail;
  1653. len = min_t(int, DEFAULT_TOTAL_VIG_PIPES, (int)mdata->nvig_pipes);
  1654. rc = mdss_mdp_pipe_addr_setup(mdata, mdata->vig_pipes, offsets, ftch_id,
  1655. xin_id, MDSS_MDP_PIPE_TYPE_VIG, MDSS_MDP_SSPP_VIG0, len);
  1656. if (rc)
  1657. goto parse_fail;
  1658. setup_cnt += len;
  1659. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-fetch-id",
  1660. ftch_id + mdata->nvig_pipes, mdata->nrgb_pipes);
  1661. if (rc)
  1662. goto parse_fail;
  1663. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-xin-id",
  1664. xin_id + mdata->nvig_pipes, mdata->nrgb_pipes);
  1665. if (rc)
  1666. goto parse_fail;
  1667. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-off",
  1668. offsets + mdata->nvig_pipes, mdata->nrgb_pipes);
  1669. if (rc)
  1670. goto parse_fail;
  1671. len = min_t(int, DEFAULT_TOTAL_RGB_PIPES, (int)mdata->nrgb_pipes);
  1672. rc = mdss_mdp_pipe_addr_setup(mdata, mdata->rgb_pipes,
  1673. offsets + mdata->nvig_pipes, ftch_id + mdata->nvig_pipes,
  1674. xin_id + mdata->nvig_pipes, MDSS_MDP_PIPE_TYPE_RGB,
  1675. MDSS_MDP_SSPP_RGB0, len);
  1676. if (rc)
  1677. goto parse_fail;
  1678. setup_cnt += len;
  1679. dma_off = mdata->nvig_pipes + mdata->nrgb_pipes;
  1680. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-fetch-id",
  1681. ftch_id + dma_off, mdata->ndma_pipes);
  1682. if (rc)
  1683. goto parse_fail;
  1684. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-xin-id",
  1685. xin_id + dma_off, mdata->ndma_pipes);
  1686. if (rc)
  1687. goto parse_fail;
  1688. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-off",
  1689. offsets + dma_off, mdata->ndma_pipes);
  1690. if (rc)
  1691. goto parse_fail;
  1692. len = mdata->ndma_pipes;
  1693. rc = mdss_mdp_pipe_addr_setup(mdata, mdata->dma_pipes,
  1694. offsets + dma_off, ftch_id + dma_off, xin_id + dma_off,
  1695. MDSS_MDP_PIPE_TYPE_DMA, MDSS_MDP_SSPP_DMA0, len);
  1696. if (rc)
  1697. goto parse_fail;
  1698. setup_cnt += len;
  1699. if (mdata->nvig_pipes > DEFAULT_TOTAL_VIG_PIPES) {
  1700. rc = mdss_mdp_pipe_addr_setup(mdata,
  1701. mdata->vig_pipes + DEFAULT_TOTAL_VIG_PIPES,
  1702. offsets + DEFAULT_TOTAL_VIG_PIPES,
  1703. ftch_id + DEFAULT_TOTAL_VIG_PIPES,
  1704. xin_id + DEFAULT_TOTAL_VIG_PIPES,
  1705. MDSS_MDP_PIPE_TYPE_VIG, setup_cnt,
  1706. mdata->nvig_pipes - DEFAULT_TOTAL_VIG_PIPES);
  1707. if (rc)
  1708. goto parse_fail;
  1709. setup_cnt += mdata->nvig_pipes - DEFAULT_TOTAL_VIG_PIPES;
  1710. }
  1711. if (mdata->nrgb_pipes > DEFAULT_TOTAL_RGB_PIPES) {
  1712. rc = mdss_mdp_pipe_addr_setup(mdata,
  1713. mdata->rgb_pipes + DEFAULT_TOTAL_RGB_PIPES,
  1714. offsets + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES,
  1715. ftch_id + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES,
  1716. xin_id + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES,
  1717. MDSS_MDP_PIPE_TYPE_RGB, setup_cnt,
  1718. mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES);
  1719. if (rc)
  1720. goto parse_fail;
  1721. setup_cnt += mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES;
  1722. }
  1723. rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
  1724. "qcom,mdss-pipe-vig-clk-ctrl-offsets", mdata->vig_pipes,
  1725. mdata->nvig_pipes);
  1726. if (rc)
  1727. goto parse_fail;
  1728. rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
  1729. "qcom,mdss-pipe-rgb-clk-ctrl-offsets", mdata->rgb_pipes,
  1730. mdata->nrgb_pipes);
  1731. if (rc)
  1732. goto parse_fail;
  1733. rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
  1734. "qcom,mdss-pipe-dma-clk-ctrl-offsets", mdata->dma_pipes,
  1735. mdata->ndma_pipes);
  1736. if (rc)
  1737. goto parse_fail;
  1738. goto parse_done;
  1739. parse_fail:
  1740. kfree(mdata->dma_pipes);
  1741. dma_alloc_fail:
  1742. kfree(mdata->rgb_pipes);
  1743. rgb_alloc_fail:
  1744. kfree(mdata->vig_pipes);
  1745. parse_done:
  1746. vig_alloc_fail:
  1747. kfree(xin_id);
  1748. xin_alloc_fail:
  1749. kfree(ftch_id);
  1750. ftch_alloc_fail:
  1751. kfree(offsets);
  1752. return rc;
  1753. }
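/*
 * The pipe properties consumed above are parallel arrays with one cell per
 * pipe of the given type, and the total pipe count must match the fetch-id
 * and xin-id counts. A minimal illustrative fragment (placeholder values):
 *
 *	qcom,mdss-pipe-vig-off      = <0x1200 0x1600 0x1a00>;
 *	qcom,mdss-pipe-vig-fetch-id = <1 4 7>;
 *	qcom,mdss-pipe-vig-xin-id   = <0 4 8>;
 */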
  1754. static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
  1755. {
  1756. u32 nmixers, ndspp, npingpong;
  1757. int rc = 0;
  1758. u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
  1759. *pingpong_offsets = NULL;
  1760. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1761. mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
  1762. "qcom,mdss-mixer-intf-off");
  1763. mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
  1764. "qcom,mdss-mixer-wb-off");
  1765. ndspp = mdss_mdp_parse_dt_prop_len(pdev,
  1766. "qcom,mdss-dspp-off");
  1767. npingpong = mdss_mdp_parse_dt_prop_len(pdev,
  1768. "qcom,mdss-pingpong-off");
  1769. nmixers = mdata->nmixers_intf + mdata->nmixers_wb;
  1770. if (mdata->nmixers_intf != ndspp) {
  1771. pr_err("device tree err: unequal no of dspp and intf mixers\n");
  1772. return -EINVAL;
  1773. }
  1774. if (mdata->nmixers_intf != npingpong) {
  1775. pr_err("device tree err: unequal no of pingpong and intf mixers\n");
  1776. return -EINVAL;
  1777. }
  1778. mixer_offsets = kzalloc(sizeof(u32) * nmixers, GFP_KERNEL);
  1779. if (!mixer_offsets) {
  1780. pr_err("no mem assigned: kzalloc fail\n");
  1781. return -ENOMEM;
  1782. }
  1783. dspp_offsets = kzalloc(sizeof(u32) * ndspp, GFP_KERNEL);
  1784. if (!dspp_offsets) {
  1785. pr_err("no mem assigned: kzalloc fail\n");
  1786. rc = -ENOMEM;
  1787. goto dspp_alloc_fail;
  1788. }
  1789. pingpong_offsets = kzalloc(sizeof(u32) * npingpong, GFP_KERNEL);
  1790. if (!pingpong_offsets) {
  1791. pr_err("no mem assigned: kzalloc fail\n");
  1792. rc = -ENOMEM;
  1793. goto pingpong_alloc_fail;
  1794. }
  1795. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
  1796. mixer_offsets, mdata->nmixers_intf);
  1797. if (rc)
  1798. goto parse_done;
  1799. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
  1800. mixer_offsets + mdata->nmixers_intf, mdata->nmixers_wb);
  1801. if (rc)
  1802. goto parse_done;
  1803. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
  1804. dspp_offsets, ndspp);
  1805. if (rc)
  1806. goto parse_done;
  1807. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
  1808. pingpong_offsets, npingpong);
  1809. if (rc)
  1810. goto parse_done;
  1811. rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
  1812. dspp_offsets, pingpong_offsets,
  1813. MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
  1814. if (rc)
  1815. goto parse_done;
  1816. rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
  1817. mdata->nmixers_intf, NULL, NULL,
  1818. MDSS_MDP_MIXER_TYPE_WRITEBACK, mdata->nmixers_wb);
  1819. if (rc)
  1820. goto parse_done;
  1821. parse_done:
  1822. kfree(pingpong_offsets);
  1823. pingpong_alloc_fail:
  1824. kfree(dspp_offsets);
  1825. dspp_alloc_fail:
  1826. kfree(mixer_offsets);
  1827. return rc;
  1828. }
  1829. static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
  1830. {
  1831. u32 nwb;
  1832. int rc = 0;
  1833. u32 *ctl_offsets = NULL, *wb_offsets = NULL;
  1834. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1835. mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
  1836. "qcom,mdss-ctl-off");
  1837. nwb = mdss_mdp_parse_dt_prop_len(pdev,
  1838. "qcom,mdss-wb-off");
  1839. if (mdata->nctl != nwb) {
  1840. pr_err("device tree err: unequal number of ctl and wb\n");
  1841. rc = -EINVAL;
  1842. goto parse_done;
  1843. }
  1844. ctl_offsets = kzalloc(sizeof(u32) * mdata->nctl, GFP_KERNEL);
  1845. if (!ctl_offsets) {
  1846. pr_err("no more mem for ctl offsets\n");
  1847. return -ENOMEM;
  1848. }
  1849. wb_offsets = kzalloc(sizeof(u32) * nwb, GFP_KERNEL);
  1850. if (!wb_offsets) {
  1851. pr_err("no more mem for writeback offsets\n");
  1852. rc = -ENOMEM;
  1853. goto wb_alloc_fail;
  1854. }
  1855. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
  1856. ctl_offsets, mdata->nctl);
  1857. if (rc)
  1858. goto parse_done;
  1859. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
  1860. wb_offsets, nwb);
  1861. if (rc)
  1862. goto parse_done;
  1863. rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, wb_offsets,
  1864. mdata->nctl);
  1865. if (rc)
  1866. goto parse_done;
  1867. parse_done:
  1868. kfree(wb_offsets);
  1869. wb_alloc_fail:
  1870. kfree(ctl_offsets);
  1871. return rc;
  1872. }
  1873. static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
  1874. {
  1875. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1876. u32 count;
  1877. u32 *offsets;
  1878. int rc;
  1879. count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
  1880. if (count == 0)
  1881. return -EINVAL;
  1882. offsets = kzalloc(sizeof(u32) * count, GFP_KERNEL);
  1883. if (!offsets) {
  1884. pr_err("no mem assigned for video intf\n");
  1885. return -ENOMEM;
  1886. }
  1887. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
  1888. offsets, count);
  1889. if (rc)
  1890. goto parse_fail;
  1891. rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
  1892. if (rc)
  1893. pr_err("unable to setup video interfaces\n");
  1894. parse_fail:
  1895. kfree(offsets);
  1896. return rc;
  1897. }
  1898. static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
  1899. {
  1900. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1901. u32 num;
  1902. u32 data[2];
  1903. int rc, len;
  1904. const u32 *arr;
  1905. num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
  1906. if (num != 2)
  1907. return -EINVAL;
  1908. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
  1909. if (rc)
  1910. return rc;
  1911. rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);
  1912. if (rc) {
  1913. pr_err("unable to setup smp data\n");
  1914. return rc;
  1915. }
  1916. rc = of_property_read_u32(pdev->dev.of_node,
  1917. "qcom,mdss-smp-mb-per-pipe", data);
  1918. mdata->smp_mb_per_pipe = (!rc ? data[0] : 0);
  1919. #if defined(CONFIG_SEC_MATISSE_PROJECT)
  1920. mdata->smp_mb_per_pipe = 2;
  1921. #endif
  1922. rc = 0;
  1923. arr = of_get_property(pdev->dev.of_node,
  1924. "qcom,mdss-pipe-rgb-fixed-mmb", &len);
  1925. if (arr) {
  1926. int i, j, k;
  1927. u32 cnt, mmb;
  1928. len /= sizeof(u32);
  1929. for (i = 0, k = 0; i < len; k++) {
  1930. struct mdss_mdp_pipe *pipe = NULL;
  1931. if (k >= mdata->nrgb_pipes) {
  1932. pr_err("invalid fixed mmbs for rgb pipes\n");
  1933. return -EINVAL;
  1934. }
  1935. pipe = &mdata->rgb_pipes[k];
  1936. cnt = be32_to_cpu(arr[i++]);
  1937. if (cnt == 0)
  1938. continue;
  1939. for (j = 0; j < cnt; j++) {
  1940. mmb = be32_to_cpu(arr[i++]);
  1941. if (mmb > mdata->smp_mb_cnt) {
  1942. pr_err("overflow mmb%d: rgb%d: max%d\n",
  1943. mmb, k, mdata->smp_mb_cnt);
  1944. return -EINVAL;
  1945. }
1946. /* rgb pipes fetch only a single plane */
  1947. set_bit(mmb, pipe->smp_map[0].fixed);
  1948. }
  1949. if (bitmap_intersects(pipe->smp_map[0].fixed,
  1950. mdata->mmb_alloc_map, mdata->smp_mb_cnt)) {
  1951. pr_err("overlapping fixed mmb map\n");
  1952. return -EINVAL;
  1953. }
  1954. bitmap_or(mdata->mmb_alloc_map, pipe->smp_map[0].fixed,
  1955. mdata->mmb_alloc_map, mdata->smp_mb_cnt);
  1956. }
  1957. }
  1958. return rc;
  1959. }
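/*
 * Illustrative SMP properties (placeholder values): "qcom,mdss-smp-data"
 * carries exactly the two cells handed to mdss_mdp_smp_setup(), and
 * "qcom,mdss-pipe-rgb-fixed-mmb" lists, per RGB pipe, a count followed by
 * that many MMB indices (here pipe 0 pins MMBs 0-1, pipe 1 pins MMBs 2-3):
 *
 *	qcom,mdss-smp-data           = <4096 22>;
 *	qcom,mdss-smp-mb-per-pipe    = <2>;
 *	qcom,mdss-pipe-rgb-fixed-mmb = <2 0 1>,
 *				       <2 2 3>;
 */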
  1960. static void mdss_mdp_parse_dt_fudge_factors(struct platform_device *pdev,
  1961. char *prop_name, struct mdss_fudge_factor *ff)
  1962. {
  1963. int rc;
  1964. u32 data[2] = {1, 1};
  1965. rc = mdss_mdp_parse_dt_handler(pdev, prop_name, data, 2);
  1966. if (rc) {
  1967. pr_err("err reading %s\n", prop_name);
  1968. } else {
  1969. ff->numer = data[0];
  1970. ff->denom = data[1];
  1971. }
  1972. }
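/*
 * Each fudge factor property is read as a <numerator, denominator> pair;
 * e.g. the 1.2x ib default set below could equivalently be expressed in
 * the device tree as:
 *
 *	qcom,mdss-ib-factor = <6 5>;
 */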
  1973. static int mdss_mdp_parse_dt_prefill(struct platform_device *pdev)
  1974. {
  1975. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  1976. struct mdss_prefill_data *prefill = &mdata->prefill_data;
  1977. int rc;
  1978. rc = of_property_read_u32(pdev->dev.of_node,
  1979. "qcom,mdss-prefill-outstanding-buffer-bytes",
  1980. &prefill->ot_bytes);
  1981. if (rc) {
  1982. pr_err("prefill outstanding buffer bytes not specified\n");
  1983. return rc;
  1984. }
  1985. rc = of_property_read_u32(pdev->dev.of_node,
  1986. "qcom,mdss-prefill-y-buffer-bytes", &prefill->y_buf_bytes);
  1987. if (rc) {
  1988. pr_err("prefill y buffer bytes not specified\n");
  1989. return rc;
  1990. }
  1991. rc = of_property_read_u32(pdev->dev.of_node,
  1992. "qcom,mdss-prefill-scaler-buffer-lines-bilinear",
  1993. &prefill->y_scaler_lines_bilinear);
  1994. if (rc) {
  1995. pr_err("prefill scaler lines for bilinear not specified\n");
  1996. return rc;
  1997. }
  1998. rc = of_property_read_u32(pdev->dev.of_node,
  1999. "qcom,mdss-prefill-scaler-buffer-lines-caf",
  2000. &prefill->y_scaler_lines_caf);
  2001. if (rc) {
  2002. pr_debug("prefill scaler lines for caf not specified\n");
  2003. return rc;
  2004. }
  2005. rc = of_property_read_u32(pdev->dev.of_node,
  2006. "qcom,mdss-prefill-post-scaler-buffer-pixels",
  2007. &prefill->post_scaler_pixels);
  2008. if (rc) {
  2009. pr_err("prefill post scaler buffer pixels not specified\n");
  2010. return rc;
  2011. }
  2012. rc = of_property_read_u32(pdev->dev.of_node,
  2013. "qcom,mdss-prefill-pingpong-buffer-pixels",
  2014. &prefill->pp_pixels);
  2015. if (rc) {
  2016. pr_err("prefill pingpong buffer lines not specified\n");
  2017. return rc;
  2018. }
  2019. rc = of_property_read_u32(pdev->dev.of_node,
  2020. "qcom,mdss-prefill-fbc-lines", &prefill->fbc_lines);
  2021. if (rc) {
  2022. pr_err("prefill FBC lines not specified\n");
  2023. return rc;
  2024. }
  2025. return 0;
  2026. }
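/*
 * Illustrative prefill properties (all values are placeholders; each
 * property is a single u32 cell read above):
 *
 *	qcom,mdss-prefill-outstanding-buffer-bytes     = <1024>;
 *	qcom,mdss-prefill-y-buffer-bytes               = <4096>;
 *	qcom,mdss-prefill-scaler-buffer-lines-bilinear = <2>;
 *	qcom,mdss-prefill-scaler-buffer-lines-caf      = <4>;
 *	qcom,mdss-prefill-post-scaler-buffer-pixels    = <2048>;
 *	qcom,mdss-prefill-pingpong-buffer-pixels       = <5120>;
 *	qcom,mdss-prefill-fbc-lines                    = <2>;
 */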
  2027. static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
  2028. {
  2029. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  2030. u32 data;
  2031. int rc;
  2032. struct property *prop = NULL;
  2033. rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
  2034. &data);
  2035. mdata->rot_block_size = (!rc ? data : 128);
  2036. rc = of_property_read_u32(pdev->dev.of_node,
  2037. "qcom,mdss-rotator-ot-limit", &data);
  2038. mdata->rotator_ot_limit = (!rc ? data : 0);
  2039. mdata->has_bwc = of_property_read_bool(pdev->dev.of_node,
  2040. "qcom,mdss-has-bwc");
  2041. mdata->has_decimation = of_property_read_bool(pdev->dev.of_node,
  2042. "qcom,mdss-has-decimation");
  2043. mdata->has_wfd_blk = of_property_read_bool(pdev->dev.of_node,
  2044. "qcom,mdss-has-wfd-blk");
  2045. mdata->has_no_lut_read = of_property_read_bool(pdev->dev.of_node,
  2046. "qcom,mdss-no-lut-read");
  2047. mdata->idle_pc_enabled = of_property_read_bool(pdev->dev.of_node,
  2048. "qcom,mdss-idle-power-collapse-enabled");
  2049. prop = of_find_property(pdev->dev.of_node, "batfet-supply", NULL);
  2050. mdata->batfet_required = prop ? true : false;
  2051. rc = of_property_read_u32(pdev->dev.of_node,
  2052. "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
  2053. if (rc)
  2054. pr_debug("Could not read optional property: highest bank bit\n");
  2055. /*
  2056. * 2x factor on AB because bus driver will divide by 2
  2057. * due to 2x ports to BIMC
  2058. */
  2059. mdata->ab_factor.numer = 2;
  2060. mdata->ab_factor.denom = 1;
  2061. mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ab-factor",
  2062. &mdata->ab_factor);
  2063. /*
  2064. * 1.2 factor on ib as default value. This value is
  2065. * experimentally determined and should be tuned in device
  2066. * tree.
  2067. */
  2068. mdata->ib_factor.numer = 6;
  2069. mdata->ib_factor.denom = 5;
  2070. mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor",
  2071. &mdata->ib_factor);
  2072. /*
  2073. * Set overlap ib value equal to ib by default. This value can
  2074. * be tuned in device tree to be different from ib.
2075. * This factor applies when the max bandwidth per pipe
  2076. * is the overlap BW.
  2077. */
  2078. mdata->ib_factor_overlap.numer = mdata->ib_factor.numer;
  2079. mdata->ib_factor_overlap.denom = mdata->ib_factor.denom;
  2080. mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-ib-factor-overlap",
  2081. &mdata->ib_factor_overlap);
  2082. mdata->clk_factor.numer = 1;
  2083. mdata->clk_factor.denom = 1;
  2084. mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
  2085. &mdata->clk_factor);
  2086. rc = of_property_read_u32(pdev->dev.of_node,
  2087. "qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
  2088. if (rc)
  2089. pr_debug("max bandwidth (low) property not specified\n");
  2090. rc = of_property_read_u32(pdev->dev.of_node,
  2091. "qcom,max-bandwidth-high-kbps", &mdata->max_bw_high);
  2092. if (rc)
  2093. pr_debug("max bandwidth (high) property not specified\n");
  2094. mdata->nclk_lvl = mdss_mdp_parse_dt_prop_len(pdev,
  2095. "qcom,mdss-clk-levels");
  2096. if (mdata->nclk_lvl) {
  2097. mdata->clock_levels = kzalloc(sizeof(u32) * mdata->nclk_lvl,
  2098. GFP_KERNEL);
  2099. if (!mdata->clock_levels) {
  2100. pr_err("no mem assigned for mdata clock_levels\n");
  2101. return -ENOMEM;
  2102. }
  2103. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-clk-levels",
  2104. mdata->clock_levels, mdata->nclk_lvl);
  2105. if (rc)
  2106. pr_debug("clock levels not found\n");
  2107. }
  2108. return 0;
  2109. }
  2110. static int mdss_mdp_parse_dt_ad_cfg(struct platform_device *pdev)
  2111. {
  2112. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  2113. u32 *ad_offsets = NULL;
  2114. int rc;
  2115. mdata->nad_cfgs = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-ad-off");
  2116. if (mdata->nad_cfgs == 0) {
  2117. pr_info("SS is not using assertive display\n");
  2118. mdata->ad_cfgs = NULL;
  2119. return 0;
  2120. }
  2121. if (mdata->nad_cfgs > mdata->nmixers_intf)
  2122. return -EINVAL;
  2123. mdata->has_wb_ad = of_property_read_bool(pdev->dev.of_node,
  2124. "qcom,mdss-has-wb-ad");
  2125. ad_offsets = kzalloc(sizeof(u32) * mdata->nad_cfgs, GFP_KERNEL);
  2126. if (!ad_offsets) {
  2127. pr_err("no mem assigned: kzalloc fail\n");
  2128. return -ENOMEM;
  2129. }
  2130. rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ad-off", ad_offsets,
  2131. mdata->nad_cfgs);
  2132. if (rc)
  2133. goto parse_done;
  2134. rc = mdss_mdp_ad_addr_setup(mdata, ad_offsets);
  2135. if (rc)
  2136. pr_err("unable to setup assertive display\n");
  2137. parse_done:
  2138. kfree(ad_offsets);
  2139. return rc;
  2140. }
  2141. static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
  2142. {
  2143. int rc;
  2144. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  2145. rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-bus,num-paths",
  2146. &mdata->axi_port_cnt);
  2147. if (rc) {
  2148. pr_err("Error. qcom,msm-bus,num-paths prop not found.rc=%d\n",
  2149. rc);
  2150. return rc;
  2151. }
  2152. mdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
  2153. if (IS_ERR_OR_NULL(mdata->bus_scale_table)) {
  2154. rc = PTR_ERR(mdata->bus_scale_table);
  2155. if (!rc)
  2156. rc = -EINVAL;
  2157. pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
  2158. mdata->bus_scale_table = NULL;
  2159. }
  2160. return rc;
  2161. }
  2162. static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
  2163. char *prop_name, u32 *offsets, int len)
  2164. {
  2165. int rc;
  2166. rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
  2167. offsets, len);
  2168. if (rc) {
  2169. pr_err("Error from prop %s : u32 array read\n", prop_name);
  2170. return -EINVAL;
  2171. }
  2172. return 0;
  2173. }
  2174. static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
  2175. char *prop_name)
  2176. {
  2177. int len = 0;
  2178. of_find_property(pdev->dev.of_node, prop_name, &len);
  2179. if (len < 1) {
  2180. pr_err("Error from prop %s : spec error in device tree\n",
  2181. prop_name);
  2182. return 0;
  2183. }
  2184. len = len/sizeof(u32);
  2185. return len;
  2186. }
  2187. struct mdss_data_type *mdss_mdp_get_mdata(void)
  2188. {
  2189. return mdss_res;
  2190. }
  2191. /**
  2192. * mdss_is_ready() - checks if mdss is probed and ready
  2193. *
  2194. * Checks if mdss resources have been initialized
  2195. *
  2196. * returns true if mdss is ready, else returns false
  2197. */
  2198. bool mdss_is_ready(void)
  2199. {
  2200. return mdss_mdp_get_mdata() ? true : false;
  2201. }
  2202. EXPORT_SYMBOL(mdss_mdp_get_mdata);
  2203. /**
  2204. * mdss_panel_intf_type() - checks if a given intf type is primary
  2205. * @intf_val: panel interface type of the individual controller
  2206. *
  2207. * Individual controller queries with MDP to check if it is
  2208. * configured as the primary interface.
  2209. *
  2210. * returns a pointer to the configured structure mdss_panel_cfg
  2211. * to the controller that's configured as the primary panel interface.
  2212. * returns NULL on error or if @intf_val is not the configured
  2213. * controller.
  2214. */
  2215. struct mdss_panel_cfg *mdss_panel_intf_type(int intf_val)
  2216. {
  2217. if (!mdss_res || !mdss_res->pan_cfg.init_done)
  2218. return ERR_PTR(-EPROBE_DEFER);
  2219. if (mdss_res->pan_cfg.pan_intf == intf_val)
  2220. return &mdss_res->pan_cfg;
  2221. else
  2222. return NULL;
  2223. }
  2224. EXPORT_SYMBOL(mdss_panel_intf_type);
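/*
 * Minimal usage sketch for a controller driver (illustrative only;
 * MDSS_PANEL_INTF_DSI is assumed here to be one of the interface values
 * listed in pan_types[]):
 *
 *	struct mdss_panel_cfg *pan_cfg;
 *
 *	pan_cfg = mdss_panel_intf_type(MDSS_PANEL_INTF_DSI);
 *	if (IS_ERR(pan_cfg))
 *		return PTR_ERR(pan_cfg);
 *	else if (pan_cfg)
 *		pr_debug("this controller drives the primary panel\n");
 *
 * IS_ERR() here corresponds to -EPROBE_DEFER, i.e. MDP has not finished
 * probing yet and the caller should defer.
 */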
  2225. int mdss_panel_get_boot_cfg(void)
  2226. {
  2227. int rc;
  2228. if (!mdss_res || !mdss_res->pan_cfg.init_done)
  2229. return -EPROBE_DEFER;
  2230. if (mdss_res->pan_cfg.lk_cfg)
  2231. rc = 1;
  2232. else
  2233. rc = 0;
  2234. return rc;
  2235. }
  2236. static int mdss_mdp_cx_ctrl(struct mdss_data_type *mdata, int enable)
  2237. {
  2238. int rc = 0;
  2239. if (!mdata->vdd_cx)
  2240. return rc;
  2241. if (enable) {
  2242. rc = regulator_set_voltage(
  2243. mdata->vdd_cx,
  2244. RPM_REGULATOR_CORNER_SVS_SOC,
  2245. RPM_REGULATOR_CORNER_SUPER_TURBO);
  2246. if (rc < 0)
  2247. goto vreg_set_voltage_fail;
  2248. pr_debug("Enabling CX power rail\n");
  2249. rc = regulator_enable(mdata->vdd_cx);
  2250. if (rc) {
  2251. pr_err("Failed to enable regulator.\n");
  2252. return rc;
  2253. }
  2254. } else {
  2255. pr_debug("Disabling CX power rail\n");
  2256. rc = regulator_disable(mdata->vdd_cx);
  2257. if (rc) {
  2258. pr_err("Failed to disable regulator.\n");
  2259. return rc;
  2260. }
  2261. rc = regulator_set_voltage(
  2262. mdata->vdd_cx,
  2263. RPM_REGULATOR_CORNER_NONE,
  2264. RPM_REGULATOR_CORNER_SUPER_TURBO);
  2265. if (rc < 0)
  2266. goto vreg_set_voltage_fail;
  2267. }
  2268. return rc;
  2269. vreg_set_voltage_fail:
  2270. pr_err("Set vltg fail\n");
  2271. return rc;
  2272. }
  2273. void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable)
  2274. {
  2275. if (!mdata->batfet_required)
  2276. return;
  2277. if (!mdata->batfet) {
  2278. if (enable) {
  2279. mdata->batfet = devm_regulator_get(&mdata->pdev->dev,
  2280. "batfet");
  2281. if (IS_ERR_OR_NULL(mdata->batfet)) {
  2282. pr_debug("unable to get batfet reg. rc=%d\n",
  2283. PTR_RET(mdata->batfet));
  2284. mdata->batfet = NULL;
  2285. return;
  2286. }
  2287. } else {
  2288. pr_debug("Batfet regulator disable w/o enable\n");
  2289. return;
  2290. }
  2291. }
  2292. if (enable)
  2293. regulator_enable(mdata->batfet);
  2294. else
  2295. regulator_disable(mdata->batfet);
  2296. }
  2297. static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
  2298. {
  2299. if (!mdata->fs)
  2300. return;
  2301. if (on) {
  2302. pr_debug("Enable MDP FS\n");
  2303. if (!mdata->fs_ena) {
  2304. regulator_enable(mdata->fs);
  2305. if (!mdata->idle_pc) {
  2306. mdss_mdp_cx_ctrl(mdata, true);
  2307. mdss_mdp_batfet_ctrl(mdata, true);
  2308. }
  2309. }
  2310. mdata->fs_ena = true;
  2311. } else {
  2312. pr_debug("Disable MDP FS\n");
  2313. if (mdata->fs_ena) {
  2314. regulator_disable(mdata->fs);
  2315. if (!mdata->idle_pc) {
  2316. mdss_mdp_cx_ctrl(mdata, false);
  2317. mdss_mdp_batfet_ctrl(mdata, false);
  2318. }
  2319. }
  2320. mdata->fs_ena = false;
  2321. }
  2322. }
  2323. /**
  2324. * mdss_mdp_footswitch_ctrl_idle_pc() - MDSS GDSC control with idle power collapse
  2325. * @on: 1 to turn on footswitch, 0 to turn off footswitch
  2326. * @dev: framebuffer device node
  2327. *
  2328. * MDSS GDSC can be voted off during idle-screen usecase for MIPI DSI command
2329. * mode displays. Upon a subsequent frame update, the MDSS GDSC needs to be
2330. * turned back on and the hw state needs to be restored. An error is returned
2331. * if the footswitch control API fails.
  2332. */
  2333. int mdss_mdp_footswitch_ctrl_idle_pc(int on, struct device *dev)
  2334. {
  2335. struct mdss_data_type *mdata = mdss_mdp_get_mdata();
  2336. int rc = 0;
  2337. pr_debug("called on=%d\n", on);
  2338. if (on) {
  2339. pm_runtime_get_sync(dev);
  2340. rc = mdss_iommu_ctrl(1);
  2341. if (IS_ERR_VALUE(rc)) {
  2342. pr_err("mdss iommu attach failed rc=%d\n", rc);
  2343. return rc;
  2344. }
  2345. mdss_hw_init(mdata);
  2346. mdata->idle_pc = false;
  2347. mdss_iommu_ctrl(0);
  2348. } else {
  2349. mdata->idle_pc = true;
  2350. pm_runtime_put_sync(dev);
  2351. }
  2352. return 0;
  2353. }
  2354. int mdss_mdp_secure_display_ctrl(unsigned int enable)
  2355. {
  2356. struct sd_ctrl_req {
  2357. unsigned int enable;
  2358. } __attribute__ ((__packed__)) request;
  2359. unsigned int resp = -1;
  2360. int ret = 0;
  2361. request.enable = enable;
  2362. ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
  2363. &request, sizeof(request), &resp, sizeof(resp));
  2364. pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
  2365. enable, ret, resp);
  2366. if (ret)
  2367. return ret;
  2368. return resp;
  2369. }
  2370. static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
  2371. {
  2372. mdata->suspend_fs_ena = mdata->fs_ena;
  2373. mdss_mdp_footswitch_ctrl(mdata, false);
  2374. pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
  2375. return 0;
  2376. }
  2377. static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
  2378. {
  2379. if (mdata->suspend_fs_ena)
  2380. mdss_mdp_footswitch_ctrl(mdata, true);
  2381. pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
  2382. return 0;
  2383. }
  2384. #ifdef CONFIG_PM_SLEEP
  2385. static int mdss_mdp_pm_suspend(struct device *dev)
  2386. {
  2387. struct mdss_data_type *mdata;
  2388. mdata = dev_get_drvdata(dev);
  2389. if (!mdata)
  2390. return -ENODEV;
  2391. dev_dbg(dev, "display pm suspend\n");
  2392. return mdss_mdp_suspend_sub(mdata);
  2393. }
  2394. static int mdss_mdp_pm_resume(struct device *dev)
  2395. {
  2396. struct mdss_data_type *mdata;
  2397. mdata = dev_get_drvdata(dev);
  2398. if (!mdata)
  2399. return -ENODEV;
  2400. dev_dbg(dev, "display pm resume\n");
  2401. return mdss_mdp_resume_sub(mdata);
  2402. }
  2403. #endif
  2404. #if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
  2405. static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
  2406. {
  2407. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  2408. if (!mdata)
  2409. return -ENODEV;
  2410. dev_dbg(&pdev->dev, "display suspend\n");
  2411. return mdss_mdp_suspend_sub(mdata);
  2412. }
  2413. static int mdss_mdp_resume(struct platform_device *pdev)
  2414. {
  2415. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  2416. if (!mdata)
  2417. return -ENODEV;
  2418. dev_dbg(&pdev->dev, "display resume\n");
  2419. return mdss_mdp_resume_sub(mdata);
  2420. }
  2421. #else
  2422. #define mdss_mdp_suspend NULL
  2423. #define mdss_mdp_resume NULL
  2424. #endif
  2425. #ifdef CONFIG_PM_RUNTIME
  2426. static int mdss_mdp_runtime_resume(struct device *dev)
  2427. {
  2428. struct mdss_data_type *mdata = dev_get_drvdata(dev);
  2429. bool device_on = true;
  2430. if (!mdata)
  2431. return -ENODEV;
  2432. dev_dbg(dev, "pm_runtime: resuming...\n");
  2433. /* do not resume panels when coming out of idle power collapse */
  2434. if (!mdata->idle_pc)
  2435. device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
  2436. mdss_mdp_footswitch_ctrl(mdata, true);
  2437. return 0;
  2438. }
  2439. static int mdss_mdp_runtime_idle(struct device *dev)
  2440. {
  2441. struct mdss_data_type *mdata = dev_get_drvdata(dev);
  2442. if (!mdata)
  2443. return -ENODEV;
  2444. dev_dbg(dev, "pm_runtime: idling...\n");
  2445. return 0;
  2446. }
  2447. static int mdss_mdp_runtime_suspend(struct device *dev)
  2448. {
  2449. struct mdss_data_type *mdata = dev_get_drvdata(dev);
  2450. bool device_on = false;
  2451. if (!mdata)
  2452. return -ENODEV;
  2453. dev_dbg(dev, "pm_runtime: suspending...\n");
  2454. if (mdata->clk_ena) {
  2455. pr_err("MDP suspend failed\n");
  2456. return -EBUSY;
  2457. }
  2458. /* do not suspend panels when going in to idle power collapse */
  2459. if (!mdata->idle_pc)
  2460. device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
  2461. mdss_mdp_footswitch_ctrl(mdata, false);
  2462. return 0;
  2463. }
  2464. #endif
  2465. static const struct dev_pm_ops mdss_mdp_pm_ops = {
  2466. SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
  2467. SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
  2468. mdss_mdp_runtime_resume,
  2469. mdss_mdp_runtime_idle)
  2470. };
  2471. static int mdss_mdp_remove(struct platform_device *pdev)
  2472. {
  2473. struct mdss_data_type *mdata = platform_get_drvdata(pdev);
  2474. if (!mdata)
  2475. return -ENODEV;
  2476. pm_runtime_disable(&pdev->dev);
  2477. mdss_mdp_pp_term(&pdev->dev);
  2478. mdss_mdp_bus_scale_unregister(mdata);
  2479. mdss_debugfs_remove(mdata);
  2480. return 0;
  2481. }
  2482. static const struct of_device_id mdss_mdp_dt_match[] = {
  2483. { .compatible = "qcom,mdss_mdp",},
  2484. {}
  2485. };
  2486. MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
  2487. static struct platform_driver mdss_mdp_driver = {
  2488. .probe = mdss_mdp_probe,
  2489. .remove = mdss_mdp_remove,
  2490. .suspend = mdss_mdp_suspend,
  2491. .resume = mdss_mdp_resume,
  2492. .shutdown = NULL,
  2493. .driver = {
  2494. /*
  2495. * Driver name must match the device name added in
  2496. * platform.c.
  2497. */
  2498. .name = "mdp",
  2499. .of_match_table = mdss_mdp_dt_match,
  2500. .pm = &mdss_mdp_pm_ops,
  2501. },
  2502. };
  2503. static int mdss_mdp_register_driver(void)
  2504. {
  2505. return platform_driver_register(&mdss_mdp_driver);
  2506. }
  2507. static int __init mdss_mdp_driver_init(void)
  2508. {
  2509. int ret;
  2510. ret = mdss_mdp_register_driver();
  2511. if (ret) {
  2512. pr_err("mdp_register_driver() failed!\n");
  2513. return ret;
  2514. }
  2515. return 0;
  2516. }
  2517. void mdss_mdp_underrun_clk_info(void)
  2518. {
  2519. pr_info(" mdp_clk = %ld, bus_ab = %llu, bus_ib = %llu\n",
  2520. clk_rate_dbg, bus_ab_quota_dbg, bus_ib_quota_dbg);
  2521. }
  2522. module_init(mdss_mdp_driver_init);