/* mdp4_overlay_dsi_cmd.c */
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
  13. #include <linux/module.h>
  14. #include <linux/kernel.h>
  15. #include <linux/sched.h>
  16. #include <linux/time.h>
  17. #include <linux/init.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/delay.h>
  20. #include <linux/io.h>
  21. #include <linux/semaphore.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/fb.h>
  24. #include <asm/system.h>
  25. #include <asm/mach-types.h>
  26. #include <mach/hardware.h>
  27. #include "mdp.h"
  28. #include "msm_fb.h"
  29. #include "mipi_dsi.h"
  30. #include "mdp4.h"
/* Lines above dst_y at which the MDP read-pointer vsync fires
 * (see mdp4_mipi_vsync_enable). */
static int vsync_start_y_adjust = 4;

#define MAX_CONTROLLER	1

/*
 * VSYNC_EXPIRE_TICK == 0 means clock always on
 * VSYNC_EXPIRE_TICK == 4 is recommended
 */
#define VSYNC_EXPIRE_TICK 4

/* Per-controller bookkeeping for DSI command-mode vsync and kickoff.
 * NOTE: "vsycn" is a historical misspelling kept as-is; other files
 * in this driver reference the same pattern.
 */
static struct vsycn_ctrl {
	struct device *dev;
	int inited;			/* one-time init done (mdp4_dsi_rdptr_init) */
	int update_ndx;			/* active vlist[] slot, toggles 0/1 */
	int expire_tick;		/* vsync ticks left before clocks are gated */
	int blt_wait;			/* pipe_commit must wait for dmap when set */
	u32 ov_koff;			/* overlay kickoff count (blt mode) */
	u32 ov_done;			/* overlay done-irq count */
	u32 dmap_koff;			/* dmap kickoff count */
	u32 dmap_done;			/* dmap done-irq count */
	u32 pan_display;		/* outstanding pan requests; keeps clocks on */
	uint32 rdptr_intr_tot;		/* total read-pointer interrupts seen */
	uint32 rdptr_sirq_tot;
	atomic_t suspend;		/* non-zero while panel is off */
	int wait_vsync_cnt;		/* waiters blocked on vsync_comp */
	int blt_change;			/* blt mode toggled; reprogram ovp/dmap */
	int blt_free;			/* commits left before wb buffer is freed */
	int blt_end;			/* blt disable pending completion */
	int sysfs_created;
	struct mutex update_lock;	/* guards vlist/update_ndx and clk state */
	struct completion ov_comp;	/* signalled by overlay0 done isr */
	struct completion dmap_comp;	/* signalled by dmap done isr */
	struct completion vsync_comp;	/* signalled by read-pointer isr */
	spinlock_t spin_lock;		/* guards koff/done counters, irq arming */
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *base_pipe;
	struct vsync_update vlist[2];	/* double-buffered pending pipe updates */
	int vsync_enabled;
	int clk_enabled;
	int new_update;			/* skip first vsync report after clk on */
	int clk_control;		/* request clk_work to gate clocks */
	ktime_t vsync_time;		/* timestamp of latest read-pointer irq */
	struct work_struct clk_work;	/* deferred clock gating (clk_ctrl_work) */
} vsync_ctrl_db[MAX_CONTROLLER];
/* Unmask one MDP interrupt source and enable its irq term.
 * mdp_spin_lock serializes updates to the shared mdp_intr_mask.
 */
static void vsync_irq_enable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	/* no need to clear other interrupts for command mode */
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
}
/* Mask one MDP interrupt source and disable its irq term.
 * Uses the nosync variant since this may run from irq context.
 */
static void vsync_irq_disable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	/* no need to clear other interrupts for command mode */
	mdp_intr_mask &= ~intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
}
  92. static void mdp4_dsi_cmd_blt_ov_update(struct mdp4_overlay_pipe *pipe)
  93. {
  94. uint32 off, addr;
  95. int bpp;
  96. char *overlay_base;
  97. if (pipe->ov_blt_addr == 0)
  98. return;
  99. #ifdef BLT_RGB565
  100. bpp = 2; /* overlay ouput is RGB565 */
  101. #else
  102. bpp = 3; /* overlay ouput is RGB888 */
  103. #endif
  104. off = 0;
  105. if (pipe->ov_cnt & 0x01)
  106. off = pipe->src_height * pipe->src_width * bpp;
  107. addr = pipe->ov_blt_addr + off;
  108. /* overlay 0 */
  109. overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
  110. outpdw(overlay_base + 0x000c, addr);
  111. outpdw(overlay_base + 0x001c, addr);
  112. }
  113. static void mdp4_dsi_cmd_blt_dmap_update(struct mdp4_overlay_pipe *pipe)
  114. {
  115. uint32 off, addr;
  116. int bpp;
  117. if (pipe->ov_blt_addr == 0)
  118. return;
  119. #ifdef BLT_RGB565
  120. bpp = 2; /* overlay ouput is RGB565 */
  121. #else
  122. bpp = 3; /* overlay ouput is RGB888 */
  123. #endif
  124. off = 0;
  125. if (pipe->dmap_cnt & 0x01)
  126. off = pipe->src_height * pipe->src_width * bpp;
  127. addr = pipe->dma_blt_addr + off;
  128. /* dmap */
  129. MDP_OUTP(MDP_BASE + 0x90008, addr);
  130. }
  131. static void mdp4_dsi_cmd_wait4dmap(int cndx);
  132. static void mdp4_dsi_cmd_wait4ov(int cndx);
  133. static void mdp4_dsi_cmd_do_blt(struct msm_fb_data_type *mfd, int enable)
  134. {
  135. unsigned long flags;
  136. int cndx = 0;
  137. struct vsycn_ctrl *vctrl;
  138. struct mdp4_overlay_pipe *pipe;
  139. int need_wait;
  140. vctrl = &vsync_ctrl_db[cndx];
  141. pipe = vctrl->base_pipe;
  142. mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
  143. if (mfd->ov0_wb_buf->write_addr == 0) {
  144. pr_err("%s: no blt_base assigned\n", __func__);
  145. return;
  146. }
  147. spin_lock_irqsave(&vctrl->spin_lock, flags);
  148. if (enable && pipe->ov_blt_addr == 0) {
  149. vctrl->blt_change++;
  150. if (vctrl->dmap_koff != vctrl->dmap_done) {
  151. INIT_COMPLETION(vctrl->dmap_comp);
  152. need_wait = 1;
  153. }
  154. } else if (enable == 0 && pipe->ov_blt_addr) {
  155. vctrl->blt_change++;
  156. if (vctrl->ov_koff != vctrl->dmap_done) {
  157. INIT_COMPLETION(vctrl->dmap_comp);
  158. need_wait = 1;
  159. }
  160. }
  161. spin_unlock_irqrestore(&vctrl->spin_lock, flags);
  162. if (need_wait)
  163. mdp4_dsi_cmd_wait4dmap(0);
  164. spin_lock_irqsave(&vctrl->spin_lock, flags);
  165. if (enable && pipe->ov_blt_addr == 0) {
  166. pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
  167. pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
  168. pipe->ov_cnt = 0;
  169. pipe->dmap_cnt = 0;
  170. vctrl->ov_koff = vctrl->dmap_koff;
  171. vctrl->ov_done = vctrl->dmap_done;
  172. vctrl->blt_free = 0;
  173. vctrl->blt_wait = 0;
  174. vctrl->blt_end = 0;
  175. mdp4_stat.blt_dsi_video++;
  176. } else if (enable == 0 && pipe->ov_blt_addr) {
  177. pipe->ov_blt_addr = 0;
  178. pipe->dma_blt_addr = 0;
  179. vctrl->blt_end = 1;
  180. vctrl->blt_free = 4; /* 4 commits to free wb buf */
  181. }
  182. pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
  183. vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
  184. spin_unlock_irqrestore(&vctrl->spin_lock, flags);
  185. }
/*
 * mdp4_dsi_cmd_pipe_queue:
 * called from thread context; clones the caller's pipe state into the
 * currently-open vlist slot so mdp4_dsi_cmd_pipe_commit can apply it.
 */
void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pp;
	int undx;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	/* no queuing while the panel is suspended */
	if (atomic_read(&vctrl->suspend)) {
		pr_err("%s: suspended, no more pipe queue\n", __func__);
		return;
	}

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];

	pp = &vp->plist[pipe->pipe_ndx - 1]; /* ndx start form 1 */

	pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__,
		undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid);

	*pp = *pipe; /* clone it */
	vp->update_cnt++;
	mutex_unlock(&vctrl->update_lock);
	mdp4_stat.overlay_play[pipe->mixer_num]++;
}
static void mdp4_dsi_cmd_blt_ov_update(struct mdp4_overlay_pipe *pipe);

/*
 * mdp4_dsi_cmd_pipe_commit: apply the queued vsync_update list to the
 * mixer and kick off either the overlay engine (blt mode) or dmap
 * directly. Returns the number of pipes committed.
 * NOTE(review): vctrl is hard-coded to vsync_ctrl_db[0]; the cndx
 * argument is only honoured by the trailing wait4vsync call
 * (harmless while MAX_CONTROLLER == 1).
 */
int mdp4_dsi_cmd_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int need_dmap_wait = 0;
	int need_ov_wait = 0;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[0];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	/* nothing queued since the last commit */
	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return cnt;
	}

	/* flip to the other vlist slot so new queues land there */
	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0; /* reset */

	/* countdown after blt disable before the wb buffer is released */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	/* decide whether the previous frame is still in flight */
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		/* Blt */
		if (vctrl->blt_wait)
			need_dmap_wait = 1;
		if (vctrl->ov_koff != vctrl->ov_done) {
			INIT_COMPLETION(vctrl->ov_comp);
			need_ov_wait = 1;
		}
	} else {
		/* direct out */
		if (vctrl->dmap_koff != vctrl->dmap_done) {
			INIT_COMPLETION(vctrl->dmap_comp);
			pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n",
				__func__, vctrl->ov_koff, vctrl->ov_done,
				vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
			need_dmap_wait = 1;
		}
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (need_dmap_wait) {
		pr_debug("%s: wait4dmap\n", __func__);
		mdp4_dsi_cmd_wait4dmap(0);
	}
	if (need_ov_wait) {
		pr_debug("%s: wait4ov\n", __func__);
		mdp4_dsi_cmd_wait4ov(0);
	}

	/* finalize a pending blt-disable once prior frames have drained */
	if (pipe->ov_blt_addr) {
		if (vctrl->blt_end) {
			vctrl->blt_end = 0;
			pipe->ov_blt_addr = 0;
			pipe->dma_blt_addr = 0;
		}
	}

	/* reprogram overlay proc and dmap after a blt mode change */
	if (vctrl->blt_change) {
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		vctrl->blt_change = 0;
	}

	/* walk the cloned pipe list and commit each used pipe */
	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	/* tx dcs command if had any */
	mipi_dsi_cmdlist_commit(1);

	mdp4_mixer_stage_commit(mixer);

	/* arm the done interrupt for whichever engine we kick */
	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_dsi_cmd_blt_ov_update(pipe);
		pipe->ov_cnt++;
		vctrl->ov_koff++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	} else {
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		vctrl->dmap_koff++;
	}
	pr_debug("%s: kickoff, pid=%d\n", __func__, current->pid);
	/* kickoff overlay engine */
	mdp4_stat.kickoff_ov0++;
	outpdw(MDP_BASE + 0x0004, 0);
	mb(); /* make sure kickoff executed */
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		long long tick;

		mdp4_dsi_cmd_wait4vsync(cndx, &tick);
	}

	return cnt;
}
static void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd);

/* Enable or disable vsync reporting (the read-pointer irq).
 * Enabling turns mdp/dsi clocks back on if they were gated; disabling
 * arms the VSYNC_EXPIRE_TICK countdown so the isr gates the clocks
 * via clk_work once the ticks run out.
 */
void mdp4_dsi_cmd_vsync_ctrl(struct fb_info *info, int enable)
{
	struct vsycn_ctrl *vctrl;
	unsigned long flags;
	int cndx = 0;
	int clk_set_on = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);

	pr_debug("%s: clk_enabled=%d vsync_enabled=%d req=%d\n", __func__,
		vctrl->clk_enabled, vctrl->vsync_enabled, enable);

	/* already in the requested state */
	if (vctrl->vsync_enabled == enable) {
		mutex_unlock(&vctrl->update_lock);
		return;
	}

	vctrl->vsync_enabled = enable;

	if (enable) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (vctrl->clk_enabled == 0) {
			pr_debug("%s: SET_CLK_ON\n", __func__);
			mipi_dsi_clk_cfg(1);
			mdp_clk_ctrl(1);
			vctrl->clk_enabled = 1;
			/* skip the first vsync report after clocks return */
			vctrl->new_update = 1;
			clk_set_on = 1;
		}
		if (clk_set_on) {
			vsync_irq_enable(INTR_PRIMARY_RDPTR,
						MDP_PRIM_RDPTR_TERM);
		}
	} else {
		/* defer clock gating by VSYNC_EXPIRE_TICK vsyncs */
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	}
	mutex_unlock(&vctrl->update_lock);
}
/* Block until the next read-pointer (vsync) irq, then return its
 * timestamp through *vtime (-1 while suspended).
 * NOTE(review): *vtime is left untouched on the cndx-out-of-range
 * early return -- callers must pre-initialize it.
 */
void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (atomic_read(&vctrl->suspend) > 0) {
		*vtime = -1;
		return;
	}

	/* re-arm the completion only for the first concurrent waiter */
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	wait_for_completion(&vctrl->vsync_comp);
	mdp4_stat.wait4vsync0++;

	*vtime = ktime_to_ns(vctrl->vsync_time);
}
  401. static void mdp4_dsi_cmd_wait4dmap(int cndx)
  402. {
  403. struct vsycn_ctrl *vctrl;
  404. if (cndx >= MAX_CONTROLLER) {
  405. pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
  406. return;
  407. }
  408. vctrl = &vsync_ctrl_db[cndx];
  409. if (atomic_read(&vctrl->suspend) > 0)
  410. return;
  411. wait_for_completion(&vctrl->dmap_comp);
  412. }
  413. static void mdp4_dsi_cmd_wait4ov(int cndx)
  414. {
  415. struct vsycn_ctrl *vctrl;
  416. if (cndx >= MAX_CONTROLLER) {
  417. pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
  418. return;
  419. }
  420. vctrl = &vsync_ctrl_db[cndx];
  421. if (atomic_read(&vctrl->suspend) > 0)
  422. return;
  423. wait_for_completion(&vctrl->ov_comp);
  424. }
/*
 * primary_rdptr_isr:
 * called from interrupt context on each panel read-pointer (vsync).
 * Wakes vsync waiters and drives the clock-gating countdown.
 */
static void primary_rdptr_isr(int cndx)
{
	struct vsycn_ctrl *vctrl;

	vctrl = &vsync_ctrl_db[cndx];
	pr_debug("%s: ISR, tick=%d pan=%d cpu=%d\n", __func__,
		vctrl->expire_tick, vctrl->pan_display, smp_processor_id());
	vctrl->rdptr_intr_tot++;

	spin_lock(&vctrl->spin_lock);
	vctrl->vsync_time = ktime_get();

	/* first vsync after clocks came back: swallow it */
	if (vctrl->new_update) {
		vctrl->new_update = 0;
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	complete_all(&vctrl->vsync_comp);
	vctrl->wait_vsync_cnt = 0;

	/* countdown toward gating the clocks via clk_work */
	if (vctrl->expire_tick) {
		vctrl->expire_tick--;
		if (vctrl->expire_tick == 0) {
			if (vctrl->pan_display <= 0) {
				vctrl->clk_control = 1;
				schedule_work(&vctrl->clk_work);
			} else {
				/* pan still outstanding: wait one more vsync */
				vctrl->expire_tick += 1;
			}
		}
	}
	spin_unlock(&vctrl->spin_lock);
}
/* dmap-done isr handler. Completes dmap waiters; in blt mode, if the
 * overlay engine is ahead of dmap (diff > 0) it immediately kicks
 * dmap again to read out the next finished frame.
 */
void mdp4_dmap_done_dsi_cmd(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	/* blt enabled */
	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	vctrl->dmap_done++;
	if (vctrl->pan_display)
		vctrl->pan_display--;
	diff = vctrl->ov_done - vctrl->dmap_done;
	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
		vctrl->dmap_done, smp_processor_id());
	complete(&vctrl->dmap_comp);
	if (diff <= 0) {
		/* dmap caught up with overlay: release any commit parked
		 * on blt_wait */
		if (vctrl->blt_wait)
			vctrl->blt_wait = 0;
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	/* kick dmap */
	mdp4_dsi_cmd_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	mdp4_stat.kickoff_dmap++;
	vctrl->dmap_koff++;
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
	mb(); /* make sure kickoff executed */
	spin_unlock(&vctrl->spin_lock);
}
/*
 * mdp4_overlay0_done_dsi_cmd: called from isr when the overlay engine
 * finishes rendering a frame into the blt buffer. Kicks dmap to read
 * the frame out, unless dmap is too far behind (then dmap_done will
 * do the kick and pipe_commit is made to wait via blt_wait).
 */
void mdp4_overlay0_done_dsi_cmd(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int diff;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	vctrl->ov_done++;
	complete(&vctrl->ov_comp);
	diff = vctrl->ov_done - vctrl->dmap_done;

	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
		vctrl->dmap_done, smp_processor_id());

	if (pipe->ov_blt_addr == 0) {
		/* blt disabled */
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	if (diff > 1) {
		/*
		 * two overlay_done and none dmap_done yet
		 * let dmap_done kickoff dmap
		 * and put pipe_commit to wait
		 */
		vctrl->blt_wait = 1;
		pr_debug("%s: blt_wait set\n", __func__);
		spin_unlock(&vctrl->spin_lock);
		return;
	}

	/* kick dmap to consume the frame just rendered */
	mdp4_dsi_cmd_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	mdp4_stat.kickoff_dmap++;
	vctrl->dmap_koff++;
	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
	mb(); /* make sure kickoff executed */
	spin_unlock(&vctrl->spin_lock);
}
/* Worker scheduled by primary_rdptr_isr once expire_tick runs out:
 * disables the read-pointer irq and gates the dsi/mdp clocks after
 * the dsi link goes idle. Runs in process context so it may sleep.
 */
static void clk_ctrl_work(struct work_struct *work)
{
	unsigned long flags;
	struct vsycn_ctrl *vctrl =
		container_of(work, typeof(*vctrl), clk_work);

	mutex_lock(&vctrl->update_lock);
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->clk_control && vctrl->clk_enabled) {
		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
		vctrl->clk_enabled = 0;
		vctrl->clk_control = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		/* make sure dsi link is idle */
		mipi_dsi_mdp_busy_wait();
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_debug("%s: SET_CLK_OFF, pid=%d\n", __func__, current->pid);
	} else {
		/* request was cancelled (vsync re-enabled) */
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
	}
	mutex_unlock(&vctrl->update_lock);
}
/* sysfs show handler reporting the next vsync timestamp ("VSYNC=<ns>").
 * Blocks (interruptibly, up to 4 vsync periods) for the next read-pointer
 * irq; on timeout falls back to the current time.
 * NOTE(review): wait_vsync_cnt is reset without the spin_lock on the
 * timeout path, and buf[strlen(buf)+1]='\0' writes one byte past the
 * terminator snprintf already placed -- both look unintended; confirm
 * before changing.
 */
ssize_t mdp4_dsi_cmd_show_event(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;

	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0)
		return 0;

	/* re-arm only for the first concurrent waiter */
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		/* timed out or interrupted: report current time instead */
		vctrl->wait_vsync_cnt = 0;
		vsync_tick = ktime_to_ns(ktime_get());
		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
		buf[strlen(buf) + 1] = '\0';
		return ret;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	pr_debug("%s: UEVENT\n", __func__);

	buf[strlen(buf) + 1] = '\0';
	return ret;
}
/* One-time init of the per-controller vsync state; safe to call again
 * (guarded by ->inited). Starts in suspended state until panel-on.
 */
void mdp4_dsi_rdptr_init(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	if (vctrl->inited)
		return;

	vctrl->inited = 1;
	vctrl->update_ndx = 0;
	mutex_init(&vctrl->update_lock);
	init_completion(&vctrl->ov_comp);
	init_completion(&vctrl->dmap_comp);
	init_completion(&vctrl->vsync_comp);
	spin_lock_init(&vctrl->spin_lock);
	atomic_set(&vctrl->suspend, 1);	/* off until mdp4_dsi_cmd_on() */
	INIT_WORK(&vctrl->clk_work, clk_ctrl_work);
}
/* Entry point from the shared MDP irq dispatcher for the primary
 * panel's read-pointer interrupt; controller 0 only.
 */
void mdp4_primary_rdptr(void)
{
	primary_rdptr_isr(0);
}
  620. static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
  621. {
  622. /*
  623. * The adreno GPU hardware requires that the pitch be aligned to
  624. * 32 pixels for color buffers, so for the cases where the GPU
  625. * is writing directly to fb0, the framebuffer pitch
  626. * also needs to be 32 pixel aligned
  627. */
  628. if (fb_index == 0)
  629. return ALIGN(xres, 32) * bpp;
  630. else
  631. return xres * bpp;
  632. }
/* Program the MDP tear-enable (TE) bit and vsync start line for the
 * primary (which == 0) or secondary panel; clears the TE bit when
 * hardware vsync is not in use.
 */
void mdp4_mipi_vsync_enable(struct msm_fb_data_type *mfd,
		struct mdp4_overlay_pipe *pipe, int which)
{
	uint32 start_y, data, tear_en;

	tear_en = (1 << which);

	if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) &&
		(mfd->panel_info.lcd.vsync_enable)) {
		/* wrap to the end of the panel if the adjust would go
		 * above the first line */
		if (vsync_start_y_adjust <= pipe->dst_y)
			start_y = pipe->dst_y - vsync_start_y_adjust;
		else
			start_y = (mfd->total_lcd_lines - 1) -
				(vsync_start_y_adjust - pipe->dst_y);
		if (which == 0)
			MDP_OUTP(MDP_BASE + 0x210, start_y);	/* primary */
		else
			MDP_OUTP(MDP_BASE + 0x214, start_y);	/* secondary */

		data = inpdw(MDP_BASE + 0x20c);
		data |= tear_en;
		MDP_OUTP(MDP_BASE + 0x20c, data);
	} else {
		data = inpdw(MDP_BASE + 0x20c);
		data &= ~tear_en;
		MDP_OUTP(MDP_BASE + 0x20c, data);
	}
}
/* Replace the controller's base-layer pipe pointer (e.g. for
 * writeback/base switch); no hardware programming happens here.
 */
void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	vctrl->base_pipe = pipe;
}
/* Fill the base-layer pipe's source geometry and framebuffer address
 * from mfd's ibuf/fbi (full-screen, origin 0,0), honouring the 3D
 * side-by-side dimensions when 3D mode is active.
 */
static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd,
			struct mdp4_overlay_pipe *pipe)
{
	MDPIBUF *iBuf = &mfd->ibuf;
	struct fb_info *fbi;
	int bpp;
	uint8 *src;

	/* whole screen for base layer */
	src = (uint8 *) iBuf->buf;
	fbi = mfd->fbi;

	if (pipe->is_3d) {
		bpp = fbi->var.bits_per_pixel / 8;
		pipe->src_height = pipe->src_height_3d;
		pipe->src_width = pipe->src_width_3d;
		pipe->src_h = pipe->src_height_3d;
		pipe->src_w = pipe->src_width_3d;
		pipe->dst_h = pipe->src_height_3d;
		pipe->dst_w = pipe->src_width_3d;
		pipe->srcp0_ystride = msm_fb_line_length(0,
					pipe->src_width, bpp);
	} else {
		/* 2D */
		pipe->src_height = fbi->var.yres;
		pipe->src_width = fbi->var.xres;
		pipe->src_h = fbi->var.yres;
		pipe->src_w = fbi->var.xres;
		pipe->dst_h = fbi->var.yres;
		pipe->dst_w = fbi->var.xres;
		pipe->srcp0_ystride = fbi->fix.line_length;
	}
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_y = 0;
	pipe->dst_x = 0;
	pipe->srcp0_addr = (uint32)src;
}
/* Build (on first call) or refresh the base-layer RGB pipe for the
 * DSI command-mode panel, then program TE, the dsi stream id, and
 * the overlay/dmap pipeline. Called with mdp clocks on.
 */
static void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
{
	int ptype;
	struct mdp4_overlay_pipe *pipe;
	int ret;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;

	if (mfd->key != MFD_KEY)
		return;

	vctrl = &vsync_ctrl_db[cndx];

	if (vctrl->base_pipe == NULL) {
		/* first time: allocate and configure the base pipe */
		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
		if (ptype < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);
		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
		if (pipe == NULL) {
			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
			return;
		}
		pipe->pipe_used++;
		pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
		pipe->mixer_num = MDP4_MIXER0;
		pipe->src_format = mfd->fb_imgType;
		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
		ret = mdp4_overlay_format2pipe(pipe);
		if (ret < 0)
			printk(KERN_INFO "%s: format2type failed\n", __func__);

		vctrl->base_pipe = pipe; /* keep it */
		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;
	} else {
		pipe = vctrl->base_pipe;
	}

	/* TE enabled */
	mdp4_mipi_vsync_enable(mfd, pipe, 0);

	MDP_OUTP(MDP_BASE + 0x021c, 10); /* read pointer */

	/*
	 * configure dsi stream id
	 * dma_p = 0, dma_s = 1
	 */
	MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
	/* disable dsi trigger */
	MDP_OUTP(MDP_BASE + 0x000a4, 0x00);

	mdp4_overlay_setup_pipe_addr(mfd, pipe);

	mdp4_overlay_rgb_setup(pipe);

	mdp4_overlay_reg_flush(pipe, 1);

	mdp4_mixer_stage_up(pipe, 0);

	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_dmap_xy(pipe);

	mdp4_overlay_dmap_cfg(mfd, 0);

	wmb();
}
/* 3D side by side: switch the base layer between 3D side-by-side and
 * 2D geometry per r3d, then reprogram and commit the pipeline with
 * clocks held on for the duration.
 * NOTE(review): srcp0_addr is programmed with src == NULL here;
 * presumably the real address is restored by a later
 * setup_pipe_addr/pan path -- confirm against callers.
 */
void mdp4_dsi_cmd_3d_sbys(struct msm_fb_data_type *mfd,
				struct msmfb_overlay_3d *r3d)
{
	struct fb_info *fbi;
	int bpp;
	uint8 *src = NULL;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (pipe == NULL)
		return;

	/* only the staged base layer may be reconfigured here */
	if (pipe->pipe_used == 0 ||
			pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
		pr_err("%s: NOT baselayer\n", __func__);
		return;
	}

	pipe->is_3d = r3d->is_3d;
	pipe->src_height_3d = r3d->height;
	pipe->src_width_3d = r3d->width;

	if (pipe->is_3d)
		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
	else
		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE);

	fbi = mfd->fbi;
	if (pipe->is_3d) {
		bpp = fbi->var.bits_per_pixel / 8;
		pipe->src_height = pipe->src_height_3d;
		pipe->src_width = pipe->src_width_3d;
		pipe->src_h = pipe->src_height_3d;
		pipe->src_w = pipe->src_width_3d;
		pipe->dst_h = pipe->src_height_3d;
		pipe->dst_w = pipe->src_width_3d;
		pipe->srcp0_ystride = msm_fb_line_length(0,
					pipe->src_width, bpp);
	} else {
		/* 2D */
		pipe->src_height = fbi->var.yres;
		pipe->src_width = fbi->var.xres;
		pipe->src_h = fbi->var.yres;
		pipe->src_w = fbi->var.xres;
		pipe->dst_h = fbi->var.yres;
		pipe->dst_w = fbi->var.xres;
		pipe->srcp0_ystride = fbi->fix.line_length;
	}
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_y = 0;
	pipe->dst_x = 0;
	pipe->srcp0_addr = (uint32)src;

	/* keep hardware powered while reprogramming */
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	mdp_clk_ctrl(1);

	mdp4_overlay_rgb_setup(pipe);

	mdp4_overlay_reg_flush(pipe, 1);

	mdp4_mixer_stage_up(pipe, 0);

	mdp4_overlayproc_cfg(pipe);

	mdp4_overlay_dmap_xy(pipe);

	mdp4_overlay_dmap_cfg(mfd, 0);

	mdp4_mixer_stage_commit(pipe->mixer_num);

	/* MDP cmd block disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
}
/* Enable blt (writeback) mode for the primary DSI command panel. */
void mdp4_dsi_cmd_blt_start(struct msm_fb_data_type *mfd)
{
	mdp4_dsi_cmd_do_blt(mfd, 1);
}
/* Disable blt (writeback) mode for the primary DSI command panel. */
void mdp4_dsi_cmd_blt_stop(struct msm_fb_data_type *mfd)
{
	mdp4_dsi_cmd_do_blt(mfd, 0);
}
/* ioctl entry: set blt mode according to the userspace request. */
void mdp4_dsi_cmd_overlay_blt(struct msm_fb_data_type *mfd,
					struct msmfb_overlay_blt *req)
{
	mdp4_dsi_cmd_do_blt(mfd, req->enable);
}
/* Panel-on handler: bind mfd to the vsync controller, build/refresh
 * the base pipe and TE setup (with clocks held on), attach the iommu,
 * and leave the suspended state. Always returns 0.
 */
int mdp4_dsi_cmd_on(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	mfd->cont_splash_done = 1;

	vctrl = &vsync_ctrl_db[cndx];
	vctrl->mfd = mfd;
	vctrl->dev = mfd->fbi->dev;

	mdp_clk_ctrl(1);
	mdp4_overlay_update_dsi_cmd(mfd);
	mdp_clk_ctrl(0);

	mdp4_iommu_attach();

	atomic_set(&vctrl->suspend, 0);

	pr_debug("%s-:\n", __func__);

	return ret;
}
/*
 * Panel-off (suspend) handler for the DSI command-mode interface.
 *
 * Sequence:
 *   1. Mark the controller suspended and wake any vsync waiters.
 *   2. If the MDP/DSI clocks are still voted on, poll (up to ~10 x 20 ms)
 *      for the clock-off worker to release them.
 *   3. If the poll times out, force the clock state off under the
 *      spinlock and drop the DSI + MDP clock votes directly.
 *   4. Tear down the base pipe and discard any queued-but-uncommitted
 *      overlay update.
 *
 * Returns 0 in all cases, including the "no base pipe" early-out.
 */
int mdp4_dsi_cmd_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;	/* only controller index 0 is used on this path */
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	int undx;
	int need_wait, cnt;
	unsigned long flags;

	pr_debug("%s+: pid=%d\n", __func__, current->pid);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		/* nothing was ever staged; treat as already off */
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	need_wait = 0;
	mutex_lock(&vctrl->update_lock);
	/* block further pan-display requests from this point on */
	atomic_set(&vctrl->suspend, 1);
	/* release anyone blocked waiting for a vsync completion */
	complete_all(&vctrl->vsync_comp);

	pr_debug("%s: clk=%d pan=%d\n", __func__,
		vctrl->clk_enabled, vctrl->pan_display);
	if (vctrl->clk_enabled)
		need_wait = 1;
	mutex_unlock(&vctrl->update_lock);

	/*
	 * Wait for the deferred clock-off path to drop the clock vote;
	 * give up after 10 iterations (~200 ms total).
	 */
	cnt = 0;
	if (need_wait) {
		while (vctrl->clk_enabled) {
			msleep(20);
			cnt++;
			if (cnt > 10)
				break;
		}
	}

	if (cnt > 10) {
		/*
		 * Timed out: clear the clock bookkeeping under the
		 * spinlock, then drop the DSI and MDP clock votes by
		 * force so the panel can still be powered down.
		 */
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		vctrl->clk_control = 0;
		vctrl->clk_enabled = 0;
		vctrl->expire_tick = 0;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mipi_dsi_clk_cfg(0);
		mdp_clk_ctrl(0);
		pr_err("%s: Error, SET_CLK_OFF by force\n", __func__);
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	/* drop any update queued after the last commit */
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0; /* empty queue */
	}

	pr_debug("%s-:\n", __func__);
	return ret;
}
/*
 * Overlay teardown on suspend for the DSI command-mode panel.
 *
 * Two cases, selected by the framebuffer reference count:
 *  - ref_cnt == 0 (last user gone, e.g. "adb stop"): fully dis-engage
 *    the base pipe from mixer0 and forget it; a borderfill pipe is
 *    staged down first.
 *  - ref_cnt != 0 (clients still hold the fb): only stage the pipe
 *    down and release its iommu mappings, keeping base_pipe for resume.
 */
void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
{
	int cndx = 0;	/* only controller index 0 is used on this path */
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	/* dis-engage rgb0 from mixer0 */
	if (pipe) {
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);
			/* pipe == rgb1 */
			mdp4_overlay_unset_mixer(pipe->mixer_num);
			vctrl->base_pipe = NULL;
		} else {
			/* fb still open: keep the pipe, just unmap it */
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
		}
	}
}
/*
 * Pan-display entry point for the DSI command-mode panel.
 *
 * Under ov_mutex + update_lock, votes the MDP/DSI clocks on if they
 * are not already (resetting the idle expire tick), queues the base
 * pipe when it is still at the base mixer stage, and commits the
 * frame.  Bails out early if the panel is powered off, the base pipe
 * is missing, or the controller is suspended.
 *
 * Locking: ov_mutex (outer) serializes whole pan operations;
 * update_lock guards the clock vote bookkeeping; spin_lock protects
 * the fields also touched from the vsync/rdptr interrupt path.
 */
void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
{
	int cndx = 0;	/* only controller index 0 is used on this path */
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;
	int clk_set_on = 0;	/* set when this call turns the clocks on */

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];

	if (!mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	mutex_lock(&vctrl->update_lock);
	if (atomic_read(&vctrl->suspend)) {
		mutex_unlock(&vctrl->update_lock);
		mutex_unlock(&mfd->dma->ov_mutex);
		pr_err("%s: suspended, no more pan display\n", __func__);
		return;
	}

	/*
	 * Cancel any pending clock-off request and refresh the idle
	 * countdown; take the clock vote here if nobody holds it yet.
	 */
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->clk_control = 0;
	vctrl->pan_display++;
	if (!vctrl->clk_enabled) {
		clk_set_on = 1;
		vctrl->clk_enabled = 1;
		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	if (clk_set_on) {
		/* actual clock enable is done outside the spinlock */
		pr_debug("%s: SET_CLK_ON\n", __func__);
		mipi_dsi_clk_cfg(1);
		mdp_clk_ctrl(1);
		vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
	}
	mutex_unlock(&vctrl->update_lock);

	/*
	 * Base-stage pipe means this is a plain framebuffer pan (no
	 * overlay client queued it): refresh its address and queue it.
	 */
	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
		mdp4_mipi_vsync_enable(mfd, pipe, 0);
		mdp4_overlay_setup_pipe_addr(mfd, pipe);
		mdp4_dsi_cmd_pipe_queue(0, pipe);
	}

	/* bump perf vote for the commit, then settle it afterwards */
	mdp4_overlay_mdp_perf_upd(mfd, 1);
	mdp4_dsi_cmd_pipe_commit(cndx, 0);
	mdp4_overlay_mdp_perf_upd(mfd, 0);

	mutex_unlock(&mfd->dma->ov_mutex);
}