mdp4_overlay_writeback.c

/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <mach/iommu_domains.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/fb.h>

#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
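
/*
 * Writeback session states (mfd->writeback_state) followed by the
 * lifecycle states of an individual writeback buffer node
 * (node->state).
 */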
enum {
	WB_OPEN,
	WB_START,
	WB_STOPING,
	WB_STOP
};

enum {
	REGISTERED,
	IN_FREE_QUEUE,
	IN_BUSY_QUEUE,
	WITH_CLIENT
};

#define MAX_CONTROLLER	1
#define VSYNC_EXPIRE_TICK 0
static struct vsycn_ctrl {
	struct device *dev;
	int inited;
	int update_ndx;
	u32 ov_koff;
	u32 ov_done;
	atomic_t suspend;
	struct mutex update_lock;
	struct completion ov_comp;
	spinlock_t spin_lock;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *base_pipe;
	struct vsync_update vlist[2];
	struct work_struct clk_work;
} vsync_ctrl_db[MAX_CONTROLLER];
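
/*
 * Enable/disable the given MDP interrupt under mdp_spin_lock and keep
 * the driver-wide IRQ reference count (term) in step with the mask.
 */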
static void vsync_irq_enable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	/* no need to clear other interrupts for command mode */
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
}
static void vsync_irq_disable(int intr, int term)
{
	unsigned long flag;

	spin_lock_irqsave(&mdp_spin_lock, flag);
	/* no need to clear other interrupts for command mode */
	mdp_intr_mask &= ~intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flag);
}
static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd);
static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
		struct msmfb_writeback_data_list *node);
static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
		struct msmfb_writeback_data_list **wfdnode);
static int is_wb_operation_allowed(struct msm_fb_data_type *mfd);
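
/*
 * Panel "on" hook for the writeback (WFD) panel: allocates the base
 * border-fill pipe on MIXER2, routes the mixer1 AXI port to mixer2
 * writeback via MDP_LAYERMIXER_WB_MUX_SEL and programs the border
 * color registers.
 */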
int mdp4_overlay_writeback_on(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	struct fb_info *fbi;
	uint8 *buf;
	struct mdp4_overlay_pipe *pipe;
	int bpp;
	int ret;
	uint32 data;
	struct vsycn_ctrl *vctrl;
	int cndx = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	vctrl = &vsync_ctrl_db[cndx];
	vctrl->mfd = mfd;
	vctrl->dev = mfd->fbi->dev;

	fbi = mfd->fbi;

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf += fbi->var.xoffset * bpp +
		fbi->var.yoffset * fbi->fix.line_length;

	/* MDP cmd block enable */
	mdp_clk_ctrl(1);

	if (vctrl->base_pipe == NULL) {
		pipe = mdp4_overlay_pipe_alloc(OVERLAY_TYPE_BF, MDP4_MIXER2);
		if (pipe == NULL) {
			pr_info("%s: pipe_alloc failed\n", __func__);
			mdp_clk_ctrl(0);	/* balance the enable above */
			return -EIO;
		}
		pipe->pipe_used++;
		pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
		pipe->mixer_num = MDP4_MIXER2;
		pipe->src_format = MDP_ARGB_8888;
		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_WRITEBACK);
		ret = mdp4_overlay_format2pipe(pipe);
		if (ret < 0)
			pr_info("%s: format2type failed\n", __func__);

		vctrl->base_pipe = pipe; /* keep it */
	} else {
		pipe = vctrl->base_pipe;
	}

	ret = panel_next_on(pdev);

	/* MDP_LAYERMIXER_WB_MUX_SEL to use mixer1 axi for mixer2 writeback */
	if (hdmi_prim_display)
		data = 0x01;
	else
		data = 0x02;
	outpdw(MDP_BASE + 0x100F4, data);

	MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
		((0x0 & 0xFFF) << 16) |	/* 12-bit B */
		(0x0 & 0xFFF));		/* 12-bit G */
	/* MDP_BORDER_COLOR */
	MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
		(0x0 & 0xFFF));		/* 12-bit R */

	mdp_clk_ctrl(0);
	return ret;
}
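
/*
 * Panel "off" hook: tears down the base pipe, drops any frame still
 * pending in the current vsync update list and restores the default
 * writeback mux routing.
 */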
int mdp4_overlay_writeback_off(struct platform_device *pdev)
{
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int ret = 0;
	int undx;
	struct vsync_update *vp;

	pr_debug("%s+:\n", __func__);

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL) {
		pr_err("%s: NO base pipe\n", __func__);
		return ret;
	}

	/* sanity check, free pipes besides base layer */
	mdp4_overlay_unset_mixer(pipe->mixer_num);
	mdp4_mixer_stage_down(pipe, 1);
	mdp4_overlay_pipe_free(pipe);
	vctrl->base_pipe = NULL;

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;	/* empty queue */
	}

	ret = panel_next_off(pdev);

	mdp_clk_ctrl(1);
	/* MDP_LAYERMIXER_WB_MUX_SEL to restore to default cfg */
	outpdw(MDP_BASE + 0x100F4, 0x0);
	mdp_clk_ctrl(0);

	pr_debug("%s-:\n", __func__);
	return ret;
}
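
/*
 * Reprogram the base pipe from the current framebuffer geometry and
 * stage it; called once a writeback buffer has been dequeued for the
 * coming frame.
 */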
static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi;
	uint8 *buf;
	unsigned int buf_offset;
	struct mdp4_overlay_pipe *pipe;
	int bpp;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;

	if (mfd->key != MFD_KEY)
		return -ENODEV;

	fbi = mfd->fbi;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (!pipe) {
		pr_err("%s: no base layer pipe\n", __func__);
		return -EINVAL;
	}

	bpp = fbi->var.bits_per_pixel / 8;
	buf = (uint8 *) fbi->fix.smem_start;
	buf_offset = fbi->var.xoffset * bpp +
		fbi->var.yoffset * fbi->fix.line_length;

	/* MDP cmd block enable */
	mdp_clk_ctrl(1);

	pipe->src_height = fbi->var.yres;
	pipe->src_width = fbi->var.xres;
	pipe->src_h = fbi->var.yres;
	pipe->src_w = fbi->var.xres;
	pipe->dst_h = fbi->var.yres;
	pipe->dst_w = fbi->var.xres;
	pipe->srcp0_ystride = fbi->fix.line_length;
	pipe->src_y = 0;
	pipe->src_x = 0;
	pipe->dst_y = 0;
	pipe->dst_x = 0;

	mdp4_overlay_mdp_pipe_req(pipe, mfd);
	mdp4_calc_blt_mdp_bw(mfd, pipe);

	if (mfd->display_iova)
		pipe->srcp0_addr = mfd->display_iova + buf_offset;
	else
		pipe->srcp0_addr = (uint32)(buf + buf_offset);

	mdp4_mixer_stage_up(pipe, 0);

	mdp4_overlayproc_cfg(pipe);

	if (hdmi_prim_display)
		outpdw(MDP_BASE + 0x100F4, 0x01);
	else
		outpdw(MDP_BASE + 0x100F4, 0x02);

	/* MDP cmd block disable */
	mdp_clk_ctrl(0);

	wmb();
	return 0;
}
/*
 * mdp4_wfd_pipe_queue:
 * called from thread context
 */
void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pp;
	int undx;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];

	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx starts from 1 */

	pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
		undx, pipe->pipe_ndx, current->pid);

	*pp = *pipe;	/* clone it */
	vp->update_cnt++;
	mutex_unlock(&vctrl->update_lock);
	mdp4_stat.overlay_play[pipe->mixer_num]++;
}
static void mdp4_wfd_wait4ov(int cndx);
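
/*
 * Swap the double-buffered update list, commit every queued pipe to
 * the mixer, then kick off the overlay2 engine and (optionally) wait
 * for OVERLAY2_DONE before handing the output buffer back.
 */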
int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
		int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;
	int rc;
	struct msmfb_writeback_data_list *node = NULL;

	rc = is_wb_operation_allowed(mfd);
	if (rc) {
		pr_debug("%s: Unable to commit, error = %d\n", __func__, rc);
		return rc;
	}

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	if (vp->update_cnt == 0) {
		mutex_unlock(&vctrl->update_lock);
		return cnt;
	}

	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;	/* toggle between the two vlists */
	vp->update_cnt = 0;	/* reset */
	mutex_unlock(&vctrl->update_lock);

	mdp4_wfd_dequeue_update(mfd, &node);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0;	/* clear */
		}
	}

	mdp_clk_ctrl(1);
	mdp4_mixer_stage_commit(mixer);

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vctrl->ov_koff++;
	INIT_COMPLETION(vctrl->ov_comp);
	vsync_irq_enable(INTR_OVERLAY2_DONE, MDP_OVERLAY2_TERM);
	pr_debug("%s: kickoff\n", __func__);
	/* kickoff overlay engine */
	mdp4_stat.kickoff_ov2++;
	outpdw(MDP_BASE + 0x00D0, 0);
	mb();	/* make sure kickoff executed */
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait)
		mdp4_wfd_wait4ov(cndx);

	mdp4_wfd_queue_wakeup(mfd, node);

	return cnt;
}
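
/*
 * Deferred clock release: the overlay-done ISR schedules this worker
 * rather than dropping the clock vote in interrupt context.
 */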
static void clk_ctrl_work(struct work_struct *work)
{
	struct vsycn_ctrl *vctrl =
		container_of(work, typeof(*vctrl), clk_work);

	mdp_clk_ctrl(0);
}
void mdp4_wfd_init(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	if (vctrl->inited)
		return;

	vctrl->inited = 1;
	vctrl->update_ndx = 0;
	mutex_init(&vctrl->update_lock);
	init_completion(&vctrl->ov_comp);
	spin_lock_init(&vctrl->spin_lock);
	INIT_WORK(&vctrl->clk_work, clk_ctrl_work);
}
static void mdp4_wfd_wait4ov(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->ov_comp);
}
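
/*
 * OVERLAY2_DONE interrupt handler: disables the interrupt, completes
 * any waiter in mdp4_wfd_wait4ov() and defers the clock release to
 * the workqueue.
 */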
void mdp4_overlay2_done_wfd(struct mdp_dma_data *dma)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	int cndx = 0;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY2_DONE, MDP_OVERLAY2_TERM);
	vctrl->ov_done++;
	complete(&vctrl->ov_comp);
	schedule_work(&vctrl->clk_work);
	pr_debug("%s ovdone interrupt\n", __func__);
	spin_unlock(&vctrl->spin_lock);
}
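
/*
 * Frame update entry point for the writeback panel: queues the base
 * pipe if needed and commits the frame, waiting for completion.
 */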
void mdp4_writeback_overlay(struct msm_fb_data_type *mfd)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	if (mfd && !mfd->panel_power_on)
		return;

	pr_debug("%s:+ mfd=%pK\n", __func__, mfd);

	vctrl = &vsync_ctrl_db[0];
	pipe = vctrl->base_pipe;

	mutex_lock(&mfd->dma->ov_mutex);

	if (pipe->pipe_type == OVERLAY_TYPE_RGB)
		mdp4_wfd_pipe_queue(0, pipe);

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	mdp4_wfd_pipe_commit(mfd, 0, 1);

	mdp4_overlay_mdp_perf_upd(mfd, 0);

	mutex_unlock(&mfd->dma->ov_mutex);
}
static int mdp4_overlay_writeback_register_buffer(
	struct msm_fb_data_type *mfd, struct msmfb_writeback_data_list *node)
{
	if (!node) {
		pr_err("Cannot register a NULL node\n");
		return -EINVAL;
	}
	node->state = REGISTERED;
	list_add_tail(&node->registered_entry, &mfd->writeback_register_queue);
	return 0;
}
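
/*
 * Look up a previously registered buffer node by its iova; if none is
 * found, allocate one, map it through ION/IOMMU and register it.
 */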
static struct msmfb_writeback_data_list *get_if_registered(
	struct msm_fb_data_type *mfd, struct msmfb_data *data)
{
	struct msmfb_writeback_data_list *temp;
	bool found = false;
	int domain;

	if (!list_empty(&mfd->writeback_register_queue)) {
		list_for_each_entry(temp,
			&mfd->writeback_register_queue,
			registered_entry) {
			if (temp && temp->buf_info.iova == data->iova) {
				found = true;
				break;
			}
		}
	}
	if (!found) {
		temp = kzalloc(sizeof(struct msmfb_writeback_data_list),
				GFP_KERNEL);
		if (temp == NULL) {
			pr_err("%s: out of memory\n", __func__);
			goto register_alloc_fail;
		}
		temp->ihdl = NULL;
		if (data->iova) {
			temp->addr = (void *)(data->iova + data->offset);
		} else if (mfd->iclient) {
			struct ion_handle *srcp_ihdl;
			ulong len;

			srcp_ihdl = ion_import_dma_buf(mfd->iclient,
					data->memory_id);
			if (IS_ERR_OR_NULL(srcp_ihdl)) {
				pr_err("%s: ion import fd failed\n", __func__);
				goto register_ion_fail;
			}
			if (mdp_iommu_split_domain)
				domain = DISPLAY_WRITE_DOMAIN;
			else
				domain = DISPLAY_READ_DOMAIN;
			if (ion_map_iommu(mfd->iclient,
					srcp_ihdl,
					domain,
					GEN_POOL,
					SZ_4K,
					0,
					(ulong *)&temp->addr,
					(ulong *)&len,
					0,
					ION_IOMMU_UNMAP_DELAYED)) {
				ion_free(mfd->iclient, srcp_ihdl);
				pr_err("%s: unable to get ion mapping addr\n",
						__func__);
				goto register_ion_fail;
			}
			temp->addr += data->offset;
			temp->ihdl = srcp_ihdl;
		} else {
			pr_err("%s: only support ion memory\n", __func__);
			goto register_ion_fail;
		}

		memcpy(&temp->buf_info, data, sizeof(struct msmfb_data));
		if (mdp4_overlay_writeback_register_buffer(mfd, temp)) {
			pr_err("%s: error registering node\n", __func__);
			goto register_ion_fail;
		}
	}
	return temp;

register_ion_fail:
	kfree(temp);
register_alloc_fail:
	return NULL;
}
int mdp4_writeback_start(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	mutex_lock(&mfd->writeback_mutex);
	mfd->writeback_state = WB_START;
	mutex_unlock(&mfd->writeback_mutex);
	wake_up(&mfd->wait_q);
	return 0;
}
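
/*
 * Userspace queues an output buffer: the node moves to the free queue
 * and becomes eligible to back the next committed frame.
 */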
int mdp4_writeback_queue_buffer(struct fb_info *info, struct msmfb_data *data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct msmfb_writeback_data_list *node = NULL;
	int rv = 0;

	mutex_lock(&mfd->writeback_mutex);
	node = get_if_registered(mfd, data);
	if (!node || node->state == IN_BUSY_QUEUE ||
		node->state == IN_FREE_QUEUE) {
		pr_err("memory not registered or buffer already with us\n");
		rv = -EINVAL;
		goto exit;
	}
	list_add_tail(&node->active_entry, &mfd->writeback_free_queue);
	node->state = IN_FREE_QUEUE;
exit:
	mutex_unlock(&mfd->writeback_mutex);
	return rv;
}
static int is_buffer_ready(struct msm_fb_data_type *mfd)
{
	int rc;

	mutex_lock(&mfd->writeback_mutex);
	rc = !list_empty(&mfd->writeback_busy_queue) ||
		(mfd->writeback_state == WB_STOPING);
	mutex_unlock(&mfd->writeback_mutex);
	return rc;
}
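
/*
 * Block until a filled buffer is available (or writeback is
 * stopping), then hand it back to the client and release its ION
 * mapping if the driver created one.
 */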
int mdp4_writeback_dequeue_buffer(struct fb_info *info, struct msmfb_data *data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	struct msmfb_writeback_data_list *node = NULL;
	int rc = 0, domain;

	rc = wait_event_interruptible(mfd->wait_q, is_buffer_ready(mfd));
	if (rc) {
		pr_err("failed to get dequeued buffer\n");
		return -ENOBUFS;
	}

	mutex_lock(&mfd->writeback_mutex);
	if (mfd->writeback_state == WB_STOPING) {
		mfd->writeback_state = WB_STOP;
		mutex_unlock(&mfd->writeback_mutex);
		return -ENOBUFS;
	} else if (!list_empty(&mfd->writeback_busy_queue)) {
		node = list_first_entry(&mfd->writeback_busy_queue,
				struct msmfb_writeback_data_list, active_entry);
	}
	if (node) {
		list_del(&node->active_entry);
		node->state = WITH_CLIENT;
		memcpy(data, &node->buf_info, sizeof(struct msmfb_data));
		if (!data->iova)
			if (mfd->iclient && node->ihdl) {
				if (mdp_iommu_split_domain)
					domain = DISPLAY_WRITE_DOMAIN;
				else
					domain = DISPLAY_READ_DOMAIN;
				ion_unmap_iommu(mfd->iclient,
						node->ihdl,
						domain,
						GEN_POOL);
				ion_free(mfd->iclient,
						node->ihdl);
			}
	} else {
		pr_err("node is NULL. Somebody else dequeued?\n");
		rc = -ENOBUFS;
	}
	mutex_unlock(&mfd->writeback_mutex);
	return rc;
}
static bool is_writeback_inactive(struct msm_fb_data_type *mfd)
{
	bool inactive;

	mutex_lock(&mfd->writeback_mutex);
	inactive = !mfd->writeback_active_cnt;
	mutex_unlock(&mfd->writeback_mutex);
	return inactive;
}
int mdp4_writeback_stop(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	mutex_lock(&mfd->writeback_mutex);
	mfd->writeback_state = WB_STOPING;
	mutex_unlock(&mfd->writeback_mutex);

	/* Wait for all pending writebacks to finish */
	wait_event_interruptible(mfd->wait_q, is_writeback_inactive(mfd));

	/* Wake up dequeue thread in case of no UI update */
	wake_up(&mfd->wait_q);

	return 0;
}
int mdp4_writeback_init(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	mutex_init(&mfd->writeback_mutex);
	mutex_init(&mfd->unregister_mutex);
	INIT_LIST_HEAD(&mfd->writeback_free_queue);
	INIT_LIST_HEAD(&mfd->writeback_busy_queue);
	INIT_LIST_HEAD(&mfd->writeback_register_queue);
	mfd->writeback_state = WB_OPEN;
	init_waitqueue_head(&mfd->wait_q);
	return 0;
}
int mdp4_writeback_terminate(struct fb_info *info)
{
	struct list_head *ptr, *next;
	struct msmfb_writeback_data_list *temp;
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
	int rc = 0;

	mutex_lock(&mfd->unregister_mutex);
	mutex_lock(&mfd->writeback_mutex);

	if (mfd->writeback_state != WB_STOPING &&
		mfd->writeback_state != WB_STOP) {
		pr_err("%s called without stopping\n", __func__);
		rc = -EPERM;
		goto terminate_err;
	}

	if (!list_empty(&mfd->writeback_register_queue)) {
		list_for_each_safe(ptr, next,
				&mfd->writeback_register_queue) {
			temp = list_entry(ptr,
					struct msmfb_writeback_data_list,
					registered_entry);
			list_del(&temp->registered_entry);
			kfree(temp);
		}
	}
	INIT_LIST_HEAD(&mfd->writeback_register_queue);
	INIT_LIST_HEAD(&mfd->writeback_busy_queue);
	INIT_LIST_HEAD(&mfd->writeback_free_queue);

terminate_err:
	mutex_unlock(&mfd->writeback_mutex);
	mutex_unlock(&mfd->unregister_mutex);
	return rc;
}
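
/*
 * Pull the next free output buffer for the coming frame, point the
 * base pipe's blt address at it and reprogram the pipe.
 */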
static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
		struct msmfb_writeback_data_list **wfdnode)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct msmfb_writeback_data_list *node = NULL;

	if (mfd && !mfd->panel_power_on)
		return;

	pr_debug("%s:+ mfd=%pK\n", __func__, mfd);

	vctrl = &vsync_ctrl_db[0];
	pipe = vctrl->base_pipe;

	mutex_lock(&mfd->unregister_mutex);
	mutex_lock(&mfd->writeback_mutex);
	if (!list_empty(&mfd->writeback_free_queue)
		&& mfd->writeback_state != WB_STOPING
		&& mfd->writeback_state != WB_STOP) {
		node = list_first_entry(&mfd->writeback_free_queue,
				struct msmfb_writeback_data_list, active_entry);
	}
	if (node) {
		list_del(&(node->active_entry));
		node->state = IN_BUSY_QUEUE;
		mfd->writeback_active_cnt++;
	}
	mutex_unlock(&mfd->writeback_mutex);

	pipe->ov_blt_addr = (ulong) (node ? node->addr : NULL);

	if (!pipe->ov_blt_addr) {
		pr_err("%s: no writeback buffer 0x%x, %pK\n", __func__,
				(unsigned int)pipe->ov_blt_addr, node);
		mutex_unlock(&mfd->unregister_mutex);
		return;
	}

	mdp4_overlay_writeback_update(mfd);

	*wfdnode = node;

	mutex_unlock(&mfd->unregister_mutex);
}
static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
		struct msmfb_writeback_data_list *node)
{
	if (mfd && !mfd->panel_power_on)
		return;

	if (node == NULL)
		return;

	pr_debug("%s: mfd=%pK node: %pK\n", __func__, mfd, node);

	mutex_lock(&mfd->writeback_mutex);
	list_add_tail(&node->active_entry, &mfd->writeback_busy_queue);
	mfd->writeback_active_cnt--;
	mutex_unlock(&mfd->writeback_mutex);
	wake_up(&mfd->wait_q);
}
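
/*
 * Report WFD mirroring state changes to userspace through the
 * writeback switch device.
 */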
int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;

	if (mfd->panel.type != WRITEBACK_PANEL)
		return -ENOTSUPP;

	switch (hint) {
	case MDP_WRITEBACK_MIRROR_ON:
	case MDP_WRITEBACK_MIRROR_PAUSE:
	case MDP_WRITEBACK_MIRROR_RESUME:
	case MDP_WRITEBACK_MIRROR_OFF:
		pr_info("wfd state switched to %d\n", hint);
		switch_set_state(&mfd->writeback_sdev, hint);
		return 0;
	default:
		return -EINVAL;
	}
}