mdss_mdp_wb.c

/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>

#include <mach/iommu.h>
#include <mach/iommu_domains.h>

#include "mdss_mdp.h"
#include "mdss_fb.h"
#include "mdss_wb.h"

enum mdss_mdp_wb_state {
	WB_OPEN,
	WB_START,
	WB_STOPING,
	WB_STOP
};

struct mdss_mdp_wb {
	u32 fb_ndx;
	struct mutex lock;
	struct list_head busy_queue;
	struct list_head free_queue;
	struct list_head register_queue;
	wait_queue_head_t wait_q;
	u32 state;
	int is_secure;
	struct mdss_mdp_pipe *secure_pipe;
};

enum mdss_mdp_wb_node_state {
	REGISTERED,
	IN_FREE_QUEUE,
	IN_BUSY_QUEUE,
	WITH_CLIENT,
	WB_BUFFER_READY,
};

struct mdss_mdp_wb_data {
	struct list_head registered_entry;
	struct list_head active_entry;
	struct msmfb_data buf_info;
	struct mdss_mdp_data buf_data;
	int state;
	bool user_alloc;
};

static DEFINE_MUTEX(mdss_mdp_wb_buf_lock);
static struct mdss_mdp_wb mdss_mdp_wb_info;

static void mdss_mdp_wb_free_node(struct mdss_mdp_wb_data *node);

#ifdef DEBUG_WRITEBACK
/* for debugging: writeback output buffer to allocated memory */
static inline
struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
{
	static struct ion_handle *ihdl;
	static void *videomemory;
	static ion_phys_addr_t mdss_wb_mem;
	static struct mdss_mdp_data mdss_wb_buffer = { .num_planes = 1, };
	int rc;

	if (IS_ERR_OR_NULL(ihdl)) {
		struct fb_info *fbi;
		size_t img_size;
		struct ion_client *iclient = mdss_get_ionclient();
		struct mdss_mdp_img_data *img = mdss_wb_buffer.p;

		fbi = mfd->fbi;
		img_size = fbi->var.xres * fbi->var.yres *
				fbi->var.bits_per_pixel / 8;

		ihdl = ion_alloc(iclient, img_size, SZ_4K,
				 ION_HEAP(ION_SF_HEAP_ID), 0);
		if (IS_ERR_OR_NULL(ihdl)) {
			pr_err("unable to alloc fbmem from ion (%pK)\n", ihdl);
			return NULL;
		}

		videomemory = ion_map_kernel(iclient, ihdl);
		ion_phys(iclient, ihdl, &mdss_wb_mem, &img_size);

		if (is_mdss_iommu_attached()) {
			int domain = MDSS_IOMMU_DOMAIN_UNSECURE;

			rc = ion_map_iommu(iclient, ihdl,
					   mdss_get_iommu_domain(domain),
					   0, SZ_4K, 0,
					   (unsigned long *) &img->addr,
					   (unsigned long *) &img->len,
					   0, 0);
		} else {
			img->addr = mdss_wb_mem;
			img->len = img_size;
		}

		pr_debug("ihdl=%pK virt=%pK phys=0x%lx iova=0x%x size=%u\n",
			 ihdl, videomemory, mdss_wb_mem, img->addr, img_size);
	}
	return &mdss_wb_buffer;
}
#else
static inline
struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
{
	return NULL;
}
#endif

/*
 * mdss_mdp_wb_get_secure() - Queries the secure status of a writeback session
 * @mfd: Frame buffer device structure
 * @enabled: Pointer to convey if session is secure
 *
 * This API enables an entity (userspace process, driver module, etc.) to
 * query the secure status of a writeback session. The secure status is
 * supplied via the @enabled pointer.
 */
int mdss_mdp_wb_get_secure(struct msm_fb_data_type *mfd, uint8_t *enabled)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);

	if (!wb)
		return -EINVAL;

	*enabled = wb->is_secure;
	return 0;
}
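
/*
 * Illustrative sketch only (compiled out, not part of the driver): one way a
 * caller holding an mfd reference might use mdss_mdp_wb_get_secure() to log
 * the current secure state of the writeback session. The helper name is a
 * placeholder.
 */
#if 0
static void example_log_wb_secure_state(struct msm_fb_data_type *mfd)
{
	uint8_t secure = 0;

	/* returns -EINVAL if writeback has not been initialized on this fb */
	if (mdss_mdp_wb_get_secure(mfd, &secure))
		pr_debug("writeback not initialized\n");
	else
		pr_debug("writeback session is %ssecure\n", secure ? "" : "non-");
}
#endif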

/*
 * mdss_mdp_wb_set_secure() - Updates the secure status of a writeback session
 * @mfd: Frame buffer device structure
 * @enable: New secure status (1: secure, 0: non-secure)
 *
 * This API enables an entity to modify the secure status of a writeback
 * session. If enable is 1, we allocate a secure pipe so that MDP is
 * allowed to write back into the secure buffer. If enable is 0, we
 * deallocate the secure pipe (if it was allocated previously).
 */
int mdss_mdp_wb_set_secure(struct msm_fb_data_type *mfd, int enable)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_pipe *pipe;
	struct mdss_mdp_mixer *mixer;

	pr_debug("setting secure=%d\n", enable);

	if ((enable != 1) && (enable != 0)) {
		pr_err("Invalid enable value = %d\n", enable);
		return -EINVAL;
	}

	if (!ctl || !ctl->mdata) {
		pr_err("ctl is NULL\n");
		return -EINVAL;
	}

	if (!wb) {
		pr_err("unable to start, writeback is not initialized\n");
		return -ENODEV;
	}

	ctl->is_secure = enable;
	wb->is_secure = enable;

	/* newer revisions don't require secure src pipe for secure session */
	if (ctl->mdata->mdp_rev > MDSS_MDP_HW_REV_100)
		return 0;

	pipe = wb->secure_pipe;

	if (!enable) {
		if (pipe) {
			/* unset pipe */
			mdss_mdp_mixer_pipe_unstage(pipe);
			mdss_mdp_pipe_destroy(pipe);
			wb->secure_pipe = NULL;
		}
		return 0;
	}

	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
	if (!mixer) {
		pr_err("Unable to find mixer for wb\n");
		return -ENOENT;
	}

	if (!pipe) {
		pipe = mdss_mdp_pipe_alloc(mixer, MDSS_MDP_PIPE_TYPE_RGB);
		if (!pipe)
			pipe = mdss_mdp_pipe_alloc(mixer,
					MDSS_MDP_PIPE_TYPE_VIG);
		if (!pipe) {
			pr_err("Unable to get pipe to set secure session\n");
			return -ENOMEM;
		}

		pipe->src_fmt = mdss_mdp_get_format_params(MDP_RGBA_8888);
		pipe->mfd = mfd;
		pipe->mixer_stage = MDSS_MDP_STAGE_BASE;
		wb->secure_pipe = pipe;
	}

	pipe->img_height = mixer->height;
	pipe->img_width = mixer->width;
	pipe->src.x = 0;
	pipe->src.y = 0;
	pipe->src.w = pipe->img_width;
	pipe->src.h = pipe->img_height;
	pipe->dst = pipe->src;
	pipe->flags = (enable ? MDP_SECURE_OVERLAY_SESSION : 0);
	pipe->params_changed++;

	pr_debug("setting secure pipe=%d flags=%x\n", pipe->num, pipe->flags);

	return mdss_mdp_pipe_queue_data(pipe, NULL);
}
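
/*
 * Illustrative sketch only (compiled out, not part of the driver): marking a
 * writeback session secure before any secure output buffers are queued, then
 * clearing the state once the secure use case ends. Error handling is reduced
 * to the minimum needed to show the call order; the helper name is a
 * placeholder.
 */
#if 0
static int example_run_secure_wfd(struct msm_fb_data_type *mfd)
{
	int rc;

	rc = mdss_mdp_wb_set_secure(mfd, 1);
	if (rc)
		return rc;

	/* ... queue/dequeue secure buffers via the writeback ioctls ... */

	return mdss_mdp_wb_set_secure(mfd, 0);
}
#endif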

static int mdss_mdp_wb_init(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
	int rc = 0;

	mutex_lock(&mdss_mdp_wb_buf_lock);
	if (wb == NULL) {
		wb = &mdss_mdp_wb_info;
		wb->fb_ndx = mfd->index;
		mdp5_data->wb = wb;
	} else if (mfd->index != wb->fb_ndx) {
		pr_err("only one writeback intf supported at a time\n");
		rc = -EMLINK;
		goto error;
	} else {
		pr_debug("writeback already initialized\n");
	}

	pr_debug("init writeback on fb%d\n", wb->fb_ndx);

	mutex_init(&wb->lock);
	INIT_LIST_HEAD(&wb->free_queue);
	INIT_LIST_HEAD(&wb->busy_queue);
	INIT_LIST_HEAD(&wb->register_queue);
	wb->state = WB_OPEN;
	init_waitqueue_head(&wb->wait_q);

	mdp5_data->wb = wb;
error:
	mutex_unlock(&mdss_mdp_wb_buf_lock);
	return rc;
}

static int mdss_mdp_wb_terminate(struct msm_fb_data_type *mfd)
{
	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);

	if (!wb) {
		pr_err("unable to terminate, writeback is not initialized\n");
		return -ENODEV;
	}

	pr_debug("terminate writeback\n");

	mutex_lock(&mdss_mdp_wb_buf_lock);
	mutex_lock(&wb->lock);
	if (!list_empty(&wb->register_queue)) {
		struct mdss_mdp_wb_data *node, *temp;

		list_for_each_entry_safe(node, temp, &wb->register_queue,
					 registered_entry) {
			mdss_mdp_wb_free_node(node);
			list_del(&node->registered_entry);
			kfree(node);
		}
	}

	wb->is_secure = false;
	if (wb->secure_pipe)
		mdss_mdp_pipe_destroy(wb->secure_pipe);
	mutex_unlock(&wb->lock);

	if (mdp5_data->ctl)
		mdp5_data->ctl->is_secure = false;

	mdp5_data->wb = NULL;
	mutex_unlock(&mdss_mdp_wb_buf_lock);

	return 0;
}

static int mdss_mdp_wb_start(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);

	if (!wb) {
		pr_err("unable to start, writeback is not initialized\n");
		return -ENODEV;
	}

	mutex_lock(&wb->lock);
	wb->state = WB_START;
	mutex_unlock(&wb->lock);

	wake_up(&wb->wait_q);

	return 0;
}

static int mdss_mdp_wb_stop(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);

	if (!wb) {
		pr_err("unable to stop, writeback is not initialized\n");
		return -ENODEV;
	}

	mutex_lock(&wb->lock);
	wb->state = WB_STOPING;
	mutex_unlock(&wb->lock);

	wake_up(&wb->wait_q);

	return 0;
}

static int mdss_mdp_wb_register_node(struct mdss_mdp_wb *wb,
				     struct mdss_mdp_wb_data *node)
{
	if (!node) {
		pr_err("Invalid wb node\n");
		return -EINVAL;
	}
	node->state = REGISTERED;
	list_add_tail(&node->registered_entry, &wb->register_queue);
	return 0;
}

static struct mdss_mdp_wb_data *get_local_node(struct mdss_mdp_wb *wb,
					       struct msmfb_data *data)
{
	struct mdss_mdp_wb_data *node;
	struct mdss_mdp_img_data *buf;
	int ret;

	if (!data->iova)
		return NULL;

	if (!list_empty(&wb->register_queue)) {
		list_for_each_entry(node, &wb->register_queue, registered_entry)
			if (node->buf_info.iova == data->iova) {
				pr_debug("found node iova=%x addr=%x\n",
					 data->iova, node->buf_data.p[0].addr);
				return node;
			}
	}

	node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
	if (node == NULL) {
		pr_err("out of memory\n");
		return NULL;
	}

	node->buf_data.num_planes = 1;
	node->buf_info = *data;
	buf = &node->buf_data.p[0];
	buf->addr = (u32) (data->iova + data->offset);
	buf->len = UINT_MAX; /* trusted source */
	if (wb->is_secure)
		buf->flags |= MDP_SECURE_OVERLAY_SESSION;

	ret = mdss_mdp_wb_register_node(wb, node);
	if (IS_ERR_VALUE(ret)) {
		pr_err("error registering wb node\n");
		kfree(node);
		return NULL;
	}

	pr_debug("register node iova=0x%x addr=0x%x\n", data->iova, buf->addr);

	return node;
}

static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
					      struct msmfb_data *data)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
	struct mdss_mdp_wb_data *node;
	struct mdss_mdp_img_data *buf;
	int ret;

	if (!list_empty(&wb->register_queue)) {
		struct ion_client *iclient = mdss_get_ionclient();
		struct ion_handle *ihdl;

		if (!iclient) {
			pr_err("iclient is NULL\n");
			return NULL;
		}

		ihdl = ion_import_dma_buf(iclient, data->memory_id);
		if (IS_ERR_OR_NULL(ihdl)) {
			pr_err("unable to import fd %d\n", data->memory_id);
			return NULL;
		}
		/* only interested in ptr address, so we can free handle */
		ion_free(iclient, ihdl);

		list_for_each_entry(node, &wb->register_queue, registered_entry)
			if ((node->buf_data.p[0].srcp_ihdl == ihdl) &&
			    (node->buf_info.offset == data->offset)) {
				pr_debug("found fd=%d hdl=%pK off=%x addr=%x\n",
					 data->memory_id, ihdl,
					 data->offset,
					 node->buf_data.p[0].addr);
				return node;
			}
	}

	node = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
	if (node == NULL) {
		pr_err("out of memory\n");
		return NULL;
	}

	node->user_alloc = true;
	node->buf_data.num_planes = 1;
	buf = &node->buf_data.p[0];
	if (wb->is_secure)
		buf->flags |= MDP_SECURE_OVERLAY_SESSION;

	ret = mdss_iommu_ctrl(1);
	if (IS_ERR_VALUE(ret)) {
		pr_err("IOMMU attach failed\n");
		goto register_fail;
	}
	ret = mdss_mdp_get_img(data, buf);
	if (IS_ERR_VALUE(ret)) {
		pr_err("error getting buffer info\n");
		mdss_iommu_ctrl(0);
		goto register_fail;
	}
	mdss_iommu_ctrl(0);
	memcpy(&node->buf_info, data, sizeof(*data));

	ret = mdss_mdp_wb_register_node(wb, node);
	if (IS_ERR_VALUE(ret)) {
		pr_err("error registering wb node\n");
		goto register_fail;
	}

	pr_debug("register node mem_id=%d offset=%u addr=0x%x len=%d\n",
		 data->memory_id, data->offset, buf->addr, buf->len);

	return node;

register_fail:
	kfree(node);
	return NULL;
}

static void mdss_mdp_wb_free_node(struct mdss_mdp_wb_data *node)
{
	struct mdss_mdp_img_data *buf;

	if (node->user_alloc) {
		buf = &node->buf_data.p[0];

		pr_debug("free user mem_id=%d ihdl=%pK, offset=%u addr=0x%x\n",
			 node->buf_info.memory_id,
			 buf->srcp_ihdl,
			 node->buf_info.offset,
			 buf->addr);

		mdss_mdp_put_img(&node->buf_data.p[0]);
		node->user_alloc = false;
	}
}

static int mdss_mdp_wb_queue(struct msm_fb_data_type *mfd,
			     struct msmfb_data *data, int local)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
	struct mdss_mdp_wb_data *node = NULL;
	int ret = 0;

	if (!wb) {
		pr_err("unable to queue, writeback is not initialized\n");
		return -ENODEV;
	}

	pr_debug("fb%d queue\n", wb->fb_ndx);

	mutex_lock(&wb->lock);
	if (local)
		node = get_local_node(wb, data);
	if (node == NULL)
		node = get_user_node(mfd, data);

	if (!node) {
		pr_err("memory not registered\n");
		ret = -ENOENT;
	} else {
		struct mdss_mdp_img_data *buf = &node->buf_data.p[0];

		switch (node->state) {
		case IN_FREE_QUEUE:
			pr_err("node 0x%pa was already queued before\n",
			       &buf->addr);
			ret = -EINVAL;
			break;
		case IN_BUSY_QUEUE:
			pr_err("node 0x%pa still in busy state\n", &buf->addr);
			ret = -EBUSY;
			break;
		case WB_BUFFER_READY:
			pr_debug("node 0x%pa re-queued without dequeue\n",
				 &buf->addr);
			list_del(&node->active_entry);
			/* fall through */
		case WITH_CLIENT:
		case REGISTERED:
			list_add_tail(&node->active_entry, &wb->free_queue);
			node->state = IN_FREE_QUEUE;
			break;
		default:
			pr_err("Invalid node 0x%pa state %d\n",
			       &buf->addr, node->state);
			ret = -EINVAL;
			break;
		}
	}
	mutex_unlock(&wb->lock);

	return ret;
}

static int is_buffer_ready(struct mdss_mdp_wb *wb)
{
	int rc;

	mutex_lock(&wb->lock);
	rc = !list_empty(&wb->busy_queue) || (wb->state == WB_STOPING);
	mutex_unlock(&wb->lock);
	return rc;
}

static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
			       struct msmfb_data *data)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
	struct mdss_mdp_wb_data *node = NULL;
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	int ret;

	if (!wb) {
		pr_err("unable to dequeue, writeback is not initialized\n");
		return -ENODEV;
	}

	if (!ctl) {
		pr_err("unable to dequeue, ctl is not initialized\n");
		return -ENODEV;
	}

	ret = wait_event_interruptible(wb->wait_q, is_buffer_ready(wb));
	if (ret) {
		pr_err("failed to get dequeued buffer\n");
		return -ENOBUFS;
	}

	mutex_lock(&wb->lock);
	if (wb->state == WB_STOPING) {
		pr_debug("wfd stopped\n");
		mdss_mdp_display_wait4comp(ctl);
		wb->state = WB_STOP;
		ret = -ENOBUFS;
	} else if (!list_empty(&wb->busy_queue)) {
		struct mdss_mdp_img_data *buf;

		node = list_first_entry(&wb->busy_queue,
					struct mdss_mdp_wb_data,
					active_entry);
		list_del(&node->active_entry);
		node->state = WITH_CLIENT;
		memcpy(data, &node->buf_info, sizeof(*data));

		buf = &node->buf_data.p[0];
		pr_debug("found node addr=%x len=%d\n", buf->addr, buf->len);
	} else {
		pr_debug("node is NULL, wait for next\n");
		ret = -ENOBUFS;
	}
	mutex_unlock(&wb->lock);
	return ret;
}

int mdss_mdp_wb_kickoff(struct msm_fb_data_type *mfd)
{
	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
	struct mdss_mdp_wb_data *node = NULL;
	int ret = 0;
	struct mdss_mdp_writeback_arg wb_args = {
		.data = NULL,
	};

	if (!ctl->power_on)
		return 0;

	memset(&wb_args, 0, sizeof(wb_args));

	mutex_lock(&mdss_mdp_wb_buf_lock);

	if (wb) {
		mutex_lock(&wb->lock);
		/* in case of reinit of control path need to reset secure */
		if (ctl->play_cnt == 0)
			mdss_mdp_wb_set_secure(ctl->mfd, wb->is_secure);
		if (!list_empty(&wb->free_queue) && wb->state != WB_STOPING &&
		    wb->state != WB_STOP) {
			node = list_first_entry(&wb->free_queue,
						struct mdss_mdp_wb_data,
						active_entry);
			list_del(&node->active_entry);
			node->state = IN_BUSY_QUEUE;
			wb_args.data = &node->buf_data;
		} else {
			pr_debug("unable to get buf wb state=%d\n", wb->state);
		}
		mutex_unlock(&wb->lock);
	}

	if (wb_args.data == NULL)
		wb_args.data = mdss_mdp_wb_debug_buffer(ctl->mfd);

	if (wb_args.data == NULL) {
		pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
		/* drop buffer but don't return error */
		ret = 0;
		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_DONE);
		goto kickoff_fail;
	}

	ret = mdss_mdp_writeback_display_commit(ctl, &wb_args);
	if (ret) {
		pr_err("error on commit ctl=%d\n", ctl->num);
		goto kickoff_fail;
	}

	if (wb && node) {
		mutex_lock(&wb->lock);
		list_add_tail(&node->active_entry, &wb->busy_queue);
		node->state = WB_BUFFER_READY;
		mutex_unlock(&wb->lock);
		wake_up(&wb->wait_q);
	}

kickoff_fail:
	mutex_unlock(&mdss_mdp_wb_buf_lock);
	return ret;
}

int mdss_mdp_wb_set_mirr_hint(struct msm_fb_data_type *mfd, int hint)
{
	struct mdss_panel_data *pdata = NULL;
	struct mdss_wb_ctrl *wb_ctrl = NULL;

	if (!mfd) {
		pr_err("No panel data!\n");
		return -EINVAL;
	}

	pdata = mfd->pdev->dev.platform_data;
	wb_ctrl = container_of(pdata, struct mdss_wb_ctrl, pdata);

	switch (hint) {
	case MDP_WRITEBACK_MIRROR_ON:
	case MDP_WRITEBACK_MIRROR_PAUSE:
	case MDP_WRITEBACK_MIRROR_RESUME:
	case MDP_WRITEBACK_MIRROR_OFF:
		pr_info("wfd state switched to %d\n", hint);
		switch_set_state(&wb_ctrl->sdev, hint);
		return 0;
	default:
		return -EINVAL;
	}
}

int mdss_mdp_wb_get_format(struct msm_fb_data_type *mfd,
			   struct mdp_mixer_cfg *mixer_cfg)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	if (!ctl) {
		pr_err("No panel data!\n");
		return -EINVAL;
	} else {
		mixer_cfg->writeback_format = ctl->dst_format;
	}

	return 0;
}

int mdss_mdp_wb_set_format(struct msm_fb_data_type *mfd, u32 dst_format)
{
	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);

	if (!ctl) {
		pr_err("No panel data!\n");
		return -EINVAL;
	} else if (dst_format >= MDP_IMGTYPE_LIMIT2) {
		pr_err("Invalid dst format=%u\n", dst_format);
		return -EINVAL;
	} else {
		ctl->dst_format = dst_format;
	}

	pr_debug("wfd format %d\n", ctl->dst_format);
	return 0;
}

int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd,
			      void *arg)
{
	struct msmfb_data data;
	int ret = -ENOSYS, hint = 0;

	switch (cmd) {
	case MSMFB_WRITEBACK_INIT:
		ret = mdss_mdp_wb_init(mfd);
		break;
	case MSMFB_WRITEBACK_START:
		ret = mdss_mdp_wb_start(mfd);
		break;
	case MSMFB_WRITEBACK_STOP:
		ret = mdss_mdp_wb_stop(mfd);
		break;
	case MSMFB_WRITEBACK_QUEUE_BUFFER:
		if (!copy_from_user(&data, arg, sizeof(data))) {
			ret = mdss_mdp_wb_queue(mfd, &data, false);
			ret = copy_to_user(arg, &data, sizeof(data));
		} else {
			pr_err("wb queue buf failed on copy_from_user\n");
			ret = -EFAULT;
		}
		break;
	case MSMFB_WRITEBACK_DEQUEUE_BUFFER:
		if (!copy_from_user(&data, arg, sizeof(data))) {
			ret = mdss_mdp_wb_dequeue(mfd, &data);
			ret = copy_to_user(arg, &data, sizeof(data));
		} else {
			pr_err("wb dequeue buf failed on copy_from_user\n");
			ret = -EFAULT;
		}
		break;
	case MSMFB_WRITEBACK_TERMINATE:
		ret = mdss_iommu_ctrl(1);
		if (IS_ERR_VALUE(ret)) {
			pr_err("IOMMU attach failed\n");
			return ret;
		}
		ret = mdss_mdp_wb_terminate(mfd);
		mdss_iommu_ctrl(0);
		break;
	case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
		if (!copy_from_user(&hint, arg, sizeof(hint))) {
			ret = mdss_mdp_wb_set_mirr_hint(mfd, hint);
		} else {
			pr_err("set mirroring hint failed on copy_from_user\n");
			ret = -EFAULT;
		}
		break;
	}

	return ret;
}

int msm_fb_writeback_start(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_start(mfd);
}
EXPORT_SYMBOL(msm_fb_writeback_start);

int msm_fb_writeback_queue_buffer(struct fb_info *info,
				  struct msmfb_data *data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_queue(mfd, data, true);
}
EXPORT_SYMBOL(msm_fb_writeback_queue_buffer);

int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
				    struct msmfb_data *data)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_dequeue(mfd, data);
}
EXPORT_SYMBOL(msm_fb_writeback_dequeue_buffer);

int msm_fb_writeback_stop(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_stop(mfd);
}
EXPORT_SYMBOL(msm_fb_writeback_stop);

int msm_fb_writeback_init(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_init(mfd);
}
EXPORT_SYMBOL(msm_fb_writeback_init);

int msm_fb_writeback_terminate(struct fb_info *info)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_terminate(mfd);
}
EXPORT_SYMBOL(msm_fb_writeback_terminate);

int msm_fb_get_iommu_domain(struct fb_info *info, int domain)
{
	int mdss_domain;

	switch (domain) {
	case MDP_IOMMU_DOMAIN_CP:
		mdss_domain = MDSS_IOMMU_DOMAIN_SECURE;
		break;
	case MDP_IOMMU_DOMAIN_NS:
		mdss_domain = MDSS_IOMMU_DOMAIN_UNSECURE;
		break;
	default:
		pr_err("Invalid mdp iommu domain (%d)\n", domain);
		return -EINVAL;
	}
	return mdss_get_iommu_domain(mdss_domain);
}
EXPORT_SYMBOL(msm_fb_get_iommu_domain);

int msm_fb_writeback_set_secure(struct fb_info *info, int enable)
{
	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;

	if (!mfd)
		return -ENODEV;

	return mdss_mdp_wb_set_secure(mfd, enable);
}
EXPORT_SYMBOL(msm_fb_writeback_set_secure);

/**
 * msm_fb_writeback_iommu_ref() - Add/remove a vote on the MDSS IOMMU being
 * attached.
 * @info: Frame buffer info of the writeback device
 * @enable: true adds a vote on the MDSS IOMMU, false removes the vote
 *
 * Call this to vote on the MDSS IOMMU being enabled, so that writeback
 * buffers are properly mapped to the IOMMU context bank while in use.
 */
int msm_fb_writeback_iommu_ref(struct fb_info *info, int enable)
{
	int ret;

	if (enable) {
		ret = mdss_iommu_ctrl(1);
		if (IS_ERR_VALUE(ret)) {
			pr_err("IOMMU attach failed\n");
			return ret;
		}
	} else {
		mdss_iommu_ctrl(0);
	}

	return 0;
}
EXPORT_SYMBOL(msm_fb_writeback_iommu_ref);
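
/*
 * Illustrative sketch only (compiled out, not part of the driver): a client
 * holding an IOMMU vote around the period in which it maps and uses writeback
 * buffers, so the MDSS IOMMU stays attached for the duration. The helper name
 * and the "map/use buffers" step are placeholders.
 */
#if 0
static int example_wb_with_iommu_vote(struct fb_info *info)
{
	int ret;

	ret = msm_fb_writeback_iommu_ref(info, 1);
	if (ret)
		return ret;

	/* ... map writeback buffers and run the writeback session ... */

	msm_fb_writeback_iommu_ref(info, 0);
	return 0;
}
#endif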