wfd-ioctl.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840
  1. /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/module.h>
  14. #include <linux/types.h>
  15. #include <linux/list.h>
  16. #include <linux/ioctl.h>
  17. #include <linux/mutex.h>
  18. #include <linux/init.h>
  19. #include <linux/version.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/sched.h>
  22. #include <linux/kthread.h>
  23. #include <linux/time.h>
  24. #include <linux/slab.h>
  25. #include <mach/board.h>
  26. #include <media/v4l2-dev.h>
  27. #include <media/v4l2-device.h>
  28. #include <media/v4l2-event.h>
  29. #include <media/v4l2-ioctl.h>
  30. #include <media/v4l2-subdev.h>
  31. #include <media/videobuf2-core.h>
  32. #include <media/videobuf2-msm-mem.h>
  33. #include "wfd-util.h"
  34. #include "mdp-subdev.h"
  35. #include "enc-subdev.h"
  36. #include "vsg-subdev.h"
  37. #define WFD_VERSION KERNEL_VERSION(0, 0, 1)
  38. #define WFD_NUM_DEVICES 2
  39. #define WFD_DEVICE_NUMBER_BASE 38
  40. #define WFD_DEVICE_SECURE (WFD_DEVICE_NUMBER_BASE + 1)
  41. #define DEFAULT_WFD_WIDTH 1280
  42. #define DEFAULT_WFD_HEIGHT 720
  43. #define VENC_INPUT_BUFFERS 4
  44. #define MAX_EVENTS 16
/* Device-level state: one instance per probed WFD platform device. */
struct wfd_device {
	struct mutex dev_lock;		/* serializes device-wide state changes */
	struct platform_device *pdev;
	struct v4l2_device v4l2_dev;
	struct video_device *pvdev;
	struct v4l2_subdev mdp_sdev;	/* frame source (display/MDP) subdevice */
	struct v4l2_subdev enc_sdev;	/* video encoder subdevice */
	struct v4l2_subdev vsg_sdev;	/* video stream governor subdevice */
	struct ion_client *ion_client;
	bool secure;			/* allocate from the secure (CP) ion heap */
	bool in_use;
	bool mdp_iommu_split_domain;
};
/* fd + offset pair identifying a userspace-supplied output buffer plane */
struct mem_info {
	u32 fd;
	u32 offset;
};
/* Node mapping a userspace pointer to its mem_info; lives on minfo_list */
struct mem_info_entry {
	struct list_head list;
	unsigned long userptr;
	struct mem_info minfo;
};
/*
 * One input buffer viewed from both sides of the pipeline: the same ion
 * allocation mapped into the encoder (enc) and MDP (mdp) domains.
 */
struct mem_region_pair {
	struct mem_region *enc;
	struct mem_region *mdp;
	struct list_head list;
};
/* Per-open instance state */
struct wfd_inst {
	struct vb2_queue vid_bufq;
	struct mutex lock;		/* guards size/geometry fields and lists below */
	struct mutex vb2_lock;		/* dropped/retaken around vb2 blocking waits */
	u32 buf_count;
	struct task_struct *mdp_task;	/* mdp_output_thread handle */
	void *mdp_inst;
	void *venc_inst;
	u32 height;
	u32 width;
	u32 pixelformat;
	struct list_head minfo_list;	/* registered output buffers (mem_info_entry) */
	bool streamoff;
	u32 input_bufs_allocated;	/* guard so input buffers are set up once */
	u32 input_buf_size;
	u32 out_buf_size;
	struct list_head input_mem_list; /* mem_region_pair nodes */
	struct wfd_stats stats;
	struct completion stop_mdp_thread; /* wakes mdp thread parked on error */
	struct v4l2_fh event_handler;	/* filp->private_data points here */
};
/* Driver wrapper around vb2_buffer (no private fields yet) */
struct wfd_vid_buffer {
	struct vb2_buffer vidbuf;
};
/*
 * Recover the owning wfd_inst from a struct file: filp->private_data
 * points at the embedded v4l2_fh (event_handler), so container_of()
 * walks back to the enclosing instance.
 */
static inline struct wfd_inst *file_to_inst(struct file *filp)
{
	return container_of(filp->private_data, struct wfd_inst, event_handler);
}
  100. static int wfd_vidbuf_queue_setup(struct vb2_queue *q,
  101. const struct v4l2_format *fmt,
  102. unsigned int *num_buffers,
  103. unsigned int *num_planes,
  104. unsigned int sizes[], void *alloc_ctxs[])
  105. {
  106. struct file *priv_data = (struct file *)(q->drv_priv);
  107. struct wfd_inst *inst = file_to_inst(priv_data);
  108. int i;
  109. WFD_MSG_DBG("In %s\n", __func__);
  110. if (num_buffers == NULL || num_planes == NULL)
  111. return -EINVAL;
  112. *num_planes = 1;
  113. mutex_lock(&inst->lock);
  114. for (i = 0; i < *num_planes; ++i) {
  115. sizes[i] = inst->out_buf_size;
  116. alloc_ctxs[i] = inst;
  117. }
  118. mutex_unlock(&inst->lock);
  119. return 0;
  120. }
  121. static void wfd_vidbuf_wait_prepare(struct vb2_queue *q)
  122. {
  123. struct file *priv_data = (struct file *)(q->drv_priv);
  124. struct wfd_inst *inst = file_to_inst(priv_data);
  125. mutex_unlock(&inst->vb2_lock);
  126. }
  127. static void wfd_vidbuf_wait_finish(struct vb2_queue *q)
  128. {
  129. struct file *priv_data = (struct file *)(q->drv_priv);
  130. struct wfd_inst *inst = file_to_inst(priv_data);
  131. mutex_lock(&inst->vb2_lock);
  132. }
  133. static unsigned long wfd_enc_addr_to_mdp_addr(struct wfd_inst *inst,
  134. unsigned long addr)
  135. {
  136. struct list_head *ptr, *next;
  137. struct mem_region_pair *mpair;
  138. if (!list_empty(&inst->input_mem_list)) {
  139. list_for_each_safe(ptr, next,
  140. &inst->input_mem_list) {
  141. mpair = list_entry(ptr, struct mem_region_pair,
  142. list);
  143. if (mpair->enc->paddr == (u8 *)addr)
  144. return (unsigned long)mpair->mdp->paddr;
  145. }
  146. }
  147. return (unsigned long)NULL;
  148. }
#ifdef CONFIG_MSM_WFD_DEBUG
/* Debug builds: map ion buffers into kernel space for inspection. */
static void *wfd_map_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	return ion_map_kernel(client, handle);
}
static void wfd_unmap_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	ion_unmap_kernel(client, handle);
}
#else
/*
 * Non-debug builds: no kernel mapping is ever created; callers treat a
 * NULL kvaddr as "not mapped", so these stubs keep call sites #ifdef-free.
 */
static void *wfd_map_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	return NULL;
}
static void wfd_unmap_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	return;
}
#endif
  172. static int wfd_allocate_ion_buffer(struct ion_client *client,
  173. bool secure, struct mem_region *mregion)
  174. {
  175. struct ion_handle *handle = NULL;
  176. unsigned int alloc_regions = 0, ion_flags = 0, align = 0;
  177. int rc = 0;
  178. if (secure) {
  179. alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID);
  180. ion_flags = ION_FLAG_SECURE;
  181. align = SZ_1M;
  182. } else {
  183. alloc_regions = ION_HEAP(ION_IOMMU_HEAP_ID);
  184. align = SZ_4K;
  185. }
  186. handle = ion_alloc(client, mregion->size, align,
  187. alloc_regions, ion_flags);
  188. if (IS_ERR_OR_NULL(handle)) {
  189. WFD_MSG_ERR("Failed to allocate input buffer\n");
  190. rc = PTR_ERR(handle);
  191. goto alloc_fail;
  192. }
  193. mregion->kvaddr = secure ? NULL :
  194. wfd_map_kernel(client, handle);
  195. mregion->ion_handle = handle;
  196. return rc;
  197. alloc_fail:
  198. if (!IS_ERR_OR_NULL(handle)) {
  199. if (!IS_ERR_OR_NULL(mregion->kvaddr))
  200. wfd_unmap_kernel(client, handle);
  201. ion_free(client, handle);
  202. mregion->kvaddr = NULL;
  203. mregion->paddr = NULL;
  204. mregion->ion_handle = NULL;
  205. }
  206. return rc;
  207. }
  208. /* Doesn't do iommu unmap */
  209. static int wfd_free_ion_buffer(struct ion_client *client,
  210. struct mem_region *mregion)
  211. {
  212. if (!client || !mregion) {
  213. WFD_MSG_ERR("Failed to free ion buffer: "
  214. "Invalid client or region");
  215. return -EINVAL;
  216. }
  217. if (!IS_ERR_OR_NULL(mregion->kvaddr))
  218. wfd_unmap_kernel(client, mregion->ion_handle);
  219. ion_free(client, mregion->ion_handle);
  220. return 0;
  221. }
  222. static int wfd_flush_ion_buffer(struct ion_client *client,
  223. struct mem_region *mregion)
  224. {
  225. if (!client || !mregion) {
  226. WFD_MSG_ERR("Failed to flush ion buffer: "
  227. "Invalid client or region");
  228. return -EINVAL;
  229. } else if (!mregion->ion_handle) {
  230. WFD_MSG_ERR("Failed to flush ion buffer: "
  231. "not an ion buffer");
  232. return -EINVAL;
  233. }
  234. return msm_ion_do_cache_op(client,
  235. mregion->ion_handle,
  236. mregion->kvaddr,
  237. mregion->size,
  238. ION_IOC_INV_CACHES);
  239. }
  240. static int wfd_allocate_input_buffers(struct wfd_device *wfd_dev,
  241. struct wfd_inst *inst)
  242. {
  243. int i;
  244. struct mem_region *enc_mregion, *mdp_mregion;
  245. struct mem_region_pair *mpair;
  246. int rc;
  247. struct mdp_buf_info mdp_buf = {0};
  248. struct mem_region_map mmap_context = {0};
  249. mutex_lock(&inst->lock);
  250. if (inst->input_bufs_allocated) {
  251. mutex_unlock(&inst->lock);
  252. return 0;
  253. }
  254. inst->input_bufs_allocated = true;
  255. mutex_unlock(&inst->lock);
  256. for (i = 0; i < VENC_INPUT_BUFFERS; ++i) {
  257. mpair = kzalloc(sizeof(*mpair), GFP_KERNEL);
  258. enc_mregion = kzalloc(sizeof(*enc_mregion), GFP_KERNEL);
  259. mdp_mregion = kzalloc(sizeof(*enc_mregion), GFP_KERNEL);
  260. enc_mregion->size = ALIGN(inst->input_buf_size, SZ_4K);
  261. rc = wfd_allocate_ion_buffer(wfd_dev->ion_client,
  262. wfd_dev->secure, enc_mregion);
  263. if (rc) {
  264. WFD_MSG_ERR("Failed to allocate input memory\n");
  265. goto alloc_fail;
  266. }
  267. mmap_context.mregion = enc_mregion;
  268. mmap_context.ion_client = wfd_dev->ion_client;
  269. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  270. ENC_MMAP, &mmap_context);
  271. if (rc) {
  272. WFD_MSG_ERR("Failed to map input memory\n");
  273. goto alloc_fail;
  274. } else if (!enc_mregion->paddr) {
  275. WFD_MSG_ERR("ENC_MMAP returned success" \
  276. "but failed to map input memory\n");
  277. rc = -EINVAL;
  278. goto alloc_fail;
  279. }
  280. WFD_MSG_DBG("NOTE: enc paddr = [%p->%p], kvaddr = %p\n",
  281. enc_mregion->paddr, (int8_t *)
  282. enc_mregion->paddr + enc_mregion->size,
  283. enc_mregion->kvaddr);
  284. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  285. SET_INPUT_BUFFER, (void *)enc_mregion);
  286. if (rc) {
  287. WFD_MSG_ERR("Setting enc input buffer failed\n");
  288. goto set_input_fail;
  289. }
  290. /* map the buffer from encoder to mdp */
  291. mdp_mregion->kvaddr = enc_mregion->kvaddr;
  292. mdp_mregion->size = enc_mregion->size;
  293. mdp_mregion->offset = enc_mregion->offset;
  294. mdp_mregion->fd = enc_mregion->fd;
  295. mdp_mregion->cookie = 0;
  296. mdp_mregion->ion_handle = enc_mregion->ion_handle;
  297. memset(&mmap_context, 0, sizeof(mmap_context));
  298. mmap_context.mregion = mdp_mregion;
  299. mmap_context.ion_client = wfd_dev->ion_client;
  300. mmap_context.cookie = inst->mdp_inst;
  301. rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
  302. MDP_MMAP, (void *)&mmap_context);
  303. if (rc) {
  304. WFD_MSG_ERR(
  305. "Failed to map to mdp, rc = %d, paddr = 0x%p\n",
  306. rc, mdp_mregion->paddr);
  307. mdp_mregion->kvaddr = NULL;
  308. mdp_mregion->paddr = NULL;
  309. mdp_mregion->ion_handle = NULL;
  310. goto mdp_mmap_fail;
  311. } else if (!mdp_mregion->paddr) {
  312. WFD_MSG_ERR("MDP_MMAP returned success" \
  313. "but failed to map to MDP\n");
  314. rc = -EINVAL;
  315. mdp_mregion->kvaddr = NULL;
  316. mdp_mregion->paddr = NULL;
  317. mdp_mregion->ion_handle = NULL;
  318. goto mdp_mmap_fail;
  319. }
  320. mdp_buf.inst = inst->mdp_inst;
  321. mdp_buf.cookie = enc_mregion;
  322. mdp_buf.kvaddr = (u32) mdp_mregion->kvaddr;
  323. mdp_buf.paddr = (u32) mdp_mregion->paddr;
  324. WFD_MSG_DBG("NOTE: mdp paddr = [%p->%p], kvaddr = %p\n",
  325. mdp_mregion->paddr, (void *)
  326. ((int)mdp_mregion->paddr + mdp_mregion->size),
  327. mdp_mregion->kvaddr);
  328. rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
  329. MDP_Q_BUFFER, (void *)&mdp_buf);
  330. if (rc) {
  331. WFD_MSG_ERR("Unable to queue the"
  332. " buffer to mdp\n");
  333. goto mdp_q_fail;
  334. } else {
  335. wfd_stats_update(&inst->stats,
  336. WFD_STAT_EVENT_MDP_QUEUE);
  337. }
  338. INIT_LIST_HEAD(&mpair->list);
  339. mpair->enc = enc_mregion;
  340. mpair->mdp = mdp_mregion;
  341. list_add_tail(&mpair->list, &inst->input_mem_list);
  342. }
  343. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  344. ALLOC_RECON_BUFFERS, NULL);
  345. if (rc) {
  346. WFD_MSG_ERR("Failed to allocate recon buffers\n");
  347. goto recon_alloc_fail;
  348. }
  349. return rc;
  350. /*
  351. * Clean up only the buffer that we failed in setting up.
  352. * Caller will clean up the rest by calling free_input_buffers()
  353. */
  354. mdp_q_fail:
  355. memset(&mmap_context, 0, sizeof(mmap_context));
  356. mmap_context.mregion = mdp_mregion;
  357. mmap_context.ion_client = wfd_dev->ion_client;
  358. mmap_context.cookie = inst->mdp_inst;
  359. v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
  360. MDP_MUNMAP, (void *)&mmap_context);
  361. mdp_mmap_fail:
  362. v4l2_subdev_call(&wfd_dev->enc_sdev,
  363. core, ioctl, FREE_INPUT_BUFFER,
  364. (void *)enc_mregion);
  365. set_input_fail:
  366. memset(&mmap_context, 0, sizeof(mmap_context));
  367. mmap_context.ion_client = wfd_dev->ion_client;
  368. mmap_context.mregion = enc_mregion;
  369. v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  370. ENC_MUNMAP, &mmap_context);
  371. alloc_fail:
  372. kfree(mpair);
  373. kfree(enc_mregion);
  374. kfree(mdp_mregion);
  375. recon_alloc_fail:
  376. return rc;
  377. }
/*
 * Tear down everything wfd_allocate_input_buffers() set up: for each
 * enc/mdp pair, release the encoder's hold, unmap both iommu mappings,
 * free the shared ion allocation and the book-keeping, then free the
 * encoder's recon buffers.  Idempotent via input_bufs_allocated.
 */
static void wfd_free_input_buffers(struct wfd_device *wfd_dev,
		struct wfd_inst *inst)
{
	struct list_head *ptr, *next;
	struct mem_region_pair *mpair;
	int rc = 0;
	mutex_lock(&inst->lock);
	if (!inst->input_bufs_allocated) {
		mutex_unlock(&inst->lock);
		return;
	}
	inst->input_bufs_allocated = false;
	mutex_unlock(&inst->lock);
	if (!list_empty(&inst->input_mem_list)) {
		/* _safe variant: entries are deleted while iterating */
		list_for_each_safe(ptr, next,
				&inst->input_mem_list) {
			mpair = list_entry(ptr, struct mem_region_pair,
					list);
			rc = v4l2_subdev_call(&wfd_dev->enc_sdev,
					core, ioctl, FREE_INPUT_BUFFER,
					(void *)mpair->enc);
			if (rc)
				WFD_MSG_ERR("Failed to free buffers "
						"from encoder\n");
			/* Unmap from MDP only if the mapping succeeded */
			if (mpair->mdp->paddr) {
				struct mem_region_map temp = {0};
				temp.ion_client = wfd_dev->ion_client;
				temp.mregion = mpair->mdp;
				temp.cookie = inst->mdp_inst;
				v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
						ioctl, MDP_MUNMAP,
						(void *)&temp);
			}
			if (mpair->enc->paddr) {
				struct mem_region_map temp = {0};
				temp.ion_client = wfd_dev->ion_client;
				temp.mregion = mpair->enc;
				v4l2_subdev_call(&wfd_dev->enc_sdev,
						core, ioctl, ENC_MUNMAP, &temp);
			}
			/* enc and mdp share one ion handle; free it once */
			wfd_free_ion_buffer(wfd_dev->ion_client, mpair->enc);
			list_del(&mpair->list);
			kfree(mpair->enc);
			kfree(mpair->mdp);
			kfree(mpair);
		}
	}
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			FREE_RECON_BUFFERS, NULL);
	if (rc)
		WFD_MSG_ERR("Failed to free recon buffers\n");
}
  430. static struct mem_info *wfd_get_mem_info(struct wfd_inst *inst,
  431. unsigned long userptr)
  432. {
  433. struct mem_info_entry *temp;
  434. struct mem_info *ret = NULL;
  435. mutex_lock(&inst->lock);
  436. if (!list_empty(&inst->minfo_list)) {
  437. list_for_each_entry(temp, &inst->minfo_list, list) {
  438. if (temp && temp->userptr == userptr) {
  439. ret = &temp->minfo;
  440. break;
  441. }
  442. }
  443. }
  444. mutex_unlock(&inst->lock);
  445. return ret;
  446. }
  447. static void wfd_put_mem_info(struct wfd_inst *inst,
  448. struct mem_info *minfo)
  449. {
  450. struct list_head *ptr, *next;
  451. struct mem_info_entry *temp;
  452. mutex_lock(&inst->lock);
  453. if (!list_empty(&inst->minfo_list)) {
  454. list_for_each_safe(ptr, next,
  455. &inst->minfo_list) {
  456. temp = list_entry(ptr, struct mem_info_entry,
  457. list);
  458. if (temp && (&temp->minfo == minfo)) {
  459. list_del(&temp->list);
  460. kfree(temp);
  461. }
  462. }
  463. }
  464. mutex_unlock(&inst->lock);
  465. }
  466. static void wfd_unregister_out_buf(struct wfd_inst *inst,
  467. struct mem_info *minfo)
  468. {
  469. if (!minfo || !inst) {
  470. WFD_MSG_ERR("Invalid arguments\n");
  471. return;
  472. }
  473. wfd_put_mem_info(inst, minfo);
  474. }
  475. static int wfd_vidbuf_buf_init(struct vb2_buffer *vb)
  476. {
  477. int rc = 0;
  478. struct vb2_queue *q = vb->vb2_queue;
  479. struct file *priv_data = (struct file *)(q->drv_priv);
  480. struct wfd_inst *inst = file_to_inst(priv_data);
  481. struct wfd_device *wfd_dev =
  482. (struct wfd_device *)video_drvdata(priv_data);
  483. struct mem_info *minfo = vb2_plane_cookie(vb, 0);
  484. struct mem_region mregion;
  485. mregion.fd = minfo->fd;
  486. mregion.offset = minfo->offset;
  487. mregion.cookie = (u32)vb;
  488. /*TODO: should be fixed in kernel 3.2*/
  489. mregion.size = inst->out_buf_size;
  490. if (inst && !inst->vid_bufq.streaming) {
  491. rc = wfd_allocate_input_buffers(wfd_dev, inst);
  492. if (rc) {
  493. WFD_MSG_ERR("Failed to allocate input buffers\n");
  494. goto free_input_bufs;
  495. }
  496. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  497. SET_OUTPUT_BUFFER, (void *)&mregion);
  498. if (rc) {
  499. WFD_MSG_ERR("Failed to set output buffer\n");
  500. goto free_input_bufs;
  501. }
  502. }
  503. return rc;
  504. free_input_bufs:
  505. wfd_free_input_buffers(wfd_dev, inst);
  506. return rc;
  507. }
/* vb2 buf_prepare: nothing to do per-queue; setup happens in buf_init. */
static int wfd_vidbuf_buf_prepare(struct vb2_buffer *vb)
{
	return 0;
}
/* vb2 buf_finish: no post-dequeue processing required. */
static int wfd_vidbuf_buf_finish(struct vb2_buffer *vb)
{
	return 0;
}
/*
 * vb2 buf_cleanup: release the encoder's hold on this output buffer
 * (FREE_OUTPUT_BUFFER) and drop its registration from minfo_list.
 */
static void wfd_vidbuf_buf_cleanup(struct vb2_buffer *vb)
{
	int rc = 0;
	struct vb2_queue *q = vb->vb2_queue;
	struct file *priv_data = (struct file *)(q->drv_priv);
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(priv_data);
	struct wfd_inst *inst = file_to_inst(priv_data);
	struct mem_info *minfo = vb2_plane_cookie(vb, 0);
	struct mem_region mregion;
	/* NULL cookie means plane setup never completed: nothing to free */
	if (minfo == NULL) {
		WFD_MSG_DBG("not freeing buffers since allocation failed");
		return;
	}
	mregion.fd = minfo->fd;
	mregion.offset = minfo->offset;
	mregion.cookie = (u32)vb;
	mregion.size = inst->out_buf_size;
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			FREE_OUTPUT_BUFFER, (void *)&mregion);
	if (rc)
		WFD_MSG_ERR("Failed to free output buffer\n");
	wfd_unregister_out_buf(inst, minfo);
}
/*
 * Per-stream kernel thread: dequeues composed frames from MDP and hands
 * them to the VSG for pacing/encoding.  Runs until kthread_stop().
 *
 * On the first error it parks on stop_mdp_thread (completed by
 * stop_streaming) instead of spinning, then just polls
 * kthread_should_stop() until it is told to exit.
 */
static int mdp_output_thread(void *data)
{
	int rc = 0, no_sig_wait = 0;
	struct file *filp = (struct file *)data;
	struct wfd_inst *inst = file_to_inst(filp);
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(filp);
	struct mdp_buf_info obuf_mdp = {inst->mdp_inst, 0, 0, 0};
	struct mem_region *mregion;
	struct vsg_buf_info ibuf_vsg;
	while (!kthread_should_stop()) {
		if (rc) {
			WFD_MSG_DBG("%s() error in output thread\n", __func__);
			if (!no_sig_wait) {
				/* Wait (once) for streamoff to signal us */
				wait_for_completion(&inst->stop_mdp_thread);
				no_sig_wait = 1;
			}
			continue;
		}
		WFD_MSG_DBG("waiting for mdp output\n");
		rc = v4l2_subdev_call(&wfd_dev->mdp_sdev,
			core, ioctl, MDP_DQ_BUFFER, (void *)&obuf_mdp);
		if (rc) {
			/* -ENOBUFS is the quiet "stream stopped" case */
			if (rc != -ENOBUFS)
				WFD_MSG_ERR("MDP reported err %d\n", rc);
			WFD_MSG_ERR("Streamoff called\n");
			continue;
		} else {
			wfd_stats_update(&inst->stats,
				WFD_STAT_EVENT_MDP_DEQUEUE);
		}
		/* cookie carries the encoder-side mem_region queued in
		 * wfd_allocate_input_buffers() */
		mregion = obuf_mdp.cookie;
		if (!mregion) {
			WFD_MSG_ERR("mdp cookie is null\n");
			rc = -EINVAL;
			continue;
		}
		/* Rewrite addressing for the VSG: paddr translated from
		 * the encoder domain to the MDP domain */
		ibuf_vsg.mdp_buf_info = obuf_mdp;
		ibuf_vsg.mdp_buf_info.inst = inst->mdp_inst;
		ibuf_vsg.mdp_buf_info.cookie = mregion;
		ibuf_vsg.mdp_buf_info.kvaddr = (u32) mregion->kvaddr;
		ibuf_vsg.mdp_buf_info.paddr =
			(u32)wfd_enc_addr_to_mdp_addr(inst,
				(unsigned long)mregion->paddr);
		rc = v4l2_subdev_call(&wfd_dev->vsg_sdev,
			core, ioctl, VSG_Q_BUFFER, (void *)&ibuf_vsg);
		if (rc) {
			WFD_MSG_ERR("Failed to queue frame to vsg\n");
			continue;
		} else {
			wfd_stats_update(&inst->stats,
				WFD_STAT_EVENT_VSG_QUEUE);
		}
	}
	WFD_MSG_DBG("Exiting the thread\n");
	return rc;
}
/*
 * vb2 start_streaming: bring the pipeline up back-to-front (encoder,
 * then VSG, then the MDP output thread, then MDP itself).
 */
static int wfd_vidbuf_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct file *priv_data = (struct file *)(q->drv_priv);
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(priv_data);
	struct wfd_inst *inst = file_to_inst(priv_data);
	int rc = 0;
	WFD_MSG_ERR("Stream on called\n");
	WFD_MSG_DBG("enc start\n");
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			ENCODE_START, (void *)inst->venc_inst);
	if (rc) {
		WFD_MSG_ERR("Failed to start encoder\n");
		goto subdev_start_fail;
	}
	WFD_MSG_DBG("vsg start\n");
	rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
			VSG_START, NULL);
	if (rc) {
		WFD_MSG_ERR("Failed to start vsg\n");
		goto subdev_start_fail;
	}
	init_completion(&inst->stop_mdp_thread);
	inst->mdp_task = kthread_run(mdp_output_thread, priv_data,
				"mdp_output_thread");
	if (IS_ERR(inst->mdp_task)) {
		rc = PTR_ERR(inst->mdp_task);
		goto subdev_start_fail;
	}
	WFD_MSG_DBG("mdp start\n");
	rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
			MDP_START, (void *)inst->mdp_inst);
	if (rc)
		WFD_MSG_ERR("Failed to start MDP\n");
	/* NOTE(review): on any failure above, already-started subdevices
	 * and the mdp thread are left running — presumably unwound by a
	 * subsequent stop_streaming; verify against the vb2 core's
	 * error handling for this kernel version. */
subdev_start_fail:
	return rc;
}
/*
 * vb2 stop_streaming: quiesce the pipeline front-to-back (MDP, encoder
 * flush, VSG), retire the output thread, then stop the encoder.
 */
static int wfd_vidbuf_stop_streaming(struct vb2_queue *q)
{
	struct file *priv_data = (struct file *)(q->drv_priv);
	struct wfd_device *wfd_dev =
		(struct wfd_device *)video_drvdata(priv_data);
	struct wfd_inst *inst = file_to_inst(priv_data);
	int rc = 0;
	WFD_MSG_DBG("mdp stop\n");
	rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
			MDP_STOP, (void *)inst->mdp_inst);
	if (rc)
		WFD_MSG_ERR("Failed to stop MDP\n");
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			ENCODE_FLUSH, (void *)inst->venc_inst);
	if (rc)
		WFD_MSG_ERR("Failed to flush encoder\n");
	WFD_MSG_DBG("vsg stop\n");
	rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
			VSG_STOP, NULL);
	if (rc)
		WFD_MSG_ERR("Failed to stop VSG\n");
	/* Wake the thread if it is parked in its error path, then join */
	complete(&inst->stop_mdp_thread);
	kthread_stop(inst->mdp_task);
	WFD_MSG_DBG("enc stop\n");
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			ENCODE_STOP, (void *)inst->venc_inst);
	if (rc)
		WFD_MSG_ERR("Failed to stop encoder\n");
	return rc;
}
  664. static void wfd_vidbuf_buf_queue(struct vb2_buffer *vb)
  665. {
  666. int rc = 0;
  667. struct vb2_queue *q = vb->vb2_queue;
  668. struct file *priv_data = (struct file *)(q->drv_priv);
  669. struct wfd_device *wfd_dev =
  670. (struct wfd_device *)video_drvdata(priv_data);
  671. struct wfd_inst *inst = file_to_inst(priv_data);
  672. struct mem_region mregion;
  673. struct mem_info *minfo = vb2_plane_cookie(vb, 0);
  674. mregion.fd = minfo->fd;
  675. mregion.offset = minfo->offset;
  676. mregion.cookie = (u32)vb;
  677. mregion.size = inst->out_buf_size;
  678. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  679. FILL_OUTPUT_BUFFER, (void *)&mregion);
  680. if (rc) {
  681. WFD_MSG_ERR("Failed to fill output buffer\n");
  682. }
  683. }
/* vb2 callbacks for the capture (encoded output) buffer queue */
static struct vb2_ops wfd_vidbuf_ops = {
	.queue_setup = wfd_vidbuf_queue_setup,
	.wait_prepare = wfd_vidbuf_wait_prepare,
	.wait_finish = wfd_vidbuf_wait_finish,
	.buf_init = wfd_vidbuf_buf_init,
	.buf_prepare = wfd_vidbuf_buf_prepare,
	.buf_finish = wfd_vidbuf_buf_finish,
	.buf_cleanup = wfd_vidbuf_buf_cleanup,
	.start_streaming = wfd_vidbuf_start_streaming,
	.stop_streaming = wfd_vidbuf_stop_streaming,
	.buf_queue = wfd_vidbuf_buf_queue,
};
/* Each subdevice exposes init plus a private ioctl multiplexer */
static const struct v4l2_subdev_core_ops mdp_subdev_core_ops = {
	.init = mdp_init,
	.ioctl = mdp_ioctl,
};
static const struct v4l2_subdev_ops mdp_subdev_ops = {
	.core = &mdp_subdev_core_ops,
};
static const struct v4l2_subdev_core_ops enc_subdev_core_ops = {
	.init = venc_init,
	.load_fw = venc_load_fw,
	.ioctl = venc_ioctl,
};
static const struct v4l2_subdev_ops enc_subdev_ops = {
	.core = &enc_subdev_core_ops,
};
static const struct v4l2_subdev_core_ops vsg_subdev_core_ops = {
	.init = vsg_init,
	.ioctl = vsg_ioctl,
};
static const struct v4l2_subdev_ops vsg_subdev_ops = {
	.core = &vsg_subdev_core_ops,
};
  718. static int wfdioc_querycap(struct file *filp, void *fh,
  719. struct v4l2_capability *cap) {
  720. WFD_MSG_DBG("wfdioc_querycap: E\n");
  721. memset(cap, 0, sizeof(struct v4l2_capability));
  722. strlcpy(cap->driver, "wifi-display", sizeof(cap->driver));
  723. strlcpy(cap->card, "msm", sizeof(cap->card));
  724. cap->version = WFD_VERSION;
  725. cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
  726. WFD_MSG_DBG("wfdioc_querycap: X\n");
  727. return 0;
  728. }
  729. static int wfdioc_g_fmt(struct file *filp, void *fh,
  730. struct v4l2_format *fmt)
  731. {
  732. struct wfd_inst *inst = file_to_inst(filp);
  733. if (!fmt) {
  734. WFD_MSG_ERR("Invalid argument\n");
  735. return -EINVAL;
  736. }
  737. if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  738. WFD_MSG_ERR("Only V4L2_BUF_TYPE_VIDEO_CAPTURE is supported\n");
  739. return -EINVAL;
  740. }
  741. mutex_lock(&inst->lock);
  742. fmt->fmt.pix.width = inst->width;
  743. fmt->fmt.pix.height = inst->height;
  744. fmt->fmt.pix.pixelformat = inst->pixelformat;
  745. fmt->fmt.pix.sizeimage = inst->out_buf_size;
  746. fmt->fmt.pix.priv = 0;
  747. mutex_unlock(&inst->lock);
  748. return 0;
  749. }
/*
 * VIDIOC_S_FMT handler.  Accepts only V4L2_BUF_TYPE_VIDEO_CAPTURE with
 * V4L2_PIX_FMT_H264, and widths that are multiples of 16 (encoder
 * alignment requirement).
 *
 * Pushes the format and the derived input-buffer requirements to the
 * encoder subdevice, caches the negotiated sizes in the instance, then
 * propagates the new width/height to the MDP subdevice.
 *
 * Returns 0 on success or a negative error code; subdevice calls are
 * made in a fixed order and the instance lock is held only while the
 * cached fields are updated.
 */
static int wfdioc_s_fmt(struct file *filp, void *fh,
		struct v4l2_format *fmt)
{
	int rc = 0;
	struct wfd_inst *inst = file_to_inst(filp);
	struct wfd_device *wfd_dev = video_drvdata(filp);
	struct mdp_prop prop;
	struct bufreq breq;
	if (!fmt) {
		WFD_MSG_ERR("Invalid argument\n");
		return -EINVAL;
	}
	if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
		fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_H264) {
		WFD_MSG_ERR("Only V4L2_BUF_TYPE_VIDEO_CAPTURE and "
				"V4L2_PIX_FMT_H264 are supported\n");
		return -EINVAL;
	}
	if (fmt->fmt.pix.width % 16) {
		WFD_MSG_ERR("Only 16 byte aligned widths are supported\n");
		return -ENOTSUPP;
	}
	/* Let the encoder validate and adopt the format first */
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, SET_FORMAT,
				(void *)fmt);
	if (rc) {
		WFD_MSG_ERR("Failed to set format on encoder, rc = %d\n", rc);
		return rc;
	}
	/* Tell the encoder how many input buffers to expect at this size */
	breq.count = VENC_INPUT_BUFFERS;
	breq.height = fmt->fmt.pix.height;
	breq.width = fmt->fmt.pix.width;
	rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
			SET_BUFFER_REQ, (void *)&breq);
	if (rc) {
		WFD_MSG_ERR("Failed to set buffer reqs on encoder\n");
		return rc;
	}
	/* Cache the negotiated sizes; breq.size was filled by the encoder */
	mutex_lock(&inst->lock);
	inst->input_buf_size = breq.size;
	inst->out_buf_size = fmt->fmt.pix.sizeimage;
	prop.height = inst->height = fmt->fmt.pix.height;
	prop.width = inst->width = fmt->fmt.pix.width;
	prop.inst = inst->mdp_inst;
	mutex_unlock(&inst->lock);
	/* Finally propagate the geometry to the display pipeline */
	rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_SET_PROP,
				(void *)&prop);
	if (rc)
		WFD_MSG_ERR("Failed to set height/width property on mdp\n");
	return rc;
}
  800. static int wfdioc_reqbufs(struct file *filp, void *fh,
  801. struct v4l2_requestbuffers *b)
  802. {
  803. struct wfd_inst *inst = file_to_inst(filp);
  804. struct wfd_device *wfd_dev = video_drvdata(filp);
  805. int rc = 0;
  806. if (b->type != V4L2_CAP_VIDEO_CAPTURE ||
  807. b->memory != V4L2_MEMORY_USERPTR) {
  808. WFD_MSG_ERR("Only V4L2_CAP_VIDEO_CAPTURE and "
  809. "V4L2_MEMORY_USERPTR are supported\n");
  810. return -EINVAL;
  811. }
  812. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  813. GET_BUFFER_REQ, (void *)b);
  814. if (rc) {
  815. WFD_MSG_ERR("Failed to get buf reqs from encoder\n");
  816. return rc;
  817. }
  818. mutex_lock(&inst->lock);
  819. inst->buf_count = b->count;
  820. mutex_unlock(&inst->lock);
  821. rc = vb2_reqbufs(&inst->vid_bufq, b);
  822. return rc;
  823. }
  824. static int wfd_register_out_buf(struct wfd_inst *inst,
  825. struct v4l2_buffer *b)
  826. {
  827. struct mem_info_entry *minfo_entry;
  828. struct mem_info *minfo;
  829. if (!b || !inst || !b->reserved) {
  830. WFD_MSG_ERR("Invalid arguments\n");
  831. return -EINVAL;
  832. }
  833. minfo = wfd_get_mem_info(inst, b->m.userptr);
  834. if (!minfo) {
  835. minfo_entry = kzalloc(sizeof(struct mem_info_entry),
  836. GFP_KERNEL);
  837. if (copy_from_user(&minfo_entry->minfo, (void *)b->reserved,
  838. sizeof(struct mem_info))) {
  839. WFD_MSG_ERR(" copy_from_user failed. Populate"
  840. " v4l2_buffer->reserved with meminfo\n");
  841. return -EINVAL;
  842. }
  843. minfo_entry->userptr = b->m.userptr;
  844. mutex_lock(&inst->lock);
  845. list_add_tail(&minfo_entry->list, &inst->minfo_list);
  846. mutex_unlock(&inst->lock);
  847. } else
  848. WFD_MSG_DBG("Buffer already registered\n");
  849. return 0;
  850. }
  851. static int wfdioc_qbuf(struct file *filp, void *fh,
  852. struct v4l2_buffer *b)
  853. {
  854. int rc = 0;
  855. struct wfd_inst *inst = file_to_inst(filp);
  856. if (!inst || !b ||
  857. (b->index < 0 || b->index >= inst->buf_count)) {
  858. WFD_MSG_ERR("Invalid input parameters to QBUF IOCTL\n");
  859. return -EINVAL;
  860. }
  861. rc = wfd_register_out_buf(inst, b);
  862. if (rc) {
  863. WFD_MSG_ERR("Failed to register buffer\n");
  864. return rc;
  865. }
  866. mutex_lock(&inst->vb2_lock);
  867. rc = vb2_qbuf(&inst->vid_bufq, b);
  868. mutex_unlock(&inst->vb2_lock);
  869. if (rc)
  870. WFD_MSG_ERR("Failed to queue buffer\n");
  871. else
  872. wfd_stats_update(&inst->stats, WFD_STAT_EVENT_CLIENT_QUEUE);
  873. return rc;
  874. }
  875. static int wfdioc_streamon(struct file *filp, void *fh,
  876. enum v4l2_buf_type i)
  877. {
  878. int rc = 0;
  879. struct wfd_inst *inst = file_to_inst(filp);
  880. if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  881. WFD_MSG_ERR("stream on for buffer type = %d is not "
  882. "supported.\n", i);
  883. return -EINVAL;
  884. }
  885. mutex_lock(&inst->lock);
  886. inst->streamoff = false;
  887. mutex_unlock(&inst->lock);
  888. rc = vb2_streamon(&inst->vid_bufq, i);
  889. if (rc) {
  890. WFD_MSG_ERR("videobuf_streamon failed with err = %d\n", rc);
  891. goto vidbuf_streamon_failed;
  892. }
  893. return rc;
  894. vidbuf_streamon_failed:
  895. vb2_streamoff(&inst->vid_bufq, i);
  896. return rc;
  897. }
  898. static int wfdioc_streamoff(struct file *filp, void *fh,
  899. enum v4l2_buf_type i)
  900. {
  901. struct wfd_inst *inst = file_to_inst(filp);
  902. if (i != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  903. WFD_MSG_ERR("stream off for buffer type = %d is not "
  904. "supported.\n", i);
  905. return -EINVAL;
  906. }
  907. mutex_lock(&inst->lock);
  908. if (inst->streamoff) {
  909. WFD_MSG_ERR("Module is already in streamoff state\n");
  910. mutex_unlock(&inst->lock);
  911. return -EINVAL;
  912. }
  913. inst->streamoff = true;
  914. mutex_unlock(&inst->lock);
  915. WFD_MSG_DBG("Calling videobuf_streamoff\n");
  916. vb2_streamoff(&inst->vid_bufq, i);
  917. wake_up(&inst->event_handler.wait);
  918. return 0;
  919. }
  920. static int wfdioc_dqbuf(struct file *filp, void *fh,
  921. struct v4l2_buffer *b)
  922. {
  923. struct wfd_inst *inst = file_to_inst(filp);
  924. int rc;
  925. WFD_MSG_DBG("Waiting to dequeue buffer\n");
  926. mutex_lock(&inst->vb2_lock);
  927. rc = vb2_dqbuf(&inst->vid_bufq, b, false);
  928. mutex_unlock(&inst->vb2_lock);
  929. if (rc)
  930. WFD_MSG_ERR("Failed to dequeue buffer\n");
  931. else
  932. wfd_stats_update(&inst->stats, WFD_STAT_EVENT_CLIENT_DEQUEUE);
  933. return rc;
  934. }
  935. static int wfdioc_g_ctrl(struct file *filp, void *fh,
  936. struct v4l2_control *a)
  937. {
  938. int rc = 0;
  939. struct wfd_device *wfd_dev = video_drvdata(filp);
  940. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
  941. ioctl, GET_PROP, a);
  942. if (rc)
  943. WFD_MSG_ERR("Failed to get encoder property\n");
  944. return rc;
  945. }
  946. static int wfdioc_s_ctrl(struct file *filp, void *fh,
  947. struct v4l2_control *a)
  948. {
  949. int rc = 0;
  950. struct wfd_device *wfd_dev = video_drvdata(filp);
  951. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
  952. ioctl, SET_PROP, a);
  953. if (rc)
  954. WFD_MSG_ERR("Failed to set encoder property\n");
  955. return rc;
  956. }
  957. static int wfdioc_g_parm(struct file *filp, void *fh,
  958. struct v4l2_streamparm *a)
  959. {
  960. int rc = 0;
  961. struct wfd_device *wfd_dev = video_drvdata(filp);
  962. struct wfd_inst *inst = file_to_inst(filp);
  963. int64_t frame_interval = 0,
  964. max_frame_interval = 0; /* both in nsecs*/
  965. struct v4l2_qcom_frameskip frameskip, *usr_frameskip;
  966. usr_frameskip = (struct v4l2_qcom_frameskip *)
  967. a->parm.capture.extendedmode;
  968. if (!usr_frameskip) {
  969. rc = -EINVAL;
  970. goto get_parm_fail;
  971. }
  972. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  973. ioctl, VSG_GET_FRAME_INTERVAL, &frame_interval);
  974. if (rc < 0)
  975. goto get_parm_fail;
  976. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  977. ioctl, VSG_GET_MAX_FRAME_INTERVAL, &max_frame_interval);
  978. if (rc < 0)
  979. goto get_parm_fail;
  980. frameskip = (struct v4l2_qcom_frameskip) {
  981. .maxframeinterval = max_frame_interval,
  982. };
  983. a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  984. a->parm.capture = (struct v4l2_captureparm) {
  985. .capability = V4L2_CAP_TIMEPERFRAME,
  986. .capturemode = 0,
  987. .timeperframe = (struct v4l2_fract) {
  988. .numerator = frame_interval,
  989. .denominator = NSEC_PER_SEC,
  990. },
  991. .readbuffers = inst->buf_count,
  992. .extendedmode = (__u32)usr_frameskip,
  993. .reserved = {0}
  994. };
  995. rc = copy_to_user((void *)a->parm.capture.extendedmode,
  996. &frameskip, sizeof(frameskip));
  997. if (rc < 0)
  998. goto get_parm_fail;
  999. get_parm_fail:
  1000. return rc;
  1001. }
  1002. static int wfdioc_s_parm(struct file *filp, void *fh,
  1003. struct v4l2_streamparm *a)
  1004. {
  1005. int rc = 0;
  1006. struct wfd_device *wfd_dev = video_drvdata(filp);
  1007. struct wfd_inst *inst = file_to_inst(filp);
  1008. struct v4l2_qcom_frameskip frameskip;
  1009. int64_t frame_interval = 0,
  1010. max_frame_interval = 0,
  1011. frame_interval_variance = 0;
  1012. void *extendedmode = NULL;
  1013. enum vsg_modes vsg_mode = VSG_MODE_VFR;
  1014. enum venc_framerate_modes venc_mode = VENC_MODE_VFR;
  1015. if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  1016. rc = -ENOTSUPP;
  1017. goto set_parm_fail;
  1018. }
  1019. if (a->parm.capture.readbuffers == 0 ||
  1020. a->parm.capture.readbuffers == inst->buf_count) {
  1021. a->parm.capture.readbuffers = inst->buf_count;
  1022. } else {
  1023. rc = -EINVAL;
  1024. goto set_parm_fail;
  1025. }
  1026. extendedmode = (void *)a->parm.capture.extendedmode;
  1027. if (a->parm.capture.capability & V4L2_CAP_TIMEPERFRAME) {
  1028. if (a->parm.capture.timeperframe.denominator == 0) {
  1029. rc = -EINVAL;
  1030. goto set_parm_fail;
  1031. }
  1032. frame_interval =
  1033. a->parm.capture.timeperframe.numerator * NSEC_PER_SEC /
  1034. a->parm.capture.timeperframe.denominator;
  1035. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  1036. ioctl, VSG_SET_FRAME_INTERVAL,
  1037. &frame_interval);
  1038. if (rc)
  1039. goto set_parm_fail;
  1040. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
  1041. ioctl, SET_FRAMERATE,
  1042. &a->parm.capture.timeperframe);
  1043. if (rc)
  1044. goto set_parm_fail;
  1045. }
  1046. if (a->parm.capture.capability & V4L2_CAP_QCOM_FRAMESKIP &&
  1047. extendedmode) {
  1048. rc = copy_from_user(&frameskip,
  1049. extendedmode, sizeof(frameskip));
  1050. if (rc)
  1051. goto set_parm_fail;
  1052. max_frame_interval = (int64_t)frameskip.maxframeinterval;
  1053. frame_interval_variance = frameskip.fpsvariance;
  1054. vsg_mode = VSG_MODE_VFR;
  1055. venc_mode = VENC_MODE_VFR;
  1056. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  1057. ioctl, VSG_SET_MAX_FRAME_INTERVAL,
  1058. &max_frame_interval);
  1059. if (rc)
  1060. goto set_parm_fail;
  1061. } else {
  1062. vsg_mode = VSG_MODE_CFR;
  1063. venc_mode = VENC_MODE_CFR;
  1064. }
  1065. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  1066. ioctl, VSG_SET_MODE, &vsg_mode);
  1067. if (rc) {
  1068. WFD_MSG_ERR("Setting FR mode for VSG failed\n");
  1069. goto set_parm_fail;
  1070. }
  1071. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core,
  1072. ioctl, SET_FRAMERATE_MODE,
  1073. &venc_mode);
  1074. if (rc) {
  1075. WFD_MSG_ERR("Setting FR mode for VENC failed\n");
  1076. goto set_parm_fail;
  1077. }
  1078. if (frame_interval_variance) {
  1079. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  1080. ioctl, VSG_SET_FRAME_INTERVAL_VARIANCE,
  1081. &frame_interval_variance);
  1082. if (rc) {
  1083. WFD_MSG_ERR("Setting FR variance for VSG failed\n");
  1084. goto set_parm_fail;
  1085. }
  1086. }
  1087. set_parm_fail:
  1088. return rc;
  1089. }
  1090. static int wfdioc_subscribe_event(struct v4l2_fh *fh,
  1091. struct v4l2_event_subscription *sub)
  1092. {
  1093. struct wfd_inst *inst = container_of(fh, struct wfd_inst,
  1094. event_handler);
  1095. return v4l2_event_subscribe(&inst->event_handler, sub, MAX_EVENTS);
  1096. }
  1097. static int wfdioc_unsubscribe_event(struct v4l2_fh *fh,
  1098. struct v4l2_event_subscription *sub)
  1099. {
  1100. struct wfd_inst *inst = container_of(fh, struct wfd_inst,
  1101. event_handler);
  1102. return v4l2_event_unsubscribe(&inst->event_handler, sub);
  1103. }
/* V4L2 ioctl dispatch table for the WFD capture device node. */
static const struct v4l2_ioctl_ops g_wfd_ioctl_ops = {
	.vidioc_querycap = wfdioc_querycap,
	.vidioc_s_fmt_vid_cap = wfdioc_s_fmt,
	.vidioc_g_fmt_vid_cap = wfdioc_g_fmt,
	.vidioc_reqbufs = wfdioc_reqbufs,
	.vidioc_qbuf = wfdioc_qbuf,
	.vidioc_streamon = wfdioc_streamon,
	.vidioc_streamoff = wfdioc_streamoff,
	.vidioc_dqbuf = wfdioc_dqbuf,
	.vidioc_g_ctrl = wfdioc_g_ctrl,
	.vidioc_s_ctrl = wfdioc_s_ctrl,
	.vidioc_g_parm = wfdioc_g_parm,
	.vidioc_s_parm = wfdioc_s_parm,
	.vidioc_subscribe_event = wfdioc_subscribe_event,
	.vidioc_unsubscribe_event = wfdioc_unsubscribe_event,
};
  1120. static int wfd_set_default_properties(struct file *filp)
  1121. {
  1122. struct v4l2_format fmt;
  1123. struct v4l2_control ctrl;
  1124. struct wfd_inst *inst = file_to_inst(filp);
  1125. if (!inst) {
  1126. WFD_MSG_ERR("Invalid argument\n");
  1127. return -EINVAL;
  1128. }
  1129. mutex_lock(&inst->lock);
  1130. fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1131. fmt.fmt.pix.height = inst->height = DEFAULT_WFD_HEIGHT;
  1132. fmt.fmt.pix.width = inst->width = DEFAULT_WFD_WIDTH;
  1133. fmt.fmt.pix.pixelformat = inst->pixelformat
  1134. = V4L2_PIX_FMT_H264;
  1135. mutex_unlock(&inst->lock);
  1136. wfdioc_s_fmt(filp, filp->private_data, &fmt);
  1137. ctrl.id = V4L2_CID_MPEG_VIDEO_HEADER_MODE;
  1138. ctrl.value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_I_FRAME;
  1139. wfdioc_s_ctrl(filp, filp->private_data, &ctrl);
  1140. return 0;
  1141. }
  1142. static void venc_op_buffer_done(void *cookie, u32 status,
  1143. struct vb2_buffer *buf)
  1144. {
  1145. struct file *filp = cookie;
  1146. struct wfd_inst *inst = file_to_inst(filp);
  1147. WFD_MSG_DBG("yay!! got callback\n");
  1148. mutex_lock(&inst->vb2_lock);
  1149. vb2_buffer_done(buf, status ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
  1150. mutex_unlock(&inst->vb2_lock);
  1151. }
  1152. static void venc_ip_buffer_done(void *cookie, u32 status,
  1153. struct mem_region *mregion)
  1154. {
  1155. struct file *filp = cookie;
  1156. struct wfd_inst *inst = file_to_inst(filp);
  1157. struct vsg_buf_info buf;
  1158. struct mdp_buf_info mdp_buf = {0};
  1159. struct wfd_device *wfd_dev =
  1160. (struct wfd_device *)video_drvdata(filp);
  1161. int rc = 0;
  1162. WFD_MSG_DBG("yay!! got ip callback\n");
  1163. mdp_buf.inst = inst->mdp_inst;
  1164. mdp_buf.cookie = mregion;
  1165. mdp_buf.kvaddr = (u32) mregion->kvaddr;
  1166. mdp_buf.paddr =
  1167. (u32)wfd_enc_addr_to_mdp_addr(inst,
  1168. (unsigned long)mregion->paddr);
  1169. buf.mdp_buf_info = mdp_buf;
  1170. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core,
  1171. ioctl, VSG_RETURN_IP_BUFFER, (void *)&buf);
  1172. if (rc)
  1173. WFD_MSG_ERR("Failed to return buffer to vsg\n");
  1174. else
  1175. wfd_stats_update(&inst->stats, WFD_STAT_EVENT_ENC_DEQUEUE);
  1176. }
  1177. static void venc_on_event(void *cookie, enum venc_event e)
  1178. {
  1179. struct file *filp = cookie;
  1180. struct wfd_inst *inst = file_to_inst(filp);
  1181. struct v4l2_event event;
  1182. int type = 0;
  1183. switch (e) {
  1184. case VENC_EVENT_HARDWARE_ERROR:
  1185. type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
  1186. break;
  1187. default:
  1188. /* Whatever~~ */
  1189. break;
  1190. }
  1191. if (type) {
  1192. event.id = 0;
  1193. event.type = type;
  1194. v4l2_event_queue_fh(&inst->event_handler, &event);
  1195. }
  1196. }
  1197. static int vsg_release_input_frame(void *cookie, struct vsg_buf_info *buf)
  1198. {
  1199. struct file *filp = cookie;
  1200. struct wfd_inst *inst = file_to_inst(filp);
  1201. struct wfd_device *wfd_dev =
  1202. (struct wfd_device *)video_drvdata(filp);
  1203. int rc = 0;
  1204. rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core,
  1205. ioctl, MDP_Q_BUFFER, buf);
  1206. if (rc)
  1207. WFD_MSG_ERR("Failed to Q buffer to mdp\n");
  1208. else {
  1209. wfd_stats_update(&inst->stats, WFD_STAT_EVENT_MDP_QUEUE);
  1210. wfd_stats_update(&inst->stats, WFD_STAT_EVENT_VSG_DEQUEUE);
  1211. }
  1212. return rc;
  1213. }
  1214. static int vsg_encode_frame(void *cookie, struct vsg_buf_info *buf)
  1215. {
  1216. struct file *filp = cookie;
  1217. struct wfd_inst *inst = file_to_inst(filp);
  1218. struct wfd_device *wfd_dev =
  1219. (struct wfd_device *)video_drvdata(filp);
  1220. struct venc_buf_info venc_buf;
  1221. int rc = 0;
  1222. if (!buf)
  1223. return -EINVAL;
  1224. venc_buf = (struct venc_buf_info){
  1225. .timestamp = timespec_to_ns(&buf->time),
  1226. .mregion = buf->mdp_buf_info.cookie
  1227. };
  1228. wfd_flush_ion_buffer(wfd_dev->ion_client, venc_buf.mregion);
  1229. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
  1230. ENCODE_FRAME, &venc_buf);
  1231. if (rc)
  1232. WFD_MSG_ERR("Encode failed\n");
  1233. else
  1234. wfd_stats_update(&inst->stats, WFD_STAT_EVENT_ENC_QUEUE);
  1235. return rc;
  1236. }
/*
 * vb2 USERPTR memory-ops shims.  The "allocation" is just a lookup of
 * the mem_info previously registered via wfd_register_out_buf(); the
 * alloc_ctx is the wfd_inst (set as drv_priv in
 * wfd_initialize_vb2_queue()).
 */
void *wfd_vb2_mem_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
		unsigned long size, int write)
{
	/* Returns the registered mem_info for vaddr, or NULL if unknown */
	return wfd_get_mem_info(alloc_ctx, vaddr);
}
void wfd_vb2_mem_ops_put_userptr(void *buf_priv)
{
	/*TODO: Free the list*/
}
void *wfd_vb2_mem_ops_cookie(void *buf_priv)
{
	/* buf_priv is already the mem_info; expose it as the cookie */
	return buf_priv;
}
static struct vb2_mem_ops wfd_vb2_mem_ops = {
	.get_userptr = wfd_vb2_mem_ops_get_userptr,
	.put_userptr = wfd_vb2_mem_ops_put_userptr,
	.cookie = wfd_vb2_mem_ops_cookie,
};
  1255. int wfd_initialize_vb2_queue(struct vb2_queue *q, void *priv)
  1256. {
  1257. q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  1258. q->io_modes = VB2_USERPTR;
  1259. q->ops = &wfd_vidbuf_ops;
  1260. q->mem_ops = &wfd_vb2_mem_ops;
  1261. q->drv_priv = priv;
  1262. return vb2_queue_init(q);
  1263. }
  1264. static int wfd_open(struct file *filp)
  1265. {
  1266. int rc = 0;
  1267. struct wfd_inst *inst = NULL;
  1268. struct wfd_device *wfd_dev = NULL;
  1269. struct venc_msg_ops enc_mops;
  1270. struct mdp_msg_ops mdp_mops;
  1271. struct vsg_msg_ops vsg_mops;
  1272. WFD_MSG_DBG("wfd_open: E\n");
  1273. wfd_dev = video_drvdata(filp);
  1274. if (!wfd_dev) {
  1275. rc = -EINVAL;
  1276. goto err_dev_busy;
  1277. }
  1278. mutex_lock(&wfd_dev->dev_lock);
  1279. if (wfd_dev->in_use) {
  1280. WFD_MSG_ERR("Device already in use.\n");
  1281. rc = -EBUSY;
  1282. mutex_unlock(&wfd_dev->dev_lock);
  1283. goto err_dev_busy;
  1284. }
  1285. wfd_dev->in_use = true;
  1286. mutex_unlock(&wfd_dev->dev_lock);
  1287. inst = kzalloc(sizeof(struct wfd_inst), GFP_KERNEL);
  1288. if (!inst) {
  1289. WFD_MSG_ERR("Could not allocate memory for "
  1290. "wfd instance\n");
  1291. rc = -ENOMEM;
  1292. goto err_mdp_open;
  1293. }
  1294. filp->private_data = &inst->event_handler;
  1295. mutex_init(&inst->lock);
  1296. mutex_init(&inst->vb2_lock);
  1297. INIT_LIST_HEAD(&inst->input_mem_list);
  1298. INIT_LIST_HEAD(&inst->minfo_list);
  1299. /* Set up userspace event handlers */
  1300. v4l2_fh_init(&inst->event_handler, wfd_dev->pvdev);
  1301. v4l2_fh_add(&inst->event_handler);
  1302. wfd_stats_init(&inst->stats, MINOR(filp->f_dentry->d_inode->i_rdev));
  1303. mdp_mops.secure = wfd_dev->secure;
  1304. mdp_mops.iommu_split_domain = wfd_dev->mdp_iommu_split_domain;
  1305. rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl, MDP_OPEN,
  1306. (void *)&mdp_mops);
  1307. if (rc) {
  1308. WFD_MSG_ERR("Failed to open mdp subdevice: %d\n", rc);
  1309. goto err_mdp_open;
  1310. }
  1311. inst->mdp_inst = mdp_mops.cookie;
  1312. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, load_fw);
  1313. if (rc) {
  1314. WFD_MSG_ERR("Failed to load video encoder firmware: %d\n", rc);
  1315. goto err_venc;
  1316. }
  1317. enc_mops.op_buffer_done = venc_op_buffer_done;
  1318. enc_mops.ip_buffer_done = venc_ip_buffer_done;
  1319. enc_mops.on_event = venc_on_event;
  1320. enc_mops.cbdata = filp;
  1321. enc_mops.secure = wfd_dev->secure;
  1322. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, OPEN,
  1323. (void *)&enc_mops);
  1324. if (rc || !enc_mops.cookie) {
  1325. WFD_MSG_ERR("Failed to open encoder subdevice: %d\n", rc);
  1326. goto err_venc;
  1327. }
  1328. inst->venc_inst = enc_mops.cookie;
  1329. vsg_mops.encode_frame = vsg_encode_frame;
  1330. vsg_mops.release_input_frame = vsg_release_input_frame;
  1331. vsg_mops.cbdata = filp;
  1332. rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl, VSG_OPEN,
  1333. &vsg_mops);
  1334. if (rc) {
  1335. WFD_MSG_ERR("Failed to open vsg subdevice: %d\n", rc);
  1336. goto err_vsg_open;
  1337. }
  1338. wfd_initialize_vb2_queue(&inst->vid_bufq, filp);
  1339. wfd_set_default_properties(filp);
  1340. WFD_MSG_DBG("wfd_open: X\n");
  1341. return rc;
  1342. err_vsg_open:
  1343. v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl, CLOSE, NULL);
  1344. err_venc:
  1345. v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
  1346. MDP_CLOSE, (void *)inst->mdp_inst);
  1347. err_mdp_open:
  1348. v4l2_fh_del(&inst->event_handler);
  1349. mutex_lock(&wfd_dev->dev_lock);
  1350. wfd_dev->in_use = false;
  1351. mutex_unlock(&wfd_dev->dev_lock);
  1352. kfree(inst);
  1353. err_dev_busy:
  1354. return rc;
  1355. }
/*
 * Release handler.  Tears down the instance in strict reverse order of
 * wfd_open(): stop streaming, release the vb2 queue and input buffers,
 * close the MDP/encoder/VSG subdevices, then free bookkeeping state and
 * mark the device free.  Always returns 0; subdevice close failures are
 * only logged.
 */
static int wfd_close(struct file *filp)
{
	struct wfd_inst *inst;
	struct wfd_device *wfd_dev;
	int rc = 0;
	wfd_dev = video_drvdata(filp);
	WFD_MSG_DBG("wfd_close: E\n");
	inst = file_to_inst(filp);
	if (inst) {
		/* Ensure the pipeline is quiesced before teardown */
		wfdioc_streamoff(filp, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE);
		vb2_queue_release(&inst->vid_bufq);
		wfd_free_input_buffers(wfd_dev, inst);
		rc = v4l2_subdev_call(&wfd_dev->mdp_sdev, core, ioctl,
				MDP_CLOSE, (void *)inst->mdp_inst);
		if (rc)
			WFD_MSG_ERR("Failed to CLOSE mdp subdevice: %d\n", rc);
		rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, ioctl,
				CLOSE, (void *)inst->venc_inst);
		if (rc)
			WFD_MSG_ERR("Failed to CLOSE enc subdev: %d\n", rc);
		rc = v4l2_subdev_call(&wfd_dev->vsg_sdev, core, ioctl,
				VSG_CLOSE, NULL);
		if (rc)
			WFD_MSG_ERR("Failed to CLOSE vsg subdev: %d\n", rc);
		wfd_stats_deinit(&inst->stats);
		v4l2_fh_del(&inst->event_handler);
		mutex_destroy(&inst->lock);
		mutex_destroy(&inst->vb2_lock);
		kfree(inst);
	}
	/* Allow the next open() to claim the device */
	mutex_lock(&wfd_dev->dev_lock);
	wfd_dev->in_use = false;
	mutex_unlock(&wfd_dev->dev_lock);
	WFD_MSG_DBG("wfd_close: X\n");
	return 0;
}
  1392. unsigned int wfd_poll(struct file *filp, struct poll_table_struct *pt)
  1393. {
  1394. struct wfd_inst *inst = file_to_inst(filp);
  1395. unsigned long flags = 0;
  1396. bool streamoff = false;
  1397. poll_wait(filp, &inst->event_handler.wait, pt);
  1398. mutex_lock(&inst->lock);
  1399. streamoff = inst->streamoff;
  1400. mutex_unlock(&inst->lock);
  1401. if (v4l2_event_pending(&inst->event_handler))
  1402. flags |= POLLPRI;
  1403. if (streamoff)
  1404. flags |= POLLERR;
  1405. return flags;
  1406. }
/* File operations for the WFD video device node. */
static const struct v4l2_file_operations g_wfd_fops = {
	.owner = THIS_MODULE,
	.open = wfd_open,
	.release = wfd_close,
	.ioctl = video_ioctl2,
	.poll = wfd_poll,
};
/*
 * No-op release: the video_device is freed explicitly via
 * video_device_release() in the setup/remove paths.
 */
void release_video_device(struct video_device *pvdev)
{
}
  1417. static int wfd_dev_setup(struct wfd_device *wfd_dev, int dev_num,
  1418. struct platform_device *pdev)
  1419. {
  1420. int rc = 0;
  1421. rc = v4l2_device_register(&pdev->dev, &wfd_dev->v4l2_dev);
  1422. if (rc) {
  1423. WFD_MSG_ERR("Failed to register the video device\n");
  1424. goto err_v4l2_registration;
  1425. }
  1426. wfd_dev->pvdev = video_device_alloc();
  1427. if (!wfd_dev->pvdev) {
  1428. WFD_MSG_ERR("Failed to allocate video device\n");
  1429. rc = -ENOMEM;
  1430. goto err_video_device_alloc;
  1431. }
  1432. wfd_dev->pvdev->release = release_video_device;
  1433. wfd_dev->pvdev->fops = &g_wfd_fops;
  1434. wfd_dev->pvdev->ioctl_ops = &g_wfd_ioctl_ops;
  1435. rc = video_register_device(wfd_dev->pvdev, VFL_TYPE_GRABBER,
  1436. dev_num);
  1437. if (rc) {
  1438. WFD_MSG_ERR("Failed to register the device\n");
  1439. goto err_video_register_device;
  1440. }
  1441. video_set_drvdata(wfd_dev->pvdev, wfd_dev);
  1442. v4l2_subdev_init(&wfd_dev->mdp_sdev, &mdp_subdev_ops);
  1443. strncpy(wfd_dev->mdp_sdev.name, "wfd-mdp", V4L2_SUBDEV_NAME_SIZE);
  1444. rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev,
  1445. &wfd_dev->mdp_sdev);
  1446. if (rc) {
  1447. WFD_MSG_ERR("Failed to register mdp subdevice: %d\n", rc);
  1448. goto err_mdp_register_subdev;
  1449. }
  1450. v4l2_subdev_init(&wfd_dev->enc_sdev, &enc_subdev_ops);
  1451. strncpy(wfd_dev->enc_sdev.name, "wfd-venc", V4L2_SUBDEV_NAME_SIZE);
  1452. rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev,
  1453. &wfd_dev->enc_sdev);
  1454. if (rc) {
  1455. WFD_MSG_ERR("Failed to register encoder subdevice: %d\n", rc);
  1456. goto err_venc_register_subdev;
  1457. }
  1458. rc = v4l2_subdev_call(&wfd_dev->enc_sdev, core, init, 0);
  1459. if (rc) {
  1460. WFD_MSG_ERR("Failed to initiate encoder device %d\n", rc);
  1461. goto err_venc_init;
  1462. }
  1463. v4l2_subdev_init(&wfd_dev->vsg_sdev, &vsg_subdev_ops);
  1464. strncpy(wfd_dev->vsg_sdev.name, "wfd-vsg", V4L2_SUBDEV_NAME_SIZE);
  1465. rc = v4l2_device_register_subdev(&wfd_dev->v4l2_dev,
  1466. &wfd_dev->vsg_sdev);
  1467. if (rc) {
  1468. WFD_MSG_ERR("Failed to register vsg subdevice: %d\n", rc);
  1469. goto err_venc_init;
  1470. }
  1471. WFD_MSG_DBG("__wfd_probe: X\n");
  1472. return rc;
  1473. err_venc_init:
  1474. v4l2_device_unregister_subdev(&wfd_dev->enc_sdev);
  1475. err_venc_register_subdev:
  1476. v4l2_device_unregister_subdev(&wfd_dev->mdp_sdev);
  1477. err_mdp_register_subdev:
  1478. video_unregister_device(wfd_dev->pvdev);
  1479. err_video_register_device:
  1480. video_device_release(wfd_dev->pvdev);
  1481. err_video_device_alloc:
  1482. v4l2_device_unregister(&wfd_dev->v4l2_dev);
  1483. err_v4l2_registration:
  1484. return rc;
  1485. }
/*
 * Platform probe.  Allocates an array of WFD_NUM_DEVICES device
 * structures, creates one shared ion client, and registers each device
 * via wfd_dev_setup(); on a mid-loop failure, previously-registered
 * devices are unwound.  The original platform_data is read first and
 * then replaced with the wfd_dev array (retrieved again in
 * __wfd_remove()).
 */
static int __devinit __wfd_probe(struct platform_device *pdev)
{
	int rc = 0, c = 0;
	struct wfd_device *wfd_dev; /* Should be taken as an array*/
	struct ion_client *ion_client = NULL;
	struct msm_wfd_platform_data *wfd_priv;
	WFD_MSG_DBG("__wfd_probe: E\n");
	wfd_dev = kzalloc(sizeof(*wfd_dev)*WFD_NUM_DEVICES, GFP_KERNEL);
	if (!wfd_dev) {
		WFD_MSG_ERR("Could not allocate memory for "
			"wfd device\n");
		rc = -ENOMEM;
		goto err_v4l2_probe;
	}
	wfd_priv = pdev->dev.platform_data;
	pdev->dev.platform_data = (void *) wfd_dev;
	ion_client = msm_ion_client_create(-1, "wfd");
	rc = wfd_stats_setup();
	if (rc) {
		WFD_MSG_ERR("No debugfs support: %d\n", rc);
		/* Don't treat this as a fatal err */
		rc = 0;
	}
	if (!ion_client) {
		WFD_MSG_ERR("Failed to create ion client\n");
		rc = -ENODEV;
		goto err_v4l2_probe;
	}
	for (c = 0; c < WFD_NUM_DEVICES; ++c) {
		rc = wfd_dev_setup(&wfd_dev[c],
			WFD_DEVICE_NUMBER_BASE + c, pdev);
		if (rc) {
			/* Clear out old devices */
			for (--c; c >= 0; --c) {
				v4l2_device_unregister_subdev(
						&wfd_dev[c].vsg_sdev);
				v4l2_device_unregister_subdev(
						&wfd_dev[c].enc_sdev);
				v4l2_device_unregister_subdev(
						&wfd_dev[c].mdp_sdev);
				video_unregister_device(wfd_dev[c].pvdev);
				video_device_release(wfd_dev[c].pvdev);
				v4l2_device_unregister(&wfd_dev[c].v4l2_dev);
			}
			/* NOTE(review): the ion client is not destroyed
			 * on this path — looks like a leak; verify. */
			goto err_v4l2_probe;
		}
		/* Other device specific stuff */
		mutex_init(&wfd_dev[c].dev_lock);
		wfd_dev[c].ion_client = ion_client;
		wfd_dev[c].in_use = false;
		if (wfd_priv && wfd_priv->wfd_check_mdp_iommu_split) {
			wfd_dev[c].mdp_iommu_split_domain =
				wfd_priv->wfd_check_mdp_iommu_split();
		}
		/* The secure device number gets the secure flag */
		switch (WFD_DEVICE_NUMBER_BASE + c) {
		case WFD_DEVICE_SECURE:
			wfd_dev[c].secure = true;
			break;
		default:
			break;
		}
	}
	WFD_MSG_DBG("__wfd_probe: X\n");
	return rc;
err_v4l2_probe:
	kfree(wfd_dev);
	return rc;
}
  1554. static int __devexit __wfd_remove(struct platform_device *pdev)
  1555. {
  1556. struct wfd_device *wfd_dev;
  1557. int c = 0;
  1558. wfd_dev = (struct wfd_device *)pdev->dev.platform_data;
  1559. WFD_MSG_DBG("Inside wfd_remove\n");
  1560. if (!wfd_dev) {
  1561. WFD_MSG_ERR("Error removing WFD device");
  1562. return -ENODEV;
  1563. }
  1564. wfd_stats_teardown();
  1565. for (c = 0; c < WFD_NUM_DEVICES; ++c) {
  1566. v4l2_device_unregister_subdev(&wfd_dev[c].vsg_sdev);
  1567. v4l2_device_unregister_subdev(&wfd_dev[c].enc_sdev);
  1568. v4l2_device_unregister_subdev(&wfd_dev[c].mdp_sdev);
  1569. video_unregister_device(wfd_dev[c].pvdev);
  1570. video_device_release(wfd_dev[c].pvdev);
  1571. v4l2_device_unregister(&wfd_dev[c].v4l2_dev);
  1572. }
  1573. kfree(wfd_dev);
  1574. return 0;
  1575. }
  1576. static const struct of_device_id msm_wfd_dt_match[] = {
  1577. {.compatible = "qcom,msm-wfd"},
  1578. {}
  1579. };
  1580. MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
/* Platform driver glue: matched by name or by the DT table above. */
static struct platform_driver wfd_driver = {
	.probe = __wfd_probe,
	.remove = __wfd_remove,
	.driver = {
		.name = "msm_wfd",
		.owner = THIS_MODULE,
		.of_match_table = msm_wfd_dt_match,
	}
};
  1590. static int __init wfd_init(void)
  1591. {
  1592. int rc = 0;
  1593. WFD_MSG_DBG("Calling init function of wfd driver\n");
  1594. rc = platform_driver_register(&wfd_driver);
  1595. if (rc) {
  1596. WFD_MSG_ERR("failed to load the driver\n");
  1597. goto err_platform_registration;
  1598. }
  1599. err_platform_registration:
  1600. return rc;
  1601. }
/* Module exit: unregister the platform driver. */
static void __exit wfd_exit(void)
{
	WFD_MSG_DBG("wfd_exit: X\n");
	platform_driver_unregister(&wfd_driver);
}
/* Standard module entry/exit hooks */
module_init(wfd_init);
module_exit(wfd_exit);