/* enc-venus-subdev.c */
  1. /* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/bitmap.h>
  14. #include <linux/completion.h>
  15. #include <linux/ion.h>
  16. #include <linux/jiffies.h>
  17. #include <linux/kthread.h>
  18. #include <linux/list.h>
  19. #include <linux/mutex.h>
  20. #include <linux/wait.h>
  21. #include <linux/slab.h>
  22. #include <linux/workqueue.h>
  23. #include <mach/iommu_domains.h>
  24. #include <media/msm_vidc.h>
  25. #include <media/v4l2-subdev.h>
  26. #include "enc-subdev.h"
  27. #include "wfd-util.h"
  28. #define BUF_TYPE_OUTPUT V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
  29. #define BUF_TYPE_INPUT V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
  30. #define TIMEOUT msecs_to_jiffies(100)
/* Shared ion client for all encoder instances; lazily created in venc_init(). */
static struct ion_client *venc_ion_client;
/* Forward declaration — definition is later in this file (not in this chunk). */
static long venc_secure(struct v4l2_subdev *sd);
/*
 * Tracks which buffer indices are free (bit clear) or busy (bit set).
 * Manipulated via next_free_index()/mark_index_busy()/mark_index_free().
 */
struct index_bitmap {
	unsigned long *bitmap;
	int size; /* allocated size of @bitmap, as passed to kzalloc() */
	int size_bits; /*Size in bits, not necessarily size/8 */
};
/*
 * Per-open encoder instance.  Allocated in venc_open(), stored in the
 * subdev's dev_priv, and freed in venc_close().
 */
struct venc_inst {
	void *vidc_context;	/* opaque handle from msm_vidc_open() */
	struct mutex lock;	/* guards the free_*_indices bitmaps and
				 * callback_thread_running */
	struct venc_msg_ops vmops;	/* client callbacks (buffer done, events) */
	/* heads of the lists of buffers registered with vidc */
	struct mem_region registered_input_bufs, registered_output_bufs;
	struct index_bitmap free_input_indices, free_output_indices;
	int num_output_planes, num_input_planes;
	struct task_struct *callback_thread;	/* runs venc_vidc_callback_thread() */
	bool callback_thread_running;
	struct completion dq_complete, cmd_complete;
	bool secure;	/* secure (content-protected) session */
	struct workqueue_struct *fill_buf_wq;	/* defers fill-buffer work */
};
/*
 * Work item queued on inst->fill_buf_wq; carries the region to submit.
 * (The queueing site is not in this chunk — presumably a fill_outbuf path.)
 */
struct fill_buf_work {
	struct venc_inst *inst;
	struct mem_region *mregion;
	struct work_struct work;
};
/* vidc events the callback thread cares about; see subscribe_events(). */
static const int subscribed_events[] = {
	V4L2_EVENT_MSM_VIDC_CLOSE_DONE,
	V4L2_EVENT_MSM_VIDC_FLUSH_DONE,
	V4L2_EVENT_MSM_VIDC_SYS_ERROR,
};
/*
 * Subdev op: load encoder firmware.  Intentionally a no-op — presumably
 * the venus core loads firmware when it is opened (TODO confirm against
 * msm_vidc).
 */
int venc_load_fw(struct v4l2_subdev *sd)
{
	/*No need to explicitly load the fw */
	return 0;
}
  66. int venc_init(struct v4l2_subdev *sd, u32 val)
  67. {
  68. if (!venc_ion_client)
  69. venc_ion_client = msm_ion_client_create(-1, "wfd_enc_subdev");
  70. return venc_ion_client ? 0 : -ENOMEM;
  71. }
  72. static int invalidate_cache(struct ion_client *client,
  73. struct mem_region *mregion)
  74. {
  75. if (!client || !mregion) {
  76. WFD_MSG_ERR(
  77. "Failed to flush ion buffer: invalid client or region\n");
  78. return -EINVAL;
  79. } else if (!mregion->ion_handle) {
  80. WFD_MSG_ERR(
  81. "Failed to flush ion buffer: not an ion buffer\n");
  82. return -EINVAL;
  83. }
  84. return msm_ion_do_cache_op(client,
  85. mregion->ion_handle,
  86. mregion->kvaddr,
  87. mregion->size,
  88. ION_IOC_INV_CACHES);
  89. }
  90. static int next_free_index(struct index_bitmap *index_bitmap)
  91. {
  92. int index = find_first_zero_bit(index_bitmap->bitmap,
  93. index_bitmap->size_bits);
  94. return (index >= index_bitmap->size_bits) ?
  95. -1 : index;
  96. }
  97. static int mark_index_busy(struct index_bitmap *index_bitmap, int index)
  98. {
  99. if (index > index_bitmap->size_bits) {
  100. WFD_MSG_WARN("Marking unknown index as busy\n");
  101. return -EINVAL;
  102. }
  103. set_bit(index, index_bitmap->bitmap);
  104. return 0;
  105. }
  106. static int mark_index_free(struct index_bitmap *index_bitmap, int index)
  107. {
  108. if (index > index_bitmap->size_bits) {
  109. WFD_MSG_WARN("Marking unknown index as free\n");
  110. return -EINVAL;
  111. }
  112. clear_bit(index, index_bitmap->bitmap);
  113. return 0;
  114. }
  115. static int get_list_len(struct mem_region *list)
  116. {
  117. struct mem_region *curr = NULL;
  118. int index = 0;
  119. list_for_each_entry(curr, &list->list, list) {
  120. ++index;
  121. }
  122. return index;
  123. }
  124. static struct mem_region *get_registered_mregion(struct mem_region *list,
  125. struct mem_region *mregion)
  126. {
  127. struct mem_region *curr = NULL;
  128. list_for_each_entry(curr, &list->list, list) {
  129. if (unlikely(mem_region_equals(curr, mregion)))
  130. return curr;
  131. }
  132. return NULL;
  133. }
/*
 * Kernel thread that services vidc activity for one encoder instance:
 * dequeues events (POLLPRI) and finished input/output buffers
 * (POLLOUT/POLLIN), dispatches the client's vmops callbacks, and frees
 * the buffer's index in the relevant bitmap.  Exits on POLLERR, on
 * CLOSE_DONE/SYS_ERROR events, or when kthread_stop() is called.
 */
static int venc_vidc_callback_thread(void *data)
{
	struct venc_inst *inst = data;
	WFD_MSG_DBG("Starting callback thread\n");
	while (!kthread_should_stop()) {
		bool dequeue_buf = false;
		struct v4l2_buffer buffer = {0};
		struct v4l2_event event = {0};
		int num_planes = 0;
		/* Blocks until vidc reports an event or a ready buffer. */
		int flags = msm_vidc_wait(inst->vidc_context);
		if (flags & POLLERR) {
			WFD_MSG_ERR("Encoder reported error\n");
			break;
		}
		/* POLLPRI: an event is pending on the event queue. */
		if (flags & POLLPRI) {
			bool bail_out = false;
			msm_vidc_dqevent(inst->vidc_context, &event);
			switch (event.type) {
			case V4L2_EVENT_MSM_VIDC_CLOSE_DONE:
				WFD_MSG_DBG("enc callback thread shutting " \
						"down normally\n");
				bail_out = true;
				break;
			case V4L2_EVENT_MSM_VIDC_SYS_ERROR:
				inst->vmops.on_event(inst->vmops.cbdata,
						VENC_EVENT_HARDWARE_ERROR);
				bail_out = true;
				break;
			default:
				WFD_MSG_INFO("Got unknown event %d, ignoring\n",
						event.type);
			}
			/* Wake anyone waiting on a command (see venc_close). */
			complete_all(&inst->cmd_complete);
			if (bail_out)
				break;
		}
		/* POLLIN/POLLRDNORM: an encoded (capture) buffer is ready. */
		if (flags & POLLIN || flags & POLLRDNORM) {
			buffer.type = BUF_TYPE_OUTPUT;
			dequeue_buf = true;
			num_planes = inst->num_output_planes;
			WFD_MSG_DBG("Output buffer ready!\n");
		}
		/* POLLOUT/POLLWRNORM: an input buffer was consumed. */
		if (flags & POLLOUT || flags & POLLWRNORM) {
			buffer.type = BUF_TYPE_INPUT;
			dequeue_buf = true;
			num_planes = inst->num_input_planes;
			WFD_MSG_DBG("Input buffer ready!\n");
		}
		if (dequeue_buf) {
			int rc = 0;
			struct v4l2_plane *planes = NULL;
			struct mem_region *curr = NULL, *mregion = NULL;
			struct list_head *reg_bufs = NULL;
			struct index_bitmap *bitmap = NULL;
			/* NOTE(review): this allocation is not checked — on
			 * failure msm_vidc_dqbuf is handed a NULL planes
			 * array.  Worth hardening. */
			planes = kzalloc(sizeof(*planes) * num_planes,
					GFP_KERNEL);
			buffer.m.planes = planes;
			buffer.length = 1;
			buffer.memory = V4L2_MEMORY_USERPTR;
			rc = msm_vidc_dqbuf(inst->vidc_context, &buffer);
			if (rc) {
				WFD_MSG_ERR("Error dequeuing buffer " \
						"from vidc: %d", rc);
				goto abort_dequeue;
			}
			/* Map the dequeued buffer back to our bookkeeping
			 * (registered list + free-index bitmap) by type. */
			reg_bufs = buffer.type == BUF_TYPE_OUTPUT ?
				&inst->registered_output_bufs.list :
				&inst->registered_input_bufs.list;
			bitmap = buffer.type == BUF_TYPE_OUTPUT ?
				&inst->free_output_indices :
				&inst->free_input_indices;
			/* Match on the iommu address vidc echoed back. */
			list_for_each_entry(curr, reg_bufs, list) {
				if ((u32)curr->paddr ==
						buffer.m.planes[0].m.userptr) {
					mregion = curr;
					break;
				}
			}
			if (!mregion) {
				WFD_MSG_ERR("Got done msg for unknown buf\n");
				goto abort_dequeue;
			}
			if (buffer.type == BUF_TYPE_OUTPUT &&
					inst->vmops.op_buffer_done) {
				/* Propagate flags/timestamp/bytesused to the
				 * vb2 buffer the client handed us. */
				struct vb2_buffer *vb =
					(struct vb2_buffer *)mregion->cookie;
				vb->v4l2_buf.flags = buffer.flags;
				vb->v4l2_buf.timestamp = buffer.timestamp;
				vb->v4l2_planes[0].bytesused =
					buffer.m.planes[0].bytesused;
				/* Buffer is on its way to userspace, so
				 * invalidate the cache */
				rc = invalidate_cache(venc_ion_client, mregion);
				if (rc) {
					WFD_MSG_WARN(
						"Failed to invalidate cache %d\n",
						rc);
					/* Not fatal, move on */
				}
				inst->vmops.op_buffer_done(
					inst->vmops.cbdata, 0, vb);
			} else if (buffer.type == BUF_TYPE_INPUT &&
					inst->vmops.ip_buffer_done) {
				inst->vmops.ip_buffer_done(
						inst->vmops.cbdata,
						0, mregion);
			}
			/* Wake anyone waiting for a dequeue to complete. */
			complete_all(&inst->dq_complete);
			mutex_lock(&inst->lock);
			mark_index_free(bitmap, buffer.index);
			mutex_unlock(&inst->lock);
abort_dequeue:
			kfree(planes);
		}
	}
	WFD_MSG_DBG("Exiting callback thread\n");
	mutex_lock(&inst->lock);
	inst->callback_thread_running = false;
	mutex_unlock(&inst->lock);
	return 0;
}
  255. static long set_default_properties(struct venc_inst *inst)
  256. {
  257. struct v4l2_control ctrl = {0};
  258. /* Set the IDR period as 1. The venus core doesn't give
  259. * the sps/pps for I-frames, only IDR. */
  260. ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD;
  261. ctrl.value = 1;
  262. return msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
  263. }
  264. static int subscribe_events(struct venc_inst *inst)
  265. {
  266. struct v4l2_event_subscription event = {0};
  267. int c = 0, rc = 0;
  268. for (c = 0; c < ARRAY_SIZE(subscribed_events); c++) {
  269. event.type = subscribed_events[c];
  270. rc = msm_vidc_subscribe_event(inst->vidc_context, &event);
  271. if (rc) {
  272. WFD_MSG_ERR("Failed to subscribe to event 0x%x\n",
  273. subscribed_events[c]);
  274. return rc;
  275. }
  276. }
  277. return 0;
  278. }
  279. static void unsubscribe_events(struct venc_inst *inst)
  280. {
  281. struct v4l2_event_subscription event = {0};
  282. int c = 0, rc = 0;
  283. for (c = 0; c < ARRAY_SIZE(subscribed_events); c++) {
  284. event.type = subscribed_events[c];
  285. rc = msm_vidc_unsubscribe_event(inst->vidc_context, &event);
  286. if (rc) {
  287. /* Just log and ignore failiures */
  288. WFD_MSG_WARN("Failed to unsubscribe to event 0x%x\n",
  289. subscribed_events[c]);
  290. }
  291. }
  292. }
  293. static long venc_open(struct v4l2_subdev *sd, void *arg)
  294. {
  295. struct venc_inst *inst = NULL;
  296. struct venc_msg_ops *vmops = arg;
  297. int rc = 0;
  298. if (!vmops) {
  299. WFD_MSG_ERR("Callbacks required for %s\n", __func__);
  300. rc = -EINVAL;
  301. goto venc_open_fail;
  302. } else if (!sd) {
  303. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  304. rc = -EINVAL;
  305. goto venc_open_fail;
  306. }
  307. inst = kzalloc(sizeof(*inst), GFP_KERNEL);
  308. if (!inst) {
  309. WFD_MSG_ERR("Failed to allocate memory\n");
  310. rc = -EINVAL;
  311. goto venc_open_fail;
  312. }
  313. inst->vmops = *vmops;
  314. inst->secure = vmops->secure; /* We need to inform vidc, but defer
  315. until after s_fmt() */
  316. INIT_LIST_HEAD(&inst->registered_output_bufs.list);
  317. INIT_LIST_HEAD(&inst->registered_input_bufs.list);
  318. init_completion(&inst->dq_complete);
  319. init_completion(&inst->cmd_complete);
  320. mutex_init(&inst->lock);
  321. inst->fill_buf_wq = create_singlethread_workqueue("venc_vidc_ftb_wq");
  322. if (!inst->fill_buf_wq) {
  323. WFD_MSG_ERR("Failed to create ftb wq\n");
  324. rc = -ENOMEM;
  325. goto vidc_wq_create_fail;
  326. }
  327. inst->vidc_context = msm_vidc_open(MSM_VIDC_CORE_VENUS,
  328. MSM_VIDC_ENCODER);
  329. if (!inst->vidc_context) {
  330. WFD_MSG_ERR("Failed to create vidc context\n");
  331. rc = -ENXIO;
  332. goto vidc_open_fail;
  333. }
  334. rc = subscribe_events(inst);
  335. if (rc) {
  336. WFD_MSG_ERR("Failed to subscribe to events\n");
  337. goto vidc_subscribe_fail;
  338. }
  339. inst->callback_thread = kthread_run(venc_vidc_callback_thread, inst,
  340. "venc_vidc_callback_thread");
  341. if (IS_ERR(inst->callback_thread)) {
  342. WFD_MSG_ERR("Failed to create callback thread\n");
  343. rc = PTR_ERR(inst->callback_thread);
  344. inst->callback_thread = NULL;
  345. goto vidc_kthread_create_fail;
  346. }
  347. inst->callback_thread_running = true;
  348. sd->dev_priv = inst;
  349. vmops->cookie = inst;
  350. return 0;
  351. vidc_kthread_create_fail:
  352. unsubscribe_events(inst);
  353. vidc_subscribe_fail:
  354. msm_vidc_close(inst->vidc_context);
  355. vidc_open_fail:
  356. destroy_workqueue(inst->fill_buf_wq);
  357. vidc_wq_create_fail:
  358. kfree(inst);
  359. venc_open_fail:
  360. return rc;
  361. }
  362. static long venc_close(struct v4l2_subdev *sd, void *arg)
  363. {
  364. struct venc_inst *inst = NULL;
  365. struct v4l2_encoder_cmd enc_cmd = {0};
  366. int rc = 0;
  367. if (!sd) {
  368. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  369. rc = -EINVAL;
  370. goto venc_close_fail;
  371. }
  372. inst = (struct venc_inst *)sd->dev_priv;
  373. enc_cmd.cmd = V4L2_ENC_CMD_STOP;
  374. msm_vidc_encoder_cmd(inst->vidc_context, &enc_cmd);
  375. wait_for_completion(&inst->cmd_complete);
  376. destroy_workqueue(inst->fill_buf_wq);
  377. if (inst->callback_thread && inst->callback_thread_running)
  378. kthread_stop(inst->callback_thread);
  379. unsubscribe_events(inst);
  380. rc = msm_vidc_close(inst->vidc_context);
  381. if (rc)
  382. WFD_MSG_WARN("Failed to close vidc context\n");
  383. kfree(inst);
  384. sd->dev_priv = inst = NULL;
  385. venc_close_fail:
  386. return rc;
  387. }
  388. static long venc_get_buffer_req(struct v4l2_subdev *sd, void *arg)
  389. {
  390. int rc = 0;
  391. struct venc_inst *inst = NULL;
  392. struct bufreq *bufreq = arg;
  393. struct v4l2_requestbuffers v4l2_bufreq = {0};
  394. struct v4l2_format v4l2_format = {0};
  395. if (!sd) {
  396. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  397. rc = -EINVAL;
  398. goto venc_buf_req_fail;
  399. } else if (!arg) {
  400. WFD_MSG_ERR("Invalid buffer requirements\n");
  401. rc = -EINVAL;
  402. goto venc_buf_req_fail;
  403. }
  404. inst = (struct venc_inst *)sd->dev_priv;
  405. /* Get buffer count */
  406. v4l2_bufreq = (struct v4l2_requestbuffers) {
  407. .count = bufreq->count,
  408. .type = BUF_TYPE_OUTPUT,
  409. .memory = V4L2_MEMORY_USERPTR,
  410. };
  411. rc = msm_vidc_reqbufs(inst->vidc_context, &v4l2_bufreq);
  412. if (rc) {
  413. WFD_MSG_ERR("Failed getting buffer requirements\n");
  414. goto venc_buf_req_fail;
  415. }
  416. /* Get buffer size */
  417. v4l2_format.type = BUF_TYPE_OUTPUT;
  418. rc = msm_vidc_g_fmt(inst->vidc_context, &v4l2_format);
  419. if (rc) {
  420. WFD_MSG_ERR("Failed getting OP buffer size\n");
  421. goto venc_buf_req_fail;
  422. }
  423. bufreq->count = v4l2_bufreq.count;
  424. bufreq->size = v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage;
  425. inst->free_output_indices.size_bits = bufreq->count;
  426. inst->free_output_indices.size = roundup(bufreq->count,
  427. sizeof(unsigned long)) / sizeof(unsigned long);
  428. inst->free_output_indices.bitmap = kzalloc(inst->free_output_indices.
  429. size, GFP_KERNEL);
  430. venc_buf_req_fail:
  431. return rc;
  432. }
  433. static long venc_set_buffer_req(struct v4l2_subdev *sd, void *arg)
  434. {
  435. int rc = 0;
  436. struct venc_inst *inst = NULL;
  437. struct bufreq *bufreq = arg;
  438. struct v4l2_requestbuffers v4l2_bufreq = {0};
  439. struct v4l2_format v4l2_format = {0};
  440. if (!sd) {
  441. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  442. rc = -EINVAL;
  443. goto venc_buf_req_fail;
  444. } else if (!arg) {
  445. WFD_MSG_ERR("Invalid buffer requirements\n");
  446. rc = -EINVAL;
  447. goto venc_buf_req_fail;
  448. }
  449. inst = (struct venc_inst *)sd->dev_priv;
  450. /* Attempt to set buffer count */
  451. v4l2_bufreq = (struct v4l2_requestbuffers) {
  452. .count = bufreq->count,
  453. .type = BUF_TYPE_INPUT,
  454. .memory = V4L2_MEMORY_USERPTR,
  455. };
  456. rc = msm_vidc_reqbufs(inst->vidc_context, &v4l2_bufreq);
  457. if (rc) {
  458. WFD_MSG_ERR("Failed getting buffer requirements");
  459. goto venc_buf_req_fail;
  460. }
  461. /* Get buffer size */
  462. v4l2_format.type = BUF_TYPE_INPUT;
  463. rc = msm_vidc_g_fmt(inst->vidc_context, &v4l2_format);
  464. if (rc) {
  465. WFD_MSG_ERR("Failed getting OP buffer size\n");
  466. goto venc_buf_req_fail;
  467. }
  468. bufreq->count = v4l2_bufreq.count;
  469. bufreq->size = ALIGN(v4l2_format.fmt.pix_mp.plane_fmt[0].sizeimage,
  470. inst->secure ? SZ_1M : SZ_4K);
  471. inst->free_input_indices.size_bits = bufreq->count;
  472. inst->free_input_indices.size = roundup(bufreq->count,
  473. sizeof(unsigned long)) / sizeof(unsigned long);
  474. inst->free_input_indices.bitmap = kzalloc(inst->free_input_indices.
  475. size, GFP_KERNEL);
  476. venc_buf_req_fail:
  477. return rc;
  478. }
  479. static long venc_start(struct v4l2_subdev *sd)
  480. {
  481. struct venc_inst *inst = NULL;
  482. int rc = 0;
  483. if (!sd) {
  484. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  485. rc = -EINVAL;
  486. goto venc_start_fail;
  487. }
  488. inst = (struct venc_inst *)sd->dev_priv;
  489. if (set_default_properties(inst))
  490. WFD_MSG_WARN("Couldn't set default properties\n");
  491. rc = msm_vidc_streamon(inst->vidc_context, BUF_TYPE_OUTPUT);
  492. if (rc) {
  493. WFD_MSG_ERR("Failed to streamon vidc's output port");
  494. goto venc_start_fail;
  495. }
  496. rc = msm_vidc_streamon(inst->vidc_context, BUF_TYPE_INPUT);
  497. if (rc) {
  498. WFD_MSG_ERR("Failed to streamon vidc's input port");
  499. goto venc_start_fail;
  500. }
  501. venc_start_fail:
  502. return rc;
  503. }
  504. static long venc_stop(struct v4l2_subdev *sd)
  505. {
  506. struct venc_inst *inst = NULL;
  507. int rc = 0;
  508. if (!sd) {
  509. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  510. rc = -EINVAL;
  511. goto venc_stop_fail;
  512. }
  513. inst = (struct venc_inst *)sd->dev_priv;
  514. flush_workqueue(inst->fill_buf_wq);
  515. rc = msm_vidc_streamoff(inst->vidc_context, BUF_TYPE_INPUT);
  516. if (rc) {
  517. WFD_MSG_ERR("Failed to streamoff vidc's input port");
  518. goto venc_stop_fail;
  519. }
  520. rc = msm_vidc_streamoff(inst->vidc_context, BUF_TYPE_OUTPUT);
  521. if (rc) {
  522. WFD_MSG_ERR("Failed to streamoff vidc's output port");
  523. goto venc_stop_fail;
  524. }
  525. venc_stop_fail:
  526. return rc;
  527. }
  528. static void populate_planes(struct v4l2_plane *planes, int num_planes,
  529. void *userptr, int size)
  530. {
  531. int c = 0;
  532. planes[0] = (struct v4l2_plane) {
  533. .length = size,
  534. .m.userptr = (int)userptr,
  535. };
  536. for (c = 1; c < num_planes - 1; ++c) {
  537. planes[c] = (struct v4l2_plane) {
  538. .length = 0,
  539. .m.userptr = (int)NULL,
  540. };
  541. }
  542. }
  543. static long venc_set_input_buffer(struct v4l2_subdev *sd, void *arg)
  544. {
  545. int rc = 0;
  546. struct venc_inst *inst = NULL;
  547. struct v4l2_buffer buf = {0};
  548. struct v4l2_plane *planes = NULL;
  549. struct mem_region *mregion = arg;
  550. if (!sd) {
  551. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  552. rc = -EINVAL;
  553. goto set_input_buffer_fail;
  554. } else if (!arg) {
  555. WFD_MSG_ERR("Invalid input buffer\n");
  556. rc = -EINVAL;
  557. goto set_input_buffer_fail;
  558. }
  559. inst = (struct venc_inst *)sd->dev_priv;
  560. if (get_registered_mregion(&inst->registered_input_bufs, mregion)) {
  561. WFD_MSG_ERR("Duplicate input buffer\n");
  562. rc = -EEXIST;
  563. goto set_input_buffer_fail;
  564. }
  565. mregion = kzalloc(sizeof(*mregion), GFP_KERNEL);
  566. planes = kzalloc(sizeof(*planes) * inst->num_input_planes, GFP_KERNEL);
  567. if (!mregion || !planes)
  568. return -ENOMEM;
  569. *mregion = *(struct mem_region *)arg;
  570. populate_planes(planes, inst->num_input_planes,
  571. mregion->paddr, mregion->size);
  572. buf = (struct v4l2_buffer) {
  573. .index = get_list_len(&inst->registered_input_bufs),
  574. .type = BUF_TYPE_INPUT,
  575. .bytesused = 0,
  576. .memory = V4L2_MEMORY_USERPTR,
  577. .m.planes = planes,
  578. .length = inst->num_input_planes,
  579. };
  580. WFD_MSG_DBG("Prepare %p with index, %d",
  581. (void *)buf.m.planes[0].m.userptr, buf.index);
  582. rc = msm_vidc_prepare_buf(inst->vidc_context, &buf);
  583. if (rc) {
  584. WFD_MSG_ERR("Failed to prepare input buffer\n");
  585. goto set_input_buffer_fail;
  586. }
  587. list_add_tail(&mregion->list, &inst->registered_input_bufs.list);
  588. kfree(planes);
  589. return 0;
  590. set_input_buffer_fail:
  591. kfree(mregion);
  592. kfree(planes);
  593. return rc;
  594. }
#ifdef CONFIG_MSM_WFD_DEBUG
/* Debug builds map ion buffers into kernel space so they can be inspected. */
static void *venc_map_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	return ion_map_kernel(client, handle);
}
static void venc_unmap_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	ion_unmap_kernel(client, handle);
}
#else
/* Non-debug builds skip the kernel mapping entirely; these are stubs.
 * Callers treat a NULL kvaddr as "not mapped". */
static void *venc_map_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	return NULL;
}
static void venc_unmap_kernel(struct ion_client *client,
		struct ion_handle *handle)
{
	return;
}
#endif
  618. static int venc_map_user_to_kernel(struct venc_inst *inst,
  619. struct mem_region *mregion)
  620. {
  621. int rc = 0;
  622. unsigned long size = 0, align_req = 0, flags = 0;
  623. int domain = 0, partition = 0;
  624. if (!mregion) {
  625. rc = -EINVAL;
  626. goto venc_map_fail;
  627. }
  628. align_req = inst->secure ? SZ_1M : SZ_4K;
  629. if (mregion->size % align_req != 0) {
  630. WFD_MSG_ERR("Memregion not aligned to %ld\n", align_req);
  631. rc = -EINVAL;
  632. goto venc_map_fail;
  633. }
  634. mregion->ion_handle = ion_import_dma_buf(venc_ion_client, mregion->fd);
  635. if (IS_ERR_OR_NULL(mregion->ion_handle)) {
  636. rc = PTR_ERR(mregion->ion_handle);
  637. WFD_MSG_ERR("Failed to get handle: %p, %d, %d, %d\n",
  638. venc_ion_client, mregion->fd, mregion->offset, rc);
  639. mregion->ion_handle = NULL;
  640. goto venc_map_fail;
  641. }
  642. rc = ion_handle_get_flags(venc_ion_client, mregion->ion_handle, &flags);
  643. if (rc) {
  644. WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
  645. goto venc_map_fail;
  646. }
  647. mregion->kvaddr = inst->secure ? NULL :
  648. venc_map_kernel(venc_ion_client, mregion->ion_handle);
  649. if (inst->secure) {
  650. rc = msm_ion_secure_buffer(venc_ion_client,
  651. mregion->ion_handle, VIDEO_BITSTREAM, 0);
  652. if (rc) {
  653. WFD_MSG_ERR("Failed to secure output buffer\n");
  654. goto venc_map_iommu_map_fail;
  655. }
  656. }
  657. rc = msm_vidc_get_iommu_domain_partition(inst->vidc_context,
  658. flags, BUF_TYPE_OUTPUT, &domain, &partition);
  659. if (rc) {
  660. WFD_MSG_ERR("Failed to get domain for output buffer\n");
  661. goto venc_domain_fail;
  662. }
  663. rc = ion_map_iommu(venc_ion_client, mregion->ion_handle,
  664. domain, partition, align_req, 0,
  665. (unsigned long *)&mregion->paddr, &size, 0, 0);
  666. if (rc) {
  667. WFD_MSG_ERR("Failed to map into iommu\n");
  668. goto venc_map_iommu_map_fail;
  669. } else if (size < mregion->size) {
  670. WFD_MSG_ERR("Failed to iommu map the correct size\n");
  671. goto venc_map_iommu_size_fail;
  672. }
  673. return 0;
  674. venc_map_iommu_size_fail:
  675. ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
  676. domain, partition);
  677. venc_domain_fail:
  678. if (inst->secure)
  679. msm_ion_unsecure_buffer(venc_ion_client, mregion->ion_handle);
  680. venc_map_iommu_map_fail:
  681. if (!inst->secure && !IS_ERR_OR_NULL(mregion->kvaddr))
  682. venc_unmap_kernel(venc_ion_client, mregion->ion_handle);
  683. venc_map_fail:
  684. return rc;
  685. }
/*
 * Undo venc_map_user_to_kernel(): unmap from the iommu, drop the kernel
 * mapping, unsecure if needed, and release the ion handle.  A region
 * that was never ion-mapped is silently accepted.
 */
static int venc_unmap_user_to_kernel(struct venc_inst *inst,
		struct mem_region *mregion)
{
	unsigned long flags = 0;
	int domain = 0, partition = 0, rc = 0;
	if (!mregion || !mregion->ion_handle)
		return 0;
	/* NOTE(review): the two early returns below bail out without
	 * ion_free()ing the handle, leaking it on these error paths —
	 * worth restructuring so the handle is always released. */
	rc = ion_handle_get_flags(venc_ion_client, mregion->ion_handle, &flags);
	if (rc) {
		WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
		return rc;
	}
	rc = msm_vidc_get_iommu_domain_partition(inst->vidc_context,
		flags, BUF_TYPE_OUTPUT, &domain, &partition);
	if (rc) {
		WFD_MSG_ERR("Failed to get domain for input buffer\n");
		return rc;
	}
	/* Only unmap what was actually mapped. */
	if (mregion->paddr) {
		ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
			domain, partition);
		mregion->paddr = NULL;
	}
	if (!IS_ERR_OR_NULL(mregion->kvaddr)) {
		venc_unmap_kernel(venc_ion_client, mregion->ion_handle);
		mregion->kvaddr = NULL;
	}
	if (inst->secure)
		msm_ion_unsecure_buffer(venc_ion_client, mregion->ion_handle);
	ion_free(venc_ion_client, mregion->ion_handle);
	return rc;
}
  718. static long venc_set_output_buffer(struct v4l2_subdev *sd, void *arg)
  719. {
  720. int rc = 0;
  721. struct venc_inst *inst = NULL;
  722. struct v4l2_buffer buf = {0};
  723. struct v4l2_plane *planes = NULL;
  724. struct mem_region *mregion = arg;
  725. if (!sd) {
  726. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  727. rc = -EINVAL;
  728. goto venc_set_output_buffer_fail;
  729. } else if (!mregion) {
  730. WFD_MSG_ERR("Invalid output buffer\n");
  731. rc = -EINVAL;
  732. goto venc_set_output_buffer_fail;
  733. }
  734. inst = (struct venc_inst *)sd->dev_priv;
  735. /* Check if buf already registered */
  736. if (get_registered_mregion(&inst->registered_output_bufs, mregion)) {
  737. WFD_MSG_ERR("Duplicate output buffer\n");
  738. rc = -EEXIST;
  739. goto venc_set_output_buffer_fail;
  740. }
  741. mregion = kzalloc(sizeof(*mregion), GFP_KERNEL);
  742. planes = kzalloc(sizeof(*planes) * inst->num_output_planes, GFP_KERNEL);
  743. if (!mregion || !planes) {
  744. WFD_MSG_ERR("Failed to allocate memory\n");
  745. goto venc_set_output_buffer_fail;
  746. }
  747. *mregion = *(struct mem_region *)arg;
  748. INIT_LIST_HEAD(&mregion->list);
  749. rc = venc_map_user_to_kernel(inst, mregion);
  750. if (rc) {
  751. WFD_MSG_ERR("Failed to map output buffer\n");
  752. goto venc_set_output_buffer_map_fail;
  753. }
  754. populate_planes(planes, inst->num_output_planes,
  755. mregion->paddr, mregion->size);
  756. buf = (struct v4l2_buffer) {
  757. .index = get_list_len(&inst->registered_output_bufs),
  758. .type = BUF_TYPE_OUTPUT,
  759. .bytesused = 0,
  760. .memory = V4L2_MEMORY_USERPTR,
  761. .m.planes = planes,
  762. .length = inst->num_output_planes,
  763. };
  764. WFD_MSG_DBG("Prepare %p with index, %d",
  765. (void *)buf.m.planes[0].m.userptr, buf.index);
  766. rc = msm_vidc_prepare_buf(inst->vidc_context, &buf);
  767. if (rc) {
  768. WFD_MSG_ERR("Failed to prepare output buffer\n");
  769. goto venc_set_output_buffer_prepare_fail;
  770. }
  771. list_add_tail(&mregion->list, &inst->registered_output_bufs.list);
  772. kfree(planes);
  773. return 0;
  774. venc_set_output_buffer_prepare_fail:
  775. venc_unmap_user_to_kernel(inst, mregion);
  776. venc_set_output_buffer_map_fail:
  777. kfree(mregion);
  778. kfree(planes);
  779. venc_set_output_buffer_fail:
  780. return rc;
  781. }
  782. static long venc_set_format(struct v4l2_subdev *sd, void *arg)
  783. {
  784. struct venc_inst *inst = NULL;
  785. struct v4l2_format *fmt = arg, temp;
  786. int rc = 0, align_req = 0;
  787. if (!sd) {
  788. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  789. rc = -EINVAL;
  790. goto venc_set_format_fail;
  791. } else if (!fmt) {
  792. WFD_MSG_ERR("Invalid format\n");
  793. rc = -EINVAL;
  794. goto venc_set_format_fail;
  795. } else if (fmt->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
  796. WFD_MSG_ERR("Invalid buffer type %d\n", fmt->type);
  797. rc = -ENOTSUPP;
  798. goto venc_set_format_fail;
  799. }
  800. inst = (struct venc_inst *)sd->dev_priv;
  801. temp = (struct v4l2_format) {
  802. .type = BUF_TYPE_OUTPUT,
  803. .fmt.pix_mp = (struct v4l2_pix_format_mplane) {
  804. .width = fmt->fmt.pix.width,
  805. .height = fmt->fmt.pix.height,
  806. .pixelformat = fmt->fmt.pix.pixelformat,
  807. },
  808. };
  809. rc = msm_vidc_s_fmt(inst->vidc_context, &temp);
  810. if (rc) {
  811. WFD_MSG_ERR("Failed to format for output port\n");
  812. goto venc_set_format_fail;
  813. } else if (!temp.fmt.pix_mp.num_planes) {
  814. WFD_MSG_ERR("No. of planes for output buffers make no sense\n");
  815. rc = -EINVAL;
  816. goto venc_set_format_fail;
  817. }
  818. align_req = inst->secure ? SZ_1M : SZ_4K;
  819. fmt->fmt.pix.sizeimage = ALIGN(temp.fmt.pix_mp.plane_fmt[0].sizeimage,
  820. align_req);
  821. inst->num_output_planes = temp.fmt.pix_mp.num_planes;
  822. temp.type = BUF_TYPE_INPUT;
  823. temp.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
  824. rc = msm_vidc_s_fmt(inst->vidc_context, &temp);
  825. inst->num_input_planes = temp.fmt.pix_mp.num_planes;
  826. if (rc) {
  827. WFD_MSG_ERR("Failed to format for input port\n");
  828. goto venc_set_format_fail;
  829. }
  830. /* If the device was secured previously, we need to inform vidc _now_ */
  831. if (inst->secure) {
  832. rc = venc_secure(sd);
  833. if (rc) {
  834. WFD_MSG_ERR("Failed secure vidc\n");
  835. goto venc_set_format_fail;
  836. }
  837. }
  838. venc_set_format_fail:
  839. return rc;
  840. }
  841. static long venc_set_framerate(struct v4l2_subdev *sd, void *arg)
  842. {
  843. struct venc_inst *inst = NULL;
  844. struct v4l2_streamparm p = {0};
  845. if (!sd) {
  846. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  847. return -EINVAL;
  848. } else if (!arg) {
  849. WFD_MSG_ERR("Invalid framerate\n");
  850. return -EINVAL;
  851. }
  852. inst = (struct venc_inst *)sd->dev_priv;
  853. p.type = BUF_TYPE_INPUT;
  854. p.parm.output.timeperframe = *(struct v4l2_fract *)arg;
  855. return msm_vidc_s_parm(inst->vidc_context, &p);
  856. }
/*
 * fill_outbuf() - queue a registered output (bitstream) buffer to vidc.
 *
 * Waits (bounded by TIMEOUT) for a free output-buffer index if none is
 * currently available, then queues @mregion as a USERPTR buffer and
 * marks the index busy.
 *
 * Returns 0 on success, -ENOENT if @mregion is NULL, -ETIMEDOUT if no
 * index freed up in time, or whatever msm_vidc_qbuf() returns.
 */
static long fill_outbuf(struct venc_inst *inst, struct mem_region *mregion)
{
	struct v4l2_buffer buffer = {0};
	struct v4l2_plane plane = {0};
	int index = 0, rc = 0;
	if (!mregion) {
		WFD_MSG_ERR("Output buffer not registered\n");
		return -ENOENT;
	}
	/* Single-plane USERPTR buffer backed by the iommu-mapped address */
	plane = (struct v4l2_plane) {
		.length = mregion->size,
		.m.userptr = (u32)mregion->paddr,
	};
	/*
	 * Grab a free index under inst->lock; if none, block until a
	 * dequeue completes (dq_complete is presumably signalled from the
	 * vidc event/done path — not visible here) and retry.
	 */
	while (true) {
		mutex_lock(&inst->lock);
		index = next_free_index(&inst->free_output_indices);
		mutex_unlock(&inst->lock);
		if (index < 0) {
			/* rc here is the wait result, 0 means timed out */
			rc = wait_for_completion_timeout(&inst->dq_complete,
					TIMEOUT);
			if (!rc) {
				WFD_MSG_ERR(
					"Timed out waiting for an output buffer\n");
				rc = -ETIMEDOUT;
				goto err_fill_buf;
			}
		} else {
			break;
		}
	}
	buffer = (struct v4l2_buffer) {
		.index = index,
		.type = BUF_TYPE_OUTPUT,
		.memory = V4L2_MEMORY_USERPTR,
		.m.planes = &plane,
		.length = 1,
	};
	WFD_MSG_DBG("Fill buffer %p with index, %d",
		(void *)buffer.m.planes[0].m.userptr, buffer.index);
	/* rc is overwritten here, clearing any leftover wait result */
	rc = msm_vidc_qbuf(inst->vidc_context, &buffer);
	if (!rc) {
		/* Only reserve the index once the queue actually succeeded */
		mutex_lock(&inst->lock);
		mark_index_busy(&inst->free_output_indices, index);
		mutex_unlock(&inst->lock);
	}
err_fill_buf:
	return rc;
}
  905. static void fill_outbuf_helper(struct work_struct *work)
  906. {
  907. int rc;
  908. struct fill_buf_work *fbw =
  909. container_of(work, struct fill_buf_work, work);
  910. rc = fill_outbuf(fbw->inst, fbw->mregion);
  911. if (rc) {
  912. struct vb2_buffer *vb = NULL;
  913. WFD_MSG_ERR("Failed to fill buffer async\n");
  914. vb = (struct vb2_buffer *)fbw->mregion->cookie;
  915. vb->v4l2_buf.flags = 0;
  916. vb->v4l2_buf.timestamp = ns_to_timeval(-1);
  917. vb->v4l2_planes[0].bytesused = 0;
  918. fbw->inst->vmops.op_buffer_done(
  919. fbw->inst->vmops.cbdata, rc, vb);
  920. }
  921. kfree(fbw);
  922. }
  923. static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg)
  924. {
  925. struct fill_buf_work *fbw;
  926. struct venc_inst *inst = NULL;
  927. struct mem_region *mregion;
  928. if (!sd) {
  929. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  930. return -EINVAL;
  931. } else if (!arg) {
  932. WFD_MSG_ERR("Invalid output buffer ot fill\n");
  933. return -EINVAL;
  934. }
  935. inst = (struct venc_inst *)sd->dev_priv;
  936. mregion = get_registered_mregion(&inst->registered_output_bufs, arg);
  937. if (!mregion) {
  938. WFD_MSG_ERR("Output buffer not registered\n");
  939. return -ENOENT;
  940. }
  941. fbw = kzalloc(sizeof(*fbw), GFP_KERNEL);
  942. if (!fbw) {
  943. WFD_MSG_ERR("Couldn't allocate memory\n");
  944. return -ENOMEM;
  945. }
  946. INIT_WORK(&fbw->work, fill_outbuf_helper);
  947. fbw->inst = inst;
  948. fbw->mregion = mregion;
  949. /* XXX: The need for a wq to qbuf to vidc is necessitated as a
  950. * workaround for a bug in the v4l2 framework. VIDIOC_QBUF from
  951. * triggers a down_read(current->mm->mmap_sem). There is another
  952. * _read(..) as msm_vidc_qbuf() depends on videobuf2 framework
  953. * as well. However, a _write(..) after the first _read() by a
  954. * different driver will prevent the second _read(...) from
  955. * suceeding.
  956. *
  957. * As we can't modify the framework, we're working around by issue
  958. * by queuing in a different thread effectively.
  959. */
  960. queue_work(inst->fill_buf_wq, &fbw->work);
  961. return 0;
  962. }
  963. static long venc_encode_frame(struct v4l2_subdev *sd, void *arg)
  964. {
  965. struct venc_inst *inst = NULL;
  966. struct venc_buf_info *venc_buf = arg;
  967. struct mem_region *mregion = NULL;
  968. struct v4l2_buffer buffer = {0};
  969. struct v4l2_plane plane = {0};
  970. int index = 0, rc = 0;
  971. if (!sd) {
  972. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  973. return -EINVAL;
  974. } else if (!venc_buf) {
  975. WFD_MSG_ERR("Invalid output buffer ot fill\n");
  976. return -EINVAL;
  977. }
  978. inst = (struct venc_inst *)sd->dev_priv;
  979. mregion = venc_buf->mregion;
  980. plane = (struct v4l2_plane) {
  981. .length = mregion->size,
  982. .m.userptr = (u32)mregion->paddr,
  983. .bytesused = mregion->size,
  984. };
  985. while (true) {
  986. mutex_lock(&inst->lock);
  987. index = next_free_index(&inst->free_input_indices);
  988. mutex_unlock(&inst->lock);
  989. if (index < 0) {
  990. rc = wait_for_completion_timeout(&inst->dq_complete,
  991. TIMEOUT);
  992. if (!rc) {
  993. WFD_MSG_ERR(
  994. "Timed out waiting for an input buffer\n");
  995. rc = -ETIMEDOUT;
  996. goto err_encode_frame;
  997. }
  998. } else {
  999. break;
  1000. }
  1001. }
  1002. buffer = (struct v4l2_buffer) {
  1003. .index = index,
  1004. .type = BUF_TYPE_INPUT,
  1005. .timestamp = ns_to_timeval(venc_buf->timestamp),
  1006. .memory = V4L2_MEMORY_USERPTR,
  1007. .m.planes = &plane,
  1008. .length = 1,
  1009. };
  1010. WFD_MSG_DBG("Encode buffer %p with index, %d",
  1011. (void *)buffer.m.planes[0].m.userptr, buffer.index);
  1012. rc = msm_vidc_qbuf(inst->vidc_context, &buffer);
  1013. if (!rc) {
  1014. mutex_lock(&inst->lock);
  1015. mark_index_busy(&inst->free_input_indices, index);
  1016. mutex_unlock(&inst->lock);
  1017. }
  1018. err_encode_frame:
  1019. return rc;
  1020. }
/*
 * venc_alloc_recon_buffers() - no-op stub for reconstruction buffers.
 * The vidc driver allocates them internally on streamon, so nothing to
 * do here; kept to satisfy the ioctl dispatch table.
 */
static long venc_alloc_recon_buffers(struct v4l2_subdev *sd, void *arg)
{
	/* vidc driver allocates internally on streamon */
	return 0;
}
  1026. static long venc_free_buffer(struct venc_inst *inst, int type,
  1027. struct mem_region *to_free, bool unmap_user_buffer)
  1028. {
  1029. struct mem_region *mregion = NULL;
  1030. struct mem_region *buf_list = NULL;
  1031. if (type == BUF_TYPE_OUTPUT) {
  1032. buf_list = &inst->registered_output_bufs;
  1033. } else if (type == BUF_TYPE_INPUT) {
  1034. buf_list = &inst->registered_input_bufs;
  1035. } else {
  1036. WFD_MSG_ERR("Trying to free a buffer of unknown type\n");
  1037. return -EINVAL;
  1038. }
  1039. mregion = get_registered_mregion(buf_list, to_free);
  1040. if (!mregion) {
  1041. WFD_MSG_ERR("Buffer not registered, cannot free\n");
  1042. return -ENOENT;
  1043. }
  1044. if (unmap_user_buffer) {
  1045. int rc = venc_unmap_user_to_kernel(inst, mregion);
  1046. if (rc)
  1047. WFD_MSG_WARN("Unable to unmap user buffer\n");
  1048. }
  1049. list_del(&mregion->list);
  1050. kfree(mregion);
  1051. return 0;
  1052. }
  1053. static long venc_free_output_buffer(struct v4l2_subdev *sd, void *arg)
  1054. {
  1055. int rc = 0;
  1056. struct venc_inst *inst = NULL;
  1057. if (!sd) {
  1058. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1059. rc = -EINVAL;
  1060. goto venc_free_output_buffer_fail;
  1061. } else if (!arg) {
  1062. WFD_MSG_ERR("Invalid output buffer\n");
  1063. rc = -EINVAL;
  1064. goto venc_free_output_buffer_fail;
  1065. }
  1066. inst = (struct venc_inst *)sd->dev_priv;
  1067. return venc_free_buffer(inst, BUF_TYPE_OUTPUT, arg, true);
  1068. venc_free_output_buffer_fail:
  1069. return rc;
  1070. }
  1071. static long venc_flush_buffers(struct v4l2_subdev *sd, void *arg)
  1072. {
  1073. struct venc_inst *inst = NULL;
  1074. struct v4l2_encoder_cmd enc_cmd = {0};
  1075. int rc = 0;
  1076. if (!sd) {
  1077. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1078. rc = -EINVAL;
  1079. goto venc_flush_buffers_fail;
  1080. }
  1081. inst = (struct venc_inst *)sd->dev_priv;
  1082. flush_workqueue(inst->fill_buf_wq);
  1083. enc_cmd.cmd = V4L2_ENC_QCOM_CMD_FLUSH;
  1084. enc_cmd.flags = V4L2_QCOM_CMD_FLUSH_OUTPUT |
  1085. V4L2_QCOM_CMD_FLUSH_CAPTURE;
  1086. msm_vidc_encoder_cmd(inst->vidc_context, &enc_cmd);
  1087. wait_for_completion(&inst->cmd_complete);
  1088. venc_flush_buffers_fail:
  1089. return rc;
  1090. }
  1091. static long venc_free_input_buffer(struct v4l2_subdev *sd, void *arg)
  1092. {
  1093. int rc = 0;
  1094. struct venc_inst *inst = NULL;
  1095. if (!sd) {
  1096. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1097. rc = -EINVAL;
  1098. goto venc_free_input_buffer_fail;
  1099. } else if (!arg) {
  1100. WFD_MSG_ERR("Invalid output buffer\n");
  1101. rc = -EINVAL;
  1102. goto venc_free_input_buffer_fail;
  1103. }
  1104. inst = (struct venc_inst *)sd->dev_priv;
  1105. return venc_free_buffer(inst, BUF_TYPE_INPUT, arg, false);
  1106. venc_free_input_buffer_fail:
  1107. return rc;
  1108. }
/*
 * venc_free_recon_buffers() - no-op stub; the vidc driver frees its
 * internally-allocated reconstruction buffers itself.
 */
static long venc_free_recon_buffers(struct v4l2_subdev *sd, void *arg)
{
	/* vidc driver takes care of this */
	return 0;
}
  1114. static long venc_set_property(struct v4l2_subdev *sd, void *arg)
  1115. {
  1116. struct venc_inst *inst = NULL;
  1117. if (!sd) {
  1118. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1119. return -EINVAL;
  1120. }
  1121. inst = (struct venc_inst *)sd->dev_priv;
  1122. return msm_vidc_s_ctrl(inst->vidc_context, (struct v4l2_control *)arg);
  1123. }
  1124. static long venc_get_property(struct v4l2_subdev *sd, void *arg)
  1125. {
  1126. struct venc_inst *inst = NULL;
  1127. if (!sd) {
  1128. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1129. return -EINVAL;
  1130. }
  1131. inst = (struct venc_inst *)sd->dev_priv;
  1132. return msm_vidc_g_ctrl(inst->vidc_context, (struct v4l2_control *)arg);
  1133. }
  1134. long venc_mmap(struct v4l2_subdev *sd, void *arg)
  1135. {
  1136. struct mem_region_map *mmap = arg;
  1137. struct mem_region *mregion = NULL;
  1138. unsigned long size = 0, align_req = 0, flags = 0;
  1139. int domain = 0, partition = 0, rc = 0;
  1140. void *paddr = NULL;
  1141. struct venc_inst *inst = NULL;
  1142. if (!sd) {
  1143. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1144. return -EINVAL;
  1145. } else if (!mmap || !mmap->mregion) {
  1146. WFD_MSG_ERR("Memregion required for %s\n", __func__);
  1147. return -EINVAL;
  1148. }
  1149. inst = (struct venc_inst *)sd->dev_priv;
  1150. mregion = mmap->mregion;
  1151. align_req = inst->secure ? SZ_1M : SZ_4K;
  1152. if (mregion->size % align_req != 0) {
  1153. WFD_MSG_ERR("Memregion not aligned to %ld\n", align_req);
  1154. rc = -EINVAL;
  1155. goto venc_map_bad_align;
  1156. }
  1157. rc = ion_handle_get_flags(mmap->ion_client, mregion->ion_handle,
  1158. &flags);
  1159. if (rc) {
  1160. WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
  1161. goto venc_map_bad_align;
  1162. }
  1163. if (inst->secure) {
  1164. rc = msm_ion_secure_buffer(mmap->ion_client,
  1165. mregion->ion_handle, VIDEO_PIXEL, 0);
  1166. if (rc) {
  1167. WFD_MSG_ERR("Failed to secure input buffer\n");
  1168. goto venc_map_bad_align;
  1169. }
  1170. }
  1171. rc = msm_vidc_get_iommu_domain_partition(inst->vidc_context,
  1172. flags, BUF_TYPE_INPUT, &domain, &partition);
  1173. if (rc) {
  1174. WFD_MSG_ERR("Failed to get domain for output buffer\n");
  1175. goto venc_map_domain_fail;
  1176. }
  1177. rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
  1178. domain, partition, align_req, 0,
  1179. (unsigned long *)&paddr, &size, 0, 0);
  1180. if (rc) {
  1181. WFD_MSG_ERR("Failed to get physical addr %d\n", rc);
  1182. paddr = NULL;
  1183. goto venc_map_bad_align;
  1184. } else if (size < mregion->size) {
  1185. WFD_MSG_ERR("Failed to map enough memory\n");
  1186. rc = -ENOMEM;
  1187. goto venc_map_iommu_size_fail;
  1188. }
  1189. mregion->paddr = paddr;
  1190. return rc;
  1191. venc_map_iommu_size_fail:
  1192. ion_unmap_iommu(venc_ion_client, mregion->ion_handle,
  1193. domain, partition);
  1194. venc_map_domain_fail:
  1195. if (inst->secure)
  1196. msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
  1197. venc_map_bad_align:
  1198. return rc;
  1199. }
  1200. long venc_munmap(struct v4l2_subdev *sd, void *arg)
  1201. {
  1202. struct mem_region_map *mmap = arg;
  1203. struct mem_region *mregion = NULL;
  1204. struct venc_inst *inst = NULL;
  1205. unsigned long flags = 0;
  1206. int domain = 0, partition = 0, rc = 0;
  1207. if (!sd) {
  1208. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1209. return -EINVAL;
  1210. } else if (!mmap || !mmap->mregion) {
  1211. WFD_MSG_ERR("Memregion required for %s\n", __func__);
  1212. return -EINVAL;
  1213. }
  1214. inst = (struct venc_inst *)sd->dev_priv;
  1215. mregion = mmap->mregion;
  1216. rc = ion_handle_get_flags(mmap->ion_client,
  1217. mregion->ion_handle, &flags);
  1218. if (rc) {
  1219. WFD_MSG_ERR("Failed to get ion flags %d\n", rc);
  1220. return rc;
  1221. }
  1222. rc = msm_vidc_get_iommu_domain_partition(inst->vidc_context,
  1223. flags, BUF_TYPE_INPUT, &domain, &partition);
  1224. if (rc) {
  1225. WFD_MSG_ERR("Failed to get domain for input buffer\n");
  1226. return rc;
  1227. }
  1228. if (mregion->paddr) {
  1229. ion_unmap_iommu(mmap->ion_client, mregion->ion_handle,
  1230. domain, partition);
  1231. mregion->paddr = NULL;
  1232. }
  1233. if (inst->secure)
  1234. msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
  1235. return rc;
  1236. }
/*
 * venc_set_framerate_mode() - stub that reports success.
 * Intentionally a no-op: see comment below regarding binary
 * compatibility with userspace across targets.
 */
static long venc_set_framerate_mode(struct v4l2_subdev *sd,
		void *arg)
{
	/* TODO: Unsupported for now, but return false success
	 * to preserve binary compatibility for userspace apps
	 * across targets */
	return 0;
}
  1245. static long venc_secure(struct v4l2_subdev *sd)
  1246. {
  1247. struct venc_inst *inst = NULL;
  1248. struct v4l2_control ctrl;
  1249. int rc = 0;
  1250. if (!sd) {
  1251. WFD_MSG_ERR("Subdevice required for %s\n", __func__);
  1252. return -EINVAL;
  1253. }
  1254. inst = sd->dev_priv;
  1255. if (!list_empty(&inst->registered_input_bufs.list) ||
  1256. !list_empty(&inst->registered_output_bufs.list)) {
  1257. WFD_MSG_ERR(
  1258. "Attempt to (un)secure encoder not allowed after registering buffers"
  1259. );
  1260. rc = -EEXIST;
  1261. }
  1262. ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_SECURE;
  1263. rc = msm_vidc_s_ctrl(inst->vidc_context, &ctrl);
  1264. if (rc) {
  1265. WFD_MSG_ERR("Failed to move vidc into secure mode\n");
  1266. goto secure_fail;
  1267. }
  1268. secure_fail:
  1269. return rc;
  1270. }
  1271. long venc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
  1272. {
  1273. long rc = 0;
  1274. switch (cmd) {
  1275. case OPEN:
  1276. rc = venc_open(sd, arg);
  1277. break;
  1278. case CLOSE:
  1279. rc = venc_close(sd, arg);
  1280. break;
  1281. case ENCODE_START:
  1282. rc = venc_start(sd);
  1283. break;
  1284. case ENCODE_FRAME:
  1285. venc_encode_frame(sd, arg);
  1286. break;
  1287. case ENCODE_STOP:
  1288. rc = venc_stop(sd);
  1289. break;
  1290. case SET_PROP:
  1291. rc = venc_set_property(sd, arg);
  1292. break;
  1293. case GET_PROP:
  1294. rc = venc_get_property(sd, arg);
  1295. break;
  1296. case GET_BUFFER_REQ:
  1297. rc = venc_get_buffer_req(sd, arg);
  1298. break;
  1299. case SET_BUFFER_REQ:
  1300. rc = venc_set_buffer_req(sd, arg);
  1301. break;
  1302. case FREE_BUFFER:
  1303. break;
  1304. case FILL_OUTPUT_BUFFER:
  1305. rc = venc_fill_outbuf(sd, arg);
  1306. break;
  1307. case SET_FORMAT:
  1308. rc = venc_set_format(sd, arg);
  1309. break;
  1310. case SET_FRAMERATE:
  1311. rc = venc_set_framerate(sd, arg);
  1312. break;
  1313. case SET_INPUT_BUFFER:
  1314. rc = venc_set_input_buffer(sd, arg);
  1315. break;
  1316. case SET_OUTPUT_BUFFER:
  1317. rc = venc_set_output_buffer(sd, arg);
  1318. break;
  1319. case ALLOC_RECON_BUFFERS:
  1320. rc = venc_alloc_recon_buffers(sd, arg);
  1321. break;
  1322. case FREE_OUTPUT_BUFFER:
  1323. rc = venc_free_output_buffer(sd, arg);
  1324. break;
  1325. case FREE_INPUT_BUFFER:
  1326. rc = venc_free_input_buffer(sd, arg);
  1327. break;
  1328. case FREE_RECON_BUFFERS:
  1329. rc = venc_free_recon_buffers(sd, arg);
  1330. break;
  1331. case ENCODE_FLUSH:
  1332. rc = venc_flush_buffers(sd, arg);
  1333. break;
  1334. case ENC_MMAP:
  1335. rc = venc_mmap(sd, arg);
  1336. break;
  1337. case ENC_MUNMAP:
  1338. rc = venc_munmap(sd, arg);
  1339. break;
  1340. case SET_FRAMERATE_MODE:
  1341. rc = venc_set_framerate_mode(sd, arg);
  1342. break;
  1343. default:
  1344. WFD_MSG_ERR("Unknown ioctl %d to enc-subdev\n", cmd);
  1345. rc = -ENOTSUPP;
  1346. break;
  1347. }
  1348. return rc;
  1349. }