/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"

const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_Y444,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_Y422,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_Y420,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_C444,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_C422,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_C420,
		.depth = 4,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_YCR422,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_YC444,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_CRY422,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_CBY422,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_YCB422,
		.depth = 16,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);

const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_RGB16_565,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ARGB_1555,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ARGB_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_RGBA_5551,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_RGBA_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ARGB24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_RGB24_888,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ARGB32_8888,
		.depth = 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_RGBA24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_RGBA32_8888,
		.depth = 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_BGR16_565,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ABGR_1555,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ABGR_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_BGRA_5551,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_BGRA_4444,
		.depth = 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ABGR24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_BGR24_888,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_ABGR32_8888,
		.depth = 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_BGRA24_6666,
		.depth = 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type = VPDMA_DATA_FMT_TYPE_RGB,
		.data_type = DATA_TYPE_BGRA32_8888,
		.depth = 32,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);

/*
 * To handle RAW formats we re-use the CBY422 VPDMA data type, so that the
 * VPDMA re-orders the incoming bytes: the parser assumes that the first byte
 * presented on the bus is the MSB of a 2-byte value.
 *
 * RAW8 handles formats of 1 to 8 bits per sample.
 * RAW16 handles formats of 9 to 16 bits per sample.
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_CBY422,
		.depth = 8,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type = VPDMA_DATA_FMT_TYPE_YUV,
		.data_type = DATA_TYPE_CBY422,
		.depth = 16,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);

const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type = VPDMA_DATA_FMT_TYPE_MISC,
		.data_type = DATA_TYPE_MV,
		.depth = 4,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);

struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num = VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset = VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num = VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset = VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num = VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset = VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num = VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset = VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num = VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset = VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num = VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset = VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num = VPE_CHAN_NUM_MV_IN,
		.cstat_offset = VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num = VPE_CHAN_NUM_MV_OUT,
		.cstat_offset = VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num = VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num = VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset = VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num = VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset = VPDMA_VIP_UP_Y_CSTAT,
	},
};

static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}

void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dumping registers of only group0 and group3, because VPE channels
	 * lie within group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients; we can make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);

/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}
EXPORT_SYMBOL(vpdma_alloc_desc_buf);

void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
EXPORT_SYMBOL(vpdma_free_desc_buf);

/*
 * map a descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}
EXPORT_SYMBOL(vpdma_map_desc_buf);

/*
 * unmap a descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size,
				DMA_BIDIRECTIONAL);

	buf->mapped = false;
}
EXPORT_SYMBOL(vpdma_unmap_desc_buf);
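
/*
 * Example (editor's sketch, not driver code): the buffer lifecycle the four
 * helpers above assume. fill_payload() and the surrounding context are
 * illustrative only.
 *
 *	struct vpdma_buf buf;
 *	int ret;
 *
 *	ret = vpdma_alloc_desc_buf(&buf, SZ_4K);
 *	if (ret)
 *		return ret;
 *
 *	fill_payload(buf.addr);			// CPU writes while unmapped
 *
 *	ret = vpdma_map_desc_buf(vpdma, &buf);	// hand the buffer to VPDMA
 *	if (ret)
 *		goto free_buf;
 *
 *	// ... submit work and wait for VPDMA to finish with the buffer ...
 *
 *	vpdma_unmap_desc_buf(vpdma, &buf);	// CPU may access it again
 * free_buf:
 *	vpdma_free_desc_buf(&buf);
 */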

/*
 * Cleanup all pending descriptors of a list.
 * First, stop the current list being processed.
 * If the VPDMA was busy, this step makes the VPDMA accept newly posted lists.
 * To clean up the internal FSM, post an abort control descriptor for each of
 * the channels in the @channels array of size @size.
 */
int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
		int *channels, int size)
{
	struct vpdma_desc_list abort_list;
	int i, ret, timeout = 500;

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(1 << VPDMA_LIST_STOP_SHFT));

	if (size <= 0 || !channels)
		return 0;

	ret = vpdma_create_desc_list(&abort_list,
		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
	if (ret)
		return ret;

	for (i = 0; i < size; i++)
		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);

	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
	if (ret)
		goto free_desc;
	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
	if (ret)
		goto unmap_desc;

	while (vpdma_list_busy(vpdma, list_num) && --timeout)
		;

	if (timeout == 0) {
		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
		ret = -EBUSY;
	}

	/* free the abort list buffer on all paths, not just on success */
unmap_desc:
	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
free_desc:
	vpdma_free_desc_buf(&abort_list.buf);

	return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);

/*
 * Create a descriptor list; the user of the list appends configuration,
 * control and data descriptors to it and then submits it to VPDMA. VPDMA's
 * list parser walks through each descriptor and performs the required DMA
 * operations.
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;
	list->type = type;

	return 0;
}
EXPORT_SYMBOL(vpdma_create_desc_list);

/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying
 * it, which allows new descriptors to be added to the list
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);

/*
 * free the buffer allocated for the VPDMA descriptor list; this should be
 * called when the user doesn't want to use VPDMA any more
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
EXPORT_SYMBOL(vpdma_free_desc_list);

bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
EXPORT_SYMBOL(vpdma_list_busy);

/*
 * submit a list of DMA descriptors to the VPE VPDMA, do not wait for
 * completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma,
			struct vpdma_desc_list *list, int list_num)
{
	int list_size;
	unsigned long flags;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	spin_lock_irqsave(&vpdma->lock, flags);
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return 0;
}
EXPORT_SYMBOL(vpdma_submit_descs);
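
/*
 * Example (editor's sketch): the list helpers combined into the cycle a
 * driver typically runs per frame; the size and list number are
 * illustrative.
 *
 *	struct vpdma_desc_list list;
 *
 *	if (vpdma_create_desc_list(&list, SZ_4K, VPDMA_LIST_TYPE_NORMAL))
 *		return -ENOMEM;
 *
 *	// append descriptors via the vpdma_add_*() helpers below
 *
 *	vpdma_map_desc_buf(vpdma, &list.buf);
 *	vpdma_submit_descs(vpdma, &list, list_num);
 *
 *	// ... later, when the list-complete interrupt fires:
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);		// ready for the next frame
 */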

static void dump_dtd(struct vpdma_dtd *dtd);

void vpdma_update_dma_addr(struct vpdma_data *vpdma,
	struct vpdma_desc_list *list, dma_addr_t dma_addr,
	void *write_dtd, int drop, int idx)
{
	struct vpdma_dtd *dtd = list->buf.addr;
	dma_addr_t write_desc_addr;
	int offset;

	dtd += idx;
	vpdma_unmap_desc_buf(vpdma, &list->buf);

	dtd->start_addr = dma_addr;

	/*
	 * Calculate the write address from the offset of write_dtd from the
	 * start of list->buf
	 */
	offset = (void *)write_dtd - list->buf.addr;
	write_desc_addr = list->buf.dma_addr + offset;

	if (drop)
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 1, 0);
	else
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 0, 0);

	vpdma_map_desc_buf(vpdma, &list->buf);

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_update_dma_addr);

void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
		u32 width, u32 height)
{
	if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
	    reg_addr != VPDMA_MAX_SIZE3)
		reg_addr = VPDMA_MAX_SIZE1;

	write_field_reg(vpdma, reg_addr, width - 1,
			VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);

	write_field_reg(vpdma, reg_addr, height - 1,
			VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
}
EXPORT_SYMBOL(vpdma_set_max_size);

static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		 class == CFD_CLS_BLOCK ? "simple block" :
		 "address data block");

	if (class == CFD_CLS_BLOCK) {
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			 cfd->dest_addr_offset);
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
	}

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
		 cfd_get_pkt_type(cfd),
		 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		 cfd_get_payload_len(cfd));
}

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the form of a simple data block specified in the descriptor
 * header; this is used to upload scaler coefficients to the scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
						   client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_block);
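
/*
 * Example (editor's sketch): uploading scaler coefficients as a simple block
 * payload. The client id CFD_SC_CLIENT is assumed here for illustration;
 * the payload buffer must be 16-byte aligned.
 *
 *	struct vpdma_buf coeff_buf;
 *
 *	if (vpdma_alloc_desc_buf(&coeff_buf, coeff_size))
 *		return -ENOMEM;
 *	memcpy(coeff_buf.addr, coeffs, coeff_size);
 *
 *	vpdma_map_desc_buf(vpdma, &coeff_buf);
 *	vpdma_add_cfd_block(&list, CFD_SC_CLIENT, &coeff_buf, 0);
 */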

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the address data block format; this is used to configure a
 * discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->w0 = 0;
	cfd->w1 = 0;
	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
						   client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_adb);

/*
 * the control descriptor format changes based on the type of control
 * descriptor; we only use 'sync on channel' control descriptors for now,
 * so assume it's that
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		 ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}

/*
 * append a 'sync on channel' type control descriptor to the given descriptor
 * list; this descriptor stalls the VPDMA list until DMA is completed on the
 * specified channel
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);

/*
 * append an 'abort channel' type control descriptor to the given descriptor
 * list; this descriptor aborts any DMA transaction currently using the
 * specified channel
 */
void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
		int chan_num)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
				CTD_TYPE_ABORT_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);

static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		 dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		 dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		 dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		 dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			 dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		 dtd_get_pkt_type(dtd),
		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		 dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			 dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			 dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			 dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			 dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			 dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}

/*
 * append an outbound data transfer descriptor to the given descriptor list;
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @stride: line stride of the image in memory, in bytes
 * @c_rect: compose params of the output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for the maximum width of the data transfer
 * @max_h: enum for the maximum height of the data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
				  max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);

void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	struct vpdma_dtd *dtd;

	channel = next_chan = raw_vpdma_chan;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				   DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);

/*
 * append an inbound data transfer descriptor to the given descriptor list;
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @stride: line stride of the image in memory, in bytes
 * @c_rect: crop params of the input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
							 rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				   DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
							 frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_add_in_dtd);
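
/*
 * Example (editor's sketch): a memory-to-memory luma pass pairing an inbound
 * and an outbound data transfer descriptor on one list. The format, channel
 * and max-size enum values are assumed from vpdma.h for illustration.
 *
 *	const struct vpdma_data_format *fmt =
 *			&vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444];
 *	struct v4l2_rect rect = { 0, 0, width, height };
 *	int stride = ALIGN((width * fmt->depth) >> 3, VPDMA_STRIDE_ALIGN);
 *
 *	vpdma_add_in_dtd(&list, width, stride, &rect, fmt, src_dma_addr,
 *			 VPE_CHAN_LUMA1_IN, 0, 0, width, height, 0, 0);
 *	vpdma_add_out_dtd(&list, width, stride, &rect, fmt, dst_dma_addr,
 *			  MAX_OUT_WIDTH_1920, MAX_OUT_HEIGHT_1080,
 *			  VPE_CHAN_LUMA_OUT, 0);
 */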

int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
{
	int i, list_num = -1;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	for (i = 0; i < VPDMA_MAX_NUM_LIST && vpdma->hwlist_used[i]; i++)
		;

	if (i < VPDMA_MAX_NUM_LIST) {
		list_num = i;
		vpdma->hwlist_used[i] = true;
		vpdma->hwlist_priv[i] = priv;
	}
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return list_num;
}
EXPORT_SYMBOL(vpdma_hwlist_alloc);

void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
{
	if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
		return NULL;

	return vpdma->hwlist_priv[list_num];
}
EXPORT_SYMBOL(vpdma_hwlist_get_priv);

void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
{
	void *priv;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	vpdma->hwlist_used[list_num] = false;
	/* return the released list's private data, not the array pointer */
	priv = vpdma->hwlist_priv[list_num];
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return priv;
}
EXPORT_SYMBOL(vpdma_hwlist_release);
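
/*
 * Example (editor's sketch): claiming a hardware list at stream-on and
 * using the stored private pointer to recover the driver context later:
 *
 *	int list_num = vpdma_hwlist_alloc(vpdma, ctx);
 *
 *	if (list_num < 0)
 *		return -EBUSY;			// all lists in use
 *	...
 *	ctx = vpdma_hwlist_get_priv(vpdma, list_num);	// e.g. in the IRQ path
 *	...
 *	ctx = vpdma_hwlist_release(vpdma, list_num);	// at stream-off
 */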

/* set or clear the mask for the list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
		int list_num, bool enable)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
	u32 val;

	val = read_reg(vpdma, reg_addr);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, reg_addr, val);
}
EXPORT_SYMBOL(vpdma_enable_list_complete_irq);

/* get the LIST_STAT register */
unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_stat);

/* get the LIST_MASK register */
unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_mask);

/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
			   int list_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	write_reg(vpdma, reg_addr, 3 << (list_num * 2));
}
EXPORT_SYMBOL(vpdma_clear_list_stat);
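
/*
 * Example (editor's sketch): a list-complete interrupt flow built on the
 * helpers above, for interrupt line irq_num and list list_num:
 *
 *	vpdma_enable_list_complete_irq(vpdma, irq_num, list_num, true);
 *	...
 *	// in the interrupt handler:
 *	unsigned int stat = vpdma_get_list_stat(vpdma, irq_num);
 *
 *	if (stat & (1 << (list_num * 2))) {
 *		vpdma_clear_list_stat(vpdma, irq_num, list_num);
 *		// list done: unmap the desc buffer, reset the list, etc.
 *	}
 */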

void vpdma_set_bg_color(struct vpdma_data *vpdma,
		struct vpdma_data_format *fmt, u32 color)
{
	if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
		write_reg(vpdma, VPDMA_BG_RGB, color);
	else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
		write_reg(vpdma, VPDMA_BG_YUV, color);
}
EXPORT_SYMBOL(vpdma_set_bg_color);

/*
 * configures the output mode of the line buffer for the given client; the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
			VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_line_mode);

/*
 * configures the event which should trigger the VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
			VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_frame_start_event);

static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			   VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				   VPDMA_LIST_RDY_SHFT))
			break;
	}
	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);

rel_fw:
	release_firmware(f);
}

static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
			(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
			vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	} else {
		dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
	}

	return 0;
}

int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
		void (*cb)(struct platform_device *pdev))
{
	struct resource *res;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma->pdev = pdev;
	vpdma->cb = cb;
	spin_lock_init(&vpdma->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return -ENODEV;
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return -ENOMEM;
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
		return r;
	}

	return 0;
}
EXPORT_SYMBOL(vpdma_create);
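
/*
 * Example (editor's sketch): how a platform driver hooks this library up at
 * probe time. The callback runs once the VPDMA firmware has been loaded and
 * lists may be submitted; names are illustrative.
 *
 *	static void xyz_vpdma_ready(struct platform_device *pdev)
 *	{
 *		// VPDMA usable from here on: register video devices, etc.
 *	}
 *
 *	static int xyz_probe(struct platform_device *pdev)
 *	{
 *		struct vpdma_data *vpdma = ...;	// part of the driver state
 *
 *		return vpdma_create(pdev, vpdma, xyz_vpdma_ready);
 *	}
 */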

MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
MODULE_LICENSE("GPL v2");