ispqueue.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158
  1. /*
  2. * ispqueue.c
  3. *
  4. * TI OMAP3 ISP - Video buffers queue handling
  5. *
  6. * Copyright (C) 2010 Nokia Corporation
  7. *
  8. * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  9. * Sakari Ailus <sakari.ailus@iki.fi>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software
  22. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23. * 02110-1301 USA
  24. */
  25. #include <asm/cacheflush.h>
  26. #include <linux/dma-mapping.h>
  27. #include <linux/mm.h>
  28. #include <linux/pagemap.h>
  29. #include <linux/poll.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/sched.h>
  32. #include <linux/slab.h>
  33. #include <linux/vmalloc.h>
  34. #include "ispqueue.h"
  35. /* -----------------------------------------------------------------------------
  36. * Video buffers management
  37. */
/*
 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
 *
 * The typical operation required here is Cache Invalidation across
 * the (user space) buffer address range. And this _must_ be done
 * at QBUF stage (and *only* at QBUF).
 *
 * We try to use optimal cache invalidation function:
 * - dmac_map_area:
 *    - used when the number of pages are _low_.
 *    - it becomes quite slow as the number of pages increase.
 *       - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
 *       - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
 *
 * - flush_cache_all:
 *    - used when the number of pages are _high_.
 *    - time taken in the range of 500-900 us.
 *    - has a higher penalty but, as whole dcache + icache is invalidated
 */
/*
 * FIXME: dmac_inv_range crashes randomly on the user space buffer
 * address. Fall back to flush_cache_all for now.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX       0

static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
        /* Non-cached or write-combining mappings need no maintenance (flag
         * set by isp_video_buffer_prepare_vm_flags()).
         */
        if (buf->skip_cache)
                return;

        /* With ISP_CACHE_FLUSH_PAGES_MAX defined as 0 this condition is
         * always true, so the whole cache is flushed (see the FIXME above);
         * the ranged-invalidation branch below is currently dead code kept
         * for when the dmac_inv_range issue is resolved.
         */
        if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
            buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
                flush_cache_all();
        else {
                /* Invalidate the inner cache over the buffer range, then the
                 * outer (L2) cache over the same range.
                 */
                dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
                              DMA_FROM_DEVICE);
                outer_inv_range(buf->vbuf.m.userptr,
                                buf->vbuf.m.userptr + buf->vbuf.length);
        }
}
/*
 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
 *
 * Lock the VMAs underlying the given buffer into memory. This avoids the
 * userspace buffer mapping from being swapped out, making VIPT cache handling
 * easier.
 *
 * Note that the pages will not be freed as the buffers have been locked to
 * memory using by a call to get_user_pages(), but the userspace mapping could
 * still disappear if the VMAs are not locked. This is caused by the memory
 * management code trying to be as lock-less as possible, which results in the
 * userspace mapping manager not finding out that the pages are locked under
 * some conditions.
 *
 * @lock: non-zero to set VM_LOCKED on every VMA the buffer spans, zero to
 *        clear it. MMAP buffers are kernel-backed and are skipped entirely.
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
        struct vm_area_struct *vma;
        unsigned long start;
        unsigned long end;
        int ret = 0;

        if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
                return 0;

        /* We can be called from workqueue context if the current task dies to
         * unlock the VMAs. In that case there's no current memory management
         * context so unlocking can't be performed, but the VMAs have been or
         * are getting destroyed anyway so it doesn't really matter.
         */
        if (!current || !current->mm)
                return lock ? -EINVAL : 0;

        start = buf->vbuf.m.userptr;
        /* 'end' is the address of the last byte of the buffer (inclusive). */
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

        /* Write-lock mmap_sem as we modify vm_flags; page_table_lock guards
         * against concurrent page table walkers.
         */
        down_write(&current->mm->mmap_sem);
        spin_lock(&current->mm->page_table_lock);

        do {
                vma = find_vma(current->mm, start);
                if (vma == NULL) {
                        /* Hole in the mapping: part of the buffer is unmapped. */
                        ret = -EFAULT;
                        goto out;
                }

                if (lock)
                        vma->vm_flags |= VM_LOCKED;
                else
                        vma->vm_flags &= ~VM_LOCKED;

                /* vm_end is exclusive; find_vma() returns the first VMA with
                 * vm_end > addr, so stepping to vm_end + 1 still locates an
                 * adjacent VMA starting exactly at vm_end.
                 */
                start = vma->vm_end + 1;
        } while (vma->vm_end < end);

        /* Mirror the final state in the buffer so cleanup knows what to undo. */
        if (lock)
                buf->vm_flags |= VM_LOCKED;
        else
                buf->vm_flags &= ~VM_LOCKED;

out:
        spin_unlock(&current->mm->page_table_lock);
        up_write(&current->mm->mmap_sem);
        return ret;
}
  130. /*
  131. * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
  132. *
  133. * Iterate over the vmalloc'ed area and create a scatter list entry for every
  134. * page.
  135. */
  136. static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
  137. {
  138. struct scatterlist *sglist;
  139. unsigned int npages;
  140. unsigned int i;
  141. void *addr;
  142. addr = buf->vaddr;
  143. npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
  144. sglist = vmalloc(npages * sizeof(*sglist));
  145. if (sglist == NULL)
  146. return -ENOMEM;
  147. sg_init_table(sglist, npages);
  148. for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
  149. struct page *page = vmalloc_to_page(addr);
  150. if (page == NULL || PageHighMem(page)) {
  151. vfree(sglist);
  152. return -EINVAL;
  153. }
  154. sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
  155. }
  156. buf->sglen = npages;
  157. buf->sglist = sglist;
  158. return 0;
  159. }
  160. /*
  161. * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
  162. *
  163. * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
  164. */
  165. static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
  166. {
  167. struct scatterlist *sglist;
  168. unsigned int offset = buf->offset;
  169. unsigned int i;
  170. sglist = vmalloc(buf->npages * sizeof(*sglist));
  171. if (sglist == NULL)
  172. return -ENOMEM;
  173. sg_init_table(sglist, buf->npages);
  174. for (i = 0; i < buf->npages; ++i) {
  175. if (PageHighMem(buf->pages[i])) {
  176. vfree(sglist);
  177. return -EINVAL;
  178. }
  179. sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
  180. offset);
  181. offset = 0;
  182. }
  183. buf->sglen = buf->npages;
  184. buf->sglist = sglist;
  185. return 0;
  186. }
  187. /*
  188. * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
  189. *
  190. * Create a scatter list of physically contiguous pages starting at the buffer
  191. * memory physical address.
  192. */
  193. static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
  194. {
  195. struct scatterlist *sglist;
  196. unsigned int offset = buf->offset;
  197. unsigned long pfn = buf->paddr >> PAGE_SHIFT;
  198. unsigned int i;
  199. sglist = vmalloc(buf->npages * sizeof(*sglist));
  200. if (sglist == NULL)
  201. return -ENOMEM;
  202. sg_init_table(sglist, buf->npages);
  203. for (i = 0; i < buf->npages; ++i, ++pfn) {
  204. sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
  205. offset);
  206. /* PFNMAP buffers will not get DMA-mapped, set the DMA address
  207. * manually.
  208. */
  209. sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
  210. offset = 0;
  211. }
  212. buf->sglen = buf->npages;
  213. buf->sglist = sglist;
  214. return 0;
  215. }
/*
 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 *
 * Release pages locked by a call isp_video_buffer_prepare_user and free the
 * pages table. Also performs driver-specific cleanup, DMA unmapping (for
 * non-PFNMAP buffers) and scatterlist release; i.e. undoes everything done
 * by isp_video_buffer_prepare().
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
        enum dma_data_direction direction;
        unsigned int i;

        if (buf->queue->ops->buffer_cleanup)
                buf->queue->ops->buffer_cleanup(buf);

        /* PFNMAP buffers were never DMA-mapped — their DMA addresses were
         * filled in by hand in isp_video_buffer_sglist_pfnmap() — so skip
         * unmapping for them.
         */
        if (!(buf->vm_flags & VM_PFNMAP)) {
                direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                          ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
                dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
                             direction);
        }

        vfree(buf->sglist);
        buf->sglist = NULL;
        buf->sglen = 0;

        /* Only pinned userspace (non-PFNMAP) buffers have a pages array:
         * unlock the VMAs and release every pinned page.
         */
        if (buf->pages != NULL) {
                isp_video_buffer_lock_vma(buf, 0);

                for (i = 0; i < buf->npages; ++i)
                        page_cache_release(buf->pages[i]);

                vfree(buf->pages);
                buf->pages = NULL;
        }

        buf->npages = 0;
        buf->skip_cache = false;
}
/*
 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
 *
 * This function creates a list of pages for a userspace VMA. The number of
 * pages is first computed based on the buffer size, and pages are then
 * retrieved by a call to get_user_pages.
 *
 * Pages are pinned to memory by get_user_pages, making them available for DMA
 * transfers. However, due to memory management optimization, it seems the
 * get_user_pages doesn't guarantee that the pinned pages will not be written
 * to swap and removed from the userspace mapping(s). When this happens, a page
 * fault can be generated when accessing those unmapped pages.
 *
 * If the fault is triggered by a page table walk caused by VIPT cache
 * management operations, the page fault handler might oops if the MM semaphore
 * is held, as it can't handle kernel page faults in that case. To fix that, a
 * fixup entry needs to be added to the cache management code, or the userspace
 * VMA must be locked to avoid removing pages from the userspace mapping in the
 * first place.
 *
 * If the number of pages retrieved is smaller than the number required by the
 * buffer size, the function returns -EFAULT.
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
        unsigned long data;
        unsigned int first;
        unsigned int last;
        int ret;

        data = buf->vbuf.m.userptr;
        /* Page frame numbers of the first and last page the buffer touches. */
        first = (data & PAGE_MASK) >> PAGE_SHIFT;
        last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;

        /* Byte offset of the buffer start within its first page. */
        buf->offset = data & ~PAGE_MASK;
        buf->npages = last - first + 1;
        buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
        if (buf->pages == NULL)
                return -ENOMEM;

        down_read(&current->mm->mmap_sem);
        /* For capture buffers the device writes to memory, so request write
         * access to the pages.
         */
        ret = get_user_pages(current, current->mm, data & PAGE_MASK,
                             buf->npages,
                             buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
                             buf->pages, NULL);
        up_read(&current->mm->mmap_sem);

        if (ret != buf->npages) {
                /* Record how many pages were actually pinned so cleanup
                 * releases exactly those, then bail out.
                 */
                buf->npages = ret < 0 ? 0 : ret;
                isp_video_buffer_cleanup(buf);
                return -EFAULT;
        }

        ret = isp_video_buffer_lock_vma(buf, 1);
        if (ret < 0)
                isp_video_buffer_cleanup(buf);

        return ret;
}
/*
 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
 *
 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 * memory and if they span a single VMA.
 *
 * Return 0 if the buffer is valid, or -EFAULT otherwise.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
        struct vm_area_struct *vma;
        unsigned long prev_pfn;
        unsigned long this_pfn;
        unsigned long start;
        unsigned long end;
        dma_addr_t pa; /* assigned on the first loop iteration (prev_pfn == 0) */
        int ret = -EFAULT;

        start = buf->vbuf.m.userptr;
        /* 'end' is the address of the last byte of the buffer (inclusive). */
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

        buf->offset = start & ~PAGE_MASK;
        buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        buf->pages = NULL;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, start);
        /* NOTE(review): vm_end is exclusive, so a buffer whose last byte sits
         * exactly at vm_end passes this check although that byte lies outside
         * the VMA; follow_pfn() below would then fail for that page. Confirm
         * whether the intended check is vm_end <= end.
         */
        if (vma == NULL || vma->vm_end < end)
                goto done;

        /* Walk the range page by page and verify the PFNs are contiguous.
         * prev_pfn == 0 marks the first iteration, which records the
         * physical base address in 'pa'.
         */
        for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
                ret = follow_pfn(vma, start, &this_pfn);
                if (ret)
                        goto done;

                if (prev_pfn == 0)
                        pa = this_pfn << PAGE_SHIFT;
                else if (this_pfn != prev_pfn + 1) {
                        ret = -EFAULT;
                        goto done;
                }

                prev_pfn = this_pfn;
        }

        buf->paddr = pa + buf->offset;
        ret = 0;

done:
        up_read(&current->mm->mmap_sem);
        return ret;
}
/*
 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 *
 * This function locates the VMAs for the buffer's userspace address and checks
 * that their flags match. The only flag that we need to care for at the moment
 * is VM_PFNMAP.
 *
 * The buffer vm_flags field is set to the first VMA flags.
 *
 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 * have incompatible flags.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
        struct vm_area_struct *vma;
        pgprot_t vm_page_prot;
        unsigned long start;
        unsigned long end;
        int ret = -EFAULT;

        start = buf->vbuf.m.userptr;
        /* 'end' is the address of the last byte of the buffer (inclusive). */
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

        down_read(&current->mm->mmap_sem);

        do {
                vma = find_vma(current->mm, start);
                if (vma == NULL)
                        goto done;

                /* Record the flags and page protection of the first VMA; all
                 * following VMAs spanned by the buffer must match them.
                 */
                if (start == buf->vbuf.m.userptr) {
                        buf->vm_flags = vma->vm_flags;
                        vm_page_prot = vma->vm_page_prot;
                }

                if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
                        goto done;

                if (vm_page_prot != vma->vm_page_prot)
                        goto done;

                start = vma->vm_end + 1;
        } while (vma->vm_end < end);

        /* Skip cache management to enhance performances for non-cached or
         * write-combining buffers.
         */
        if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
            vm_page_prot == pgprot_writecombine(vm_page_prot))
                buf->skip_cache = true;

        ret = 0;

done:
        up_read(&current->mm->mmap_sem);
        return ret;
}
/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 *
 * Preparing a buffer involves:
 *
 * - validating VMAs (userspace buffers only)
 * - locking pages and VMAs into memory (userspace buffers only)
 * - building page and scatter-gather lists
 * - mapping buffers for DMA operation
 * - performing driver-specific preparation
 *
 * The function must be called in userspace context with a valid mm context
 * (this excludes cleanup paths such as sys_close when the userspace process
 * segfaults).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
        enum dma_data_direction direction;
        int ret;

        switch (buf->vbuf.memory) {
        case V4L2_MEMORY_MMAP:
                /* MMAP buffers are vmalloc'ed by the queue itself. */
                ret = isp_video_buffer_sglist_kernel(buf);
                break;

        case V4L2_MEMORY_USERPTR:
                ret = isp_video_buffer_prepare_vm_flags(buf);
                if (ret < 0)
                        return ret;

                if (buf->vm_flags & VM_PFNMAP) {
                        /* Physically contiguous memory exposed through a
                         * VM_PFNMAP VMA.
                         */
                        ret = isp_video_buffer_prepare_pfnmap(buf);
                        if (ret < 0)
                                return ret;

                        ret = isp_video_buffer_sglist_pfnmap(buf);
                } else {
                        /* Regular user memory: pin the pages first, then
                         * build the scatterlist from them.
                         */
                        ret = isp_video_buffer_prepare_user(buf);
                        if (ret < 0)
                                return ret;

                        ret = isp_video_buffer_sglist_user(buf);
                }
                break;

        default:
                return -EINVAL;
        }

        if (ret < 0)
                goto done;

        /* PFNMAP buffers carry hand-filled DMA addresses (see
         * isp_video_buffer_sglist_pfnmap) and are never DMA-mapped.
         */
        if (!(buf->vm_flags & VM_PFNMAP)) {
                direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                          ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
                ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
                                 direction);
                if (ret != buf->sglen) {
                        ret = -EFAULT;
                        goto done;
                }
        }

        if (buf->queue->ops->buffer_prepare)
                ret = buf->queue->ops->buffer_prepare(buf);

done:
        /* On any failure past scatterlist creation, release everything that
         * has been set up so far.
         */
        if (ret < 0) {
                isp_video_buffer_cleanup(buf);
                return ret;
        }

        return ret;
}
/*
 * isp_video_queue_query - Query the status of a given buffer
 *
 * Fill the v4l2_buffer flags from the buffer's current state. The switch
 * cases fall through on purpose: an ERROR buffer also reports DONE, and
 * DONE, QUEUED and ACTIVE buffers all report the QUEUED flag.
 *
 * Locking: must be called with the queue lock held.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
                                   struct v4l2_buffer *vbuf)
{
        memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

        if (buf->vma_use_count)
                vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

        switch (buf->state) {
        case ISP_BUF_STATE_ERROR:
                vbuf->flags |= V4L2_BUF_FLAG_ERROR;
                /* fallthrough */
        case ISP_BUF_STATE_DONE:
                vbuf->flags |= V4L2_BUF_FLAG_DONE;
                /* fallthrough */
        case ISP_BUF_STATE_QUEUED:
        case ISP_BUF_STATE_ACTIVE:
                vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
                break;
        case ISP_BUF_STATE_IDLE:
        default:
                break;
        }
}
  479. /*
  480. * isp_video_buffer_wait - Wait for a buffer to be ready
  481. *
  482. * In non-blocking mode, return immediately with 0 if the buffer is ready or
  483. * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
  484. *
  485. * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
  486. * queue using the same condition.
  487. */
  488. static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
  489. {
  490. if (nonblocking) {
  491. return (buf->state != ISP_BUF_STATE_QUEUED &&
  492. buf->state != ISP_BUF_STATE_ACTIVE)
  493. ? 0 : -EAGAIN;
  494. }
  495. return wait_event_interruptible(buf->wait,
  496. buf->state != ISP_BUF_STATE_QUEUED &&
  497. buf->state != ISP_BUF_STATE_ACTIVE);
  498. }
  499. /* -----------------------------------------------------------------------------
  500. * Queue management
  501. */
  502. /*
  503. * isp_video_queue_free - Free video buffers memory
  504. *
  505. * Buffers can only be freed if the queue isn't streaming and if no buffer is
  506. * mapped to userspace. Return -EBUSY if those conditions aren't statisfied.
  507. *
  508. * This function must be called with the queue lock held.
  509. */
  510. static int isp_video_queue_free(struct isp_video_queue *queue)
  511. {
  512. unsigned int i;
  513. if (queue->streaming)
  514. return -EBUSY;
  515. for (i = 0; i < queue->count; ++i) {
  516. if (queue->buffers[i]->vma_use_count != 0)
  517. return -EBUSY;
  518. }
  519. for (i = 0; i < queue->count; ++i) {
  520. struct isp_video_buffer *buf = queue->buffers[i];
  521. isp_video_buffer_cleanup(buf);
  522. vfree(buf->vaddr);
  523. buf->vaddr = NULL;
  524. kfree(buf);
  525. queue->buffers[i] = NULL;
  526. }
  527. INIT_LIST_HEAD(&queue->queue);
  528. queue->count = 0;
  529. return 0;
  530. }
/*
 * isp_video_queue_alloc - Allocate video buffers memory
 *
 * Free the currently allocated buffers, then allocate up to @nbuffers buffer
 * objects of @size bytes each (and, for MMAP buffers, their backing memory).
 * Fewer buffers than requested may end up allocated on memory shortage.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
                                 unsigned int nbuffers,
                                 unsigned int size, enum v4l2_memory memory)
{
        struct isp_video_buffer *buf;
        unsigned int i;
        void *mem;
        int ret;

        /* Start by freeing the buffers. */
        ret = isp_video_queue_free(queue);
        if (ret < 0)
                return ret;

        /* Bail out if no buffers should be allocated. */
        if (nbuffers == 0)
                return 0;

        /* Initialize the allocated buffers. */
        for (i = 0; i < nbuffers; ++i) {
                /* bufsize covers driver-specific fields appended to the
                 * isp_video_buffer structure.
                 */
                buf = kzalloc(queue->bufsize, GFP_KERNEL);
                if (buf == NULL)
                        break;

                if (memory == V4L2_MEMORY_MMAP) {
                        /* Allocate video buffers memory for mmap mode. Align
                         * the size to the page size.
                         */
                        mem = vmalloc_32_user(PAGE_ALIGN(size));
                        if (mem == NULL) {
                                kfree(buf);
                                break;
                        }

                        /* The mmap offset identifies this buffer when
                         * userspace mmap()s it back.
                         */
                        buf->vbuf.m.offset = i * PAGE_ALIGN(size);
                        buf->vaddr = mem;
                }

                buf->vbuf.index = i;
                buf->vbuf.length = size;
                buf->vbuf.type = queue->type;
                buf->vbuf.field = V4L2_FIELD_NONE;
                buf->vbuf.memory = memory;

                buf->queue = queue;
                init_waitqueue_head(&buf->wait);

                queue->buffers[i] = buf;
        }

        if (i == 0)
                return -ENOMEM;

        queue->count = i;
        /* NOTE(review): the requested count is returned even when fewer
         * buffers (i < nbuffers) were actually allocated, while queue->count
         * holds the real number — confirm callers expect this.
         */
        return nbuffers;
}
/**
 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 * @queue: Video buffers queue
 *
 * Free all allocated resources and clean up the video buffers queue. The queue
 * must not be busy (no ongoing video stream) and buffers must have been
 * unmapped.
 *
 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 * unmapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
        /* Thin wrapper: all checks and freeing live in isp_video_queue_free(). */
        return isp_video_queue_free(queue);
}
  597. /**
  598. * omap3isp_video_queue_init - Initialize the video buffers queue
  599. * @queue: Video buffers queue
  600. * @type: V4L2 buffer type (capture or output)
  601. * @ops: Driver-specific queue operations
  602. * @dev: Device used for DMA operations
  603. * @bufsize: Size of the driver-specific buffer structure
  604. *
  605. * Initialize the video buffers queue with the supplied parameters.
  606. *
  607. * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
  608. * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
  609. *
  610. * Buffer objects will be allocated using the given buffer size to allow room
  611. * for driver-specific fields. Driver-specific buffer structures must start
  612. * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
  613. * structure must pass the size of the isp_video_buffer structure in the bufsize
  614. * parameter.
  615. *
  616. * Return 0 on success.
  617. */
  618. int omap3isp_video_queue_init(struct isp_video_queue *queue,
  619. enum v4l2_buf_type type,
  620. const struct isp_video_queue_operations *ops,
  621. struct device *dev, unsigned int bufsize)
  622. {
  623. INIT_LIST_HEAD(&queue->queue);
  624. mutex_init(&queue->lock);
  625. spin_lock_init(&queue->irqlock);
  626. queue->type = type;
  627. queue->ops = ops;
  628. queue->dev = dev;
  629. queue->bufsize = bufsize;
  630. return 0;
  631. }
  632. /* -----------------------------------------------------------------------------
  633. * V4L2 operations
  634. */
  635. /**
  636. * omap3isp_video_queue_reqbufs - Allocate video buffers memory
  637. *
  638. * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
  639. * allocated video buffer objects and, for MMAP buffers, buffer memory.
  640. *
  641. * If the number of buffers is 0, all buffers are freed and the function returns
  642. * without performing any allocation.
  643. *
  644. * If the number of buffers is not 0, currently allocated buffers (if any) are
  645. * freed and the requested number of buffers are allocated. Depending on
  646. * driver-specific requirements and on memory availability, a number of buffer
  647. * smaller or bigger than requested can be allocated. This isn't considered as
  648. * an error.
  649. *
  650. * Return 0 on success or one of the following error codes:
  651. *
  652. * -EINVAL if the buffer type or index are invalid
  653. * -EBUSY if the queue is busy (streaming or buffers mapped)
  654. * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
  655. */
  656. int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
  657. struct v4l2_requestbuffers *rb)
  658. {
  659. unsigned int nbuffers = rb->count;
  660. unsigned int size;
  661. int ret;
  662. if (rb->type != queue->type)
  663. return -EINVAL;
  664. queue->ops->queue_prepare(queue, &nbuffers, &size);
  665. if (size == 0)
  666. return -EINVAL;
  667. nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
  668. mutex_lock(&queue->lock);
  669. ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
  670. if (ret < 0)
  671. goto done;
  672. rb->count = ret;
  673. ret = 0;
  674. done:
  675. mutex_unlock(&queue->lock);
  676. return ret;
  677. }
  678. /**
  679. * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
  680. *
  681. * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
  682. * returns the status of a given video buffer.
  683. *
  684. * Return 0 on success or -EINVAL if the buffer type or index are invalid.
  685. */
  686. int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
  687. struct v4l2_buffer *vbuf)
  688. {
  689. struct isp_video_buffer *buf;
  690. int ret = 0;
  691. if (vbuf->type != queue->type)
  692. return -EINVAL;
  693. mutex_lock(&queue->lock);
  694. if (vbuf->index >= queue->count) {
  695. ret = -EINVAL;
  696. goto done;
  697. }
  698. buf = queue->buffers[vbuf->index];
  699. isp_video_buffer_query(buf, vbuf);
  700. done:
  701. mutex_unlock(&queue->lock);
  702. return ret;
  703. }
  704. /**
  705. * omap3isp_video_queue_qbuf - Queue a buffer
  706. *
  707. * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
  708. *
  709. * The v4l2_buffer structure passed from userspace is first sanity tested. If
  710. * sane, the buffer is then processed and added to the main queue and, if the
  711. * queue is streaming, to the IRQ queue.
  712. *
  713. * Before being enqueued, USERPTR buffers are checked for address changes. If
  714. * the buffer has a different userspace address, the old memory area is unlocked
  715. * and the new memory area is locked.
  716. */
  717. int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
  718. struct v4l2_buffer *vbuf)
  719. {
  720. struct isp_video_buffer *buf;
  721. unsigned long flags;
  722. int ret = -EINVAL;
  723. if (vbuf->type != queue->type)
  724. goto done;
  725. mutex_lock(&queue->lock);
  726. if (vbuf->index >= queue->count)
  727. goto done;
  728. buf = queue->buffers[vbuf->index];
  729. if (vbuf->memory != buf->vbuf.memory)
  730. goto done;
  731. if (buf->state != ISP_BUF_STATE_IDLE)
  732. goto done;
  733. if (vbuf->memory == V4L2_MEMORY_USERPTR &&
  734. vbuf->length < buf->vbuf.length)
  735. goto done;
  736. if (vbuf->memory == V4L2_MEMORY_USERPTR &&
  737. vbuf->m.userptr != buf->vbuf.m.userptr) {
  738. isp_video_buffer_cleanup(buf);
  739. buf->vbuf.m.userptr = vbuf->m.userptr;
  740. buf->prepared = 0;
  741. }
  742. if (!buf->prepared) {
  743. ret = isp_video_buffer_prepare(buf);
  744. if (ret < 0)
  745. goto done;
  746. buf->prepared = 1;
  747. }
  748. isp_video_buffer_cache_sync(buf);
  749. buf->state = ISP_BUF_STATE_QUEUED;
  750. list_add_tail(&buf->stream, &queue->queue);
  751. if (queue->streaming) {
  752. spin_lock_irqsave(&queue->irqlock, flags);
  753. queue->ops->buffer_queue(buf);
  754. spin_unlock_irqrestore(&queue->irqlock, flags);
  755. }
  756. ret = 0;
  757. done:
  758. mutex_unlock(&queue->lock);
  759. return ret;
  760. }
/**
 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 *
 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 *
 * Wait until the buffer at the front of the queue is ready to be dequeued,
 * remove it from the queue and copy its information to the v4l2_buffer
 * structure pointed to by vbuf.
 *
 * If nonblocking is non-zero and no buffer is ready, isp_video_buffer_wait()
 * is expected to fail immediately instead of sleeping (NOTE(review):
 * presumably with -EAGAIN — confirm against its implementation).
 *
 * Return 0 on success, -EINVAL if the buffer type doesn't match the queue
 * type or the queue is empty, or the error returned by
 * isp_video_buffer_wait().
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	/* Buffers are completed in queue order, so only the front buffer can
	 * ever become ready for dequeuing.
	 */
	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
  798. /**
  799. * omap3isp_video_queue_streamon - Start streaming
  800. *
  801. * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
  802. * starts streaming on the queue and calls the buffer_queue operation for all
  803. * queued buffers.
  804. *
  805. * Return 0 on success.
  806. */
  807. int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
  808. {
  809. struct isp_video_buffer *buf;
  810. unsigned long flags;
  811. mutex_lock(&queue->lock);
  812. if (queue->streaming)
  813. goto done;
  814. queue->streaming = 1;
  815. spin_lock_irqsave(&queue->irqlock, flags);
  816. list_for_each_entry(buf, &queue->queue, stream)
  817. queue->ops->buffer_queue(buf);
  818. spin_unlock_irqrestore(&queue->irqlock, flags);
  819. done:
  820. mutex_unlock(&queue->lock);
  821. return 0;
  822. }
  823. /**
  824. * omap3isp_video_queue_streamoff - Stop streaming
  825. *
  826. * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
  827. * stops streaming on the queue and wakes up all the buffers.
  828. *
  829. * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  830. * delayed works before calling this function to make sure no buffer will be
  831. * touched by the driver and/or hardware.
  832. */
  833. void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
  834. {
  835. struct isp_video_buffer *buf;
  836. unsigned long flags;
  837. unsigned int i;
  838. mutex_lock(&queue->lock);
  839. if (!queue->streaming)
  840. goto done;
  841. queue->streaming = 0;
  842. spin_lock_irqsave(&queue->irqlock, flags);
  843. for (i = 0; i < queue->count; ++i) {
  844. buf = queue->buffers[i];
  845. if (buf->state == ISP_BUF_STATE_ACTIVE)
  846. wake_up(&buf->wait);
  847. buf->state = ISP_BUF_STATE_IDLE;
  848. }
  849. spin_unlock_irqrestore(&queue->irqlock, flags);
  850. INIT_LIST_HEAD(&queue->queue);
  851. done:
  852. mutex_unlock(&queue->lock);
  853. }
  854. /**
  855. * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
  856. *
  857. * This function is intended to be used with suspend/resume operations. It
  858. * discards all 'done' buffers as they would be too old to be requested after
  859. * resume.
  860. *
  861. * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  862. * delayed works before calling this function to make sure no buffer will be
  863. * touched by the driver and/or hardware.
  864. */
  865. void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
  866. {
  867. struct isp_video_buffer *buf;
  868. unsigned int i;
  869. mutex_lock(&queue->lock);
  870. if (!queue->streaming)
  871. goto done;
  872. for (i = 0; i < queue->count; ++i) {
  873. buf = queue->buffers[i];
  874. if (buf->state == ISP_BUF_STATE_DONE)
  875. buf->state = ISP_BUF_STATE_ERROR;
  876. }
  877. done:
  878. mutex_unlock(&queue->lock);
  879. }
  880. static void isp_video_queue_vm_open(struct vm_area_struct *vma)
  881. {
  882. struct isp_video_buffer *buf = vma->vm_private_data;
  883. buf->vma_use_count++;
  884. }
  885. static void isp_video_queue_vm_close(struct vm_area_struct *vma)
  886. {
  887. struct isp_video_buffer *buf = vma->vm_private_data;
  888. buf->vma_use_count--;
  889. }
/* VMA operations: keep a per-buffer use count of userspace mappings. */
static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};
  894. /**
  895. * omap3isp_video_queue_mmap - Map buffers to userspace
  896. *
  897. * This function is intended to be used as an mmap() file operation handler. It
  898. * maps a buffer to userspace based on the VMA offset.
  899. *
  900. * Only buffers of memory type MMAP are supported.
  901. */
  902. int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
  903. struct vm_area_struct *vma)
  904. {
  905. struct isp_video_buffer *uninitialized_var(buf);
  906. unsigned long size;
  907. unsigned int i;
  908. int ret = 0;
  909. mutex_lock(&queue->lock);
  910. for (i = 0; i < queue->count; ++i) {
  911. buf = queue->buffers[i];
  912. if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
  913. break;
  914. }
  915. if (i == queue->count) {
  916. ret = -EINVAL;
  917. goto done;
  918. }
  919. size = vma->vm_end - vma->vm_start;
  920. if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
  921. size != PAGE_ALIGN(buf->vbuf.length)) {
  922. ret = -EINVAL;
  923. goto done;
  924. }
  925. ret = remap_vmalloc_range(vma, buf->vaddr, 0);
  926. if (ret < 0)
  927. goto done;
  928. vma->vm_ops = &isp_video_queue_vm_ops;
  929. vma->vm_private_data = buf;
  930. isp_video_queue_vm_open(vma);
  931. done:
  932. mutex_unlock(&queue->lock);
  933. return ret;
  934. }
  935. /**
  936. * omap3isp_video_queue_poll - Poll video queue state
  937. *
  938. * This function is intended to be used as a poll() file operation handler. It
  939. * polls the state of the video buffer at the front of the queue and returns an
  940. * events mask.
  941. *
  942. * If no buffer is present at the front of the queue, POLLERR is returned.
  943. */
  944. unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
  945. struct file *file, poll_table *wait)
  946. {
  947. struct isp_video_buffer *buf;
  948. unsigned int mask = 0;
  949. mutex_lock(&queue->lock);
  950. if (list_empty(&queue->queue)) {
  951. mask |= POLLERR;
  952. goto done;
  953. }
  954. buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
  955. poll_wait(file, &buf->wait, wait);
  956. if (buf->state == ISP_BUF_STATE_DONE ||
  957. buf->state == ISP_BUF_STATE_ERROR) {
  958. if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
  959. mask |= POLLIN | POLLRDNORM;
  960. else
  961. mask |= POLLOUT | POLLWRNORM;
  962. }
  963. done:
  964. mutex_unlock(&queue->lock);
  965. return mask;
  966. }