vsp1_video.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191
  1. /*
  2. * vsp1_video.c -- R-Car VSP1 Video Node
  3. *
  4. * Copyright (C) 2013-2015 Renesas Electronics Corporation
  5. *
  6. * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. #include <linux/list.h>
  14. #include <linux/module.h>
  15. #include <linux/mutex.h>
  16. #include <linux/slab.h>
  17. #include <linux/v4l2-mediabus.h>
  18. #include <linux/videodev2.h>
  19. #include <linux/wait.h>
  20. #include <media/media-entity.h>
  21. #include <media/v4l2-dev.h>
  22. #include <media/v4l2-fh.h>
  23. #include <media/v4l2-ioctl.h>
  24. #include <media/v4l2-subdev.h>
  25. #include <media/videobuf2-v4l2.h>
  26. #include <media/videobuf2-dma-contig.h>
  27. #include "vsp1.h"
  28. #include "vsp1_bru.h"
  29. #include "vsp1_dl.h"
  30. #include "vsp1_entity.h"
  31. #include "vsp1_pipe.h"
  32. #include "vsp1_rwpf.h"
  33. #include "vsp1_uds.h"
  34. #include "vsp1_video.h"
  35. #define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV
  36. #define VSP1_VIDEO_DEF_WIDTH 1024
  37. #define VSP1_VIDEO_DEF_HEIGHT 768
  38. #define VSP1_VIDEO_MIN_WIDTH 2U
  39. #define VSP1_VIDEO_MAX_WIDTH 8190U
  40. #define VSP1_VIDEO_MIN_HEIGHT 2U
  41. #define VSP1_VIDEO_MAX_HEIGHT 8190U
  42. /* -----------------------------------------------------------------------------
  43. * Helper functions
  44. */
  45. static struct v4l2_subdev *
  46. vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
  47. {
  48. struct media_pad *remote;
  49. remote = media_entity_remote_pad(local);
  50. if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
  51. return NULL;
  52. if (pad)
  53. *pad = remote->index;
  54. return media_entity_to_v4l2_subdev(remote->entity);
  55. }
  56. static int vsp1_video_verify_format(struct vsp1_video *video)
  57. {
  58. struct v4l2_subdev_format fmt;
  59. struct v4l2_subdev *subdev;
  60. int ret;
  61. subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
  62. if (subdev == NULL)
  63. return -EINVAL;
  64. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  65. ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  66. if (ret < 0)
  67. return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  68. if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
  69. video->rwpf->format.height != fmt.format.height ||
  70. video->rwpf->format.width != fmt.format.width)
  71. return -EINVAL;
  72. return 0;
  73. }
/*
 * __vsp1_video_try_format - Validate and adjust a multiplanar pixel format
 * @video: the video node
 * @pix: the pixel format, adjusted in place
 * @fmtinfo: optional output pointer set to the matched format descriptor
 *
 * Adjust @pix to the closest configuration the hardware supports: unsupported
 * fourccs fall back to VSP1_VIDEO_DEF_FORMAT, dimensions are aligned to the
 * format's subsampling and clamped to the hardware limits, and the per-plane
 * strides and image sizes are computed. Always returns 0.
 */
static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/* Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
	if (info == NULL)
		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->field = V4L2_FIELD_NONE;
	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
			    VSP1_VIDEO_MAX_HEIGHT);

	/* Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
	for (i = 0; i < min(info->planes, 2U); ++i) {
		/* Chroma planes (i > 0) are horizontally/vertically subsampled. */
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		/* Lower bound is the tightly packed stride for this plane,
		 * upper bound the largest align-multiple below 65536.
		 */
		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo)
		*fmtinfo = info;

	return 0;
}
  141. /* -----------------------------------------------------------------------------
  142. * VSP1 Partition Algorithm support
  143. */
  144. static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
  145. {
  146. struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
  147. const struct v4l2_mbus_framefmt *format;
  148. struct vsp1_entity *entity;
  149. unsigned int div_size;
  150. format = vsp1_entity_get_pad_format(&pipe->output->entity,
  151. pipe->output->entity.config,
  152. RWPF_PAD_SOURCE);
  153. div_size = format->width;
  154. /* Gen2 hardware doesn't require image partitioning. */
  155. if (vsp1->info->gen == 2) {
  156. pipe->div_size = div_size;
  157. pipe->partitions = 1;
  158. return;
  159. }
  160. list_for_each_entry(entity, &pipe->entities, list_pipe) {
  161. unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH;
  162. if (entity->ops->max_width) {
  163. entity_max = entity->ops->max_width(entity, pipe);
  164. if (entity_max)
  165. div_size = min(div_size, entity_max);
  166. }
  167. }
  168. pipe->div_size = div_size;
  169. pipe->partitions = DIV_ROUND_UP(format->width, div_size);
  170. }
/**
 * vsp1_video_partition - Calculate the active partition output window
 *
 * @pipe: the pipeline
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 *
 * Returns a v4l2_rect describing the partition window.
 */
static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
					     unsigned int div_size,
					     unsigned int index)
{
	const struct v4l2_mbus_framefmt *format;
	struct v4l2_rect partition;
	unsigned int modulus;

	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SOURCE);

	/* A single partition simply processes the output size in full. */
	if (pipe->partitions <= 1) {
		partition.left = 0;
		partition.top = 0;
		partition.width = format->width;
		partition.height = format->height;
		return partition;
	}

	/* Initialise the partition with sane starting conditions. */
	partition.left = index * div_size;
	partition.top = 0;
	partition.width = div_size;
	partition.height = format->height;

	/* Width left over after filling full-size partitions. */
	modulus = format->width % div_size;

	/*
	 * We need to prevent the last partition from being smaller than the
	 * *minimum* width of the hardware capabilities.
	 *
	 * If the modulus is less than half of the partition size,
	 * the penultimate partition is reduced to half, which is added
	 * to the final partition: |1234|1234|1234|12|341|
	 * to prevents this: |1234|1234|1234|1234|1|.
	 */
	if (modulus) {
		/*
		 * pipe->partitions is 1 based, whilst index is a 0 based index.
		 * Normalise this locally.
		 */
		unsigned int partitions = pipe->partitions - 1;

		if (modulus < div_size / 2) {
			if (index == partitions - 1) {
				/* Halve the penultimate partition. */
				partition.width = div_size / 2;
			} else if (index == partitions) {
				/* Increase the final partition, shifting its
				 * start left by the half borrowed from the
				 * penultimate one.
				 */
				partition.width = (div_size / 2) + modulus;
				partition.left -= div_size / 2;
			}
		} else if (index == partitions) {
			/* The remainder is large enough to stand alone. */
			partition.width = modulus;
		}
	}

	return partition;
}
  233. /* -----------------------------------------------------------------------------
  234. * Pipeline Management
  235. */
/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	/* The irqqueue is protected by the video node's irqlock. */
	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	/* In DU output mode reuse the buffer if the list is singular. */
	if (pipe->lif && list_is_singular(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return done;
	}

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	/* Fill in metadata and return the buffer to vb2 outside the lock. */
	done->buf.sequence = pipe->sequence;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}
  283. static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
  284. struct vsp1_rwpf *rwpf)
  285. {
  286. struct vsp1_video *video = rwpf->video;
  287. struct vsp1_vb2_buffer *buf;
  288. buf = vsp1_video_complete_buffer(video);
  289. if (buf == NULL)
  290. return;
  291. video->rwpf->mem = buf->mem;
  292. pipe->buffers_ready |= 1 << video->pipe_index;
  293. }
  294. static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
  295. struct vsp1_dl_list *dl)
  296. {
  297. struct vsp1_entity *entity;
  298. pipe->partition = vsp1_video_partition(pipe, pipe->div_size,
  299. pipe->current_partition);
  300. list_for_each_entry(entity, &pipe->entities, list_pipe) {
  301. if (entity->ops->configure)
  302. entity->ops->configure(entity, pipe, dl,
  303. VSP1_ENTITY_PARAMS_PARTITION);
  304. }
  305. }
/*
 * vsp1_video_pipeline_run - Program and start one frame of the pipeline
 * @pipe: the pipeline
 *
 * Build the display list(s) for the next frame: runtime parameters first, then
 * one display list per partition chained onto the head list, commit the chain
 * and start the hardware.
 */
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;

	/* pipe->dl may have been prepared by vsp1_video_setup_pipeline. */
	if (!pipe->dl)
		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);

	/*
	 * Start with the runtime parameters as the configure operation can
	 * compute/cache information needed when configuring partitions. This
	 * is the case with flipping in the WPF.
	 */
	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_RUNTIME);
	}

	/* Run the first partition */
	pipe->current_partition = 0;
	vsp1_video_pipeline_run_partition(pipe, pipe->dl);

	/* Process consecutive partitions as necessary */
	for (pipe->current_partition = 1;
	     pipe->current_partition < pipe->partitions;
	     pipe->current_partition++) {
		struct vsp1_dl_list *dl;

		/*
		 * Partition configuration operations will utilise
		 * the pipe->current_partition variable to determine
		 * the work they should complete.
		 */
		dl = vsp1_dl_list_get(pipe->output->dlm);

		/*
		 * An incomplete chain will still function, but output only
		 * the partitions that had a dl available. The frame end
		 * interrupt will be marked on the last dl in the chain.
		 */
		if (!dl) {
			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
			break;
		}

		vsp1_video_pipeline_run_partition(pipe, dl);
		vsp1_dl_list_add_chain(pipe->dl, dl);
	}

	/* Complete, and commit the head display list. */
	vsp1_dl_list_commit(pipe->dl);
	/* Ownership of the list passes to the hardware on commit. */
	pipe->dl = NULL;

	vsp1_pipeline_run(pipe);
}
/*
 * vsp1_video_pipeline_frame_end - Frame completion handler
 * @pipe: the pipeline
 *
 * Installed as pipe->frame_end by vsp1_video_pipeline_init. Completes the
 * buffers on every video node, then either wakes up a pending stop or
 * restarts the pipeline when more buffers are ready.
 */
static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/* If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
/*
 * vsp1_video_pipeline_build_branch - Verify one input-to-output branch
 * @pipe: the pipeline
 * @input: the RPF at the head of the branch
 * @output: the WPF the branch must terminate at
 *
 * Follow active source links from @input, recording BRU and UDS usage along
 * the way, and verify the branch has no loop and ends at @output.
 *
 * Return 0 on success or -EPIPE if the branch is invalid (dangling link,
 * video node in the middle, loop, chained UDS, or wrong terminating WPF).
 */
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
					    struct vsp1_rwpf *input,
					    struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	bool bru_found = false;
	int ret;

	/* The enum tracks visited entities for loop detection. */
	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/* A BRU is present in the pipeline, store the BRU input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU) {
			struct vsp1_bru *bru = to_bru(&entity->subdev);

			bru->inputs[pad->index].rpf = input;
			input->bru_input = pad->index;

			bru_found = true;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			/* Remember what feeds the UDS for alpha scaling setup. */
			pipe->uds_input = bru_found ? pipe->bru
					: &input->entity;
		}

		/* Follow the source link. The link setup operations ensure
		 * that the output fan-out can't be more than one, there is thus
		 * no need to verify here that only a single source link is
		 * activated.
		 */
		pad = &entity->pads[entity->source_pad];
		pad = media_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);

	return ret;
}
/*
 * vsp1_video_pipeline_build - Populate a pipeline from the media graph
 * @pipe: the pipeline to populate
 * @video: the video node to start the graph walk from
 *
 * Walk the media graph connected to @video, add every VSP1 entity to the
 * pipeline and record the RPF inputs, WPF output, LIF and BRU. Then validate
 * each input branch with vsp1_video_pipeline_build_branch.
 *
 * Return 0 on success, -EPIPE for an invalid topology, or a negative error
 * code from the graph walk initialisation.
 */
static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
				     struct vsp1_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_entity_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		/* Skip the video nodes, only subdevs join the pipeline. */
		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);

		if (e->type == VSP1_ENTITY_RPF) {
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			/* Input pipe indices start at 1; 0 is the output. */
			rwpf->video->pipe_index = ++pipe->num_inputs;
			rwpf->pipe = pipe;
		} else if (e->type == VSP1_ENTITY_WPF) {
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
			rwpf->pipe = pipe;
		} else if (e->type == VSP1_ENTITY_LIF) {
			pipe->lif = e;
		} else if (e->type == VSP1_ENTITY_BRU) {
			pipe->bru = e;
		}
	}

	media_entity_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output)
		return -EPIPE;

	/* Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
						       pipe->output);
		if (ret < 0)
			return ret;
	}

	return 0;
}
  501. static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
  502. struct vsp1_video *video)
  503. {
  504. vsp1_pipeline_init(pipe);
  505. pipe->frame_end = vsp1_video_pipeline_frame_end;
  506. return vsp1_video_pipeline_build(pipe, video);
  507. }
/*
 * vsp1_video_pipeline_get - Get a reference to the video node's pipeline
 * @video: the video node
 *
 * Return the pipeline pointer on success or an ERR_PTR on allocation or
 * initialisation failure. The reference must be released with
 * vsp1_video_pipeline_put.
 */
static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe;
	int ret;

	/* Get a pipeline object for the video node. If a pipeline has already
	 * been allocated just increment its reference count and return it.
	 * Otherwise allocate a new pipeline and initialize it, it will be freed
	 * when the last reference is released.
	 */
	if (!video->rwpf->pipe) {
		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
		if (!pipe)
			return ERR_PTR(-ENOMEM);

		ret = vsp1_video_pipeline_init(pipe, video);
		if (ret < 0) {
			/* Undo any partial build before freeing. */
			vsp1_pipeline_reset(pipe);
			kfree(pipe);
			return ERR_PTR(ret);
		}
	} else {
		pipe = video->rwpf->pipe;
		kref_get(&pipe->kref);
	}

	return pipe;
}
  533. static void vsp1_video_pipeline_release(struct kref *kref)
  534. {
  535. struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);
  536. vsp1_pipeline_reset(pipe);
  537. kfree(pipe);
  538. }
  539. static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
  540. {
  541. struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
  542. mutex_lock(&mdev->graph_mutex);
  543. kref_put(&pipe->kref, vsp1_video_pipeline_release);
  544. mutex_unlock(&mdev->graph_mutex);
  545. }
  546. /* -----------------------------------------------------------------------------
  547. * videobuf2 Queue Operations
  548. */
  549. static int
  550. vsp1_video_queue_setup(struct vb2_queue *vq,
  551. unsigned int *nbuffers, unsigned int *nplanes,
  552. unsigned int sizes[], struct device *alloc_devs[])
  553. {
  554. struct vsp1_video *video = vb2_get_drv_priv(vq);
  555. const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
  556. unsigned int i;
  557. if (*nplanes) {
  558. if (*nplanes != format->num_planes)
  559. return -EINVAL;
  560. for (i = 0; i < *nplanes; i++)
  561. if (sizes[i] < format->plane_fmt[i].sizeimage)
  562. return -EINVAL;
  563. return 0;
  564. }
  565. *nplanes = format->num_planes;
  566. for (i = 0; i < format->num_planes; ++i)
  567. sizes[i] = format->plane_fmt[i].sizeimage;
  568. return 0;
  569. }
  570. static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
  571. {
  572. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  573. struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
  574. struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
  575. const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
  576. unsigned int i;
  577. if (vb->num_planes < format->num_planes)
  578. return -EINVAL;
  579. for (i = 0; i < vb->num_planes; ++i) {
  580. buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
  581. if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
  582. return -EINVAL;
  583. }
  584. for ( ; i < 3; ++i)
  585. buf->mem.addr[i] = 0;
  586. return 0;
  587. }
/*
 * vsp1_video_buffer_queue - vb2 buf_queue operation
 *
 * Add the buffer to the video node's IRQ queue. When it is the first buffer
 * queued, install its memory on the [RW]PF, mark the video node as ready and
 * kick the pipeline if it is streaming and all nodes have buffers.
 */
static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	/* The queue itself is protected by the video node's irqlock. */
	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	/* A non-empty queue already has a buffer installed on the [RW]PF. */
	if (!empty)
		return;

	/* Pipeline state is protected by the pipeline's irqlock. */
	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_is_streaming(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
/*
 * vsp1_video_setup_pipeline - One-time pipeline configuration at stream start
 * @pipe: the pipeline
 *
 * Compute the partition layout, acquire the initial display list, configure
 * UDS alpha scaling and write the routing and init-time parameters of every
 * entity. Return 0 on success or -ENOMEM if no display list is available.
 */
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;

	/* Determine this pipelines sizes for image partitioning support. */
	vsp1_video_pipeline_setup_partitions(pipe);

	/* Prepare the display list. */
	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!pipe->dl)
		return -ENOMEM;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/* If a BRU is present in the pipeline before the UDS, the alpha
		 * component doesn't need to be scaled as the BRU output alpha
		 * value is fixed to 255. Otherwise we need to scale the alpha
		 * component only when available at the input RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	/* Route and apply the init-time parameters of every entity. */
	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe->dl);

		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_INIT);
	}

	return 0;
}
/*
 * vsp1_video_start_streaming - vb2 start_streaming operation
 *
 * The last video node to start streaming (stream_count reaching num_inputs
 * before the increment) configures the pipeline and, if all buffers are
 * ready, starts it.
 */
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	bool start_pipeline = false;
	unsigned long flags;
	int ret;

	/* stream_count is protected by the pipeline mutex. */
	mutex_lock(&pipe->lock);
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			mutex_unlock(&pipe->lock);
			return ret;
		}

		start_pipeline = true;
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	/*
	 * vsp1_pipeline_ready() is not sufficient to establish that all streams
	 * are prepared and the pipeline is configured, as multiple streams
	 * can race through streamon with buffers already queued; Therefore we
	 * don't even attempt to start the pipeline until the last stream has
	 * called through here.
	 */
	if (!start_pipeline)
		return 0;

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}
/*
 * vsp1_video_stop_streaming - vb2 stop_streaming operation
 *
 * The first video node to stop streaming stops the whole pipeline. All
 * pending buffers on this node's IRQ queue are returned to vb2 with an
 * error state, and the pipeline reference taken at streamon is dropped.
 */
static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;
	int ret;

	/*
	 * Clear the buffers ready flag to make sure the device won't be started
	 * by a QBUF on the video node on the other side of the pipeline.
	 */
	spin_lock_irqsave(&video->irqlock, flags);
	pipe->buffers_ready &= ~(1 << video->pipe_index);
	spin_unlock_irqrestore(&video->irqlock, flags);

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == pipe->num_inputs) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		/* Release the display list prepared at setup, if unused. */
		vsp1_dl_list_put(pipe->dl);
		pipe->dl = NULL;
	}
	mutex_unlock(&pipe->lock);

	media_entity_pipeline_stop(&video->video.entity);
	vsp1_video_pipeline_put(pipe);

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}
/* videobuf2 operations for the VSP1 video node queues. */
static const struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};
  717. /* -----------------------------------------------------------------------------
  718. * V4L2 ioctls
  719. */
  720. static int
  721. vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
  722. {
  723. struct v4l2_fh *vfh = file->private_data;
  724. struct vsp1_video *video = to_vsp1_video(vfh->vdev);
  725. cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
  726. | V4L2_CAP_VIDEO_CAPTURE_MPLANE
  727. | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
  728. if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
  729. cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
  730. | V4L2_CAP_STREAMING;
  731. else
  732. cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
  733. | V4L2_CAP_STREAMING;
  734. strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
  735. strlcpy(cap->card, video->video.name, sizeof(cap->card));
  736. snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
  737. dev_name(video->vsp1->dev));
  738. return 0;
  739. }
  740. static int
  741. vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
  742. {
  743. struct v4l2_fh *vfh = file->private_data;
  744. struct vsp1_video *video = to_vsp1_video(vfh->vdev);
  745. if (format->type != video->queue.type)
  746. return -EINVAL;
  747. mutex_lock(&video->lock);
  748. format->fmt.pix_mp = video->rwpf->format;
  749. mutex_unlock(&video->lock);
  750. return 0;
  751. }
  752. static int
  753. vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
  754. {
  755. struct v4l2_fh *vfh = file->private_data;
  756. struct vsp1_video *video = to_vsp1_video(vfh->vdev);
  757. if (format->type != video->queue.type)
  758. return -EINVAL;
  759. return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
  760. }
  761. static int
  762. vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
  763. {
  764. struct v4l2_fh *vfh = file->private_data;
  765. struct vsp1_video *video = to_vsp1_video(vfh->vdev);
  766. const struct vsp1_format_info *info;
  767. int ret;
  768. if (format->type != video->queue.type)
  769. return -EINVAL;
  770. ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
  771. if (ret < 0)
  772. return ret;
  773. mutex_lock(&video->lock);
  774. if (vb2_is_busy(&video->queue)) {
  775. ret = -EBUSY;
  776. goto done;
  777. }
  778. video->rwpf->format = format->fmt.pix_mp;
  779. video->rwpf->fmtinfo = info;
  780. done:
  781. mutex_unlock(&video->lock);
  782. return ret;
  783. }
/*
 * vsp1_video_streamon - VIDIOC_STREAMON handler
 *
 * Acquire a pipeline for the video node, freeze the media graph topology
 * by starting the media pipeline, verify that the configured format
 * matches the connected subdev, then start the vb2 queue.
 *
 * On error the steps are unwound in reverse order: the media pipeline is
 * stopped and the pipeline reference is released.
 */
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct media_device *mdev = &video->vsp1->media_dev;
	struct vsp1_pipeline *pipe;
	int ret;

	/* Only the file handle that owns the queue may start streaming. */
	if (video->queue.owner && video->queue.owner != file->private_data)
		return -EBUSY;

	/* Get a pipeline for the video node and start streaming on it. No link
	 * touching an entity in the pipeline can be activated or deactivated
	 * once streaming is started.
	 */
	mutex_lock(&mdev->graph_mutex);

	pipe = vsp1_video_pipeline_get(video);
	if (IS_ERR(pipe)) {
		mutex_unlock(&mdev->graph_mutex);
		return PTR_ERR(pipe);
	}

	/* graph_mutex is already held, hence the __ (unlocked) variant. */
	ret = __media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0) {
		mutex_unlock(&mdev->graph_mutex);
		goto err_pipe;
	}

	mutex_unlock(&mdev->graph_mutex);

	/* Verify that the configured format matches the output of the connected
	 * subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_stop;

	return 0;

err_stop:
	media_entity_pipeline_stop(&video->video.entity);
err_pipe:
	vsp1_video_pipeline_put(pipe);
	return ret;
}
/*
 * V4L2 ioctl operations. The format handlers are shared between the
 * capture and output mplane buffer types (the handlers check the type
 * against the queue). Buffer management ioctls are delegated to the
 * generic vb2 helpers; only streamon needs driver-specific pipeline
 * setup, streamoff uses the vb2 helper.
 */
static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap = vsp1_video_querycap,
	.vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vsp1_video_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};
  844. /* -----------------------------------------------------------------------------
  845. * V4L2 File Operations
  846. */
  847. static int vsp1_video_open(struct file *file)
  848. {
  849. struct vsp1_video *video = video_drvdata(file);
  850. struct v4l2_fh *vfh;
  851. int ret = 0;
  852. vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
  853. if (vfh == NULL)
  854. return -ENOMEM;
  855. v4l2_fh_init(vfh, &video->video);
  856. v4l2_fh_add(vfh);
  857. file->private_data = vfh;
  858. ret = vsp1_device_get(video->vsp1);
  859. if (ret < 0) {
  860. v4l2_fh_del(vfh);
  861. kfree(vfh);
  862. }
  863. return ret;
  864. }
  865. static int vsp1_video_release(struct file *file)
  866. {
  867. struct vsp1_video *video = video_drvdata(file);
  868. struct v4l2_fh *vfh = file->private_data;
  869. mutex_lock(&video->lock);
  870. if (video->queue.owner == vfh) {
  871. vb2_queue_release(&video->queue);
  872. video->queue.owner = NULL;
  873. }
  874. mutex_unlock(&video->lock);
  875. vsp1_device_put(video->vsp1);
  876. v4l2_fh_release(file);
  877. file->private_data = NULL;
  878. return 0;
  879. }
/*
 * V4L2 file operations. poll and mmap are delegated to the generic vb2
 * file operation helpers; open and release manage the file handle and the
 * VSP1 device reference.
 */
static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};
  888. /* -----------------------------------------------------------------------------
  889. * Initialization and Cleanup
  890. */
/*
 * vsp1_video_create - Create and register a video node for a [RW]PF entity
 * @vsp1: the VSP1 device
 * @rwpf: the RPF or WPF entity to attach the video node to
 *
 * Allocate a video node, initialize its media pad, default format, vb2
 * queue and video device, and register the device. RPF entities get a
 * V4L2 output (OUTPUT_MPLANE) node with a source pad; all other entities
 * get a capture (CAPTURE_MPLANE) node with a sink pad.
 *
 * Return a pointer to the new video node, or an ERR_PTR() on failure. The
 * node is allocated with devm_kzalloc() and thus freed with the device.
 */
struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
				     struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video;
	const char *direction;
	int ret;

	video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
	if (!video)
		return ERR_PTR(-ENOMEM);

	rwpf->video = video;

	video->vsp1 = vsp1;
	video->rwpf = rwpf;

	/* The node direction follows the entity type: RPFs read memory
	 * (V4L2 output), everything else writes memory (V4L2 capture).
	 */
	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
		direction = "input";
		video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
	} else {
		direction = "output";
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->video.vfl_dir = VFL_DIR_RX;
	}

	mutex_init(&video->lock);
	spin_lock_init(&video->irqlock);
	INIT_LIST_HEAD(&video->irqqueue);

	/* Initialize the media entity... */
	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ERR_PTR(ret);

	/* ... and the format ... */
	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;

	/* Clamp the default format to the hardware constraints. */
	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

	/* ... and the video node... */
	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
	video->video.fops = &vsp1_video_fops;
	snprintf(video->video.name, sizeof(video->video.name), "%s %s",
		 rwpf->entity.subdev.name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &vsp1_video_ioctl_ops;

	video_set_drvdata(&video->video, video);

	video->queue.type = video->type;
	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	video->queue.lock = &video->lock;
	video->queue.drv_priv = video;
	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
	video->queue.ops = &vsp1_video_queue_qops;
	video->queue.mem_ops = &vb2_dma_contig_memops;
	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	video->queue.dev = video->vsp1->dev;
	ret = vb2_queue_init(&video->queue);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	video->video.queue = &video->queue;
	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return video;

error:
	vsp1_video_cleanup(video);
	return ERR_PTR(ret);
}
  961. void vsp1_video_cleanup(struct vsp1_video *video)
  962. {
  963. if (video_is_registered(&video->video))
  964. video_unregister_device(&video->video);
  965. media_entity_cleanup(&video->video.entity);
  966. }