/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_bru.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"

#define VSP1_VIDEO_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH		1024
#define VSP1_VIDEO_DEF_HEIGHT		768

#define VSP1_VIDEO_MIN_WIDTH		2U
#define VSP1_VIDEO_MAX_WIDTH		8190U
#define VSP1_VIDEO_MIN_HEIGHT		2U
#define VSP1_VIDEO_MAX_HEIGHT		8190U

/* -----------------------------------------------------------------------------
 * Helper functions
 */
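
/*
 * vsp1_video_remote_subdev - Return the subdev connected to the video node pad
 *
 * Return the remote subdevice connected to @local, or NULL if the pad is not
 * linked or the remote entity is not a V4L2 subdevice. When @pad is not NULL
 * it is set to the index of the remote pad.
 */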
static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}
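
/*
 * vsp1_video_verify_format - Check that the video node and subdev formats match
 *
 * Compare the active format on the subdev pad connected to the video node with
 * the format stored in the RPF/WPF. Return 0 when the media bus code, width
 * and height match, or a negative error code otherwise.
 */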
static int vsp1_video_verify_format(struct vsp1_video *video)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
	    video->rwpf->format.height != fmt.format.height ||
	    video->rwpf->format.width != fmt.format.width)
		return -EINVAL;

	return 0;
}
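
/*
 * __vsp1_video_try_format - Adjust a pixel format to the hardware constraints
 *
 * Replace deprecated RGB formats by their XRGB equivalent, fall back to the
 * default format when the requested format isn't supported, align and clamp
 * the frame size, and compute the strides and plane sizes. When @fmtinfo is
 * not NULL it is set to the format information for the selected pixel format.
 */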
static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/*
	 * Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/*
	 * Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
	if (info == NULL)
		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->field = V4L2_FIELD_NONE;

	if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
	    info->fourcc == V4L2_PIX_FMT_HSV32)
		pix->hsv_enc = V4L2_HSV_ENC_256;

	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
			    VSP1_VIDEO_MAX_HEIGHT);

	/*
	 * Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
	for (i = 0; i < min(info->planes, 2U); ++i) {
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo)
		*fmtinfo = info;

	return 0;
}

/* -----------------------------------------------------------------------------
 * VSP1 Partition Algorithm support
 */

/**
 * vsp1_video_calculate_partition - Calculate the active partition output window
 *
 * @pipe: the pipeline
 * @partition: partition that will hold the calculated values
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 */
static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
					   struct vsp1_partition *partition,
					   unsigned int div_size,
					   unsigned int index)
{
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_partition_window window;
	unsigned int modulus;

	/*
	 * Partitions are computed on the size before rotation, use the format
	 * at the WPF sink.
	 */
	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);

	/* A single partition simply processes the output size in full. */
	if (pipe->partitions <= 1) {
		window.left = 0;
		window.width = format->width;

		vsp1_pipeline_propagate_partition(pipe, partition, index,
						  &window);
		return;
	}

	/* Initialise the partition with sane starting conditions. */
	window.left = index * div_size;
	window.width = div_size;

	modulus = format->width % div_size;

	/*
	 * We need to prevent the last partition from being smaller than the
	 * *minimum* width of the hardware capabilities.
	 *
	 * If the modulus is less than half of the partition size, the
	 * penultimate partition is reduced to half, which is added to the
	 * final partition:  |1234|1234|1234|12|341|
	 * to prevent this:  |1234|1234|1234|1234|1|.
	 */
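	/*
	 * Worked example (illustrative values, not taken from the datasheet):
	 * with format->width = 5000 and div_size = 2048 there are three
	 * partitions and modulus = 904. As 904 < 2048 / 2, the penultimate
	 * partition (index 1) is halved to 1024 pixels and the final partition
	 * (index 2) becomes 1024 + 904 = 1928 pixels starting at 3072, so the
	 * three windows still cover 2048 + 1024 + 1928 = 5000 pixels.
	 */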
	if (modulus) {
		/*
		 * pipe->partitions is 1 based, whilst index is a 0 based index.
		 * Normalise this locally.
		 */
		unsigned int partitions = pipe->partitions - 1;

		if (modulus < div_size / 2) {
			if (index == partitions - 1) {
				/* Halve the penultimate partition. */
				window.width = div_size / 2;
			} else if (index == partitions) {
				/* Increase the final partition. */
				window.width = (div_size / 2) + modulus;
				window.left -= div_size / 2;
			}
		} else if (index == partitions) {
			window.width = modulus;
		}
	}

	vsp1_pipeline_propagate_partition(pipe, partition, index, &window);
}
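
/*
 * vsp1_video_pipeline_setup_partitions - Compute the pipeline partition table
 *
 * Determine the maximum partition width supported by the entities in the
 * pipeline, derive the number of partitions needed to cover the output width,
 * and allocate and fill the partition table.
 */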
static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_entity *entity;
	unsigned int div_size;
	unsigned int i;

	/*
	 * Partitions are computed on the size before rotation, use the format
	 * at the WPF sink.
	 */
	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);
	div_size = format->width;

	/*
	 * Only Gen3 hardware requires image partitioning, Gen2 will operate
	 * with a single partition that covers the whole output.
	 */
	if (vsp1->info->gen == 3) {
		list_for_each_entry(entity, &pipe->entities, list_pipe) {
			unsigned int entity_max;

			if (!entity->ops->max_width)
				continue;

			entity_max = entity->ops->max_width(entity, pipe);
			if (entity_max)
				div_size = min(div_size, entity_max);
		}
	}

	pipe->partitions = DIV_ROUND_UP(format->width, div_size);
	pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
				   GFP_KERNEL);
	if (!pipe->part_table)
		return -ENOMEM;

	for (i = 0; i < pipe->partitions; ++i)
		vsp1_video_calculate_partition(pipe, &pipe->part_table[i],
					       div_size, i);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	/* In DU output mode reuse the buffer if the list is singular. */
	if (pipe->lif && list_is_singular(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return done;
	}

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	done->buf.sequence = pipe->sequence;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}
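
/*
 * vsp1_video_frame_end - Complete the current buffer for a video node
 *
 * Complete the buffer currently being processed and, when another buffer is
 * available, store its memory in the RPF/WPF and mark the video node as ready
 * in the pipeline buffers_ready mask.
 */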
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
				 struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video = rwpf->video;
	struct vsp1_vb2_buffer *buf;

	buf = vsp1_video_complete_buffer(video);
	if (buf == NULL)
		return;

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;
}

static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
					      struct vsp1_dl_list *dl,
					      unsigned int partition)
{
	struct vsp1_entity *entity;

	pipe->partition = &pipe->part_table[partition];

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, dl,
					       VSP1_ENTITY_PARAMS_PARTITION);
	}
}
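
/*
 * vsp1_video_pipeline_run - Configure and start processing of one frame
 *
 * Fill the head display list with the runtime parameters, configure one
 * display list per image partition and chain it to the head list, then commit
 * the head list and start the pipeline.
 */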
static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;
	unsigned int partition;

	if (!pipe->dl)
		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);

	/*
	 * Start with the runtime parameters as the configure operation can
	 * compute/cache information needed when configuring partitions. This
	 * is the case with flipping in the WPF.
	 */
	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_RUNTIME);
	}

	/* Run the first partition */
	vsp1_video_pipeline_run_partition(pipe, pipe->dl, 0);

	/* Process consecutive partitions as necessary */
	for (partition = 1; partition < pipe->partitions; ++partition) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_get(pipe->output->dlm);

		/*
		 * An incomplete chain will still function, but output only
		 * the partitions that had a dl available. The frame end
		 * interrupt will be marked on the last dl in the chain.
		 */
		if (!dl) {
			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
			break;
		}

		vsp1_video_pipeline_run_partition(pipe, dl, partition);
		vsp1_dl_list_add_chain(pipe->dl, dl);
	}

	/* Complete, and commit the head display list. */
	vsp1_dl_list_commit(pipe->dl);
	pipe->dl = NULL;

	vsp1_pipeline_run(pipe);
}

static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
					  bool completed)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	/* M2M Pipelines should never call here with an incomplete frame. */
	WARN_ON_ONCE(!completed);

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/*
	 * If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
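
/*
 * vsp1_video_pipeline_build_branch - Validate one branch of the pipeline
 *
 * Walk the graph from the @input RPF to the @output WPF, recording the BRU/BRS
 * input pad and the UDS position along the way, and verify that the branch
 * contains no loop, no chained BRU/BRS or UDS, and ends at the output WPF.
 * Return 0 on success or a negative error code otherwise.
 */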
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
					    struct vsp1_rwpf *input,
					    struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	struct vsp1_bru *bru = NULL;
	int ret;

	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	/*
	 * The main data path doesn't include the HGO or HGT, use
	 * vsp1_entity_remote_pad() to traverse the graph.
	 */
	pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/*
		 * A BRU or BRS is present in the pipeline, store its input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU ||
		    entity->type == VSP1_ENTITY_BRS) {
			/* BRU and BRS can't be chained. */
			if (bru) {
				ret = -EPIPE;
				goto out;
			}

			bru = to_bru(&entity->subdev);
			bru->inputs[pad->index].rpf = input;
			input->bru_input = pad->index;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			pipe->uds_input = bru ? &bru->entity : &input->entity;
		}

		/* Follow the source link, ignoring any HGO or HGT. */
		pad = &entity->pads[entity->source_pad];
		pad = vsp1_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);
	return ret;
}
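
/*
 * vsp1_video_pipeline_build - Build the pipeline for a video node
 *
 * Walk the media graph connected to the video node to collect the VSP1
 * entities, record the pipeline inputs, output, LIF, BRU/BRS, HGO and HGT,
 * and then validate every branch from an input RPF to the output WPF.
 */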
static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
				     struct vsp1_video *video)
{
	struct media_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);

		switch (e->type) {
		case VSP1_ENTITY_RPF:
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			rwpf->video->pipe_index = ++pipe->num_inputs;
			rwpf->pipe = pipe;
			break;

		case VSP1_ENTITY_WPF:
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
			rwpf->pipe = pipe;
			break;

		case VSP1_ENTITY_LIF:
			pipe->lif = e;
			break;

		case VSP1_ENTITY_BRU:
		case VSP1_ENTITY_BRS:
			pipe->bru = e;
			break;

		case VSP1_ENTITY_HGO:
			pipe->hgo = e;
			to_hgo(subdev)->histo.pipe = pipe;
			break;

		case VSP1_ENTITY_HGT:
			pipe->hgt = e;
			to_hgt(subdev)->histo.pipe = pipe;
			break;

		default:
			break;
		}
	}

	media_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output)
		return -EPIPE;

	/*
	 * Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
						       pipe->output);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
				    struct vsp1_video *video)
{
	vsp1_pipeline_init(pipe);

	pipe->frame_end = vsp1_video_pipeline_frame_end;

	return vsp1_video_pipeline_build(pipe, video);
}

static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe;
	int ret;

	/*
	 * Get a pipeline object for the video node. If a pipeline has already
	 * been allocated just increment its reference count and return it.
	 * Otherwise allocate a new pipeline and initialize it, it will be freed
	 * when the last reference is released.
	 */
	if (!video->rwpf->pipe) {
		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
		if (!pipe)
			return ERR_PTR(-ENOMEM);

		ret = vsp1_video_pipeline_init(pipe, video);
		if (ret < 0) {
			vsp1_pipeline_reset(pipe);
			kfree(pipe);
			return ERR_PTR(ret);
		}
	} else {
		pipe = video->rwpf->pipe;
		kref_get(&pipe->kref);
	}

	return pipe;
}

static void vsp1_video_pipeline_release(struct kref *kref)
{
	struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

	vsp1_pipeline_reset(pipe);
	kfree(pipe);
}

static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

	mutex_lock(&mdev->graph_mutex);
	kref_put(&pipe->kref, vsp1_video_pipeline_release);
	mutex_unlock(&mdev->graph_mutex);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */

static int
vsp1_video_queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (*nplanes) {
		if (*nplanes != format->num_planes)
			return -EINVAL;

		for (i = 0; i < *nplanes; i++)
			if (sizes[i] < format->plane_fmt[i].sizeimage)
				return -EINVAL;
		return 0;
	}

	*nplanes = format->num_planes;

	for (i = 0; i < format->num_planes; ++i)
		sizes[i] = format->plane_fmt[i].sizeimage;

	return 0;
}

static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (vb->num_planes < format->num_planes)
		return -EINVAL;

	for (i = 0; i < vb->num_planes; ++i) {
		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
			return -EINVAL;
	}

	for ( ; i < 3; ++i)
		buf->mem.addr[i] = 0;

	return 0;
}

static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	if (!empty)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_is_streaming(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
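
/*
 * vsp1_video_setup_pipeline - Perform the one-time pipeline setup
 *
 * Compute the partition table, acquire the initial display list, configure
 * alpha scaling for the UDS when present, and write the routing and
 * initialization parameters of every entity to the display list.
 */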
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;
	int ret;

	/* Determine this pipeline's sizes for image partitioning support. */
	ret = vsp1_video_pipeline_setup_partitions(pipe);
	if (ret < 0)
		return ret;

	/* Prepare the display list. */
	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!pipe->dl)
		return -ENOMEM;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/*
		 * If a BRU or BRS is present in the pipeline before the UDS,
		 * the alpha component doesn't need to be scaled as the BRU and
		 * BRS output alpha value is fixed to 255. Otherwise we need to
		 * scale the alpha component only when available at the input
		 * RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
		    pipe->uds_input->type == VSP1_ENTITY_BRS) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe, pipe->dl);

		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_INIT);
	}

	return 0;
}

static void vsp1_video_release_buffers(struct vsp1_video *video)
{
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}

static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
{
	lockdep_assert_held(&pipe->lock);

	/* Release our partition table allocation */
	kfree(pipe->part_table);
	pipe->part_table = NULL;

	vsp1_dl_list_put(pipe->dl);
	pipe->dl = NULL;
}

static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	bool start_pipeline = false;
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			vsp1_video_release_buffers(video);
			vsp1_video_cleanup_pipeline(pipe);
			mutex_unlock(&pipe->lock);
			return ret;
		}

		start_pipeline = true;
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	/*
	 * vsp1_pipeline_ready() is not sufficient to establish that all streams
	 * are prepared and the pipeline is configured, as multiple streams
	 * can race through streamon with buffers already queued; therefore we
	 * don't even attempt to start the pipeline until the last stream has
	 * called through here.
	 */
	if (!start_pipeline)
		return 0;

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}

static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	unsigned long flags;
	int ret;

	/*
	 * Clear the buffers ready flag to make sure the device won't be started
	 * by a QBUF on the video node on the other side of the pipeline.
	 */
	spin_lock_irqsave(&video->irqlock, flags);
	pipe->buffers_ready &= ~(1 << video->pipe_index);
	spin_unlock_irqrestore(&video->irqlock, flags);

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == pipe->num_inputs) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		vsp1_video_cleanup_pipeline(pipe);
	}
	mutex_unlock(&pipe->lock);

	media_pipeline_stop(&video->video.entity);
	vsp1_video_release_buffers(video);
	vsp1_video_pipeline_put(pipe);
}

static const struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_VIDEO_CAPTURE_MPLANE
			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
				 | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
				 | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(video->vsp1->dev));

	return 0;
}

static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	mutex_lock(&video->lock);
	format->fmt.pix_mp = video->rwpf->format;
	mutex_unlock(&video->lock);

	return 0;
}

static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}

static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;
	int ret;

	if (format->type != video->queue.type)
		return -EINVAL;

	ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
	if (ret < 0)
		return ret;

	mutex_lock(&video->lock);

	if (vb2_is_busy(&video->queue)) {
		ret = -EBUSY;
		goto done;
	}

	video->rwpf->format = format->fmt.pix_mp;
	video->rwpf->fmtinfo = info;

done:
	mutex_unlock(&video->lock);
	return ret;
}

static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct media_device *mdev = &video->vsp1->media_dev;
	struct vsp1_pipeline *pipe;
	int ret;

	if (video->queue.owner && video->queue.owner != file->private_data)
		return -EBUSY;

	/*
	 * Get a pipeline for the video node and start streaming on it. No link
	 * touching an entity in the pipeline can be activated or deactivated
	 * once streaming is started.
	 */
	mutex_lock(&mdev->graph_mutex);

	pipe = vsp1_video_pipeline_get(video);
	if (IS_ERR(pipe)) {
		mutex_unlock(&mdev->graph_mutex);
		return PTR_ERR(pipe);
	}

	ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0) {
		mutex_unlock(&mdev->graph_mutex);
		goto err_pipe;
	}

	mutex_unlock(&mdev->graph_mutex);

	/*
	 * Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_stop;

	return 0;

err_stop:
	media_pipeline_stop(&video->video.entity);
err_pipe:
	vsp1_video_pipeline_put(pipe);
	return ret;
}

static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap		= vsp1_video_querycap,
	.vidioc_g_fmt_vid_cap_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane	= vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane	= vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane	= vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane	= vsp1_video_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vsp1_video_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

static int vsp1_video_open(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh;
	int ret = 0;

	vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
	if (vfh == NULL)
		return -ENOMEM;

	v4l2_fh_init(vfh, &video->video);
	v4l2_fh_add(vfh);

	file->private_data = vfh;

	ret = vsp1_device_get(video->vsp1);
	if (ret < 0) {
		v4l2_fh_del(vfh);
		v4l2_fh_exit(vfh);
		kfree(vfh);
	}

	return ret;
}

static int vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;

	mutex_lock(&video->lock);
	if (video->queue.owner == vfh) {
		vb2_queue_release(&video->queue);
		video->queue.owner = NULL;
	}
	mutex_unlock(&video->lock);

	vsp1_device_put(video->vsp1);

	v4l2_fh_release(file);

	file->private_data = NULL;

	return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
				     struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video;
	const char *direction;
	int ret;

	video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
	if (!video)
		return ERR_PTR(-ENOMEM);

	rwpf->video = video;

	video->vsp1 = vsp1;
	video->rwpf = rwpf;

	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
		direction = "input";
		video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
	} else {
		direction = "output";
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->video.vfl_dir = VFL_DIR_RX;
	}

	mutex_init(&video->lock);
	spin_lock_init(&video->irqlock);
	INIT_LIST_HEAD(&video->irqqueue);

	/* Initialize the media entity... */
	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ERR_PTR(ret);

	/* ... and the format ... */
	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;

	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

	/* ... and the video node... */
	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
	video->video.fops = &vsp1_video_fops;
	snprintf(video->video.name, sizeof(video->video.name), "%s %s",
		 rwpf->entity.subdev.name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &vsp1_video_ioctl_ops;

	video_set_drvdata(&video->video, video);

	video->queue.type = video->type;
	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	video->queue.lock = &video->lock;
	video->queue.drv_priv = video;
	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
	video->queue.ops = &vsp1_video_queue_qops;
	video->queue.mem_ops = &vb2_dma_contig_memops;
	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	video->queue.dev = video->vsp1->bus_master;
	ret = vb2_queue_init(&video->queue);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	video->video.queue = &video->queue;
	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return video;

error:
	vsp1_video_cleanup(video);
	return ERR_PTR(ret);
}

void vsp1_video_cleanup(struct vsp1_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);

	media_entity_cleanup(&video->video.entity);
}