/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "vsp1.h"
#include "vsp1_dl.h"

#define VSP1_DL_NUM_ENTRIES		256

#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));

struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));

struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));

/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list's list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};

/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: true if the display list is the head of a chain
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};

enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};

/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: list of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: list of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */

/*
 * Initialize a display list body object and allocate DMA memory for the body
 * data. The display list body object is expected to have been initialized to
 * 0 when allocated.
 */
static int vsp1_dl_body_init(struct vsp1_device *vsp1,
			     struct vsp1_dl_body *dlb, unsigned int num_entries,
			     size_t extra_size)
{
	size_t size = num_entries * sizeof(*dlb->entries) + extra_size;

	dlb->vsp1 = vsp1;
	dlb->size = size;

	dlb->entries = dma_alloc_wc(vsp1->bus_master, dlb->size, &dlb->dma,
				    GFP_KERNEL);
	if (!dlb->entries)
		return -ENOMEM;

	return 0;
}

/*
 * Clean up a display list body and free the allocated DMA memory.
 */
static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
{
	dma_free_wc(dlb->vsp1->bus_master, dlb->size, dlb->entries, dlb->dma);
}

/**
 * vsp1_dl_fragment_alloc - Allocate a display list fragment
 * @vsp1: The VSP1 device
 * @num_entries: The maximum number of entries that the fragment can contain
 *
 * Allocate a display list fragment with enough memory to contain the requested
 * number of entries.
 *
 * Return a pointer to a fragment on success or NULL if memory can't be
 * allocated.
 */
struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
					    unsigned int num_entries)
{
	struct vsp1_dl_body *dlb;
	int ret;

	dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
	if (!dlb)
		return NULL;

	ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
	if (ret < 0) {
		kfree(dlb);
		return NULL;
	}

	return dlb;
}

/**
 * vsp1_dl_fragment_free - Free a display list fragment
 * @dlb: The fragment
 *
 * Free the given display list fragment and the associated DMA memory.
 *
 * Fragments must only be freed explicitly if they are not added to a display
 * list, as the display list will take ownership of them and free them
 * otherwise. Manual free typically happens at cleanup time for fragments that
 * have been allocated but not used.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
{
	if (!dlb)
		return;

	vsp1_dl_body_cleanup(dlb);
	kfree(dlb);
}

/**
 * vsp1_dl_fragment_write - Write a register to a display list fragment
 * @dlb: The fragment
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list fragment. The maximum
 * number of entries that can be written in a fragment is specified when the
 * fragment is allocated by vsp1_dl_fragment_alloc().
 */
void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}

/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */
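
/*
 * Allocate a display list and initialize its first body. In header mode the
 * header is stored in the same DMA buffer, right after the body entries, and
 * the address of the first body is pre-filled in the header.
 */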
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_size;
	int ret;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->fragments);
	dl->dlm = dlm;

	/*
	 * Initialize the display list body and allocate DMA memory for the
	 * body and the optional header. Both are allocated together to avoid
	 * memory fragmentation, with the header located right after the body
	 * in memory.
	 */
	header_size = dlm->mode == VSP1_DL_MODE_HEADER
		    ? ALIGN(sizeof(struct vsp1_dl_header), 8)
		    : 0;

	ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
				header_size);
	if (ret < 0) {
		kfree(dl);
		return NULL;
	}

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		size_t header_offset = VSP1_DL_NUM_ENTRIES
				     * sizeof(*dl->body0.entries);

		dl->header = ((void *)dl->body0.entries) + header_offset;
		dl->dma = dl->body0.dma + header_offset;

		memset(dl->header, 0, sizeof(*dl->header));
		dl->header->lists[0].addr = dl->body0.dma;
	}

	return dl;
}
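
/*
 * Free a display list. The first body is freed with the list, while any extra
 * fragments are moved to the manager's garbage collection list to be freed
 * later.
 */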
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a
		 * chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display lists that were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed, they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_write - Write a register to the display list
 * @dl: The display list
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list. Up to 256 registers
 * can be written per display list.
 */
void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	vsp1_dl_fragment_write(&dl->body0, reg, data);
}

/**
 * vsp1_dl_list_add_fragment - Add a fragment to the display list
 * @dl: The display list
 * @dlb: The fragment
 *
 * Add a display list body as a fragment to a display list. Registers contained
 * in fragments are processed after registers contained in the main display
 * list, in the order in which fragments are added.
 *
 * Adding a fragment to a display list passes ownership of the fragment to the
 * list. The caller must not touch the fragment after this call, and must not
 * free it explicitly with vsp1_dl_fragment_free().
 *
 * Fragments are only usable for display lists in header mode. Attempting to
 * add a fragment to a headerless display list will return an error.
 */
int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
			      struct vsp1_dl_body *dlb)
{
	/* Multi-body lists are only available in header mode. */
	if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	list_add_tail(&dlb->list, &dl->fragments);
	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head display
 * list is put back with __vsp1_dl_list_put().
 *
 * Chained display lists are only usable in header mode. Attempting to add a
 * display list to a chain in headerless mode will return an error.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
			   struct vsp1_dl_list *dl)
{
	/* Chained lists are only available in header mode. */
	if (head->dlm->mode != VSP1_DL_MODE_HEADER)
		return -EINVAL;

	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}
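
/*
 * Fill the display list header with the addresses and sizes of all bodies,
 * and set up the next header address and flags so that the list either
 * chains to the next list, loops on itself in continuous mode, or raises an
 * interrupt without auto-start in single-shot mode.
 */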
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * If this display list's chain is not empty, we are on a list,
		 * where the next item is the display list that we must queue
		 * for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * VSP should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
		 * and the next display list must not be started automatically.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
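
/*
 * Return true if a display list has been queued to the hardware but not taken
 * into account by it yet.
 */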
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. In headerless mode the
	 * hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
	 * register, and in header mode by clearing the UPDHDR bit in the CMD
	 * register.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADERLESS)
		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
			  & VI6_DL_BODY_SIZE_UPD);
	else
		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
			  & VI6_CMD_UPDHDR);
}
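
/*
 * Program a display list to the hardware, using the body address and size
 * directly in headerless mode, or the header address in header mode.
 */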
static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (dlm->mode == VSP1_DL_MODE_HEADERLESS) {
		/*
		 * In headerless mode, program the hardware directly with the
		 * display list body address and size and set the UPD bit. The
		 * bit will be cleared by the hardware when the display list
		 * processing starts.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries * sizeof(*dl->header->lists)));
	} else {
		/*
		 * In header mode, program the display list header address. If
		 * the hardware is idle (single-shot mode or first frame in
		 * continuous mode) it will then be started independently. If
		 * the hardware is operating, the VI6_DL_HDR_REF_ADDR register
		 * will be updated with the display list address.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
	}
}
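
/*
 * Commit a display list in continuous mode: queue it to the hardware
 * immediately when safe to do so, otherwise leave it pending for the frame
 * end interrupt handler to queue.
 */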
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending, it will
	 * be queued up to the hardware by the frame end interrupt handler.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}
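
/*
 * Commit a display list in single-shot mode: the hardware is guaranteed to
 * be idle, so the list can be queued directly and becomes active right away.
 */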
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}
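
/**
 * vsp1_dl_list_commit - Commit a display list to the hardware
 * @dl: The display list
 *
 * Fill the headers of the display list and of all chained display lists when
 * operating in header mode, then hand the list over to the hardware using
 * the single-shot or continuous commit method depending on the display list
 * manager operation mode.
 */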
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_child;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}
	}

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return true if the previous display list has completed at frame end, or
 * false if it has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns true in
 * single-shot mode as display list processing is then not continuous and
 * races never occur.
 */
bool vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	bool completed = false;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		completed = true;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		completed = true;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return completed;
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
		 | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
		 | VI6_DL_CTRL_DLE;

	/*
	 * The DRM pipeline operates with display lists in Continuous Frame
	 * Mode, all other pipelines use manual start.
	 */
	if (vsp1->drm)
		ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}
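
/**
 * vsp1_dlm_reset - Release all display lists held by the manager
 * @dlm: The display list manager
 *
 * Release the active, queued and pending display lists and return them to
 * the pool of free lists.
 */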
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

/*
 * Free all fragments waiting to be garbage-collected. The lock is dropped
 * around each vsp1_dl_fragment_free() call as DMA memory can't be freed with
 * a spinlock held.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}
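
/* Work queue handler that frees the fragments queued for garbage collection. */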
static void vsp1_dlm_garbage_collect(struct work_struct *work)
{
	struct vsp1_dl_manager *dlm =
		container_of(work, struct vsp1_dl_manager, gc_work);

	vsp1_dlm_fragments_free(dlm);
}
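
/**
 * vsp1_dlm_create - Create a display list manager
 * @vsp1: The VSP1 device
 * @index: Index of the related WPF
 * @prealloc: Number of display lists to preallocate
 *
 * Allocate and initialize a display list manager, preallocating @prealloc
 * display lists in the free pool. WPF.0 operates in headerless mode when the
 * device has no userspace API; all other configurations use header mode.
 *
 * Return a pointer to the manager on success or NULL if memory can't be
 * allocated.
 */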
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->mode = index == 0 && !vsp1->info->uapi
		  ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);
	INIT_LIST_HEAD(&dlm->gc_fragments);
	INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl)
			return NULL;

		list_add_tail(&dl->list, &dlm->free);
	}

	return dlm;
}
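
/**
 * vsp1_dlm_destroy - Destroy a display list manager
 * @dlm: The display list manager
 *
 * Cancel any pending garbage collection work, free all display lists in the
 * free pool and release the fragments waiting to be freed.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */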
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	cancel_work_sync(&dlm->gc_work);

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dlm_fragments_free(dlm);
}