vsp1_dl.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729
  1. /*
* vsp1_dl.c -- R-Car VSP1 Display List
  3. *
  4. * Copyright (C) 2015 Renesas Corporation
  5. *
  6. * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. */
  13. #include <linux/device.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/gfp.h>
  16. #include <linux/slab.h>
  17. #include <linux/workqueue.h>
  18. #include "vsp1.h"
  19. #include "vsp1_dl.h"
  20. #define VSP1_DL_NUM_ENTRIES 256
  21. #define VSP1_DLH_INT_ENABLE (1 << 1)
  22. #define VSP1_DLH_AUTO_START (1 << 0)
/*
 * struct vsp1_dl_header_list - One body reference in the display list header
 * @num_bytes: size in bytes of the body pointed to by @addr
 * @addr: DMA address of the display list body
 *
 * Layout is consumed by the hardware (written through VI6_DL_HDR_ADDR), so
 * the structure must remain packed with fields in this exact order.
 */
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __attribute__((__packed__));
/*
 * struct vsp1_dl_header - Display list header, as read by the hardware
 * @num_lists: number of entries in @lists beyond the first (see
 *	vsp1_dl_list_fill_header(), which counts fragments only)
 * @lists: body references; lists[0] is the main body, the rest are fragments
 * @next_header: DMA address of the next header, valid when
 *	VSP1_DLH_AUTO_START is set in @flags
 * @flags: VSP1_DLH_INT_ENABLE and/or VSP1_DLH_AUTO_START
 *
 * Hardware-defined layout; must remain packed.
 */
struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __attribute__((__packed__));
/*
 * struct vsp1_dl_entry - A single register write in a display list body
 * @addr: register address
 * @data: value to be written to @addr
 *
 * Hardware-defined layout; must remain packed.
 */
struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __attribute__((__packed__));
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @vsp1: the VSP1 device
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes (may include extra space for an
 *	optional header located right after the entries, see
 *	vsp1_dl_body_init())
 * @num_entries: number of stored entries (not the capacity)
 */
struct vsp1_dl_body {
	struct list_head list;
	struct vsp1_device *vsp1;

	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
};
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @fragments: list of extra display list bodies
 * @has_chain: whether the list has at least one chained display list attached
 * @chain: entry in the display list partition chain
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body body0;
	struct list_head fragments;

	bool has_chain;
	struct list_head chain;
};
/*
 * Display list operation mode: with a hardware-parsed header (supports
 * fragments and chaining) or headerless (body programmed directly through
 * the VI6_DL_HDR_ADDR/VI6_DL_BODY_SIZE registers).
 */
enum vsp1_dl_mode {
	VSP1_DL_MODE_HEADER,
	VSP1_DL_MODE_HEADERLESS,
};
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, pending and gc_fragments lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @gc_work: fragments garbage collector work struct
 * @gc_fragments: array of display list fragments waiting to be freed
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct work_struct gc_work;
	struct list_head gc_fragments;
};
  103. /* -----------------------------------------------------------------------------
  104. * Display List Body Management
  105. */
  106. /*
  107. * Initialize a display list body object and allocate DMA memory for the body
  108. * data. The display list body object is expected to have been initialized to
  109. * 0 when allocated.
  110. */
  111. static int vsp1_dl_body_init(struct vsp1_device *vsp1,
  112. struct vsp1_dl_body *dlb, unsigned int num_entries,
  113. size_t extra_size)
  114. {
  115. size_t size = num_entries * sizeof(*dlb->entries) + extra_size;
  116. dlb->vsp1 = vsp1;
  117. dlb->size = size;
  118. dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
  119. GFP_KERNEL);
  120. if (!dlb->entries)
  121. return -ENOMEM;
  122. return 0;
  123. }
  124. /*
  125. * Cleanup a display list body and free allocated DMA memory allocated.
  126. */
  127. static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
  128. {
  129. dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
  130. }
  131. /**
  132. * vsp1_dl_fragment_alloc - Allocate a display list fragment
  133. * @vsp1: The VSP1 device
  134. * @num_entries: The maximum number of entries that the fragment can contain
  135. *
  136. * Allocate a display list fragment with enough memory to contain the requested
  137. * number of entries.
  138. *
  139. * Return a pointer to a fragment on success or NULL if memory can't be
  140. * allocated.
  141. */
  142. struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
  143. unsigned int num_entries)
  144. {
  145. struct vsp1_dl_body *dlb;
  146. int ret;
  147. dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
  148. if (!dlb)
  149. return NULL;
  150. ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
  151. if (ret < 0) {
  152. kfree(dlb);
  153. return NULL;
  154. }
  155. return dlb;
  156. }
  157. /**
  158. * vsp1_dl_fragment_free - Free a display list fragment
  159. * @dlb: The fragment
  160. *
  161. * Free the given display list fragment and the associated DMA memory.
  162. *
  163. * Fragments must only be freed explicitly if they are not added to a display
  164. * list, as the display list will take ownership of them and free them
  165. * otherwise. Manual free typically happens at cleanup time for fragments that
  166. * have been allocated but not used.
  167. *
  168. * Passing a NULL pointer to this function is safe, in that case no operation
  169. * will be performed.
  170. */
  171. void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
  172. {
  173. if (!dlb)
  174. return;
  175. vsp1_dl_body_cleanup(dlb);
  176. kfree(dlb);
  177. }
  178. /**
  179. * vsp1_dl_fragment_write - Write a register to a display list fragment
  180. * @dlb: The fragment
  181. * @reg: The register address
  182. * @data: The register value
  183. *
  184. * Write the given register and value to the display list fragment. The maximum
  185. * number of entries that can be written in a fragment is specified when the
  186. * fragment is allocated by vsp1_dl_fragment_alloc().
  187. */
  188. void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
  189. {
  190. dlb->entries[dlb->num_entries].addr = reg;
  191. dlb->entries[dlb->num_entries].data = data;
  192. dlb->num_entries++;
  193. }
  194. /* -----------------------------------------------------------------------------
  195. * Display List Transaction Management
  196. */
  197. static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
  198. {
  199. struct vsp1_dl_list *dl;
  200. size_t header_size;
  201. int ret;
  202. dl = kzalloc(sizeof(*dl), GFP_KERNEL);
  203. if (!dl)
  204. return NULL;
  205. INIT_LIST_HEAD(&dl->fragments);
  206. dl->dlm = dlm;
  207. /* Initialize the display list body and allocate DMA memory for the body
  208. * and the optional header. Both are allocated together to avoid memory
  209. * fragmentation, with the header located right after the body in
  210. * memory.
  211. */
  212. header_size = dlm->mode == VSP1_DL_MODE_HEADER
  213. ? ALIGN(sizeof(struct vsp1_dl_header), 8)
  214. : 0;
  215. ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
  216. header_size);
  217. if (ret < 0) {
  218. kfree(dl);
  219. return NULL;
  220. }
  221. if (dlm->mode == VSP1_DL_MODE_HEADER) {
  222. size_t header_offset = VSP1_DL_NUM_ENTRIES
  223. * sizeof(*dl->body0.entries);
  224. dl->header = ((void *)dl->body0.entries) + header_offset;
  225. dl->dma = dl->body0.dma + header_offset;
  226. memset(dl->header, 0, sizeof(*dl->header));
  227. dl->header->lists[0].addr = dl->body0.dma;
  228. }
  229. return dl;
  230. }
/*
 * Free a display list and its body.
 *
 * Any fragments still attached to the list are moved to the manager's
 * gc_fragments list without scheduling the garbage collector work: the
 * caller (vsp1_dlm_destroy()) is expected to drain gc_fragments itself.
 */
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_cleanup(&dl->body0);
	list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
	kfree(dl);
}
  237. /**
  238. * vsp1_dl_list_get - Get a free display list
  239. * @dlm: The display list manager
  240. *
  241. * Get a display list from the pool of free lists and return it.
  242. *
  243. * This function must be called without the display list manager lock held.
  244. */
  245. struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
  246. {
  247. struct vsp1_dl_list *dl = NULL;
  248. unsigned long flags;
  249. spin_lock_irqsave(&dlm->lock, flags);
  250. if (!list_empty(&dlm->free)) {
  251. dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
  252. list_del(&dl->list);
  253. /*
  254. * The display list chain must be initialised to ensure every
  255. * display list can assert list_empty() if it is not in a chain.
  256. */
  257. INIT_LIST_HEAD(&dl->chain);
  258. }
  259. spin_unlock_irqrestore(&dlm->lock, flags);
  260. return dl;
  261. }
/*
 * Return a display list (and any lists chained to it) to the free pool.
 *
 * This function must be called with the display list manager lock held.
 * Passing a NULL pointer is safe.
 */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_child;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation. The recursion is bounded: only a chain head has
	 * has_chain set, so children recurse at most one level here.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_child, &dl->chain, chain)
			__vsp1_dl_list_put(dl_child);
	}

	dl->has_chain = false;

	/*
	 * We can't free fragments here as DMA memory can only be freed in
	 * interruptible context. Move all fragments to the display list
	 * manager's list of fragments to be freed, they will be
	 * garbage-collected by the work queue.
	 */
	if (!list_empty(&dl->fragments)) {
		list_splice_init(&dl->fragments, &dl->dlm->gc_fragments);
		schedule_work(&dl->dlm->gc_work);
	}

	/* Reset the body so the list can be reused with fresh register writes. */
	dl->body0.num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
  290. /**
  291. * vsp1_dl_list_put - Release a display list
  292. * @dl: The display list
  293. *
  294. * Release the display list and return it to the pool of free lists.
  295. *
  296. * Passing a NULL pointer to this function is safe, in that case no operation
  297. * will be performed.
  298. */
  299. void vsp1_dl_list_put(struct vsp1_dl_list *dl)
  300. {
  301. unsigned long flags;
  302. if (!dl)
  303. return;
  304. spin_lock_irqsave(&dl->dlm->lock, flags);
  305. __vsp1_dl_list_put(dl);
  306. spin_unlock_irqrestore(&dl->dlm->lock, flags);
  307. }
  308. /**
  309. * vsp1_dl_list_write - Write a register to the display list
  310. * @dl: The display list
  311. * @reg: The register address
  312. * @data: The register value
  313. *
  314. * Write the given register and value to the display list. Up to 256 registers
  315. * can be written per display list.
  316. */
  317. void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
  318. {
  319. vsp1_dl_fragment_write(&dl->body0, reg, data);
  320. }
  321. /**
  322. * vsp1_dl_list_add_fragment - Add a fragment to the display list
  323. * @dl: The display list
  324. * @dlb: The fragment
  325. *
  326. * Add a display list body as a fragment to a display list. Registers contained
  327. * in fragments are processed after registers contained in the main display
  328. * list, in the order in which fragments are added.
  329. *
  330. * Adding a fragment to a display list passes ownership of the fragment to the
  331. * list. The caller must not touch the fragment after this call, and must not
  332. * free it explicitly with vsp1_dl_fragment_free().
  333. *
  334. * Fragments are only usable for display lists in header mode. Attempt to
  335. * add a fragment to a header-less display list will return an error.
  336. */
  337. int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
  338. struct vsp1_dl_body *dlb)
  339. {
  340. /* Multi-body lists are only available in header mode. */
  341. if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
  342. return -EINVAL;
  343. list_add_tail(&dlb->list, &dl->fragments);
  344. return 0;
  345. }
  346. /**
  347. * vsp1_dl_list_add_chain - Add a display list to a chain
  348. * @head: The head display list
  349. * @dl: The new display list
  350. *
  351. * Add a display list to an existing display list chain. The chained lists
  352. * will be automatically processed by the hardware without intervention from
  353. * the CPU. A display list end interrupt will only complete after the last
  354. * display list in the chain has completed processing.
  355. *
  356. * Adding a display list to a chain passes ownership of the display list to
  357. * the head display list item. The chain is released when the head dl item is
  358. * put back with __vsp1_dl_list_put().
  359. *
  360. * Chained display lists are only usable in header mode. Attempts to add a
  361. * display list to a chain in header-less mode will return an error.
  362. */
  363. int vsp1_dl_list_add_chain(struct vsp1_dl_list *head,
  364. struct vsp1_dl_list *dl)
  365. {
  366. /* Chained lists are only available in header mode. */
  367. if (head->dlm->mode != VSP1_DL_MODE_HEADER)
  368. return -EINVAL;
  369. head->has_chain = true;
  370. list_add_tail(&dl->chain, &head->chain);
  371. return 0;
  372. }
/*
 * Fill the hardware header of a display list.
 *
 * @dl: the display list (must be in header mode)
 * @is_last: true when this list is the last in a chain (or unchained), in
 *	which case the hardware raises the end interrupt instead of
 *	auto-starting a next list
 *
 * Note: sizes are computed with sizeof(*dl->header->lists), which is the same
 * as sizeof(struct vsp1_dl_entry) — both are two packed u32 fields.
 */
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes. The
	 * address of the first body has already been filled when the display
	 * list was allocated.
	 */
	hdr->num_bytes = dl->body0.num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->fragments, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	/* num_lists counts the fragments only, not the first body. */
	dl->header->num_lists = num_lists;

	/*
	 * If this display list's chain is not empty, we are on a list, where
	 * the next item in the list is the display list entity which should be
	 * automatically queued by the hardware.
	 */
	if (!list_empty(&dl->chain) && !is_last) {
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else {
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
/*
 * Commit a display list to the hardware.
 *
 * In header mode the header of the list (and of every chained list) is
 * filled and the head list address is written to the header address register;
 * chained lists then auto-start. In headerless mode the commit either
 * programs the body registers directly, or, if the hardware may already be
 * processing an update (UPD bit set), defers the list to the pending slot to
 * be queued by the frame end interrupt handler.
 */
void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;
	unsigned long flags;
	bool update;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
		struct vsp1_dl_list *dl_child;

		/*
		 * In header mode the caller guarantees that the hardware is
		 * idle at this point.
		 */

		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_child, &dl->chain, chain) {
			bool last = list_is_last(&dl_child->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_child, last);
		}

		/*
		 * Commit the head display list to hardware. Chained headers
		 * will auto-start.
		 */
		vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);

		dlm->active = dl;
		goto done;
	}

	/* Once the UPD bit has been set the hardware can start processing the
	 * display list at any time and we can't touch the address and size
	 * registers. In that case mark the update as pending, it will be
	 * queued up to the hardware by the frame end interrupt handler.
	 */
	update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
	if (update) {
		/* Replace (and recycle) any previously pending list. */
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		goto done;
	}

	/* Program the hardware with the display list body address and size.
	 * The UPD bit will be cleared by the device when the display list is
	 * processed.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
	vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
		   (dl->body0.num_entries * sizeof(*dl->header->lists)));

	/* Replace (and recycle) any previously queued list. */
	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;

done:
	spin_unlock_irqrestore(&dlm->lock, flags);
}
  456. /* -----------------------------------------------------------------------------
  457. * Display List Manager
  458. */
  459. /* Interrupt Handling */
  460. void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
  461. {
  462. spin_lock(&dlm->lock);
  463. /* The display start interrupt signals the end of the display list
  464. * processing by the device. The active display list, if any, won't be
  465. * accessed anymore and can be reused.
  466. */
  467. __vsp1_dl_list_put(dlm->active);
  468. dlm->active = NULL;
  469. spin_unlock(&dlm->lock);
  470. }
/*
 * Handle the frame end interrupt: rotate the active/queued/pending display
 * lists and program the next headerless list into the hardware if one is
 * ready.
 */
void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	spin_lock(&dlm->lock);

	/* The list that was being processed is now done; recycle it. */
	__vsp1_dl_list_put(dlm->active);
	dlm->active = NULL;

	/* Header mode is used for mem-to-mem pipelines only. We don't need to
	 * perform any operation as there can't be any new display list queued
	 * in that case.
	 */
	if (dlm->mode == VSP1_DL_MODE_HEADER)
		goto done;

	/* The UPD bit set indicates that the commit operation raced with the
	 * interrupt and occurred after the frame end event and UPD clear but
	 * before interrupt processing. The hardware hasn't taken the update
	 * into account yet, we'll thus skip one frame and retry.
	 */
	if (vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD)
		goto done;

	/* The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		dlm->active = dlm->queued;
		dlm->queued = NULL;
	}

	/* Now that the UPD bit has been cleared we can queue the next display
	 * list to the hardware if one has been prepared.
	 */
	if (dlm->pending) {
		struct vsp1_dl_list *dl = dlm->pending;

		vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
		vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
			   (dl->body0.num_entries *
			    sizeof(*dl->header->lists)));

		dlm->queued = dl;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);
}
  512. /* Hardware Setup */
  513. void vsp1_dlm_setup(struct vsp1_device *vsp1)
  514. {
  515. u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
  516. | VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
  517. | VI6_DL_CTRL_DLE;
  518. /* The DRM pipeline operates with display lists in Continuous Frame
  519. * Mode, all other pipelines use manual start.
  520. */
  521. if (vsp1->drm)
  522. ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
  523. vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
  524. vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
  525. }
  526. void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
  527. {
  528. unsigned long flags;
  529. spin_lock_irqsave(&dlm->lock, flags);
  530. __vsp1_dl_list_put(dlm->active);
  531. __vsp1_dl_list_put(dlm->queued);
  532. __vsp1_dl_list_put(dlm->pending);
  533. spin_unlock_irqrestore(&dlm->lock, flags);
  534. dlm->active = NULL;
  535. dlm->queued = NULL;
  536. dlm->pending = NULL;
  537. }
/*
 * Free all fragments awaiting to be garbage-collected.
 *
 * This function must be called without the display list manager lock held.
 */
static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	while (!list_empty(&dlm->gc_fragments)) {
		struct vsp1_dl_body *dlb;

		dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body,
				       list);
		list_del(&dlb->list);

		/*
		 * Drop the lock while freeing: vsp1_dl_fragment_free()
		 * releases DMA memory, which must not be done with the
		 * spinlock held. The list may change meanwhile, so restart
		 * from the head on each iteration.
		 */
		spin_unlock_irqrestore(&dlm->lock, flags);
		vsp1_dl_fragment_free(dlb);
		spin_lock_irqsave(&dlm->lock, flags);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);
}
  558. static void vsp1_dlm_garbage_collect(struct work_struct *work)
  559. {
  560. struct vsp1_dl_manager *dlm =
  561. container_of(work, struct vsp1_dl_manager, gc_work);
  562. vsp1_dlm_fragments_free(dlm);
  563. }
  564. struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
  565. unsigned int index,
  566. unsigned int prealloc)
  567. {
  568. struct vsp1_dl_manager *dlm;
  569. unsigned int i;
  570. dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
  571. if (!dlm)
  572. return NULL;
  573. dlm->index = index;
  574. dlm->mode = index == 0 && !vsp1->info->uapi
  575. ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
  576. dlm->vsp1 = vsp1;
  577. spin_lock_init(&dlm->lock);
  578. INIT_LIST_HEAD(&dlm->free);
  579. INIT_LIST_HEAD(&dlm->gc_fragments);
  580. INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect);
  581. for (i = 0; i < prealloc; ++i) {
  582. struct vsp1_dl_list *dl;
  583. dl = vsp1_dl_list_alloc(dlm);
  584. if (!dl)
  585. return NULL;
  586. list_add_tail(&dl->list, &dlm->free);
  587. }
  588. return dlm;
  589. }
  590. void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
  591. {
  592. struct vsp1_dl_list *dl, *next;
  593. if (!dlm)
  594. return;
  595. cancel_work_sync(&dlm->gc_work);
  596. list_for_each_entry_safe(dl, next, &dlm->free, list) {
  597. list_del(&dl->list);
  598. vsp1_dl_list_free(dl);
  599. }
  600. vsp1_dlm_fragments_free(dlm);
  601. }