/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File ctvmem.c
 *
 * @Brief
 * This file contains the implementation of virtual memory management object
 * for card device.
 *
 * @Author Liu Chun
 * @Date Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
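
/*
 * Sizing note (illustrative figures, not taken from the original source):
 * with a 4 KiB CT_PAGE_SIZE and 4-byte pointers (32-bit build),
 * CT_PTES_PER_PAGE is 1024, so one page table page covers
 * CT_ADDRS_PER_PAGE = 1024 * 4 KiB = 4 MiB of device-visible address
 * space; CT_PTP_NUM such pages bound the total vm->size set up in
 * ct_vm_create().
 */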

/* *
 * Find or create vm block based on requested @size.
 * @size must be page aligned.
 * */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Fail! No sufficient device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

out:
	mutex_unlock(&vm->lock);
	return block;
}
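
/*
 * Return a block to the free pool: insert it into the address-ordered
 * unused list and merge it with any directly adjacent free blocks, so the
 * pool does not fragment across repeated map/unmap cycles.
 */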
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) > entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block that is big enough to allocate!\n");
		return NULL;
	}

	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;

		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	block->size = size;
	return block;
}

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}

/* *
 * return the host physical addr of the @index-th device
 * page table page on success, or ~0UL on failure.
 * The first returned ~0UL indicates the termination.
 * */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci),
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* no page table pages are allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (NULL != block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}
	*rvm = vm;
	return 0;
}

/* The caller must ensure no mapping pages are being used
 * by hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}
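
/*
 * Usage sketch (illustrative only; the actual callers live elsewhere in the
 * driver, e.g. ctatc.c, and the names pci, substream and buffer_bytes below
 * are placeholders): the manager is created once per card, buffers are
 * mapped through the ops installed by ct_vm_create(), and everything is
 * torn down on card removal.
 *
 *	struct ct_vm *vm;
 *	struct ct_vm_block *blk;
 *
 *	if (ct_vm_create(&vm, pci))
 *		return -ENOMEM;
 *	blk = vm->map(vm, substream, buffer_bytes);
 *	...
 *	vm->unmap(vm, blk);
 *	ct_vm_destroy(vm);
 */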