dma-contiguous.h

#ifndef __LINUX_CMA_H
#define __LINUX_CMA_H

/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
/*
 * Contiguous Memory Allocator
 *
 * The Contiguous Memory Allocator (CMA) makes it possible to
 * allocate big contiguous chunks of memory after the system has
 * booted.
 *
 * Why is it needed?
 *
 * Various devices on embedded systems have no scatter-gather and/or
 * IO map support and require contiguous blocks of memory to
 * operate.  They include devices such as cameras, hardware video
 * coders, etc.
 *
 * Such devices often require big memory buffers (a full HD frame
 * is, for instance, more than 2 mega pixels large, i.e. more than 6
 * MB of memory), which makes mechanisms such as kmalloc() or
 * alloc_page() ineffective.
 *
 * At the same time, a solution where a big memory region is
 * reserved for a device is suboptimal since often more memory is
 * reserved than strictly required and, moreover, the memory is
 * inaccessible to the page allocator even if the device driver
 * doesn't use it.
 *
 * CMA tries to solve this issue by operating on memory regions
 * from which only movable pages can be allocated.  This way, the
 * kernel can use the memory for pagecache and, when a device driver
 * requests it, the allocated pages can be migrated away.
 *
 * Driver usage
 *
 * CMA should not be used by device drivers directly.  It is only
 * a helper framework for the dma-mapping subsystem.
 *
 * For more information, see kernel-docs in drivers/base/dma-contiguous.c
 */
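/*
 * Illustrative sketch: because CMA only backs the dma-mapping
 * subsystem, a driver that needs a large contiguous buffer simply uses
 * the generic DMA API and may be served from a CMA area transparently,
 * roughly like:
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_coherent(dev, SZ_8M, &dma_handle, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_8M, vaddr, dma_handle);
 */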
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/errno.h>

struct cma;
struct page;
struct device;

#ifdef CONFIG_CMA

/*
 * There is always at least the global CMA area and a few optional
 * device-private areas configured in the kernel .config.
 */
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)

phys_addr_t cma_get_base(struct device *dev);

extern struct cma *dma_contiguous_def_area;

void dma_contiguous_reserve(phys_addr_t addr_limit);

int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t *res_base,
				phys_addr_t limit, const char *name,
				bool in_system);

int dma_contiguous_add_device(struct device *dev, phys_addr_t base);
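/*
 * Illustrative sketch: dma_contiguous_reserve() is meant to be called
 * once from arch setup code while memblock (or bootmem) is still the
 * active allocator, passing an upper physical address limit for the
 * default CMA area; the limit below is only an example value:
 *
 *	dma_contiguous_reserve(SZ_256M);
 */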
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *			      for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board-specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
					 phys_addr_t base, phys_addr_t limit)
{
	int ret;

	ret = dma_contiguous_reserve_area(size, &base, limit, NULL, true);
	if (ret == 0)
		ret = dma_contiguous_add_device(dev, base);
	return ret;
}
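/*
 * Illustrative sketch (the device name below is hypothetical): board
 * code could reserve a 16 MiB area, placed anywhere (base = 0,
 * limit = 0), for a camera device during early machine init:
 *
 *	dma_declare_contiguous(&my_camera_device.dev, SZ_16M, 0, 0);
 */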
static inline int dma_declare_contiguous_reserved(struct device *dev,
						  phys_addr_t size,
						  phys_addr_t base,
						  phys_addr_t limit)
{
	int ret;

	ret = dma_contiguous_reserve_area(size, &base, limit, NULL, false);
	if (ret == 0)
		ret = dma_contiguous_add_device(dev, base);
	return ret;
}
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
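/*
 * Illustrative sketch: the dma-mapping glue code allocates and releases
 * CMA memory in whole pages; for a 1 MiB buffer it would look roughly
 * like:
 *
 *	size_t count = SZ_1M >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(SZ_1M));
 *	if (!page)
 *		return NULL;
 *	...
 *	dma_release_from_contiguous(dev, page, count);
 */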
#else

#define MAX_CMA_AREAS	(0)

static inline void dma_contiguous_reserve(phys_addr_t limit) { }

static inline
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
			   phys_addr_t base, phys_addr_t limit)
{
	return -ENOSYS;
}

static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order)
{
	return NULL;
}

static inline
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return false;
}

static inline phys_addr_t cma_get_base(struct device *dev)
{
	return 0;
}

#endif /* CONFIG_CMA */

#endif /* __KERNEL__ */

#endif /* __LINUX_CMA_H */