memremap.h

#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
        const unsigned long base_pfn;
        const unsigned long reserve;
        unsigned long free;
        unsigned long align;
        unsigned long alloc;
};

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);

#ifdef CONFIG_ZONE_DEVICE
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
        return NULL;
}
#endif
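
/*
 * Usage sketch (not part of the original header): a driver that wants the
 * memmap for its range allocated out of the device's own capacity might
 * fill in a vmem_altmap along these lines.  The SZ_8K label reservation,
 * memmap_size, and res are assumptions for illustration only; PFN_DOWN()
 * and SZ_8K come from <linux/pfn.h> and <linux/sizes.h>.
 *
 *      struct vmem_altmap altmap = {
 *              .base_pfn = PFN_DOWN(res->start),
 *              .reserve = PFN_DOWN(SZ_8K),      // driver-private label area
 *              .free = PFN_DOWN(memmap_size),   // pages set aside for memmap storage
 *      };
 *
 *      // pfns consumed at the start of the range by the reservation and memmap
 *      unsigned long data_offset = vmem_altmap_offset(&altmap);
 */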

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 */
struct dev_pagemap {
        struct vmem_altmap *altmap;
        const struct resource *res;
        struct percpu_ref *ref;
        struct device *dev;
};

#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
#else
static inline void *devm_memremap_pages(struct device *dev,
                struct resource *res, struct percpu_ref *ref,
                struct vmem_altmap *altmap)
{
        /*
         * Fail attempts to call devm_memremap_pages() without
         * ZONE_DEVICE support enabled; this requires callers to fall
         * back to plain devm_memremap() based on config.
         */
        WARN_ON_ONCE(1);
        return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
        return NULL;
}
#endif
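
/*
 * Usage sketch (not part of the original header): a probe path that maps
 * its range with struct pages when ZONE_DEVICE is available and falls
 * back to a plain devm_memremap() otherwise, as the stub above suggests.
 * The pmem-style names are assumptions for illustration, and pmem->ref is
 * assumed to have been initialized with percpu_ref_init() elsewhere;
 * devm_memremap() and MEMREMAP_WB come from <linux/io.h>.
 *
 *      void *addr;
 *
 *      if (IS_ENABLED(CONFIG_ZONE_DEVICE))
 *              addr = devm_memremap_pages(dev, &pmem->res, &pmem->ref, NULL);
 *      else
 *              addr = devm_memremap(dev, pmem->res.start,
 *                              resource_size(&pmem->res), MEMREMAP_WB);
 *
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 */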

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the dev_pagemap
 * @pgmap: optional known pgmap that already has a reference
 *
 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 * same mapping.
 */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
                struct dev_pagemap *pgmap)
{
        const struct resource *res = pgmap ? pgmap->res : NULL;
        resource_size_t phys = PFN_PHYS(pfn);

        /*
         * In the cached case we're already holding a live reference so
         * we can simply do a blind increment
         */
        if (res && phys >= res->start && phys <= res->end) {
                percpu_ref_get(pgmap->ref);
                return pgmap;
        }

        /* fall back to slow path lookup */
        rcu_read_lock();
        pgmap = find_dev_pagemap(phys);
        if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
                pgmap = NULL;
        rcu_read_unlock();

        return pgmap;
}

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
        if (pgmap)
                percpu_ref_put(pgmap->ref);
}
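
/*
 * Usage sketch (not part of the original header): a gup-style walker that
 * takes and drops a dev_pagemap reference per pfn, passing the previous
 * result back in so the find_dev_pagemap() lookup is skipped while the
 * pfns stay within one mapping.  start_pfn, end_pfn, and what is done
 * with each page are assumptions for illustration.
 *
 *      struct dev_pagemap *pgmap = NULL;
 *      unsigned long pfn;
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *              pgmap = get_dev_pagemap(pfn, pgmap);
 *              if (!pgmap)
 *                      break;  // pfn is not covered by a ZONE_DEVICE mapping
 *              // ... operate on pfn_to_page(pfn) while the reference is held ...
 *              put_dev_pagemap(pgmap);
 *      }
 */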
#endif /* _LINUX_MEMREMAP_H_ */