/*
 * devres.c - managed (devm_ / pcim_) I/O remap and region helpers
 */
  1. #include <linux/pci.h>
  2. #include <linux/io.h>
  3. #include <linux/gfp.h>
  4. #include <linux/export.h>
  5. void devm_ioremap_release(struct device *dev, void *res)
  6. {
  7. iounmap(*(void __iomem **)res);
  8. }
  9. static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
  10. {
  11. return *(void **)res == match_data;
  12. }
  13. /**
  14. * devm_ioremap - Managed ioremap()
  15. * @dev: Generic device to remap IO address for
  16. * @offset: BUS offset to map
  17. * @size: Size of map
  18. *
  19. * Managed ioremap(). Map is automatically unmapped on driver detach.
  20. */
  21. void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
  22. unsigned long size)
  23. {
  24. void __iomem **ptr, *addr;
  25. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  26. if (!ptr)
  27. return NULL;
  28. addr = ioremap(offset, size);
  29. if (addr) {
  30. *ptr = addr;
  31. devres_add(dev, ptr);
  32. } else
  33. devres_free(ptr);
  34. return addr;
  35. }
  36. EXPORT_SYMBOL(devm_ioremap);
  37. /**
  38. * devm_ioremap_nocache - Managed ioremap_nocache()
  39. * @dev: Generic device to remap IO address for
  40. * @offset: BUS offset to map
  41. * @size: Size of map
  42. *
  43. * Managed ioremap_nocache(). Map is automatically unmapped on driver
  44. * detach.
  45. */
  46. void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
  47. unsigned long size)
  48. {
  49. void __iomem **ptr, *addr;
  50. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  51. if (!ptr)
  52. return NULL;
  53. addr = ioremap_nocache(offset, size);
  54. if (addr) {
  55. *ptr = addr;
  56. devres_add(dev, ptr);
  57. } else
  58. devres_free(ptr);
  59. return addr;
  60. }
  61. EXPORT_SYMBOL(devm_ioremap_nocache);
  62. /**
  63. * devm_iounmap - Managed iounmap()
  64. * @dev: Generic device to unmap for
  65. * @addr: Address to unmap
  66. *
  67. * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
  68. */
  69. void devm_iounmap(struct device *dev, void __iomem *addr)
  70. {
  71. WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
  72. (void *)addr));
  73. iounmap(addr);
  74. }
  75. EXPORT_SYMBOL(devm_iounmap);
  76. /**
  77. * devm_ioremap_resource() - check, request region, and ioremap resource
  78. * @dev: generic device to handle the resource for
  79. * @res: resource to be handled
  80. *
  81. * Checks that a resource is a valid memory region, requests the memory region
  82. * and ioremaps it either as cacheable or as non-cacheable memory depending on
  83. * the resource's flags. All operations are managed and will be undone on
  84. * driver detach.
  85. *
  86. * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
  87. * on failure. Usage example:
  88. *
  89. * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  90. * base = devm_ioremap_resource(&pdev->dev, res);
  91. * if (IS_ERR(base))
  92. * return PTR_ERR(base);
  93. */
  94. void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
  95. {
  96. resource_size_t size;
  97. const char *name;
  98. void __iomem *dest_ptr;
  99. BUG_ON(!dev);
  100. if (!res || resource_type(res) != IORESOURCE_MEM) {
  101. dev_err(dev, "invalid resource\n");
  102. return ERR_PTR(-EINVAL);
  103. }
  104. size = resource_size(res);
  105. name = res->name ?: dev_name(dev);
  106. if (!devm_request_mem_region(dev, res->start, size, name)) {
  107. dev_err(dev, "can't request region for resource %pR\n", res);
  108. return ERR_PTR(-EBUSY);
  109. }
  110. if (res->flags & IORESOURCE_CACHEABLE)
  111. dest_ptr = devm_ioremap(dev, res->start, size);
  112. else
  113. dest_ptr = devm_ioremap_nocache(dev, res->start, size);
  114. if (!dest_ptr) {
  115. dev_err(dev, "ioremap failed for resource %pR\n", res);
  116. devm_release_mem_region(dev, res->start, size);
  117. dest_ptr = ERR_PTR(-ENOMEM);
  118. }
  119. return dest_ptr;
  120. }
  121. EXPORT_SYMBOL(devm_ioremap_resource);
  122. /**
  123. * devm_request_and_ioremap() - Check, request region, and ioremap resource
  124. * @dev: Generic device to handle the resource for
  125. * @res: resource to be handled
  126. *
  127. * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
  128. * everything is undone on driver detach. Checks arguments, so you can feed
  129. * it the result from e.g. platform_get_resource() directly. Returns the
  130. * remapped pointer or NULL on error. Usage example:
  131. *
  132. * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  133. * base = devm_request_and_ioremap(&pdev->dev, res);
  134. * if (!base)
  135. * return -EADDRNOTAVAIL;
  136. */
  137. void __iomem *devm_request_and_ioremap(struct device *device,
  138. struct resource *res)
  139. {
  140. void __iomem *dest_ptr;
  141. dest_ptr = devm_ioremap_resource(device, res);
  142. if (IS_ERR(dest_ptr))
  143. return NULL;
  144. return dest_ptr;
  145. }
  146. EXPORT_SYMBOL(devm_request_and_ioremap);
  147. #ifdef CONFIG_HAS_IOPORT
  148. /*
  149. * Generic iomap devres
  150. */
  151. static void devm_ioport_map_release(struct device *dev, void *res)
  152. {
  153. ioport_unmap(*(void __iomem **)res);
  154. }
  155. static int devm_ioport_map_match(struct device *dev, void *res,
  156. void *match_data)
  157. {
  158. return *(void **)res == match_data;
  159. }
  160. /**
  161. * devm_ioport_map - Managed ioport_map()
  162. * @dev: Generic device to map ioport for
  163. * @port: Port to map
  164. * @nr: Number of ports to map
  165. *
  166. * Managed ioport_map(). Map is automatically unmapped on driver
  167. * detach.
  168. */
  169. void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
  170. unsigned int nr)
  171. {
  172. void __iomem **ptr, *addr;
  173. ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
  174. if (!ptr)
  175. return NULL;
  176. addr = ioport_map(port, nr);
  177. if (addr) {
  178. *ptr = addr;
  179. devres_add(dev, ptr);
  180. } else
  181. devres_free(ptr);
  182. return addr;
  183. }
  184. EXPORT_SYMBOL(devm_ioport_map);
  185. /**
  186. * devm_ioport_unmap - Managed ioport_unmap()
  187. * @dev: Generic device to unmap for
  188. * @addr: Address to unmap
  189. *
  190. * Managed ioport_unmap(). @addr must have been mapped using
  191. * devm_ioport_map().
  192. */
  193. void devm_ioport_unmap(struct device *dev, void __iomem *addr)
  194. {
  195. ioport_unmap(addr);
  196. WARN_ON(devres_destroy(dev, devm_ioport_map_release,
  197. devm_ioport_map_match, (void *)addr));
  198. }
  199. EXPORT_SYMBOL(devm_ioport_unmap);
  200. #ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
/* Number of iomap table slots — sized to cover the standard PCI BARs. */
#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
/*
 * Per-device table of managed BAR mappings; a NULL slot means that BAR is
 * currently not mapped (see pcim_iomap()/pcim_iounmap()).
 */
struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};
  208. static void pcim_iomap_release(struct device *gendev, void *res)
  209. {
  210. struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
  211. struct pcim_iomap_devres *this = res;
  212. int i;
  213. for (i = 0; i < PCIM_IOMAP_MAX; i++)
  214. if (this->table[i])
  215. pci_iounmap(dev, this->table[i]);
  216. }
  217. /**
  218. * pcim_iomap_table - access iomap allocation table
  219. * @pdev: PCI device to access iomap table for
  220. *
  221. * Access iomap allocation table for @dev. If iomap table doesn't
  222. * exist and @pdev is managed, it will be allocated. All iomaps
  223. * recorded in the iomap table are automatically unmapped on driver
  224. * detach.
  225. *
  226. * This function might sleep when the table is first allocated but can
  227. * be safely called without context and guaranteed to succed once
  228. * allocated.
  229. */
  230. void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
  231. {
  232. struct pcim_iomap_devres *dr, *new_dr;
  233. dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
  234. if (dr)
  235. return dr->table;
  236. new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
  237. if (!new_dr)
  238. return NULL;
  239. dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
  240. return dr->table;
  241. }
  242. EXPORT_SYMBOL(pcim_iomap_table);
  243. /**
  244. * pcim_iomap - Managed pcim_iomap()
  245. * @pdev: PCI device to iomap for
  246. * @bar: BAR to iomap
  247. * @maxlen: Maximum length of iomap
  248. *
  249. * Managed pci_iomap(). Map is automatically unmapped on driver
  250. * detach.
  251. */
  252. void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
  253. {
  254. void __iomem **tbl;
  255. BUG_ON(bar >= PCIM_IOMAP_MAX);
  256. tbl = (void __iomem **)pcim_iomap_table(pdev);
  257. if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
  258. return NULL;
  259. tbl[bar] = pci_iomap(pdev, bar, maxlen);
  260. return tbl[bar];
  261. }
  262. EXPORT_SYMBOL(pcim_iomap);
  263. /**
  264. * pcim_iounmap - Managed pci_iounmap()
  265. * @pdev: PCI device to iounmap for
  266. * @addr: Address to unmap
  267. *
  268. * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
  269. */
  270. void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
  271. {
  272. void __iomem **tbl;
  273. int i;
  274. pci_iounmap(pdev, addr);
  275. tbl = (void __iomem **)pcim_iomap_table(pdev);
  276. BUG_ON(!tbl);
  277. for (i = 0; i < PCIM_IOMAP_MAX; i++)
  278. if (tbl[i] == addr) {
  279. tbl[i] = NULL;
  280. return;
  281. }
  282. WARN_ON(1);
  283. }
  284. EXPORT_SYMBOL(pcim_iounmap);
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 *
 * Returns 0 on success, a negative errno on failure; on failure every
 * BAR acquired by this call has been unmapped and released again.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		/* a BAR selected by @mask must actually exist */
		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}
	return 0;

	/*
	 * Unwind: BAR @i was requested but not mapped, so release it only;
	 * every earlier masked BAR was both requested and mapped, so undo
	 * both in the --i loop.
	 */
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
  328. /**
  329. * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
  330. * @pdev: PCI device to map IO resources for
  331. * @mask: Mask of BARs to iomap
  332. * @name: Name used when requesting regions
  333. *
  334. * Request all PCI BARs and iomap regions specified by @mask.
  335. */
  336. int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
  337. const char *name)
  338. {
  339. int request_mask = ((1 << 6) - 1) & ~mask;
  340. int rc;
  341. rc = pci_request_selected_regions(pdev, request_mask, name);
  342. if (rc)
  343. return rc;
  344. rc = pcim_iomap_regions(pdev, mask, name);
  345. if (rc)
  346. pci_release_selected_regions(pdev, request_mask);
  347. return rc;
  348. }
  349. EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  350. /**
  351. * pcim_iounmap_regions - Unmap and release PCI BARs
  352. * @pdev: PCI device to map IO resources for
  353. * @mask: Mask of BARs to unmap and release
  354. *
  355. * Unmap and release regions specified by @mask.
  356. */
  357. void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
  358. {
  359. void __iomem * const *iomap;
  360. int i;
  361. iomap = pcim_iomap_table(pdev);
  362. if (!iomap)
  363. return;
  364. for (i = 0; i < PCIM_IOMAP_MAX; i++) {
  365. if (!(mask & (1 << i)))
  366. continue;
  367. pcim_iounmap(pdev, iomap[i]);
  368. pci_release_region(pdev, i);
  369. }
  370. }
  371. EXPORT_SYMBOL(pcim_iounmap_regions);
  372. #endif /* CONFIG_PCI */
  373. #endif /* CONFIG_HAS_IOPORT */