devres.c

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>

void devm_ioremap_release(struct device *dev, void *res)
{
        iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
                           unsigned long size)
{
        void __iomem **ptr, *addr;

        ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        addr = ioremap(offset, size);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_ioremap);
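
/*
 * Usage sketch for devm_ioremap() (illustrative only; foo_probe(), FOO_BASE
 * and FOO_SIZE are hypothetical names, not part of this file):
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              void __iomem *regs;
 *
 *              regs = devm_ioremap(&pdev->dev, FOO_BASE, FOO_SIZE);
 *              if (!regs)
 *                      return -ENOMEM;
 *              // ... use regs; no explicit iounmap() is needed, the
 *              // mapping is torn down automatically on driver detach
 *              return 0;
 *      }
 */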

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
                                   unsigned long size)
{
        void __iomem **ptr, *addr;

        ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        addr = ioremap_nocache(offset, size);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
        WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
                               (void *)addr));
        iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
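
/*
 * devm_iounmap() is only needed when a mapping must go away before driver
 * detach, e.g. a window used during probe only.  A minimal sketch
 * (foo_setup() and the names around it are hypothetical):
 *
 *      regs = devm_ioremap(&pdev->dev, FOO_BASE, FOO_SIZE);
 *      if (!regs)
 *              return -ENOMEM;
 *      foo_setup(regs);
 *      devm_iounmap(&pdev->dev, regs);         // release early, on purpose
 */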

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory region
 * and ioremaps it either as cacheable or as non-cacheable memory depending on
 * the resource's flags. All operations are managed and will be undone on
 * driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *      base = devm_ioremap_resource(&pdev->dev, res);
 *      if (IS_ERR(base))
 *              return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
        resource_size_t size;
        const char *name;
        void __iomem *dest_ptr;

        BUG_ON(!dev);

        if (!res || resource_type(res) != IORESOURCE_MEM) {
                dev_err(dev, "invalid resource\n");
                return ERR_PTR(-EINVAL);
        }

        size = resource_size(res);
        name = res->name ?: dev_name(dev);

        if (!devm_request_mem_region(dev, res->start, size, name)) {
                dev_err(dev, "can't request region for resource %pR\n", res);
                return ERR_PTR(-EBUSY);
        }

        if (res->flags & IORESOURCE_CACHEABLE)
                dest_ptr = devm_ioremap(dev, res->start, size);
        else
                dest_ptr = devm_ioremap_nocache(dev, res->start, size);

        if (!dest_ptr) {
                dev_err(dev, "ioremap failed for resource %pR\n", res);
                devm_release_mem_region(dev, res->start, size);
                dest_ptr = ERR_PTR(-ENOMEM);
        }

        return dest_ptr;
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_request_and_ioremap() - Check, request region, and ioremap resource
 * @device: Generic device to handle the resource for
 * @res: resource to be handled
 *
 * Takes all necessary steps to ioremap a mem resource. Uses managed device, so
 * everything is undone on driver detach. Checks arguments, so you can feed
 * it the result from e.g. platform_get_resource() directly. Returns the
 * remapped pointer or NULL on error. Usage example:
 *
 *      res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *      base = devm_request_and_ioremap(&pdev->dev, res);
 *      if (!base)
 *              return -EADDRNOTAVAIL;
 */
void __iomem *devm_request_and_ioremap(struct device *device,
                                       struct resource *res)
{
        void __iomem *dest_ptr;

        dest_ptr = devm_ioremap_resource(device, res);
        if (IS_ERR(dest_ptr))
                return NULL;

        return dest_ptr;
}
EXPORT_SYMBOL(devm_request_and_ioremap);
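
/*
 * Note on the two entry points above: devm_request_and_ioremap() is a thin
 * wrapper that discards the specific error and reports plain NULL.  Callers
 * that want to propagate the real error code (-EINVAL, -EBUSY or -ENOMEM)
 * should call devm_ioremap_resource() directly and use IS_ERR()/PTR_ERR()
 * as in its usage example.
 */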

#ifdef CONFIG_HAS_IOPORT
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
        ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
                                 void *match_data)
{
        return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
                              unsigned int nr)
{
        void __iomem **ptr, *addr;

        ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        addr = ioport_map(port, nr);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else
                devres_free(ptr);

        return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
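
/*
 * Usage sketch for devm_ioport_map() (hypothetical names; a legacy-style
 * device with an 8-port I/O window at FOO_IO_BASE):
 *
 *      void __iomem *io = devm_ioport_map(&pdev->dev, FOO_IO_BASE, 8);
 *
 *      if (!io)
 *              return -ENOMEM;
 *      // access via ioread8(io + reg) / iowrite8(val, io + reg); the
 *      // mapping is released automatically on driver detach
 */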

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
        ioport_unmap(addr);
        WARN_ON(devres_destroy(dev, devm_ioport_map_release,
                               devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX  PCI_ROM_RESOURCE

struct pcim_iomap_devres {
        void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
        struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
        struct pcim_iomap_devres *this = res;
        int i;

        for (i = 0; i < PCIM_IOMAP_MAX; i++)
                if (this->table[i])
                        pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access the iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated, but once
 * the table has been allocated it can be safely called from any context
 * and is guaranteed to succeed.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
        struct pcim_iomap_devres *dr, *new_dr;

        dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
        if (dr)
                return dr->table;

        new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
        if (!new_dr)
                return NULL;
        dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
        return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
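
/*
 * Typical use of pcim_iomap_table(): look up a BAR mapping that was set up
 * earlier by pcim_iomap() or pcim_iomap_regions().  A minimal sketch
 * (assumes BAR 0 was already mapped):
 *
 *      void __iomem *mmio = pcim_iomap_table(pdev)[0];
 */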

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
        void __iomem **tbl;

        BUG_ON(bar >= PCIM_IOMAP_MAX);

        tbl = (void __iomem **)pcim_iomap_table(pdev);
        if (!tbl || tbl[bar])   /* duplicate mappings not allowed */
                return NULL;

        tbl[bar] = pci_iomap(pdev, bar, maxlen);
        return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
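
/*
 * Usage sketch for pcim_iomap() (illustrative; assumes the BAR was already
 * requested, e.g. with pci_request_region(); maxlen of 0 maps the whole BAR):
 *
 *      void __iomem *mmio = pcim_iomap(pdev, 0, 0);    // all of BAR 0
 *
 *      if (!mmio)
 *              return -ENOMEM;
 */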

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
        void __iomem **tbl;
        int i;

        pci_iounmap(pdev, addr);

        tbl = (void __iomem **)pcim_iomap_table(pdev);
        BUG_ON(!tbl);

        for (i = 0; i < PCIM_IOMAP_MAX; i++)
                if (tbl[i] == addr) {
                        tbl[i] = NULL;
                        return;
                }
        WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
        void __iomem * const *iomap;
        int i, rc;

        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return -ENOMEM;

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                unsigned long len;

                if (!(mask & (1 << i)))
                        continue;

                rc = -EINVAL;
                len = pci_resource_len(pdev, i);
                if (!len)
                        goto err_inval;

                rc = pci_request_region(pdev, i, name);
                if (rc)
                        goto err_inval;

                rc = -ENOMEM;
                if (!pcim_iomap(pdev, i, 0))
                        goto err_region;
        }

        return 0;

 err_region:
        pci_release_region(pdev, i);
 err_inval:
        while (--i >= 0) {
                if (!(mask & (1 << i)))
                        continue;
                pcim_iounmap(pdev, iomap[i]);
                pci_release_region(pdev, i);
        }

        return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
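
/*
 * Usage sketch for pcim_iomap_regions() in a managed PCI driver's probe
 * (illustrative; bar_probe() and "bar" are hypothetical names, while
 * pcim_enable_device() is the managed enable helper from the PCI core):
 *
 *      static int bar_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *      {
 *              void __iomem *mmio;
 *              int rc;
 *
 *              rc = pcim_enable_device(pdev);
 *              if (rc)
 *                      return rc;
 *
 *              rc = pcim_iomap_regions(pdev, 1 << 0, "bar");   // BAR 0 only
 *              if (rc)
 *                      return rc;
 *
 *              mmio = pcim_iomap_table(pdev)[0];
 *              // ... everything above is undone automatically on detach
 *              return 0;
 *      }
 */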

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
                                   const char *name)
{
        int request_mask = ((1 << 6) - 1) & ~mask;
        int rc;

        rc = pci_request_selected_regions(pdev, request_mask, name);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, mask, name);
        if (rc)
                pci_release_selected_regions(pdev, request_mask);
        return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
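
/*
 * How the masks above combine: ((1 << 6) - 1) covers the six standard PCI
 * BARs.  BARs in @mask are requested and iomapped by pcim_iomap_regions();
 * the remaining standard BARs are requested (but not mapped) via
 * pci_request_selected_regions(), so no other driver can grab them.  E.g.
 * a call with mask = (1 << 0) maps BAR 0 and merely reserves BARs 1-5.
 */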

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to unmap IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
        void __iomem * const *iomap;
        int i;

        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return;

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                if (!(mask & (1 << i)))
                        continue;

                pcim_iounmap(pdev, iomap[i]);
                pci_release_region(pdev, i);
        }
}
EXPORT_SYMBOL(pcim_iounmap_regions);
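
/*
 * Usage sketch for pcim_iounmap_regions(): hand a BAR back before detach,
 * mirroring the pcim_iomap_regions() call that set it up:
 *
 *      pcim_iounmap_regions(pdev, 1 << 0);     // unmap and release BAR 0
 */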
  374. #endif /* CONFIG_PCI */