  1. /*
  2. * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
  3. * accessing in atomic context.
  4. *
  5. * This is used for NMI handler to access IO memory area, because
  6. * ioremap/iounmap can not be used in NMI handler. The IO memory area
  7. * is pre-mapped in process context and accessed in NMI handler.
  8. *
  9. * Copyright (C) 2009-2010, Intel Corp.
  10. * Author: Huang Ying <ying.huang@intel.com>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License version
  14. * 2 as published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the Free Software
  23. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  24. */
  25. #include <linux/kernel.h>
  26. #include <linux/module.h>
  27. #include <linux/init.h>
  28. #include <linux/acpi.h>
  29. #include <linux/io.h>
  30. #include <linux/kref.h>
  31. #include <linux/rculist.h>
  32. #include <linux/interrupt.h>
  33. #include <linux/slab.h>
  34. #include <acpi/atomicio.h>
#define ACPI_PFX "ACPI: "

/* All live pre-mappings; readers walk this list under RCU. */
static LIST_HEAD(acpi_iomaps);

/*
 * Used for mutual exclusion between writers of acpi_iomaps list, for
 * synchronization between readers and writer, RCU is used.
 */
static DEFINE_SPINLOCK(acpi_iomaps_lock);
/* One pre-established, refcounted IO memory mapping. */
struct acpi_iomap {
	struct list_head list;	/* link in acpi_iomaps, traversed under RCU */
	void __iomem *vaddr;	/* virtual address returned by ioremap() */
	unsigned long size;	/* mapped size in bytes (page-rounded) */
	phys_addr_t paddr;	/* mapped physical base (page-aligned) */
	struct kref ref;	/* number of acpi_pre_map() users */
};
  49. /* acpi_iomaps_lock or RCU read lock must be held before calling */
  50. static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
  51. unsigned long size)
  52. {
  53. struct acpi_iomap *map;
  54. list_for_each_entry_rcu(map, &acpi_iomaps, list) {
  55. if (map->paddr + map->size >= paddr + size &&
  56. map->paddr <= paddr)
  57. return map;
  58. }
  59. return NULL;
  60. }
  61. /*
  62. * Atomic "ioremap" used by NMI handler, if the specified IO memory
  63. * area is not pre-mapped, NULL will be returned.
  64. *
  65. * acpi_iomaps_lock or RCU read lock must be held before calling
  66. */
  67. static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
  68. unsigned long size)
  69. {
  70. struct acpi_iomap *map;
  71. map = __acpi_find_iomap(paddr, size);
  72. if (map)
  73. return map->vaddr + (paddr - map->paddr);
  74. else
  75. return NULL;
  76. }
  77. /* acpi_iomaps_lock must be held before calling */
  78. static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
  79. unsigned long size)
  80. {
  81. struct acpi_iomap *map;
  82. map = __acpi_find_iomap(paddr, size);
  83. if (map) {
  84. kref_get(&map->ref);
  85. return map->vaddr + (paddr - map->paddr);
  86. } else
  87. return NULL;
  88. }
/*
 * Used to pre-map the specified IO memory area. First try to find
 * whether the area is already pre-mapped, if it is, increase the
 * reference count (in __acpi_try_ioremap) and return; otherwise, do
 * the real ioremap, and add the mapping into acpi_iomaps list.
 *
 * Must be called from process context (ioremap() and GFP_KERNEL
 * kmalloc() may sleep).  Returns the virtual address corresponding to
 * paddr, or NULL on failure.
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	/* Fast path: the range is already mapped; reuse it (ref bumped). */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	/* Round [paddr, paddr + size) out to whole pages for ioremap(). */
	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
	vaddr = ioremap(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

	/*
	 * Re-check under the lock: another thread may have mapped the
	 * same range while we were in ioremap()/kmalloc().  If so, drop
	 * our redundant mapping and use the winner's instead.
	 */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		iounmap(map->vaddr);
		kfree(map);
		return vaddr;
	}
	/* Publish to RCU readers (e.g. NMI handlers) before unlocking. */
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	return map->vaddr + (paddr - map->paddr);
err_unmap:
	iounmap(vaddr);
	return NULL;
}
  135. /* acpi_iomaps_lock must be held before calling */
  136. static void __acpi_kref_del_iomap(struct kref *ref)
  137. {
  138. struct acpi_iomap *map;
  139. map = container_of(ref, struct acpi_iomap, ref);
  140. list_del_rcu(&map->list);
  141. }
  142. /*
  143. * Used to post-unmap the specified IO memory area. The iounmap is
  144. * done only if the reference count goes zero.
  145. */
  146. static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
  147. {
  148. struct acpi_iomap *map;
  149. unsigned long flags;
  150. int del;
  151. spin_lock_irqsave(&acpi_iomaps_lock, flags);
  152. map = __acpi_find_iomap(paddr, size);
  153. BUG_ON(!map);
  154. del = kref_put(&map->ref, __acpi_kref_del_iomap);
  155. spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
  156. if (!del)
  157. return;
  158. synchronize_rcu();
  159. iounmap(map->vaddr);
  160. kfree(map);
  161. }
  162. /* In NMI handler, should set silent = 1 */
  163. static int acpi_check_gar(struct acpi_generic_address *reg,
  164. u64 *paddr, int silent)
  165. {
  166. u32 width, space_id;
  167. width = reg->bit_width;
  168. space_id = reg->space_id;
  169. /* Handle possible alignment issues */
  170. memcpy(paddr, &reg->address, sizeof(*paddr));
  171. if (!*paddr) {
  172. if (!silent)
  173. pr_warning(FW_BUG ACPI_PFX
  174. "Invalid physical address in GAR [0x%llx/%u/%u]\n",
  175. *paddr, width, space_id);
  176. return -EINVAL;
  177. }
  178. if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
  179. if (!silent)
  180. pr_warning(FW_BUG ACPI_PFX
  181. "Invalid bit width in GAR [0x%llx/%u/%u]\n",
  182. *paddr, width, space_id);
  183. return -EINVAL;
  184. }
  185. if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
  186. space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
  187. if (!silent)
  188. pr_warning(FW_BUG ACPI_PFX
  189. "Invalid address space type in GAR [0x%llx/%u/%u]\n",
  190. *paddr, width, space_id);
  191. return -EINVAL;
  192. }
  193. return 0;
  194. }
  195. /* Pre-map, working on GAR */
  196. int acpi_pre_map_gar(struct acpi_generic_address *reg)
  197. {
  198. u64 paddr;
  199. void __iomem *vaddr;
  200. int rc;
  201. if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  202. return 0;
  203. rc = acpi_check_gar(reg, &paddr, 0);
  204. if (rc)
  205. return rc;
  206. vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
  207. if (!vaddr)
  208. return -EIO;
  209. return 0;
  210. }
  211. EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
  212. /* Post-unmap, working on GAR */
  213. int acpi_post_unmap_gar(struct acpi_generic_address *reg)
  214. {
  215. u64 paddr;
  216. int rc;
  217. if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  218. return 0;
  219. rc = acpi_check_gar(reg, &paddr, 0);
  220. if (rc)
  221. return rc;
  222. acpi_post_unmap(paddr, reg->bit_width / 8);
  223. return 0;
  224. }
  225. EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
  226. /*
  227. * Can be used in atomic (including NMI) or process context. RCU read
  228. * lock can only be released after the IO memory area accessing.
  229. */
  230. static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
  231. {
  232. void __iomem *addr;
  233. rcu_read_lock();
  234. addr = __acpi_ioremap_fast(paddr, width);
  235. switch (width) {
  236. case 8:
  237. *val = readb(addr);
  238. break;
  239. case 16:
  240. *val = readw(addr);
  241. break;
  242. case 32:
  243. *val = readl(addr);
  244. break;
  245. #ifdef readq
  246. case 64:
  247. *val = readq(addr);
  248. break;
  249. #endif
  250. default:
  251. return -EINVAL;
  252. }
  253. rcu_read_unlock();
  254. return 0;
  255. }
  256. static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
  257. {
  258. void __iomem *addr;
  259. rcu_read_lock();
  260. addr = __acpi_ioremap_fast(paddr, width);
  261. switch (width) {
  262. case 8:
  263. writeb(val, addr);
  264. break;
  265. case 16:
  266. writew(val, addr);
  267. break;
  268. case 32:
  269. writel(val, addr);
  270. break;
  271. #ifdef writeq
  272. case 64:
  273. writeq(val, addr);
  274. break;
  275. #endif
  276. default:
  277. return -EINVAL;
  278. }
  279. rcu_read_unlock();
  280. return 0;
  281. }
  282. /* GAR accessing in atomic (including NMI) or process context */
  283. int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
  284. {
  285. u64 paddr;
  286. int rc;
  287. rc = acpi_check_gar(reg, &paddr, 1);
  288. if (rc)
  289. return rc;
  290. *val = 0;
  291. switch (reg->space_id) {
  292. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  293. return acpi_atomic_read_mem(paddr, val, reg->bit_width);
  294. case ACPI_ADR_SPACE_SYSTEM_IO:
  295. return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
  296. default:
  297. return -EINVAL;
  298. }
  299. }
  300. EXPORT_SYMBOL_GPL(acpi_atomic_read);
  301. int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
  302. {
  303. u64 paddr;
  304. int rc;
  305. rc = acpi_check_gar(reg, &paddr, 1);
  306. if (rc)
  307. return rc;
  308. switch (reg->space_id) {
  309. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  310. return acpi_atomic_write_mem(paddr, val, reg->bit_width);
  311. case ACPI_ADR_SPACE_SYSTEM_IO:
  312. return acpi_os_write_port(paddr, val, reg->bit_width);
  313. default:
  314. return -EINVAL;
  315. }
  316. }
  317. EXPORT_SYMBOL_GPL(acpi_atomic_write);