atomicio.c 8.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360
  1. /*
  2. * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
  3. * accessing in atomic context.
  4. *
  5. * This is used for NMI handler to access IO memory area, because
  6. * ioremap/iounmap can not be used in NMI handler. The IO memory area
  7. * is pre-mapped in process context and accessed in NMI handler.
  8. *
  9. * Copyright (C) 2009-2010, Intel Corp.
  10. * Author: Huang Ying <ying.huang@intel.com>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License version
  14. * 2 as published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the Free Software
  23. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  24. */
  25. #include <linux/kernel.h>
  26. #include <linux/module.h>
  27. #include <linux/init.h>
  28. #include <linux/acpi.h>
  29. #include <linux/io.h>
  30. #include <linux/kref.h>
  31. #include <linux/rculist.h>
  32. #include <linux/interrupt.h>
  33. #include <acpi/atomicio.h>
#define ACPI_PFX "ACPI: "

/* All currently pre-mapped IO memory areas (struct acpi_iomap entries). */
static LIST_HEAD(acpi_iomaps);
/*
 * Used for mutual exclusion between writers of acpi_iomaps list, for
 * synchronization between readers and writer, RCU is used.
 */
static DEFINE_SPINLOCK(acpi_iomaps_lock);
/* One pre-mapped IO memory area; lives on acpi_iomaps until refcount drops. */
struct acpi_iomap {
	struct list_head list;	/* link in acpi_iomaps, traversed under RCU */
	void __iomem *vaddr;	/* virtual address returned by ioremap() */
	unsigned long size;	/* mapped size in bytes (page granular) */
	phys_addr_t paddr;	/* physical base (page aligned, see acpi_pre_map) */
	struct kref ref;	/* balance of acpi_pre_map()/acpi_post_unmap() */
};
  48. /* acpi_iomaps_lock or RCU read lock must be held before calling */
  49. static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
  50. unsigned long size)
  51. {
  52. struct acpi_iomap *map;
  53. list_for_each_entry_rcu(map, &acpi_iomaps, list) {
  54. if (map->paddr + map->size >= paddr + size &&
  55. map->paddr <= paddr)
  56. return map;
  57. }
  58. return NULL;
  59. }
  60. /*
  61. * Atomic "ioremap" used by NMI handler, if the specified IO memory
  62. * area is not pre-mapped, NULL will be returned.
  63. *
  64. * acpi_iomaps_lock or RCU read lock must be held before calling
  65. */
  66. static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
  67. unsigned long size)
  68. {
  69. struct acpi_iomap *map;
  70. map = __acpi_find_iomap(paddr, size);
  71. if (map)
  72. return map->vaddr + (paddr - map->paddr);
  73. else
  74. return NULL;
  75. }
  76. /* acpi_iomaps_lock must be held before calling */
  77. static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
  78. unsigned long size)
  79. {
  80. struct acpi_iomap *map;
  81. map = __acpi_find_iomap(paddr, size);
  82. if (map) {
  83. kref_get(&map->ref);
  84. return map->vaddr + (paddr - map->paddr);
  85. } else
  86. return NULL;
  87. }
/*
 * Used to pre-map the specified IO memory area. First try to find
 * whether the area is already pre-mapped, if it is, increase the
 * reference count (in __acpi_try_ioremap) and return; otherwise, do
 * the real ioremap, and add the mapping into acpi_iomaps list.
 *
 * Returns the virtual address corresponding to paddr, or NULL on
 * failure. Must be called in process context (ioremap/kmalloc sleep).
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	/* Fast path: area already mapped, just take a reference. */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	/* Round the requested range out to whole pages for ioremap. */
	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
	vaddr = ioremap(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

	/*
	 * The lock was dropped while we mapped, so another caller may
	 * have raced us and inserted a covering mapping. If so, reuse
	 * theirs (__acpi_try_ioremap took the reference) and undo ours.
	 */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		iounmap(map->vaddr);
		kfree(map);
		return vaddr;
	}
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	/* vaddr is the page-aligned base here; add the in-page offset. */
	return vaddr + (paddr - pg_off);
err_unmap:
	iounmap(vaddr);
	return NULL;
}
  134. /* acpi_iomaps_lock must be held before calling */
  135. static void __acpi_kref_del_iomap(struct kref *ref)
  136. {
  137. struct acpi_iomap *map;
  138. map = container_of(ref, struct acpi_iomap, ref);
  139. list_del_rcu(&map->list);
  140. }
/*
 * Used to post-unmap the specified IO memory area. The iounmap is
 * done only if the reference count goes zero.
 *
 * Must be called in process context (synchronize_rcu/iounmap sleep).
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
	struct acpi_iomap *map;
	unsigned long flags;
	int del;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	map = __acpi_find_iomap(paddr, size);
	/* Unmapping an area that was never pre-mapped is a caller bug. */
	BUG_ON(!map);
	del = kref_put(&map->ref, __acpi_kref_del_iomap);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (!del)
		return;

	/*
	 * The release callback only unlinked the entry; wait for all RCU
	 * readers (e.g. NMI-context __acpi_ioremap_fast) to finish before
	 * tearing the mapping down.
	 */
	synchronize_rcu();
	iounmap(map->vaddr);
	kfree(map);
}
  161. /* In NMI handler, should set silent = 1 */
  162. static int acpi_check_gar(struct acpi_generic_address *reg,
  163. u64 *paddr, int silent)
  164. {
  165. u32 width, space_id;
  166. width = reg->bit_width;
  167. space_id = reg->space_id;
  168. /* Handle possible alignment issues */
  169. memcpy(paddr, &reg->address, sizeof(*paddr));
  170. if (!*paddr) {
  171. if (!silent)
  172. pr_warning(FW_BUG ACPI_PFX
  173. "Invalid physical address in GAR [0x%llx/%u/%u]\n",
  174. *paddr, width, space_id);
  175. return -EINVAL;
  176. }
  177. if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
  178. if (!silent)
  179. pr_warning(FW_BUG ACPI_PFX
  180. "Invalid bit width in GAR [0x%llx/%u/%u]\n",
  181. *paddr, width, space_id);
  182. return -EINVAL;
  183. }
  184. if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
  185. space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
  186. if (!silent)
  187. pr_warning(FW_BUG ACPI_PFX
  188. "Invalid address space type in GAR [0x%llx/%u/%u]\n",
  189. *paddr, width, space_id);
  190. return -EINVAL;
  191. }
  192. return 0;
  193. }
  194. /* Pre-map, working on GAR */
  195. int acpi_pre_map_gar(struct acpi_generic_address *reg)
  196. {
  197. u64 paddr;
  198. void __iomem *vaddr;
  199. int rc;
  200. if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  201. return 0;
  202. rc = acpi_check_gar(reg, &paddr, 0);
  203. if (rc)
  204. return rc;
  205. vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
  206. if (!vaddr)
  207. return -EIO;
  208. return 0;
  209. }
  210. EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
  211. /* Post-unmap, working on GAR */
  212. int acpi_post_unmap_gar(struct acpi_generic_address *reg)
  213. {
  214. u64 paddr;
  215. int rc;
  216. if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  217. return 0;
  218. rc = acpi_check_gar(reg, &paddr, 0);
  219. if (rc)
  220. return rc;
  221. acpi_post_unmap(paddr, reg->bit_width / 8);
  222. return 0;
  223. }
  224. EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
  225. /*
  226. * Can be used in atomic (including NMI) or process context. RCU read
  227. * lock can only be released after the IO memory area accessing.
  228. */
  229. static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
  230. {
  231. void __iomem *addr;
  232. rcu_read_lock();
  233. addr = __acpi_ioremap_fast(paddr, width);
  234. switch (width) {
  235. case 8:
  236. *val = readb(addr);
  237. break;
  238. case 16:
  239. *val = readw(addr);
  240. break;
  241. case 32:
  242. *val = readl(addr);
  243. break;
  244. case 64:
  245. *val = readq(addr);
  246. break;
  247. default:
  248. return -EINVAL;
  249. }
  250. rcu_read_unlock();
  251. return 0;
  252. }
  253. static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
  254. {
  255. void __iomem *addr;
  256. rcu_read_lock();
  257. addr = __acpi_ioremap_fast(paddr, width);
  258. switch (width) {
  259. case 8:
  260. writeb(val, addr);
  261. break;
  262. case 16:
  263. writew(val, addr);
  264. break;
  265. case 32:
  266. writel(val, addr);
  267. break;
  268. case 64:
  269. writeq(val, addr);
  270. break;
  271. default:
  272. return -EINVAL;
  273. }
  274. rcu_read_unlock();
  275. return 0;
  276. }
  277. /* GAR accessing in atomic (including NMI) or process context */
  278. int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
  279. {
  280. u64 paddr;
  281. int rc;
  282. rc = acpi_check_gar(reg, &paddr, 1);
  283. if (rc)
  284. return rc;
  285. *val = 0;
  286. switch (reg->space_id) {
  287. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  288. return acpi_atomic_read_mem(paddr, val, reg->bit_width);
  289. case ACPI_ADR_SPACE_SYSTEM_IO:
  290. return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
  291. default:
  292. return -EINVAL;
  293. }
  294. }
  295. EXPORT_SYMBOL_GPL(acpi_atomic_read);
  296. int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
  297. {
  298. u64 paddr;
  299. int rc;
  300. rc = acpi_check_gar(reg, &paddr, 1);
  301. if (rc)
  302. return rc;
  303. switch (reg->space_id) {
  304. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  305. return acpi_atomic_write_mem(paddr, val, reg->bit_width);
  306. case ACPI_ADR_SPACE_SYSTEM_IO:
  307. return acpi_os_write_port(paddr, val, reg->bit_width);
  308. default:
  309. return -EINVAL;
  310. }
  311. }
  312. EXPORT_SYMBOL_GPL(acpi_atomic_write);