  1. /*
  2. * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
  3. * accessing in atomic context.
  4. *
  5. * This is used for NMI handler to access IO memory area, because
  6. * ioremap/iounmap can not be used in NMI handler. The IO memory area
  7. * is pre-mapped in process context and accessed in NMI handler.
  8. *
  9. * Copyright (C) 2009-2010, Intel Corp.
  10. * Author: Huang Ying <ying.huang@intel.com>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License version
  14. * 2 as published by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the Free Software
  23. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  24. */
  25. #include <linux/kernel.h>
  26. #include <linux/export.h>
  27. #include <linux/init.h>
  28. #include <linux/acpi.h>
  29. #include <linux/io.h>
  30. #include <linux/kref.h>
  31. #include <linux/rculist.h>
  32. #include <linux/interrupt.h>
  33. #include <linux/slab.h>
  34. #include <linux/mm.h>
  35. #include <linux/highmem.h>
  36. #include <acpi/atomicio.h>
  37. #define ACPI_PFX "ACPI: "
  38. static LIST_HEAD(acpi_iomaps);
  39. /*
  40. * Used for mutual exclusion between writers of acpi_iomaps list, for
  41. * synchronization between readers and writer, RCU is used.
  42. */
  43. static DEFINE_SPINLOCK(acpi_iomaps_lock);
  44. struct acpi_iomap {
  45. struct list_head list;
  46. void __iomem *vaddr;
  47. unsigned long size;
  48. phys_addr_t paddr;
  49. struct kref ref;
  50. };
  51. /* acpi_iomaps_lock or RCU read lock must be held before calling */
  52. static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
  53. unsigned long size)
  54. {
  55. struct acpi_iomap *map;
  56. list_for_each_entry_rcu(map, &acpi_iomaps, list) {
  57. if (map->paddr + map->size >= paddr + size &&
  58. map->paddr <= paddr)
  59. return map;
  60. }
  61. return NULL;
  62. }
/*
 * Atomic "ioremap" used by NMI handler, if the specified IO memory
 * area is not pre-mapped, NULL will be returned.
 *
 * acpi_iomaps_lock or RCU read lock must be held before calling
 */
static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
					 unsigned long size)
{
	struct acpi_iomap *map;

	/*
	 * Callers (acpi_atomic_read_mem/acpi_atomic_write_mem) pass the
	 * GAR bit width (8/16/32/64) as @size, so divide by 8 to get the
	 * access size in bytes before searching the mapping list.
	 */
	map = __acpi_find_iomap(paddr, size/8);
	if (map)
		return map->vaddr + (paddr - map->paddr);
	else
		return NULL;
}
  79. /* acpi_iomaps_lock must be held before calling */
  80. static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
  81. unsigned long size)
  82. {
  83. struct acpi_iomap *map;
  84. map = __acpi_find_iomap(paddr, size);
  85. if (map) {
  86. kref_get(&map->ref);
  87. return map->vaddr + (paddr - map->paddr);
  88. } else
  89. return NULL;
  90. }
  91. #ifndef CONFIG_IA64
  92. #define should_use_kmap(pfn) page_is_ram(pfn)
  93. #else
  94. /* ioremap will take care of cache attributes */
  95. #define should_use_kmap(pfn) 0
  96. #endif
  97. static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
  98. {
  99. unsigned long pfn;
  100. pfn = pg_off >> PAGE_SHIFT;
  101. if (should_use_kmap(pfn)) {
  102. if (pg_sz > PAGE_SIZE)
  103. return NULL;
  104. return (void __iomem __force *)kmap(pfn_to_page(pfn));
  105. } else
  106. return ioremap(pg_off, pg_sz);
  107. }
  108. static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
  109. {
  110. unsigned long pfn;
  111. pfn = pg_off >> PAGE_SHIFT;
  112. if (page_is_ram(pfn))
  113. kunmap(pfn_to_page(pfn));
  114. else
  115. iounmap(vaddr);
  116. }
/*
 * Used to pre-map the specified IO memory area. First try to find
 * whether the area is already pre-mapped, if it is, increase the
 * reference count (in __acpi_try_ioremap) and return; otherwise, do
 * the real ioremap, and add the mapping into acpi_iomaps list.
 *
 * Returns the virtual address corresponding to @paddr, or NULL on
 * mapping/allocation failure. Must be called in process context
 * (GFP_KERNEL allocation, ioremap).
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	/* Fast path: the area may already be pre-mapped by someone else. */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	/* Round [paddr, paddr + size) out to whole pages for mapping. */
	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
	vaddr = acpi_map(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

	/*
	 * The mapping above was created outside the lock, so another
	 * thread may have registered an overlapping mapping meanwhile.
	 * Re-check under the lock; if so, drop ours and reuse (and
	 * reference, via __acpi_try_ioremap) theirs.
	 */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		acpi_unmap(pg_off, map->vaddr);
		kfree(map);
		return vaddr;
	}
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	return map->vaddr + (paddr - map->paddr);
err_unmap:
	acpi_unmap(pg_off, vaddr);
	return NULL;
}
/* acpi_iomaps_lock must be held before calling */
static void __acpi_kref_del_iomap(struct kref *ref)
{
	struct acpi_iomap *map;

	map = container_of(ref, struct acpi_iomap, ref);
	/*
	 * Only unlink from the list here; the actual unmap and free are
	 * done by the caller (acpi_post_unmap) after synchronize_rcu(),
	 * so concurrent RCU readers stay safe.
	 */
	list_del_rcu(&map->list);
}
/*
 * Used to post-unmap the specified IO memory area. The iounmap is
 * done only if the reference count goes zero.
 *
 * Must be called in process context (synchronize_rcu may sleep).
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
	struct acpi_iomap *map;
	unsigned long flags;
	int del;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	map = __acpi_find_iomap(paddr, size);
	/* The area must have been pre-mapped via acpi_pre_map() before. */
	BUG_ON(!map);
	/* del != 0 iff this was the last reference (map now unlinked). */
	del = kref_put(&map->ref, __acpi_kref_del_iomap);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	if (!del)
		return;

	/*
	 * The map was removed from the list under the lock; wait for all
	 * RCU readers (e.g. NMI-context users of __acpi_ioremap_fast())
	 * to finish before tearing the mapping down.
	 */
	synchronize_rcu();
	acpi_unmap(map->paddr, map->vaddr);
	kfree(map);
}
/*
 * Validate a GAR (ACPI Generic Address Structure) and extract its
 * physical address into *paddr.
 *
 * Returns 0 on success, -EINVAL if the address is zero, the bit width
 * is not one of 8/16/32/64, or the address space is neither system
 * memory nor system IO.
 *
 * In NMI handler, should set silent = 1
 */
static int acpi_check_gar(struct acpi_generic_address *reg,
			  u64 *paddr, int silent)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/* Handle possible alignment issues */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}
	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid bit width in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}
	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}
	return 0;
}
  223. /* Pre-map, working on GAR */
  224. int acpi_pre_map_gar(struct acpi_generic_address *reg)
  225. {
  226. u64 paddr;
  227. void __iomem *vaddr;
  228. int rc;
  229. if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  230. return 0;
  231. rc = acpi_check_gar(reg, &paddr, 0);
  232. if (rc)
  233. return rc;
  234. vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
  235. if (!vaddr)
  236. return -EIO;
  237. return 0;
  238. }
  239. EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
  240. /* Post-unmap, working on GAR */
  241. int acpi_post_unmap_gar(struct acpi_generic_address *reg)
  242. {
  243. u64 paddr;
  244. int rc;
  245. if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
  246. return 0;
  247. rc = acpi_check_gar(reg, &paddr, 0);
  248. if (rc)
  249. return rc;
  250. acpi_post_unmap(paddr, reg->bit_width / 8);
  251. return 0;
  252. }
  253. EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
#ifdef readq
/* Native 64-bit MMIO read where the arch provides readq(). */
static inline u64 read64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
/*
 * Fallback for arches without readq(): combine two 32-bit reads,
 * low word first. NOTE(review): the two halves are not read as one
 * atomic 64-bit access, so a concurrently-changing register could be
 * observed torn.
 */
static inline u64 read64(const volatile void __iomem *addr)
{
	u64 l, h;

	l = readl(addr);
	h = readl(addr+4);
	return l | (h << 32);
}
#endif
  268. /*
  269. * Can be used in atomic (including NMI) or process context. RCU read
  270. * lock can only be released after the IO memory area accessing.
  271. */
  272. static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
  273. {
  274. void __iomem *addr;
  275. rcu_read_lock();
  276. addr = __acpi_ioremap_fast(paddr, width);
  277. switch (width) {
  278. case 8:
  279. *val = readb(addr);
  280. break;
  281. case 16:
  282. *val = readw(addr);
  283. break;
  284. case 32:
  285. *val = readl(addr);
  286. break;
  287. case 64:
  288. *val = read64(addr);
  289. break;
  290. default:
  291. return -EINVAL;
  292. }
  293. rcu_read_unlock();
  294. return 0;
  295. }
#ifdef writeq
/* Native 64-bit MMIO write where the arch provides writeq(). */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
/*
 * Fallback for arches without writeq(): two 32-bit writes, low word
 * first. NOTE(review): not atomic as a single 64-bit store; a reader
 * could observe the value half-updated.
 */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val>>32, addr+4);
}
#endif
  308. static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
  309. {
  310. void __iomem *addr;
  311. rcu_read_lock();
  312. addr = __acpi_ioremap_fast(paddr, width);
  313. switch (width) {
  314. case 8:
  315. writeb(val, addr);
  316. break;
  317. case 16:
  318. writew(val, addr);
  319. break;
  320. case 32:
  321. writel(val, addr);
  322. break;
  323. case 64:
  324. write64(val, addr);
  325. break;
  326. default:
  327. return -EINVAL;
  328. }
  329. rcu_read_unlock();
  330. return 0;
  331. }
/* GAR accessing in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	/* silent = 1: this may run in NMI context */
	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_read_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * *val was zeroed above, so only the low 32 bits are
		 * filled through the u32 cast.
		 * NOTE(review): the cast assumes the u32 overlays the
		 * low half of *val (little-endian); verify on BE arches.
		 */
		return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);
  351. int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
  352. {
  353. u64 paddr;
  354. int rc;
  355. rc = acpi_check_gar(reg, &paddr, 1);
  356. if (rc)
  357. return rc;
  358. switch (reg->space_id) {
  359. case ACPI_ADR_SPACE_SYSTEM_MEMORY:
  360. return acpi_atomic_write_mem(paddr, val, reg->bit_width);
  361. case ACPI_ADR_SPACE_SYSTEM_IO:
  362. return acpi_os_write_port(paddr, val, reg->bit_width);
  363. default:
  364. return -EINVAL;
  365. }
  366. }
  367. EXPORT_SYMBOL_GPL(acpi_atomic_write);