/*
 * arch/ppc64/kernel/iommu.c
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
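
/*
 * Illustration (editor's note, not part of the original file): virtual
 * merging can be toggled from the kernel command line, e.g. booting with
 *
 *	iommu=novmerge
 *
 * keeps scatterlist entries 1:1 with DMA segments, which can help when
 * debugging a driver suspected of mishandling merged segments.
 */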

static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:
	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i + 1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
			       ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
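
/*
 * Worked example (hypothetical numbers, for illustration): with
 * align_order = 2, align_mask becomes 0x3, so a first free bit found at
 * n = 5 is rounded up to n = 8 before the range [n, n + npages) is
 * tested and, if still clear, marked in it_map.  Allocations larger
 * than 15 pages instead start from it_largehint, i.e. in the top
 * quarter reserved by iommu_init_table() below.
 */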

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
			      unsigned int npages, enum dma_data_direction direction,
			      unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
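
/*
 * Illustration (assuming the usual 4K pages, PAGE_SHIFT = 12, and a
 * zero it_offset): table entry 0x100 yields the bus address
 * 0x100 << 12 = 0x100000; iommu_map_single() below then ORs the
 * intra-page offset back into the returned handle.
 */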

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry + i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
		npages >>= PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << PAGE_SHIFT;
		dma_addr |= s->offset;

		DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %lx\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}
	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & PAGE_MASK;
			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
				 >> PAGE_SHIFT;
			__iommu_free(tbl, vaddr, npages);
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
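
/*
 * Usage sketch (hypothetical caller, not from this file): a driver with
 * a filled-in scatterlist would do something like
 *
 *	nsegs = iommu_map_sg(dev, tbl, sg, nelems, DMA_TO_DEVICE);
 *
 * and may get back nsegs < nelems when consecutive entries were
 * allocated back-to-back in bus space and virtual merging is enabled;
 * the mapping is later torn down with iommu_unmap_sg() below.
 */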

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
			  - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
	unsigned long sz;
	static int welcomed = 0;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
	if (!tbl->it_map)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);

	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
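
/*
 * Sizing example (hypothetical numbers): a table of it_size = 0x10000
 * entries needs a (0x10000 + 7) >> 3 = 8K bitmap, and entries 0xC000
 * and up (the top quarter, from it_halfpoint) are set aside for the
 * large, >15 page allocations.
 */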

void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
		       dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
			    size_t size, enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
	npages >>= PAGE_SHIFT;

	if (tbl) {
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction, 0);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
				       "tbl %p vaddr %p npages %d\n",
				       tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (tbl)
		iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
					     (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
}
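
/*
 * Usage sketch (hypothetical caller): the single-buffer calls pair up
 * in the obvious way,
 *
 *	dma_addr_t handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);
 *	if (handle == DMA_ERROR_CODE)
 *		return -ENOMEM;		// or a driver-specific fallback
 *	...
 *	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
 *
 * where buf must be contiguous kernel memory as described above.
 */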

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
			   dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int npages, order;

	size = PAGE_ALIGN(size);
	npages = size >> PAGE_SHIFT;
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_ERR "iommu_alloc_coherent size too large: 0x%lx\n",
		       size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	ret = (void *)__get_free_pages(flag, order);
	if (!ret)
		return NULL;
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		ret = NULL;
	} else
		*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	unsigned int npages;

	if (tbl) {
		size = PAGE_ALIGN(size);
		npages = size >> PAGE_SHIFT;
		iommu_free(tbl, dma_handle, npages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
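
/*
 * Usage sketch (hypothetical caller): allocating and releasing a
 * device-visible descriptor ring,
 *
 *	dma_addr_t bus;
 *	void *ring = iommu_alloc_coherent(tbl, 8192, &bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	iommu_free_coherent(tbl, 8192, ring, bus);
 */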