/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
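
/* The IOMMU and streaming-buffer control registers are accessed by
 * physical address, so iommu_read() and iommu_write() above go through
 * ASI_PHYS_BYPASS_EC_E, bypassing the MMU and the external cache.  The
 * usual PIO-ordering idiom, used throughout this file, is (a sketch,
 * reg_pa standing in for any register's physical address):
 *
 *	iommu_write(reg_pa, val);
 *	(void) iommu_read(iommu->write_complete_reg);
 *
 * where the dummy read back forces the preceding write to complete
 * before execution continues.
 */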

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
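
/* The IOPTE_* templates below build the protection bits of an IOMMU
 * page table entry: IOPTE_VALID plus IOPTE_CACHE for cache-coherent
 * ("consistent") accesses, optionally IOPTE_STBUF to route the mapping
 * through the streaming buffer, and the DMA context number placed in
 * the IOPTE_CONTEXT field at bit 47.  IOPTE_WRITE is OR'd in separately
 * by callers when the device may write to memory.
 */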

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the
 * 'handle' facility it must all be done in one pass while under the
 * iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've
 * passed over the entire page table doing allocations.  Therefore we
 * only ever advance the hint and cannot backtrack it.
 */
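
/* For example, with a 64K-entry arena and the hint at entry 65530, a
 * 32-page request cannot fit below the limit; the search restarts at
 * entry 0 and, on sun4u, performs a full IOMMU flush first so that no
 * stale translations survive the wrap.  (Numbers illustrative only.)
 */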
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

again:
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages, 0,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up. */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}
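
/* Called once per IOMMU at controller probe time.  tsbsize is the size
 * in bytes of the IOMMU page table (TSB), dma_offset is the DVMA base
 * address that table maps, and dma_addr_mask bounds the DMA addresses
 * the IOMMU can generate.  A controller driver would do roughly the
 * following (sketch only; the size and addresses are hypothetical):
 *
 *	if (iommu_table_init(iommu, 128 * 1024, 0xc0000000, 0xffffffff))
 *		goto probe_failed;
 *
 * A 128K TSB holds 128K / sizeof(iopte_t) = 16K entries, i.e. 16K IO
 * pages of DVMA space starting at dma_offset.
 */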
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kzalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
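
/* DMA contexts let the streaming buffer flush every mapping sharing a
 * context number in a single operation.  Context 0 means "no context",
 * so the allocator below searches upward from ctx_lowest_free, wraps
 * around once, and falls back to 0 (with a warning) when the bitmap is
 * exhausted.
 */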
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
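
/* A driver obtains a consistent mapping through the generic DMA API,
 * which dispatches here via dma_ops.  For instance (a sketch; the ring
 * buffer and its size are hypothetical):
 *
 *	dma_addr_t dvma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, 8192, &dvma, GFP_KERNEL);
 *
 *	... hand dvma to the device, use ring from the CPU ...
 *
 *	dma_free_coherent(&pdev->dev, 8192, ring, dvma);
 *
 * Consistent mappings use IOPTE_CONSISTENT entries, so no streaming
 * buffer flushing is ever needed for them.
 */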
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
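
/* Streaming mappings, by contrast, are set up per I/O.  A typical
 * caller would look roughly like this (sketch; buf and len are
 * hypothetical):
 *
 *	dma_addr_t busa = dma_map_single(&pdev->dev, buf, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(busa))
 *		return -ENOMEM;
 *	... device DMA in flight ...
 *	dma_unmap_single(&pdev->dev, busa, len, DMA_TO_DEVICE);
 *
 * The returned address preserves the buffer's offset within its IO
 * page, while the page frames themselves are remapped through the TSB.
 */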
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
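
/* Flushing dirty data out of the streaming buffer works in one of two
 * ways.  If both the IOMMU and the streaming buffer support contexts,
 * the context-match register is consulted and the flush is retried per
 * matching line; otherwise each IO page of the mapping is flushed
 * individually.  Either way, a flush synchronization follows: the
 * hardware sets a flag word in memory once all prior flushes have
 * drained, and we spin (up to ~100ms) waiting for it.
 */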
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
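
/* Scatterlists map the same way, one IOPTE per IO page across all
 * entries, so the whole list lands in one contiguous DVMA range.  A
 * caller sketch (sg, s, nents and program_device() are hypothetical):
 *
 *	int count = dma_map_sg(&pdev->dev, sg, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sg, s, count, i)
 *		program_device(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(&pdev->dev, sg, nents, DMA_FROM_DEVICE);
 */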
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx, i, npages, iopte_protection;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;
	iopte_t *base;
	u32 dma_base;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			dma_4u_map_single(dev, sg_virt(sglist),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	npages = calc_npages(sglist, nelems);

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for_each_sg(sglist, sg, nelems, i) {
		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
		unsigned long slen = sg->length;
		unsigned long this_npages;

		this_npages = iommu_num_pages(paddr, slen);

		sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
		sg->dma_length = slen;

		paddr &= IO_PAGE_MASK;
		while (this_npages--) {
			iopte_val(*base) = iopte_protection | paddr;

			base++;
			paddr += IO_PAGE_SIZE;
			dma_base += IO_PAGE_SIZE;
		}
	}

	return nelems;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx, i, npages;
	struct strbuf *strbuf;
	struct iommu *iommu;
	iopte_t *base;
	u32 bus_addr;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	npages = calc_npages(sglist, nelems);

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
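
/* The sync_*_for_cpu hooks serve drivers that want to look at DMA'd
 * data while a streaming mapping is still live: they push any dirty
 * streaming-buffer lines back to memory without tearing down the
 * mapping.  Typical use (sketch, reusing the busa/len names from the
 * earlier example):
 *
 *	dma_sync_single_for_cpu(&pdev->dev, busa, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *
 * Note that the ops table below supplies no sync_*_for_device
 * counterparts; none are needed on this hardware.
 */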
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;

	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
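
/* dma_supported() answers whether a device's DMA mask can work at all:
 * sun4u DVMA addresses are 32-bit, so any mask with bits set above
 * bit 31 is rejected, and otherwise the mask must cover the IOMMU's
 * own dma_addr_mask.  For example, a device limited to a 24-bit mask
 * behind a 32-bit IOMMU fails the subset test and is deferred to the
 * PCI layer's verdict.
 */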
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif

	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);