swiotlb-xen.c

/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device drivers operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
u64 start_dma_addr;
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}
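
/*
 * For example (the MFN values are hypothetical): a buffer that is contiguous
 * in pseudo-physical space need not be contiguous in machine space.  If
 * pfn_to_mfn(0x100) == 0x8842 but pfn_to_mfn(0x101) == 0x17f3, an 8KB buffer
 * starting at the beginning of pfn 0x100 spans two non-adjacent machine
 * frames, so
 *
 *	range_straddles_page_boundary((phys_addr_t)0x100 << PAGE_SHIFT,
 *				      2 * PAGE_SIZE)
 *
 * returns 1 and the buffer has to be bounced through the SWIOTLB.
 */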
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}
static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
void __init xen_swiotlb_init(int verbose)
{
	unsigned long bytes;
	int rc = -ENOMEM;
	unsigned long nr_tbl;
	char *m = NULL;
	unsigned int repeat = 3;

	nr_tbl = swiotlb_nr_tbl();
	if (nr_tbl)
		xen_io_tlb_nslabs = nr_tbl;
	else {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
retry:
	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from any location.
	 */
	xen_io_tlb_start = alloc_bootmem(bytes);
	if (!xen_io_tlb_start) {
		m = "Cannot allocate Xen-SWIOTLB buffer!\n";
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		free_bootmem(__pa(xen_io_tlb_start), bytes);
		m = "Failed to get contiguous memory for DMA from Xen!\n"
		    "You either: don't have the permissions, do not have"
		    " enough free memory under 4GB, or the hypervisor memory"
		    " is too fragmented!";
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);

	return;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	xen_raw_printk("%s (rc:%d)", m, rc);
	panic("%s (rc:%d)", m, rc);
}
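
/*
 * A rough sketch (for orientation only, not part of this file): once
 * xen_swiotlb_init() has run, the architecture code is expected to plug the
 * exports below into a struct dma_map_ops so that the generic DMA API routes
 * through them, along the lines of:
 *
 *	static struct dma_map_ops xen_swiotlb_dma_ops = {
 *		.mapping_error		= xen_swiotlb_dma_mapping_error,
 *		.alloc_coherent		= xen_swiotlb_alloc_coherent,
 *		.free_coherent		= xen_swiotlb_free_coherent,
 *		.map_page		= xen_swiotlb_map_page,
 *		.unmap_page		= xen_swiotlb_unmap_page,
 *		.map_sg			= xen_swiotlb_map_sg_attrs,
 *		.unmap_sg		= xen_swiotlb_unmap_sg_attrs,
 *		.sync_single_for_cpu	= xen_swiotlb_sync_single_for_cpu,
 *		.sync_single_for_device	= xen_swiotlb_sync_single_for_device,
 *		.sync_sg_for_cpu	= xen_swiotlb_sync_sg_for_cpu,
 *		.sync_sg_for_device	= xen_swiotlb_sync_sg_for_device,
 *		.dma_supported		= xen_swiotlb_dma_supported,
 *	};
 */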
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	if (ret) {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_machine(ret).maddr;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr)
{
	int order = get_order(size);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	xen_destroy_contiguous_region((unsigned long)vaddr, order);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
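
/*
 * A minimal usage sketch (the driver-side code is hypothetical): with the
 * Xen dma_map_ops installed, drivers normally reach the pair above through
 * the generic dma_alloc_coherent()/dma_free_coherent() helpers, e.g. for a
 * descriptor ring shared with the device:
 *
 *	dma_addr_t bus;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program the device with 'bus', access 'ring' from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, bus);
 */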
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (!map)
		return DMA_ERROR_CODE;

	dev_addr = xen_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
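
/*
 * A minimal streaming-DMA usage sketch (the driver-side code is
 * hypothetical): once the Xen dma_map_ops are installed,
 * dma_map_page()/dma_unmap_page() end up in
 * xen_swiotlb_map_page()/xen_swiotlb_unmap_page(), e.g. for a single page
 * sent to the device:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... hand 'bus' to the device and wait for the transfer to finish ...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_TO_DEVICE);
 */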
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
					target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
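
/*
 * A sketch of the ownership hand-off described above (the driver-side code
 * is hypothetical): to inspect a still-mapped receive buffer from the CPU
 * and then give it back to the device:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read what the device wrote into the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */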
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			void *map = swiotlb_tbl_map_single(hwdev,
							   start_dma_addr,
							   sg_phys(sg),
							   sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sgl[0].dma_length = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
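
/*
 * A minimal scatter-gather usage sketch (the driver-side code, including the
 * program_descriptor() helper, is hypothetical): dma_map_sg() lands in
 * xen_swiotlb_map_sg_attrs() and the driver walks the result with the
 * sg_dma_address()/sg_dma_len() accessors:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */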
int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir)
{
	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir)
{
	return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);
/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);