pci-swiotlb.c

/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

/* Non-zero once the kernel has decided to use the software IOTLB. */
int swiotlb __read_mostly;
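
/* Boot-time allocation of the bounce-buffer pool from low bootmem. */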
void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}
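
/* Runtime allocation of bounce-buffer pages from ZONE_DMA; fails silently. */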
void *swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}
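
/* On x86, device bus (DMA) addresses are identical to CPU physical addresses. */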
dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr;
}
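
/* Weak arch hook: by default no physical range is forced through the bounce buffer. */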
int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	return 0;
}
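
/*
 * Try the generic coherent allocator first; if that fails, fall back to
 * the swiotlb coherent allocator.
 */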
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
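
/* dma_map_ops table that routes DMA API calls to the lib/swiotlb implementation. */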
static struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = x86_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.dma_supported = NULL,
};
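
/*
 * Decide whether the software IOTLB is needed and, if so, initialize it and
 * install swiotlb_dma_ops as the global DMA operations.
 */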
void __init pci_swiotlb_init(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
	/*
	 * Without a hardware IOMMU, memory above 4GB cannot be reached by
	 * 32-bit DMA devices, so bounce buffering is required.  swiotlb is
	 * also enabled when IOMMU pass-through mode is requested.
	 */
	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
	    iommu_pass_through)
		swiotlb = 1;
#endif
	if (swiotlb_force)
		swiotlb = 1;

	if (swiotlb) {
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_init();
		dma_ops = &swiotlb_dma_ops;
	}
}