dma-swiotlb.c
/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/abs_addr.h>

int swiotlb __read_mostly;
unsigned int ppc_swiotlb_enable;
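
/*
 * Look up the kernel virtual address backing a bus address: translate
 * the bus address to a physical page via swiotlb_bus_to_phys() and add
 * the offset within the page.
 */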
void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
{
	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
	void *pageaddr = page_address(pfn_to_page(pfn));

	if (pageaddr != NULL)
		return pageaddr + (addr % PAGE_SIZE);
	return NULL;
}
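
/*
 * On these platforms a device's bus address space is the physical
 * address space shifted by a constant, per-device offset, so the two
 * conversions are a simple add/subtract of that offset.
 */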
dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr + get_dma_direct_offset(hwdev);
}

phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
	return baddr - get_dma_direct_offset(hwdev);
}

/*
 * Determine if an address needs bounce buffering via swiotlb.
 * Going forward I expect the swiotlb code to generalize on using
 * a dma_ops->addr_needs_map, and this function will move from here to the
 * generic swiotlb code.
 */
int
swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
				   size_t size)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);

	BUG_ON(!dma_ops);
	return dma_ops->addr_needs_map(hwdev, addr, size);
}

/*
 * Determine if an address is reachable by a PCI device, or if we must bounce.
 */
static int
swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
	u64 mask = dma_get_mask(hwdev);
	dma_addr_t max;
	struct pci_controller *hose;
	struct pci_dev *pdev = to_pci_dev(hwdev);

	hose = pci_bus_to_host(pdev->bus);
	max = hose->dma_window_base_cur + hose->dma_window_size;

	/* Check that we're within the mapped PCI window space */
	if ((addr + size > max) || (addr < hose->dma_window_base_cur))
		return 1;

	return !is_buffer_dma_capable(mask, addr, size);
}
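
/*
 * For illustration (hypothetical numbers): with dma_window_base_cur at
 * 0x0 and dma_window_size of 0x80000000, a 0x1000-byte buffer at bus
 * address 0x7ffff800 ends at 0x80000800, past the window, so the check
 * above returns 1 and the buffer is bounced.
 */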

static int
swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM. Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
struct dma_mapping_ops swiotlb_dma_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.addr_needs_map = swiotlb_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device
};
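
/*
 * Identical to swiotlb_dma_ops except that addr_needs_map also insists
 * the buffer lie inside the controller's outbound PCI window.
 */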
struct dma_mapping_ops swiotlb_pci_dma_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.addr_needs_map = swiotlb_pci_addr_needs_map,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device
};
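
/*
 * Bus notifier: when a device is added, switch it to the swiotlb DMA
 * ops if its DMA mask cannot reach the top of DRAM.
 */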
static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* May need to bounce if the device can't address all of DRAM */
	if (dma_get_mask(dev) < lmb_end_of_DRAM())
		set_dma_ops(dev, &swiotlb_dma_ops);

	return NOTIFY_DONE;
}
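
/*
 * A notifier_block may only sit on one notifier chain at a time, so
 * register a separate (but identical) block with each bus type.
 */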
static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
	.priority = 0,
};

static struct notifier_block ppc_swiotlb_of_bus_notifier = {
	.notifier_call = ppc_swiotlb_bus_notify,
	.priority = 0,
};
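
/*
 * Hook the notifier above onto both the platform and OF platform buses
 * so that every device added later gets the DMA-mask check.
 */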
int __init swiotlb_setup_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type,
			      &ppc_swiotlb_plat_bus_notifier);
	bus_register_notifier(&of_platform_bus_type,
			      &ppc_swiotlb_of_bus_notifier);

	return 0;
}
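
/*
 * Usage sketch (the board name below is only an example): a board file
 * would typically set ppc_swiotlb_enable during setup when DRAM extends
 * beyond what its devices can address, then register the notifier at
 * arch initcall time:
 *
 *	machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier);
 */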