/* dma_64.c */
/*
 * Copyright (C) 2004 IBM Corporation
 *
 * Implements the generic device dma API for ppc64. Handles
 * the pci and vio busses
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
/* Include the busses we support */
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/ibmebus.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
  15. static struct dma_mapping_ops *get_dma_ops(struct device *dev)
  16. {
  17. #ifdef CONFIG_PCI
  18. if (dev->bus == &pci_bus_type)
  19. return &pci_dma_ops;
  20. #endif
  21. #ifdef CONFIG_IBMVIO
  22. if (dev->bus == &vio_bus_type)
  23. return &vio_dma_ops;
  24. #endif
  25. #ifdef CONFIG_IBMEBUS
  26. if (dev->bus == &ibmebus_bus_type)
  27. return &ibmebus_dma_ops;
  28. #endif
  29. return NULL;
  30. }
  31. int dma_supported(struct device *dev, u64 mask)
  32. {
  33. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  34. if (dma_ops)
  35. return dma_ops->dma_supported(dev, mask);
  36. BUG();
  37. return 0;
  38. }
  39. EXPORT_SYMBOL(dma_supported);
  40. int dma_set_mask(struct device *dev, u64 dma_mask)
  41. {
  42. #ifdef CONFIG_PCI
  43. if (dev->bus == &pci_bus_type)
  44. return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
  45. #endif
  46. #ifdef CONFIG_IBMVIO
  47. if (dev->bus == &vio_bus_type)
  48. return -EIO;
  49. #endif /* CONFIG_IBMVIO */
  50. #ifdef CONFIG_IBMEBUS
  51. if (dev->bus == &ibmebus_bus_type)
  52. return -EIO;
  53. #endif
  54. BUG();
  55. return 0;
  56. }
  57. EXPORT_SYMBOL(dma_set_mask);
  58. void *dma_alloc_coherent(struct device *dev, size_t size,
  59. dma_addr_t *dma_handle, gfp_t flag)
  60. {
  61. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  62. if (dma_ops)
  63. return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
  64. BUG();
  65. return NULL;
  66. }
  67. EXPORT_SYMBOL(dma_alloc_coherent);
  68. void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  69. dma_addr_t dma_handle)
  70. {
  71. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  72. if (dma_ops)
  73. dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
  74. else
  75. BUG();
  76. }
  77. EXPORT_SYMBOL(dma_free_coherent);
  78. dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
  79. enum dma_data_direction direction)
  80. {
  81. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  82. if (dma_ops)
  83. return dma_ops->map_single(dev, cpu_addr, size, direction);
  84. BUG();
  85. return (dma_addr_t)0;
  86. }
  87. EXPORT_SYMBOL(dma_map_single);
  88. void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  89. enum dma_data_direction direction)
  90. {
  91. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  92. if (dma_ops)
  93. dma_ops->unmap_single(dev, dma_addr, size, direction);
  94. else
  95. BUG();
  96. }
  97. EXPORT_SYMBOL(dma_unmap_single);
  98. dma_addr_t dma_map_page(struct device *dev, struct page *page,
  99. unsigned long offset, size_t size,
  100. enum dma_data_direction direction)
  101. {
  102. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  103. if (dma_ops)
  104. return dma_ops->map_single(dev,
  105. (page_address(page) + offset), size, direction);
  106. BUG();
  107. return (dma_addr_t)0;
  108. }
  109. EXPORT_SYMBOL(dma_map_page);
  110. void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
  111. enum dma_data_direction direction)
  112. {
  113. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  114. if (dma_ops)
  115. dma_ops->unmap_single(dev, dma_address, size, direction);
  116. else
  117. BUG();
  118. }
  119. EXPORT_SYMBOL(dma_unmap_page);
  120. int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  121. enum dma_data_direction direction)
  122. {
  123. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  124. if (dma_ops)
  125. return dma_ops->map_sg(dev, sg, nents, direction);
  126. BUG();
  127. return 0;
  128. }
  129. EXPORT_SYMBOL(dma_map_sg);
  130. void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
  131. enum dma_data_direction direction)
  132. {
  133. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  134. if (dma_ops)
  135. dma_ops->unmap_sg(dev, sg, nhwentries, direction);
  136. else
  137. BUG();
  138. }
  139. EXPORT_SYMBOL(dma_unmap_sg);