/* dma.c */
  1. /*
  2. * Copyright (C) 2004 IBM Corporation
  3. *
  4. * Implements the generic device dma API for ppc64. Handles
  5. * the pci and vio busses
  6. */
  7. #include <linux/device.h>
  8. #include <linux/dma-mapping.h>
  9. /* Include the busses we support */
  10. #include <linux/pci.h>
  11. #include <asm/vio.h>
  12. #include <asm/scatterlist.h>
  13. #include <asm/bug.h>
  14. static struct dma_mapping_ops *get_dma_ops(struct device *dev)
  15. {
  16. #ifdef CONFIG_PCI
  17. if (dev->bus == &pci_bus_type)
  18. return &pci_dma_ops;
  19. #endif
  20. #ifdef CONFIG_IBMVIO
  21. if (dev->bus == &vio_bus_type)
  22. return &vio_dma_ops;
  23. #endif
  24. return NULL;
  25. }
  26. int dma_supported(struct device *dev, u64 mask)
  27. {
  28. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  29. if (dma_ops)
  30. return dma_ops->dma_supported(dev, mask);
  31. BUG();
  32. return 0;
  33. }
  34. EXPORT_SYMBOL(dma_supported);
  35. int dma_set_mask(struct device *dev, u64 dma_mask)
  36. {
  37. #ifdef CONFIG_PCI
  38. if (dev->bus == &pci_bus_type)
  39. return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
  40. #endif
  41. #ifdef CONFIG_IBMVIO
  42. if (dev->bus == &vio_bus_type)
  43. return -EIO;
  44. #endif /* CONFIG_IBMVIO */
  45. BUG();
  46. return 0;
  47. }
  48. EXPORT_SYMBOL(dma_set_mask);
  49. void *dma_alloc_coherent(struct device *dev, size_t size,
  50. dma_addr_t *dma_handle, unsigned int __nocast flag)
  51. {
  52. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  53. if (dma_ops)
  54. return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
  55. BUG();
  56. return NULL;
  57. }
  58. EXPORT_SYMBOL(dma_alloc_coherent);
  59. void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  60. dma_addr_t dma_handle)
  61. {
  62. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  63. if (dma_ops)
  64. dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
  65. else
  66. BUG();
  67. }
  68. EXPORT_SYMBOL(dma_free_coherent);
  69. dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
  70. enum dma_data_direction direction)
  71. {
  72. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  73. if (dma_ops)
  74. return dma_ops->map_single(dev, cpu_addr, size, direction);
  75. BUG();
  76. return (dma_addr_t)0;
  77. }
  78. EXPORT_SYMBOL(dma_map_single);
  79. void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  80. enum dma_data_direction direction)
  81. {
  82. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  83. if (dma_ops)
  84. dma_ops->unmap_single(dev, dma_addr, size, direction);
  85. else
  86. BUG();
  87. }
  88. EXPORT_SYMBOL(dma_unmap_single);
  89. dma_addr_t dma_map_page(struct device *dev, struct page *page,
  90. unsigned long offset, size_t size,
  91. enum dma_data_direction direction)
  92. {
  93. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  94. if (dma_ops)
  95. return dma_ops->map_single(dev,
  96. (page_address(page) + offset), size, direction);
  97. BUG();
  98. return (dma_addr_t)0;
  99. }
  100. EXPORT_SYMBOL(dma_map_page);
  101. void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
  102. enum dma_data_direction direction)
  103. {
  104. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  105. if (dma_ops)
  106. dma_ops->unmap_single(dev, dma_address, size, direction);
  107. else
  108. BUG();
  109. }
  110. EXPORT_SYMBOL(dma_unmap_page);
  111. int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  112. enum dma_data_direction direction)
  113. {
  114. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  115. if (dma_ops)
  116. return dma_ops->map_sg(dev, sg, nents, direction);
  117. BUG();
  118. return 0;
  119. }
  120. EXPORT_SYMBOL(dma_map_sg);
  121. void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
  122. enum dma_data_direction direction)
  123. {
  124. struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
  125. if (dma_ops)
  126. dma_ops->unmap_sg(dev, sg, nhwentries, direction);
  127. else
  128. BUG();
  129. }
  130. EXPORT_SYMBOL(dma_unmap_sg);