/* dma-mapping.h — sparc DMA mapping helpers (≈1.9 KB) */
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

/* Sentinel DMA address used to report a failed mapping (all bits set). */
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);

/*
 * Non-coherent allocations are simply aliased to the coherent ones;
 * this arch draws no distinction between the two.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* Always reports consistent memory (constant 1). */
#define dma_is_consistent(d, h) (1)

/*
 * dma_ops is the default ops table; pci32_dma_ops is a distinct table
 * selected for PCI devices on sparc32 (see get_dma_ops() below).
 */
extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;
  13. static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  14. {
  15. #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
  16. if (dev->bus == &pci_bus_type)
  17. return &pci32_dma_ops;
  18. #endif
  19. return dma_ops;
  20. }
  21. #include <asm-generic/dma-mapping-common.h>
  22. static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  23. dma_addr_t *dma_handle, gfp_t flag)
  24. {
  25. struct dma_map_ops *ops = get_dma_ops(dev);
  26. void *cpu_addr;
  27. cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
  28. debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
  29. return cpu_addr;
  30. }
  31. static inline void dma_free_coherent(struct device *dev, size_t size,
  32. void *cpu_addr, dma_addr_t dma_handle)
  33. {
  34. struct dma_map_ops *ops = get_dma_ops(dev);
  35. debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  36. ops->free_coherent(dev, size, cpu_addr, dma_handle);
  37. }
  38. static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  39. {
  40. return (dma_addr == DMA_ERROR_CODE);
  41. }
  42. static inline int dma_get_cache_alignment(void)
  43. {
  44. /*
  45. * no easy way to get cache size on all processors, so return
  46. * the maximum possible, to be safe
  47. */
  48. return (1 << INTERNODE_CACHE_SHIFT);
  49. }
  50. static inline int dma_set_mask(struct device *dev, u64 mask)
  51. {
  52. #ifdef CONFIG_PCI
  53. if (dev->bus == &pci_bus_type) {
  54. if (!dev->dma_mask || !dma_supported(dev, mask))
  55. return -EINVAL;
  56. *dev->dma_mask = mask;
  57. return 0;
  58. }
  59. #endif
  60. return -EINVAL;
  61. }
  62. #endif