/* arch/sparc/include/asm/dma-mapping.h */
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-debug.h>

/* Sentinel bus address used to signal a failed mapping (all bits set). */
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

/*
 * The noncoherent variants simply alias the coherent ones, and
 * dma_is_consistent() is hard-wired to true; memory handed out by
 * dma_alloc_coherent() is treated as consistent on this architecture.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

/*
 * Default op table pointer plus a dedicated op table for 32-bit PCI;
 * both are defined in arch code.  Note dma_ops is a pointer while
 * pci32_dma_ops is the struct itself (its address is taken below).
 */
extern struct dma_map_ops *dma_ops, pci32_dma_ops;
extern struct bus_type pci_bus_type;
  14. static inline struct dma_map_ops *get_dma_ops(struct device *dev)
  15. {
  16. #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
  17. if (dev->bus == &pci_bus_type)
  18. return &pci32_dma_ops;
  19. #endif
  20. return dma_ops;
  21. }
  22. #include <asm-generic/dma-mapping-common.h>
  23. static inline void *dma_alloc_coherent(struct device *dev, size_t size,
  24. dma_addr_t *dma_handle, gfp_t flag)
  25. {
  26. struct dma_map_ops *ops = get_dma_ops(dev);
  27. void *cpu_addr;
  28. cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
  29. debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
  30. return cpu_addr;
  31. }
  32. static inline void dma_free_coherent(struct device *dev, size_t size,
  33. void *cpu_addr, dma_addr_t dma_handle)
  34. {
  35. struct dma_map_ops *ops = get_dma_ops(dev);
  36. debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
  37. ops->free_coherent(dev, size, cpu_addr, dma_handle);
  38. }
  39. static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  40. {
  41. return (dma_addr == DMA_ERROR_CODE);
  42. }
  43. static inline int dma_get_cache_alignment(void)
  44. {
  45. /*
  46. * no easy way to get cache size on all processors, so return
  47. * the maximum possible, to be safe
  48. */
  49. return (1 << INTERNODE_CACHE_SHIFT);
  50. }
  51. #endif