/*
 * dma-mapping_32.h — DMA mapping API for i386 (historical Linux kernel header).
 */
  1. #ifndef _ASM_I386_DMA_MAPPING_H
  2. #define _ASM_I386_DMA_MAPPING_H
  3. #include <linux/mm.h>
  4. #include <linux/scatterlist.h>
  5. #include <asm/cache.h>
  6. #include <asm/io.h>
  7. #include <asm/bug.h>
  8. static inline int
  9. dma_mapping_error(dma_addr_t dma_addr)
  10. {
  11. return 0;
  12. }
  13. extern int forbid_dac;
  14. static inline int
  15. dma_supported(struct device *dev, u64 mask)
  16. {
  17. /*
  18. * we fall back to GFP_DMA when the mask isn't all 1s,
  19. * so we can't guarantee allocations that must be
  20. * within a tighter range than GFP_DMA..
  21. */
  22. if(mask < 0x00ffffff)
  23. return 0;
  24. /* Work around chipset bugs */
  25. if (forbid_dac > 0 && mask > 0xffffffffULL)
  26. return 0;
  27. return 1;
  28. }
  29. static inline int
  30. dma_set_mask(struct device *dev, u64 mask)
  31. {
  32. if(!dev->dma_mask || !dma_supported(dev, mask))
  33. return -EIO;
  34. *dev->dma_mask = mask;
  35. return 0;
  36. }
  37. static inline int
  38. dma_get_cache_alignment(void)
  39. {
  40. /* no easy way to get cache size on all x86, so return the
  41. * maximum possible, to be safe */
  42. return (1 << INTERNODE_CACHE_SHIFT);
  43. }
  44. #define dma_is_consistent(d, h) (1)
  45. static inline void
  46. dma_cache_sync(struct device *dev, void *vaddr, size_t size,
  47. enum dma_data_direction direction)
  48. {
  49. flush_write_buffers();
  50. }
  51. #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
  52. extern int
  53. dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
  54. dma_addr_t device_addr, size_t size, int flags);
  55. extern void
  56. dma_release_declared_memory(struct device *dev);
  57. extern void *
  58. dma_mark_declared_memory_occupied(struct device *dev,
  59. dma_addr_t device_addr, size_t size);
  60. #endif