/*
 * i386 DMA-mapping API (32-bit x86): bus addresses are identical to
 * physical addresses, so most operations are trivial translations.
 */
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>
  8. static inline dma_addr_t
  9. dma_map_page(struct device *dev, struct page *page, unsigned long offset,
  10. size_t size, enum dma_data_direction direction)
  11. {
  12. BUG_ON(!valid_dma_direction(direction));
  13. return page_to_phys(page) + offset;
  14. }
  15. static inline void
  16. dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
  17. enum dma_data_direction direction)
  18. {
  19. BUG_ON(!valid_dma_direction(direction));
  20. }
  21. static inline int
  22. dma_mapping_error(dma_addr_t dma_addr)
  23. {
  24. return 0;
  25. }
  26. extern int forbid_dac;
  27. static inline int
  28. dma_supported(struct device *dev, u64 mask)
  29. {
  30. /*
  31. * we fall back to GFP_DMA when the mask isn't all 1s,
  32. * so we can't guarantee allocations that must be
  33. * within a tighter range than GFP_DMA..
  34. */
  35. if(mask < 0x00ffffff)
  36. return 0;
  37. /* Work around chipset bugs */
  38. if (forbid_dac > 0 && mask > 0xffffffffULL)
  39. return 0;
  40. return 1;
  41. }
  42. static inline int
  43. dma_set_mask(struct device *dev, u64 mask)
  44. {
  45. if(!dev->dma_mask || !dma_supported(dev, mask))
  46. return -EIO;
  47. *dev->dma_mask = mask;
  48. return 0;
  49. }
  50. static inline int
  51. dma_get_cache_alignment(void)
  52. {
  53. /* no easy way to get cache size on all x86, so return the
  54. * maximum possible, to be safe */
  55. return (1 << INTERNODE_CACHE_SHIFT);
  56. }
  57. #define dma_is_consistent(d, h) (1)
  58. static inline void
  59. dma_cache_sync(struct device *dev, void *vaddr, size_t size,
  60. enum dma_data_direction direction)
  61. {
  62. flush_write_buffers();
  63. }
/*
 * Per-device coherent-memory pools: a driver may declare a region of
 * bus-addressable memory from which dma_alloc_coherent() allocates.
 */
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);
extern void
dma_release_declared_memory(struct device *dev);
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
#endif /* _ASM_I386_DMA_MAPPING_H */