/*
 * pci_64.h: sparc64-specific PCI definitions — legacy pci_* DMA API
 * compatibility wrappers implemented on top of the generic DMA API.
 */
#ifndef __SPARC64_PCI_H
#define __SPARC64_PCI_H

#ifdef __KERNEL__

#include <linux/dma-mapping.h>

/* Can be used to override the logic in pci_scan_bus for skipping
 * already-configured bus numbers - to be used for buggy BIOSes
 * or architectures with incomplete PCI setup by the loader.
 */
#define pcibios_assign_all_busses()	0

/* No minimum I/O or memory BAR addresses are reserved on sparc64. */
#define PCIBIOS_MIN_IO		0UL
#define PCIBIOS_MIN_MEM		0UL

/* Sentinel "no interrupt available" value; returned by
 * pci_get_legacy_ide_irq() below.
 */
#define PCI_IRQ_NONE		0xffffffff

/* L2 cache line size in bytes; presumably consumed by generic PCI
 * code when programming PCI_CACHE_LINE_SIZE — TODO confirm caller.
 */
#define PCI_CACHE_LINE_BYTES	64
/* Arch hook invoked when a device is made a bus master.
 * Intentionally empty on sparc64.
 */
static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
/* Arch hook for discouraging use of an ISA IRQ line.
 * Intentionally empty on sparc64.
 */
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}
/* The PCI address space does not equal the physical memory
 * address space.  The networking and block device layers use
 * this boolean for bounce buffer decisions.
 *
 * 0 here means DMA addresses are translated (via an IOMMU —
 * NOTE(review): inferred from the "IOMMU mapping bypass" section
 * below; confirm against the sparc64 IOMMU code).
 */
#define PCI_DMA_BUS_IS_PHYS	(0)
  27. static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
  28. dma_addr_t *dma_handle)
  29. {
  30. return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
  31. }
  32. static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
  33. void *vaddr, dma_addr_t dma_handle)
  34. {
  35. return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
  36. }
  37. static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
  38. size_t size, int direction)
  39. {
  40. return dma_map_single(&pdev->dev, ptr, size,
  41. (enum dma_data_direction) direction);
  42. }
  43. static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
  44. size_t size, int direction)
  45. {
  46. dma_unmap_single(&pdev->dev, dma_addr, size,
  47. (enum dma_data_direction) direction);
  48. }
/* Page-granular map/unmap, implemented on top of the single-buffer
 * helpers via page_address().  NOTE(review): page_address() requires
 * the page to have a kernel virtual mapping — confirm no highmem
 * pages reach this path on sparc64.
 */
#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) \
	pci_unmap_single(dev,addr,sz,dir)

/* pci_unmap_{single,page} is not a nop, thus... drivers must really
 * store the DMA address and length at map time.  The DECLARE_* macros
 * expand to struct fields; the accessors below read and write them.
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
  66. static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
  67. int nents, int direction)
  68. {
  69. return dma_map_sg(&pdev->dev, sg, nents,
  70. (enum dma_data_direction) direction);
  71. }
  72. static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
  73. int nents, int direction)
  74. {
  75. dma_unmap_sg(&pdev->dev, sg, nents,
  76. (enum dma_data_direction) direction);
  77. }
  78. static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
  79. dma_addr_t dma_handle,
  80. size_t size, int direction)
  81. {
  82. dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
  83. (enum dma_data_direction) direction);
  84. }
/* Hand a DMA buffer back to the device after CPU writes.
 * Intentionally a no-op on sparc64.
 */
static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}
  91. static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
  92. struct scatterlist *sg,
  93. int nents, int direction)
  94. {
  95. dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
  96. (enum dma_data_direction) direction);
  97. }
/* Scatterlist variant of pci_dma_sync_single_for_device().
 * Intentionally a no-op on sparc64.
 */
static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
					      struct scatterlist *sg,
					      int nelems, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}
/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);

/* PCI IOMMU mapping bypass support. */

/* PCI 64-bit addressing works for all slots on all controller
 * types on sparc64.  However, it requires that the device
 * can drive enough of the 64 bits.
 */
#define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
/* Bus-address base for IOMMU-bypass DMA — NOTE(review): value is
 * controller-specific; confirm against the sparc64 PCI controller code.
 */
#define PCI64_ADDR_BASE		0xfffc000000000000UL
  117. static inline int pci_dma_mapping_error(struct pci_dev *pdev,
  118. dma_addr_t dma_addr)
  119. {
  120. return dma_mapping_error(&pdev->dev, dma_addr);
  121. }
  122. #ifdef CONFIG_PCI
  123. static inline void pci_dma_burst_advice(struct pci_dev *pdev,
  124. enum pci_dma_burst_strategy *strat,
  125. unsigned long *strategy_parameter)
  126. {
  127. unsigned long cacheline_size;
  128. u8 byte;
  129. pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
  130. if (byte == 0)
  131. cacheline_size = 1024;
  132. else
  133. cacheline_size = (int) byte * 4;
  134. *strat = PCI_DMA_BURST_BOUNDARY;
  135. *strategy_parameter = cacheline_size;
  136. }
  137. #endif
/* Return the index of the PCI controller for device PDEV. */
extern int pci_domain_nr(struct pci_bus *bus);

/* Non-zero => include the domain number in /proc/bus/pci paths —
 * presumably because each sparc64 controller is its own domain;
 * TODO confirm against the generic /proc/bus/pci code.
 */
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 1;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
#define HAVE_PCI_MMAP

/* Reuse the framebuffer-style unmapped-area search for PCI mmaps. */
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area

extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state,
			       int write_combine);

/* Translate between the CPU resource view and raw PCI bus addresses. */
extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res);
extern void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region);
/* Legacy IDE IRQ lookup: always reports PCI_IRQ_NONE, since sparc64
 * has no ISA-style fixed IDE interrupt routing.
 */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return PCI_IRQ_NONE;
}
struct device_node;

/* Map a PCI device back to its Open Firmware device-tree node. */
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);

/* Arch-specific translation of BAR resources to user-visible
 * start/end addresses for sysfs/procfs consumers.
 */
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end);

#endif /* __KERNEL__ */

#endif /* __SPARC64_PCI_H */