/* pci_64.h: PCI defines and function prototypes for 64-bit sparc. */
#ifndef __SPARC64_PCI_H
#define __SPARC64_PCI_H

#ifdef __KERNEL__

#include <linux/dma-mapping.h>

/* Can be used to override the logic in pci_scan_bus for skipping
 * already-configured bus numbers - to be used for buggy BIOSes
 * or architectures with incomplete PCI setup by the loader.
 */
#define pcibios_assign_all_busses()	0
#define pcibios_scan_all_fns(a, b)	0

/* Lowest addresses the PCI core may hand out for I/O and memory
 * resources; zero means no architectural floor on sparc64.
 */
#define PCIBIOS_MIN_IO		0UL
#define PCIBIOS_MIN_MEM		0UL

/* Sentinel "no interrupt assigned" value (see pci_get_legacy_ide_irq). */
#define PCI_IRQ_NONE		0xffffffff

/* Cache line size, in bytes, assumed for PCI on this architecture. */
#define PCI_CACHE_LINE_BYTES	64
/* Arch hook invoked when a device is made a bus master; intentionally
 * a no-op on sparc64.
 */
static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
/* Arch hook to de-prioritize an ISA IRQ for PCI use; a no-op here
 * because this port never allocates PCI IRQs dynamically.
 */
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}
/* The PCI address space does not equal the physical memory
 * address space. The networking and block device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(0)
  28. static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
  29. dma_addr_t *dma_handle)
  30. {
  31. return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
  32. }
  33. static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
  34. void *vaddr, dma_addr_t dma_handle)
  35. {
  36. return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
  37. }
  38. static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
  39. size_t size, int direction)
  40. {
  41. return dma_map_single(&pdev->dev, ptr, size,
  42. (enum dma_data_direction) direction);
  43. }
  44. static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
  45. size_t size, int direction)
  46. {
  47. dma_unmap_single(&pdev->dev, dma_addr, size,
  48. (enum dma_data_direction) direction);
  49. }
/* Page-based map/unmap are expressed via the single-buffer calls,
 * using the page's kernel virtual address plus offset.
 */
#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)

#define pci_unmap_page(dev,addr,sz,dir) \
	pci_unmap_single(dev,addr,sz,dir)

/* pci_unmap_{single,page} is not a nop, thus... */

/* Drivers embed these in their own structs to remember the DMA
 * address/length needed later for unmapping; the accessor macros
 * below read and write those saved fields.
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
  67. static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
  68. int nents, int direction)
  69. {
  70. return dma_map_sg(&pdev->dev, sg, nents,
  71. (enum dma_data_direction) direction);
  72. }
  73. static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
  74. int nents, int direction)
  75. {
  76. dma_unmap_sg(&pdev->dev, sg, nents,
  77. (enum dma_data_direction) direction);
  78. }
  79. static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
  80. dma_addr_t dma_handle,
  81. size_t size, int direction)
  82. {
  83. dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
  84. (enum dma_data_direction) direction);
  85. }
/* Counterpart of pci_dma_sync_single_for_cpu(): hand the buffer back
 * to the device. Intentionally empty on this architecture.
 */
static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}
  92. static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
  93. struct scatterlist *sg,
  94. int nents, int direction)
  95. {
  96. dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
  97. (enum dma_data_direction) direction);
  98. }
/* Counterpart of pci_dma_sync_sg_for_cpu(): hand the scatterlist back
 * to the device. Intentionally empty on this architecture.
 */
static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
					      struct scatterlist *sg,
					      int nelems, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}
/* Return whether the given PCI device DMA address mask can
 * be supported properly. For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);

/* PCI IOMMU mapping bypass support. */

/* PCI 64-bit addressing works for all slots on all controller
 * types on sparc64. However, it requires that the device
 * can drive enough of the 64 bits.
 */
#define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
#define PCI64_ADDR_BASE		0xfffc000000000000UL
  118. static inline int pci_dma_mapping_error(struct pci_dev *pdev,
  119. dma_addr_t dma_addr)
  120. {
  121. return dma_mapping_error(&pdev->dev, dma_addr);
  122. }
  123. #ifdef CONFIG_PCI
  124. static inline void pci_dma_burst_advice(struct pci_dev *pdev,
  125. enum pci_dma_burst_strategy *strat,
  126. unsigned long *strategy_parameter)
  127. {
  128. unsigned long cacheline_size;
  129. u8 byte;
  130. pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
  131. if (byte == 0)
  132. cacheline_size = 1024;
  133. else
  134. cacheline_size = (int) byte * 4;
  135. *strat = PCI_DMA_BURST_BOUNDARY;
  136. *strategy_parameter = cacheline_size;
  137. }
  138. #endif
/* Return the index of the PCI controller for device PDEV. */
extern int pci_domain_nr(struct pci_bus *bus);

/* Always include the domain number in /proc/bus/pci paths on sparc64. */
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 1;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
#define HAVE_PCI_MMAP
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
/* Reuse the framebuffer unmapped-area helper for PCI mmap placement. */
#define get_pci_unmapped_area get_fb_unmapped_area

extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state,
			       int write_combine);

/* Convert between CPU resource views and raw PCI bus regions
 * (implemented in the platform PCI code).
 */
extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res);

extern void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region);
/* Legacy IDE IRQ lookup: always reports PCI_IRQ_NONE — no fixed
 * legacy IDE IRQ routing is provided on this architecture.
 */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return PCI_IRQ_NONE;
}
/* Forward declaration so the prototype below needs no OF headers. */
struct device_node;
/* Map a PCI device to its Open Firmware device-tree node. */
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);

#define HAVE_ARCH_PCI_RESOURCE_TO_USER
/* Translate a BAR's resource into the start/end values exposed to
 * userspace (e.g. via sysfs).
 */
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end);

#endif /* __KERNEL__ */

#endif /* __SPARC64_PCI_H */