/*
 * pci.h: sparc64 PCI interface definitions (include/asm-sparc64/pci.h).
 */
#ifndef __SPARC64_PCI_H
#define __SPARC64_PCI_H

#ifdef __KERNEL__

#include <linux/fs.h>
#include <linux/mm.h>

/* Can be used to override the logic in pci_scan_bus for skipping
 * already-configured bus numbers - to be used for buggy BIOSes
 * or architectures with incomplete PCI setup by the loader.
 */
#define pcibios_assign_all_busses()	0
#define pcibios_scan_all_fns(a, b)	0

/* Lowest bus addresses the generic PCI code may assign to
 * I/O and memory resources on this architecture.
 */
#define PCIBIOS_MIN_IO		0UL
#define PCIBIOS_MIN_MEM		0UL

/* Sentinel meaning "no IRQ routed"; see pci_get_legacy_ide_irq(). */
#define PCI_IRQ_NONE		0xffffffff

/* PCI cache line size on sparc64, in bytes. */
#define PCI_CACHE_LINE_BYTES	64
/* Called when bus mastering is enabled for DEV; sparc64 needs no
 * architecture-specific action here.
 */
static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
/* Hook for de-prioritizing an ISA IRQ during PCI IRQ assignment;
 * a no-op here since sparc64 does no dynamic PCI IRQ allocation.
 */
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}
/* Dynamic DMA mapping stuff.
 */

/* The PCI address space does not equal the physical memory
 * address space. The networking and block device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

#include <asm/scatterlist.h>

struct pci_dev;
/* Dispatch table for the IOMMU implementation backing the pci_*
 * DMA wrappers below; the platform code installs the concrete ops
 * behind the pci_iommu_ops pointer.
 */
struct pci_iommu_ops {
	/* Coherent ("consistent") DMA memory allocation/release. */
	void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *, gfp_t);
	void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
	/* Streaming mapping of a single buffer. */
	dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
	void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
	/* Streaming mapping of a scatter-gather list. */
	int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
	void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
	/* Make device-written data visible to the CPU. */
	void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
	void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
};

extern const struct pci_iommu_ops *pci_iommu_ops;
  44. /* Allocate and map kernel buffer using consistent mode DMA for a device.
  45. * hwdev should be valid struct pci_dev pointer for PCI devices.
  46. */
  47. static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
  48. {
  49. return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle, GFP_ATOMIC);
  50. }
  51. /* Free and unmap a consistent DMA buffer.
  52. * cpu_addr is what was returned from pci_alloc_consistent,
  53. * size must be the same as what as passed into pci_alloc_consistent,
  54. * and likewise dma_addr must be the same as what *dma_addrp was set to.
  55. *
  56. * References to the memory and mappings associated with cpu_addr/dma_addr
  57. * past this call are illegal.
  58. */
  59. static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
  60. {
  61. return pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle);
  62. }
  63. /* Map a single buffer of the indicated size for DMA in streaming mode.
  64. * The 32-bit bus address to use is returned.
  65. *
  66. * Once the device is given the dma address, the device owns this memory
  67. * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
  68. */
  69. static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
  70. {
  71. return pci_iommu_ops->map_single(hwdev, ptr, size, direction);
  72. }
  73. /* Unmap a single streaming mode DMA translation. The dma_addr and size
  74. * must match what was provided for in a previous pci_map_single call. All
  75. * other usages are undefined.
  76. *
  77. * After this call, reads by the cpu to the buffer are guaranteed to see
  78. * whatever the device wrote there.
  79. */
  80. static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
  81. {
  82. pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction);
  83. }
/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
#define pci_map_page(dev, page, off, size, dir) \
	pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)

/* pci_unmap_{single,page} is not a nop, thus...
 * Drivers embed these in their state structs so the address/length
 * needed at unmap time can be stashed at map time.
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) \
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) \
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
	(((PTR)->LEN_NAME) = (VAL))
  101. /* Map a set of buffers described by scatterlist in streaming
  102. * mode for DMA. This is the scatter-gather version of the
  103. * above pci_map_single interface. Here the scatter gather list
  104. * elements are each tagged with the appropriate dma address
  105. * and length. They are obtained via sg_dma_{address,length}(SG).
  106. *
  107. * NOTE: An implementation may be able to use a smaller number of
  108. * DMA address/length pairs than there are SG table elements.
  109. * (for example via virtual mapping capabilities)
  110. * The routine returns the number of addr/length pairs actually
  111. * used, at most nents.
  112. *
  113. * Device ownership issues as mentioned above for pci_map_single are
  114. * the same here.
  115. */
  116. static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
  117. {
  118. return pci_iommu_ops->map_sg(hwdev, sg, nents, direction);
  119. }
  120. /* Unmap a set of streaming mode DMA translations.
  121. * Again, cpu read rules concerning calls here are the same as for
  122. * pci_unmap_single() above.
  123. */
  124. static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
  125. {
  126. pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction);
  127. }
  128. /* Make physical memory consistent for a single
  129. * streaming mode DMA translation after a transfer.
  130. *
  131. * If you perform a pci_map_single() but wish to interrogate the
  132. * buffer using the cpu, yet do not wish to teardown the PCI dma
  133. * mapping, you must call this function before doing so. At the
  134. * next point you give the PCI dma address back to the card, you
  135. * must first perform a pci_dma_sync_for_device, and then the
  136. * device again owns the buffer.
  137. */
  138. static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
  139. {
  140. pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction);
  141. }
/* Hand a streaming mapping back to the device after CPU writes.
 * Only sanity-checks the direction; on sparc64 no cache flushing
 * is needed for the cpu-to-device transition.
 */
static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
			       size_t size, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
	BUG_ON(direction == PCI_DMA_NONE);
}
  149. /* Make physical memory consistent for a set of streaming
  150. * mode DMA translations after a transfer.
  151. *
  152. * The same as pci_dma_sync_single_* but for a scatter-gather list,
  153. * same rules and usage.
  154. */
  155. static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
  156. {
  157. pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction);
  158. }
/* Scatter-gather counterpart of pci_dma_sync_single_for_device();
 * only sanity-checks the direction, no flushing needed on sparc64.
 */
static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
			   int nelems, int direction)
{
	/* No flushing needed to sync cpu writes to the device. */
	BUG_ON(direction == PCI_DMA_NONE);
}
/* Return whether the given PCI device DMA address mask can
 * be supported properly. For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);

/* PCI IOMMU mapping bypass support. */

/* PCI 64-bit addressing works for all slots on all controller
 * types on sparc64. However, it requires that the device
 * can drive enough of the 64 bits.
 */
#define PCI64_REQUIRED_MASK	(~(dma64_addr_t)0)
#define PCI64_ADDR_BASE		0xfffc000000000000UL

/* All-ones bus address used by the mapping routines to signal failure. */
#define PCI_DMA_ERROR_CODE	(~(dma_addr_t)0x0)
  180. static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
  181. {
  182. return (dma_addr == PCI_DMA_ERROR_CODE);
  183. }
  184. #ifdef CONFIG_PCI
  185. static inline void pci_dma_burst_advice(struct pci_dev *pdev,
  186. enum pci_dma_burst_strategy *strat,
  187. unsigned long *strategy_parameter)
  188. {
  189. unsigned long cacheline_size;
  190. u8 byte;
  191. pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
  192. if (byte == 0)
  193. cacheline_size = 1024;
  194. else
  195. cacheline_size = (int) byte * 4;
  196. *strat = PCI_DMA_BURST_BOUNDARY;
  197. *strategy_parameter = cacheline_size;
  198. }
  199. #endif
/* Return the index of the PCI controller for device PDEV. */
extern int pci_domain_nr(struct pci_bus *bus);

/* Always nonzero: /proc/bus/pci entries are named with the domain
 * number on sparc64.
 */
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 1;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
#define HAVE_PCI_MMAP
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area

extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state,
			       int write_combine);

/* Translate between CPU resource addresses and PCI bus addresses. */
extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			struct resource *res);

extern void
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region);

extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);

/* No legacy ISA-style IDE IRQ routing on sparc64. */
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	return PCI_IRQ_NONE;
}
struct device_node;
/* Map a PCI device back to its OpenFirmware device tree node. */
extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);

#endif /* __KERNEL__ */

#endif /* __SPARC64_PCI_H */