pci-noop.c

/*
 *	linux/arch/alpha/kernel/pci-noop.c
 *
 * Stub PCI interfaces for Jensen-specific kernels.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>

#include "proto.h"


/*
 * The PCI controller list.
 */

struct pci_controller *hose_head, **hose_tail = &hose_head;
struct pci_controller *pci_isa_hose;
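
/*
 * Allocate a pci_controller ("hose") from boot memory and append it
 * to the tail of the controller list.
 */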
struct pci_controller * __init
alloc_pci_controller(void)
{
        struct pci_controller *hose;

        hose = alloc_bootmem(sizeof(*hose));

        *hose_tail = hose;
        hose_tail = &hose->next;

        return hose;
}
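
/*
 * Allocate a struct resource from boot memory for early setup code,
 * before the slab allocator is available.
 */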
struct resource * __init
alloc_resource(void)
{
        struct resource *res;

        res = alloc_bootmem(sizeof(*res));

        return res;
}
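
/*
 * Return the base address selected by 'which' (sparse/dense memory or
 * I/O space, hose index, or root bus number) for the hose identified
 * either by index or via the ISA hook at bus 0, devfn 0.
 */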
asmlinkage long
sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
{
        struct pci_controller *hose;

        /* from hose or from bus.devfn */
        if (which & IOBASE_FROM_HOSE) {
                for (hose = hose_head; hose; hose = hose->next)
                        if (hose->index == bus)
                                break;
                if (!hose)
                        return -ENODEV;
        } else {
                /* Special hook for ISA access.  */
                if (bus == 0 && dfn == 0)
                        hose = pci_isa_hose;
                else
                        return -ENODEV;
        }

        switch (which & ~IOBASE_FROM_HOSE) {
        case IOBASE_HOSE:
                return hose->index;
        case IOBASE_SPARSE_MEM:
                return hose->sparse_mem_base;
        case IOBASE_DENSE_MEM:
                return hose->dense_mem_base;
        case IOBASE_SPARSE_IO:
                return hose->sparse_io_base;
        case IOBASE_DENSE_IO:
                return hose->dense_io_base;
        case IOBASE_ROOT_BUS:
                return hose->bus->number;
        }

        return -EOPNOTSUPP;
}
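
/*
 * There is no PCI bus on a Jensen, so user-space config-space access
 * can only report a permission error or "no such device".
 */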
asmlinkage long
sys_pciconfig_read(unsigned long bus, unsigned long dfn,
                   unsigned long off, unsigned long len, void *buf)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        else
                return -ENODEV;
}

asmlinkage long
sys_pciconfig_write(unsigned long bus, unsigned long dfn,
                    unsigned long off, unsigned long len, void *buf)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        else
                return -ENODEV;
}

/* Stubs for the routines in pci_iommu.c: */
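/*
 * With no PCI bus (and hence no IOMMU) behind them, these either do
 * nothing or report failure.
 */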
void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        return NULL;
}

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
                    dma_addr_t dma_addr)
{
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
               int direction)
{
        return (dma_addr_t) 0;
}

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                 int direction)
{
}

int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
           int direction)
{
        return 0;
}

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
             int direction)
{
}

int
pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
        return 0;
}

/* Generic DMA mapping functions: */
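/*
 * Allocate coherent memory straight from the page allocator; the DMA
 * handle is simply the bus address of the allocation (virt_to_bus),
 * since there is no IOMMU to program.
 */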
void *
dma_alloc_coherent(struct device *dev, size_t size,
                   dma_addr_t *dma_handle, int gfp)
{
        void *ret;

        if (!dev || *dev->dma_mask >= 0xffffffffUL)
                gfp &= ~GFP_DMA;
        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret) {
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
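
/*
 * Map a scatterlist for DMA.  Each entry maps one-to-one to the bus
 * address of its page, again because no IOMMU is involved.
 */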
int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        for (i = 0; i < nents; i++) {
                void *va;

                BUG_ON(!sg[i].page);
                va = page_address(sg[i].page) + sg[i].offset;
                sg_dma_address(sg + i) = (dma_addr_t)virt_to_bus(va);
                sg_dma_len(sg + i) = sg[i].length;
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
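
/*
 * Record a new DMA mask for the device, provided the device has a
 * mask and the requested value is supported.
 */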
int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
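
/* No PCI BARs exist to map, so pci_iomap() always fails.  */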
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
}

EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);