i386.c

/*
 * Low-Level PCI Access for i386 machines
 *
 * Copyright 1993, 1994 Drew Eckhardt
 *      Visionary Computing
 *      (Unix and Linux consulting and custom programming)
 *      Drew@Colorado.EDU
 *      +1 (303) 786-7975
 *
 * Drew's work was sponsored by:
 *      iX Multiuser Multitasking Magazine
 *      Hannover, Germany
 *      hm@ix.de
 *
 * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
 *
 * For more information, please consult the following manuals (look at
 * http://www.pcisig.com/ for how to get them):
 *
 *      PCI BIOS Specification
 *      PCI Local Bus Specification
 *      PCI to PCI Bridge Specification
 *      PCI System Design Guide
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/bootmem.h>

#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/io_apic.h>

/*
 * This list of dynamic mappings is for temporarily maintaining
 * original BIOS BAR addresses for possible reinstatement.
 */
struct pcibios_fwaddrmap {
        struct list_head        list;
        struct pci_dev          *dev;
        resource_size_t         fw_addr[DEVICE_COUNT_RESOURCE];
};

static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);

/* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
{
        struct pcibios_fwaddrmap *map;

        list_for_each_entry(map, &pcibios_fwaddrmappings, list)
                if (map->dev == dev)
                        return map;

        return NULL;
}
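
/*
 * Record the firmware-assigned (BIOS) address of resource idx for dev so
 * that it can be reinstated later.  The lock is dropped around kzalloc()
 * since a GFP_KERNEL allocation may sleep, and re-taken before the new
 * entry is added to the list.
 */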
static void
pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
{
        unsigned long flags;
        struct pcibios_fwaddrmap *map;

        spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
        map = pcibios_fwaddrmap_lookup(dev);
        if (!map) {
                spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (!map)
                        return;

                map->dev = pci_dev_get(dev);
                map->fw_addr[idx] = fw_addr;
                INIT_LIST_HEAD(&map->list);

                spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
                list_add_tail(&map->list, &pcibios_fwaddrmappings);
        } else
                map->fw_addr[idx] = fw_addr;

        spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
}
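
/*
 * Return the firmware-assigned address saved for resource idx of dev,
 * or 0 if nothing was recorded for it.
 */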
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
{
        unsigned long flags;
        struct pcibios_fwaddrmap *map;
        resource_size_t fw_addr = 0;

        spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
        map = pcibios_fwaddrmap_lookup(dev);
        if (map)
                fw_addr = map->fw_addr[idx];
        spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);

        return fw_addr;
}
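
/*
 * Drop all saved firmware addresses and their device references once
 * resource assignment is complete.
 */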
static void pcibios_fw_addr_list_del(void)
{
        unsigned long flags;
        struct pcibios_fwaddrmap *entry, *next;

        spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
        list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
                list_del(&entry->list);
                pci_dev_put(entry->dev);
                kfree(entry);
        }
        spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
}
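
/*
 * ISA-style alignment (see below) can be skipped when PCI_CAN_SKIP_ISA_ALIGN
 * is set in pci_probe and the parent bridge does not forward ISA I/O cycles
 * (PCI_BRIDGE_CTL_ISA clear).
 */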
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
        if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
            !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
                return 1;
        return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
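 *
 * For example, a request starting at 0x2910 (0x110 modulo 0x400, i.e.
 * inside the mirrored range) gets rounded up to 0x2c00, while a request
 * at 0x2800 is left alone.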
 */
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
                       resource_size_t size, resource_size_t align)
{
        struct pci_dev *dev = data;
        resource_size_t start = res->start;

        if (res->flags & IORESOURCE_IO) {
                if (skip_isa_ioresource_align(dev))
                        return start;
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;
        }
        return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 *      - I/O or memory regions not configured
 *      - regions configured, but not enabled in the command register
 *      - bogus I/O addresses above 64K used
 *      - expansion ROMs left enabled (this may sound harmless, but given
 *        the fact the PCI specs explicitly allow address decoders to be
 *        shared between expansion ROMs and other resource regions, it's
 *        at least dangerous)
 *      - bad resource sizes or overlaps with other regions
 *
 * Our solution:
 *      (1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *          This gives us fixed barriers on where we can allocate.
 *      (2) Allocate resources for all enabled devices. If there is
 *          a collision, just mark the resource as unallocated. Also
 *          disable expansion ROMs during this step.
 *      (3) Try to allocate resources for disabled devices. If the
 *          resources were assigned correctly, everything goes well,
 *          if they weren't, they won't disturb allocation of other
 *          resources.
 *      (4) Assign new addresses to resources which were either
 *          not configured at all or misconfigured. If explicitly
 *          requested by the user, configure expansion ROM address
 *          as well.
 */
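
/*
 * In this file, step (1) is handled by pcibios_allocate_bus_resources(),
 * steps (2) and (3) by pcibios_allocate_resources() with pass 0 and
 * pass 1 respectively, and step (4) by pcibios_assign_resources() via
 * pci_assign_unassigned_resources().
 */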
static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
{
        struct pci_bus *bus;
        struct pci_dev *dev;
        int idx;
        struct resource *r;

        /* Depth-First Search on bus tree */
        list_for_each_entry(bus, bus_list, node) {
                if ((dev = bus->self)) {
                        for (idx = PCI_BRIDGE_RESOURCES;
                             idx < PCI_NUM_RESOURCES; idx++) {
                                r = &dev->resource[idx];
                                if (!r->flags)
                                        continue;
                                if (!r->start ||
                                    pci_claim_resource(dev, idx) < 0) {
                                        /*
                                         * Something is wrong with the region.
                                         * Invalidate the resource to prevent
                                         * child resource allocations in this
                                         * range.
                                         */
                                        r->start = r->end = 0;
                                        r->flags = 0;
                                }
                        }
                }
                pcibios_allocate_bus_resources(&bus->children);
        }
}

struct pci_check_idx_range {
        int start;
        int end;
};
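
/*
 * Pass 0 claims the BIOS-assigned regions of devices whose decoding is
 * already enabled in the command register; pass 1 handles the disabled
 * ones.  Regions that cannot be claimed get their firmware address saved
 * and their start zeroed so that new addresses are assigned later.
 */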
static void __init pcibios_allocate_resources(int pass)
{
        struct pci_dev *dev = NULL;
        int idx, disabled, i;
        u16 command;
        struct resource *r;
        struct pci_check_idx_range idx_range[] = {
                { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
#ifdef CONFIG_PCI_IOV
                { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
#endif
        };

        for_each_pci_dev(dev) {
                pci_read_config_word(dev, PCI_COMMAND, &command);
                for (i = 0; i < ARRAY_SIZE(idx_range); i++)
                        for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
                                r = &dev->resource[idx];
                                if (r->parent)          /* Already allocated */
                                        continue;
                                if (!r->start)          /* Address not assigned at all */
                                        continue;
                                if (r->flags & IORESOURCE_IO)
                                        disabled = !(command & PCI_COMMAND_IO);
                                else
                                        disabled = !(command & PCI_COMMAND_MEMORY);
                                if (pass == disabled) {
                                        dev_dbg(&dev->dev,
                                                "BAR %d: reserving %pr (d=%d, p=%d)\n",
                                                idx, r, disabled, pass);
                                        if (pci_claim_resource(dev, idx) < 0) {
                                                /* We'll assign a new address later */
                                                pcibios_save_fw_addr(dev,
                                                                idx, r->start);
                                                r->end -= r->start;
                                                r->start = 0;
                                        }
                                }
                        }
                if (!pass) {
                        r = &dev->resource[PCI_ROM_RESOURCE];
                        if (r->flags & IORESOURCE_ROM_ENABLE) {
                                /* Turn the ROM off, leave the resource region,
                                 * but keep it unregistered. */
                                u32 reg;
                                dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
                                r->flags &= ~IORESOURCE_ROM_ENABLE;
                                pci_read_config_dword(dev, dev->rom_base_reg, &reg);
                                pci_write_config_dword(dev, dev->rom_base_reg,
                                                       reg & ~PCI_ROM_ADDRESS_ENABLE);
                        }
                }
        }
}
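
/*
 * Try to claim the BIOS-assigned ROM regions (unless PCI_ASSIGN_ROMS was
 * requested), then let the PCI core assign everything that is still
 * unassigned and drop the saved firmware addresses.
 */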
static int __init pcibios_assign_resources(void)
{
        struct pci_dev *dev = NULL;
        struct resource *r;

        if (!(pci_probe & PCI_ASSIGN_ROMS)) {
                /*
                 * Try to use BIOS settings for ROMs, otherwise let
                 * pci_assign_unassigned_resources() allocate the new
                 * addresses.
                 */
                for_each_pci_dev(dev) {
                        r = &dev->resource[PCI_ROM_RESOURCE];
                        if (!r->flags || !r->start)
                                continue;
                        if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
                                r->end -= r->start;
                                r->start = 0;
                        }
                }
        }

        pci_assign_unassigned_resources();
        pcibios_fw_addr_list_del();

        return 0;
}

void __init pcibios_resource_survey(void)
{
        DBG("PCI: Allocating resources\n");
        pcibios_allocate_bus_resources(&pci_root_buses);
        pcibios_allocate_resources(0);
        pcibios_allocate_resources(1);

        e820_reserve_resources_late();
        /*
         * Insert the IO APIC resources after PCI initialization has
         * occurred to handle IO APICs that are mapped in on a BAR in
         * PCI space, but before trying to assign unassigned PCI resources.
         */
        ioapic_insert_resources();
}

/*
 * Called via fs_initcall (one level below subsys_initcall) so that the
 * motherboard drivers get a chance to reserve their resources first.
 */
fs_initcall(pcibios_assign_resources);

static const struct vm_operations_struct pci_mmap_ops = {
        .access = generic_access_phys,
};
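
/*
 * Map a PCI resource into userspace with the requested caching
 * attributes.  Only memory space can be mapped here; write-combining
 * is honoured only when PAT is enabled.
 */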
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                        enum pci_mmap_state mmap_state, int write_combine)
{
        unsigned long prot;

        /* I/O space cannot be accessed via normal processor loads and
         * stores on this platform.
         */
        if (mmap_state == pci_mmap_io)
                return -EINVAL;

        prot = pgprot_val(vma->vm_page_prot);

        /*
         * Return an error if PAT is not enabled but write-combining was
         * requested.  The caller can follow up with a UC MINUS request
         * and add a WC MTRR if there is a free MTRR slot.
         */
        if (!pat_enabled && write_combine)
                return -EINVAL;

        if (pat_enabled && write_combine)
                prot |= _PAGE_CACHE_WC;
        else if (pat_enabled || boot_cpu_data.x86 > 3)
                /*
                 * ioremap() and ioremap_nocache() default to UC MINUS for
                 * now.  To avoid attribute conflicts, request UC MINUS
                 * here as well.
                 */
                prot |= _PAGE_CACHE_UC_MINUS;

        prot |= _PAGE_IOMAP;    /* creating a mapping for IO */

        vma->vm_page_prot = __pgprot(prot);

        if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot))
                return -EAGAIN;

        vma->vm_ops = &pci_mmap_ops;

        return 0;
}