/* arch/x86/pci/i386.c */
  1. /*
  2. * Low-Level PCI Access for i386 machines
  3. *
  4. * Copyright 1993, 1994 Drew Eckhardt
  5. * Visionary Computing
  6. * (Unix and Linux consulting and custom programming)
  7. * Drew@Colorado.EDU
  8. * +1 (303) 786-7975
  9. *
  10. * Drew's work was sponsored by:
  11. * iX Multiuser Multitasking Magazine
  12. * Hannover, Germany
  13. * hm@ix.de
  14. *
  15. * Copyright 1997--2000 Martin Mares <mj@ucw.cz>
  16. *
  17. * For more information, please consult the following manuals (look at
  18. * http://www.pcisig.com/ for how to get them):
  19. *
  20. * PCI BIOS Specification
  21. * PCI Local Bus Specification
  22. * PCI to PCI Bridge Specification
  23. * PCI System Design Guide
  24. *
  25. */
  26. #include <linux/types.h>
  27. #include <linux/kernel.h>
  28. #include <linux/export.h>
  29. #include <linux/pci.h>
  30. #include <linux/init.h>
  31. #include <linux/ioport.h>
  32. #include <linux/errno.h>
  33. #include <linux/bootmem.h>
  34. #include <asm/pat.h>
  35. #include <asm/e820.h>
  36. #include <asm/pci_x86.h>
  37. #include <asm/io_apic.h>
/*
 * This list of dynamic mappings is for temporarily maintaining
 * original BIOS BAR addresses for possible reinstatement.
 */
struct pcibios_fwaddrmap {
	struct list_head list;	/* link in pcibios_fwaddrmappings */
	struct pci_dev *dev;	/* device this entry describes; reference held via pci_dev_get() */
	resource_size_t fw_addr[DEVICE_COUNT_RESOURCE];	/* firmware-assigned BAR addresses, indexed by resource number */
};

static LIST_HEAD(pcibios_fwaddrmappings);
static DEFINE_SPINLOCK(pcibios_fwaddrmap_lock);

/* Set once the map is torn down; later save/retrieve calls become no-ops. */
static bool pcibios_fw_addr_done;
  50. /* Must be called with 'pcibios_fwaddrmap_lock' lock held. */
  51. static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
  52. {
  53. struct pcibios_fwaddrmap *map;
  54. WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
  55. list_for_each_entry(map, &pcibios_fwaddrmappings, list)
  56. if (map->dev == dev)
  57. return map;
  58. return NULL;
  59. }
  60. static void
  61. pcibios_save_fw_addr(struct pci_dev *dev, int idx, resource_size_t fw_addr)
  62. {
  63. unsigned long flags;
  64. struct pcibios_fwaddrmap *map;
  65. if (pcibios_fw_addr_done)
  66. return;
  67. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  68. map = pcibios_fwaddrmap_lookup(dev);
  69. if (!map) {
  70. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  71. map = kzalloc(sizeof(*map), GFP_KERNEL);
  72. if (!map)
  73. return;
  74. map->dev = pci_dev_get(dev);
  75. map->fw_addr[idx] = fw_addr;
  76. INIT_LIST_HEAD(&map->list);
  77. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  78. list_add_tail(&map->list, &pcibios_fwaddrmappings);
  79. } else
  80. map->fw_addr[idx] = fw_addr;
  81. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  82. }
  83. resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
  84. {
  85. unsigned long flags;
  86. struct pcibios_fwaddrmap *map;
  87. resource_size_t fw_addr = 0;
  88. if (pcibios_fw_addr_done)
  89. return 0;
  90. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  91. map = pcibios_fwaddrmap_lookup(dev);
  92. if (map)
  93. fw_addr = map->fw_addr[idx];
  94. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  95. return fw_addr;
  96. }
  97. static void __init pcibios_fw_addr_list_del(void)
  98. {
  99. unsigned long flags;
  100. struct pcibios_fwaddrmap *entry, *next;
  101. spin_lock_irqsave(&pcibios_fwaddrmap_lock, flags);
  102. list_for_each_entry_safe(entry, next, &pcibios_fwaddrmappings, list) {
  103. list_del(&entry->list);
  104. pci_dev_put(entry->dev);
  105. kfree(entry);
  106. }
  107. spin_unlock_irqrestore(&pcibios_fwaddrmap_lock, flags);
  108. pcibios_fw_addr_done = true;
  109. }
  110. static int
  111. skip_isa_ioresource_align(struct pci_dev *dev) {
  112. if ((pci_probe & PCI_CAN_SKIP_ISA_ALIGN) &&
  113. !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
  114. return 1;
  115. return 0;
  116. }
  117. /*
  118. * We need to avoid collisions with `mirrored' VGA ports
  119. * and other strange ISA hardware, so we always want the
  120. * addresses to be allocated in the 0x000-0x0ff region
  121. * modulo 0x400.
  122. *
  123. * Why? Because some silly external IO cards only decode
  124. * the low 10 bits of the IO address. The 0x00-0xff region
  125. * is reserved for motherboard devices that decode all 16
  126. * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
  127. * but we want to try to avoid allocating at 0x2900-0x2bff
  128. * which might have be mirrored at 0x0100-0x03ff..
  129. */
  130. resource_size_t
  131. pcibios_align_resource(void *data, const struct resource *res,
  132. resource_size_t size, resource_size_t align)
  133. {
  134. struct pci_dev *dev = data;
  135. resource_size_t start = res->start;
  136. if (res->flags & IORESOURCE_IO) {
  137. if (skip_isa_ioresource_align(dev))
  138. return start;
  139. if (start & 0x300)
  140. start = (start + 0x3ff) & ~0x3ff;
  141. }
  142. return start;
  143. }
  144. EXPORT_SYMBOL(pcibios_align_resource);
  145. /*
  146. * Handle resources of PCI devices. If the world were perfect, we could
  147. * just allocate all the resource regions and do nothing more. It isn't.
  148. * On the other hand, we cannot just re-allocate all devices, as it would
  149. * require us to know lots of host bridge internals. So we attempt to
  150. * keep as much of the original configuration as possible, but tweak it
  151. * when it's found to be wrong.
  152. *
  153. * Known BIOS problems we have to work around:
  154. * - I/O or memory regions not configured
  155. * - regions configured, but not enabled in the command register
  156. * - bogus I/O addresses above 64K used
  157. * - expansion ROMs left enabled (this may sound harmless, but given
  158. * the fact the PCI specs explicitly allow address decoders to be
  159. * shared between expansion ROMs and other resource regions, it's
  160. * at least dangerous)
  161. * - bad resource sizes or overlaps with other regions
  162. *
  163. * Our solution:
  164. * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
  165. * This gives us fixed barriers on where we can allocate.
  166. * (2) Allocate resources for all enabled devices. If there is
  167. * a collision, just mark the resource as unallocated. Also
  168. * disable expansion ROMs during this step.
  169. * (3) Try to allocate resources for disabled devices. If the
  170. * resources were assigned correctly, everything goes well,
  171. * if they weren't, they won't disturb allocation of other
  172. * resources.
  173. * (4) Assign new addresses to resources which were either
  174. * not configured at all or misconfigured. If explicitly
  175. * requested by the user, configure expansion ROM address
  176. * as well.
  177. */
  178. static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
  179. {
  180. int idx;
  181. struct resource *r;
  182. for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
  183. r = &dev->resource[idx];
  184. if (!r->flags)
  185. continue;
  186. if (!r->start || pci_claim_resource(dev, idx) < 0) {
  187. /*
  188. * Something is wrong with the region.
  189. * Invalidate the resource to prevent
  190. * child resource allocations in this
  191. * range.
  192. */
  193. r->start = r->end = 0;
  194. r->flags = 0;
  195. }
  196. }
  197. }
  198. static void pcibios_allocate_bus_resources(struct pci_bus *bus)
  199. {
  200. struct pci_bus *child;
  201. /* Depth-First Search on bus tree */
  202. if (bus->self)
  203. pcibios_allocate_bridge_resources(bus->self);
  204. list_for_each_entry(child, &bus->children, node)
  205. pcibios_allocate_bus_resources(child);
  206. }
/* Inclusive range of resource indices to scan in one allocation pass. */
struct pci_check_idx_range {
	int start;	/* first resource index */
	int end;	/* last resource index (inclusive) */
};
  211. static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass)
  212. {
  213. int idx, disabled, i;
  214. u16 command;
  215. struct resource *r;
  216. struct pci_check_idx_range idx_range[] = {
  217. { PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
  218. #ifdef CONFIG_PCI_IOV
  219. { PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
  220. #endif
  221. };
  222. pci_read_config_word(dev, PCI_COMMAND, &command);
  223. for (i = 0; i < ARRAY_SIZE(idx_range); i++)
  224. for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
  225. r = &dev->resource[idx];
  226. if (r->parent) /* Already allocated */
  227. continue;
  228. if (!r->start) /* Address not assigned at all */
  229. continue;
  230. if (r->flags & IORESOURCE_IO)
  231. disabled = !(command & PCI_COMMAND_IO);
  232. else
  233. disabled = !(command & PCI_COMMAND_MEMORY);
  234. if (pass == disabled) {
  235. dev_dbg(&dev->dev,
  236. "BAR %d: reserving %pr (d=%d, p=%d)\n",
  237. idx, r, disabled, pass);
  238. if (pci_claim_resource(dev, idx) < 0) {
  239. /* We'll assign a new address later */
  240. pcibios_save_fw_addr(dev,
  241. idx, r->start);
  242. r->end -= r->start;
  243. r->start = 0;
  244. }
  245. }
  246. }
  247. if (!pass) {
  248. r = &dev->resource[PCI_ROM_RESOURCE];
  249. if (r->flags & IORESOURCE_ROM_ENABLE) {
  250. /* Turn the ROM off, leave the resource region,
  251. * but keep it unregistered. */
  252. u32 reg;
  253. dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
  254. r->flags &= ~IORESOURCE_ROM_ENABLE;
  255. pci_read_config_dword(dev, dev->rom_base_reg, &reg);
  256. pci_write_config_dword(dev, dev->rom_base_reg,
  257. reg & ~PCI_ROM_ADDRESS_ENABLE);
  258. }
  259. }
  260. }
  261. static void pcibios_allocate_resources(struct pci_bus *bus, int pass)
  262. {
  263. struct pci_dev *dev;
  264. struct pci_bus *child;
  265. list_for_each_entry(dev, &bus->devices, bus_list) {
  266. pcibios_allocate_dev_resources(dev, pass);
  267. child = dev->subordinate;
  268. if (child)
  269. pcibios_allocate_resources(child, pass);
  270. }
  271. }
  272. static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
  273. {
  274. struct resource *r;
  275. /*
  276. * Try to use BIOS settings for ROMs, otherwise let
  277. * pci_assign_unassigned_resources() allocate the new
  278. * addresses.
  279. */
  280. r = &dev->resource[PCI_ROM_RESOURCE];
  281. if (!r->flags || !r->start)
  282. return;
  283. if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
  284. r->end -= r->start;
  285. r->start = 0;
  286. }
  287. }
  288. static void pcibios_allocate_rom_resources(struct pci_bus *bus)
  289. {
  290. struct pci_dev *dev;
  291. struct pci_bus *child;
  292. list_for_each_entry(dev, &bus->devices, bus_list) {
  293. pcibios_allocate_dev_rom_resource(dev);
  294. child = dev->subordinate;
  295. if (child)
  296. pcibios_allocate_rom_resources(child);
  297. }
  298. }
/*
 * Final resource-assignment step, run from fs_initcall: optionally keep
 * BIOS-programmed ROM addresses, then assign everything still
 * unassigned and drop the firmware address map.
 */
static int __init pcibios_assign_resources(void)
{
	struct pci_bus *bus;

	/*
	 * Unless the user asked for ROM addresses to be reassigned
	 * (PCI_ASSIGN_ROMS), try to claim the BIOS-programmed ones first.
	 */
	if (!(pci_probe & PCI_ASSIGN_ROMS))
		list_for_each_entry(bus, &pci_root_buses, node)
			pcibios_allocate_rom_resources(bus);

	pci_assign_unassigned_resources();

	/* The saved firmware addresses are no longer needed. */
	pcibios_fw_addr_list_del();

	return 0;
}
/*
 * Survey a single (e.g. hot-added) bus: same pass ordering as
 * pcibios_resource_survey(), restricted to one root bus.
 */
void pcibios_resource_survey_bus(struct pci_bus *bus)
{
	dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n");

	pcibios_allocate_bus_resources(bus);

	/* Pass 0: enabled devices; pass 1: disabled devices. */
	pcibios_allocate_resources(bus, 0);
	pcibios_allocate_resources(bus, 1);

	if (!(pci_probe & PCI_ASSIGN_ROMS))
		pcibios_allocate_rom_resources(bus);
}
/*
 * Survey all root buses at boot: claim bridge windows first, then the
 * resources of enabled devices (pass 0), then disabled ones (pass 1) —
 * see the "Our solution" comment block above for the rationale.
 */
void __init pcibios_resource_survey(void)
{
	struct pci_bus *bus;

	DBG("PCI: Allocating resources\n");

	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_bus_resources(bus);

	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_resources(bus, 0);
	list_for_each_entry(bus, &pci_root_buses, node)
		pcibios_allocate_resources(bus, 1);

	e820_reserve_resources_late();
	/*
	 * Insert the IO APIC resources after PCI initialization has
	 * occurred to handle IO APICS that are mapped in on a BAR in
	 * PCI space, but before trying to assign unassigned pci res.
	 */
	ioapic_insert_resources();
}
/*
 * Registered at fs_initcall level (one below subsys_initcall) to give
 * motherboard-reserved resources a chance to be claimed first.
 */
fs_initcall(pcibios_assign_resources);
/*
 * VMA operations for user mappings of PCI memory; generic_access_phys
 * supplies the ->access hook for these physical mappings.
 */
static const struct vm_operations_struct pci_mmap_ops = {
	.access = generic_access_phys,
};
/*
 * Map a PCI memory region into a user VMA.
 *
 * @mmap_state: must be pci_mmap_mem; pci_mmap_io is rejected since I/O
 *              space cannot be accessed via processor loads/stores here.
 * @write_combine: request a WC mapping; only honored when PAT is enabled.
 *
 * Returns 0 on success, -EINVAL for unsupported state/attribute
 * combinations, -EAGAIN if the remap fails.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
		enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/* I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot);

	/*
	 * Return error if pat is not enabled and write_combine is requested.
	 * Caller can followup with UC MINUS request and add a WC mtrr if there
	 * is a free mtrr slot.
	 */
	if (!pat_enabled && write_combine)
		return -EINVAL;

	if (pat_enabled && write_combine)
		prot |= _PAGE_CACHE_WC;
	else if (pat_enabled || boot_cpu_data.x86 > 3)
		/*
		 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
		 * To avoid attribute conflicts, request UC MINUS here
		 * as well.
		 */
		prot |= _PAGE_CACHE_UC_MINUS;

	prot |= _PAGE_IOMAP;	/* creating a mapping for IO */

	vma->vm_page_prot = __pgprot(prot);

	if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				vma->vm_end - vma->vm_start,
				vma->vm_page_prot))
		return -EAGAIN;

	/* Install ->access support for the mapping (e.g. for ptrace). */
	vma->vm_ops = &pci_mmap_ops;

	return 0;
}