/* acpi.c */
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
	struct acpi_device *bridge;
	char *name;
	unsigned int res_num;
	struct resource *res;
	struct pci_bus *bus;
	int busnum;
};
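
/*
 * pci_use_crs controls whether the host bridge windows reported by the
 * ACPI _CRS method are used for root bus resources.  The DMI table below
 * re-enables _CRS on machines that are known to need it even though the
 * default heuristic in pci_acpi_crs_quirks() would ignore it.
 */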
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	{}
};
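
/*
 * Decide whether to trust _CRS: ignore it on BIOSes older than 2008 unless
 * the machine is whitelisted above, and let an explicit "pci=use_crs" or
 * "pci=nocrs" on the command line override both.
 */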
void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}
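
/*
 * Translate a Memory24/Memory32/FixedMemory32 or Address16/32/64 descriptor
 * into a generic acpi_resource_address64.  Returns AE_ERROR for descriptor
 * types we do not handle and for empty ranges.
 */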
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}
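
/*
 * First _CRS pass: count the descriptors we can translate so that
 * get_current_resources() knows how many struct resource entries to
 * allocate.
 */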
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}
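
/*
 * Second _CRS pass: convert each usable descriptor into a struct resource,
 * applying the translation offset so the window is expressed in host (CPU)
 * addresses.
 */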
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
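
/* Return true if @point falls inside @res. */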
static bool resource_contains(struct resource *res, resource_size_t point)
{
	if (res->start <= point && point <= res->end)
		return true;
	return false;
}
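
/*
 * Merge host bridge windows of the same type that overlap.  The kernel
 * resource tree cannot represent overlapping siblings, so the first window
 * is grown to cover both and the second is dropped (its flags are cleared).
 */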
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;
	struct resource *res1, *res2;

	for (i = 0; i < info->res_num; i++) {
		res1 = &info->res[i];
		if (!(res1->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			res2 = &info->res[j];
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_contains(res1, res2->start) ||
			    resource_contains(res1, res2->end) ||
			    resource_contains(res2, res1->start) ||
			    resource_contains(res2, res1->end)) {
				res1->start = min(res1->start, res2->start);
				res1->end = max(res1->end, res2->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 res1, res2);
				res2->flags = 0;
			}
		}
	}
}
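
/*
 * Insert the (coalesced) windows into the global iomem/ioport trees and
 * attach them to the root bus.  A window that conflicts with an existing
 * resource is logged and skipped.
 */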
static void add_resources(struct pci_root_info *info)
{
	int i;
	struct resource *res, *root, *conflict;

	if (!pci_use_crs)
		return;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_info(&info->bridge->dev,
				 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
				 res, conflict->name, conflict);
		else
			pci_bus_add_resource(info->bus, res, 0);
	}
}
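
/*
 * Evaluate the host bridge's _CRS: when _CRS is trusted, drop the default
 * root bus resources, then count the usable windows, allocate the resource
 * array, and fill it in via setup_resource().
 */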
static void
get_current_resources(struct acpi_device *device, int busnum,
		      int domain, struct pci_bus *bus)
{
	struct pci_root_info info;
	size_t size;

	if (pci_use_crs)
		pci_bus_remove_resources(bus);

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
			    &info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		goto res_alloc_fail;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
			    &info);

	add_resources(&info);
	return;

name_alloc_fail:
	kfree(info.res);
res_alloc_fail:
	return;
}
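
/*
 * Create and scan a root PCI bus for an ACPI host bridge: pick a NUMA node
 * from the _PXM proximity domain when available, allocate the per-root
 * pci_sysdata, reuse the bus if it has already been scanned, and otherwise
 * create it and enumerate its children.
 */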
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired PCI bus has already been scanned. In that case
	 * it is unnecessary to scan the bus with the given domain/busnum
	 * again.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the contents of bus->sysdata
		 * will be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
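
/*
 * Switch x86 PCI IRQ setup over to ACPI: register the ACPI enable/disable
 * hooks and, with "pci=routeirq", route interrupts for every device up
 * front instead of waiting for pci_enable_device().
 */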
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	}

	return 0;
}