acpi.c

#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
        struct acpi_device *bridge;
        char *name;
        unsigned int res_num;
        struct resource *res;
        struct list_head *resources;
        int busnum;
};

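/*
 * pci_use_crs controls whether host bridge windows are taken from the ACPI
 * _CRS method.  When false, the _CRS windows are ignored and the default
 * root bus resources are used instead.  It can be overridden by the DMI
 * quirks below and by "pci=use_crs" / "pci=nocrs" on the command line.
 */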
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
        pci_use_crs = true;
        return 0;
}

static int __init set_nouse_crs(const struct dmi_system_id *id)
{
        pci_use_crs = false;
        return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
        /* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
        {
                .callback = set_use_crs,
                .ident = "IBM System x3800",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
                },
        },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
        /* 2006 AMD HT/VIA system with two host bridges */
        {
                .callback = set_use_crs,
                .ident = "ASRock ALiveSATA2-GLAN",
                .matches = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
                },
        },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
        /* 2006 AMD HT/VIA system with two host bridges */
        {
                .callback = set_use_crs,
                .ident = "ASUS M2V-MX SE",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
                        DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
                },
        },

        /* Now for the blacklist.. */

        /* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
        {
                .callback = set_nouse_crs,
                .ident = "Dell Studio 1557",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
                        DMI_MATCH(DMI_BIOS_VERSION, "A09"),
                },
        },
        /* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
        {
                .callback = set_nouse_crs,
                .ident = "Thinkpad SL510",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_BOARD_NAME, "2847DFG"),
                        DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
                },
        },
        {}
};

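/*
 * Decide whether to trust the _CRS windows: BIOSes dated before 2008 are
 * distrusted by default, the DMI table above can force either behaviour,
 * and an explicit "pci=use_crs" / "pci=nocrs" always takes precedence.
 */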
void __init pci_acpi_crs_quirks(void)
{
        int year;

        if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
                pci_use_crs = false;

        dmi_check_system(pci_use_crs_table);

        /*
         * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
         * takes precedence over anything we figured out above.
         */
        if (pci_probe & PCI_ROOT_NO_CRS)
                pci_use_crs = false;
        else if (pci_probe & PCI_USE__CRS)
                pci_use_crs = true;

        printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
               "if necessary, use \"pci=%s\" and report a bug\n",
               pci_use_crs ? "Using" : "Ignoring",
               pci_use_crs ? "nocrs" : "use_crs");
}

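/*
 * Normalize a _CRS entry (Memory24/32, FixedMemory32 or a 16/32/64-bit
 * address space descriptor) into an acpi_resource_address64 so later code
 * only has to deal with one representation.
 */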
static acpi_status
resource_to_addr(struct acpi_resource *resource,
                 struct acpi_resource_address64 *addr)
{
        acpi_status status;
        struct acpi_resource_memory24 *memory24;
        struct acpi_resource_memory32 *memory32;
        struct acpi_resource_fixed_memory32 *fixed_memory32;

        memset(addr, 0, sizeof(*addr));
        switch (resource->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &resource->data.memory24;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory24->minimum;
                addr->address_length = memory24->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &resource->data.memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = memory32->minimum;
                addr->address_length = memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &resource->data.fixed_memory32;
                addr->resource_type = ACPI_MEMORY_RANGE;
                addr->minimum = fixed_memory32->address;
                addr->address_length = fixed_memory32->address_length;
                addr->maximum = addr->minimum + addr->address_length - 1;
                return AE_OK;
        case ACPI_RESOURCE_TYPE_ADDRESS16:
        case ACPI_RESOURCE_TYPE_ADDRESS32:
        case ACPI_RESOURCE_TYPE_ADDRESS64:
                status = acpi_resource_to_address64(resource, addr);
                if (ACPI_SUCCESS(status) &&
                    (addr->resource_type == ACPI_MEMORY_RANGE ||
                     addr->resource_type == ACPI_IO_RANGE) &&
                    addr->address_length > 0) {
                        return AE_OK;
                }
                break;
        }
        return AE_ERROR;
}

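/* First _CRS pass: count the usable entries so the resource array can be sized. */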
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct acpi_resource_address64 addr;
        acpi_status status;

        status = resource_to_addr(acpi_res, &addr);
        if (ACPI_SUCCESS(status))
                info->res_num++;
        return AE_OK;
}

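/*
 * Second _CRS pass: turn each usable entry into a struct resource, applying
 * the translation offset and clipping anything that lies beyond the
 * CPU-addressable range described by iomem_resource.
 */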
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
        struct pci_root_info *info = data;
        struct resource *res;
        struct acpi_resource_address64 addr;
        acpi_status status;
        unsigned long flags;
        u64 start, orig_end, end;

        status = resource_to_addr(acpi_res, &addr);
        if (!ACPI_SUCCESS(status))
                return AE_OK;

        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                flags = IORESOURCE_MEM;
                if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
                        flags |= IORESOURCE_PREFETCH;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                flags = IORESOURCE_IO;
        } else
                return AE_OK;

        start = addr.minimum + addr.translation_offset;
        orig_end = end = addr.maximum + addr.translation_offset;

        /* Exclude non-addressable range or non-addressable portion of range */
        end = min(end, (u64)iomem_resource.end);
        if (end <= start) {
                dev_info(&info->bridge->dev,
                         "host bridge window [%#llx-%#llx] "
                         "(ignored, not CPU addressable)\n", start, orig_end);
                return AE_OK;
        } else if (orig_end != end) {
                dev_info(&info->bridge->dev,
                         "host bridge window [%#llx-%#llx] "
                         "([%#llx-%#llx] ignored, not CPU addressable)\n",
                         start, orig_end, end + 1, orig_end);
        }

        res = &info->res[info->res_num];
        res->name = info->name;
        res->flags = flags;
        res->start = start;
        res->end = end;
        res->child = NULL;

        if (!pci_use_crs) {
                dev_printk(KERN_DEBUG, &info->bridge->dev,
                           "host bridge window %pR (ignored)\n", res);
                return AE_OK;
        }

        info->res_num++;
        if (addr.translation_offset)
                dev_info(&info->bridge->dev, "host bridge window %pR "
                         "(PCI address [%#llx-%#llx])\n",
                         res, res->start - addr.translation_offset,
                         res->end - addr.translation_offset);
        else
                dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

        return AE_OK;
}

static bool resource_contains(struct resource *res, resource_size_t point)
{
        if (res->start <= point && point <= res->end)
                return true;
        return false;
}

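/*
 * Merge any two windows of the same type that overlap: grow the first to
 * cover both and mark the second unused (flags = 0).
 */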
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
        int i, j;
        struct resource *res1, *res2;

        for (i = 0; i < info->res_num; i++) {
                res1 = &info->res[i];
                if (!(res1->flags & type))
                        continue;

                for (j = i + 1; j < info->res_num; j++) {
                        res2 = &info->res[j];
                        if (!(res2->flags & type))
                                continue;

                        /*
                         * I don't like throwing away windows because then
                         * our resources no longer match the ACPI _CRS, but
                         * the kernel resource tree doesn't allow overlaps.
                         */
                        if (resource_contains(res1, res2->start) ||
                            resource_contains(res1, res2->end) ||
                            resource_contains(res2, res1->start) ||
                            resource_contains(res2, res1->end)) {
                                res1->start = min(res1->start, res2->start);
                                res1->end = max(res1->end, res2->end);
                                dev_info(&info->bridge->dev,
                                         "host bridge window expanded to %pR; %pR ignored\n",
                                         res1, res2);
                                res2->flags = 0;
                        }
                }
        }
}

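/*
 * Insert the surviving windows into the iomem/ioport trees and hand them to
 * the PCI core as root bus resources; windows that conflict with an existing
 * resource are dropped.
 */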
static void add_resources(struct pci_root_info *info)
{
        int i;
        struct resource *res, *root, *conflict;

        if (!pci_use_crs)
                return;

        coalesce_windows(info, IORESOURCE_MEM);
        coalesce_windows(info, IORESOURCE_IO);

        for (i = 0; i < info->res_num; i++) {
                res = &info->res[i];

                if (res->flags & IORESOURCE_MEM)
                        root = &iomem_resource;
                else if (res->flags & IORESOURCE_IO)
                        root = &ioport_resource;
                else
                        continue;

                conflict = insert_resource_conflict(root, res);
                if (conflict)
                        dev_info(&info->bridge->dev,
                                 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
                                 res, conflict->name, conflict);
                else
                        pci_add_resource(info->resources, res);
        }
}

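/*
 * Evaluate the host bridge's _CRS: one walk to count the entries, a second
 * walk to fill in the resource array, then publish the result via
 * add_resources().
 */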
static void
get_current_resources(struct acpi_device *device, int busnum,
                      int domain, struct list_head *resources)
{
        struct pci_root_info info;
        size_t size;

        info.bridge = device;
        info.res_num = 0;
        info.resources = resources;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
                            &info);
        if (!info.res_num)
                return;

        size = sizeof(*info.res) * info.res_num;
        info.res = kmalloc(size, GFP_KERNEL);
        if (!info.res)
                return;

        info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
        if (!info.name)
                goto name_alloc_fail;

        info.res_num = 0;
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                            &info);

        add_resources(&info);
        return;

name_alloc_fail:
        kfree(info.res);
}

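/*
 * Create (or find) the PCI root bus for an ACPI host bridge: pick the NUMA
 * node, allocate the per-root sysdata, gather the _CRS windows, scan the
 * child buses and apply PCIe fabric settings.
 */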
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
        struct acpi_device *device = root->device;
        int domain = root->segment;
        int busnum = root->secondary.start;
        LIST_HEAD(resources);
        struct pci_bus *bus;
        struct pci_sysdata *sd;
        int node;
#ifdef CONFIG_ACPI_NUMA
        int pxm;
#endif

        if (domain && !pci_domains_supported) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (multiple domains not supported)\n",
                       domain, busnum);
                return NULL;
        }

        node = -1;
#ifdef CONFIG_ACPI_NUMA
        pxm = acpi_get_pxm(device->handle);
        if (pxm >= 0)
                node = pxm_to_node(pxm);
        if (node != -1)
                set_mp_bus_to_node(busnum, node);
        else
#endif
                node = get_mp_bus_to_node(busnum);

        if (node != -1 && !node_online(node))
                node = -1;

        /* Allocate per-root-bus (not per bus) arch-specific data.
         * TODO: leak; this memory is never freed.
         * It's arguable whether it's worth the trouble to care.
         */
        sd = kzalloc(sizeof(*sd), GFP_KERNEL);
        if (!sd) {
                printk(KERN_WARNING "pci_bus %04x:%02x: "
                       "ignored (out of memory)\n", domain, busnum);
                return NULL;
        }

        sd->domain = domain;
        sd->node = node;
        /*
         * The desired PCI bus may already have been scanned; in that case
         * there is no need to scan it again for the given domain and bus
         * number.
         */
        bus = pci_find_bus(domain, busnum);
        if (bus) {
                /*
                 * If the desired bus exists, the content of bus->sysdata will
                 * be replaced by sd.
                 */
                memcpy(bus->sysdata, sd, sizeof(*sd));
                kfree(sd);
        } else {
                get_current_resources(device, busnum, domain, &resources);

                if (list_empty(&resources))
                        x86_pci_root_bus_resources(busnum, &resources);
                bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd,
                                          &resources);
                if (bus)
                        bus->subordinate = pci_scan_child_bus(bus);
                else
                        pci_free_resource_list(&resources);
        }

        /* After the PCI-E bus has been walked and all devices discovered,
         * configure any settings of the fabric that might be necessary.
         */
        if (bus) {
                struct pci_bus *child;

                list_for_each_entry(child, &bus->children, node) {
                        struct pci_dev *self = child->self;
                        if (!self)
                                continue;

                        pcie_bus_configure_settings(child, self->pcie_mpss);
                }
        }

        if (!bus)
                kfree(sd);

        if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
                if (pxm >= 0)
                        dev_printk(KERN_DEBUG, &bus->dev,
                                   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
                dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
        }

        return bus;
}

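/*
 * Switch x86 PCI IRQ setup over to ACPI-based routing; with "pci=routeirq"
 * every device's IRQ is routed up front instead of at pci_enable_device()
 * time.
 */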
int __init pci_acpi_init(void)
{
        struct pci_dev *dev = NULL;

        if (acpi_noirq)
                return -ENODEV;

        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
        acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;

        if (pci_routeirq) {
                /*
                 * PCI IRQ routing is set up by pci_enable_device(), but we
                 * also do it here in case there are still broken drivers that
                 * don't use pci_enable_device().
                 */
                printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
                for_each_pci_dev(dev)
                        acpi_pci_irq_enable(dev);
        }

        return 0;
}