/*
 * arch/x86/pci/acpi.c — discover x86 PCI root bridges via ACPI,
 * translate _CRS host bridge windows into kernel resources, and
 * set up ACPI-based PCI IRQ routing.
 */
  1. #include <linux/pci.h>
  2. #include <linux/acpi.h>
  3. #include <linux/init.h>
  4. #include <linux/irq.h>
  5. #include <linux/dmi.h>
  6. #include <linux/slab.h>
  7. #include <asm/numa.h>
  8. #include <asm/pci_x86.h>
/*
 * Per-host-bridge bookkeeping used while translating the bridge's ACPI
 * _CRS entries into kernel 'struct resource' windows.
 */
struct pci_root_info {
	struct acpi_device *bridge;	/* ACPI device of the host bridge */
	char *name;			/* kasprintf'ed "PCI Bus %04x:%02x"; used as res->name */
	unsigned int res_num;		/* number of valid entries in res[] */
	struct resource *res;		/* kmalloc'ed window array */
	struct pci_bus *bus;		/* root bus the windows are added to */
	int busnum;			/* root bus number (not assigned in the code shown here) */
};
/*
 * Whether to trust the host bridge windows reported by ACPI _CRS.
 * Adjusted by a BIOS-date heuristic, the DMI table below, and the
 * "pci=use_crs"/"pci=nocrs" command line options.
 */
static bool pci_use_crs = true;

/* DMI callback: force _CRS on for machines in pci_use_crs_table. */
static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}
/*
 * Machines that need their _CRS windows honored even though the
 * BIOS-date heuristic in pci_acpi_crs_quirks() would otherwise
 * disable them.
 */
static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASUS M2V-MX SE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		},
	},
	{}
};
  55. void __init pci_acpi_crs_quirks(void)
  56. {
  57. int year;
  58. if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
  59. pci_use_crs = false;
  60. dmi_check_system(pci_use_crs_table);
  61. /*
  62. * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
  63. * takes precedence over anything we figured out above.
  64. */
  65. if (pci_probe & PCI_ROOT_NO_CRS)
  66. pci_use_crs = false;
  67. else if (pci_probe & PCI_USE__CRS)
  68. pci_use_crs = true;
  69. printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
  70. "if necessary, use \"pci=%s\" and report a bug\n",
  71. pci_use_crs ? "Using" : "Ignoring",
  72. pci_use_crs ? "nocrs" : "use_crs");
  73. }
  74. static acpi_status
  75. resource_to_addr(struct acpi_resource *resource,
  76. struct acpi_resource_address64 *addr)
  77. {
  78. acpi_status status;
  79. struct acpi_resource_memory24 *memory24;
  80. struct acpi_resource_memory32 *memory32;
  81. struct acpi_resource_fixed_memory32 *fixed_memory32;
  82. memset(addr, 0, sizeof(*addr));
  83. switch (resource->type) {
  84. case ACPI_RESOURCE_TYPE_MEMORY24:
  85. memory24 = &resource->data.memory24;
  86. addr->resource_type = ACPI_MEMORY_RANGE;
  87. addr->minimum = memory24->minimum;
  88. addr->address_length = memory24->address_length;
  89. addr->maximum = addr->minimum + addr->address_length - 1;
  90. return AE_OK;
  91. case ACPI_RESOURCE_TYPE_MEMORY32:
  92. memory32 = &resource->data.memory32;
  93. addr->resource_type = ACPI_MEMORY_RANGE;
  94. addr->minimum = memory32->minimum;
  95. addr->address_length = memory32->address_length;
  96. addr->maximum = addr->minimum + addr->address_length - 1;
  97. return AE_OK;
  98. case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
  99. fixed_memory32 = &resource->data.fixed_memory32;
  100. addr->resource_type = ACPI_MEMORY_RANGE;
  101. addr->minimum = fixed_memory32->address;
  102. addr->address_length = fixed_memory32->address_length;
  103. addr->maximum = addr->minimum + addr->address_length - 1;
  104. return AE_OK;
  105. case ACPI_RESOURCE_TYPE_ADDRESS16:
  106. case ACPI_RESOURCE_TYPE_ADDRESS32:
  107. case ACPI_RESOURCE_TYPE_ADDRESS64:
  108. status = acpi_resource_to_address64(resource, addr);
  109. if (ACPI_SUCCESS(status) &&
  110. (addr->resource_type == ACPI_MEMORY_RANGE ||
  111. addr->resource_type == ACPI_IO_RANGE) &&
  112. addr->address_length > 0) {
  113. return AE_OK;
  114. }
  115. break;
  116. }
  117. return AE_ERROR;
  118. }
  119. static acpi_status
  120. count_resource(struct acpi_resource *acpi_res, void *data)
  121. {
  122. struct pci_root_info *info = data;
  123. struct acpi_resource_address64 addr;
  124. acpi_status status;
  125. status = resource_to_addr(acpi_res, &addr);
  126. if (ACPI_SUCCESS(status))
  127. info->res_num++;
  128. return AE_OK;
  129. }
  130. static acpi_status
  131. setup_resource(struct acpi_resource *acpi_res, void *data)
  132. {
  133. struct pci_root_info *info = data;
  134. struct resource *res;
  135. struct acpi_resource_address64 addr;
  136. acpi_status status;
  137. unsigned long flags;
  138. u64 start, end;
  139. status = resource_to_addr(acpi_res, &addr);
  140. if (!ACPI_SUCCESS(status))
  141. return AE_OK;
  142. if (addr.resource_type == ACPI_MEMORY_RANGE) {
  143. flags = IORESOURCE_MEM;
  144. if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
  145. flags |= IORESOURCE_PREFETCH;
  146. } else if (addr.resource_type == ACPI_IO_RANGE) {
  147. flags = IORESOURCE_IO;
  148. } else
  149. return AE_OK;
  150. start = addr.minimum + addr.translation_offset;
  151. end = addr.maximum + addr.translation_offset;
  152. res = &info->res[info->res_num];
  153. res->name = info->name;
  154. res->flags = flags;
  155. res->start = start;
  156. res->end = end;
  157. res->child = NULL;
  158. if (!pci_use_crs) {
  159. dev_printk(KERN_DEBUG, &info->bridge->dev,
  160. "host bridge window %pR (ignored)\n", res);
  161. return AE_OK;
  162. }
  163. info->res_num++;
  164. if (addr.translation_offset)
  165. dev_info(&info->bridge->dev, "host bridge window %pR "
  166. "(PCI address [%#llx-%#llx])\n",
  167. res, res->start - addr.translation_offset,
  168. res->end - addr.translation_offset);
  169. else
  170. dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
  171. return AE_OK;
  172. }
  173. static bool resource_contains(struct resource *res, resource_size_t point)
  174. {
  175. if (res->start <= point && point <= res->end)
  176. return true;
  177. return false;
  178. }
  179. static void coalesce_windows(struct pci_root_info *info, unsigned long type)
  180. {
  181. int i, j;
  182. struct resource *res1, *res2;
  183. for (i = 0; i < info->res_num; i++) {
  184. res1 = &info->res[i];
  185. if (!(res1->flags & type))
  186. continue;
  187. for (j = i + 1; j < info->res_num; j++) {
  188. res2 = &info->res[j];
  189. if (!(res2->flags & type))
  190. continue;
  191. /*
  192. * I don't like throwing away windows because then
  193. * our resources no longer match the ACPI _CRS, but
  194. * the kernel resource tree doesn't allow overlaps.
  195. */
  196. if (resource_contains(res1, res2->start) ||
  197. resource_contains(res1, res2->end) ||
  198. resource_contains(res2, res1->start) ||
  199. resource_contains(res2, res1->end)) {
  200. res1->start = min(res1->start, res2->start);
  201. res1->end = max(res1->end, res2->end);
  202. dev_info(&info->bridge->dev,
  203. "host bridge window expanded to %pR; %pR ignored\n",
  204. res1, res2);
  205. res2->flags = 0;
  206. }
  207. }
  208. }
  209. }
  210. static void add_resources(struct pci_root_info *info)
  211. {
  212. int i;
  213. struct resource *res, *root, *conflict;
  214. if (!pci_use_crs)
  215. return;
  216. coalesce_windows(info, IORESOURCE_MEM);
  217. coalesce_windows(info, IORESOURCE_IO);
  218. for (i = 0; i < info->res_num; i++) {
  219. res = &info->res[i];
  220. if (res->flags & IORESOURCE_MEM)
  221. root = &iomem_resource;
  222. else if (res->flags & IORESOURCE_IO)
  223. root = &ioport_resource;
  224. else
  225. continue;
  226. conflict = insert_resource_conflict(root, res);
  227. if (conflict)
  228. dev_info(&info->bridge->dev,
  229. "ignoring host bridge window %pR (conflicts with %s %pR)\n",
  230. res, conflict->name, conflict);
  231. else
  232. pci_bus_add_resource(info->bus, res, 0);
  233. }
  234. }
  235. static void
  236. get_current_resources(struct acpi_device *device, int busnum,
  237. int domain, struct pci_bus *bus)
  238. {
  239. struct pci_root_info info;
  240. size_t size;
  241. if (pci_use_crs)
  242. pci_bus_remove_resources(bus);
  243. info.bridge = device;
  244. info.bus = bus;
  245. info.res_num = 0;
  246. acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
  247. &info);
  248. if (!info.res_num)
  249. return;
  250. size = sizeof(*info.res) * info.res_num;
  251. info.res = kmalloc(size, GFP_KERNEL);
  252. if (!info.res)
  253. goto res_alloc_fail;
  254. info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
  255. if (!info.name)
  256. goto name_alloc_fail;
  257. info.res_num = 0;
  258. acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
  259. &info);
  260. add_resources(&info);
  261. return;
  262. name_alloc_fail:
  263. kfree(info.res);
  264. res_alloc_fail:
  265. return;
  266. }
/*
 * Scan (or adopt) the PCI root bus described by an ACPI PNP0A03/PNP0A08
 * device: resolve its NUMA node, allocate per-root sysdata, create and
 * scan the bus if it isn't already known, and configure PCIe fabric
 * settings for its children.  Returns the root pci_bus, or NULL on
 * failure.
 */
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	/* Prefer the ACPI proximity domain; fall back to the bus->node
	 * map.  Note the bare "else" below pairs across the #endif with
	 * the get_mp_bus_to_node() call. */
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired pci bus has been already scanned. In such case
	 * it is unnecessary to scan the pci bus with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	/* After the PCI-E bus has been walked and all devices discovered,
	 * configure any settings of the fabric that might be necessary.
	 */
	if (bus) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node) {
			struct pci_dev *self = child->self;
			if (!self)
				continue;

			/* Tune MPS/MRRS for each bridge below the root. */
			pcie_bus_configure_settings(child, self->pcie_mpss);
		}
	}

	/* sd is owned by the bus once created; only free it on failure. */
	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
  352. int __init pci_acpi_init(void)
  353. {
  354. struct pci_dev *dev = NULL;
  355. if (acpi_noirq)
  356. return -ENODEV;
  357. printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
  358. acpi_irq_penalty_init();
  359. pcibios_enable_irq = acpi_pci_irq_enable;
  360. pcibios_disable_irq = acpi_pci_irq_disable;
  361. x86_init.pci.init_irq = x86_init_noop;
  362. if (pci_routeirq) {
  363. /*
  364. * PCI IRQ routing is set up by pci_enable_device(), but we
  365. * also do it here in case there are still broken drivers that
  366. * don't use pci_enable_device().
  367. */
  368. printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
  369. for_each_pci_dev(dev)
  370. acpi_pci_irq_enable(dev);
  371. }
  372. return 0;
  373. }