#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>

struct pci_root_info {
	struct acpi_device *bridge;
	char name[16];
	unsigned int res_num;
	struct resource *res;
	struct pci_sysdata sd;
#ifdef CONFIG_PCI_MMCONFIG
	bool mcfg_added;
	u16 segment;
	u8 start_bus;
	u8 end_bus;
#endif
};
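
/*
 * pci_use_crs selects whether the host bridge windows reported by the
 * ACPI _CRS method are used as root bus resources.  The DMI table below
 * whitelists boards that need _CRS (typically systems with more than one
 * host bridge) and blacklists boards whose firmware reports broken _CRS
 * data; pci_acpi_crs_quirks() applies it.
 */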
static bool pci_use_crs = true;

static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}

static int __init set_nouse_crs(const struct dmi_system_id *id)
{
	pci_use_crs = false;
	return 0;
}

static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASUS M2V-MX SE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
	{
		.callback = set_use_crs,
		.ident = "MSI MS-7253",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
			DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
		},
	},

	/* Now for the blacklist.. */

	/* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
	{
		.callback = set_nouse_crs,
		.ident = "Dell Studio 1557",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
			DMI_MATCH(DMI_BIOS_VERSION, "A09"),
		},
	},
	/* https://bugzilla.redhat.com/show_bug.cgi?id=769657 */
	{
		.callback = set_nouse_crs,
		.ident = "Thinkpad SL510",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_BOARD_NAME, "2847DFG"),
			DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
		},
	},
	{}
};
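
/*
 * Decide whether to trust _CRS: ignore it on machines with a BIOS dated
 * before 2008, apply the DMI quirk table above, and let an explicit
 * "pci=use_crs" or "pci=nocrs" on the command line override everything.
 */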
void __init pci_acpi_crs_quirks(void)
{
	int year;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/*
	 * If the user specifies "pci=use_crs" or "pci=nocrs" explicitly, that
	 * takes precedence over anything we figured out above.
	 */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       pci_use_crs ? "Using" : "Ignoring",
	       pci_use_crs ? "nocrs" : "use_crs");
}

#ifdef CONFIG_PCI_MMCONFIG
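/*
 * Called when MMCONFIG cannot be used for a host bridge.  A non-zero PCI
 * segment is fatal, since there is no other way to reach configuration
 * space on it; on segment 0 only extended config space is lost, so a
 * warning is sufficient.
 */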
static int __devinit check_segment(u16 seg, struct device *dev, char *estr)
{
	if (seg) {
		dev_err(dev,
			"%s can't access PCI configuration "
			"space under this host bridge.\n",
			estr);
		return -EIO;
	}

	/*
	 * Failure in adding MMCFG information is not fatal,
	 * just can't access extended configuration space of
	 * devices under this host bridge.
	 */
	dev_warn(dev,
		 "%s can't access extended PCI configuration "
		 "space under this bridge.\n",
		 estr);

	return 0;
}
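
/*
 * Register the MMCONFIG (ECAM) mapping for the bus range behind this host
 * bridge, using the address supplied in root->mcfg_addr.  If the region is
 * inserted (or already present), extended config space is reachable via
 * pci_mmcfg; otherwise check_segment() decides whether the failure is fatal.
 */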
static int __devinit setup_mcfg_map(struct pci_root_info *info,
				    u16 seg, u8 start, u8 end,
				    phys_addr_t addr)
{
	int result;
	struct device *dev = &info->bridge->dev;

	info->segment = seg;
	info->start_bus = start;
	info->end_bus = end;
	info->mcfg_added = false;

	/* return success if MMCFG is not in use */
	if (raw_pci_ext_ops && raw_pci_ext_ops != &pci_mmcfg)
		return 0;

	if (!(pci_probe & PCI_PROBE_MMCONF))
		return check_segment(seg, dev, "MMCONFIG is disabled,");

	result = pci_mmconfig_insert(dev, seg, start, end, addr);
	if (result == 0) {
		/* enable MMCFG if it hasn't been enabled yet */
		if (raw_pci_ext_ops == NULL)
			raw_pci_ext_ops = &pci_mmcfg;
		info->mcfg_added = true;
	} else if (result != -EEXIST)
		return check_segment(seg, dev,
				     "failed to add MMCONFIG information,");

	return 0;
}
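
/* Remove the MMCONFIG mapping added by setup_mcfg_map(), if any. */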
static void teardown_mcfg_map(struct pci_root_info *info)
{
	if (info->mcfg_added) {
		pci_mmconfig_delete(info->segment, info->start_bus,
				    info->end_bus);
		info->mcfg_added = false;
	}
}
#else
static int __devinit setup_mcfg_map(struct pci_root_info *info,
				    u16 seg, u8 start, u8 end,
				    phys_addr_t addr)
{
	return 0;
}

static void teardown_mcfg_map(struct pci_root_info *info)
{
}
#endif
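
/*
 * Convert an ACPI resource descriptor to a generic 64-bit address
 * descriptor.  The fixed-size memory types are converted by hand;
 * ADDRESS16/32/64 descriptors go through acpi_resource_to_address64()
 * and are accepted only if they describe a non-empty memory or I/O range.
 */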
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	memset(addr, 0, sizeof(*addr));
	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &resource->data.memory24;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory24->minimum;
		addr->address_length = memory24->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &resource->data.memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = memory32->minimum;
		addr->address_length = memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &resource->data.fixed_memory32;
		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = fixed_memory32->address;
		addr->address_length = fixed_memory32->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0) {
			return AE_OK;
		}
		break;
	}
	return AE_ERROR;
}
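
/* First _CRS pass: count the descriptors that map to usable resources. */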
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_addr(acpi_res, &addr);
	if (ACPI_SUCCESS(status))
		info->res_num++;
	return AE_OK;
}
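
/*
 * Second _CRS pass: fill in info->res[].  Windows (or portions of windows)
 * above iomem_resource.end are clipped because the CPU cannot address them,
 * and with "pci=nocrs" the window is only logged, not counted.
 */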
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct resource *res;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags;
	u64 start, orig_end, end;

	status = resource_to_addr(acpi_res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
	} else
		return AE_OK;

	start = addr.minimum + addr.translation_offset;
	orig_end = end = addr.maximum + addr.translation_offset;

	/* Exclude non-addressable range or non-addressable portion of range */
	end = min(end, (u64)iomem_resource.end);
	if (end <= start) {
		dev_info(&info->bridge->dev,
			 "host bridge window [%#llx-%#llx] "
			 "(ignored, not CPU addressable)\n", start, orig_end);
		return AE_OK;
	} else if (orig_end != end) {
		dev_info(&info->bridge->dev,
			 "host bridge window [%#llx-%#llx] "
			 "([%#llx-%#llx] ignored, not CPU addressable)\n",
			 start, orig_end, end + 1, orig_end);
	}

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;
	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
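
/*
 * Merge overlapping windows of the same type into one, since the kernel
 * resource tree does not allow overlapping siblings.  A swallowed window
 * gets flags = 0 so add_resources() skips it.
 */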
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;
	struct resource *res1, *res2;

	for (i = 0; i < info->res_num; i++) {
		res1 = &info->res[i];
		if (!(res1->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			res2 = &info->res[j];
			if (!(res2->flags & type))
				continue;

			/*
			 * I don't like throwing away windows because then
			 * our resources no longer match the ACPI _CRS, but
			 * the kernel resource tree doesn't allow overlaps.
			 */
			if (resource_overlaps(res1, res2)) {
				res1->start = min(res1->start, res2->start);
				res1->end = max(res1->end, res2->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 res1, res2);
				res2->flags = 0;
			}
		}
	}
}
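
/*
 * Insert the coalesced windows into iomem_resource/ioport_resource and,
 * unless they conflict with something already in the tree, add them to
 * the root bus resource list.
 */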
static void add_resources(struct pci_root_info *info,
			  struct list_head *resources)
{
	int i;
	struct resource *res, *root, *conflict;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_info(&info->bridge->dev,
				 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
				 res, conflict->name, conflict);
		else
			pci_add_resource(resources, res);
	}
}
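
/*
 * Teardown path: release the windows that made it into the resource tree,
 * free the window array, drop the MMCONFIG mapping and finally the
 * pci_root_info itself.  release_pci_root_info() is installed as the
 * pci_host_bridge release callback in pci_acpi_scan_root().
 */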
static void free_pci_root_info_res(struct pci_root_info *info)
{
	kfree(info->res);
	info->res = NULL;
	info->res_num = 0;
}

static void __release_pci_root_info(struct pci_root_info *info)
{
	int i;
	struct resource *res;

	for (i = 0; i < info->res_num; i++) {
		res = &info->res[i];

		if (!res->parent)
			continue;

		if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
			continue;

		release_resource(res);
	}

	free_pci_root_info_res(info);

	teardown_mcfg_map(info);

	kfree(info);
}

static void release_pci_root_info(struct pci_host_bridge *bridge)
{
	struct pci_root_info *info = bridge->release_data;

	__release_pci_root_info(info);
}
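
/*
 * Walk _CRS twice: once to size the window array, then again to fill it.
 * If the allocation fails, info->res stays NULL and res_num stays zero,
 * so no windows are added for this bridge.
 */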
static void
probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
		    int busnum, int domain)
{
	size_t size;

	sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
	info->bridge = device;

	info->res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
			    info);
	if (!info->res_num)
		return;

	size = sizeof(*info->res) * info->res_num;
	info->res_num = 0;
	info->res = kmalloc(size, GFP_KERNEL);
	if (!info->res)
		return;

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
			    info);
}
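
/*
 * Create and scan the root bus for an ACPI host bridge: pick a NUMA node
 * (from the _PXM proximity domain when available), gather the bridge
 * windows, set up MMCONFIG, create the root bus, and configure PCIe MPS
 * settings for the child buses.
 */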
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	struct pci_root_info *info = NULL;
	int domain = root->segment;
	int busnum = root->secondary.start;
	LIST_HEAD(resources);
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd = &info->sd;
	sd->domain = domain;
	sd->node = node;

	/*
	 * The desired PCI bus may already have been scanned; in that case
	 * there is no need to scan it again for the given domain/busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exists, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(info);
	} else {
		probe_pci_root_info(info, device, busnum, domain);

		/* Insert the bus number resource first. */
		pci_add_resource(&resources, &root->secondary);

		/*
		 * _CRS with no apertures is normal, so only fall back to
		 * defaults or native bridge info if we're ignoring _CRS.
		 */
		if (pci_use_crs)
			add_resources(info, &resources);
		else {
			free_pci_root_info_res(info);
			x86_pci_root_bus_resources(busnum, &resources);
		}

		if (!setup_mcfg_map(info, domain, (u8)root->secondary.start,
				    (u8)root->secondary.end, root->mcfg_addr))
			bus = pci_create_root_bus(NULL, busnum, &pci_root_ops,
						  sd, &resources);

		if (bus) {
			pci_scan_child_bus(bus);
			pci_set_host_bridge_release(
				to_pci_host_bridge(bus->bridge),
				release_pci_root_info, info);
		} else {
			pci_free_resource_list(&resources);
			__release_pci_root_info(info);
		}
	}

	/* After the PCI-E bus has been walked and all devices discovered,
	 * configure any settings of the fabric that might be necessary.
	 */
	if (bus) {
		struct pci_bus *child;
		list_for_each_entry(child, &bus->children, node) {
			struct pci_dev *self = child->self;
			if (!self)
				continue;

			pcie_bus_configure_settings(child, self->pcie_mpss);
		}
	}

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
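
/*
 * Switch x86 PCI IRQ routing over to ACPI: unless ACPI IRQ routing is
 * disabled (acpi_noirq), IRQs are set up through acpi_pci_irq_enable()
 * and the legacy init_irq hook is stubbed out with x86_init_noop.
 */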
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	}

	return 0;
}