  1. /*
  2. * drivers/pci/setup-bus.c
  3. *
  4. * Extruded from code written by
  5. * Dave Rusling (david.rusling@reo.mts.dec.com)
  6. * David Mosberger (davidm@cs.arizona.edu)
  7. * David Miller (davem@redhat.com)
  8. *
  9. * Support routines for initializing a PCI subsystem.
  10. */
  11. /*
  12. * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  13. * PCI-PCI bridges cleanup, sorted resource allocation.
  14. * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  15. * Converted to allocation in 3 passes, which gives
  16. * tighter packing. Prefetchable range support.
  17. */
  18. #include <linux/init.h>
  19. #include <linux/kernel.h>
  20. #include <linux/module.h>
  21. #include <linux/pci.h>
  22. #include <linux/errno.h>
  23. #include <linux/ioport.h>
  24. #include <linux/cache.h>
  25. #include <linux/slab.h>
  26. #include "pci.h"
/*
 * Tracker node used by the sizing/assignment code to remember a device
 * resource, a saved snapshot of its window, and any optional extra size
 * that was requested for it.
 */
struct pci_dev_resource {
        struct list_head list;          /* node in a tracking list (save/realloc/fail) */
        struct resource *res;           /* the resource being tracked */
        struct pci_dev *dev;            /* device this resource belongs to */
        resource_size_t start;          /* saved copy of res->start */
        resource_size_t end;            /* saved copy of res->end */
        resource_size_t add_size;       /* optional additional size requested */
        resource_size_t min_align;      /* minimum alignment for the additional size */
        unsigned long flags;            /* saved copy of res->flags */
};
  37. static void free_list(struct list_head *head)
  38. {
  39. struct pci_dev_resource *dev_res, *tmp;
  40. list_for_each_entry_safe(dev_res, tmp, head, list) {
  41. list_del(&dev_res->list);
  42. kfree(dev_res);
  43. }
  44. }
/* Non-zero once the user has asked for aggressive resource reallocation
 * (e.g. via a kernel parameter); checked by the allocation retry logic. */
int pci_realloc_enable = 0;
#define pci_realloc_enabled() pci_realloc_enable

/* Enable aggressive reallocation of PCI resources. */
void pci_realloc(void)
{
        pci_realloc_enable = 1;
}
  51. /**
  52. * add_to_list() - add a new resource tracker to the list
  53. * @head: Head of the list
  54. * @dev: device corresponding to which the resource
  55. * belongs
  56. * @res: The resource to be tracked
  57. * @add_size: additional size to be optionally added
  58. * to the resource
  59. */
  60. static int add_to_list(struct list_head *head,
  61. struct pci_dev *dev, struct resource *res,
  62. resource_size_t add_size, resource_size_t min_align)
  63. {
  64. struct pci_dev_resource *tmp;
  65. tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
  66. if (!tmp) {
  67. pr_warning("add_to_list: kmalloc() failed!\n");
  68. return -ENOMEM;
  69. }
  70. tmp->res = res;
  71. tmp->dev = dev;
  72. tmp->start = res->start;
  73. tmp->end = res->end;
  74. tmp->flags = res->flags;
  75. tmp->add_size = add_size;
  76. tmp->min_align = min_align;
  77. list_add(&tmp->list, head);
  78. return 0;
  79. }
  80. static void add_to_failed_list(struct list_head *head,
  81. struct pci_dev *dev, struct resource *res)
  82. {
  83. add_to_list(head, dev, res,
  84. 0 /* dont care */,
  85. 0 /* dont care */);
  86. }
  87. static void remove_from_list(struct list_head *head,
  88. struct resource *res)
  89. {
  90. struct pci_dev_resource *dev_res, *tmp;
  91. list_for_each_entry_safe(dev_res, tmp, head, list) {
  92. if (dev_res->res == res) {
  93. list_del(&dev_res->list);
  94. kfree(dev_res);
  95. break;
  96. }
  97. }
  98. }
/*
 * Look up @res on the realloc tracking list @head and return the optional
 * additional size recorded for it, or 0 if the resource is not tracked.
 */
static resource_size_t get_res_add_size(struct list_head *head,
                                        struct resource *res)
{
        struct pci_dev_resource *dev_res;

        list_for_each_entry(dev_res, head, list) {
                if (dev_res->res == res) {
                        /* BAR index: resources form a contiguous array in pci_dev */
                        int idx = res - &dev_res->dev->resource[0];

                        dev_printk(KERN_DEBUG, &dev_res->dev->dev,
                                   "res[%d]=%pR get_res_add_size add_size %llx\n",
                                   idx, dev_res->res,
                                   (unsigned long long)dev_res->add_size);
                        return dev_res->add_size;
                }
        }

        /* Not tracked: no additional size was requested. */
        return 0;
}
/*
 * Sort resources by alignment: build @head as a list of this device's
 * unassigned resources, ordered by decreasing alignment (insertion sort).
 */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
        int i;

        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                struct resource *r;
                struct pci_dev_resource *dev_res, *tmp;
                resource_size_t r_align;
                struct list_head *n;

                r = &dev->resource[i];

                /* Immovable BARs stay where firmware put them. */
                if (r->flags & IORESOURCE_PCI_FIXED)
                        continue;

                /* Skip empty slots and resources already claimed by a parent. */
                if (!(r->flags) || r->parent)
                        continue;

                r_align = pci_resource_alignment(dev, r);
                if (!r_align) {
                        dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
                                 i, r);
                        continue;
                }

                /* No recovery path here: allocation failure is treated as fatal. */
                tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
                if (!tmp)
                        panic("pdev_sort_resources(): "
                              "kmalloc() failed!\n");
                tmp->res = r;
                tmp->dev = dev;

                /* fallback is smallest one or list is empty */
                n = head;
                list_for_each_entry(dev_res, head, list) {
                        resource_size_t align;

                        align = pci_resource_alignment(dev_res->dev,
                                                       dev_res->res);
                        /* First entry with smaller alignment marks the slot. */
                        if (r_align > align) {
                                n = &dev_res->list;
                                break;
                        }
                }
                /* Insert it just before n */
                list_add_tail(&tmp->list, n);
        }
}
  156. static void __dev_sort_resources(struct pci_dev *dev,
  157. struct list_head *head)
  158. {
  159. u16 class = dev->class >> 8;
  160. /* Don't touch classless devices or host bridges or ioapics. */
  161. if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
  162. return;
  163. /* Don't touch ioapic devices already enabled by firmware */
  164. if (class == PCI_CLASS_SYSTEM_PIC) {
  165. u16 command;
  166. pci_read_config_word(dev, PCI_COMMAND, &command);
  167. if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
  168. return;
  169. }
  170. pdev_sort_resources(dev, head);
  171. }
  172. static inline void reset_resource(struct resource *res)
  173. {
  174. res->start = 0;
  175. res->end = 0;
  176. res->flags = 0;
  177. }
/**
 * reassign_resources_sorted() - satisfy any additional resource requests
 *
 * @realloc_head : head of the list tracking requests requiring additional
 *                 resources
 * @head         : head of the list tracking requests with allocated
 *                 resources
 *
 * Walk through each element of the realloc_head and try to procure
 * additional resources for the element, provided the element
 * is in the head list.  Matched requests are consumed (removed and
 * freed) whether or not the extra allocation succeeds; unmatched
 * requests are left on @realloc_head.
 */
static void reassign_resources_sorted(struct list_head *realloc_head,
                                      struct list_head *head)
{
        struct resource *res;
        struct pci_dev_resource *add_res, *tmp;
        struct pci_dev_resource *dev_res;
        resource_size_t add_size;
        int idx;

        list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
                bool found_match = false;

                res = add_res->res;
                /* skip resource that has been reset */
                if (!res->flags)
                        goto out;

                /* skip this resource if not found in head list */
                list_for_each_entry(dev_res, head, list) {
                        if (dev_res->res == res) {
                                found_match = true;
                                break;
                        }
                }
                if (!found_match)/* just skip */
                        continue;

                idx = res - &add_res->dev->resource[0];
                add_size = add_res->add_size;
                if (!resource_size(res)) {
                        /* Unassigned: place it at the saved start, sized add_size. */
                        res->start = add_res->start;
                        res->end = res->start + add_size - 1;
                        if (pci_assign_resource(add_res->dev, idx))
                                reset_resource(res);
                } else {
                        /* Already assigned: try to grow it in place. */
                        resource_size_t align = add_res->min_align;
                        res->flags |= add_res->flags &
                                 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
                        if (pci_reassign_resource(add_res->dev, idx,
                                                  add_size, align))
                                dev_printk(KERN_DEBUG, &add_res->dev->dev,
                                           "failed to add %llx res[%d]=%pR\n",
                                           (unsigned long long)add_size,
                                           idx, res);
                }
out:
                /* Consume the request node, success or not. */
                list_del(&add_res->list);
                kfree(add_res);
        }
}
/**
 * assign_requested_resources_sorted() - satisfy resource requests
 *
 * @head : head of the list tracking requests for resources
 * @fail_head : head of the list tracking requests that could
 *              not be allocated
 *
 * Satisfy resource requests of each element in the list.  Add
 * requests that could not be satisfied to @fail_head (if non-NULL),
 * and reset the corresponding resource.
 */
static void assign_requested_resources_sorted(struct list_head *head,
                                              struct list_head *fail_head)
{
        struct resource *res;
        struct pci_dev_resource *dev_res;
        int idx;

        list_for_each_entry(dev_res, head, list) {
                res = dev_res->res;
                /* BAR index: resources form a contiguous array in pci_dev */
                idx = res - &dev_res->dev->resource[0];
                if (resource_size(res) &&
                    pci_assign_resource(dev_res->dev, idx)) {
                        if (fail_head && !pci_is_root_bus(dev_res->dev->bus)) {
                                /*
                                 * if the failed res is for ROM BAR, and it will
                                 * be enabled later, don't add it to the list
                                 */
                                if (!((idx == PCI_ROM_RESOURCE) &&
                                      (!(res->flags & IORESOURCE_ROM_ENABLE))))
                                        add_to_failed_list(fail_head,
                                                           dev_res->dev, res);
                        }
                        reset_resource(res);
                }
        }
}
/*
 * Assign the resources on @head, optionally with the extra sizes tracked
 * on @realloc_head, recording unassignable ones on @fail_head.
 */
static void __assign_resources_sorted(struct list_head *head,
                                      struct list_head *realloc_head,
                                      struct list_head *fail_head)
{
        /*
         * We should not assign the requested sizes first: the resources
         * could be adjacent, so a later reassign could not grow them one
         * by one inside the parent window.  Instead, first try to assign
         * requested + add_size in one go; if that works we are done early.
         * Otherwise fall back to assigning the requested sizes only, then
         * try to reassign add_size for individual resources.
         */
        LIST_HEAD(save_head);
        LIST_HEAD(local_fail_head);
        struct pci_dev_resource *save_res;
        struct pci_dev_resource *dev_res;

        /* Check if optional add_size is there */
        if (!realloc_head || list_empty(realloc_head))
                goto requested_and_reassign;

        /* Save original start, end, flags etc at first */
        list_for_each_entry(dev_res, head, list) {
                if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
                        /* Cannot snapshot everything: abandon the optimistic path. */
                        free_list(&save_head);
                        goto requested_and_reassign;
                }
        }

        /* Update res in head list with add_size in realloc_head list */
        list_for_each_entry(dev_res, head, list)
                dev_res->res->end += get_res_add_size(realloc_head,
                                                      dev_res->res);

        /* Try updated head list with add_size added */
        assign_requested_resources_sorted(head, &local_fail_head);

        /* all assigned with add_size ? */
        if (list_empty(&local_fail_head)) {
                /* Remove head list from realloc_head list */
                list_for_each_entry(dev_res, head, list)
                        remove_from_list(realloc_head, dev_res->res);
                free_list(&save_head);
                free_list(head);
                return;
        }

        free_list(&local_fail_head);
        /* Release assigned resource */
        list_for_each_entry(dev_res, head, list)
                if (dev_res->res->parent)
                        release_resource(dev_res->res);
        /* Restore start/end/flags from saved list */
        list_for_each_entry(save_res, &save_head, list) {
                struct resource *res = save_res->res;

                res->start = save_res->start;
                res->end = save_res->end;
                res->flags = save_res->flags;
        }
        free_list(&save_head);

requested_and_reassign:
        /* Satisfy the must-have resource requests */
        assign_requested_resources_sorted(head, fail_head);

        /* Try to satisfy any additional optional resource requests */
        if (realloc_head)
                reassign_resources_sorted(realloc_head, head);
        free_list(head);
}
  335. static void pdev_assign_resources_sorted(struct pci_dev *dev,
  336. struct list_head *add_head,
  337. struct list_head *fail_head)
  338. {
  339. LIST_HEAD(head);
  340. __dev_sort_resources(dev, &head);
  341. __assign_resources_sorted(&head, add_head, fail_head);
  342. }
  343. static void pbus_assign_resources_sorted(const struct pci_bus *bus,
  344. struct list_head *realloc_head,
  345. struct list_head *fail_head)
  346. {
  347. struct pci_dev *dev;
  348. LIST_HEAD(head);
  349. list_for_each_entry(dev, &bus->devices, bus_list)
  350. __dev_sort_resources(dev, &head);
  351. __assign_resources_sorted(&head, realloc_head, fail_head);
  352. }
  353. void pci_setup_cardbus(struct pci_bus *bus)
  354. {
  355. struct pci_dev *bridge = bus->self;
  356. struct resource *res;
  357. struct pci_bus_region region;
  358. dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
  359. bus->secondary, bus->subordinate);
  360. res = bus->resource[0];
  361. pcibios_resource_to_bus(bridge, &region, res);
  362. if (res->flags & IORESOURCE_IO) {
  363. /*
  364. * The IO resource is allocated a range twice as large as it
  365. * would normally need. This allows us to set both IO regs.
  366. */
  367. dev_info(&bridge->dev, " bridge window %pR\n", res);
  368. pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
  369. region.start);
  370. pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
  371. region.end);
  372. }
  373. res = bus->resource[1];
  374. pcibios_resource_to_bus(bridge, &region, res);
  375. if (res->flags & IORESOURCE_IO) {
  376. dev_info(&bridge->dev, " bridge window %pR\n", res);
  377. pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
  378. region.start);
  379. pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
  380. region.end);
  381. }
  382. res = bus->resource[2];
  383. pcibios_resource_to_bus(bridge, &region, res);
  384. if (res->flags & IORESOURCE_MEM) {
  385. dev_info(&bridge->dev, " bridge window %pR\n", res);
  386. pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
  387. region.start);
  388. pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
  389. region.end);
  390. }
  391. res = bus->resource[3];
  392. pcibios_resource_to_bus(bridge, &region, res);
  393. if (res->flags & IORESOURCE_MEM) {
  394. dev_info(&bridge->dev, " bridge window %pR\n", res);
  395. pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
  396. region.start);
  397. pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
  398. region.end);
  399. }
  400. }
  401. EXPORT_SYMBOL(pci_setup_cardbus);
  402. /* Initialize bridges with base/limit values we have collected.
  403. PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
  404. requires that if there is no I/O ports or memory behind the
  405. bridge, corresponding range must be turned off by writing base
  406. value greater than limit to the bridge's base/limit registers.
  407. Note: care must be taken when updating I/O base/limit registers
  408. of bridges which support 32-bit I/O. This update requires two
  409. config space writes, so it's quite possible that an I/O window of
  410. the bridge will have some undesirable address (e.g. 0) after the
  411. first write. Ditto 64-bit prefetchable MMIO. */
/* Program the bridge's I/O base/limit registers from bus->resource[0]. */
static void pci_setup_bridge_io(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l, io_upper16;

        /* Set up the top and bottom of the PCI I/O segment for this bus. */
        res = bus->resource[0];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_IO) {
                pci_read_config_dword(bridge, PCI_IO_BASE, &l);
                /* Keep the upper 16 bits of the dword (beyond base/limit bytes). */
                l &= 0xffff0000;
                l |= (region.start >> 8) & 0x00f0;      /* base: addr bits 15:12 */
                l |= region.end & 0xf000;               /* limit: addr bits 15:12 */
                /* Set up upper 16 bits of I/O base/limit. */
                io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
                dev_info(&bridge->dev, " bridge window %pR\n", res);
        } else {
                /* Clear upper 16 bits of I/O base/limit. */
                io_upper16 = 0;
                l = 0x00f0;     /* base > limit: window disabled */
        }
        /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
        /* Update lower 16 bits of I/O base/limit. */
        pci_write_config_dword(bridge, PCI_IO_BASE, l);
        /* Update upper 16 bits of I/O base/limit. */
        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
  441. static void pci_setup_bridge_mmio(struct pci_bus *bus)
  442. {
  443. struct pci_dev *bridge = bus->self;
  444. struct resource *res;
  445. struct pci_bus_region region;
  446. u32 l;
  447. /* Set up the top and bottom of the PCI Memory segment for this bus. */
  448. res = bus->resource[1];
  449. pcibios_resource_to_bus(bridge, &region, res);
  450. if (res->flags & IORESOURCE_MEM) {
  451. l = (region.start >> 16) & 0xfff0;
  452. l |= region.end & 0xfff00000;
  453. dev_info(&bridge->dev, " bridge window %pR\n", res);
  454. } else {
  455. l = 0x0000fff0;
  456. }
  457. pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
  458. }
/* Program the bridge's prefetchable memory base/limit (and upper-32 regs). */
static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
{
        struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l, bu, lu;

        /* Clear out the upper 32 bits of PREF limit.
           If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
           disables PREF range, which is ok. */
        pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

        /* Set up PREF base/limit. */
        bu = lu = 0;
        res = bus->resource[2];
        pcibios_resource_to_bus(bridge, &region, res);
        if (res->flags & IORESOURCE_PREFETCH) {
                l = (region.start >> 16) & 0xfff0;
                l |= region.end & 0xfff00000;
                /* Only a 64-bit capable window carries non-zero upper halves. */
                if (res->flags & IORESOURCE_MEM_64) {
                        bu = upper_32_bits(region.start);
                        lu = upper_32_bits(region.end);
                }
                dev_info(&bridge->dev, " bridge window %pR\n", res);
        } else {
                /* base > limit: window disabled */
                l = 0x0000fff0;
        }
        pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

        /* Set the upper 32 bits of PREF base & limit. */
        pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
        pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
}
/* Program the bridge windows selected by @type, then the control register. */
static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
        struct pci_dev *bridge = bus->self;

        dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
                 bus->secondary, bus->subordinate);

        /* Only touch the window types the caller asked for. */
        if (type & IORESOURCE_IO)
                pci_setup_bridge_io(bus);
        if (type & IORESOURCE_MEM)
                pci_setup_bridge_mmio(bus);
        if (type & IORESOURCE_PREFETCH)
                pci_setup_bridge_mmio_pref(bus);

        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
  502. void pci_setup_bridge(struct pci_bus *bus)
  503. {
  504. unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
  505. IORESOURCE_PREFETCH;
  506. __pci_setup_bridge(bus, type);
  507. }
/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0.
   Probing works by writing all-ones to a register and seeing
   whether anything sticks when it is read back. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
        u16 io;
        u32 pmem;
        struct pci_dev *bridge = bus->self;
        struct resource *b_res;

        b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
        /* Every bridge has a non-prefetchable memory window. */
        b_res[1].flags |= IORESOURCE_MEM;

        pci_read_config_word(bridge, PCI_IO_BASE, &io);
        if (!io) {
                /* Reads as zero: probe whether the register is writable. */
                pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
                pci_read_config_word(bridge, PCI_IO_BASE, &io);
                pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
        }
        if (io)
                b_res[0].flags |= IORESOURCE_IO;

        /* DECchip 21050 pass 2 errata: the bridge may miss an address
           disconnect boundary by one PCI data phase.
           Workaround: do not use prefetching on this device. */
        if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
                return;

        pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
        if (!pmem) {
                pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
                                       0xfff0fff0);
                pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
                pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
        }
        if (pmem) {
                b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
                if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
                    PCI_PREF_RANGE_TYPE_64) {
                        b_res[2].flags |= IORESOURCE_MEM_64;
                        b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
                }
        }

        /* double check if bridge does support 64 bit pref */
        if (b_res[2].flags & IORESOURCE_MEM_64) {
                u32 mem_base_hi, tmp;

                pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
                                      &mem_base_hi);
                pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
                                      0xffffffff);
                pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
                if (!tmp)
                        b_res[2].flags &= ~IORESOURCE_MEM_64;
                /* Restore whatever was there before the probe. */
                pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
                                       mem_base_hi);
        }
}
  561. /* Helper function for sizing routines: find first available
  562. bus resource of a given type. Note: we intentionally skip
  563. the bus resources which have already been assigned (that is,
  564. have non-NULL parent resource). */
  565. static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
  566. {
  567. int i;
  568. struct resource *r;
  569. unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
  570. IORESOURCE_PREFETCH;
  571. pci_bus_for_each_resource(bus, r, i) {
  572. if (r == &ioport_resource || r == &iomem_resource)
  573. continue;
  574. if (r && (r->flags & type_mask) == type && !r->parent)
  575. return r;
  576. }
  577. return NULL;
  578. }
  579. static resource_size_t calculate_iosize(resource_size_t size,
  580. resource_size_t min_size,
  581. resource_size_t size1,
  582. resource_size_t old_size,
  583. resource_size_t align)
  584. {
  585. if (size < min_size)
  586. size = min_size;
  587. if (old_size == 1 )
  588. old_size = 0;
  589. /* To be fixed in 2.5: we should have sort of HAVE_ISA
  590. flag in the struct pci_bus. */
  591. #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
  592. size = (size & 0xff) + ((size & ~0xffUL) << 2);
  593. #endif
  594. size = ALIGN(size + size1, align);
  595. if (size < old_size)
  596. size = old_size;
  597. return size;
  598. }
  599. static resource_size_t calculate_memsize(resource_size_t size,
  600. resource_size_t min_size,
  601. resource_size_t size1,
  602. resource_size_t old_size,
  603. resource_size_t align)
  604. {
  605. if (size < min_size)
  606. size = min_size;
  607. if (old_size == 1 )
  608. old_size = 0;
  609. if (size < old_size)
  610. size = old_size;
  611. size = ALIGN(size + size1, align);
  612. return size;
  613. }
/**
 * pbus_size_io() - size the io window of a given bus
 *
 * @bus : the bus
 * @min_size : the minimum io window that must be allocated
 * @add_size : additional optional io window
 * @realloc_head : track the additional io window on this list
 *
 * Sizing the IO windows of the PCI-PCI bridge is trivial,
 * since these windows have 4K granularity and the IO ranges
 * of non-bridge PCI devices are limited to 256 bytes.
 * We must be careful with the ISA aliasing though.
 */
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
                resource_size_t add_size, struct list_head *realloc_head)
{
        struct pci_dev *dev;
        struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
        unsigned long size = 0, size0 = 0, size1 = 0;
        resource_size_t children_add_size = 0;

        /* No free I/O window on this bus: nothing to size. */
        if (!b_res)
                return;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];
                        unsigned long r_size;

                        if (r->parent || !(r->flags & IORESOURCE_IO))
                                continue;
                        r_size = resource_size(r);

                        if (r_size < 0x400)
                                /* Might be re-aligned for ISA */
                                size += r_size;
                        else
                                size1 += r_size;

                        if (realloc_head)
                                children_add_size += get_res_add_size(realloc_head, r);
                }
        }

        /* size0: mandatory window; size1 below: window including optional size. */
        size0 = calculate_iosize(size, min_size, size1,
                        resource_size(b_res), 4096);
        if (children_add_size > add_size)
                add_size = children_add_size;
        size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
                calculate_iosize(size, min_size, add_size + size1,
                        resource_size(b_res), 4096);
        if (!size0 && !size1) {
                if (b_res->start || b_res->end)
                        dev_info(&bus->self->dev, "disabling bridge window "
                                 "%pR to [bus %02x-%02x] (unused)\n", b_res,
                                 bus->secondary, bus->subordinate);
                b_res->flags = 0;
                return;
        }
        /* Alignment of the IO window is always 4K */
        b_res->start = 4096;
        b_res->end = b_res->start + size0 - 1;
        b_res->flags |= IORESOURCE_STARTALIGN;
        if (size1 > size0 && realloc_head) {
                /* Record the optional extra so a later pass may grow the window. */
                add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
                dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
                                 "%pR to [bus %02x-%02x] add_size %lx\n", b_res,
                                 bus->secondary, bus->subordinate, size1-size0);
        }
}
/**
 * pbus_size_mem() - size the memory window of a given bus
 *
 * @bus : the bus
 * @mask : resource flag bits to compare against @type
 * @type : the flag pattern (within @mask) a resource must match
 * @min_size : the minimum memory window that must be allocated
 * @add_size : additional optional memory window
 * @realloc_head : track the additional memory window on this list
 *
 * Calculate the size of the bus and minimal alignment which
 * guarantees that all child resources fit in this size.
 *
 * Returns 0 when no matching free bus window exists, 1 otherwise.
 */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                         unsigned long type, resource_size_t min_size,
                         resource_size_t add_size,
                         struct list_head *realloc_head)
{
        struct pci_dev *dev;
        resource_size_t min_align, align, size, size0, size1;
        resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
        int order, max_order;
        struct resource *b_res = find_free_bus_resource(bus, type);
        unsigned int mem64_mask = 0;
        resource_size_t children_add_size = 0;

        if (!b_res)
                return 0;

        memset(aligns, 0, sizeof(aligns));
        max_order = 0;
        size = 0;

        /* The window is 64-bit only if every child below is 64-bit capable. */
        mem64_mask = b_res->flags & IORESOURCE_MEM_64;
        b_res->flags &= ~IORESOURCE_MEM_64;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;

                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                        struct resource *r = &dev->resource[i];
                        resource_size_t r_size;

                        if (r->parent || (r->flags & mask) != type)
                                continue;
                        r_size = resource_size(r);
#ifdef CONFIG_PCI_IOV
                        /* put SRIOV requested res to the optional list */
                        if (realloc_head && i >= PCI_IOV_RESOURCES &&
                                        i <= PCI_IOV_RESOURCE_END) {
                                r->end = r->start - 1;
                                add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */);
                                children_add_size += r_size;
                                continue;
                        }
#endif
                        /* For bridges size != alignment */
                        align = pci_resource_alignment(dev, r);
                        /*
                         * order = log2(align) - 20, i.e. alignment in MiB units.
                         * NOTE(review): __ffs(0) is undefined; this assumes
                         * align != 0 for every matching resource - confirm.
                         */
                        order = __ffs(align) - 20;
                        if (order > 11) {
                                dev_warn(&dev->dev, "disabling BAR %d: %pR "
                                         "(bad alignment %#llx)\n", i, r,
                                         (unsigned long long) align);
                                r->flags = 0;
                                continue;
                        }
                        size += r_size;
                        if (order < 0)
                                order = 0;
                        /* Exclude ranges with size > align from
                           calculation of the alignment. */
                        if (r_size == align)
                                aligns[order] += align;
                        if (order > max_order)
                                max_order = order;
                        mem64_mask &= r->flags & IORESOURCE_MEM_64;

                        if (realloc_head)
                                children_add_size += get_res_add_size(realloc_head, r);
                }
        }

        /* Derive the minimum window alignment from the per-order totals. */
        align = 0;
        min_align = 0;
        for (order = 0; order <= max_order; order++) {
                resource_size_t align1 = 1;

                align1 <<= (order + 20);

                if (!align)
                        min_align = align1;
                else if (ALIGN(align + min_align, min_align) < align1)
                        min_align = align1 >> 1;
                align += aligns[order];
        }

        /* size0: mandatory window; size1: window including optional size. */
        size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
        if (children_add_size > add_size)
                add_size = children_add_size;
        size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
                calculate_memsize(size, min_size, add_size,
                                resource_size(b_res), min_align);
        if (!size0 && !size1) {
                if (b_res->start || b_res->end)
                        dev_info(&bus->self->dev, "disabling bridge window "
                                 "%pR to [bus %02x-%02x] (unused)\n", b_res,
                                 bus->secondary, bus->subordinate);
                b_res->flags = 0;
                return 1;
        }
        b_res->start = min_align;
        b_res->end = size0 + min_align - 1;
        b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
        if (size1 > size0 && realloc_head) {
                /* Record the optional extra so a later pass may grow the window. */
                add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
                dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
                                 "%pR to [bus %02x-%02x] add_size %llx\n", b_res,
                                 bus->secondary, bus->subordinate, (unsigned long long)size1-size0);
        }
        return 1;
}
  787. unsigned long pci_cardbus_resource_alignment(struct resource *res)
  788. {
  789. if (res->flags & IORESOURCE_IO)
  790. return pci_cardbus_io_size;
  791. if (res->flags & IORESOURCE_MEM)
  792. return pci_cardbus_mem_size;
  793. return 0;
  794. }
/*
 * Size the windows of a CardBus bridge.  Unlike PCI-PCI bridges, CardBus
 * windows get fixed sizes rather than sizes computed from child BARs.
 */
static void pci_bus_size_cardbus(struct pci_bus *bus,
			struct list_head *realloc_head)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
	if (realloc_head)
		add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */);

	b_res[1].start = 0;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
	if (realloc_head)
		add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */);

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge: try to set the enable bit, then read it
	 * back — it stays clear if the bridge doesn't support it.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
		if (realloc_head)
			add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */);

		b_res[3].start = 0;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
		if (realloc_head)
			add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */);
	} else {
		b_res[3].start = 0;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
		if (realloc_head)
			add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */);
	}

	/* set the size of the resource to zero, so that the resource does not
	 * get assigned during required-resource allocation cycle but gets assigned
	 * during the optional-resource allocation cycle.
	 */
	b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
	b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
}
/*
 * Compute, depth-first, the window sizes needed by every bridge at or
 * below @bus.  @realloc_head (may be NULL) collects optional additional
 * window space requested on top of the must-have sizes.
 */
void __ref __pci_bus_size_bridges(struct pci_bus *bus,
			struct list_head *realloc_head)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;
	resource_size_t additional_mem_size = 0, additional_io_size = 0;

	/* size subordinate buses first, so their needs roll up into ours */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b, realloc_head);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			__pci_bus_size_bridges(b, realloc_head);
			break;
		}
	}

	/* The root bus? It has no upstream bridge window to size. */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		if (bus->self->is_hotplug_bridge) {
			/* leave headroom for devices hotplugged later */
			additional_io_size = pci_hotplug_io_size;
			additional_mem_size = pci_hotplug_mem_size;
		}
		/*
		 * Fall through
		 */
	default:
		/*
		 * When realloc_head is supplied, the extra headroom is
		 * requested as optional add_size; otherwise it is folded
		 * into the minimum size.
		 */
		pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
			     additional_io_size, realloc_head);
		/* If the bridge supports prefetchable range, size it
		   separately. If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask,
				  realloc_head ? 0 : additional_mem_size,
				  additional_mem_size, realloc_head))
			mask = prefmask; /* Success, size non-prefetch only. */
		else
			/* no pref window: the one window carries both shares */
			additional_mem_size += additional_mem_size;
		pbus_size_mem(bus, mask, IORESOURCE_MEM,
				realloc_head ? 0 : additional_mem_size,
				additional_mem_size, realloc_head);
		break;
	}
}
/* Size all bridge windows below @bus, without tracking optional extras. */
void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	__pci_bus_size_bridges(bus, NULL);
}
EXPORT_SYMBOL(pci_bus_size_bridges);
  913. static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
  914. struct list_head *realloc_head,
  915. struct list_head *fail_head)
  916. {
  917. struct pci_bus *b;
  918. struct pci_dev *dev;
  919. pbus_assign_resources_sorted(bus, realloc_head, fail_head);
  920. list_for_each_entry(dev, &bus->devices, bus_list) {
  921. b = dev->subordinate;
  922. if (!b)
  923. continue;
  924. __pci_bus_assign_resources(b, realloc_head, fail_head);
  925. switch (dev->class >> 8) {
  926. case PCI_CLASS_BRIDGE_PCI:
  927. if (!pci_is_enabled(dev))
  928. pci_setup_bridge(b);
  929. break;
  930. case PCI_CLASS_BRIDGE_CARDBUS:
  931. pci_setup_cardbus(b);
  932. break;
  933. default:
  934. dev_info(&dev->dev, "not setting up bridge for bus "
  935. "%04x:%02x\n", pci_domain_nr(b), b->number);
  936. break;
  937. }
  938. }
  939. }
/* Assign resources below @bus without extras tracking or failure list. */
void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	__pci_bus_assign_resources(bus, NULL, NULL);
}
EXPORT_SYMBOL(pci_bus_assign_resources);
/*
 * Assign resources for @bridge itself, then for everything on its
 * subordinate bus, and finally program the bridge windows.
 */
static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
					 struct list_head *add_head,
					 struct list_head *fail_head)
{
	struct pci_bus *b;

	/* cast drops const; the sorted-assign helper takes a mutable dev */
	pdev_assign_resources_sorted((struct pci_dev *)bridge,
					 add_head, fail_head);

	b = bridge->subordinate;
	if (!b)
		return;

	__pci_bus_assign_resources(b, add_head, fail_head);

	switch (bridge->class >> 8) {
	case PCI_CLASS_BRIDGE_PCI:
		pci_setup_bridge(b);
		break;

	case PCI_CLASS_BRIDGE_CARDBUS:
		pci_setup_cardbus(b);
		break;

	default:
		dev_info(&bridge->dev, "not setting up bridge for bus "
			 "%04x:%02x\n", pci_domain_nr(b), b->number);
		break;
	}
}
/*
 * Release the bridge windows of @bus->self matching @type from the
 * resource tree, then re-program the bridge so the hardware window
 * is closed too.
 */
static void pci_bridge_release_resources(struct pci_bus *bus,
					  unsigned long type)
{
	int idx;
	bool changed = false;
	struct pci_dev *dev;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	dev = bus->self;
	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
	     idx++) {
		r = &dev->resource[idx];
		/* only windows of the requested type ... */
		if ((r->flags & type_mask) != type)
			continue;
		/* ... that are actually inserted in the resource tree */
		if (!r->parent)
			continue;
		/*
		 * if there are children under that, we should release them
		 * all
		 */
		release_child_resources(r);
		if (!release_resource(r)) {
			dev_printk(KERN_DEBUG, &dev->dev,
				 "resource %d %pR released\n", idx, r);
			/* keep the old size: encode it as [0, size - 1] */
			r->end = resource_size(r) - 1;
			r->start = 0;
			r->flags = 0;
			changed = true;
		}
	}

	if (changed) {
		/* avoiding touch the one without PREF */
		if (type & IORESOURCE_PREFETCH)
			type = IORESOURCE_PREFETCH;
		__pci_setup_bridge(bus, type);
	}
}
/* Scope of pci_bus_release_bridge_resources(): leaves only, or all below */
enum release_type {
	leaf_only,	/* release windows of leaf bridges only */
	whole_subtree,	/* release windows of every bridge in the subtree */
};
/*
 * Try to release PCI bridge resources from leaf bridges (or, for
 * whole_subtree, from every bridge below @bus), so that a bigger
 * window can be allocated later.
 */
static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
					 unsigned long type,
					 enum release_type rel_type)
{
	struct pci_dev *dev;
	bool is_leaf_bridge = true;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		/* a subordinate bus exists, so @bus is not a leaf */
		is_leaf_bridge = false;

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			continue;

		if (rel_type == whole_subtree)
			pci_bus_release_bridge_resources(b, type,
						 whole_subtree);
	}

	/* root bus resources belong to the host bridge; never released here */
	if (pci_is_root_bus(bus))
		return;

	if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	if ((rel_type == whole_subtree) || is_leaf_bridge)
		pci_bridge_release_resources(bus, type);
}
  1040. static void pci_bus_dump_res(struct pci_bus *bus)
  1041. {
  1042. struct resource *res;
  1043. int i;
  1044. pci_bus_for_each_resource(bus, res, i) {
  1045. if (!res || !res->end || !res->flags)
  1046. continue;
  1047. dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
  1048. }
  1049. }
  1050. static void pci_bus_dump_resources(struct pci_bus *bus)
  1051. {
  1052. struct pci_bus *b;
  1053. struct pci_dev *dev;
  1054. pci_bus_dump_res(bus);
  1055. list_for_each_entry(dev, &bus->devices, bus_list) {
  1056. b = dev->subordinate;
  1057. if (!b)
  1058. continue;
  1059. pci_bus_dump_resources(b);
  1060. }
  1061. }
  1062. static int __init pci_bus_get_depth(struct pci_bus *bus)
  1063. {
  1064. int depth = 0;
  1065. struct pci_dev *dev;
  1066. list_for_each_entry(dev, &bus->devices, bus_list) {
  1067. int ret;
  1068. struct pci_bus *b = dev->subordinate;
  1069. if (!b)
  1070. continue;
  1071. ret = pci_bus_get_depth(b);
  1072. if (ret + 1 > depth)
  1073. depth = ret + 1;
  1074. }
  1075. return depth;
  1076. }
  1077. static int __init pci_get_max_depth(void)
  1078. {
  1079. int depth = 0;
  1080. struct pci_bus *bus;
  1081. list_for_each_entry(bus, &pci_root_buses, node) {
  1082. int ret;
  1083. ret = pci_bus_get_depth(bus);
  1084. if (ret > depth)
  1085. depth = ret;
  1086. }
  1087. return depth;
  1088. }
/*
 * Assign all unassigned resources in the system.
 *
 * The first try will not touch PCI bridge windows; the second and later
 * tries clear small leaf bridge windows first.  We stop when we reach
 * the maximum bus depth and still cannot find a good layout.
 */
void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;
	LIST_HEAD(realloc_head); /* list of resources that
					want additional resources */
	struct list_head *add_list = NULL;
	int tried_times = 0;
	enum release_type rel_type = leaf_only;
	LIST_HEAD(fail_head);
	struct pci_dev_resource *fail_res;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;
	unsigned long failed_type;
	int pci_try_num = 1;

	/* don't realloc if asked to do so */
	if (pci_realloc_enabled()) {
		int max_depth = pci_get_max_depth();

		/* one try per level of bridge nesting, plus the first pass */
		pci_try_num = max_depth + 1;
		printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
			 max_depth, pci_try_num);
	}

again:
	/*
	 * last try will use add_list, otherwise will try good to have as
	 * must have, so can realloc parent bridge resource
	 */
	if (tried_times + 1 == pci_try_num)
		add_list = &realloc_head;
	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node)
		__pci_bus_size_bridges(bus, add_list);

	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		__pci_bus_assign_resources(bus, add_list, &fail_head);
	/* the assign pass must consume every optional entry */
	if (add_list)
		BUG_ON(!list_empty(add_list));
	tried_times++;

	/* any device complain? */
	if (list_empty(&fail_head))
		goto enable_and_dump;

	/* collect the resource types that failed to assign */
	failed_type = 0;
	list_for_each_entry(fail_res, &fail_head, list)
		failed_type |= fail_res->flags;

	/*
	 * io port are tight, don't try extra
	 * or if reach the limit, don't want to try more
	 */
	failed_type &= type_mask;
	if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
		free_list(&fail_head);
		goto enable_and_dump;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/* third times and later will not check if it is leaf */
	if ((tried_times + 1) > 2)
		rel_type = whole_subtree;

	/*
	 * Try to release leaf bridge's resources that doesn't fit resource of
	 * child device under that bridge
	 */
	list_for_each_entry(fail_res, &fail_head, list) {
		bus = fail_res->dev->bus;
		pci_bus_release_bridge_resources(bus,
						 fail_res->flags & type_mask,
						 rel_type);
	}
	/* restore size and flags */
	list_for_each_entry(fail_res, &fail_head, list) {
		struct resource *res = fail_res->res;

		res->start = fail_res->start;
		res->end = fail_res->end;
		res->flags = fail_res->flags;
		/* bridge windows are resized fresh on the next pass */
		if (fail_res->dev->subordinate)
			res->flags = 0;
	}
	free_list(&fail_head);

	goto again;

enable_and_dump:
	/* Depth last, update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_enable_bridges(bus);

	/* dump the resource on buses */
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_bus_dump_resources(bus);
}
/*
 * Assign any unassigned resources below @bridge (e.g. after hotplug),
 * retrying once with bridge windows released if the first pass fails,
 * then re-enable the bridge.
 */
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	LIST_HEAD(add_list); /* list of resources that
					want additional resources */
	int tried_times = 0;
	LIST_HEAD(fail_head);
	struct pci_dev_resource *fail_res;
	int retval;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

again:
	__pci_bus_size_bridges(parent, &add_list);
	__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
	/* the assign pass must consume every optional entry */
	BUG_ON(!list_empty(&add_list));
	tried_times++;

	if (list_empty(&fail_head))
		goto enable_all;

	if (tried_times >= 2) {
		/* still fail, don't need to try more */
		free_list(&fail_head);
		goto enable_all;
	}

	printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
			 tried_times + 1);

	/*
	 * Try to release leaf bridge's resources that doesn't fit resource of
	 * child device under that bridge
	 */
	list_for_each_entry(fail_res, &fail_head, list) {
		struct pci_bus *bus = fail_res->dev->bus;
		unsigned long flags = fail_res->flags;

		pci_bus_release_bridge_resources(bus, flags & type_mask,
						 whole_subtree);
	}
	/* restore size and flags */
	list_for_each_entry(fail_res, &fail_head, list) {
		struct resource *res = fail_res->res;

		res->start = fail_res->start;
		res->end = fail_res->end;
		res->flags = fail_res->flags;
		/* bridge windows are resized fresh on the next pass */
		if (fail_res->dev->subordinate)
			res->flags = 0;
	}
	free_list(&fail_head);

	goto again;

enable_all:
	/* NOTE(review): retval is assigned but never checked — confirm
	 * whether a pci_reenable_device() failure should be reported. */
	retval = pci_reenable_device(bridge);
	pci_set_master(bridge);
	pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
  1234. #ifdef CONFIG_HOTPLUG
/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * size and assign bridge resources, and enable everything found.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;
	struct pci_dev *dev;
	LIST_HEAD(add_list); /* list of resources that
					want additional resources */

	max = pci_scan_child_bus(bus);

	/* hold the bus list stable while sizing subordinate bridges */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list)
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			if (dev->subordinate)
				__pci_bus_size_bridges(dev->subordinate,
							 &add_list);
	up_read(&pci_bus_sem);
	__pci_bus_assign_resources(bus, &add_list, NULL);
	/* the assign pass must consume every optional entry */
	BUG_ON(!list_empty(&add_list));

	pci_enable_bridges(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
  1266. #endif