|
@@ -780,11 +780,65 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+static int __init pci_bus_get_depth(struct pci_bus *bus)
|
|
|
+{
|
|
|
+ int depth = 0;
|
|
|
+ struct pci_dev *dev;
|
|
|
+
|
|
|
+ list_for_each_entry(dev, &bus->devices, bus_list) {
|
|
|
+ int ret;
|
|
|
+ struct pci_bus *b = dev->subordinate;
|
|
|
+ if (!b)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ ret = pci_bus_get_depth(b);
|
|
|
+ if (ret + 1 > depth)
|
|
|
+ depth = ret + 1;
|
|
|
+ }
|
|
|
+
|
|
|
+ return depth;
|
|
|
+}
|
|
|
+static int __init pci_get_max_depth(void)
|
|
|
+{
|
|
|
+ int depth = 0;
|
|
|
+ struct pci_bus *bus;
|
|
|
+
|
|
|
+ list_for_each_entry(bus, &pci_root_buses, node) {
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ ret = pci_bus_get_depth(bus);
|
|
|
+ if (ret > depth)
|
|
|
+ depth = ret;
|
|
|
+ }
|
|
|
+
|
|
|
+ return depth;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * first try will not touch pci bridge res
|
|
|
+ * second and later tries will clear small leaf bridge res
|
|
|
+ * will stop at the max depth if it cannot find a good one
|
|
|
+ */
|
|
|
void __init
|
|
|
pci_assign_unassigned_resources(void)
|
|
|
{
|
|
|
struct pci_bus *bus;
|
|
|
+ int tried_times = 0;
|
|
|
+ enum release_type rel_type = leaf_only;
|
|
|
+ struct resource_list_x head, *list;
|
|
|
+ unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
|
|
|
+ IORESOURCE_PREFETCH;
|
|
|
+ unsigned long failed_type;
|
|
|
+ int max_depth = pci_get_max_depth();
|
|
|
+ int pci_try_num;
|
|
|
+
|
|
|
+ head.next = NULL;
|
|
|
|
|
|
+ pci_try_num = max_depth + 1;
|
|
|
+ printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
|
|
|
+ max_depth, pci_try_num);
|
|
|
+
|
|
|
+again:
|
|
|
/* Depth first, calculate sizes and alignments of all
|
|
|
subordinate buses. */
|
|
|
list_for_each_entry(bus, &pci_root_buses, node) {
|
|
@@ -792,9 +846,65 @@ pci_assign_unassigned_resources(void)
|
|
|
}
|
|
|
/* Depth last, allocate resources and update the hardware. */
|
|
|
list_for_each_entry(bus, &pci_root_buses, node) {
|
|
|
- pci_bus_assign_resources(bus);
|
|
|
- pci_enable_bridges(bus);
|
|
|
+ __pci_bus_assign_resources(bus, &head);
|
|
|
+ }
|
|
|
+ tried_times++;
|
|
|
+
|
|
|
+	/* did any device complain? */
|
|
|
+ if (!head.next)
|
|
|
+ goto enable_and_dump;
|
|
|
+ failed_type = 0;
|
|
|
+ for (list = head.next; list;) {
|
|
|
+ failed_type |= list->flags;
|
|
|
+ list = list->next;
|
|
|
+ }
|
|
|
+ /*
|
|
|
+	 * I/O ports are tight, don't try extra passes
|
|
|
+	 * or if we reached the limit, don't try more
|
|
|
+ */
|
|
|
+ failed_type &= type_mask;
|
|
|
+ if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
|
|
|
+ free_failed_list(&head);
|
|
|
+ goto enable_and_dump;
|
|
|
+ }
|
|
|
+
|
|
|
+ printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
|
|
|
+ tried_times + 1);
|
|
|
+
|
|
|
+	/* third and later tries will not check if it is a leaf */
|
|
|
+ if ((tried_times + 1) > 2)
|
|
|
+ rel_type = whole_subtree;
|
|
|
+
|
|
|
+ /*
|
|
|
+	 * Try to release leaf bridge's resources that don't fit resources of
|
|
|
+	 * child devices under that bridge
|
|
|
+ */
|
|
|
+ for (list = head.next; list;) {
|
|
|
+ bus = list->dev->bus;
|
|
|
+ pci_bus_release_bridge_resources(bus, list->flags & type_mask,
|
|
|
+ rel_type);
|
|
|
+ list = list->next;
|
|
|
}
|
|
|
+ /* restore size and flags */
|
|
|
+ for (list = head.next; list;) {
|
|
|
+ struct resource *res = list->res;
|
|
|
+
|
|
|
+ res->start = list->start;
|
|
|
+ res->end = list->end;
|
|
|
+ res->flags = list->flags;
|
|
|
+ if (list->dev->subordinate)
|
|
|
+ res->flags = 0;
|
|
|
+
|
|
|
+ list = list->next;
|
|
|
+ }
|
|
|
+ free_failed_list(&head);
|
|
|
+
|
|
|
+ goto again;
|
|
|
+
|
|
|
+enable_and_dump:
|
|
|
+ /* Depth last, update the hardware. */
|
|
|
+ list_for_each_entry(bus, &pci_root_buses, node)
|
|
|
+ pci_enable_bridges(bus);
|
|
|
|
|
|
/* dump the resource on buses */
|
|
|
list_for_each_entry(bus, &pci_root_buses, node) {
|