io_init.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
  7. */
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/string.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/geo.h>
#include <asm/sn/io.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tioca_provider.h>
#include <asm/sn/tioce_provider.h>
#include "xtalk/hubdev.h"
#include "xtalk/xwidgetdev.h"
  26. extern void sn_init_cpei_timer(void);
  27. extern void register_sn_procfs(void);
  28. static struct list_head sn_sysdata_list;
  29. /* sysdata list struct */
  30. struct sysdata_el {
  31. struct list_head entry;
  32. void *sysdata;
  33. };
  34. struct slab_info {
  35. struct hubdev_info hubdev;
  36. };
  37. struct brick {
  38. moduleid_t id; /* Module ID of this module */
  39. struct slab_info slab_info[MAX_SLABS + 1];
  40. };
  41. int sn_ioif_inited; /* SN I/O infrastructure initialized? */
  42. struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
  43. static int max_segment_number; /* Default highest segment number */
  44. static int max_pcibus_number = 255; /* Default highest pci bus number */
  45. /*
  46. * Hooks and struct for unsupported pci providers
  47. */
  48. static dma_addr_t
  49. sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
  50. {
  51. return 0;
  52. }
  53. static void
  54. sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
  55. {
  56. return;
  57. }
  58. static void *
  59. sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
  60. {
  61. return NULL;
  62. }
  63. static struct sn_pcibus_provider sn_pci_default_provider = {
  64. .dma_map = sn_default_pci_map,
  65. .dma_map_consistent = sn_default_pci_map,
  66. .dma_unmap = sn_default_pci_unmap,
  67. .bus_fixup = sn_default_pci_bus_fixup,
  68. };
  69. /*
  70. * Retrieve the DMA Flush List given nasid, widget, and device.
  71. * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
  72. */
  73. static inline u64
  74. sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
  75. u64 address)
  76. {
  77. struct ia64_sal_retval ret_stuff;
  78. ret_stuff.status = 0;
  79. ret_stuff.v0 = 0;
  80. SAL_CALL_NOLOCK(ret_stuff,
  81. (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
  82. (u64) nasid, (u64) widget_num,
  83. (u64) device_num, (u64) address, 0, 0, 0);
  84. return ret_stuff.status;
  85. }
  86. /*
  87. * Retrieve the hub device info structure for the given nasid.
  88. */
  89. static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
  90. {
  91. struct ia64_sal_retval ret_stuff;
  92. ret_stuff.status = 0;
  93. ret_stuff.v0 = 0;
  94. SAL_CALL_NOLOCK(ret_stuff,
  95. (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
  96. (u64) handle, (u64) address, 0, 0, 0, 0, 0);
  97. return ret_stuff.v0;
  98. }
  99. /*
  100. * Retrieve the pci bus information given the bus number.
  101. */
  102. static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
  103. {
  104. struct ia64_sal_retval ret_stuff;
  105. ret_stuff.status = 0;
  106. ret_stuff.v0 = 0;
  107. SAL_CALL_NOLOCK(ret_stuff,
  108. (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
  109. (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
  110. return ret_stuff.v0;
  111. }
  112. /*
  113. * Retrieve the pci device information given the bus and device|function number.
  114. */
  115. static inline u64
  116. sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
  117. u64 sn_irq_info)
  118. {
  119. struct ia64_sal_retval ret_stuff;
  120. ret_stuff.status = 0;
  121. ret_stuff.v0 = 0;
  122. SAL_CALL_NOLOCK(ret_stuff,
  123. (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
  124. (u64) segment, (u64) bus_number, (u64) devfn,
  125. (u64) pci_dev,
  126. sn_irq_info, 0, 0);
  127. return ret_stuff.v0;
  128. }
  129. /*
  130. * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
  131. * device.
  132. */
  133. inline struct pcidev_info *
  134. sn_pcidev_info_get(struct pci_dev *dev)
  135. {
  136. struct pcidev_info *pcidev;
  137. list_for_each_entry(pcidev,
  138. &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
  139. if (pcidev->pdi_linux_pcidev == dev) {
  140. return pcidev;
  141. }
  142. }
  143. return NULL;
  144. }
  145. /* Older PROM flush WAR
  146. *
  147. * 01/16/06 -- This war will be in place until a new official PROM is released.
  148. * Additionally note that the struct sn_flush_device_war also has to be
  149. * removed from arch/ia64/sn/include/xtalk/hubdev.h
  150. */
  151. static u8 war_implemented = 0;
  152. static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
  153. struct sn_flush_device_common *common)
  154. {
  155. struct sn_flush_device_war *war_list;
  156. struct sn_flush_device_war *dev_entry;
  157. struct ia64_sal_retval isrv = {0,0,0,0};
  158. if (!war_implemented) {
  159. printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
  160. "PROM flush WAR\n");
  161. war_implemented = 1;
  162. }
  163. war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
  164. if (!war_list)
  165. BUG();
  166. SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
  167. nasid, widget, __pa(war_list), 0, 0, 0 ,0);
  168. if (isrv.status)
  169. panic("sn_device_fixup_war failed: %s\n",
  170. ia64_sal_strerror(isrv.status));
  171. dev_entry = war_list + device;
  172. memcpy(common,dev_entry, sizeof(*common));
  173. kfree(war_list);
  174. return isrv.status;
  175. }
  176. /*
  177. * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for
  178. * each node in the system.
  179. */
  180. static void __init sn_fixup_ionodes(void)
  181. {
  182. struct sn_flush_device_kernel *sn_flush_device_kernel;
  183. struct sn_flush_device_kernel *dev_entry;
  184. struct hubdev_info *hubdev;
  185. u64 status;
  186. u64 nasid;
  187. int i, widget, device, size;
  188. /*
  189. * Get SGI Specific HUB chipset information.
  190. * Inform Prom that this kernel can support domain bus numbering.
  191. */
  192. for (i = 0; i < num_cnodes; i++) {
  193. hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
  194. nasid = cnodeid_to_nasid(i);
  195. hubdev->max_segment_number = 0xffffffff;
  196. hubdev->max_pcibus_number = 0xff;
  197. status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
  198. if (status)
  199. continue;
  200. /* Save the largest Domain and pcibus numbers found. */
  201. if (hubdev->max_segment_number) {
  202. /*
  203. * Dealing with a Prom that supports segments.
  204. */
  205. max_segment_number = hubdev->max_segment_number;
  206. max_pcibus_number = hubdev->max_pcibus_number;
  207. }
  208. /* Attach the error interrupt handlers */
  209. if (nasid & 1)
  210. ice_error_init(hubdev);
  211. else
  212. hub_error_init(hubdev);
  213. for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
  214. hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
  215. if (!hubdev->hdi_flush_nasid_list.widget_p)
  216. continue;
  217. size = (HUB_WIDGET_ID_MAX + 1) *
  218. sizeof(struct sn_flush_device_kernel *);
  219. hubdev->hdi_flush_nasid_list.widget_p =
  220. kzalloc(size, GFP_KERNEL);
  221. if (!hubdev->hdi_flush_nasid_list.widget_p)
  222. BUG();
  223. for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
  224. size = DEV_PER_WIDGET *
  225. sizeof(struct sn_flush_device_kernel);
  226. sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
  227. if (!sn_flush_device_kernel)
  228. BUG();
  229. dev_entry = sn_flush_device_kernel;
  230. for (device = 0; device < DEV_PER_WIDGET;
  231. device++,dev_entry++) {
  232. size = sizeof(struct sn_flush_device_common);
  233. dev_entry->common = kzalloc(size, GFP_KERNEL);
  234. if (!dev_entry->common)
  235. BUG();
  236. if (sn_prom_feature_available(
  237. PRF_DEVICE_FLUSH_LIST))
  238. status = sal_get_device_dmaflush_list(
  239. nasid, widget, device,
  240. (u64)(dev_entry->common));
  241. else
  242. status = sn_device_fixup_war(nasid,
  243. widget, device,
  244. dev_entry->common);
  245. if (status != SALRET_OK)
  246. panic("SAL call failed: %s\n",
  247. ia64_sal_strerror(status));
  248. spin_lock_init(&dev_entry->sfdl_flush_lock);
  249. }
  250. if (sn_flush_device_kernel)
  251. hubdev->hdi_flush_nasid_list.widget_p[widget] =
  252. sn_flush_device_kernel;
  253. }
  254. }
  255. }
  256. /*
  257. * sn_pci_window_fixup() - Create a pci_window for each device resource.
  258. * Until ACPI support is added, we need this code
  259. * to setup pci_windows for use by
  260. * pcibios_bus_to_resource(),
  261. * pcibios_resource_to_bus(), etc.
  262. */
  263. static void
  264. sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
  265. s64 * pci_addrs)
  266. {
  267. struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
  268. unsigned int i;
  269. unsigned int idx;
  270. unsigned int new_count;
  271. struct pci_window *new_window;
  272. if (count == 0)
  273. return;
  274. idx = controller->windows;
  275. new_count = controller->windows + count;
  276. new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
  277. if (new_window == NULL)
  278. BUG();
  279. if (controller->window) {
  280. memcpy(new_window, controller->window,
  281. sizeof(struct pci_window) * controller->windows);
  282. kfree(controller->window);
  283. }
  284. /* Setup a pci_window for each device resource. */
  285. for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
  286. if (pci_addrs[i] == -1)
  287. continue;
  288. new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
  289. new_window[idx].resource = dev->resource[i];
  290. idx++;
  291. }
  292. controller->windows = new_count;
  293. controller->window = new_window;
  294. }
  295. void sn_pci_unfixup_slot(struct pci_dev *dev)
  296. {
  297. struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
  298. sn_irq_unfixup(dev);
  299. pci_dev_put(host_pci_dev);
  300. pci_dev_put(dev);
  301. }
  302. /*
  303. * sn_pci_fixup_slot() - This routine sets up a slot's resources
  304. * consistent with the Linux PCI abstraction layer. Resources acquired
  305. * from our PCI provider include PIO maps to BAR space and interrupt
  306. * objects.
  307. */
  308. void sn_pci_fixup_slot(struct pci_dev *dev)
  309. {
  310. unsigned int count = 0;
  311. int idx;
  312. int segment = pci_domain_nr(dev->bus);
  313. int status = 0;
  314. struct pcibus_bussoft *bs;
  315. struct pci_bus *host_pci_bus;
  316. struct pci_dev *host_pci_dev;
  317. struct pcidev_info *pcidev_info;
  318. s64 pci_addrs[PCI_ROM_RESOURCE + 1];
  319. struct sn_irq_info *sn_irq_info;
  320. unsigned long size;
  321. unsigned int bus_no, devfn;
  322. pci_dev_get(dev); /* for the sysdata pointer */
  323. pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
  324. if (!pcidev_info)
  325. BUG(); /* Cannot afford to run out of memory */
  326. sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
  327. if (!sn_irq_info)
  328. BUG(); /* Cannot afford to run out of memory */
  329. /* Call to retrieve pci device information needed by kernel. */
  330. status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
  331. dev->devfn,
  332. (u64) __pa(pcidev_info),
  333. (u64) __pa(sn_irq_info));
  334. if (status)
  335. BUG(); /* Cannot get platform pci device information */
  336. /* Add pcidev_info to list in sn_pci_controller struct */
  337. list_add_tail(&pcidev_info->pdi_list,
  338. &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));
  339. /* Copy over PIO Mapped Addresses */
  340. for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
  341. unsigned long start, end, addr;
  342. if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
  343. pci_addrs[idx] = -1;
  344. continue;
  345. }
  346. start = dev->resource[idx].start;
  347. end = dev->resource[idx].end;
  348. size = end - start;
  349. if (size == 0) {
  350. pci_addrs[idx] = -1;
  351. continue;
  352. }
  353. pci_addrs[idx] = start;
  354. count++;
  355. addr = pcidev_info->pdi_pio_mapped_addr[idx];
  356. addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
  357. dev->resource[idx].start = addr;
  358. dev->resource[idx].end = addr + size;
  359. if (dev->resource[idx].flags & IORESOURCE_IO)
  360. dev->resource[idx].parent = &ioport_resource;
  361. else
  362. dev->resource[idx].parent = &iomem_resource;
  363. }
  364. /* Create a pci_window in the pci_controller struct for
  365. * each device resource.
  366. */
  367. if (count > 0)
  368. sn_pci_window_fixup(dev, count, pci_addrs);
  369. /*
  370. * Using the PROMs values for the PCI host bus, get the Linux
  371. * PCI host_pci_dev struct and set up host bus linkages
  372. */
  373. bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
  374. devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
  375. host_pci_bus = pci_find_bus(segment, bus_no);
  376. host_pci_dev = pci_get_slot(host_pci_bus, devfn);
  377. pcidev_info->host_pci_dev = host_pci_dev;
  378. pcidev_info->pdi_linux_pcidev = dev;
  379. pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
  380. bs = SN_PCIBUS_BUSSOFT(dev->bus);
  381. pcidev_info->pdi_pcibus_info = bs;
  382. if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
  383. SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
  384. } else {
  385. SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
  386. }
  387. /* Only set up IRQ stuff if this device has a host bus context */
  388. if (bs && sn_irq_info->irq_irq) {
  389. pcidev_info->pdi_sn_irq_info = sn_irq_info;
  390. dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
  391. sn_irq_fixup(dev, sn_irq_info);
  392. } else {
  393. pcidev_info->pdi_sn_irq_info = NULL;
  394. kfree(sn_irq_info);
  395. }
  396. /*
  397. * MSI currently not supported on altix. Remove this when
  398. * the MSI abstraction patches are integrated into the kernel
  399. * (sometime after 2.6.16 releases)
  400. */
  401. dev->no_msi = 1;
  402. }
  403. /*
  404. * sn_pci_controller_fixup() - This routine sets up a bus's resources
  405. * consistent with the Linux PCI abstraction layer.
  406. */
  407. void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
  408. {
  409. int status;
  410. int nasid, cnode;
  411. struct pci_controller *controller;
  412. struct sn_pci_controller *sn_controller;
  413. struct pcibus_bussoft *prom_bussoft_ptr;
  414. struct hubdev_info *hubdev_info;
  415. void *provider_soft;
  416. struct sn_pcibus_provider *provider;
  417. status = sal_get_pcibus_info((u64) segment, (u64) busnum,
  418. (u64) ia64_tpa(&prom_bussoft_ptr));
  419. if (status > 0)
  420. return; /*bus # does not exist */
  421. prom_bussoft_ptr = __va(prom_bussoft_ptr);
  422. /* Allocate a sn_pci_controller, which has a pci_controller struct
  423. * as the first member.
  424. */
  425. sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
  426. if (!sn_controller)
  427. BUG();
  428. INIT_LIST_HEAD(&sn_controller->pcidev_info);
  429. controller = &sn_controller->pci_controller;
  430. controller->segment = segment;
  431. if (bus == NULL) {
  432. bus = pci_scan_bus(busnum, &pci_root_ops, controller);
  433. if (bus == NULL)
  434. goto error_return; /* error, or bus already scanned */
  435. bus->sysdata = NULL;
  436. }
  437. if (bus->sysdata)
  438. goto error_return; /* sysdata already alloc'd */
  439. /*
  440. * Per-provider fixup. Copies the contents from prom to local
  441. * area and links SN_PCIBUS_BUSSOFT().
  442. */
  443. if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
  444. goto error_return; /* unsupported asic type */
  445. if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
  446. goto error_return; /* no further fixup necessary */
  447. provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
  448. if (provider == NULL)
  449. goto error_return; /* no provider registerd for this asic */
  450. bus->sysdata = controller;
  451. if (provider->bus_fixup)
  452. provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);
  453. else
  454. provider_soft = NULL;
  455. if (provider_soft == NULL) {
  456. /* fixup failed or not applicable */
  457. bus->sysdata = NULL;
  458. goto error_return;
  459. }
  460. /*
  461. * Setup pci_windows for legacy IO and MEM space.
  462. * (Temporary until ACPI support is in place.)
  463. */
  464. controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
  465. if (controller->window == NULL)
  466. BUG();
  467. controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
  468. controller->window[0].resource.name = "legacy_io";
  469. controller->window[0].resource.flags = IORESOURCE_IO;
  470. controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
  471. controller->window[0].resource.end =
  472. controller->window[0].resource.start + 0xffff;
  473. controller->window[0].resource.parent = &ioport_resource;
  474. controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
  475. controller->window[1].resource.name = "legacy_mem";
  476. controller->window[1].resource.flags = IORESOURCE_MEM;
  477. controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
  478. controller->window[1].resource.end =
  479. controller->window[1].resource.start + (1024 * 1024) - 1;
  480. controller->window[1].resource.parent = &iomem_resource;
  481. controller->windows = 2;
  482. /*
  483. * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
  484. * after this point.
  485. */
  486. PCI_CONTROLLER(bus)->platform_data = provider_soft;
  487. nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
  488. cnode = nasid_to_cnodeid(nasid);
  489. hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
  490. SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
  491. &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
  492. /*
  493. * If the node information we obtained during the fixup phase is invalid
  494. * then set controller->node to -1 (undetermined)
  495. */
  496. if (controller->node >= num_online_nodes()) {
  497. struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
  498. printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
  499. "L_IO=%lx L_MEM=%lx BASE=%lx\n",
  500. b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
  501. b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
  502. printk(KERN_WARNING "on node %d but only %d nodes online."
  503. "Association set to undetermined.\n",
  504. controller->node, num_online_nodes());
  505. controller->node = -1;
  506. }
  507. return;
  508. error_return:
  509. kfree(sn_controller);
  510. return;
  511. }
  512. void sn_bus_store_sysdata(struct pci_dev *dev)
  513. {
  514. struct sysdata_el *element;
  515. element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
  516. if (!element) {
  517. dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
  518. return;
  519. }
  520. element->sysdata = SN_PCIDEV_INFO(dev);
  521. list_add(&element->entry, &sn_sysdata_list);
  522. }
  523. void sn_bus_free_sysdata(void)
  524. {
  525. struct sysdata_el *element;
  526. struct list_head *list, *safe;
  527. list_for_each_safe(list, safe, &sn_sysdata_list) {
  528. element = list_entry(list, struct sysdata_el, entry);
  529. list_del(&element->entry);
  530. list_del(&(((struct pcidev_info *)
  531. (element->sysdata))->pdi_list));
  532. kfree(element->sysdata);
  533. kfree(element);
  534. }
  535. return;
  536. }
  537. /*
  538. * Ugly hack to get PCI setup until we have a proper ACPI namespace.
  539. */
  540. #define PCI_BUSES_TO_SCAN 256
  541. static int __init sn_pci_init(void)
  542. {
  543. int i, j;
  544. struct pci_dev *pci_dev = NULL;
  545. if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
  546. return 0;
  547. /*
  548. * prime sn_pci_provider[]. Individial provider init routines will
  549. * override their respective default entries.
  550. */
  551. for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
  552. sn_pci_provider[i] = &sn_pci_default_provider;
  553. pcibr_init_provider();
  554. tioca_init_provider();
  555. tioce_init_provider();
  556. /*
  557. * This is needed to avoid bounce limit checks in the blk layer
  558. */
  559. ia64_max_iommu_merge_mask = ~PAGE_MASK;
  560. sn_fixup_ionodes();
  561. sn_irq_lh_init();
  562. INIT_LIST_HEAD(&sn_sysdata_list);
  563. sn_init_cpei_timer();
  564. #ifdef CONFIG_PROC_FS
  565. register_sn_procfs();
  566. #endif
  567. /* busses are not known yet ... */
  568. for (i = 0; i <= max_segment_number; i++)
  569. for (j = 0; j <= max_pcibus_number; j++)
  570. sn_pci_controller_fixup(i, j, NULL);
  571. /*
  572. * Generic Linux PCI Layer has created the pci_bus and pci_dev
  573. * structures - time for us to add our SN PLatform specific
  574. * information.
  575. */
  576. while ((pci_dev =
  577. pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
  578. sn_pci_fixup_slot(pci_dev);
  579. sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
  580. return 0;
  581. }
  582. /*
  583. * hubdev_init_node() - Creates the HUB data structure and link them to it's
  584. * own NODE specific data area.
  585. */
  586. void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
  587. {
  588. struct hubdev_info *hubdev_info;
  589. int size;
  590. pg_data_t *pg;
  591. size = sizeof(struct hubdev_info);
  592. if (node >= num_online_nodes()) /* Headless/memless IO nodes */
  593. pg = NODE_DATA(0);
  594. else
  595. pg = NODE_DATA(node);
  596. hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
  597. npda->pdinfo = (void *)hubdev_info;
  598. }
  599. geoid_t
  600. cnodeid_get_geoid(cnodeid_t cnode)
  601. {
  602. struct hubdev_info *hubdev;
  603. hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
  604. return hubdev->hdi_geoid;
  605. }
  606. void sn_generate_path(struct pci_bus *pci_bus, char *address)
  607. {
  608. nasid_t nasid;
  609. cnodeid_t cnode;
  610. geoid_t geoid;
  611. moduleid_t moduleid;
  612. u16 bricktype;
  613. nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
  614. cnode = nasid_to_cnodeid(nasid);
  615. geoid = cnodeid_get_geoid(cnode);
  616. moduleid = geo_module(geoid);
  617. sprintf(address, "module_%c%c%c%c%.2d",
  618. '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
  619. '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
  620. '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
  621. MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
  622. /* Tollhouse requires slot id to be displayed */
  623. bricktype = MODULE_GET_BTYPE(moduleid);
  624. if ((bricktype == L1_BRICKTYPE_191010) ||
  625. (bricktype == L1_BRICKTYPE_1932))
  626. sprintf(address, "%s^%d", address, geo_slot(geoid));
  627. }
  628. subsys_initcall(sn_pci_init);
  629. EXPORT_SYMBOL(sn_pci_fixup_slot);
  630. EXPORT_SYMBOL(sn_pci_unfixup_slot);
  631. EXPORT_SYMBOL(sn_pci_controller_fixup);
  632. EXPORT_SYMBOL(sn_bus_store_sysdata);
  633. EXPORT_SYMBOL(sn_bus_free_sysdata);
  634. EXPORT_SYMBOL(sn_generate_path);