pci.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664
  1. /* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
  2. * pci.c: UltraSparc PCI controller support.
  3. *
  4. * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
  5. * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
  6. * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
  7. */
  8. #include <linux/config.h>
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/string.h>
  12. #include <linux/sched.h>
  13. #include <linux/capability.h>
  14. #include <linux/errno.h>
  15. #include <linux/smp_lock.h>
  16. #include <linux/init.h>
  17. #include <asm/uaccess.h>
  18. #include <asm/pbm.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/irq.h>
  21. #include <asm/ebus.h>
  22. #include <asm/isa.h>
/* Mask of PCI MEM-space address bits that are significant when
 * translating a user-supplied 32-bit mmap offset into a physical
 * address (see __pci_mmap_make_offset).  Defaults to the full
 * 32-bit space; presumably narrowed by controller-specific code
 * elsewhere -- TODO confirm against the controller drivers.
 */
unsigned long pci_memspace_mask = 0xffffffffUL;
#ifndef CONFIG_PCI
/* A "nop" PCI implementation.  These system calls must exist for
 * ABI reasons even when the kernel is built without PCI support;
 * they accept everything and transfer nothing.
 */
asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
                                  unsigned long off, unsigned long len,
                                  unsigned char *buf)
{
        /* No PCI: report success without touching *buf. */
        return 0;
}

asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
                                   unsigned long off, unsigned long len,
                                   unsigned char *buf)
{
        /* No PCI: discard the write, report success. */
        return 0;
}
#else
/* List of all PCI controllers found in the system. */
struct pci_controller_info *pci_controller_root = NULL;

/* Each PCI controller found gets a unique index. */
int pci_num_controllers = 0;

/* Handshake state between the config-space poke routines below and
 * the trap code: while pci_poke_in_progress is set for pci_poke_cpu,
 * a faulting config access presumably sets pci_poke_faulted instead
 * of killing the kernel -- TODO confirm against the trap handlers.
 */
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

/* Serializes all config-space pokes so only one CPU is marked as
 * the poking CPU at a time.
 */
static DEFINE_SPINLOCK(pci_poke_lock);
/* Read one byte of config space at physical address ADDR into *RET.
 * Uses the little-endian physical-bypass ASI.  The poke flags are
 * raised around the access so a fault can be recovered; if the
 * access faulted, *RET is left unmodified.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
        unsigned long flags;
        u8 byte;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();  /* mark who is poking */
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        /* membar #Sync brackets the access so the flag stores and the
         * load are not reordered around each other.
         */
        __asm__ __volatile__("membar #Sync\n\t"
                             "lduba [%1] %2, %0\n\t"
                             "membar #Sync"
                             : "=r" (byte)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        if (!pci_poke_faulted)
                *ret = byte;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Read a 16-bit value of config space at physical address ADDR into
 * *RET.  Same fault-recovery protocol as pci_config_read8(); *RET is
 * untouched on fault.
 */
void pci_config_read16(u16 *addr, u16 *ret)
{
        unsigned long flags;
        u16 word;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "lduha [%1] %2, %0\n\t"
                             "membar #Sync"
                             : "=r" (word)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        if (!pci_poke_faulted)
                *ret = word;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Read a 32-bit value of config space at physical address ADDR into
 * *RET.  Same fault-recovery protocol as pci_config_read8(); *RET is
 * untouched on fault.
 */
void pci_config_read32(u32 *addr, u32 *ret)
{
        unsigned long flags;
        u32 dword;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "lduwa [%1] %2, %0\n\t"
                             "membar #Sync"
                             : "=r" (dword)
                             : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        if (!pci_poke_faulted)
                *ret = dword;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write the byte VAL to config space at physical address ADDR.
 * Faults are absorbed via the poke flags; note that unlike the read
 * routines, a faulting write is silently ignored (pci_poke_faulted
 * is not examined here).
 */
void pci_config_write8(u8 *addr, u8 val)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "stba %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write the 16-bit value VAL to config space at physical address
 * ADDR.  Same semantics as pci_config_write8(): faults are absorbed
 * and silently ignored.
 */
void pci_config_write16(u16 *addr, u16 val)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "stha %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write the 32-bit value VAL to config space at physical address
 * ADDR.  Same semantics as pci_config_write8(): faults are absorbed
 * and silently ignored.
 */
void pci_config_write32(u32 *addr, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&pci_poke_lock, flags);
        pci_poke_cpu = smp_processor_id();
        pci_poke_in_progress = 1;
        pci_poke_faulted = 0;
        __asm__ __volatile__("membar #Sync\n\t"
                             "stwa %0, [%1] %2\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
                             : "memory");
        pci_poke_in_progress = 0;
        pci_poke_cpu = -1;
        spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Probe for all PCI controllers in the system. */
extern void sabre_init(int, char *);
extern void psycho_init(int, char *);
extern void schizo_init(int, char *);
extern void schizo_plus_init(int, char *);
extern void tomatillo_init(int, char *);
extern void sun4v_pci_init(int, char *);

/* Map of OBP model/compatible strings to the matching controller
 * init routine.  NOTE: lookups compare only strlen(model) bytes
 * (see pci_controller_init), so a shorter name would also
 * prefix-match a longer table entry; shorter names such as
 * "SUNW,schizo" must therefore appear before "SUNW,schizo+".
 */
static struct {
        char *model_name;
        void (*init)(int, char *);
} pci_controller_table[] __initdata = {
        { "SUNW,sabre", sabre_init },
        { "pci108e,a000", sabre_init },
        { "pci108e,a001", sabre_init },
        { "SUNW,psycho", psycho_init },
        { "pci108e,8000", psycho_init },
        { "SUNW,schizo", schizo_init },
        { "pci108e,8001", schizo_init },
        { "SUNW,schizo+", schizo_plus_init },
        { "pci108e,8002", schizo_plus_init },
        { "SUNW,tomatillo", tomatillo_init },
        { "pci108e,a801", tomatillo_init },
        { "SUNW,sun4v-pci", sun4v_pci_init },
};

/* Number of entries in pci_controller_table. */
#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
                                  sizeof(pci_controller_table[0]))
  184. static int __init pci_controller_init(char *model_name, int namelen, int node)
  185. {
  186. int i;
  187. for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
  188. if (!strncmp(model_name,
  189. pci_controller_table[i].model_name,
  190. namelen)) {
  191. pci_controller_table[i].init(node, model_name);
  192. return 1;
  193. }
  194. }
  195. printk("PCI: Warning unknown controller, model name [%s]\n",
  196. model_name);
  197. printk("PCI: Ignoring controller...\n");
  198. return 0;
  199. }
  200. static int __init pci_is_controller(char *model_name, int namelen, int node)
  201. {
  202. int i;
  203. for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
  204. if (!strncmp(model_name,
  205. pci_controller_table[i].model_name,
  206. namelen)) {
  207. return 1;
  208. }
  209. }
  210. return 0;
  211. }
/* Walk the top-level PROM nodes looking for "pci" nodes.  For each
 * one carrying a "model" or "compatible" property, call
 * HANDLER(name, namelen, node) with the first string of that
 * property.  Returns the number of nodes for which HANDLER
 * returned nonzero.
 */
static int __init pci_controller_scan(int (*handler)(char *, int, int))
{
        char namebuf[64];
        int node;
        int count = 0;

        node = prom_getchild(prom_root_node);
        while ((node = prom_searchsiblings(node, "pci")) != 0) {
                int len;

                /* Prefer "model"; fall back to "compatible". */
                if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
                    (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
                        int item_len = 0;

                        /* Our value may be a multi-valued string in the
                         * case of some compatible properties.  For sanity,
                         * only try the first one.
                         */
                        while (namebuf[item_len] && len) {
                                len--;
                                item_len++;
                        }

                        if (handler(namebuf, item_len, node))
                                count++;
                }

                /* Step past this match so the next search starts at
                 * the following sibling.
                 */
                node = prom_getsibling(node);
                if (!node)
                        break;
        }

        return count;
}
  239. /* Is there some PCI controller in the system? */
  240. int __init pcic_present(void)
  241. {
  242. return pci_controller_scan(pci_is_controller);
  243. }
/* IOMMU operations vector; pci_controller_probe() points this at
 * either the sun4u or sun4v implementation depending on tlb_type.
 */
struct pci_iommu_ops *pci_iommu_ops;
EXPORT_SYMBOL(pci_iommu_ops);

extern struct pci_iommu_ops pci_sun4u_iommu_ops,
        pci_sun4v_iommu_ops;
  248. /* Find each controller in the system, attach and initialize
  249. * software state structure for each and link into the
  250. * pci_controller_root. Setup the controller enough such
  251. * that bus scanning can be done.
  252. */
  253. static void __init pci_controller_probe(void)
  254. {
  255. if (tlb_type == hypervisor)
  256. pci_iommu_ops = &pci_sun4v_iommu_ops;
  257. else
  258. pci_iommu_ops = &pci_sun4u_iommu_ops;
  259. printk("PCI: Probing for controllers.\n");
  260. pci_controller_scan(pci_controller_init);
  261. }
  262. static void __init pci_scan_each_controller_bus(void)
  263. {
  264. struct pci_controller_info *p;
  265. for (p = pci_controller_root; p; p = p->next)
  266. p->scan_bus(p);
  267. }
  268. extern void clock_probe(void);
  269. extern void power_init(void);
  270. static int __init pcibios_init(void)
  271. {
  272. pci_controller_probe();
  273. if (pci_controller_root == NULL)
  274. return 0;
  275. pci_scan_each_controller_bus();
  276. isa_init();
  277. ebus_init();
  278. clock_probe();
  279. power_init();
  280. return 0;
  281. }
  282. subsys_initcall(pcibios_init);
  283. void pcibios_fixup_bus(struct pci_bus *pbus)
  284. {
  285. struct pci_pbm_info *pbm = pbus->sysdata;
  286. /* Generic PCI bus probing sets these to point at
  287. * &io{port,mem}_resouce which is wrong for us.
  288. */
  289. pbus->resource[0] = &pbm->io_space;
  290. pbus->resource[1] = &pbm->mem_space;
  291. }
  292. struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r)
  293. {
  294. struct pci_pbm_info *pbm = pdev->bus->sysdata;
  295. struct resource *root = NULL;
  296. if (r->flags & IORESOURCE_IO)
  297. root = &pbm->io_space;
  298. if (r->flags & IORESOURCE_MEM)
  299. root = &pbm->mem_space;
  300. return root;
  301. }
/* Nothing to do: IRQ routing is handled elsewhere on this platform,
 * so the generic update hook is a no-op.
 */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
/* No platform-specific resource alignment constraints: accept the
 * placement the generic code chose.
 */
void pcibios_align_resource(void *data, struct resource *res,
                            unsigned long size, unsigned long align)
{
}
/* No extra work is needed to enable a device here; always succeed. */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
        return 0;
}
/* Convert the CPU-view resource RES into the bus-view REGION.
 * A zero-based dummy resource is run through the controller's
 * resource_adjust hook; the adjusted start then presumably holds
 * the bus->CPU offset of the containing space, which is subtracted
 * back out of RES -- TODO confirm against the controller drivers'
 * resource_adjust implementations.
 */
void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
                             struct resource *res)
{
        struct pci_pbm_info *pbm = pdev->bus->sysdata;
        struct resource zero_res, *root;

        zero_res.start = 0;
        zero_res.end = 0;
        zero_res.flags = res->flags;

        /* Pick the space the resource lives in. */
        if (res->flags & IORESOURCE_IO)
                root = &pbm->io_space;
        else
                root = &pbm->mem_space;

        pbm->parent->resource_adjust(pdev, &zero_res, root);

        region->start = res->start - zero_res.start;
        region->end = res->end - zero_res.start;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);
/* Convert the bus-view REGION into the CPU-view resource RES: copy
 * the bounds, then let the controller's resource_adjust hook shift
 * them into the appropriate I/O or MEM space.
 */
void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
                             struct pci_bus_region *region)
{
        struct pci_pbm_info *pbm = pdev->bus->sysdata;
        struct resource *root;

        res->start = region->start;
        res->end = region->end;

        /* Pick the space the resource lives in. */
        if (res->flags & IORESOURCE_IO)
                root = &pbm->io_space;
        else
                root = &pbm->mem_space;

        pbm->parent->resource_adjust(pdev, res, root);
}
EXPORT_SYMBOL(pcibios_bus_to_resource);
/* No "pci=" boot options are recognized here; return the string
 * unconsumed so generic code can report it.
 */
char * __init pcibios_setup(char *str)
{
        return str;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */

/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
                                      enum pci_mmap_state mmap_state)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        struct pci_pbm_info *pbm;
        struct pci_controller_info *p;
        unsigned long space_size, user_offset, user_size;

        if (!pcp)
                return -ENXIO;
        pbm = pcp->pbm;
        if (!pbm)
                return -ENXIO;

        p = pbm->parent;
        if (p->pbms_same_domain) {
                unsigned long lowest, highest;

                /* Both PBMs share one domain: the mappable window is
                 * the union of whichever of pbm_A/pbm_B spaces are
                 * active (flags != 0).
                 */
                lowest = ~0UL; highest = 0UL;
                if (mmap_state == pci_mmap_io) {
                        if (p->pbm_A.io_space.flags) {
                                lowest = p->pbm_A.io_space.start;
                                highest = p->pbm_A.io_space.end + 1;
                        }
                        if (p->pbm_B.io_space.flags) {
                                if (lowest > p->pbm_B.io_space.start)
                                        lowest = p->pbm_B.io_space.start;
                                if (highest < p->pbm_B.io_space.end + 1)
                                        highest = p->pbm_B.io_space.end + 1;
                        }
                        space_size = highest - lowest;
                } else {
                        if (p->pbm_A.mem_space.flags) {
                                lowest = p->pbm_A.mem_space.start;
                                highest = p->pbm_A.mem_space.end + 1;
                        }
                        if (p->pbm_B.mem_space.flags) {
                                if (lowest > p->pbm_B.mem_space.start)
                                        lowest = p->pbm_B.mem_space.start;
                                if (highest < p->pbm_B.mem_space.end + 1)
                                        highest = p->pbm_B.mem_space.end + 1;
                        }
                        space_size = highest - lowest;
                }
        } else {
                /* Separate domains: only this PBM's own space counts. */
                if (mmap_state == pci_mmap_io) {
                        space_size = (pbm->io_space.end -
                                      pbm->io_space.start) + 1;
                } else {
                        space_size = (pbm->mem_space.end -
                                      pbm->mem_space.start) + 1;
                }
        }

        /* Make sure the request is in range. */
        user_offset = vma->vm_pgoff << PAGE_SHIFT;
        user_size = vma->vm_end - vma->vm_start;

        if (user_offset >= space_size ||
            (user_offset + user_size) > space_size)
                return -EINVAL;

        /* Rebase vm_pgoff from a space-relative offset to the
         * physical page frame: add the lowest active space base.
         */
        if (p->pbms_same_domain) {
                unsigned long lowest = ~0UL;

                if (mmap_state == pci_mmap_io) {
                        if (p->pbm_A.io_space.flags)
                                lowest = p->pbm_A.io_space.start;
                        if (p->pbm_B.io_space.flags &&
                            lowest > p->pbm_B.io_space.start)
                                lowest = p->pbm_B.io_space.start;
                } else {
                        if (p->pbm_A.mem_space.flags)
                                lowest = p->pbm_A.mem_space.start;
                        if (p->pbm_B.mem_space.flags &&
                            lowest > p->pbm_B.mem_space.start)
                                lowest = p->pbm_B.mem_space.start;
                }
                vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
        } else {
                if (mmap_state == pci_mmap_io) {
                        vma->vm_pgoff = (pbm->io_space.start +
                                         user_offset) >> PAGE_SHIFT;
                } else {
                        vma->vm_pgoff = (pbm->mem_space.start +
                                         user_offset) >> PAGE_SHIFT;
                }
        }

        return 0;
}
/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
 * to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
                                  enum pci_mmap_state mmap_state)
{
        unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long user32 = user_offset & pci_memspace_mask;
        unsigned long largest_base, this_base, addr32;
        int i;

        /* Host bridges get the raw whole-space mapping path. */
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
                return __pci_mmap_make_offset_bus(dev, vma, mmap_state);

        /* Figure out which base address this is for: pick the BAR
         * with the highest base whose 32-bit truncated address does
         * not exceed the user's requested offset.
         */
        largest_base = 0UL;
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                struct resource *rp = &dev->resource[i];

                /* Active? */
                if (!rp->flags)
                        continue;

                /* Same type? */
                if (i == PCI_ROM_RESOURCE) {
                        /* ROM is only mappable as MEM. */
                        if (mmap_state != pci_mmap_mem)
                                continue;
                } else {
                        if ((mmap_state == pci_mmap_io &&
                             (rp->flags & IORESOURCE_IO) == 0) ||
                            (mmap_state == pci_mmap_mem &&
                             (rp->flags & IORESOURCE_MEM) == 0))
                                continue;
                }

                this_base = rp->start;

                /* Page-aligned base, truncated to the bus-visible bits
                 * (only the low 24 bits for I/O space).
                 */
                addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;

                if (mmap_state == pci_mmap_io)
                        addr32 &= 0xffffff;

                if (addr32 <= user32 && this_base > largest_base)
                        largest_base = this_base;
        }
        if (largest_base == 0UL)
                return -EINVAL;

        /* Now construct the final physical address: upper bits from
         * the chosen BAR's full base, lower bits from the user.
         */
        if (mmap_state == pci_mmap_io)
                vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
        else
                vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);

        return 0;
}
/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
 * mapping: mark it as device I/O memory that must not be core-dumped
 * or swapped.
 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
                                 enum pci_mmap_state mmap_state)
{
        vma->vm_flags |= (VM_IO | VM_RESERVED);
}
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
                                  enum pci_mmap_state mmap_state)
{
        /* Our io_remap_pfn_range takes care of this, do nothing. */
}
  508. /* Perform the actual remap of the pages for a PCI device mapping, as appropriate
  509. * for this architecture. The region in the process to map is described by vm_start
  510. * and vm_end members of VMA, the base physical address is found in vm_pgoff.
  511. * The pci device structure is provided so that architectures may make mapping
  512. * decisions on a per-device or per-bus basis.
  513. *
  514. * Returns a negative error code on failure, zero on success.
  515. */
  516. int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
  517. enum pci_mmap_state mmap_state,
  518. int write_combine)
  519. {
  520. int ret;
  521. ret = __pci_mmap_make_offset(dev, vma, mmap_state);
  522. if (ret < 0)
  523. return ret;
  524. __pci_mmap_set_flags(dev, vma, mmap_state);
  525. __pci_mmap_set_pgprot(dev, vma, mmap_state);
  526. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  527. ret = io_remap_pfn_range(vma, vma->vm_start,
  528. vma->vm_pgoff,
  529. vma->vm_end - vma->vm_start,
  530. vma->vm_page_prot);
  531. if (ret)
  532. return ret;
  533. return 0;
  534. }
  535. /* Return the domain nuber for this pci bus */
  536. int pci_domain_nr(struct pci_bus *pbus)
  537. {
  538. struct pci_pbm_info *pbm = pbus->sysdata;
  539. int ret;
  540. if (pbm == NULL || pbm->parent == NULL) {
  541. ret = -ENXIO;
  542. } else {
  543. struct pci_controller_info *p = pbm->parent;
  544. ret = p->index;
  545. if (p->pbms_same_domain == 0)
  546. ret = ((ret << 1) +
  547. ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
  548. }
  549. return ret;
  550. }
  551. EXPORT_SYMBOL(pci_domain_nr);
int pcibios_prep_mwi(struct pci_dev *dev)
{
        /* We set correct PCI_CACHE_LINE_SIZE register values for every
         * device probed on this platform.  So there is nothing to check
         * and this always succeeds.
         */
        return 0;
}

#endif /* !(CONFIG_PCI) */