pci.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688
  1. /* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
  2. * pci.c: UltraSparc PCI controller support.
  3. *
  4. * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
  5. * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
  6. * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
  7. */
  8. #include <linux/config.h>
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/string.h>
  12. #include <linux/sched.h>
  13. #include <linux/capability.h>
  14. #include <linux/errno.h>
  15. #include <linux/smp_lock.h>
  16. #include <linux/init.h>
  17. #include <asm/uaccess.h>
  18. #include <asm/pbm.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/irq.h>
  21. #include <asm/ebus.h>
  22. #include <asm/isa.h>
/* Mask selecting the offset-within-PCI-memory-space bits of a physical
 * address; used by the mmap offset code below to split a user-supplied
 * address into base and 32-bit bus offset. */
unsigned long pci_memspace_mask = 0xffffffffUL;
  24. #ifndef CONFIG_PCI
  25. /* A "nop" PCI implementation. */
/* Stub for kernels configured without PCI: the syscall exists but
 * transfers no data and reports success. */
asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
				  unsigned long off, unsigned long len,
				  unsigned char *buf)
{
	return 0;
}
/* Stub for kernels configured without PCI: the syscall exists but
 * writes nothing and reports success. */
asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
				   unsigned long off, unsigned long len,
				   unsigned char *buf)
{
	return 0;
}
  38. #else
/* List of all PCI controllers found in the system. */
struct pci_controller_info *pci_controller_root = NULL;

/* Each PCI controller found gets a unique index. */
int pci_num_controllers = 0;

/* At boot time the user can give the kernel a command
 * line option which controls if and how PCI devices
 * are reordered at PCI bus probing time.
 */
int pci_device_reorder = 0;

/* Config-space "poke" protocol state.  The accessors below set these
 * before performing a physical access and clear them afterwards;
 * pci_poke_faulted is cleared before the access and tested after it —
 * presumably the fault handler sets it when the access traps (handler
 * is not in this file; verify there).  pci_poke_lock serializes all
 * config accesses so only one CPU pokes at a time. */
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;		/* CPU doing the access, -1 when idle. */
volatile int pci_poke_faulted;
static DEFINE_SPINLOCK(pci_poke_lock);
/* Read one byte of config space at physical address ADDR.  Runs under
 * pci_poke_lock with the poke flags raised; *RET is only written when
 * the access did not fault (pci_poke_faulted stayed clear). */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* Physical-bypass little-endian byte load, bracketed by #Sync
	 * membars so the access is fully ordered around the poke flags. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Like pci_config_read8(), but a 16-bit load (lduha).  *RET is only
 * written when the access did not fault. */
void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Like pci_config_read8(), but a 32-bit load (lduwa).  *RET is only
 * written when the access did not fault. */
void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write one byte VAL to config space at physical address ADDR under
 * the poke protocol.  A faulting store is silently discarded (no
 * return value; the poke flags keep a trap from becoming an oops). */
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Like pci_config_write8(), but a 16-bit store (stha). */
void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Like pci_config_write8(), but a 32-bit store (stwa). */
void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
  163. /* Probe for all PCI controllers in the system. */
  164. extern void sabre_init(int, char *);
  165. extern void psycho_init(int, char *);
  166. extern void schizo_init(int, char *);
  167. extern void schizo_plus_init(int, char *);
  168. extern void tomatillo_init(int, char *);
  169. static struct {
  170. char *model_name;
  171. void (*init)(int, char *);
  172. } pci_controller_table[] __initdata = {
  173. { "SUNW,sabre", sabre_init },
  174. { "pci108e,a000", sabre_init },
  175. { "pci108e,a001", sabre_init },
  176. { "SUNW,psycho", psycho_init },
  177. { "pci108e,8000", psycho_init },
  178. { "SUNW,schizo", schizo_init },
  179. { "pci108e,8001", schizo_init },
  180. { "SUNW,schizo+", schizo_plus_init },
  181. { "pci108e,8002", schizo_plus_init },
  182. { "SUNW,tomatillo", tomatillo_init },
  183. { "pci108e,a801", tomatillo_init },
  184. };
  185. #define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
  186. sizeof(pci_controller_table[0]))
  187. static int __init pci_controller_init(char *model_name, int namelen, int node)
  188. {
  189. int i;
  190. for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
  191. if (!strncmp(model_name,
  192. pci_controller_table[i].model_name,
  193. namelen)) {
  194. pci_controller_table[i].init(node, model_name);
  195. return 1;
  196. }
  197. }
  198. printk("PCI: Warning unknown controller, model name [%s]\n",
  199. model_name);
  200. printk("PCI: Ignoring controller...\n");
  201. return 0;
  202. }
  203. static int __init pci_is_controller(char *model_name, int namelen, int node)
  204. {
  205. int i;
  206. for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
  207. if (!strncmp(model_name,
  208. pci_controller_table[i].model_name,
  209. namelen)) {
  210. return 1;
  211. }
  212. }
  213. return 0;
  214. }
/* Walk the prom tree siblings under the root looking for "pci" nodes.
 * For each node that has a "model" or "compatible" property, call
 * HANDLER with the first string item of that property, its length, and
 * the node.  Returns the number of nodes for which HANDLER returned
 * non-zero. */
static int __init pci_controller_scan(int (*handler)(char *, int, int))
{
	char namebuf[64];
	int node;
	int count = 0;

	node = prom_getchild(prom_root_node);
	while ((node = prom_searchsiblings(node, "pci")) != 0) {
		int len;

		/* "model" takes precedence; fall back to "compatible". */
		if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
		    (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
			int item_len = 0;

			/* Our value may be a multi-valued string in the
			 * case of some compatible properties. For sanity,
			 * only try the first one.  The scan below stops at
			 * the first NUL and never reads past LEN bytes. */
			while (namebuf[item_len] && len) {
				len--;
				item_len++;
			}

			if (handler(namebuf, item_len, node))
				count++;
		}

		/* Continue the search from the next sibling. */
		node = prom_getsibling(node);
		if (!node)
			break;
	}

	return count;
}
/* Is there some PCI controller in the system?  Returns the count of
 * recognized controllers (0 means none). */
int __init pcic_present(void)
{
	return pci_controller_scan(pci_is_controller);
}
/* Find each controller in the system, attach and initialize
 * software state structure for each and link into the
 * pci_controller_root. Setup the controller enough such
 * that bus scanning can be done.
 */
static void __init pci_controller_probe(void)
{
	printk("PCI: Probing for controllers.\n");

	pci_controller_scan(pci_controller_init);
}
  257. static void __init pci_scan_each_controller_bus(void)
  258. {
  259. struct pci_controller_info *p;
  260. for (p = pci_controller_root; p; p = p->next)
  261. p->scan_bus(p);
  262. }
  263. /* Reorder the pci_dev chain, so that onboard devices come first
  264. * and then come the pluggable cards.
  265. */
  266. static void __init pci_reorder_devs(void)
  267. {
  268. struct list_head *pci_onboard = &pci_devices;
  269. struct list_head *walk = pci_onboard->next;
  270. while (walk != pci_onboard) {
  271. struct pci_dev *pdev = pci_dev_g(walk);
  272. struct list_head *walk_next = walk->next;
  273. if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
  274. list_del(walk);
  275. list_add(walk, pci_onboard);
  276. }
  277. walk = walk_next;
  278. }
  279. }
extern void clock_probe(void);
extern void power_init(void);

/* Boot-time entry point (subsys initcall): probe the controllers,
 * scan their busses, optionally reorder devices, then bring up the
 * subsystems that hang off PCI (ISA, EBUS, clock, power). */
static int __init pcibios_init(void)
{
	pci_controller_probe();
	if (pci_controller_root == NULL)
		return 0;	/* No PCI in this machine; nothing to do. */

	pci_scan_each_controller_bus();

	if (pci_device_reorder)
		pci_reorder_devs();

	isa_init();
	ebus_init();
	clock_probe();
	power_init();

	return 0;
}

subsys_initcall(pcibios_init);
/* Per-bus fixup hook: point the bus resources at this PBM's windows. */
void pcibios_fixup_bus(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	/* Generic PCI bus probing sets these to point at
	 * &io{port,mem}_resource which is wrong for us.
	 */
	pbus->resource[0] = &pbm->io_space;
	pbus->resource[1] = &pbm->mem_space;
}
  306. struct resource *pcibios_select_root(struct pci_dev *pdev, struct resource *r)
  307. {
  308. struct pci_pbm_info *pbm = pdev->bus->sysdata;
  309. struct resource *root = NULL;
  310. if (r->flags & IORESOURCE_IO)
  311. root = &pbm->io_space;
  312. if (r->flags & IORESOURCE_MEM)
  313. root = &pbm->mem_space;
  314. return root;
  315. }
/* Arch hook for writing the IRQ into config space: intentionally a
 * no-op on this platform. */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
/* Arch hook for resource alignment: intentionally a no-op on this
 * platform. */
void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
}
/* Arch hook run when a driver enables a device: nothing extra needed
 * here, so always succeed. */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	return 0;
}
/* Translate CPU-side resource RES of PDEV into the PCI-bus view in
 * REGION.  A zero-based dummy resource is run through the controller's
 * resource_adjust() hook; zero_res.start then holds the CPU offset of
 * bus address zero for this space (presumed contract of
 * resource_adjust — defined per controller, not in this file), and
 * subtracting it rebases RES onto the bus. */
void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_pbm_info *pbm = pdev->bus->sysdata;
	struct resource zero_res, *root;

	zero_res.start = 0;
	zero_res.end = 0;
	zero_res.flags = res->flags;

	if (res->flags & IORESOURCE_IO)
		root = &pbm->io_space;
	else
		root = &pbm->mem_space;

	pbm->parent->resource_adjust(pdev, &zero_res, root);

	region->start = res->start - zero_res.start;
	region->end = res->end - zero_res.start;
}
  343. void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
  344. struct pci_bus_region *region)
  345. {
  346. struct pci_pbm_info *pbm = pdev->bus->sysdata;
  347. struct resource *root;
  348. res->start = region->start;
  349. res->end = region->end;
  350. if (res->flags & IORESOURCE_IO)
  351. root = &pbm->io_space;
  352. else
  353. root = &pbm->mem_space;
  354. pbm->parent->resource_adjust(pdev, res, root);
  355. }
  356. EXPORT_SYMBOL(pcibios_bus_to_resource);
  357. char * __init pcibios_setup(char *str)
  358. {
  359. if (!strcmp(str, "onboardfirst")) {
  360. pci_device_reorder = 1;
  361. return NULL;
  362. }
  363. if (!strcmp(str, "noreorder")) {
  364. pci_device_reorder = 0;
  365. return NULL;
  366. }
  367. return str;
  368. }
/* Platform support for /proc/bus/pci/X/Y mmap()s. */

/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 *
 * Validates the user's (offset, size) against the controller's window
 * and rewrites vma->vm_pgoff to the physical page frame.  Returns 0 on
 * success, -ENXIO when the device has no PBM cookie, -EINVAL when the
 * request falls outside the window.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm;
	struct pci_controller_info *p;
	unsigned long space_size, user_offset, user_size;

	if (!pcp)
		return -ENXIO;
	pbm = pcp->pbm;
	if (!pbm)
		return -ENXIO;

	p = pbm->parent;
	if (p->pbms_same_domain) {
		/* Both PBMs share one domain: the window spans from the
		 * lowest start to the highest end+1 of the two PBMs'
		 * spaces (a PBM with zero flags is absent and skipped). */
		unsigned long lowest, highest;

		lowest = ~0UL; highest = 0UL;
		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags) {
				lowest = p->pbm_A.io_space.start;
				highest = p->pbm_A.io_space.end + 1;
			}
			if (p->pbm_B.io_space.flags) {
				if (lowest > p->pbm_B.io_space.start)
					lowest = p->pbm_B.io_space.start;
				if (highest < p->pbm_B.io_space.end + 1)
					highest = p->pbm_B.io_space.end + 1;
			}
			space_size = highest - lowest;
		} else {
			if (p->pbm_A.mem_space.flags) {
				lowest = p->pbm_A.mem_space.start;
				highest = p->pbm_A.mem_space.end + 1;
			}
			if (p->pbm_B.mem_space.flags) {
				if (lowest > p->pbm_B.mem_space.start)
					lowest = p->pbm_B.mem_space.start;
				if (highest < p->pbm_B.mem_space.end + 1)
					highest = p->pbm_B.mem_space.end + 1;
			}
			space_size = highest - lowest;
		}
	} else {
		/* Separate domains: only this PBM's window counts. */
		if (mmap_state == pci_mmap_io) {
			space_size = (pbm->io_space.end -
				      pbm->io_space.start) + 1;
		} else {
			space_size = (pbm->mem_space.end -
				      pbm->mem_space.start) + 1;
		}
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	if (p->pbms_same_domain) {
		/* Rebase the user's offset onto the lowest present
		 * PBM start (recomputed as above). */
		unsigned long lowest = ~0UL;

		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags)
				lowest = p->pbm_A.io_space.start;
			if (p->pbm_B.io_space.flags &&
			    lowest > p->pbm_B.io_space.start)
				lowest = p->pbm_B.io_space.start;
		} else {
			if (p->pbm_A.mem_space.flags)
				lowest = p->pbm_A.mem_space.start;
			if (p->pbm_B.mem_space.flags &&
			    lowest > p->pbm_B.mem_space.start)
				lowest = p->pbm_B.mem_space.start;
		}
		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
	} else {
		if (mmap_state == pci_mmap_io) {
			vma->vm_pgoff = (pbm->io_space.start +
					 user_offset) >> PAGE_SHIFT;
		} else {
			vma->vm_pgoff = (pbm->mem_space.start +
					 user_offset) >> PAGE_SHIFT;
		}
	}

	return 0;
}
/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
 * to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long user32 = user_offset & pci_memspace_mask;
	unsigned long largest_base, this_base, addr32;
	int i;

	/* Host bridges get the whole-space treatment instead. */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);

	/* Figure out which base address this is for: among the BARs of
	 * the right type whose 32-bit image is <= the user's offset,
	 * keep the one with the largest physical base. */
	largest_base = 0UL;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type? */
		if (i == PCI_ROM_RESOURCE) {
			/* The ROM BAR is memory space only. */
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		this_base = rp->start;

		/* 32-bit bus-visible image of this BAR's page base. */
		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;

		if (mmap_state == pci_mmap_io)
			addr32 &= 0xffffff;	/* I/O space is 24 bits wide here. */

		if (addr32 <= user32 && this_base > largest_base)
			largest_base = this_base;
	}

	if (largest_base == 0UL)
		return -EINVAL;

	/* Now construct the final physical address: the matched BAR's
	 * upper bits combined with the user's 32-bit (or 24-bit I/O)
	 * offset. */
	if (mmap_state == pci_mmap_io)
		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
	else
		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);

	return 0;
}
/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
 * mapping: mark it as device I/O and reserved.
 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
				 enum pci_mmap_state mmap_state)
{
	vma->vm_flags |= (VM_IO | VM_RESERVED);
}
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_pfn_range takes care of this, do nothing. */
}
  529. /* Perform the actual remap of the pages for a PCI device mapping, as appropriate
  530. * for this architecture. The region in the process to map is described by vm_start
  531. * and vm_end members of VMA, the base physical address is found in vm_pgoff.
  532. * The pci device structure is provided so that architectures may make mapping
  533. * decisions on a per-device or per-bus basis.
  534. *
  535. * Returns a negative error code on failure, zero on success.
  536. */
  537. int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
  538. enum pci_mmap_state mmap_state,
  539. int write_combine)
  540. {
  541. int ret;
  542. ret = __pci_mmap_make_offset(dev, vma, mmap_state);
  543. if (ret < 0)
  544. return ret;
  545. __pci_mmap_set_flags(dev, vma, mmap_state);
  546. __pci_mmap_set_pgprot(dev, vma, mmap_state);
  547. ret = io_remap_pfn_range(vma, vma->vm_start,
  548. vma->vm_pgoff,
  549. vma->vm_end - vma->vm_start,
  550. vma->vm_page_prot);
  551. if (ret)
  552. return ret;
  553. vma->vm_flags |= VM_IO;
  554. return 0;
  555. }
  556. /* Return the domain nuber for this pci bus */
  557. int pci_domain_nr(struct pci_bus *pbus)
  558. {
  559. struct pci_pbm_info *pbm = pbus->sysdata;
  560. int ret;
  561. if (pbm == NULL || pbm->parent == NULL) {
  562. ret = -ENXIO;
  563. } else {
  564. struct pci_controller_info *p = pbm->parent;
  565. ret = p->index;
  566. if (p->pbms_same_domain == 0)
  567. ret = ((ret << 1) +
  568. ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
  569. }
  570. return ret;
  571. }
  572. EXPORT_SYMBOL(pci_domain_nr);
/* Arch hook called before enabling Memory-Write-Invalidate on DEV. */
int pcibios_prep_mwi(struct pci_dev *dev)
{
	/* We set correct PCI_CACHE_LINE_SIZE register values for every
	 * device probed on this platform. So there is nothing to check
	 * and this always succeeds.
	 */
	return 0;
}
  581. #endif /* !(CONFIG_PCI) */