pci.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806
  1. /* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
  2. * pci.c: UltraSparc PCI controller support.
  3. *
  4. * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
  5. * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
  6. * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
  7. */
  8. #include <linux/config.h>
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/string.h>
  12. #include <linux/sched.h>
  13. #include <linux/capability.h>
  14. #include <linux/errno.h>
  15. #include <linux/smp_lock.h>
  16. #include <linux/init.h>
  17. #include <asm/uaccess.h>
  18. #include <asm/pbm.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/irq.h>
  21. #include <asm/ebus.h>
  22. #include <asm/isa.h>
/* Mask applied to PCI MEM addresses; devices see a 32-bit bus address
 * space by default (may be overridden by the controller drivers).
 */
unsigned long pci_memspace_mask = 0xffffffffUL;
#ifndef CONFIG_PCI
/* A "nop" PCI implementation for kernels built without PCI support:
 * the config-space syscalls succeed but transfer nothing.
 */
asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
				  unsigned long off, unsigned long len,
				  unsigned char *buf)
{
	return 0;
}

asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
				   unsigned long off, unsigned long len,
				   unsigned char *buf)
{
	return 0;
}
  38. #else
/* List of all PCI controllers found in the system. */
struct pci_controller_info *pci_controller_root = NULL;

/* Each PCI controller found gets a unique index. */
int pci_num_controllers = 0;

/* At boot time the user can give the kernel a command
 * line option which controls if and how PCI devices
 * are reordered at PCI bus probing time.
 */
int pci_device_reorder = 0;

/* Handshake state consumed by the trap handlers: while a config-space
 * access is in flight these mark which CPU may fault, and the fault
 * path sets pci_poke_faulted instead of treating the access as fatal.
 * NOTE(review): the consuming trap code is outside this file — the
 * protocol description is inferred from how the flags are set here.
 */
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

/* Serializes all poke-style config accesses (also protects the flags above). */
static DEFINE_SPINLOCK(pci_poke_lock);
/* Read one byte of PCI config space at physical address ADDR using a
 * cache-bypassing little-endian ASI load.  On success the byte is
 * stored through RET; if the access faulted, *RET is left untouched.
 * The poke flags must be set before and cleared after the access, in
 * this exact order, so the fault handler can recover.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	/* membar #Sync brackets the access so the faulting load cannot
	 * be reordered outside the in_progress window.
	 */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Read a 16-bit halfword of PCI config space; see pci_config_read8
 * for the poke/fault-recovery protocol.  *RET is untouched on fault.
 */
void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Read a 32-bit word of PCI config space; see pci_config_read8
 * for the poke/fault-recovery protocol.  *RET is untouched on fault.
 */
void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write one byte of PCI config space at physical address ADDR.
 * Faults are silently absorbed by the poke protocol (no status is
 * returned to the caller); see pci_config_read8.
 */
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write a 16-bit halfword of PCI config space; faults are absorbed
 * silently.  See pci_config_read8 for the poke protocol.
 */
void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Write a 32-bit word of PCI config space; faults are absorbed
 * silently.  See pci_config_read8 for the poke protocol.
 */
void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Probe for all PCI controllers in the system. */
extern void sabre_init(int, char *);
extern void psycho_init(int, char *);
extern void schizo_init(int, char *);
extern void schizo_plus_init(int, char *);
extern void tomatillo_init(int, char *);

/* Maps an OBP "model"/"compatible" string to the matching controller
 * driver's init routine.  Both the SUNW,* names and the raw
 * vendor/device pci108e,* forms are listed for each chip.
 */
static struct {
	char *model_name;
	void (*init)(int, char *);
} pci_controller_table[] __initdata = {
	{ "SUNW,sabre", sabre_init },
	{ "pci108e,a000", sabre_init },
	{ "pci108e,a001", sabre_init },
	{ "SUNW,psycho", psycho_init },
	{ "pci108e,8000", psycho_init },
	{ "SUNW,schizo", schizo_init },
	{ "pci108e,8001", schizo_init },
	{ "SUNW,schizo+", schizo_plus_init },
	{ "pci108e,8002", schizo_plus_init },
	{ "SUNW,tomatillo", tomatillo_init },
	{ "pci108e,a801", tomatillo_init },
};
#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
				  sizeof(pci_controller_table[0]))
  187. static int __init pci_controller_init(char *model_name, int namelen, int node)
  188. {
  189. int i;
  190. for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
  191. if (!strncmp(model_name,
  192. pci_controller_table[i].model_name,
  193. namelen)) {
  194. pci_controller_table[i].init(node, model_name);
  195. return 1;
  196. }
  197. }
  198. printk("PCI: Warning unknown controller, model name [%s]\n",
  199. model_name);
  200. printk("PCI: Ignoring controller...\n");
  201. return 0;
  202. }
  203. static int __init pci_is_controller(char *model_name, int namelen, int node)
  204. {
  205. int i;
  206. for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
  207. if (!strncmp(model_name,
  208. pci_controller_table[i].model_name,
  209. namelen)) {
  210. return 1;
  211. }
  212. }
  213. return 0;
  214. }
/* Walk every "pci" node under the prom root and invoke HANDLER with
 * the node's model (or compatible) string, that string's length, and
 * the node itself.  Returns how many invocations reported success.
 */
static int __init pci_controller_scan(int (*handler)(char *, int, int))
{
	char namebuf[64];
	int node;
	int count = 0;

	node = prom_getchild(prom_root_node);
	while ((node = prom_searchsiblings(node, "pci")) != 0) {
		int len;

		/* Prefer "model", fall back to "compatible". */
		if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
		    (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
			int item_len = 0;

			/* Our value may be a multi-valued string in the
			 * case of some compatible properties. For sanity,
			 * only try the first one.
			 */
			while (namebuf[item_len] && len) {
				len--;
				item_len++;
			}

			if (handler(namebuf, item_len, node))
				count++;
		}

		/* Continue the search from the next sibling. */
		node = prom_getsibling(node);
		if (!node)
			break;
	}

	return count;
}
/* Is there some PCI controller in the system?  Non-zero iff at least
 * one recognized controller node exists (nothing is initialized).
 */
int __init pcic_present(void)
{
	return pci_controller_scan(pci_is_controller);
}
/* Find each controller in the system, attach and initialize
 * software state structure for each and link into the
 * pci_controller_root. Setup the controller enough such
 * that bus scanning can be done.
 */
static void __init pci_controller_probe(void)
{
	printk("PCI: Probing for controllers.\n");
	pci_controller_scan(pci_controller_init);
}
  257. static void __init pci_scan_each_controller_bus(void)
  258. {
  259. struct pci_controller_info *p;
  260. for (p = pci_controller_root; p; p = p->next)
  261. p->scan_bus(p);
  262. }
/* Reorder the pci_dev chain, so that onboard devices come first
 * and then come the pluggable cards.
 */
static void __init pci_reorder_devs(void)
{
	struct list_head *pci_onboard = &pci_devices;
	struct list_head *walk = pci_onboard->next;

	while (walk != pci_onboard) {
		struct pci_dev *pdev = pci_dev_g(walk);
		/* Remember the successor now; list_del/list_add below
		 * rewrite walk's links.
		 */
		struct list_head *walk_next = walk->next;

		/* NOTE(review): bit 0x20 of the interrupt ino appears to
		 * mark onboard devices here — confirm against the
		 * controller irq encoding.
		 */
		if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
			/* Move to the front of the list. */
			list_del(walk);
			list_add(walk, pci_onboard);
		}

		walk = walk_next;
	}
}
extern void clock_probe(void);
extern void power_init(void);

/* Late boot-time entry point: probe controllers, scan their buses,
 * optionally reorder devices (see pcibios_setup), then bring up the
 * ISA/EBUS bridges and the clock/power devices that live behind them.
 * Returns 0 whether or not any PCI controller was found.
 */
static int __init pcibios_init(void)
{
	pci_controller_probe();
	if (pci_controller_root == NULL)
		return 0;

	pci_scan_each_controller_bus();

	if (pci_device_reorder)
		pci_reorder_devs();

	isa_init();
	ebus_init();
	clock_probe();
	power_init();

	return 0;
}

subsys_initcall(pcibios_init);
/* Point the bus's resources at the owning PBM's I/O and MEM windows. */
void pcibios_fixup_bus(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	/* Generic PCI bus probing sets these to point at
	 * &io{port,mem}_resource which is wrong for us.
	 */
	pbus->resource[0] = &pbm->io_space;
	pbus->resource[1] = &pbm->mem_space;
}
  306. int pci_claim_resource(struct pci_dev *pdev, int resource)
  307. {
  308. struct pci_pbm_info *pbm = pdev->bus->sysdata;
  309. struct resource *res = &pdev->resource[resource];
  310. struct resource *root;
  311. if (!pbm)
  312. return -EINVAL;
  313. if (res->flags & IORESOURCE_IO)
  314. root = &pbm->io_space;
  315. else
  316. root = &pbm->mem_space;
  317. pbm->parent->resource_adjust(pdev, res, root);
  318. return request_resource(root, res);
  319. }
/*
 * Given the PCI bus a device resides on, try to
 * find an acceptable resource allocation for a
 * specific device resource..
 */
static int pci_assign_bus_resource(const struct pci_bus *bus,
				   struct pci_dev *dev,
				   struct resource *res,
				   unsigned long size,
				   unsigned long min,
				   int resno)
{
	unsigned int type_mask;
	int i;

	type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	/* Try each of the bus's (up to 4) resource windows. */
	for (i = 0 ; i < 4; i++) {
		struct resource *r = bus->resource[i];
		if (!r)
			continue;

		/* type_mask must match */
		if ((res->flags ^ r->flags) & type_mask)
			continue;

		/* Ok, try it out..  Alignment equals size here
		 * (power-of-two BAR semantics).
		 */
		if (allocate_resource(r, res, size, min, -1, size, NULL, NULL) < 0)
			continue;

		/* PCI config space updated by caller. */
		return 0;
	}

	return -EBUSY;
}
  350. int pci_assign_resource(struct pci_dev *pdev, int resource)
  351. {
  352. struct pcidev_cookie *pcp = pdev->sysdata;
  353. struct pci_pbm_info *pbm = pcp->pbm;
  354. struct resource *res = &pdev->resource[resource];
  355. unsigned long min, size;
  356. int err;
  357. if (res->flags & IORESOURCE_IO)
  358. min = pbm->io_space.start + 0x400UL;
  359. else
  360. min = pbm->mem_space.start;
  361. size = res->end - res->start + 1;
  362. err = pci_assign_bus_resource(pdev->bus, pdev, res, size, min, resource);
  363. if (err < 0) {
  364. printk("PCI: Failed to allocate resource %d for %s\n",
  365. resource, pci_name(pdev));
  366. } else {
  367. /* Update PCI config space. */
  368. pbm->parent->base_address_update(pdev, resource);
  369. }
  370. return err;
  371. }
/* Sort resources by alignment: insert each unassigned resource of DEV
 * into the singly linked list at HEAD, kept in decreasing order of
 * alignment (size for device BARs, start address for bridge windows).
 */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *r;
		struct resource_list *list, *tmp;
		unsigned long r_align;

		r = &dev->resource[i];
		r_align = r->end - r->start;

		/* Skip unused resources and those already claimed. */
		if (!(r->flags) || r->parent)
			continue;
		if (!r_align) {
			printk(KERN_WARNING "PCI: Ignore bogus resource %d "
			       "[%lx:%lx] of %s\n",
			       i, r->start, r->end, pci_name(dev));
			continue;
		}

		/* Sort key: size for device BARs, start for bridge windows. */
		r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;

		/* Find the insertion point: the first entry whose key is
		 * smaller than ours (or the end of the list).
		 */
		for (list = head; ; list = list->next) {
			unsigned long align = 0;
			struct resource_list *ln = list->next;
			int idx;

			if (ln) {
				idx = ln->res - &ln->dev->resource[0];
				align = (idx < PCI_BRIDGE_RESOURCES) ?
					ln->res->end - ln->res->start + 1 :
					ln->res->start;
			}
			if (r_align > align) {
				tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
				if (!tmp)
					panic("pdev_sort_resources(): "
					      "kmalloc() failed!\n");
				tmp->next = ln;
				tmp->res = r;
				tmp->dev = dev;
				list->next = tmp;
				break;
			}
		}
	}
}
/* No-op: irq assignment is handled by the controller drivers, not by
 * writes to PCI_INTERRUPT_LINE.
 */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
/* No-op: resource alignment is handled in pci_assign_bus_resource()
 * above, so the generic hook has nothing to do.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
}
/* Nothing platform-specific needed to enable a device; always succeeds. */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	return 0;
}
  426. void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
  427. struct resource *res)
  428. {
  429. struct pci_pbm_info *pbm = pdev->bus->sysdata;
  430. struct resource zero_res, *root;
  431. zero_res.start = 0;
  432. zero_res.end = 0;
  433. zero_res.flags = res->flags;
  434. if (res->flags & IORESOURCE_IO)
  435. root = &pbm->io_space;
  436. else
  437. root = &pbm->mem_space;
  438. pbm->parent->resource_adjust(pdev, &zero_res, root);
  439. region->start = res->start - zero_res.start;
  440. region->end = res->end - zero_res.start;
  441. }
  442. void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
  443. struct pci_bus_region *region)
  444. {
  445. struct pci_pbm_info *pbm = pdev->bus->sysdata;
  446. struct resource *root;
  447. res->start = region->start;
  448. res->end = region->end;
  449. if (res->flags & IORESOURCE_IO)
  450. root = &pbm->io_space;
  451. else
  452. root = &pbm->mem_space;
  453. pbm->parent->resource_adjust(pdev, res, root);
  454. }
  455. EXPORT_SYMBOL(pcibios_bus_to_resource);
  456. char * __init pcibios_setup(char *str)
  457. {
  458. if (!strcmp(str, "onboardfirst")) {
  459. pci_device_reorder = 1;
  460. return NULL;
  461. }
  462. if (!strcmp(str, "noreorder")) {
  463. pci_device_reorder = 0;
  464. return NULL;
  465. }
  466. return str;
  467. }
/* Platform support for /proc/bus/pci/X/Y mmap()s. */

/* If the user uses a host-bridge as the PCI device, he may use
 * this to perform a raw mmap() of the I/O or MEM space behind
 * that controller.
 *
 * This can be useful for execution of x86 PCI bios initialization code
 * on a PCI card, like the xfree86 int10 stuff does.
 */
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
				      enum pci_mmap_state mmap_state)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	struct pci_pbm_info *pbm;
	struct pci_controller_info *p;
	unsigned long space_size, user_offset, user_size;

	if (!pcp)
		return -ENXIO;
	pbm = pcp->pbm;
	if (!pbm)
		return -ENXIO;

	p = pbm->parent;
	if (p->pbms_same_domain) {
		/* Both PBMs share one domain: the mappable window spans
		 * from the lowest start to the highest end across PBM A
		 * and PBM B (whichever of them are active).
		 */
		unsigned long lowest, highest;

		lowest = ~0UL; highest = 0UL;
		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags) {
				lowest = p->pbm_A.io_space.start;
				highest = p->pbm_A.io_space.end + 1;
			}
			if (p->pbm_B.io_space.flags) {
				if (lowest > p->pbm_B.io_space.start)
					lowest = p->pbm_B.io_space.start;
				if (highest < p->pbm_B.io_space.end + 1)
					highest = p->pbm_B.io_space.end + 1;
			}
			space_size = highest - lowest;
		} else {
			if (p->pbm_A.mem_space.flags) {
				lowest = p->pbm_A.mem_space.start;
				highest = p->pbm_A.mem_space.end + 1;
			}
			if (p->pbm_B.mem_space.flags) {
				if (lowest > p->pbm_B.mem_space.start)
					lowest = p->pbm_B.mem_space.start;
				if (highest < p->pbm_B.mem_space.end + 1)
					highest = p->pbm_B.mem_space.end + 1;
			}
			space_size = highest - lowest;
		}
	} else {
		/* Separate domains: only this device's PBM window counts. */
		if (mmap_state == pci_mmap_io) {
			space_size = (pbm->io_space.end -
				      pbm->io_space.start) + 1;
		} else {
			space_size = (pbm->mem_space.end -
				      pbm->mem_space.start) + 1;
		}
	}

	/* Make sure the request is in range. */
	user_offset = vma->vm_pgoff << PAGE_SHIFT;
	user_size = vma->vm_end - vma->vm_start;

	if (user_offset >= space_size ||
	    (user_offset + user_size) > space_size)
		return -EINVAL;

	/* Rebase the user's offset onto the physical start of the
	 * window computed above.
	 */
	if (p->pbms_same_domain) {
		unsigned long lowest = ~0UL;

		if (mmap_state == pci_mmap_io) {
			if (p->pbm_A.io_space.flags)
				lowest = p->pbm_A.io_space.start;
			if (p->pbm_B.io_space.flags &&
			    lowest > p->pbm_B.io_space.start)
				lowest = p->pbm_B.io_space.start;
		} else {
			if (p->pbm_A.mem_space.flags)
				lowest = p->pbm_A.mem_space.start;
			if (p->pbm_B.mem_space.flags &&
			    lowest > p->pbm_B.mem_space.start)
				lowest = p->pbm_B.mem_space.start;
		}
		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
	} else {
		if (mmap_state == pci_mmap_io) {
			vma->vm_pgoff = (pbm->io_space.start +
					 user_offset) >> PAGE_SHIFT;
		} else {
			vma->vm_pgoff = (pbm->mem_space.start +
					 user_offset) >> PAGE_SHIFT;
		}
	}

	return 0;
}
/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
 * to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long user32 = user_offset & pci_memspace_mask;
	unsigned long largest_base, this_base, addr32;
	int i;

	/* Host bridges get the raw whole-space mapping instead. */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);

	/* Figure out which base address this is for: pick the resource
	 * with the highest base whose 32-bit bus address is still at or
	 * below the user's requested offset.
	 */
	largest_base = 0UL;
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];

		/* Active? */
		if (!rp->flags)
			continue;

		/* Same type? */
		if (i == PCI_ROM_RESOURCE) {
			/* The ROM BAR is always a MEM resource. */
			if (mmap_state != pci_mmap_mem)
				continue;
		} else {
			if ((mmap_state == pci_mmap_io &&
			     (rp->flags & IORESOURCE_IO) == 0) ||
			    (mmap_state == pci_mmap_mem &&
			     (rp->flags & IORESOURCE_MEM) == 0))
				continue;
		}

		this_base = rp->start;

		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;

		if (mmap_state == pci_mmap_io)
			addr32 &= 0xffffff;

		if (addr32 <= user32 && this_base > largest_base)
			largest_base = this_base;
	}

	if (largest_base == 0UL)
		return -EINVAL;

	/* Now construct the final physical address: the resource's high
	 * bits combined with the user's 32-bit bus offset.
	 */
	if (mmap_state == pci_mmap_io)
		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
	else
		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);

	return 0;
}
/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
 * mapping.
 */
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
				 enum pci_mmap_state mmap_state)
{
	/* Device mappings must not be core-dumped or merged/swapped. */
	vma->vm_flags |= (VM_IO | VM_RESERVED);
}
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
				  enum pci_mmap_state mmap_state)
{
	/* Our io_remap_page_range/io_remap_pfn_range takes care of this,
	 * do nothing.
	 */
}
/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
 * for this architecture.  The region in the process to map is described by vm_start
 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int ret;

	/* Translate the user's offset into a physical page offset first. */
	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
	if (ret < 0)
		return ret;

	__pci_mmap_set_flags(dev, vma, mmap_state);
	__pci_mmap_set_pgprot(dev, vma, mmap_state);

	ret = io_remap_pfn_range(vma, vma->vm_start,
				 vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	if (ret)
		return ret;

	vma->vm_flags |= VM_IO;
	return 0;
}
  656. /* Return the domain nuber for this pci bus */
  657. int pci_domain_nr(struct pci_bus *pbus)
  658. {
  659. struct pci_pbm_info *pbm = pbus->sysdata;
  660. int ret;
  661. if (pbm == NULL || pbm->parent == NULL) {
  662. ret = -ENXIO;
  663. } else {
  664. struct pci_controller_info *p = pbm->parent;
  665. ret = p->index;
  666. if (p->pbms_same_domain == 0)
  667. ret = ((ret << 1) +
  668. ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
  669. }
  670. return ret;
  671. }
  672. EXPORT_SYMBOL(pci_domain_nr);
/* Prepare a device for Memory-Write-Invalidate transactions. */
int pcibios_prep_mwi(struct pci_dev *dev)
{
	/* We set correct PCI_CACHE_LINE_SIZE register values for every
	 * device probed on this platform.  So there is nothing to check
	 * and this always succeeds.
	 */
	return 0;
}
  681. #endif /* !(CONFIG_PCI) */