pci.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946
  1. /*
  2. * Copyright IBM Corp. 2012
  3. *
  4. * Author(s):
  5. * Jan Glauber <jang@linux.vnet.ibm.com>
  6. *
  7. * The System z PCI code is a rewrite from a prototype by
  8. * the following people (Kudoz!):
  9. * Alexander Schmidt
  10. * Christoph Raisch
  11. * Hannes Hering
  12. * Hoang-Nam Nguyen
  13. * Jan-Bernd Themann
  14. * Stefan Roscher
  15. * Thomas Klein
  16. */
  17. #define COMPONENT "zPCI"
  18. #define pr_fmt(fmt) COMPONENT ": " fmt
  19. #include <linux/kernel.h>
  20. #include <linux/slab.h>
  21. #include <linux/err.h>
  22. #include <linux/export.h>
  23. #include <linux/delay.h>
  24. #include <linux/irq.h>
  25. #include <linux/kernel_stat.h>
  26. #include <linux/seq_file.h>
  27. #include <linux/pci.h>
  28. #include <linux/msi.h>
  29. #include <asm/isc.h>
  30. #include <asm/airq.h>
  31. #include <asm/facility.h>
  32. #include <asm/pci_insn.h>
  33. #include <asm/pci_clp.h>
  34. #include <asm/pci_dma.h>
  35. #define DEBUG /* enable pr_debug */
  36. #define SIC_IRQ_MODE_ALL 0
  37. #define SIC_IRQ_MODE_SINGLE 1
  38. #define ZPCI_NR_DMA_SPACES 1
  39. #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

/* Forward declarations for the irq_chip callbacks below. */
static void zpci_enable_irq(struct irq_data *data);
static void zpci_disable_irq(struct irq_data *data);

/* irq_chip used for all zPCI MSI/MSI-X interrupts. */
static struct irq_chip zpci_irq_chip = {
	.name = "zPCI",
	.irq_unmask = zpci_enable_irq,
	.irq_mask = zpci_disable_irq,
};

/* Bitmap of allocated PCI domain numbers, one per zpci device. */
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

/* Adapter summary indicator vector and per-device interrupt vectors,
 * indexed by the device's summary bit (aisb). */
static struct airq_iv *zpci_aisb_iv;
static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];

/* Adapter interrupt definitions */
static void zpci_irq_handler(struct airq_struct *airq);

static struct airq_struct zpci_airq = {
	.handler = zpci_irq_handler,
	.isc = PCI_ISC,
};

/* I/O Map */
static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* Slab cache for function measurement blocks (16-byte aligned). */
static struct kmem_cache *zdev_fmb_cache;
  66. struct zpci_dev *get_zdev(struct pci_dev *pdev)
  67. {
  68. return (struct zpci_dev *) pdev->sysdata;
  69. }
  70. struct zpci_dev *get_zdev_by_fid(u32 fid)
  71. {
  72. struct zpci_dev *tmp, *zdev = NULL;
  73. spin_lock(&zpci_list_lock);
  74. list_for_each_entry(tmp, &zpci_list, entry) {
  75. if (tmp->fid == fid) {
  76. zdev = tmp;
  77. break;
  78. }
  79. }
  80. spin_unlock(&zpci_list_lock);
  81. return zdev;
  82. }
  83. static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
  84. {
  85. return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
  86. }
  87. int pci_domain_nr(struct pci_bus *bus)
  88. {
  89. return ((struct zpci_dev *) bus->sysdata)->domain;
  90. }
  91. EXPORT_SYMBOL_GPL(pci_domain_nr);
  92. int pci_proc_domain(struct pci_bus *bus)
  93. {
  94. return pci_domain_nr(bus);
  95. }
  96. EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Modify PCI: Register adapter interruptions */
/*
 * Build a FIB describing the device's adapter interrupt vector and the
 * summary bit within the global summary vector, then issue the
 * register-interrupt mpcifc.  Returns the mpcifc rc, -ENOMEM if no page
 * for the FIB could be allocated.
 */
static int zpci_set_airq(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must reside on a zeroed page. */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->sum = 1;		/* enable summary notifications */
	fib->noi = airq_iv_end(zdev->aibv);	/* number of interrupts */
	fib->aibv = (unsigned long) zdev->aibv->vector;
	fib->aibvo = 0;		/* each zdev has its own interrupt vector */
	/* byte address of the u64 word holding this device's summary bit */
	fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
	fib->aisbo = zdev->aisb & 63;	/* bit offset within that word */

	rc = zpci_mod_fc(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}
/* Argument bundle for mod_pci(); unused fields must be zero. */
struct mod_pci_args {
	u64 base;	/* PCI base address (pba) */
	u64 limit;	/* PCI address limit (pal) */
	u64 iota;	/* I/O address translation anchor */
	u64 fmb_addr;	/* function measurement block address */
};
  124. static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
  125. {
  126. u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
  127. struct zpci_fib *fib;
  128. int rc;
  129. /* The FIB must be available even if it's not used */
  130. fib = (void *) get_zeroed_page(GFP_KERNEL);
  131. if (!fib)
  132. return -ENOMEM;
  133. fib->pba = args->base;
  134. fib->pal = args->limit;
  135. fib->iota = args->iota;
  136. fib->fmb_addr = args->fmb_addr;
  137. rc = zpci_mod_fc(req, fib);
  138. free_page((unsigned long) fib);
  139. return rc;
  140. }
  141. /* Modify PCI: Register I/O address translation parameters */
  142. int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
  143. u64 base, u64 limit, u64 iota)
  144. {
  145. struct mod_pci_args args = { base, limit, iota, 0 };
  146. WARN_ON_ONCE(iota & 0x3fff);
  147. args.iota |= ZPCI_IOTA_RTTO_FLAG;
  148. return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
  149. }
  150. /* Modify PCI: Unregister I/O address translation parameters */
  151. int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
  152. {
  153. struct mod_pci_args args = { 0, 0, 0, 0 };
  154. return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
  155. }
  156. /* Modify PCI: Unregister adapter interruptions */
  157. static int zpci_clear_airq(struct zpci_dev *zdev)
  158. {
  159. struct mod_pci_args args = { 0, 0, 0, 0 };
  160. return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
  161. }
  162. /* Modify PCI: Set PCI function measurement parameters */
  163. int zpci_fmb_enable_device(struct zpci_dev *zdev)
  164. {
  165. struct mod_pci_args args = { 0, 0, 0, 0 };
  166. if (zdev->fmb)
  167. return -EINVAL;
  168. zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
  169. if (!zdev->fmb)
  170. return -ENOMEM;
  171. WARN_ON((u64) zdev->fmb & 0xf);
  172. args.fmb_addr = virt_to_phys(zdev->fmb);
  173. return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
  174. }
  175. /* Modify PCI: Disable PCI function measurement */
  176. int zpci_fmb_disable_device(struct zpci_dev *zdev)
  177. {
  178. struct mod_pci_args args = { 0, 0, 0, 0 };
  179. int rc;
  180. if (!zdev->fmb)
  181. return -EINVAL;
  182. /* Function measurement is disabled if fmb address is zero */
  183. rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
  184. kmem_cache_free(zdev_fmb_cache, zdev->fmb);
  185. zdev->fmb = NULL;
  186. return rc;
  187. }
#define ZPCI_PCIAS_CFGSPC	15

/*
 * Read @len bytes (1/2/4) from the function's config space at @offset.
 * On success the value is right-aligned and byte-swapped into *val;
 * on failure *val is set to all-ones, mimicking a PCI master abort.
 */
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = zpci_load(&data, req, offset);
	if (!rc) {
		/* Shift the len active bytes to the top, then swap them
		 * into host (big-endian) order and truncate to 32 bit. */
		data = data << ((8 - len) * 8);
		data = le64_to_cpu(data);
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}
  203. static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
  204. {
  205. u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
  206. u64 data = val;
  207. int rc;
  208. data = cpu_to_le64(data);
  209. data = data >> ((8 - len) * 8);
  210. rc = zpci_store(data, req, offset);
  211. return rc;
  212. }
/*
 * Mask/unmask one MSI or MSI-X vector.  For MSI-X the vector control
 * word in the table is written directly; for maskable MSI the mask bits
 * in config space are updated.  Returns 1 if a mask was changed, 0 if
 * the interrupt has no mask capability.
 */
static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
	int offset, pos;
	u32 mask_bits;

	if (msi->msi_attrib.is_msix) {
		/* MSI-X: per-vector control dword in the MSI-X table. */
		offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL;
		msi->masked = readl(msi->mask_base + offset);
		writel(flag, msi->mask_base + offset);
	} else if (msi->msi_attrib.maskbit) {
		/* MSI with mask capability: mask_base holds the config
		 * space position of the mask register here. */
		pos = (long) msi->mask_base;
		pci_read_config_dword(msi->dev, pos, &mask_bits);
		mask_bits &= ~(mask);
		mask_bits |= flag & mask;
		pci_write_config_dword(msi->dev, pos, mask_bits);
	} else
		return 0;

	/* NOTE(review): this overwrites the maskbit capability flag with
	 * the current mask state - looks intentional here but verify
	 * against the generic MSI code before relying on it. */
	msi->msi_attrib.maskbit = !!flag;
	return 1;
}
  233. static void zpci_enable_irq(struct irq_data *data)
  234. {
  235. struct msi_desc *msi = irq_get_msi_desc(data->irq);
  236. zpci_msi_set_mask_bits(msi, 1, 0);
  237. }
  238. static void zpci_disable_irq(struct irq_data *data)
  239. {
  240. struct msi_desc *msi = irq_get_msi_desc(data->irq);
  241. zpci_msi_set_mask_bits(msi, 1, 1);
  242. }
/* Architecture hook called after bus scan - nothing to fix up on zPCI. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}
/*
 * Architecture hook for resource alignment.  Returning 0 keeps the
 * proposed resource start unchanged - zPCI imposes no extra alignment.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}
/* combine single writes by using store-block insn */
/* @count is in units of 8-byte words, as zpci_memcpy_toio expects here. */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
/* Create a virtual mapping cookie for a PCI BAR */
/*
 * zPCI has no MMIO address space; instead a synthetic cookie is built
 * from ZPCI_IOMAP_ADDR_BASE plus the iomap table index in bits 48+.
 * The PCI access primitives decode the cookie back into (fh, bar).
 * The @max length argument is accepted for API compatibility but not
 * used here.  Only BARs 0-7 are valid.
 */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Publish the (function handle, bar) pair for this iomap slot. */
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);
/*
 * Tear down a mapping cookie created by pci_iomap(): recover the iomap
 * table index from bits 48+ of the cookie and clear the slot.
 */
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);
  284. static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
  285. int size, u32 *val)
  286. {
  287. struct zpci_dev *zdev = get_zdev_by_bus(bus);
  288. int ret;
  289. if (!zdev || devfn != ZPCI_DEVFN)
  290. ret = -ENODEV;
  291. else
  292. ret = zpci_cfg_load(zdev, where, val, size);
  293. return ret;
  294. }
  295. static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
  296. int size, u32 val)
  297. {
  298. struct zpci_dev *zdev = get_zdev_by_bus(bus);
  299. int ret;
  300. if (!zdev || devfn != ZPCI_DEVFN)
  301. ret = -ENODEV;
  302. else
  303. ret = zpci_cfg_store(zdev, where, val, size);
  304. return ret;
  305. }
/* Config-space accessors for the zPCI root bus. */
static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/*
 * Floating adapter interrupt handler.  Scans the global summary vector
 * for devices with pending interrupts, then each device's own vector
 * for the pending MSI numbers and dispatches the mapped Linux irqs.
 * The summary vector is scanned twice: once with adapter interrupts
 * disabled and, after re-enabling them, once more to close the race
 * with bits set in between.
 */
static void zpci_irq_handler(struct airq_struct *airq)
{
	unsigned long si, ai;
	struct airq_iv *aibv;
	int irqs_on = 0;

	inc_irq_stat(IRQIO_PCI);
	for (si = 0;;) {
		/* Scan adapter summary indicator bit vector */
		si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
		if (si == -1UL) {
			if (irqs_on++)
				/* End of second scan with interrupts on. */
				break;
			/* First scan complete, reenable interrupts. */
			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
			si = 0;
			continue;
		}

		/* Scan the adapter interrupt vector for this device. */
		aibv = zpci_aibv[si];
		for (ai = 0;;) {
			ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
			if (ai == -1UL)
				break;
			inc_irq_stat(IRQIO_MSI);
			/* The per-bit lock serializes against teardown. */
			airq_iv_lock(aibv, ai);
			generic_handle_irq(airq_iv_get_data(aibv, ai));
			airq_iv_unlock(aibv, ai);
		}
	}
}
/*
 * Arch hook to set up MSI/MSI-X interrupts for @pdev.  Allocates a
 * summary bit and a per-device interrupt vector, wires a Linux irq to
 * every msi_desc and finally registers the vector with the hardware.
 * Returns 0 on success, the (smaller) number of vectors actually
 * provided if nvec had to be capped, or a negative error code.  On
 * error every irq allocated so far is rolled back.
 */
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int hwirq, irq, msi_vecs;
	unsigned long aisb;
	struct msi_desc *msi;
	struct msi_msg msg;
	int rc;

	pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	/* Cap the vector count at the hardware and config limits. */
	msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
	msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);

	/* Allocate adapter summary indicator bit */
	rc = -EIO;
	aisb = airq_iv_alloc_bit(zpci_aisb_iv);
	if (aisb == -1UL)
		goto out;
	zdev->aisb = aisb;

	/* Create adapter interrupt vector */
	rc = -ENOMEM;
	zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
	if (!zdev->aibv)
		goto out_si;

	/* Wire up shortcut pointer */
	zpci_aibv[aisb] = zdev->aibv;

	/* Request MSI interrupts */
	hwirq = 0;
	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = -EIO;
		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
		if (irq == NO_IRQ)
			goto out_msi;
		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			goto out_msi;
		irq_set_chip_and_handler(irq, &zpci_irq_chip,
					 handle_simple_irq);
		/* The MSI payload carries the hardware vector number. */
		msg.data = hwirq;
		msg.address_lo = zdev->msi_addr & 0xffffffff;
		msg.address_hi = zdev->msi_addr >> 32;
		write_msi_msg(irq, &msg);
		/* Map hardware vector -> Linux irq for the handler. */
		airq_iv_set_data(zdev->aibv, hwirq, irq);
		hwirq++;
	}

	/* Enable adapter interrupts */
	rc = zpci_set_airq(zdev);
	if (rc)
		goto out_msi;

	return (msi_vecs == nvec) ? 0 : msi_vecs;

out_msi:
	/* Roll back only the hwirq irqs that were set up above. */
	list_for_each_entry(msi, &pdev->msi_list, list) {
		if (hwirq-- == 0)
			break;
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}
	zpci_aibv[aisb] = NULL;
	airq_iv_release(zdev->aibv);
out_si:
	airq_iv_free_bit(zpci_aisb_iv, aisb);
out:
	dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
	return rc;
}
/*
 * Arch hook to tear down MSI/MSI-X interrupts for @pdev: deregister the
 * adapter interrupt vector from the hardware first, then mask and free
 * every Linux irq and release the vector and summary bit.  Bails out
 * early (leaving irqs in place) if the hardware deregistration fails.
 */
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int rc;

	pr_info("%s: on pdev: %p\n", __func__, pdev);

	/* Disable adapter interrupts */
	rc = zpci_clear_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	/* Release MSI interrupts */
	list_for_each_entry(msi, &pdev->msi_list, list) {
		zpci_msi_set_mask_bits(msi, 1, 1);	/* mask before freeing */
		irq_set_msi_desc(msi->irq, NULL);
		irq_free_desc(msi->irq);
		msi->msg.address_lo = 0;
		msi->msg.address_hi = 0;
		msi->msg.data = 0;
		msi->irq = 0;
	}

	zpci_aibv[zdev->aisb] = NULL;
	airq_iv_release(zdev->aibv);
	airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
  436. static void zpci_map_resources(struct zpci_dev *zdev)
  437. {
  438. struct pci_dev *pdev = zdev->pdev;
  439. resource_size_t len;
  440. int i;
  441. for (i = 0; i < PCI_BAR_COUNT; i++) {
  442. len = pci_resource_len(pdev, i);
  443. if (!len)
  444. continue;
  445. pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
  446. pdev->resource[i].end = pdev->resource[i].start + len - 1;
  447. }
  448. }
  449. static void zpci_unmap_resources(struct zpci_dev *zdev)
  450. {
  451. struct pci_dev *pdev = zdev->pdev;
  452. resource_size_t len;
  453. int i;
  454. for (i = 0; i < PCI_BAR_COUNT; i++) {
  455. len = pci_resource_len(pdev, i);
  456. if (!len)
  457. continue;
  458. pci_iounmap(pdev, (void *) pdev->resource[i].start);
  459. }
  460. }
  461. struct zpci_dev *zpci_alloc_device(void)
  462. {
  463. struct zpci_dev *zdev;
  464. /* Alloc memory for our private pci device data */
  465. zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
  466. return zdev ? : ERR_PTR(-ENOMEM);
  467. }
/* Counterpart of zpci_alloc_device(). */
void zpci_free_device(struct zpci_dev *zdev)
{
	kfree(zdev);
}
/* Arch hook: attach the zPCI sysfs attributes to the new PCI device. */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}
/*
 * Register the floating adapter interrupt, create the global summary
 * bit vector and enable adapter interrupts for the PCI ISC.
 */
static int __init zpci_irq_init(void)
{
	int rc;

	rc = register_adapter_interrupt(&zpci_airq);
	if (rc)
		goto out;
	/* Set summary to 1 to be called every time for the ISC. */
	*zpci_airq.lsi_ptr = 1;

	rc = -ENOMEM;
	/* One summary bit per possible device, allocated on demand. */
	zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
	if (!zpci_aisb_iv)
		goto out_airq;

	zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_airq:
	unregister_adapter_interrupt(&zpci_airq);
out:
	return rc;
}
/* Undo zpci_irq_init(): drop the summary vector and the adapter irq. */
static void zpci_irq_exit(void)
{
	airq_iv_release(zpci_aisb_iv);
	unregister_adapter_interrupt(&zpci_airq);
}
  500. static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
  501. unsigned long flags, int domain)
  502. {
  503. struct resource *r;
  504. char *name;
  505. int rc;
  506. r = kzalloc(sizeof(*r), GFP_KERNEL);
  507. if (!r)
  508. return ERR_PTR(-ENOMEM);
  509. r->start = start;
  510. r->end = r->start + size - 1;
  511. r->flags = flags;
  512. r->parent = &iomem_resource;
  513. name = kmalloc(18, GFP_KERNEL);
  514. if (!name) {
  515. kfree(r);
  516. return ERR_PTR(-ENOMEM);
  517. }
  518. sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
  519. r->name = name;
  520. rc = request_resource(&iomem_resource, r);
  521. if (rc)
  522. pr_debug("request resource %pR failed\n", r);
  523. return r;
  524. }
  525. static int zpci_alloc_iomap(struct zpci_dev *zdev)
  526. {
  527. int entry;
  528. spin_lock(&zpci_iomap_lock);
  529. entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
  530. if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
  531. spin_unlock(&zpci_iomap_lock);
  532. return -ENOSPC;
  533. }
  534. set_bit(entry, zpci_iomap);
  535. spin_unlock(&zpci_iomap_lock);
  536. return entry;
  537. }
  538. static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
  539. {
  540. spin_lock(&zpci_iomap_lock);
  541. memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
  542. clear_bit(entry, zpci_iomap);
  543. spin_unlock(&zpci_iomap_lock);
  544. }
  545. int pcibios_add_device(struct pci_dev *pdev)
  546. {
  547. struct zpci_dev *zdev = get_zdev(pdev);
  548. struct resource *res;
  549. int i;
  550. zdev->pdev = pdev;
  551. zpci_map_resources(zdev);
  552. for (i = 0; i < PCI_BAR_COUNT; i++) {
  553. res = &pdev->resource[i];
  554. if (res->parent || !res->flags)
  555. continue;
  556. pci_claim_resource(pdev, i);
  557. }
  558. return 0;
  559. }
/*
 * Arch hook for pci_enable_device(): start debugging/measurement, map
 * the BARs and enable memory decoding.  I/O port BARs are rejected
 * since zPCI is MMIO-only.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct resource *res;
	u16 cmd;
	int i;

	zdev->pdev = pdev;
	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		/* zPCI has no I/O port space. */
		if (res->flags & IORESOURCE_IO)
			return -EINVAL;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}
/* Inverse of pcibios_enable_device(), teardown in reverse order. */
void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	zpci_unmap_resources(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
	zdev->pdev = NULL;
}
/*
 * Build bus resources for every used BAR of @zdev and scan its root
 * bus.  Returns 0 on success or a negative error code.
 */
static int zpci_scan_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		/* Synthetic address matching the pci_iomap() cookie layout. */
		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);

		size = 1UL << zdev->bars[i].size;	/* size is log2 */

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	/* NOTE(review): iomap entries and resources from earlier loop
	 * iterations are not released on the error paths here - confirm
	 * whether cleanup happens elsewhere. */
	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
				      zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}
  626. static int zpci_alloc_domain(struct zpci_dev *zdev)
  627. {
  628. spin_lock(&zpci_domain_lock);
  629. zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
  630. if (zdev->domain == ZPCI_NR_DEVICES) {
  631. spin_unlock(&zpci_domain_lock);
  632. return -ENOSPC;
  633. }
  634. set_bit(zdev->domain, zpci_domain);
  635. spin_unlock(&zpci_domain_lock);
  636. return 0;
  637. }
/* Return the device's domain number to the allocation bitmap. */
static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
  644. int zpci_enable_device(struct zpci_dev *zdev)
  645. {
  646. int rc;
  647. rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
  648. if (rc)
  649. goto out;
  650. pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
  651. rc = zpci_dma_init_device(zdev);
  652. if (rc)
  653. goto out_dma;
  654. zdev->state = ZPCI_FN_STATE_ONLINE;
  655. return 0;
  656. out_dma:
  657. clp_disable_fh(zdev);
  658. out:
  659. return rc;
  660. }
  661. EXPORT_SYMBOL_GPL(zpci_enable_device);
/* Tear down DMA translation and disable the PCI function. */
int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
/*
 * Register a newly detected zpci device: allocate a domain, enable the
 * function if it is already configured, scan its bus, publish it on
 * the global list and create its hotplug slot.  Unwinds every step on
 * failure.
 */
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto out_free;
	}
	rc = zpci_scan_bus(zdev);
	if (rc)
		goto out_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	zpci_init_slot(zdev);
	return 0;

out_disable:
	/* Only disable what zpci_enable_device() actually brought online. */
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
out_free:
	zpci_free_domain(zdev);
out:
	return rc;
}
/* Stop a device that is going away: only DMA teardown is done here. */
void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);
  704. static inline int barsize(u8 size)
  705. {
  706. return (size) ? (1 << size) >> 10 : 0;
  707. }
  708. static int zpci_mem_init(void)
  709. {
  710. zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
  711. 16, 0, NULL);
  712. if (!zdev_fmb_cache)
  713. goto error_zdev;
  714. /* TODO: use realloc */
  715. zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
  716. GFP_KERNEL);
  717. if (!zpci_iomap_start)
  718. goto error_iomap;
  719. return 0;
  720. error_iomap:
  721. kmem_cache_destroy(zdev_fmb_cache);
  722. error_zdev:
  723. return -ENOMEM;
  724. }
/* Undo zpci_mem_init(). */
static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}
  730. static unsigned int s390_pci_probe;
  731. char * __init pcibios_setup(char *str)
  732. {
  733. if (!strcmp(str, "on")) {
  734. s390_pci_probe = 1;
  735. return NULL;
  736. }
  737. return str;
  738. }
/*
 * zPCI subsystem init: bail out silently when PCI support was not
 * requested or the required facilities (2: enhanced DAT?, 69: PCI,
 * 71: AEN, 72: ...) are missing, then bring up debug, memory pools,
 * interrupts, DMA and finally scan for devices via CLP.
 * NOTE(review): the pr_info labels facilities 69/70/71 as PCI/SID/AEN
 * while the guard tests 2/69/71/72 - confirm the intended mapping.
 */
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);
/* Trigger a simple CLP rescan for newly available PCI functions. */
void zpci_rescan(void)
{
	clp_rescan_pci_devices_simple();
}