msi.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799
  1. /*
  2. * File: msi.c
  3. * Purpose: PCI Message Signaled Interrupt (MSI)
  4. *
  5. * Copyright (C) 2003-2004 Intel
  6. * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
  7. */
  8. #include <linux/err.h>
  9. #include <linux/mm.h>
  10. #include <linux/irq.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/init.h>
  13. #include <linux/ioport.h>
  14. #include <linux/pci.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/msi.h>
  17. #include <linux/smp.h>
  18. #include <asm/errno.h>
  19. #include <asm/io.h>
  20. #include "pci.h"
  21. #include "msi.h"
  22. static int pci_msi_enable = 1;
  23. /* Arch hooks */
  24. int __attribute__ ((weak))
  25. arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
  26. {
  27. return 0;
  28. }
  29. int __attribute__ ((weak))
  30. arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
  31. {
  32. return 0;
  33. }
  34. int __attribute__ ((weak))
  35. arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  36. {
  37. struct msi_desc *entry;
  38. int ret;
  39. list_for_each_entry(entry, &dev->msi_list, list) {
  40. ret = arch_setup_msi_irq(dev, entry);
  41. if (ret)
  42. return ret;
  43. }
  44. return 0;
  45. }
  46. void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
  47. {
  48. return;
  49. }
  50. void __attribute__ ((weak))
  51. arch_teardown_msi_irqs(struct pci_dev *dev)
  52. {
  53. struct msi_desc *entry;
  54. list_for_each_entry(entry, &dev->msi_list, list) {
  55. if (entry->irq != 0)
  56. arch_teardown_msi_irq(entry->irq);
  57. }
  58. }
  59. static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
  60. {
  61. u16 control;
  62. if (pos) {
  63. pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
  64. control &= ~PCI_MSI_FLAGS_ENABLE;
  65. if (enable)
  66. control |= PCI_MSI_FLAGS_ENABLE;
  67. pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
  68. }
  69. }
  70. static void msi_set_enable(struct pci_dev *dev, int enable)
  71. {
  72. __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
  73. }
  74. static void msix_set_enable(struct pci_dev *dev, int enable)
  75. {
  76. int pos;
  77. u16 control;
  78. pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
  79. if (pos) {
  80. pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
  81. control &= ~PCI_MSIX_FLAGS_ENABLE;
  82. if (enable)
  83. control |= PCI_MSIX_FLAGS_ENABLE;
  84. pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
  85. }
  86. }
  87. static inline __attribute_const__ u32 msi_mask(unsigned x)
  88. {
  89. /* Don't shift by >= width of type */
  90. if (x >= 5)
  91. return 0xffffffff;
  92. return (1 << (1 << x)) - 1;
  93. }
/*
 * Flush a preceding posted MMIO write to the MSI-X vector table by
 * reading the entry back.  For plain MSI the mask lives in config space,
 * where writes are non-posted, so there is nothing to flush.
 */
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		/* read back to push the posted write out to the device */
		readl(entry->mask_base + offset);
		break;
	}
	default:
		/* descriptor type can only be MSI or MSI-X */
		BUG();
		break;
	}
}
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, mask_base holds the config-space offset of
			 * the mask register, not an MMIO pointer (set up in
			 * msi_capability_init()). */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			/* update only the bits selected by @mask */
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			return 0;	/* no per-vector masking available */
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		/* read back to flush the posted write */
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	/* remember mask state so it can be replayed on restore */
	entry->msi_attrib.masked = !!flag;
	return 1;
}
/*
 * Read back the message (address/data pair) currently programmed for the
 * entry behind @desc: from config space for MSI, from the memory-mapped
 * vector table for MSI-X.
 */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);

	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capability: data register sits higher up */
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
  194. void read_msi_msg(unsigned int irq, struct msi_msg *msg)
  195. {
  196. struct irq_desc *desc = irq_to_desc(irq);
  197. read_msi_msg_desc(desc, msg);
  198. }
/*
 * Program a new message for the entry behind @desc and cache a copy in
 * entry->msg so it can be replayed by pci_restore_msi_state().  MSI goes
 * through config space, MSI-X through the memory-mapped vector table.
 */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	/* keep a shadow copy for state restore after reset/resume */
	entry->msg = *msg;
}
  237. void write_msi_msg(unsigned int irq, struct msi_msg *msg)
  238. {
  239. struct irq_desc *desc = irq_to_desc(irq);
  240. write_msi_msg_desc(desc, msg);
  241. }
  242. void mask_msi_irq(unsigned int irq)
  243. {
  244. struct irq_desc *desc = irq_to_desc(irq);
  245. msi_set_mask_bits(desc, 1, 1);
  246. msix_flush_writes(desc);
  247. }
  248. void unmask_msi_irq(unsigned int irq)
  249. {
  250. struct irq_desc *desc = irq_to_desc(irq);
  251. msi_set_mask_bits(desc, 1, 0);
  252. msix_flush_writes(desc);
  253. }
  254. static int msi_free_irqs(struct pci_dev* dev);
  255. static struct msi_desc* alloc_msi_entry(void)
  256. {
  257. struct msi_desc *entry;
  258. entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
  259. if (!entry)
  260. return NULL;
  261. INIT_LIST_HEAD(&entry->list);
  262. entry->irq = 0;
  263. entry->dev = NULL;
  264. return entry;
  265. }
  266. static void pci_intx_for_msi(struct pci_dev *dev, int enable)
  267. {
  268. if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
  269. pci_intx(dev, enable);
  270. }
/*
 * Re-arm plain MSI after a device reset/resume: replay the cached message,
 * restore per-vector mask state and re-set the enable bit, all while
 * keeping INTx routed away.  No-op unless MSI was enabled beforehand.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	/* disable first so the device can't fire a half-programmed message */
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				entry->msi_attrib.masked);
	}

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	/* clear multiple-message enable (single vector) and re-enable MSI */
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
/*
 * Re-arm MSI-X after a device reset/resume: replay each vector's cached
 * message and mask state, then re-enable the capability.  No-op unless
 * MSI-X was enabled beforehand.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	/* all entries share one capability position; use the first */
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
  316. void pci_restore_msi_state(struct pci_dev *dev)
  317. {
  318. __pci_restore_msi_state(dev);
  319. __pci_restore_msix_state(dev);
  320. }
  321. EXPORT_SYMBOL_GPL(pci_restore_msi_state);
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI irq, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates the successful
 * setup of an entry zero with the new MSI irq or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int base, maskbits, temp;

		/* For MSI, mask_base stores the config-space offset of the
		 * mask register rather than an MMIO pointer (consumed by
		 * msi_set_mask_bits()). */
		base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
		entry->mask_base = (void __iomem *)(long)base;

		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev, base, &maskbits);
		/* mask width = number of vectors the device may request
		 * (Multiple Message Capable field, log2-encoded) */
		temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1);
		maskbits |= temp;
		pci_write_config_dword(dev, base, maskbits);
		entry->msi_attrib.maskbits_mask = temp;
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	/* locate the vector table: BAR index + offset inside that BAR */
	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			/* arch_setup_msi_irqs() below sees fewer entries */
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;	/* one mapping, shared by all */

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		/* count how many vectors did get an irq assigned */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		msi_free_irqs(dev);

		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	/* publish the allocated irq numbers back to the caller's array */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}
/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested ?
 * @type: are we checking for MSI or MSI-X ?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	/* give the platform a chance to veto */
	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	/* finally, the capability itself must be present */
	if (!pci_find_capability(dev, type))
		return -EINVAL;

	return 0;
}
  493. /**
  494. * pci_enable_msi - configure device's MSI capability structure
  495. * @dev: pointer to the pci_dev data structure of MSI device function
  496. *
  497. * Setup the MSI capability structure of device function with
  498. * a single MSI irq upon its software driver call to request for
  499. * MSI mode enabled on its hardware device function. A return of zero
  500. * indicates the successful setup of an entry zero with the new MSI
  501. * irq or non-zero for otherwise.
  502. **/
  503. int pci_enable_msi(struct pci_dev* dev)
  504. {
  505. int status;
  506. status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
  507. if (status)
  508. return status;
  509. WARN_ON(!!dev->msi_enabled);
  510. /* Check whether driver already requested for MSI-X irqs */
  511. if (dev->msix_enabled) {
  512. dev_info(&dev->dev, "can't enable MSI "
  513. "(MSI-X already enabled)\n");
  514. return -EINVAL;
  515. }
  516. status = msi_capability_init(dev);
  517. return status;
  518. }
  519. EXPORT_SYMBOL(pci_enable_msi);
/*
 * Quiesce MSI on @dev without freeing the vectors: disable the capability,
 * re-route INTx, unmask all vectors and restore the original pin irq.
 * pci_disable_msi() calls this before releasing the descriptors.
 */
void pci_msi_shutdown(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Leave the device with its MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		struct irq_desc *desc = irq_to_desc(dev->irq);
		/* flag = ~mask clears every bit selected by mask */
		msi_set_mask_bits(desc, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}
  541. void pci_disable_msi(struct pci_dev* dev)
  542. {
  543. struct msi_desc *entry;
  544. if (!pci_msi_enable || !dev || !dev->msi_enabled)
  545. return;
  546. pci_msi_shutdown(dev);
  547. entry = list_entry(dev->msi_list.next, struct msi_desc, list);
  548. if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
  549. return;
  550. msi_free_irqs(dev);
  551. }
  552. EXPORT_SYMBOL(pci_disable_msi);
/*
 * Release every MSI/MSI-X descriptor on @dev: verify no handler is still
 * attached, let the arch tear the vectors down, then mask and free the
 * entries, unmapping the MSI-X table along with the last one.
 */
static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		/* freeing a vector with a live handler is a driver bug */
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			/* mask the vector in the table before dropping it */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* all entries share one ioremap of the table */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested irqs upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of irqs available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries)
		return -EINVAL;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
	if (status)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries: out of range or duplicated
	 * (O(nvec^2), but nvec is small) */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X "
			"(MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msix);
  624. static void msix_free_all_irqs(struct pci_dev *dev)
  625. {
  626. msi_free_irqs(dev);
  627. }
  628. void pci_msix_shutdown(struct pci_dev* dev)
  629. {
  630. if (!pci_msi_enable || !dev || !dev->msix_enabled)
  631. return;
  632. msix_set_enable(dev, 0);
  633. pci_intx_for_msi(dev, 1);
  634. dev->msix_enabled = 0;
  635. }
  636. void pci_disable_msix(struct pci_dev* dev)
  637. {
  638. if (!pci_msi_enable || !dev || !dev->msix_enabled)
  639. return;
  640. pci_msix_shutdown(dev);
  641. msix_free_all_irqs(dev);
  642. }
  643. EXPORT_SYMBOL(pci_disable_msix);
  644. /**
  645. * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
  646. * @dev: pointer to the pci_dev data structure of MSI(X) device function
  647. *
  648. * Being called during hotplug remove, from which the device function
  649. * is hot-removed. All previous assigned MSI/MSI-X irqs, if
  650. * allocated for this device function, are reclaimed to unused state,
  651. * which may be used later on.
  652. **/
  653. void msi_remove_pci_irq_vectors(struct pci_dev* dev)
  654. {
  655. if (!pci_msi_enable || !dev)
  656. return;
  657. if (dev->msi_enabled)
  658. msi_free_irqs(dev);
  659. if (dev->msix_enabled)
  660. msix_free_all_irqs(dev);
  661. }
  662. void pci_no_msi(void)
  663. {
  664. pci_msi_enable = 0;
  665. }
  666. /**
  667. * pci_msi_enabled - is MSI enabled?
  668. *
  669. * Returns true if MSI has not been disabled by the command-line option
  670. * pci=nomsi.
  671. **/
  672. int pci_msi_enabled(void)
  673. {
  674. return pci_msi_enable;
  675. }
  676. EXPORT_SYMBOL(pci_msi_enabled);
  677. void pci_msi_init_pci_dev(struct pci_dev *dev)
  678. {
  679. INIT_LIST_HEAD(&dev->msi_list);
  680. }