/*
 * File: msi.c
 * Purpose: PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */
  8. #include <linux/err.h>
  9. #include <linux/mm.h>
  10. #include <linux/irq.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/init.h>
  13. #include <linux/ioport.h>
  14. #include <linux/pci.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/msi.h>
  17. #include <linux/smp.h>
  18. #include <asm/errno.h>
  19. #include <asm/io.h>
  20. #include "pci.h"
  21. #include "msi.h"
  22. static int pci_msi_enable = 1;
  23. /* Arch hooks */
#ifndef arch_msi_check_device
/*
 * Default (weak) arch hook: no architecture-specific restriction, so any
 * MSI/MSI-X request passes the check.  Architectures override this to
 * veto MSI on hardware that cannot support it.
 */
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif
  30. #ifndef arch_setup_msi_irqs
  31. int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  32. {
  33. struct msi_desc *entry;
  34. int ret;
  35. list_for_each_entry(entry, &dev->msi_list, list) {
  36. ret = arch_setup_msi_irq(dev, entry);
  37. if (ret < 0)
  38. return ret;
  39. if (ret > 0)
  40. return -ENOSPC;
  41. }
  42. return 0;
  43. }
  44. #endif
  45. #ifndef arch_teardown_msi_irqs
  46. void arch_teardown_msi_irqs(struct pci_dev *dev)
  47. {
  48. struct msi_desc *entry;
  49. list_for_each_entry(entry, &dev->msi_list, list) {
  50. if (entry->irq != 0)
  51. arch_teardown_msi_irq(entry->irq);
  52. }
  53. }
  54. #endif
  55. static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
  56. {
  57. u16 control;
  58. if (pos) {
  59. pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
  60. control &= ~PCI_MSI_FLAGS_ENABLE;
  61. if (enable)
  62. control |= PCI_MSI_FLAGS_ENABLE;
  63. pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
  64. }
  65. }
  66. static void msi_set_enable(struct pci_dev *dev, int enable)
  67. {
  68. __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
  69. }
  70. static void msix_set_enable(struct pci_dev *dev, int enable)
  71. {
  72. int pos;
  73. u16 control;
  74. pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
  75. if (pos) {
  76. pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
  77. control &= ~PCI_MSIX_FLAGS_ENABLE;
  78. if (enable)
  79. control |= PCI_MSIX_FLAGS_ENABLE;
  80. pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
  81. }
  82. }
  83. static inline __attribute_const__ u32 msi_mask(unsigned x)
  84. {
  85. /* Don't shift by >= width of type */
  86. if (x >= 5)
  87. return 0xffffffff;
  88. return (1 << (1 << x)) - 1;
  89. }
  90. static inline __attribute_const__ u32 msi_capable_mask(u16 control)
  91. {
  92. return msi_mask((control >> 1) & 7);
  93. }
  94. static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
  95. {
  96. return msi_mask((control >> 4) & 7);
  97. }
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * msi_mask_irq() silently does nothing when the device lacks per-vector
 * mask bits (msi_attrib.maskbit is clear).
 */
  107. static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
  108. {
  109. u32 mask_bits = desc->masked;
  110. if (!desc->msi_attrib.maskbit)
  111. return;
  112. mask_bits &= ~mask;
  113. mask_bits |= flag;
  114. pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
  115. desc->masked = mask_bits;
  116. }
  117. /*
  118. * This internal function does not flush PCI writes to the device.
  119. * All users must ensure that they read from the device before either
  120. * assuming that the device state is up to date, or returning out of this
  121. * file. This saves a few milliseconds when initialising devices with lots
  122. * of MSI-X interrupts.
  123. */
  124. static void msix_mask_irq(struct msi_desc *desc, u32 flag)
  125. {
  126. u32 mask_bits = desc->masked;
  127. unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
  128. PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
  129. mask_bits &= ~1;
  130. mask_bits |= flag;
  131. writel(mask_bits, desc->mask_base + offset);
  132. desc->masked = mask_bits;
  133. }
  134. static void msi_set_mask_bit(unsigned irq, u32 flag)
  135. {
  136. struct msi_desc *desc = get_irq_msi(irq);
  137. if (desc->msi_attrib.is_msix) {
  138. msix_mask_irq(desc, flag);
  139. readl(desc->mask_base); /* Flush write to device */
  140. } else {
  141. msi_mask_irq(desc, 1, flag);
  142. }
  143. }
/* irq_chip callback: mask the MSI/MSI-X vector bound to @irq. */
void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}
/* irq_chip callback: unmask the MSI/MSI-X vector bound to @irq. */
void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}
  152. void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
  153. {
  154. struct msi_desc *entry = get_irq_desc_msi(desc);
  155. if (entry->msi_attrib.is_msix) {
  156. void __iomem *base = entry->mask_base +
  157. entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
  158. msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
  159. msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
  160. msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
  161. } else {
  162. struct pci_dev *dev = entry->dev;
  163. int pos = entry->msi_attrib.pos;
  164. u16 data;
  165. pci_read_config_dword(dev, msi_lower_address_reg(pos),
  166. &msg->address_lo);
  167. if (entry->msi_attrib.is_64) {
  168. pci_read_config_dword(dev, msi_upper_address_reg(pos),
  169. &msg->address_hi);
  170. pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
  171. } else {
  172. msg->address_hi = 0;
  173. pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
  174. }
  175. msg->data = data;
  176. }
  177. }
  178. void read_msi_msg(unsigned int irq, struct msi_msg *msg)
  179. {
  180. struct irq_desc *desc = irq_to_desc(irq);
  181. read_msi_msg_desc(desc, msg);
  182. }
  183. void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
  184. {
  185. struct msi_desc *entry = get_irq_desc_msi(desc);
  186. if (entry->msi_attrib.is_msix) {
  187. void __iomem *base;
  188. base = entry->mask_base +
  189. entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
  190. writel(msg->address_lo,
  191. base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
  192. writel(msg->address_hi,
  193. base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
  194. writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
  195. } else {
  196. struct pci_dev *dev = entry->dev;
  197. int pos = entry->msi_attrib.pos;
  198. pci_write_config_dword(dev, msi_lower_address_reg(pos),
  199. msg->address_lo);
  200. if (entry->msi_attrib.is_64) {
  201. pci_write_config_dword(dev, msi_upper_address_reg(pos),
  202. msg->address_hi);
  203. pci_write_config_word(dev, msi_data_reg(pos, 1),
  204. msg->data);
  205. } else {
  206. pci_write_config_word(dev, msi_data_reg(pos, 0),
  207. msg->data);
  208. }
  209. }
  210. entry->msg = *msg;
  211. }
  212. void write_msi_msg(unsigned int irq, struct msi_msg *msg)
  213. {
  214. struct irq_desc *desc = irq_to_desc(irq);
  215. write_msi_msg_desc(desc, msg);
  216. }
  217. static int msi_free_irqs(struct pci_dev* dev);
  218. static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
  219. {
  220. struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
  221. if (!desc)
  222. return NULL;
  223. INIT_LIST_HEAD(&desc->list);
  224. desc->dev = dev;
  225. return desc;
  226. }
  227. static void pci_intx_for_msi(struct pci_dev *dev, int enable)
  228. {
  229. if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
  230. pci_intx(dev, enable);
  231. }
/*
 * Re-apply the cached MSI configuration after a device reset or resume.
 * Config space has been wiped, so the message, the per-vector mask and
 * the enable/queue-size bits must all be rewritten from the cached
 * msi_desc state.  No-op unless MSI is currently enabled on the device.
 * The ordering (INTx off, MSI disabled, message written, then enable)
 * is deliberate; do not reorder.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	/* Single-MSI: the descriptor hangs off dev->irq */
	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	/* Keep INTx off and MSI disabled while the message is rewritten */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);

	/* Restore the saved mask bits, then re-enable with QSIZE=0 (1 vector) */
	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
/*
 * Re-apply cached MSI-X state after reset/resume: rewrite each vector's
 * message and mask bit, then re-enable MSI-X in the capability register.
 * No-op unless MSI-X is currently enabled on the device.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	/* Re-program every vector from its cached message and mask state */
	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msix_mask_irq(entry, entry->masked);
	}

	/* All descriptors share one capability position; use the first */
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
/**
 * pci_restore_msi_state - re-apply cached MSI/MSI-X state to the hardware
 * @dev: the PCI device being restored (after reset or resume)
 *
 * Each helper is a no-op unless its mode is enabled on @dev.
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI irq, regardless of whether the function is capable of
 * handling multiple messages.  Returns 0 when entry zero has been
 * configured with the new MSI irq, or a negative errno on failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;
	unsigned mask;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* MSI Entry Initialization */
	entry = alloc_msi_entry(dev);
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.is_msix = 0;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;

	entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
	/* All MSIs are unmasked by default, Mask them all */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
	mask = msi_capable_mask(control);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure; frees the entry on failure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	/* From now on dev->irq is the MSI vector, not the INTx pin irq */
	dev->irq = entry->irq;
	return 0;
}
  326. /**
  327. * msix_capability_init - configure device's MSI-X capability
  328. * @dev: pointer to the pci_dev data structure of MSI-X device function
  329. * @entries: pointer to an array of struct msix_entry entries
  330. * @nvec: number of @entries
  331. *
  332. * Setup the MSI-X capability structure of device function with a
  333. * single MSI-X irq. A return of zero indicates the successful setup of
  334. * requested MSI-X entries with allocated irqs or non-zero for otherwise.
  335. **/
  336. static int msix_capability_init(struct pci_dev *dev,
  337. struct msix_entry *entries, int nvec)
  338. {
  339. struct msi_desc *entry;
  340. int pos, i, j, nr_entries, ret;
  341. unsigned long phys_addr;
  342. u32 table_offset;
  343. u16 control;
  344. u8 bir;
  345. void __iomem *base;
  346. msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
  347. pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
  348. /* Request & Map MSI-X table region */
  349. pci_read_config_word(dev, msi_control_reg(pos), &control);
  350. nr_entries = multi_msix_capable(control);
  351. pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
  352. bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
  353. table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
  354. phys_addr = pci_resource_start (dev, bir) + table_offset;
  355. base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
  356. if (base == NULL)
  357. return -ENOMEM;
  358. /* MSI-X Table Initialization */
  359. for (i = 0; i < nvec; i++) {
  360. entry = alloc_msi_entry(dev);
  361. if (!entry)
  362. break;
  363. j = entries[i].entry;
  364. entry->msi_attrib.is_msix = 1;
  365. entry->msi_attrib.is_64 = 1;
  366. entry->msi_attrib.entry_nr = j;
  367. entry->msi_attrib.default_irq = dev->irq;
  368. entry->msi_attrib.pos = pos;
  369. entry->mask_base = base;
  370. entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
  371. PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
  372. msix_mask_irq(entry, 1);
  373. list_add_tail(&entry->list, &dev->msi_list);
  374. }
  375. ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
  376. if (ret < 0) {
  377. /* If we had some success report the number of irqs
  378. * we succeeded in setting up. */
  379. int avail = 0;
  380. list_for_each_entry(entry, &dev->msi_list, list) {
  381. if (entry->irq != 0) {
  382. avail++;
  383. }
  384. }
  385. if (avail != 0)
  386. ret = avail;
  387. }
  388. if (ret) {
  389. msi_free_irqs(dev);
  390. return ret;
  391. }
  392. i = 0;
  393. list_for_each_entry(entry, &dev->msi_list, list) {
  394. entries[i].vector = entry->irq;
  395. set_irq_msi(entry->irq, entry);
  396. i++;
  397. }
  398. /* Set MSI-X enabled bits */
  399. pci_intx_for_msi(dev, 0);
  400. msix_set_enable(dev, 1);
  401. dev->msix_enabled = 1;
  402. return 0;
  403. }
  404. /**
  405. * pci_msi_check_device - check whether MSI may be enabled on a device
  406. * @dev: pointer to the pci_dev data structure of MSI device function
  407. * @nvec: how many MSIs have been requested ?
  408. * @type: are we checking for MSI or MSI-X ?
  409. *
  410. * Look at global flags, the device itself, and its parent busses
  411. * to determine if MSI/-X are supported for the device. If MSI/-X is
  412. * supported return 0, else return an error code.
  413. **/
  414. static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
  415. {
  416. struct pci_bus *bus;
  417. int ret;
  418. /* MSI must be globally enabled and supported by the device */
  419. if (!pci_msi_enable || !dev || dev->no_msi)
  420. return -EINVAL;
  421. /*
  422. * You can't ask to have 0 or less MSIs configured.
  423. * a) it's stupid ..
  424. * b) the list manipulation code assumes nvec >= 1.
  425. */
  426. if (nvec < 1)
  427. return -ERANGE;
  428. /* Any bridge which does NOT route MSI transactions from it's
  429. * secondary bus to it's primary bus must set NO_MSI flag on
  430. * the secondary pci_bus.
  431. * We expect only arch-specific PCI host bus controller driver
  432. * or quirks for specific PCI bridges to be setting NO_MSI.
  433. */
  434. for (bus = dev->bus; bus; bus = bus->parent)
  435. if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
  436. return -EINVAL;
  437. ret = arch_msi_check_device(dev, nvec, type);
  438. if (ret)
  439. return ret;
  440. if (!pci_find_capability(dev, type))
  441. return -EINVAL;
  442. return 0;
  443. }
  444. /**
  445. * pci_enable_msi - configure device's MSI capability structure
  446. * @dev: pointer to the pci_dev data structure of MSI device function
  447. *
  448. * Setup the MSI capability structure of device function with
  449. * a single MSI irq upon its software driver call to request for
  450. * MSI mode enabled on its hardware device function. A return of zero
  451. * indicates the successful setup of an entry zero with the new MSI
  452. * irq or non-zero for otherwise.
  453. **/
  454. int pci_enable_msi(struct pci_dev* dev)
  455. {
  456. int status;
  457. status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
  458. if (status)
  459. return status;
  460. WARN_ON(!!dev->msi_enabled);
  461. /* Check whether driver already requested for MSI-X irqs */
  462. if (dev->msix_enabled) {
  463. dev_info(&dev->dev, "can't enable MSI "
  464. "(MSI-X already enabled)\n");
  465. return -EINVAL;
  466. }
  467. status = msi_capability_init(dev);
  468. return status;
  469. }
  470. EXPORT_SYMBOL(pci_enable_msi);
/*
 * Disable MSI on @dev without freeing the msi_desc entries: turn MSI
 * off in config space, re-enable INTx, unmask the vectors (the
 * device's power-on state) and put dev->irq back to the default
 * pin-assertion irq.  Used where the irqs must remain allocated.
 */
void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;
	u16 ctrl;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
	/* Return the device with MSI vectors unmasked, its initial state */
	pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
	mask = msi_capable_mask(ctrl);
	msi_mask_irq(desc, mask, ~mask);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = desc->msi_attrib.default_irq;
}
  489. void pci_disable_msi(struct pci_dev* dev)
  490. {
  491. struct msi_desc *entry;
  492. if (!pci_msi_enable || !dev || !dev->msi_enabled)
  493. return;
  494. pci_msi_shutdown(dev);
  495. entry = list_entry(dev->msi_list.next, struct msi_desc, list);
  496. if (entry->msi_attrib.is_msix)
  497. return;
  498. msi_free_irqs(dev);
  499. }
  500. EXPORT_SYMBOL(pci_disable_msi);
/*
 * Free every MSI/MSI-X descriptor attached to @dev.  Callers must have
 * released all irq handlers first (BUG otherwise).  Tears down the arch
 * irq mappings, then for MSI-X masks each vector in the hardware table
 * and unmaps the table when freeing the final entry.
 */
static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			/* Mask the vector in the hardware table */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* All entries share mask_base; unmap it exactly once */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}

		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
  522. /**
  523. * pci_msix_table_size - return the number of device's MSI-X table entries
  524. * @dev: pointer to the pci_dev data structure of MSI-X device function
  525. */
  526. int pci_msix_table_size(struct pci_dev *dev)
  527. {
  528. int pos;
  529. u16 control;
  530. pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
  531. if (!pos)
  532. return 0;
  533. pci_read_config_word(dev, msi_control_reg(pos), &control);
  534. return multi_msix_capable(control);
  535. }
  536. /**
  537. * pci_enable_msix - configure device's MSI-X capability structure
  538. * @dev: pointer to the pci_dev data structure of MSI-X device function
  539. * @entries: pointer to an array of MSI-X entries
  540. * @nvec: number of MSI-X irqs requested for allocation by device driver
  541. *
  542. * Setup the MSI-X capability structure of device function with the number
  543. * of requested irqs upon its software driver call to request for
  544. * MSI-X mode enabled on its hardware device function. A return of zero
  545. * indicates the successful configuration of MSI-X capability structure
  546. * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
  547. * Or a return of > 0 indicates that driver request is exceeding the number
  548. * of irqs available. Driver should use the returned value to re-send
  549. * its request.
  550. **/
  551. int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
  552. {
  553. int status, nr_entries;
  554. int i, j;
  555. if (!entries)
  556. return -EINVAL;
  557. status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
  558. if (status)
  559. return status;
  560. nr_entries = pci_msix_table_size(dev);
  561. if (nvec > nr_entries)
  562. return -EINVAL;
  563. /* Check for any invalid entries */
  564. for (i = 0; i < nvec; i++) {
  565. if (entries[i].entry >= nr_entries)
  566. return -EINVAL; /* invalid entry */
  567. for (j = i + 1; j < nvec; j++) {
  568. if (entries[i].entry == entries[j].entry)
  569. return -EINVAL; /* duplicate entry */
  570. }
  571. }
  572. WARN_ON(!!dev->msix_enabled);
  573. /* Check whether driver already requested for MSI irq */
  574. if (dev->msi_enabled) {
  575. dev_info(&dev->dev, "can't enable MSI-X "
  576. "(MSI IRQ already assigned)\n");
  577. return -EINVAL;
  578. }
  579. status = msix_capability_init(dev, entries, nvec);
  580. return status;
  581. }
  582. EXPORT_SYMBOL(pci_enable_msix);
/* Thin wrapper: MSI-X teardown shares the generic free path. */
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}
  587. void pci_msix_shutdown(struct pci_dev* dev)
  588. {
  589. if (!pci_msi_enable || !dev || !dev->msix_enabled)
  590. return;
  591. msix_set_enable(dev, 0);
  592. pci_intx_for_msi(dev, 1);
  593. dev->msix_enabled = 0;
  594. }
  595. void pci_disable_msix(struct pci_dev* dev)
  596. {
  597. if (!pci_msi_enable || !dev || !dev->msix_enabled)
  598. return;
  599. pci_msix_shutdown(dev);
  600. msix_free_all_irqs(dev);
  601. }
  602. EXPORT_SYMBOL(pci_disable_msix);
  603. /**
  604. * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
  605. * @dev: pointer to the pci_dev data structure of MSI(X) device function
  606. *
  607. * Being called during hotplug remove, from which the device function
  608. * is hot-removed. All previous assigned MSI/MSI-X irqs, if
  609. * allocated for this device function, are reclaimed to unused state,
  610. * which may be used later on.
  611. **/
  612. void msi_remove_pci_irq_vectors(struct pci_dev* dev)
  613. {
  614. if (!pci_msi_enable || !dev)
  615. return;
  616. if (dev->msi_enabled)
  617. msi_free_irqs(dev);
  618. if (dev->msix_enabled)
  619. msix_free_all_irqs(dev);
  620. }
/* Globally disable MSI support (e.g. for the "pci=nomsi" boot option). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);
/* Per-device MSI init: start with an empty MSI descriptor list. */
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}