/* drivers/pci/msi.c */
  1. /*
  2. * File: msi.c
  3. * Purpose: PCI Message Signaled Interrupt (MSI)
  4. *
  5. * Copyright (C) 2003-2004 Intel
  6. * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
  7. */
  8. #include <linux/err.h>
  9. #include <linux/mm.h>
  10. #include <linux/irq.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/init.h>
  13. #include <linux/ioport.h>
  14. #include <linux/pci.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/msi.h>
  17. #include <linux/smp.h>
  18. #include <asm/errno.h>
  19. #include <asm/io.h>
  20. #include "pci.h"
  21. #include "msi.h"
/* Global MSI on/off switch; cleared by pci_no_msi() to disable MSI system-wide. */
static int pci_msi_enable = 1;

/* Arch hooks */
/*
 * Default (weak) arch hook: imposes no architecture-specific restriction,
 * so every device passes.  Architectures override this to veto MSI on
 * particular devices or vector counts.
 */
int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
/*
 * Default (weak) arch hook to set up a single MSI irq for @entry.
 * The generic stub does nothing and reports success; architectures
 * provide the real vector allocation and programming.
 */
int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}
  34. int __attribute__ ((weak))
  35. arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  36. {
  37. struct msi_desc *entry;
  38. int ret;
  39. list_for_each_entry(entry, &dev->msi_list, list) {
  40. ret = arch_setup_msi_irq(dev, entry);
  41. if (ret)
  42. return ret;
  43. }
  44. return 0;
  45. }
  46. void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
  47. {
  48. return;
  49. }
  50. void __attribute__ ((weak))
  51. arch_teardown_msi_irqs(struct pci_dev *dev)
  52. {
  53. struct msi_desc *entry;
  54. list_for_each_entry(entry, &dev->msi_list, list) {
  55. if (entry->irq != 0)
  56. arch_teardown_msi_irq(entry->irq);
  57. }
  58. }
  59. static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
  60. {
  61. u16 control;
  62. if (pos) {
  63. pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
  64. control &= ~PCI_MSI_FLAGS_ENABLE;
  65. if (enable)
  66. control |= PCI_MSI_FLAGS_ENABLE;
  67. pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
  68. }
  69. }
  70. static void msi_set_enable(struct pci_dev *dev, int enable)
  71. {
  72. __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
  73. }
  74. static void msix_set_enable(struct pci_dev *dev, int enable)
  75. {
  76. int pos;
  77. u16 control;
  78. pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
  79. if (pos) {
  80. pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
  81. control &= ~PCI_MSIX_FLAGS_ENABLE;
  82. if (enable)
  83. control |= PCI_MSIX_FLAGS_ENABLE;
  84. pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
  85. }
  86. }
/*
 * msix_flush_writes - force out posted writes to an MSI-X table entry
 * @desc: irq descriptor whose msi_desc identifies the vector
 *
 * For MSI-X, reading back the vector-control word pushes any posted
 * writes through to the device's table.  Plain MSI lives in config
 * space, which is not posted, so there is nothing to do.
 */
static void msix_flush_writes(struct irq_desc *desc)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		/* nothing to do */
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		/* read-back flushes the posted write */
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 *
 * Returns 1 if it succeeded in masking the interrupt and 0 if the device
 * doesn't support MSI masking.
 */
static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag)
{
	struct msi_desc *entry;

	entry = get_irq_desc_msi(desc);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, mask_base stores the config-space offset
			 * of the mask register, not an iomem pointer
			 * (set up in msi_capability_init()). */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			/* replace the bits selected by @mask with @flag */
			mask_bits &= ~(mask);
			mask_bits |= flag & mask;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			/* device has no per-vector mask bits */
			return 0;
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		/* read back to flush the posted write */
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	/* cache the mask state for restore paths */
	entry->msi_attrib.masked = !!flag;
	return 1;
}
/*
 * read_msi_msg_desc - read the MSI message currently programmed for @desc
 * @desc: irq descriptor (must carry an msi_desc)
 * @msg:  filled with the device's address/data pair
 *
 * MSI reads the message from config space; MSI-X reads it from the
 * memory-mapped vector table entry.
 */
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capability: data register follows the
			 * upper address dword */
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
  187. void read_msi_msg(unsigned int irq, struct msi_msg *msg)
  188. {
  189. struct irq_desc *desc = irq_to_desc(irq);
  190. read_msi_msg_desc(desc, msg);
  191. }
/*
 * write_msi_msg_desc - program a new MSI message for @desc
 * @desc: irq descriptor (must carry an msi_desc)
 * @msg:  address/data pair to program into the device
 *
 * MSI writes the message into config space; MSI-X writes it into the
 * memory-mapped vector table.  The message is also cached in the
 * msi_desc so restore paths can replay it.
 */
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_desc_msi(desc);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capability: data register follows the
			 * upper address dword */
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	/* keep a copy for suspend/resume restore */
	entry->msg = *msg;
}
  230. void write_msi_msg(unsigned int irq, struct msi_msg *msg)
  231. {
  232. struct irq_desc *desc = irq_to_desc(irq);
  233. write_msi_msg_desc(desc, msg);
  234. }
  235. void mask_msi_irq(unsigned int irq)
  236. {
  237. struct irq_desc *desc = irq_to_desc(irq);
  238. msi_set_mask_bits(desc, 1, 1);
  239. msix_flush_writes(desc);
  240. }
  241. void unmask_msi_irq(unsigned int irq)
  242. {
  243. struct irq_desc *desc = irq_to_desc(irq);
  244. msi_set_mask_bits(desc, 1, 0);
  245. msix_flush_writes(desc);
  246. }
  247. static int msi_free_irqs(struct pci_dev* dev);
  248. static struct msi_desc* alloc_msi_entry(void)
  249. {
  250. struct msi_desc *entry;
  251. entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
  252. if (!entry)
  253. return NULL;
  254. INIT_LIST_HEAD(&entry->list);
  255. entry->irq = 0;
  256. entry->dev = NULL;
  257. return entry;
  258. }
  259. static void pci_intx_for_msi(struct pci_dev *dev, int enable)
  260. {
  261. if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
  262. pci_intx(dev, enable);
  263. }
/*
 * __pci_restore_msi_state - re-program MSI after the device lost config state
 * @dev: device being restored
 *
 * No-op unless MSI was enabled.  Disables INTx and MSI, replays the
 * saved message and mask bits, then re-enables MSI via the control word.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	/* replay the cached address/data pair */
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit) {
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask,
				  entry->msi_attrib.masked);
	}

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	/* clear the multiple-message field and set the enable bit */
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
/*
 * __pci_restore_msix_state - re-program the MSI-X table after lost state
 * @dev: device being restored
 *
 * No-op unless MSI-X was enabled.  Rewrites every vector's message and
 * mask bit from the saved copies, then re-enables MSI-X.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* route the table */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		struct irq_desc *desc = irq_to_desc(entry->irq);
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bits(desc, 1, entry->msi_attrib.masked);
	}

	BUG_ON(list_empty(&dev->msi_list));
	/* the capability offset is the same for every entry; use the first */
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
/*
 * pci_restore_msi_state - restore MSI and MSI-X configuration on @dev
 *
 * Re-programs whichever of MSI / MSI-X was enabled before the device
 * lost its config state (each helper is a no-op otherwise).
 */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
  315. /**
  316. * msi_capability_init - configure device's MSI capability structure
  317. * @dev: pointer to the pci_dev data structure of MSI device function
  318. *
  319. * Setup the MSI capability structure of device function with a single
  320. * MSI irq, regardless of device function is capable of handling
  321. * multiple messages. A return of zero indicates the successful setup
  322. * of an entry zero with the new MSI irq or non-zero for otherwise.
  323. **/
  324. static int msi_capability_init(struct pci_dev *dev)
  325. {
  326. struct msi_desc *entry;
  327. int pos, ret;
  328. u16 control;
  329. msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
  330. pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
  331. pci_read_config_word(dev, msi_control_reg(pos), &control);
  332. /* MSI Entry Initialization */
  333. entry = alloc_msi_entry();
  334. if (!entry)
  335. return -ENOMEM;
  336. entry->msi_attrib.type = PCI_CAP_ID_MSI;
  337. entry->msi_attrib.is_64 = is_64bit_address(control);
  338. entry->msi_attrib.entry_nr = 0;
  339. entry->msi_attrib.maskbit = is_mask_bit_support(control);
  340. entry->msi_attrib.masked = 1;
  341. entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
  342. entry->msi_attrib.pos = pos;
  343. if (entry->msi_attrib.maskbit) {
  344. entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
  345. entry->msi_attrib.is_64);
  346. }
  347. entry->dev = dev;
  348. if (entry->msi_attrib.maskbit) {
  349. unsigned int maskbits, temp;
  350. /* All MSIs are unmasked by default, Mask them all */
  351. pci_read_config_dword(dev,
  352. msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
  353. &maskbits);
  354. temp = (1 << multi_msi_capable(control));
  355. temp = ((temp - 1) & ~temp);
  356. maskbits |= temp;
  357. pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits);
  358. entry->msi_attrib.maskbits_mask = temp;
  359. }
  360. list_add_tail(&entry->list, &dev->msi_list);
  361. /* Configure MSI capability structure */
  362. ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
  363. if (ret) {
  364. msi_free_irqs(dev);
  365. return ret;
  366. }
  367. /* Set MSI enabled bits */
  368. pci_intx_for_msi(dev, 0);
  369. msi_set_enable(dev, 1);
  370. dev->msi_enabled = 1;
  371. dev->irq = entry->irq;
  372. return 0;
  373. }
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	/* Locate the table: the BIR selects the BAR, the rest is an offset */
	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			/* NOTE(review): proceeds with fewer entries than
			 * nvec; if i == 0 the mapping above appears to be
			 * leaked — worth confirming against later kernels */
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		/* every vector shares the one table mapping */
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		/* count how many vectors the arch code did set up */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		msi_free_irqs(dev);

		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	/* hand the allocated irq numbers back to the caller */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}
	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}
  451. /**
  452. * pci_msi_check_device - check whether MSI may be enabled on a device
  453. * @dev: pointer to the pci_dev data structure of MSI device function
  454. * @nvec: how many MSIs have been requested ?
  455. * @type: are we checking for MSI or MSI-X ?
  456. *
  457. * Look at global flags, the device itself, and its parent busses
  458. * to determine if MSI/-X are supported for the device. If MSI/-X is
  459. * supported return 0, else return an error code.
  460. **/
  461. static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
  462. {
  463. struct pci_bus *bus;
  464. int ret;
  465. /* MSI must be globally enabled and supported by the device */
  466. if (!pci_msi_enable || !dev || dev->no_msi)
  467. return -EINVAL;
  468. /*
  469. * You can't ask to have 0 or less MSIs configured.
  470. * a) it's stupid ..
  471. * b) the list manipulation code assumes nvec >= 1.
  472. */
  473. if (nvec < 1)
  474. return -ERANGE;
  475. /* Any bridge which does NOT route MSI transactions from it's
  476. * secondary bus to it's primary bus must set NO_MSI flag on
  477. * the secondary pci_bus.
  478. * We expect only arch-specific PCI host bus controller driver
  479. * or quirks for specific PCI bridges to be setting NO_MSI.
  480. */
  481. for (bus = dev->bus; bus; bus = bus->parent)
  482. if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
  483. return -EINVAL;
  484. ret = arch_msi_check_device(dev, nvec, type);
  485. if (ret)
  486. return ret;
  487. if (!pci_find_capability(dev, type))
  488. return -EINVAL;
  489. return 0;
  490. }
  491. /**
  492. * pci_enable_msi - configure device's MSI capability structure
  493. * @dev: pointer to the pci_dev data structure of MSI device function
  494. *
  495. * Setup the MSI capability structure of device function with
  496. * a single MSI irq upon its software driver call to request for
  497. * MSI mode enabled on its hardware device function. A return of zero
  498. * indicates the successful setup of an entry zero with the new MSI
  499. * irq or non-zero for otherwise.
  500. **/
  501. int pci_enable_msi(struct pci_dev* dev)
  502. {
  503. int status;
  504. status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
  505. if (status)
  506. return status;
  507. WARN_ON(!!dev->msi_enabled);
  508. /* Check whether driver already requested for MSI-X irqs */
  509. if (dev->msix_enabled) {
  510. dev_info(&dev->dev, "can't enable MSI "
  511. "(MSI-X already enabled)\n");
  512. return -EINVAL;
  513. }
  514. status = msi_capability_init(dev);
  515. return status;
  516. }
  517. EXPORT_SYMBOL(pci_enable_msi);
/*
 * pci_msi_shutdown - disable MSI on @dev without freeing the vectors
 * @dev: device to quiesce (no-op if MSI is not enabled)
 *
 * Clears the MSI enable bit, re-enables INTx, unmasks the vectors and
 * restores dev->irq to the default pin-assertion irq.  pci_disable_msi()
 * calls this and then frees the irqs.
 */
void pci_msi_shutdown(struct pci_dev* dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* Return the device to its reset state with MSI irqs unmasked */
	if (entry->msi_attrib.maskbit) {
		u32 mask = entry->msi_attrib.maskbits_mask;
		struct irq_desc *desc = irq_to_desc(dev->irq);
		msi_set_mask_bits(desc, mask, ~mask);
	}
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
		return;

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = entry->msi_attrib.default_irq;
}
  539. void pci_disable_msi(struct pci_dev* dev)
  540. {
  541. struct msi_desc *entry;
  542. if (!pci_msi_enable || !dev || !dev->msi_enabled)
  543. return;
  544. pci_msi_shutdown(dev);
  545. entry = list_entry(dev->msi_list.next, struct msi_desc, list);
  546. if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
  547. return;
  548. msi_free_irqs(dev);
  549. }
  550. EXPORT_SYMBOL(pci_disable_msi);
/*
 * msi_free_irqs - tear down and free every msi_desc queued on @dev
 *
 * Callers must have shut MSI/MSI-X down first: no entry may still have
 * an irq action attached.  MSI-X vectors are masked in the table before
 * release, and the shared table mapping is unmapped with the last entry.
 */
static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	/* sanity: nobody may still be using these irqs */
	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			/* mask the vector before dropping it */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				* PCI_MSIX_ENTRY_SIZE
				+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* mask_base is shared by all MSI-X entries;
			 * unmap it only once, with the last one */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
  572. /**
  573. * pci_enable_msix - configure device's MSI-X capability structure
  574. * @dev: pointer to the pci_dev data structure of MSI-X device function
  575. * @entries: pointer to an array of MSI-X entries
  576. * @nvec: number of MSI-X irqs requested for allocation by device driver
  577. *
  578. * Setup the MSI-X capability structure of device function with the number
  579. * of requested irqs upon its software driver call to request for
  580. * MSI-X mode enabled on its hardware device function. A return of zero
  581. * indicates the successful configuration of MSI-X capability structure
  582. * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
  583. * Or a return of > 0 indicates that driver request is exceeding the number
  584. * of irqs available. Driver should use the returned value to re-send
  585. * its request.
  586. **/
  587. int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
  588. {
  589. int status, pos, nr_entries;
  590. int i, j;
  591. u16 control;
  592. if (!entries)
  593. return -EINVAL;
  594. status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
  595. if (status)
  596. return status;
  597. pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
  598. pci_read_config_word(dev, msi_control_reg(pos), &control);
  599. nr_entries = multi_msix_capable(control);
  600. if (nvec > nr_entries)
  601. return -EINVAL;
  602. /* Check for any invalid entries */
  603. for (i = 0; i < nvec; i++) {
  604. if (entries[i].entry >= nr_entries)
  605. return -EINVAL; /* invalid entry */
  606. for (j = i + 1; j < nvec; j++) {
  607. if (entries[i].entry == entries[j].entry)
  608. return -EINVAL; /* duplicate entry */
  609. }
  610. }
  611. WARN_ON(!!dev->msix_enabled);
  612. /* Check whether driver already requested for MSI irq */
  613. if (dev->msi_enabled) {
  614. dev_info(&dev->dev, "can't enable MSI-X "
  615. "(MSI IRQ already assigned)\n");
  616. return -EINVAL;
  617. }
  618. status = msix_capability_init(dev, entries, nvec);
  619. return status;
  620. }
  621. EXPORT_SYMBOL(pci_enable_msix);
/* Free every MSI-X vector; msi_free_irqs() also unmaps the table. */
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}
/*
 * pci_msix_shutdown - disable MSI-X on @dev without freeing the vectors
 * @dev: device to quiesce (no-op if MSI-X is not enabled)
 *
 * Clears the MSI-X enable bit and re-enables INTx; pci_disable_msix()
 * calls this and then frees the irqs.
 */
void pci_msix_shutdown(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
}
  634. void pci_disable_msix(struct pci_dev* dev)
  635. {
  636. if (!pci_msi_enable || !dev || !dev->msix_enabled)
  637. return;
  638. pci_msix_shutdown(dev);
  639. msix_free_all_irqs(dev);
  640. }
  641. EXPORT_SYMBOL(pci_disable_msix);
  642. /**
  643. * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
  644. * @dev: pointer to the pci_dev data structure of MSI(X) device function
  645. *
  646. * Being called during hotplug remove, from which the device function
  647. * is hot-removed. All previous assigned MSI/MSI-X irqs, if
  648. * allocated for this device function, are reclaimed to unused state,
  649. * which may be used later on.
  650. **/
  651. void msi_remove_pci_irq_vectors(struct pci_dev* dev)
  652. {
  653. if (!pci_msi_enable || !dev)
  654. return;
  655. if (dev->msi_enabled)
  656. msi_free_irqs(dev);
  657. if (dev->msix_enabled)
  658. msix_free_all_irqs(dev);
  659. }
/* Globally disable MSI/MSI-X (called by arch/boot code). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
/* Per-device MSI init: start with an empty list of msi_desc entries. */
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <linux/pci-acpi.h>
/* Advertise MSI support via ACPI _OSC for both PCI and PCIe roots,
 * unless ACPI handling of PCI is disabled. */
static void __devinit msi_acpi_init(void)
{
	if (acpi_pci_disabled)
		return;
	pci_osc_support_set(OSC_MSI_SUPPORT);
	pcie_osc_support_set(OSC_MSI_SUPPORT);
}
#else
static inline void msi_acpi_init(void) { }
#endif /* CONFIG_ACPI */
  681. void __devinit msi_init(void)
  682. {
  683. if (!pci_msi_enable)
  684. return;
  685. msi_acpi_init();
  686. }