/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static struct kmem_cache* msi_cachep;

static int pci_msi_enable = 1;

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
					0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			msi_set_enable(entry->dev, !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}

static int msi_free_irq(struct pci_dev* dev, int irq);

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}

static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

#ifdef CONFIG_PM
static int __pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!dev->msi_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!dev->msi_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	pci_intx(dev, 0);		/* disable intx */
	control = cap[i++] >> 16;
	msi_set_enable(dev, 0);
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

static int __pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int irq, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = get_irq_msi(irq);
		read_msi_msg(irq, &entry->msg_save);

		tail = entry->link.tail;
		irq = tail;
	}

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

int pci_save_msi_state(struct pci_dev *dev)
{
	int rc;

	rc = __pci_save_msi_state(dev);
	if (rc)
		return rc;

	rc = __pci_save_msix_state(dev);

	return rc;
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 0);
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		entry = get_irq_msi(irq);
		write_msi_msg(irq, &entry->msg_save);

		tail = entry->link.tail;
		irq = tail;
	}

	pci_write_config_word(dev, msi_control_reg(pos), save);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
#endif	/* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages.  A return of zero indicates successful setup of entry
 * zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, irq;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	/* Configure MSI capability structure */
	irq = arch_setup_msi_irq(dev, entry);
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return irq;
	}
	entry->link.head = irq;
	entry->link.tail = irq;
	dev->first_msi_irq = irq;
	set_irq_msi(irq, entry);
	/* Set MSI enabled bits */
	pci_intx(dev, 0);		/* disable intx */
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs.  A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		/* Configure MSI-X capability structure */
		irq = arch_setup_msi_irq(dev, entry);
		if (irq < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}
		entries[i].vector = irq;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
		}
		temp = irq;
		tail = entry;

		set_irq_msi(irq, entry);
	}
	if (i != nvec) {
		int avail = i - 1;
		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	dev->first_msi_irq = entries[0].vector;
	/* Set MSI-X enabled bits */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Look at global flags, the device itself, and its parent buses
 * to return 0 if MSI is supported for the device.
 **/
static
int pci_msi_supported(struct pci_dev * dev)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}
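
/*
 * Example (illustrative sketch, not part of this file's logic): a fixup
 * for a bridge known not to forward MSI writes would mark its secondary
 * bus, which pci_msi_supported() above then sees while walking
 * dev->bus->parent.  The vendor/device IDs and the function name below
 * are hypothetical placeholders.
 *
 *	static void __devinit quirk_no_msi_bridge(struct pci_dev *dev)
 *	{
 *		if (dev->subordinate)
 *			dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(MY_VENDOR_ID, MY_DEVICE_ID, quirk_no_msi_bridge);
 */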

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with
 * a single MSI irq when its software driver requests MSI mode to be
 * enabled on the hardware device function.  A return of zero indicates
 * successful setup of entry zero with the new MSI irq; a non-zero
 * return indicates failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested for MSI-X irqs */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI.  "
			"Device already has MSI-X enabled\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
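
/*
 * Example (illustrative sketch, not part of this file's logic): a driver
 * wanting a single MSI irq typically calls pci_enable_msi() after
 * pci_enable_device() and then requests the updated dev->irq.  The names
 * my_dev, my_handler, my_data and "my_drv" below are hypothetical.
 *
 *	if (pci_enable_msi(my_dev) == 0) {
 *		if (request_irq(my_dev->irq, my_handler, 0, "my_drv", my_data)) {
 *			pci_disable_msi(my_dev);	// fall back to INTx;
 *							// dev->irq reverts to the pin irq
 *		}
 *	}
 *
 * Teardown mirrors this order: free_irq() must precede pci_disable_msi(),
 * otherwise the BUG_ON() in pci_disable_msi() below triggers.
 */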

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int default_irq;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msi_enabled = 0;

	entry = get_irq_msi(dev->first_msi_irq);
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		return;
	}
	if (irq_has_action(dev->first_msi_irq)) {
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI irq %d\n",
		       pci_name(dev), dev->first_msi_irq);
		BUG_ON(irq_has_action(dev->first_msi_irq));
	} else {
		default_irq = entry->msi_attrib.default_irq;
		msi_free_irq(dev, dev->first_msi_irq);

		/* Restore dev->irq to its default pin-assertion irq */
		dev->irq = default_irq;
	}
	dev->first_msi_irq = 0;
}

static int msi_free_irq(struct pci_dev* dev, int irq)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;

	entry = get_irq_msi(irq);
	if (!entry || entry->dev != dev) {
		return -EINVAL;
	}

	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
	get_irq_msi(entry->link.tail)->link.head = entry->link.head;

	arch_teardown_msi_irq(irq);
	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == irq)
			iounmap(base);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested irqs when its software driver requests MSI-X mode
 * to be enabled on the hardware device function.  A return of zero
 * indicates successful configuration of the MSI-X capability structure
 * with newly allocated MSI-X irqs; a return of < 0 indicates a failure,
 * and a return of > 0 indicates that the request exceeds the number of
 * irqs available.  In the latter case the driver should use the returned
 * value to re-send its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X.  "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
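
/*
 * Example (illustrative sketch, not part of this file's logic): a driver
 * asking for several MSI-X irqs fills in the entry indices it wants and,
 * on a positive return, may retry with the smaller number reported as
 * available.  my_dev, my_handler, my_data and MY_NVEC below are
 * hypothetical.
 *
 *	struct msix_entry msix[MY_NVEC];
 *	int i, err;
 *
 *	for (i = 0; i < MY_NVEC; i++)
 *		msix[i].entry = i;
 *	err = pci_enable_msix(my_dev, msix, MY_NVEC);
 *	if (err > 0)
 *		err = pci_enable_msix(my_dev, msix, err);	// retry with fewer irqs
 *	if (err == 0)
 *		err = request_irq(msix[0].vector, my_handler, 0,
 *				  "my_drv", my_data);		// one request_irq() per vector
 */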

void pci_disable_msix(struct pci_dev* dev)
{
	int irq, head, tail = 0, warning = 0;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msix_enabled = 0;

	irq = head = dev->first_msi_irq;
	while (head != tail) {
		tail = get_irq_msi(irq)->link.tail;
		if (irq_has_action(irq))
			warning = 1;
		else if (irq != head)	/* Release MSI-X irq */
			msi_free_irq(dev, irq);
		irq = tail;
	}
	msi_free_irq(dev, irq);
	if (warning) {
		printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			"free_irq() on all MSI-X irqs\n",
			pci_name(dev));
		BUG_ON(warning > 0);
	}
	dev->first_msi_irq = 0;
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is being
 * hot-removed.  All previously assigned MSI/MSI-X irqs, if any were
 * allocated for this device function, are reclaimed to the unused
 * state, so that they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled) {
		if (irq_has_action(dev->first_msi_irq)) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI irq %d\n",
			       pci_name(dev), dev->first_msi_irq);
			BUG_ON(irq_has_action(dev->first_msi_irq));
		} else /* Release MSI irq assigned to this device */
			msi_free_irq(dev, dev->first_msi_irq);
	}
	if (dev->msix_enabled) {
		int irq, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		irq = head = dev->first_msi_irq;
		while (head != tail) {
			tail = get_irq_msi(irq)->link.tail;
			base = get_irq_msi(irq)->mask_base;
			if (irq_has_action(irq))
				warning = 1;
			else if (irq != head)	/* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);