/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static struct kmem_cache* msi_cachep;

static int pci_msi_enable = 1;

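/* Create the slab cache used to allocate MSI descriptors. */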
static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

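/* Set or clear the MSI enable bit in the device's MSI capability, if present. */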
static void msi_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
}

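/* Set or clear the MSI-X enable bit in the device's MSI-X capability, if present. */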
static void msix_set_enable(struct pci_dev *dev, int enable)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}

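/*
 * Mask (flag = 1) or unmask (flag = 0) the interrupt behind @irq: via the
 * optional per-vector mask bits in config space for MSI, or via the Vector
 * Control word of the memory-mapped table entry for MSI-X.
 */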
static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}

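/*
 * Read back the message (address/data pair) currently programmed for @irq,
 * from config space for MSI or from the memory-mapped table for MSI-X.
 */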
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);

	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

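/*
 * Program the message (address/data pair) for @irq, into config space for
 * MSI or into the memory-mapped table for MSI-X.
 */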
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}

static int msi_free_irq(struct pci_dev* dev, int irq);

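/*
 * One-time MSI initialization: create the descriptor cache and disable
 * MSI support globally if that fails.
 */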
static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}

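/* Allocate a zeroed MSI descriptor from the cache. */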
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

#ifdef CONFIG_PM
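/* Save the device's MSI capability registers for restoration after resume. */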
static int __pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!dev->msi_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

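/* Restore the MSI capability registers saved by __pci_save_msi_state(). */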
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	if (!dev->msi_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	pci_intx(dev, 0);		/* disable intx */
	control = cap[i++] >> 16;
	msi_set_enable(dev, 0);

	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

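/*
 * Save the MSI-X control register and the message of every allocated
 * MSI-X irq for restoration after resume.
 */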
static int __pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int irq, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return 0;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = get_irq_msi(irq);
		read_msi_msg(irq, &entry->msg_save);

		tail = entry->link.tail;
		irq = tail;
	}

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);
	return 0;
}

int pci_save_msi_state(struct pci_dev *dev)
{
	int rc;

	rc = __pci_save_msi_state(dev);
	if (rc)
		return rc;

	rc = __pci_save_msix_state(dev);
	return rc;
}

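/*
 * Rewrite the saved messages into the MSI-X table and restore the control
 * register saved by __pci_save_msix_state().
 */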
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	struct pci_cap_saved_state *save_state;

	if (!dev->msix_enabled)
		return;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 0);
	irq = head = dev->first_msi_irq;
	while (head != tail) {
		entry = get_irq_msi(irq);
		write_msi_msg(irq, &entry->msg_save);

		tail = entry->link.tail;
		irq = tail;
	}

	pci_write_config_word(dev, msi_control_reg(pos), save);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
#endif	/* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of
 * handling multiple messages. A return of zero indicates successful setup
 * of entry zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, irq;
	u16 control;

	msi_set_enable(dev, 0);	/* Ensure msi is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	/* Configure MSI capability structure */
	irq = arch_setup_msi_irq(dev, entry);
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return irq;
	}
	entry->link.head = irq;
	entry->link.tail = irq;
	dev->first_msi_irq = irq;
	set_irq_msi(irq, entry);

	/* Set MSI enabled bits */
	pci_intx(dev, 0);		/* disable intx */
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested MSI-X irqs. A return of zero indicates that all requested
 * MSI-X entries were set up with allocated irqs; a non-zero return
 * indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* Ensure msix is disabled as I set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		/* Configure MSI-X capability structure */
		irq = arch_setup_msi_irq(dev, entry);
		if (irq < 0) {
			kmem_cache_free(msi_cachep, entry);
			break;
		}
		entries[i].vector = irq;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
		}
		temp = irq;
		tail = entry;

		set_irq_msi(irq, entry);
	}
	if (i != nvec) {
		int avail = i - 1;
		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	dev->first_msi_irq = entries[0].vector;
	/* Set MSI-X enabled bits */
	pci_intx(dev, 0);		/* disable intx */
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Look at global flags, the device itself, and its parent busses
 * to return 0 if MSI is supported for the device, or a negative
 * value otherwise.
 **/
static
int pci_msi_supported(struct pci_dev * dev)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of the device function with a
 * single MSI irq when the device driver requests MSI mode for its
 * hardware device function. A return of zero indicates successful
 * setup of entry zero with the new MSI irq; a non-zero return
 * indicates failure.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested for MSI-X irqs */
	if (dev->msix_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
			"Device already has MSI-X enabled\n",
			pci_name(dev));
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}

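/*
 * Disable MSI mode for the device, re-enable INTx, and restore dev->irq to
 * its default pin-assertion irq.  The driver is expected to have called
 * free_irq() on the MSI irq first.
 */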
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int default_irq;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msi_enabled = 0;

	entry = get_irq_msi(dev->first_msi_irq);
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		return;
	}
	if (irq_has_action(dev->first_msi_irq)) {
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI irq %d\n",
		       pci_name(dev), dev->first_msi_irq);
		BUG_ON(irq_has_action(dev->first_msi_irq));
	} else {
		default_irq = entry->msi_attrib.default_irq;
		msi_free_irq(dev, dev->first_msi_irq);

		/* Restore dev->irq to its default pin-assertion irq */
		dev->irq = default_irq;
	}
	dev->first_msi_irq = 0;
}

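/*
 * Release one MSI/MSI-X irq: unlink its descriptor from the device's list,
 * tear down the arch mapping, and free the descriptor.  For MSI-X the table
 * entry is masked, and the table mapping is unmapped once the last entry of
 * the device is released.
 */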
static int msi_free_irq(struct pci_dev* dev, int irq)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;

	entry = get_irq_msi(irq);
	if (!entry || entry->dev != dev) {
		return -EINVAL;
	}

	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
	get_irq_msi(entry->link.tail)->link.head = entry->link.head;

	arch_teardown_msi_irq(irq);
	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == irq)
			iounmap(base);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of the device function with the
 * number of irqs requested when the device driver enables MSI-X mode
 * for its hardware device function. A return of zero indicates the
 * successful configuration of the MSI-X capability structure with the
 * newly allocated MSI-X irqs. A return of < 0 indicates a failure.
 * A return of > 0 indicates that the driver requested more irqs than
 * are available; the driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}

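/*
 * Disable MSI-X mode for the device, re-enable INTx, and release all MSI-X
 * irqs allocated by pci_enable_msix().  The driver is expected to have
 * called free_irq() on every MSI-X irq first.
 */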
void pci_disable_msix(struct pci_dev* dev)
{
	int irq, head, tail = 0, warning = 0;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	if (!dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx(dev, 1);		/* enable intx */
	dev->msix_enabled = 0;

	irq = head = dev->first_msi_irq;
	while (head != tail) {
		tail = get_irq_msi(irq)->link.tail;
		if (irq_has_action(irq))
			warning = 1;
		else if (irq != head)	/* Release MSI-X irq */
			msi_free_irq(dev, irq);
		irq = tail;
	}
	msi_free_irq(dev, irq);
	if (warning) {
		printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
		       "free_irq() on all MSI-X irqs\n",
		       pci_name(dev));
		BUG_ON(warning > 0);
	}
	dev->first_msi_irq = 0;
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. All MSI/MSI-X
 * irqs previously allocated for this device function are reclaimed to
 * the unused state, so that they may be reused later.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled) {
		if (irq_has_action(dev->first_msi_irq)) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI irq %d\n",
			       pci_name(dev), dev->first_msi_irq);
			BUG_ON(irq_has_action(dev->first_msi_irq));
		} else /* Release MSI irq assigned to this device */
			msi_free_irq(dev, dev->first_msi_irq);
	}
	if (dev->msix_enabled) {
		int irq, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		irq = head = dev->first_msi_irq;
		while (head != tail) {
			tail = get_irq_msi(irq)->link.tail;
			base = get_irq_msi(irq)->mask_base;
			if (irq_has_action(irq))
				warning = 1;
			else if (irq != head)	/* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);