/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc *msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static struct kmem_cache *msi_cachep;

static int pci_msi_enable = 1;

static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
				       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = msi_desc[irq];
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
}
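
/*
 * read_msi_msg()/write_msi_msg() below access the message (address and
 * data) that a vector raises: for MSI it lives in the function's
 * configuration space at the capability offset, while for MSI-X it lives
 * in the memory-mapped vector table that entry->mask_base points to.
 */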

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_data(irq);

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
				      &msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
					      &msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_data(irq);

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
				       msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
					       msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
					      msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
					      msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
		       base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
		       base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}

void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
}

void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
}

static int msi_free_irq(struct pci_dev *dev, int irq);

static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	status = msi_cache_init();
	if (status < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}

	return status;
}

static struct msi_desc *alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[irq] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

static int create_msi_irq(void)
{
	struct msi_desc *entry;
	int irq;

	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	irq = create_irq();
	if (irq < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}

	set_irq_data(irq, entry);

	return irq;
}

static void destroy_msi_irq(unsigned int irq)
{
	struct msi_desc *entry;

	entry = get_irq_data(irq);
	set_irq_chip(irq, NULL);
	set_irq_data(irq, NULL);
	destroy_irq(irq);
	kmem_cache_free(msi_cachep, entry);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 1;
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 1;
	}
	pci_intx(dev, 0);	/* disable intx */
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Clear the MSI enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msi_enabled = 0;
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
		dev->msix_enabled = 0;
	}
	pci_intx(dev, 1);	/* enable intx */
}

static int msi_lookup_irq(struct pci_dev *dev, int type)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
		    msi_desc[irq]->msi_attrib.type != type ||
		    msi_desc[irq]->msi_attrib.default_irq != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI irq for this device
		   already exists. Override dev->irq with this irq */
		dev->irq = irq;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}
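
/*
 * Power management: the CONFIG_PM helpers below save the MSI/MSI-X
 * capability registers and, for MSI-X, each vector's message from the
 * memory-mapped table before suspend, then write them back and re-enable
 * MSI/MSI-X mode on resume.
 */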
#ifdef CONFIG_PM
static int __pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
			     GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int i = 0, pos;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!save_state || pos <= 0)
		return;
	cap = &save_state->data[0];

	control = cap[i++] >> 16;
	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
	} else
		pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
	pci_remove_saved_cap(save_state);
	kfree(save_state);
}

static int __pci_save_msix_state(struct pci_dev *dev)
{
	int pos;
	int temp;
	int irq, head, tail = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0 || dev->no_msi)
		return 0;

	/* save the capability */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return 0;
	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
			     GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
		return -ENOMEM;
	}
	*((u16 *)&save_state->data[0]) = control;

	/* save the table */
	temp = dev->irq;
	if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		kfree(save_state);
		return -EINVAL;
	}

	irq = head = dev->irq;
	while (head != tail) {
		struct msi_desc *entry;

		entry = msi_desc[irq];
		read_msi_msg(irq, &entry->msg_save);

		tail = msi_desc[irq]->link.tail;
		irq = tail;
	}
	dev->irq = temp;

	save_state->cap_nr = PCI_CAP_ID_MSIX;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

int pci_save_msi_state(struct pci_dev *dev)
{
	int rc;

	rc = __pci_save_msi_state(dev);
	if (rc)
		return rc;

	rc = __pci_save_msix_state(dev);

	return rc;
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	u16 save;
	int pos;
	int irq, head, tail = 0;
	struct msi_desc *entry;
	int temp;
	struct pci_cap_saved_state *save_state;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
	if (!save_state)
		return;
	save = *((u16 *)&save_state->data[0]);
	pci_remove_saved_cap(save_state);
	kfree(save_state);

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos <= 0)
		return;

	/* route the table */
	temp = dev->irq;
	if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
		return;

	irq = head = dev->irq;
	while (head != tail) {
		entry = msi_desc[irq];
		write_msi_msg(irq, &entry->msg_save);

		tail = msi_desc[irq]->link.tail;
		irq = tail;
	}
	dev->irq = temp;

	pci_write_config_word(dev, msi_control_reg(pos), save);
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
#endif	/* CONFIG_PM */

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI irq, regardless of whether the function is capable of
 * handling multiple messages.  A return of zero indicates that entry
 * zero was successfully set up with the new MSI irq; a non-zero return
 * indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	int status;
	struct msi_desc *entry;
	int pos, irq;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	irq = create_msi_irq();
	if (irq < 0)
		return irq;

	entry = get_irq_data(irq);
	entry->link.head = irq;
	entry->link.tail = irq;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_irq = dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	/* Configure MSI capability structure */
	status = arch_setup_msi_irq(irq, dev);
	if (status < 0) {
		destroy_msi_irq(irq);
		return status;
	}

	attach_msi_entry(entry, irq);
	/* Set MSI enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	dev->irq = irq;
	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs.  A return of zero indicates that all
 * requested MSI-X entries were set up with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	int status;
	int irq, pos, i, j, nr_entries, temp = 0;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start(dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		irq = create_msi_irq();
		if (irq < 0)
			break;

		entry = get_irq_data(irq);
		j = entries[i].entry;
		entries[i].vector = irq;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = irq;
			entry->link.tail = irq;
			head = entry;
		} else {
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = irq;
			head->link.head = irq;
		}
		temp = irq;
		tail = entry;
		/* Configure MSI-X capability structure */
		status = arch_setup_msi_irq(irq, dev);
		if (status < 0) {
			destroy_msi_irq(irq);
			break;
		}

		attach_msi_entry(entry, irq);
	}
	if (i != nvec) {
		int avail = i - 1;
		i--;
		for (; i >= 0; i--) {
			irq = (entries + i)->vector;
			msi_free_irq(dev, irq);
			(entries + i)->vector = 0;
		}
		/* If we had some success report the number of irqs
		 * we succeeded in setting up.
		 */
		if (avail <= 0)
			avail = -EBUSY;
		return avail;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Look at global flags, the device itself, and its parent buses to
 * determine whether MSI may be enabled.  Returns 0 if MSI is supported
 * for the device, or a negative errno otherwise.
 **/
static
int pci_msi_supported(struct pci_dev *dev)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/* Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set the NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller drivers
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI irq when its driver requests MSI mode for the hardware
 * device function.  A return of zero indicates that entry zero was
 * successfully set up with the new MSI irq; a non-zero return
 * indicates failure.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
	int pos, temp, status;

	if (pci_msi_supported(dev) < 0)
		return -EINVAL;

	temp = dev->irq;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return -EINVAL;

	WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI));

	/* Check whether driver already requested for MSI-X irqs */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI. "
		       "Device already has MSI-X irq assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msi_capability_init(dev);
	return status;
}
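
/*
 * Usage sketch (illustration only, not part of the upstream file): a
 * hypothetical driver "foo", with a handler foo_handler() defined
 * elsewhere, asks for a single MSI irq and silently falls back to the
 * pin-based irq if that fails; the irq is released with free_irq()
 * before pci_disable_msi() is called on teardown.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		if (pci_enable_msi(pdev))
 *			printk(KERN_INFO "foo: MSI unavailable, using INTx\n");
 *
 *		err = request_irq(pdev->irq, foo_handler, 0, "foo", pdev);
 *		if (err) {
 *			pci_disable_msi(pdev);
 *			pci_disable_device(pdev);
 *		}
 *		return err;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		free_irq(pdev->irq, pdev);
 *		pci_disable_msi(pdev);
 *		pci_disable_device(pdev);
 *	}
 */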

void pci_disable_msi(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, default_irq;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (irq_has_action(dev->irq)) {
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI irq %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(irq_has_action(dev->irq));
	} else {
		default_irq = entry->msi_attrib.default_irq;
		spin_unlock_irqrestore(&msi_lock, flags);
		msi_free_irq(dev, dev->irq);

		/* Restore dev->irq to its default pin-assertion irq */
		dev->irq = default_irq;
	}
}

static int msi_free_irq(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	arch_teardown_msi_irq(irq);

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[irq];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	msi_desc[irq] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	destroy_msi_irq(irq);

	if (type == PCI_CAP_ID_MSIX) {
		writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == irq)
			iounmap(base);
	}

	return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of irqs when its driver requests MSI-X mode for the
 * hardware device function.  A return of zero indicates that the MSI-X
 * capability structure was successfully configured with newly allocated
 * MSI-X irqs.  A return of < 0 indicates a failure.  A return of > 0
 * indicates that the request exceeds the number of irqs available; the
 * driver should retry its request with that returned value.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries;
	int i, j, temp;
	u16 control;

	if (!entries || pci_msi_supported(dev) < 0)
		return -EINVAL;

	status = msi_init();
	if (status < 0)
		return status;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));

	/* Check whether driver already requested for MSI irq */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
	    !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI irq assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}
	status = msix_capability_init(dev, entries, nvec);
	return status;
}
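
/*
 * Usage sketch (illustration only, not part of the upstream file): a
 * hypothetical driver requests three MSI-X irqs from its probe path and,
 * per the return convention documented above, retries with the smaller
 * count reported when fewer irqs are available.  The table indices used
 * here are this example's assumption.
 *
 *	struct msix_entry vecs[3] = {
 *		{ .entry = 0 }, { .entry = 1 }, { .entry = 2 },
 *	};
 *	int err = pci_enable_msix(pdev, vecs, 3);
 *
 *	if (err > 0)
 *		err = pci_enable_msix(pdev, vecs, err);
 *	if (err)
 *		return err;
 *
 * On success vecs[i].vector holds the irq to hand to request_irq() for
 * table entry vecs[i].entry; pci_disable_msix() undoes the setup once
 * all of these irqs have been released with free_irq().
 */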

void pci_disable_msix(struct pci_dev *dev)
{
	int pos, temp;
	u16 control;

	if (!pci_msi_enable)
		return;
	if (!dev)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!pos)
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	temp = dev->irq;
	if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		int irq, head, tail = 0, warning = 0;
		unsigned long flags;

		irq = head = dev->irq;
		dev->irq = temp;	/* Restore pin IRQ */
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			tail = msi_desc[irq]->link.tail;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (irq_has_action(irq))
				warning = 1;
			else if (irq != head)	/* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
	}
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug remove, when the device function is being
 * hot-removed.  All MSI/MSI-X irqs previously assigned to this device
 * function are reclaimed to the unused state so that they may be
 * reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
	int pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
		if (irq_has_action(dev->irq)) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI irq %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(irq_has_action(dev->irq));
		} else /* Release MSI irq assigned to this device */
			msi_free_irq(dev, dev->irq);
		dev->irq = temp;	/* Restore IOAPIC IRQ */
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
		int irq, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		irq = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			tail = msi_desc[irq]->link.tail;
			base = msi_desc[irq]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (irq_has_action(irq))
				warning = 1;
			else if (irq != head)	/* Release MSI-X irq */
				msi_free_irq(dev, irq);
			irq = tail;
		}
		msi_free_irq(dev, irq);
		if (warning) {
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X irqs\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;	/* Restore IOAPIC IRQ */
	}
}

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);