/*
 * File:        msi.c
 * Purpose:     PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc *msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t *msi_cachep;

static int pci_msi_enable = 1;

static struct msi_ops *msi_ops;

int
msi_register(struct msi_ops *ops)
{
        msi_ops = ops;
        return 0;
}
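
/*
 * Illustrative sketch (not part of this file): architecture setup code is
 * expected to hand its msi_ops to this layer, typically from msi_arch_init().
 * The my_arch_* names below are hypothetical; the callbacks correspond to
 * the ->setup, ->teardown and ->target hooks used elsewhere in this file.
 *
 *      static struct msi_ops my_arch_msi_ops = {
 *              .setup    = my_arch_msi_setup,
 *              .teardown = my_arch_msi_teardown,
 *              .target   = my_arch_msi_target,
 *      };
 *
 *      msi_register(&my_arch_msi_ops);
 */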

static int msi_cache_init(void)
{
        msi_cachep = kmem_cache_create("msi_cache", sizeof(struct msi_desc),
                                       0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!msi_cachep)
                return -ENOMEM;

        return 0;
}

static void msi_set_mask_bit(unsigned int irq, int flag)
{
        struct msi_desc *entry;

        entry = msi_desc[irq];
        BUG_ON(!entry || !entry->dev);
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
                if (entry->msi_attrib.maskbit) {
                        int pos;
                        u32 mask_bits;

                        pos = (long)entry->mask_base;
                        pci_read_config_dword(entry->dev, pos, &mask_bits);
                        mask_bits &= ~(1);
                        mask_bits |= flag;
                        pci_write_config_dword(entry->dev, pos, mask_bits);
                }
                break;
        case PCI_CAP_ID_MSIX:
        {
                int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
                writel(flag, entry->mask_base + offset);
                break;
        }
        default:
                BUG();
                break;
        }
}

static void read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
                struct pci_dev *dev = entry->dev;
                int pos = entry->msi_attrib.pos;
                u16 data;

                pci_read_config_dword(dev, msi_lower_address_reg(pos),
                                      &msg->address_lo);
                if (entry->msi_attrib.is_64) {
                        pci_read_config_dword(dev, msi_upper_address_reg(pos),
                                              &msg->address_hi);
                        pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
                } else {
                        msg->address_hi = 0;
                        pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
                }
                msg->data = data;
                break;
        }
        case PCI_CAP_ID_MSIX:
        {
                void __iomem *base;
                base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

                msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
                msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
                msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
                break;
        }
        default:
                BUG();
        }
}

static void write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
        switch (entry->msi_attrib.type) {
        case PCI_CAP_ID_MSI:
        {
                struct pci_dev *dev = entry->dev;
                int pos = entry->msi_attrib.pos;

                pci_write_config_dword(dev, msi_lower_address_reg(pos),
                                       msg->address_lo);
                if (entry->msi_attrib.is_64) {
                        pci_write_config_dword(dev, msi_upper_address_reg(pos),
                                               msg->address_hi);
                        pci_write_config_word(dev, msi_data_reg(pos, 1),
                                              msg->data);
                } else {
                        pci_write_config_word(dev, msi_data_reg(pos, 0),
                                              msg->data);
                }
                break;
        }
        case PCI_CAP_ID_MSIX:
        {
                void __iomem *base;
                base = entry->mask_base +
                        entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

                writel(msg->address_lo,
                       base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
                writel(msg->address_hi,
                       base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
                writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
                break;
        }
        default:
                BUG();
        }
}

#ifdef CONFIG_SMP
static void set_msi_affinity(unsigned int irq, cpumask_t cpu_mask)
{
        struct msi_desc *entry;
        struct msi_msg msg;

        entry = msi_desc[irq];
        if (!entry || !entry->dev)
                return;

        read_msi_msg(entry, &msg);
        msi_ops->target(irq, cpu_mask, &msg);
        write_msi_msg(entry, &msg);
        set_native_irq_info(irq, cpu_mask);
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */

static void mask_MSI_irq(unsigned int irq)
{
        msi_set_mask_bit(irq, 1);
}

static void unmask_MSI_irq(unsigned int irq)
{
        msi_set_mask_bit(irq, 0);
}

static void ack_msi_irq(unsigned int irq)
{
        move_native_irq(irq);
        ack_APIC_irq();
}

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
        .name           = "PCI-MSI",
        .unmask         = unmask_MSI_irq,
        .mask           = mask_MSI_irq,
        .ack            = ack_msi_irq,
        .set_affinity   = set_msi_affinity
};

static int msi_free_irq(struct pci_dev *dev, int irq);

static int msi_init(void)
{
        static int status = -ENOMEM;

        if (!status)
                return status;

        if (pci_msi_quirk) {
                pci_msi_enable = 0;
                printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
                status = -EINVAL;
                return status;
        }

        status = msi_arch_init();
        if (status < 0) {
                pci_msi_enable = 0;
                printk(KERN_WARNING
                       "PCI: MSI arch init failed. MSI disabled.\n");
                return status;
        }

        if (!msi_ops) {
                pci_msi_enable = 0;
                printk(KERN_WARNING
                       "PCI: MSI ops not registered. MSI disabled.\n");
                status = -EINVAL;
                return status;
        }

        status = msi_cache_init();
        if (status < 0) {
                pci_msi_enable = 0;
                printk(KERN_WARNING "PCI: MSI cache init failed\n");
                return status;
        }

        return status;
}

static struct msi_desc *alloc_msi_entry(void)
{
        struct msi_desc *entry;

        entry = kmem_cache_zalloc(msi_cachep, GFP_KERNEL);
        if (!entry)
                return NULL;

        entry->link.tail = entry->link.head = 0;        /* single message */
        entry->dev = NULL;

        return entry;
}

static void attach_msi_entry(struct msi_desc *entry, int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        msi_desc[irq] = entry;
        spin_unlock_irqrestore(&msi_lock, flags);
}

static int create_msi_irq(struct irq_chip *chip)
{
        struct msi_desc *entry;
        int irq;

        entry = alloc_msi_entry();
        if (!entry)
                return -ENOMEM;

        irq = create_irq();
        if (irq < 0) {
                kmem_cache_free(msi_cachep, entry);
                return -EBUSY;
        }

        set_irq_chip_and_handler(irq, chip, handle_edge_irq);
        set_irq_data(irq, entry);

        return irq;
}

static void destroy_msi_irq(unsigned int irq)
{
        struct msi_desc *entry;

        entry = get_irq_data(irq);
        set_irq_chip(irq, NULL);
        set_irq_data(irq, NULL);
        destroy_irq(irq);
        kmem_cache_free(msi_cachep, entry);
}

static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
        u16 control;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (type == PCI_CAP_ID_MSI) {
                /* Set enabled bits to single MSI & enable MSI_enable bit */
                msi_enable(control, 1);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msi_enabled = 1;
        } else {
                msix_enable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msix_enabled = 1;
        }
        if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
                /* PCI Express Endpoint device detected */
                pci_intx(dev, 0);       /* disable intx */
        }
}

void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
        u16 control;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (type == PCI_CAP_ID_MSI) {
                /* Clear the MSI enable bit */
                msi_disable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msi_enabled = 0;
        } else {
                msix_disable(control);
                pci_write_config_word(dev, msi_control_reg(pos), control);
                dev->msix_enabled = 0;
        }
        if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
                /* PCI Express Endpoint device detected */
                pci_intx(dev, 1);       /* enable intx */
        }
}

static int msi_lookup_irq(struct pci_dev *dev, int type)
{
        int irq;
        unsigned long flags;

        spin_lock_irqsave(&msi_lock, flags);
        for (irq = 0; irq < NR_IRQS; irq++) {
                if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
                    msi_desc[irq]->msi_attrib.type != type ||
                    msi_desc[irq]->msi_attrib.default_irq != dev->irq)
                        continue;
                spin_unlock_irqrestore(&msi_lock, flags);
                /* A pre-assigned MSI irq for this device already
                   exists. Override dev->irq with this irq */
                dev->irq = irq;
                return 0;
        }
        spin_unlock_irqrestore(&msi_lock, flags);
        return -EACCES;
}

void pci_scan_msi_device(struct pci_dev *dev)
{
        if (!dev)
                return;
}

#ifdef CONFIG_PM
int pci_save_msi_state(struct pci_dev *dev)
{
        int pos, i = 0;
        u16 control;
        struct pci_cap_saved_state *save_state;
        u32 *cap;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos <= 0 || dev->no_msi)
                return 0;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSI_FLAGS_ENABLE))
                return 0;

        save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
                             GFP_KERNEL);
        if (!save_state) {
                printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
                return -ENOMEM;
        }
        cap = &save_state->data[0];

        pci_read_config_dword(dev, pos, &cap[i++]);
        control = cap[0] >> 16;
        pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
        if (control & PCI_MSI_FLAGS_64BIT) {
                pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
                pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
        } else
                pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
        if (control & PCI_MSI_FLAGS_MASKBIT)
                pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
        save_state->cap_nr = PCI_CAP_ID_MSI;
        pci_add_saved_cap(dev, save_state);
        return 0;
}

void pci_restore_msi_state(struct pci_dev *dev)
{
        int i = 0, pos;
        u16 control;
        struct pci_cap_saved_state *save_state;
        u32 *cap;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSI);
        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (!save_state || pos <= 0)
                return;
        cap = &save_state->data[0];

        control = cap[i++] >> 16;
        pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, cap[i++]);
        if (control & PCI_MSI_FLAGS_64BIT) {
                pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, cap[i++]);
                pci_write_config_dword(dev, pos + PCI_MSI_DATA_64, cap[i++]);
        } else
                pci_write_config_dword(dev, pos + PCI_MSI_DATA_32, cap[i++]);
        if (control & PCI_MSI_FLAGS_MASKBIT)
                pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT, cap[i++]);
        pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
        pci_remove_saved_cap(save_state);
        kfree(save_state);
}

int pci_save_msix_state(struct pci_dev *dev)
{
        int pos;
        int temp;
        int irq, head, tail = 0;
        u16 control;
        struct pci_cap_saved_state *save_state;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos <= 0 || dev->no_msi)
                return 0;

        /* save the capability */
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSIX_FLAGS_ENABLE))
                return 0;
        save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u16),
                             GFP_KERNEL);
        if (!save_state) {
                printk(KERN_ERR "Out of memory in pci_save_msix_state\n");
                return -ENOMEM;
        }
        *((u16 *)&save_state->data[0]) = control;

        /* save the table */
        temp = dev->irq;
        if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
                kfree(save_state);
                return -EINVAL;
        }

        irq = head = dev->irq;
        while (head != tail) {
                struct msi_desc *entry;

                entry = msi_desc[irq];
                read_msi_msg(entry, &entry->msg_save);

                tail = msi_desc[irq]->link.tail;
                irq = tail;
        }
        dev->irq = temp;

        save_state->cap_nr = PCI_CAP_ID_MSIX;
        pci_add_saved_cap(dev, save_state);
        return 0;
}

void pci_restore_msix_state(struct pci_dev *dev)
{
        u16 save;
        int pos;
        int irq, head, tail = 0;
        struct msi_desc *entry;
        int temp;
        struct pci_cap_saved_state *save_state;

        save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
        if (!save_state)
                return;
        save = *((u16 *)&save_state->data[0]);
        pci_remove_saved_cap(save_state);
        kfree(save_state);

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos <= 0)
                return;

        /* route the table */
        temp = dev->irq;
        if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
                return;

        irq = head = dev->irq;
        while (head != tail) {
                entry = msi_desc[irq];
                write_msi_msg(entry, &entry->msg_save);

                tail = msi_desc[irq]->link.tail;
                irq = tail;
        }
        dev->irq = temp;

        pci_write_config_word(dev, msi_control_reg(pos), save);
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
}
#endif

static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
{
        int status;
        struct msi_msg msg;
        int pos;
        u16 control;

        pos = entry->msi_attrib.pos;
        pci_read_config_word(dev, msi_control_reg(pos), &control);

        /* Configure MSI capability structure */
        status = msi_ops->setup(dev, dev->irq, &msg);
        if (status < 0)
                return status;

        write_msi_msg(entry, &msg);
        if (entry->msi_attrib.maskbit) {
                unsigned int maskbits, temp;
                /* All MSIs are unmasked by default; mask them all */
                pci_read_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        &maskbits);
                temp = (1 << multi_msi_capable(control));
                temp = ((temp - 1) & ~temp);
                maskbits |= temp;
                pci_write_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        maskbits);
        }
        return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a single
 * MSI irq, regardless of whether the device function is capable of handling
 * multiple messages. A return of zero indicates successful setup of entry
 * zero with the new MSI irq; a non-zero return indicates failure.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
        int status;
        struct msi_desc *entry;
        int pos, irq;
        u16 control;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        /* MSI Entry Initialization */
        irq = create_msi_irq(&msi_chip);
        if (irq < 0)
                return irq;

        entry = get_irq_data(irq);
        entry->link.head = irq;
        entry->link.tail = irq;
        entry->msi_attrib.type = PCI_CAP_ID_MSI;
        entry->msi_attrib.is_64 = is_64bit_address(control);
        entry->msi_attrib.entry_nr = 0;
        entry->msi_attrib.maskbit = is_mask_bit_support(control);
        entry->msi_attrib.default_irq = dev->irq;       /* Save IOAPIC IRQ */
        entry->msi_attrib.pos = pos;
        dev->irq = irq;
        entry->dev = dev;
        if (is_mask_bit_support(control)) {
                entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
                                is_64bit_address(control));
        }
        /* Configure MSI capability structure */
        status = msi_register_init(dev, entry);
        if (status != 0) {
                dev->irq = entry->msi_attrib.default_irq;
                destroy_msi_irq(irq);
                return status;
        }

        attach_msi_entry(entry, irq);
        /* Set MSI enabled bits */
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

        return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates failure.
 **/
static int msix_capability_init(struct pci_dev *dev,
                                struct msix_entry *entries, int nvec)
{
        struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
        struct msi_msg msg;
        int status;
        int irq, pos, i, j, nr_entries, temp = 0;
        unsigned long phys_addr;
        u32 table_offset;
        u16 control;
        u8 bir;
        void __iomem *base;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        /* Request & Map MSI-X table region */
        pci_read_config_word(dev, msi_control_reg(pos), &control);
        nr_entries = multi_msix_capable(control);

        pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
        bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
        table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
        phys_addr = pci_resource_start(dev, bir) + table_offset;
        base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
        if (base == NULL)
                return -ENOMEM;

        /* MSI-X Table Initialization */
        for (i = 0; i < nvec; i++) {
                irq = create_msi_irq(&msi_chip);
                if (irq < 0)
                        break;

                entry = get_irq_data(irq);
                j = entries[i].entry;
                entries[i].vector = irq;
                entry->msi_attrib.type = PCI_CAP_ID_MSIX;
                entry->msi_attrib.is_64 = 1;
                entry->msi_attrib.entry_nr = j;
                entry->msi_attrib.maskbit = 1;
                entry->msi_attrib.default_irq = dev->irq;
                entry->msi_attrib.pos = pos;
                entry->dev = dev;
                entry->mask_base = base;
                /* Add to this device's circular list of MSI-X irqs */
                if (!head) {
                        entry->link.head = irq;
                        entry->link.tail = irq;
                        head = entry;
                } else {
                        entry->link.head = temp;
                        entry->link.tail = tail->link.tail;
                        tail->link.tail = irq;
                        head->link.head = irq;
                }
                temp = irq;
                tail = entry;
                /* Configure MSI-X capability structure */
                status = msi_ops->setup(dev, irq, &msg);
                if (status < 0) {
                        destroy_msi_irq(irq);
                        break;
                }

                write_msi_msg(entry, &msg);
                attach_msi_entry(entry, irq);
        }
        if (i != nvec) {
                int avail = i - 1;
                i--;
                for (; i >= 0; i--) {
                        irq = (entries + i)->vector;
                        msi_free_irq(dev, irq);
                        (entries + i)->vector = 0;
                }
                /* If we had some success report the number of irqs
                 * we succeeded in setting up.
                 */
                if (avail <= 0)
                        avail = -EBUSY;
                return avail;
        }
        /* Set MSI-X enabled bits */
        enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

        return 0;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on device
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * MSI must be globally enabled and supported by the device and its root
 * bus. But, the root bus is not easy to find since some architectures
 * have virtual busses on top of the PCI hierarchy (for instance the
 * hypertransport bus), while the actual bus where MSI must be supported
 * is below. So we test the MSI flag on all parent busses and assume
 * that no quirk will ever set the NO_MSI flag on a non-root bus.
 **/
static
int pci_msi_supported(struct pci_dev *dev)
{
        struct pci_bus *bus;

        if (!pci_msi_enable || !dev || dev->no_msi)
                return -EINVAL;

        /* check MSI flags of all parent busses */
        for (bus = dev->bus; bus; bus = bus->parent)
                if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
                        return -EINVAL;

        return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with
 * a single MSI irq upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates successful setup of entry zero with the new MSI irq;
 * a non-zero return indicates failure.
 **/
int pci_enable_msi(struct pci_dev *dev)
{
        int pos, temp, status;
        u16 control;

        if (pci_msi_supported(dev) < 0)
                return -EINVAL;

        temp = dev->irq;

        status = msi_init();
        if (status < 0)
                return status;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (!pos)
                return -EINVAL;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!is_64bit_address(control) && msi_ops->needs_64bit_address)
                return -EINVAL;

        WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI));

        /* Check whether driver already requested for MSI-X irqs */
        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI. "
                       "Device already has MSI-X irq assigned\n",
                       pci_name(dev));
                dev->irq = temp;
                return -EINVAL;
        }
        status = msi_capability_init(dev);
        return status;
}
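
/*
 * Typical driver usage (illustrative sketch, not part of this file; the
 * my_isr handler, "mydev" name and mydev cookie below are hypothetical):
 *
 *      if (pci_enable_msi(pdev) == 0) {
 *              -- pdev->irq now refers to the allocated MSI irq
 *              if (request_irq(pdev->irq, my_isr, 0, "mydev", mydev))
 *                      pci_disable_msi(pdev);
 *      }
 *      ...
 *      free_irq(pdev->irq, mydev);
 *      pci_disable_msi(pdev);          -- restores the pin-based dev->irq
 */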

void pci_disable_msi(struct pci_dev *dev)
{
        struct msi_desc *entry;
        int pos, default_irq;
        u16 control;
        unsigned long flags;

        if (!pci_msi_enable)
                return;
        if (!dev)
                return;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (!pos)
                return;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSI_FLAGS_ENABLE))
                return;

        disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

        spin_lock_irqsave(&msi_lock, flags);
        entry = msi_desc[dev->irq];
        if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
                spin_unlock_irqrestore(&msi_lock, flags);
                return;
        }
        if (irq_has_action(dev->irq)) {
                spin_unlock_irqrestore(&msi_lock, flags);
                printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
                       "free_irq() on MSI irq %d\n",
                       pci_name(dev), dev->irq);
                BUG_ON(irq_has_action(dev->irq));
        } else {
                default_irq = entry->msi_attrib.default_irq;
                spin_unlock_irqrestore(&msi_lock, flags);
                msi_free_irq(dev, dev->irq);

                /* Restore dev->irq to its default pin-assertion irq */
                dev->irq = default_irq;
        }
}

static int msi_free_irq(struct pci_dev *dev, int irq)
{
        struct msi_desc *entry;
        int head, entry_nr, type;
        void __iomem *base;
        unsigned long flags;

        msi_ops->teardown(irq);

        spin_lock_irqsave(&msi_lock, flags);
        entry = msi_desc[irq];
        if (!entry || entry->dev != dev) {
                spin_unlock_irqrestore(&msi_lock, flags);
                return -EINVAL;
        }
        type = entry->msi_attrib.type;
        entry_nr = entry->msi_attrib.entry_nr;
        head = entry->link.head;
        base = entry->mask_base;
        msi_desc[entry->link.head]->link.tail = entry->link.tail;
        msi_desc[entry->link.tail]->link.head = entry->link.head;
        entry->dev = NULL;
        msi_desc[irq] = NULL;
        spin_unlock_irqrestore(&msi_lock, flags);

        destroy_msi_irq(irq);

        if (type == PCI_CAP_ID_MSIX) {
                writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
                        PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

                if (head == irq)
                        iounmap(base);
        }

        return 0;
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Set up the MSI-X capability structure of the device function with the
 * number of requested irqs upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of the MSI-X capability structure
 * with newly allocated MSI-X irqs. A return of < 0 indicates a failure.
 * A return of > 0 indicates that the driver requested more irqs than are
 * available; the driver should retry its request with the returned value.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
        int status, pos, nr_entries;
        int i, j, temp;
        u16 control;

        if (!entries || pci_msi_supported(dev) < 0)
                return -EINVAL;

        status = msi_init();
        if (status < 0)
                return status;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (!pos)
                return -EINVAL;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        nr_entries = multi_msix_capable(control);
        if (nvec > nr_entries)
                return -EINVAL;

        /* Check for any invalid entries */
        for (i = 0; i < nvec; i++) {
                if (entries[i].entry >= nr_entries)
                        return -EINVAL;         /* invalid entry */
                for (j = i + 1; j < nvec; j++) {
                        if (entries[i].entry == entries[j].entry)
                                return -EINVAL; /* duplicate entry */
                }
        }
        temp = dev->irq;
        WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));

        /* Check whether driver already requested for MSI irq */
        if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
            !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
                printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
                       "Device already has an MSI irq assigned\n",
                       pci_name(dev));
                dev->irq = temp;
                return -EINVAL;
        }
        status = msix_capability_init(dev, entries, nvec);
        return status;
}
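
/*
 * Typical driver usage (illustrative sketch, not part of this file; the
 * vector count, handlers and names below are hypothetical):
 *
 *      struct msix_entry ent[2] = { { .entry = 0 }, { .entry = 1 } };
 *      int rc = pci_enable_msix(pdev, ent, 2);
 *      if (rc == 0) {
 *              request_irq(ent[0].vector, my_isr0, 0, "mydev-0", mydev);
 *              request_irq(ent[1].vector, my_isr1, 0, "mydev-1", mydev);
 *      } else if (rc > 0) {
 *              -- only rc vectors are available; retry with a smaller request
 *      }
 */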

void pci_disable_msix(struct pci_dev *dev)
{
        int pos, temp;
        u16 control;

        if (!pci_msi_enable)
                return;
        if (!dev)
                return;

        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (!pos)
                return;

        pci_read_config_word(dev, msi_control_reg(pos), &control);
        if (!(control & PCI_MSIX_FLAGS_ENABLE))
                return;

        disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

        temp = dev->irq;
        if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
                int irq, head, tail = 0, warning = 0;
                unsigned long flags;

                irq = head = dev->irq;
                dev->irq = temp;                        /* Restore pin IRQ */
                while (head != tail) {
                        spin_lock_irqsave(&msi_lock, flags);
                        tail = msi_desc[irq]->link.tail;
                        spin_unlock_irqrestore(&msi_lock, flags);
                        if (irq_has_action(irq))
                                warning = 1;
                        else if (irq != head)           /* Release MSI-X irq */
                                msi_free_irq(dev, irq);
                        irq = tail;
                }
                msi_free_irq(dev, irq);
                if (warning) {
                        printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
                               "free_irq() on all MSI-X irqs\n",
                               pci_name(dev));
                        BUG_ON(warning > 0);
                }
        }
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Called during hotplug removal of the device function. Any MSI/MSI-X
 * irqs previously assigned to this device function are reclaimed to the
 * unused state, so they may be reused later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
        int pos, temp;
        unsigned long flags;

        if (!pci_msi_enable || !dev)
                return;

        temp = dev->irq;                /* Save IOAPIC IRQ */
        pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
        if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
                if (irq_has_action(dev->irq)) {
                        printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
                               "called without free_irq() on MSI irq %d\n",
                               pci_name(dev), dev->irq);
                        BUG_ON(irq_has_action(dev->irq));
                } else /* Release MSI irq assigned to this device */
                        msi_free_irq(dev, dev->irq);
                dev->irq = temp;        /* Restore IOAPIC IRQ */
        }
        pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
                int irq, head, tail = 0, warning = 0;
                void __iomem *base = NULL;

                irq = head = dev->irq;
                while (head != tail) {
                        spin_lock_irqsave(&msi_lock, flags);
                        tail = msi_desc[irq]->link.tail;
                        base = msi_desc[irq]->mask_base;
                        spin_unlock_irqrestore(&msi_lock, flags);
                        if (irq_has_action(irq))
                                warning = 1;
                        else if (irq != head)           /* Release MSI-X irq */
                                msi_free_irq(dev, irq);
                        irq = tail;
                }
                msi_free_irq(dev, irq);
                if (warning) {
                        iounmap(base);
                        printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
                               "called without free_irq() on all MSI-X irqs\n",
                               pci_name(dev));
                        BUG_ON(warning > 0);
                }
                dev->irq = temp;        /* Restore IOAPIC IRQ */
        }
}

void pci_no_msi(void)
{
        pci_msi_enable = 0;
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);