intel_irq_remapping.c

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"
struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
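
/*
 * Per-irq remapping state (struct irq_2_iommu) lives inside the irq's
 * chip data (struct irq_cfg); a NULL return below means the irq has no
 * remapping state attached.
 */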
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}
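
/*
 * Allocate 'count' contiguous IRTEs for an irq.  Requests for more than
 * one entry (e.g. multi-MSI) are rounded up to a power of two, and
 * mask = ilog2(count) is recorded as the invalidation handle mask used
 * when the block is flushed.  Returns the base index, or -1 on failure.
 */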
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
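
/*
 * Submit a selective interrupt-entry-cache invalidation for the 2^mask
 * IRTEs starting at 'index' and wait for the queued-invalidation
 * hardware to complete it.
 */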
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}

static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}
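
/*
 * Zero the whole block of IRTEs backing an irq.  Only the owner of the
 * block (sub_handle == 0) clears it; sub_handle users share entries
 * allocated by someone else.  The block covers 2^irte_mask entries.
 */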
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}
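
/*
 * For MSI, the source-id depends on topology: a PCIe or Root Complex
 * integrated device is verified against its own requester-id, a device
 * behind a PCIe-to-PCI/PCI-X bridge against the range of buses behind
 * the bridge, and a device behind a legacy PCI bridge against the
 * bridge's own requester-id, since the bridge forwards requests on the
 * device's behalf.
 */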
static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
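
/*
 * Program one IOMMU for remapping: point DMAR_IRTA_REG at the table
 * (latched by the SIRTP command), globally invalidate the interrupt
 * entry cache, and only then set GCMD.IRE to turn remapping on.
 */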
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	/* GSTS is a 32-bit register, so read it with readl */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_irq_remap)
		return 0;

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}
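
/*
 * Enable sequence: make sure every IO-APIC is covered by a remapping
 * unit, pick x2apic (EIM) vs. xapic mode, tear down any remapping and
 * queued-invalidation state left enabled by the BIOS, then enable
 * queued invalidation and set up the remapping table on each DRHD.
 */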
static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	int eim = 0;

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		return -1;
	}

	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		WARN(!eim, KERN_WARNING
			   "Your BIOS is broken and requested that x2apic be disabled\n"
			   "This will leave your machine vulnerable to irq-injection attacks\n"
			   "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;
	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}
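
/*
 * A DMAR device-scope entry names a device as a start bus plus a path
 * of (dev, fn) hops.  The parsers below walk that path with direct
 * config-space reads (the PCI core is not up yet) to find the final
 * bus/devfn from which the HPET or IO-APIC issues interrupt requests.
 */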
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}

		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APICs listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}

int __init ir_dev_scope_init(void)
{
	if (!irq_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector,
			 unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}
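
/*
 * Build a remapped IO-APIC RTE: format = 1 selects the remappable
 * layout, and the 16-bit IRTE handle is split into the 15-bit 'index'
 * field plus the 'index2' bit.  Vector, destination and delivery mode
 * are taken from the IRTE rather than from the RTE itself.
 */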
static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);
	/*
	 * IO-APIC RTE will be configured with virtual vector.
	 * irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}

#ifdef CONFIG_SMP
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * Real vector that is used for interrupting cpu will be coming from
 * the interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}
#endif
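
/*
 * Compose a remappable MSI message: the address encodes the IRTE handle
 * (split across the INDEX1/INDEX2 fields) with the SHV bit set, and the
 * data payload carries only the sub_handle offset into the IRTE block,
 * instead of the usual destination id and vector.
 */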
static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTEs for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;

	iommu = map_dev_to_ir(pdev);
	if (!iommu)
		return -ENOENT;
	/*
	 * Set up the mapping between the irq and the IRTE:
	 * the base index plus the sub_handle points to the
	 * appropriate interrupt remap table entry.
	 */
	set_irte_irq(irq, iommu, index, sub_handle);
	return 0;
}

static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct intel_iommu *iommu = map_hpet_to_ir(id);
	int index;

	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	return 0;
}
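
/*
 * Callbacks through which the generic irq_remapping layer drives the
 * VT-d implementation.
 */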
struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
#ifdef CONFIG_SMP
	.set_affinity		= intel_ioapic_set_affinity,
#endif
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.setup_hpet_msi		= intel_setup_hpet_msi,
};