intr_remapping.c

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                disable_intremap = 0;
        else if (!strncmp(str, "off", 3))
                disable_intremap = 1;
        else if (!strncmp(str, "nosid", 5))
                disable_sourceid_checking = 1;

        return 0;
}
early_param("intremap", setup_intremap);

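/*
 * Per-IRQ bookkeeping for interrupt remapping: which IOMMU owns the
 * remapping entry, the base index of the IRTE block, the sub-handle
 * within that block, and the power-of-two mask covering the block.
 */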
struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8 irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        return get_irq_iommu(irq);
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        struct irq_data *data = irq_get_irq_data(irq);

        if (WARN_ONCE(data->irq_2_iommu,
                      KERN_DEBUG "irq_2_iommu!=NULL irq %u\n", irq))
                return data->irq_2_iommu;

        data->irq_2_iommu = kzalloc_node(sizeof(*data->irq_2_iommu),
                                         GFP_ATOMIC, data->node);
        return data->irq_2_iommu;
}

static void irq_2_iommu_free(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        struct irq_2_iommu *p = d->irq_2_iommu;

        d->irq_2_iommu = NULL;
        kfree(p);
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}

static void irq_2_iommu_free(unsigned int irq) { }

#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);
        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

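/*
 * Copy the live IRTE for @irq into @entry under irq_2_ir_lock.
 * Returns 0 on success, -1 if the IRQ has no valid remapping entry.
 */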
int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}

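/*
 * Allocate a naturally aligned, power-of-two block of @count IRTEs in
 * @iommu's remapping table and bind it to @irq with sub-handle 0
 * (count > 1 serves multi-message MSI).  The block mask is recorded so
 * the whole block can later be flushed and freed together.  Returns
 * the base index, or -1 on failure.
 */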
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

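/*
 * Queue a selective interrupt-entry-cache invalidation for the IRTE
 * block starting at @index (2^@mask entries) and wait for it to
 * complete via the queued-invalidation interface.
 */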
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

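/*
 * Several IRQs can share one IRTE block allocated by alloc_irte():
 * set_irte_irq() binds @irq to entry @index + @subhandle of that
 * block.  A plausible caller sequence for multi-message MSI (a sketch,
 * not taken from this file) would be alloc_irte(iommu, irq, n) for the
 * first vector, set_irte_irq(irq + i, iommu, index, i) for each
 * remaining vector, then modify_irte() to program each entry.
 */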
int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

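/*
 * Install a new IRTE for @irq: the two halves are written with atomic
 * 64-bit stores, the cache line is flushed for non-coherent hardware,
 * and the IOMMU's interrupt entry cache is then invalidated so the
 * update takes effect.
 */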
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

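/*
 * Zero and flush the whole IRTE block, but only when called for the
 * owning mapping (sub_handle == 0); sharers of the block must not tear
 * down entries that other sub-handles still reference.
 */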
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        irq_2_iommu_free(irq);

        return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY           0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ       0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS          0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16       0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1  0x1  /* verify most significant 13 bits, ignore
                              * the third least significant bit
                              */
#define SQ_13_IGNORE_2  0x2  /* verify most significant 13 bits, ignore
                              * the second and third least significant bits
                              */
#define SQ_13_IGNORE_3  0x3  /* verify most significant 13 bits, ignore
                              * the three least significant bits
                              */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

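/*
 * Derive the source-id used to validate MSIs from @dev.  Native PCIe
 * and Root Complex integrated devices are checked against their own
 * bus/devfn.  Behind a PCIe-to-PCI(-X) bridge the original requester
 * id is not preserved, so only the bus range is verified; behind a
 * legacy PCI bridge, requests carry the bridge's own bus/devfn.
 */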
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                     (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                     (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}

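/*
 * Program one IOMMU for remapping: point DMAR_IRTA_REG at the table
 * (recording size and x2APIC/EIM mode), latch it with the SIRTP
 * command, globally invalidate the interrupt entry cache, and finally
 * set IRE to start remapping.  Each command is confirmed by polling
 * the corresponding DMAR_GSTS_REG status bit.
 */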
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                iommu->ir_table = NULL;
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);

        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

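/*
 * Bring up interrupt remapping system-wide: bail out unless every
 * IOAPIC is covered by a DRHD scope, quiesce anything the firmware
 * left enabled, verify EIM support when x2APIC (@eim) is requested,
 * enable queued invalidation everywhere, and only then program each
 * remapping-capable IOMMU.
 */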
int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

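/*
 * Walk the ACPI device-scope PCI path for an HPET entry to find the
 * final (bus, devfn) that sources its interrupt messages.  Config
 * space is read with read_pci_config_byte() because the PCI core has
 * not been initialized this early in boot.
 */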
static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus   = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
        ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus   = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
        ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

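/*
 * Re-enable remapping on every IOMMU, e.g. after a suspend/resume
 * cycle: restart queued invalidation where it was previously set up,
 * then reprogram the remapping hardware from the retained tables.
 */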
int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}