/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
#define EXIT_LOOP_COUNT 10000000
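/*
 * CMD_SET_TYPE() stores the 4-bit command opcode in bits 31:28 of the
 * second command word; EXIT_LOOP_COUNT bounds the polling loop used in
 * iommu_completion_wait() below.
 */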

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);
/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
        u32 data[4];
};
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e);

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
        return iommu->cap & IOMMU_CAP_NPCACHE;
}
/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void iommu_print_event(void *__evt)
{
        u32 *event = __evt;
        int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
        int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
        int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        u64 address = (u64)(((u64)event[3]) << 32) | event[2];

        printk(KERN_ERR "AMD IOMMU: Event logged [");

        switch (type) {
        case EVENT_TYPE_ILL_DEV:
                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        case EVENT_TYPE_IO_FAULT:
                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_DEV_TAB_ERR:
                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_ILL_CMD:
                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
                break;
        case EVENT_TYPE_CMD_HARD_ERR:
                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
                       "flags=0x%04x]\n", address, flags);
                break;
        case EVENT_TYPE_IOTLB_INV_TO:
                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
                       "address=0x%016llx]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address);
                break;
        case EVENT_TYPE_INV_DEV_REQ:
                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        default:
                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
        }
}
static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu->evt_buf + head);
                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
        }

        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
        struct amd_iommu *iommu;

        list_for_each_entry(iommu, &amd_iommu_list, list)
                iommu_poll_events(iommu);

        return IRQ_HANDLED;
}
/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMUs command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        u32 tail, head;
        u8 *target;

        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        target = iommu->cmd_buf + tail;
        memcpy_toio(target, cmd, sizeof(*cmd));
        tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
        head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
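        /* the ring buffer is full if the advanced tail catches up with the head */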
        if (tail == head)
                return -ENOMEM;
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        return 0;
}
/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}
/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU signals
 * completion by setting a bit in its status register, which we poll
 * below.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
        int ret, ready = 0;
        unsigned status = 0;
        struct iommu_cmd cmd;
        unsigned long i = 0;

        memset(&cmd, 0, sizeof(cmd));
        cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
        CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

        iommu->need_sync = 0;

        ret = iommu_queue_command(iommu, &cmd);
        if (ret)
                return ret;

        while (!ready && (i < EXIT_LOOP_COUNT)) {
                ++i;
                /* wait for the bit to become one */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
        }

        /* set bit back to zero */
        status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
        writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

        if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
                printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");

        return 0;
}
/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_cmd cmd;

        BUG_ON(iommu == NULL);

        memset(&cmd, 0, sizeof(cmd));
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;

        iommu->need_sync = 1;

        return iommu_queue_command(iommu, &cmd);
}
/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
                u64 address, u16 domid, int pde, int s)
{
        struct iommu_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        address &= PAGE_MASK;
        CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
        cmd.data[1] |= domid;
        cmd.data[2] = lower_32_bits(address);
        cmd.data[3] = upper_32_bits(address);
        if (s) /* size bit - we flush more than one 4kb page */
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

        iommu->need_sync = 1;

        return iommu_queue_command(iommu, &cmd);
}
/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                u64 address, size_t size)
{
        int s = 0;
        unsigned pages = iommu_num_pages(address, size);

        address &= PAGE_MASK;

        if (pages > 1) {
                /*
                 * If we have to flush more than one page, flush all
                 * TLB entries for this domain
                 */
                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
                s = 1;
        }

        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

        return 0;
}
/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
        u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/
/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map(struct protection_domain *dom,
                     unsigned long bus_addr,
                     unsigned long phys_addr,
                     int prot)
{
        u64 __pte, *pte, *page;

        bus_addr  = PAGE_ALIGN(bus_addr);
        phys_addr = PAGE_ALIGN(phys_addr);
        /* only support 512GB address spaces for now */
        if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
                return -EINVAL;

        pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

        if (!IOMMU_PTE_PRESENT(*pte)) {
                page = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = IOMMU_L2_PDE(virt_to_phys(page));
        }

        pte = IOMMU_PTE_PAGE(*pte);
        pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

        if (!IOMMU_PTE_PRESENT(*pte)) {
                page = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = IOMMU_L1_PDE(virt_to_phys(page));
        }

        pte = IOMMU_PTE_PAGE(*pte);
        pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

        if (IOMMU_PTE_PRESENT(*pte))
                return -EBUSY;

        __pte = phys_addr | IOMMU_PTE_P;
        if (prot & IOMMU_PROT_IR)
                __pte |= IOMMU_PTE_IR;
        if (prot & IOMMU_PROT_IW)
                __pte |= IOMMU_PTE_IW;

        *pte = __pte;

        return 0;
}
/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
                               struct unity_map_entry *entry)
{
        u16 bdf, i;

        for (i = entry->devid_start; i <= entry->devid_end; ++i) {
                bdf = amd_iommu_alias_table[i];
                if (amd_iommu_rlookup_table[bdf] == iommu)
                        return 1;
        }

        return 0;
}
/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
        struct unity_map_entry *entry;
        int ret;

        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
                if (!iommu_for_unity_map(iommu, entry))
                        continue;
                ret = dma_ops_unity_map(iommu->default_dom, entry);
                if (ret)
                        return ret;
        }

        return 0;
}
/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e)
{
        u64 addr;
        int ret;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
                ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
                if (ret)
                        return ret;
                /*
                 * if unity mapping is in aperture range mark the page
                 * as allocated in the aperture
                 */
                if (addr < dma_dom->aperture_size)
                        __set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
        }

        return 0;
}
/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
                                          u16 devid)
{
        struct unity_map_entry *e;
        int ret;

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
                if (!(devid >= e->devid_start && devid <= e->devid_end))
                        continue;
                ret = dma_ops_unity_map(dma_dom, e);
                if (ret)
                        return ret;
        }

        return 0;
}
/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/
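/*
 * Converts a DMA mask into the number of pages that lie below it; the
 * second term rounds up if the mask is not page aligned.
 */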
static unsigned long dma_mask_to_pages(unsigned long mask)
{
        return (mask >> PAGE_SHIFT) +
                (PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
}
/*
 * The address allocator core function.
 *
 * called with domain->lock held
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                             struct dma_ops_domain *dom,
                                             unsigned int pages,
                                             unsigned long align_mask)
{
        unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
        unsigned long address;
        unsigned long size = dom->aperture_size >> PAGE_SHIFT;
        unsigned long boundary_size;

        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;
        limit = limit < size ? limit : size;
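        /*
         * Next-fit allocation: start searching at the bit after the last
         * allocation. When we wrap around (or have to retry from 0) old
         * DMA addresses may be reused, so the domain TLB needs a flush.
         */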
        if (dom->next_bit >= limit) {
                dom->next_bit = 0;
                dom->need_flush = true;
        }

        address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
                                   0, boundary_size, align_mask);
        if (address == -1) {
                address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
                                           0, boundary_size, align_mask);
                dom->need_flush = true;
        }

        if (likely(address != -1)) {
                dom->next_bit = address + pages;
                address <<= PAGE_SHIFT;
        } else
                address = bad_dma_address;

        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

        return address;
}
/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
                                   unsigned long address,
                                   unsigned int pages)
{
        address >>= PAGE_SHIFT;
        iommu_area_free(dom->bitmap, address, pages);
}
/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/
static u16 domain_id_alloc(void)
{
        unsigned long flags;
        int id;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
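        /* id 0 doubles as the error value below, so it must never be handed out */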
        BUG_ON(id == 0);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __set_bit(id, amd_iommu_pd_alloc_bitmap);
        else
                id = 0;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return id;
}
/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                      unsigned long start_page,
                                      unsigned int pages)
{
        unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

        if (start_page + pages > last_page)
                pages = last_page - start_page;

        set_bit_string(dom->bitmap, start_page, pages);
}
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
        int i, j;
        u64 *p1, *p2, *p3;

        p1 = dma_dom->domain.pt_root;

        if (!p1)
                return;

        for (i = 0; i < 512; ++i) {
                if (!IOMMU_PTE_PRESENT(p1[i]))
                        continue;

                p2 = IOMMU_PTE_PAGE(p1[i]);
                for (j = 0; j < 512; ++j) {
                        if (!IOMMU_PTE_PRESENT(p2[j]))
                                continue;
                        p3 = IOMMU_PTE_PAGE(p2[j]);
                        free_page((unsigned long)p3);
                }

                free_page((unsigned long)p2);
        }

        free_page((unsigned long)p1);
}
/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
        if (!dom)
                return;

        dma_ops_free_pagetable(dom);

        kfree(dom->pte_pages);

        kfree(dom->bitmap);

        kfree(dom);
}
/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
                                                   unsigned order)
{
        struct dma_ops_domain *dma_dom;
        unsigned i, num_pte_pages;
        u64 *l2_pde;
        u64 address;

        /*
         * Currently the DMA aperture must be between 32 MB and 1GB in size
         */
        if ((order < 25) || (order > 30))
                return NULL;

        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
        if (!dma_dom)
                return NULL;

        spin_lock_init(&dma_dom->domain.lock);

        dma_dom->domain.id = domain_id_alloc();
        if (dma_dom->domain.id == 0)
                goto free_dma_dom;
        dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.priv = dma_dom;
        if (!dma_dom->domain.pt_root)
                goto free_dma_dom;
        dma_dom->aperture_size = (1ULL << order);
        dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
                                  GFP_KERNEL);
        if (!dma_dom->bitmap)
                goto free_dma_dom;
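        /*
         * The allocation bitmap holds one bit per aperture page, i.e.
         * aperture_size / PAGE_SIZE bits = aperture_size / (PAGE_SIZE * 8)
         * bytes.
         */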
        /*
         * mark the first page as allocated so we never return 0 as
         * a valid dma-address. So we can use 0 as error value
         */
        dma_dom->bitmap[0] = 1;
        dma_dom->next_bit = 0;

        dma_dom->need_flush = false;
        dma_dom->target_dev = 0xffff;
        /* Initialize the exclusion range if necessary */
        if (iommu->exclusion_start &&
            iommu->exclusion_start < dma_dom->aperture_size) {
                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                int pages = iommu_num_pages(iommu->exclusion_start,
                                            iommu->exclusion_length);
                dma_ops_reserve_addresses(dma_dom, startpage, pages);
        }
        /*
         * At the last step, build the page tables so we don't need to
         * allocate page table pages in the dma_ops mapping/unmapping
         * path.
         */
        num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
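        /* each preallocated page-table page holds 512 PTEs and thus maps 2 MB */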
        dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
                                     GFP_KERNEL);
        if (!dma_dom->pte_pages)
                goto free_dma_dom;

        l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
        if (l2_pde == NULL)
                goto free_dma_dom;

        dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

        for (i = 0; i < num_pte_pages; ++i) {
                dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!dma_dom->pte_pages[i])
                        goto free_dma_dom;
                address = virt_to_phys(dma_dom->pte_pages[i]);
                l2_pde[i] = IOMMU_L1_PDE(address);
        }

        return dma_dom;

free_dma_dom:
        dma_ops_domain_free(dma_dom);

        return NULL;
}
/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
        struct protection_domain *dom;
        unsigned long flags;

        read_lock_irqsave(&amd_iommu_devtable_lock, flags);
        dom = amd_iommu_pd_table[devid];
        read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return dom;
}
/*
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes the association visible
 * to the hardware.
 */
static void set_device_domain(struct amd_iommu *iommu,
                              struct protection_domain *domain,
                              u16 devid)
{
        unsigned long flags;
        u64 pte_root = virt_to_phys(domain->pt_root);

        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
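        /*
         * The first two words of the device table entry carry the page
         * table root pointer, paging mode and permission bits; the third
         * word carries the domain id.
         */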
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[2] = domain->id;

        amd_iommu_pd_table[devid] = domain;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        iommu_queue_inv_dev_entry(iommu, devid);

        iommu->need_sync = 1;
}
/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
        if (!dev || !dev->dma_mask)
                return false;

        return true;
}
/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
        struct dma_ops_domain *entry, *ret = NULL;
        unsigned long flags;

        if (list_empty(&iommu_pd_list))
                return NULL;

        spin_lock_irqsave(&iommu_pd_list_lock, flags);

        list_for_each_entry(entry, &iommu_pd_list, list) {
                if (entry->target_dev == devid) {
                        ret = entry;
                        list_del(&ret->list);
                        break;
                }
        }

        spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

        return ret;
}
/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
                                struct amd_iommu **iommu,
                                struct protection_domain **domain,
                                u16 *bdf)
{
        struct dma_ops_domain *dma_dom;
        struct pci_dev *pcidev;
        u16 _bdf;

        *iommu = NULL;
        *domain = NULL;
        *bdf = 0xffff;

        if (dev->bus != &pci_bus_type)
                return 0;

        pcidev = to_pci_dev(dev);
        _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

        /* device not translated by any IOMMU in the system? */
        if (_bdf > amd_iommu_last_bdf)
                return 0;

        *bdf = amd_iommu_alias_table[_bdf];

        *iommu = amd_iommu_rlookup_table[*bdf];
        if (*iommu == NULL)
                return 0;
        *domain = domain_for_device(*bdf);
        if (*domain == NULL) {
                dma_dom = find_protection_domain(*bdf);
                if (!dma_dom)
                        dma_dom = (*iommu)->default_dom;
                *domain = &dma_dom->domain;
                set_device_domain(*iommu, *domain, *bdf);
                printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
                       "device ", (*domain)->id);
                print_devid(_bdf, 1);
        }

        return 1;
}
/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
                                     struct dma_ops_domain *dom,
                                     unsigned long address,
                                     phys_addr_t paddr,
                                     int direction)
{
        u64 *pte, __pte;

        WARN_ON(address > dom->aperture_size);

        paddr &= PAGE_MASK;

        pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
        pte += IOMMU_PTE_L0_INDEX(address);

        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
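        /* grant the device read and/or write permission depending on the DMA direction */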
        if (direction == DMA_TO_DEVICE)
                __pte |= IOMMU_PTE_IR;
        else if (direction == DMA_FROM_DEVICE)
                __pte |= IOMMU_PTE_IW;
        else if (direction == DMA_BIDIRECTIONAL)
                __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

        WARN_ON(*pte);

        *pte = __pte;

        return (dma_addr_t)address;
}
/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
                                 struct dma_ops_domain *dom,
                                 unsigned long address)
{
        u64 *pte;

        if (address >= dom->aperture_size)
                return;

        WARN_ON(address & 0xfffULL || address > dom->aperture_size);

        pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
        pte += IOMMU_PTE_L0_INDEX(address);

        WARN_ON(!*pte);

        *pte = 0ULL;
}
/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided by this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
                               struct amd_iommu *iommu,
                               struct dma_ops_domain *dma_dom,
                               phys_addr_t paddr,
                               size_t size,
                               int dir,
                               bool align)
{
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start;
        unsigned int pages;
        unsigned long align_mask = 0;
        int i;

        pages = iommu_num_pages(paddr, size);
        paddr &= PAGE_MASK;

        if (align)
                align_mask = (1UL << get_order(size)) - 1;

        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask);
        if (unlikely(address == bad_dma_address))
                goto out;

        start = address;
        for (i = 0; i < pages; ++i) {
                dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
                paddr += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        address += offset;
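        /*
         * If addresses were reused (need_flush) the whole domain TLB is
         * flushed here unless fullflush mode already flushes on unmap.
         * Otherwise a flush of the new range is only needed when the
         * IOMMU caches non-present entries.
         */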
        if (unlikely(dma_dom->need_flush && !iommu_fullflush)) {
                iommu_flush_tlb(iommu, dma_dom->domain.id);
                dma_dom->need_flush = false;
        } else if (unlikely(iommu_has_npcache(iommu)))
                iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
        return address;
}
/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
                           struct dma_ops_domain *dma_dom,
                           dma_addr_t dma_addr,
                           size_t size,
                           int dir)
{
        dma_addr_t i, start;
        unsigned int pages;

        if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
                return;

        pages = iommu_num_pages(dma_addr, size);
        dma_addr &= PAGE_MASK;
        start = dma_addr;

        for (i = 0; i < pages; ++i) {
                dma_ops_domain_unmap(iommu, dma_dom, start);
                start += PAGE_SIZE;
        }

        dma_ops_free_addresses(dma_dom, dma_addr, pages);

        if (iommu_fullflush)
                iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
}
/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
                             size_t size, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        dma_addr_t addr;

        if (!check_device(dev))
                return bad_dma_address;

        get_device_resources(dev, &iommu, &domain, &devid);

        if (iommu == NULL || domain == NULL)
                /* device not handled by any AMD IOMMU */
                return (dma_addr_t)paddr;

        spin_lock_irqsave(&domain->lock, flags);
        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false);
        if (addr == bad_dma_address)
                goto out;

        if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return addr;
}
/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
                         size_t size, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;

        if (!check_device(dev) ||
            !get_device_resources(dev, &iommu, &domain, &devid))
                /* device not handled by any AMD IOMMU */
                return;

        spin_lock_irqsave(&domain->lock, flags);

        __unmap_single(iommu, domain->priv, dma_addr, size, dir);

        if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);

        spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
                           int nelems, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sglist, s, nelems, i) {
                s->dma_address = (dma_addr_t)sg_phys(s);
                s->dma_length  = s->length;
        }

        return nelems;
}
/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
                  int nelems, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        int i;
        struct scatterlist *s;
        phys_addr_t paddr;
        int mapped_elems = 0;

        if (!check_device(dev))
                return 0;

        get_device_resources(dev, &iommu, &domain, &devid);

        if (!iommu || !domain)
                return map_sg_no_iommu(dev, sglist, nelems, dir);

        spin_lock_irqsave(&domain->lock, flags);

        for_each_sg(sglist, s, nelems, i) {
                paddr = sg_phys(s);

                s->dma_address = __map_single(dev, iommu, domain->priv,
                                              paddr, s->length, dir, false);

                if (s->dma_address) {
                        s->dma_length = s->length;
                        mapped_elems++;
                } else
                        goto unmap;
        }

        if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return mapped_elems;
unmap:
        for_each_sg(sglist, s, mapped_elems, i) {
                if (s->dma_address)
                        __unmap_single(iommu, domain->priv, s->dma_address,
                                       s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }

        mapped_elems = 0;

        goto out;
}
/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                     int nelems, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        struct scatterlist *s;
        u16 devid;
        int i;

        if (!check_device(dev) ||
            !get_device_resources(dev, &iommu, &domain, &devid))
                return;

        spin_lock_irqsave(&domain->lock, flags);

        for_each_sg(sglist, s, nelems, i) {
                __unmap_single(iommu, domain->priv, s->dma_address,
                               s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }

        if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);

        spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
                            dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long flags;
        void *virt_addr;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        phys_addr_t paddr;

        if (!check_device(dev))
                return NULL;

        if (!get_device_resources(dev, &iommu, &domain, &devid))
                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        virt_addr = (void *)__get_free_pages(flag, get_order(size));
        if (!virt_addr)
                return NULL;

        memset(virt_addr, 0, size);

        paddr = virt_to_phys(virt_addr);

        if (!iommu || !domain) {
                *dma_addr = (dma_addr_t)paddr;
                return virt_addr;
        }

        spin_lock_irqsave(&domain->lock, flags);

        *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
                                 size, DMA_BIDIRECTIONAL, true);

        if (*dma_addr == bad_dma_address) {
                free_pages((unsigned long)virt_addr, get_order(size));
                virt_addr = NULL;
                goto out;
        }

        if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return virt_addr;
}
/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
                          void *virt_addr, dma_addr_t dma_addr)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;

        if (!check_device(dev))
                return;

        get_device_resources(dev, &iommu, &domain, &devid);

        if (!iommu || !domain)
                goto free_mem;

        spin_lock_irqsave(&domain->lock, flags);

        __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

        if (unlikely(iommu->need_sync))
                iommu_completion_wait(iommu);

        spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
        free_pages((unsigned long)virt_addr, get_order(size));
}
/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
        u16 bdf;
        struct pci_dev *pcidev;

        /* No device or no PCI device */
        if (!dev || dev->bus != &pci_bus_type)
                return 0;

        pcidev = to_pci_dev(dev);

        bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

        /* Out of our scope? */
        if (bdf > amd_iommu_last_bdf)
                return 0;

        return 1;
}
/*
 * The function for pre-allocating protection domains.
 *
 * If the driver core informed the DMA layer when a driver grabs a
 * device we wouldn't need to preallocate the protection domains
 * anymore. For now we have to.
 */
void prealloc_protection_domains(void)
{
        struct pci_dev *dev = NULL;
        struct dma_ops_domain *dma_dom;
        struct amd_iommu *iommu;
        int order = amd_iommu_aperture_order;
        u16 devid;

        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
                devid = (dev->bus->number << 8) | dev->devfn;
                if (devid > amd_iommu_last_bdf)
                        continue;
                devid = amd_iommu_alias_table[devid];
                if (domain_for_device(devid))
                        continue;
                iommu = amd_iommu_rlookup_table[devid];
                if (!iommu)
                        continue;
                dma_dom = dma_ops_domain_alloc(iommu, order);
                if (!dma_dom)
                        continue;
                init_unity_mappings_for_device(dma_dom, devid);
                dma_dom->target_dev = devid;

                list_add_tail(&dma_dom->list, &iommu_pd_list);
        }
}
static struct dma_mapping_ops amd_iommu_dma_ops = {
        .alloc_coherent = alloc_coherent,
        .free_coherent = free_coherent,
        .map_single = map_single,
        .unmap_single = unmap_single,
        .map_sg = map_sg,
        .unmap_sg = unmap_sg,
        .dma_supported = amd_iommu_dma_supported,
};
/*
 * The function which hooks the AMD IOMMU driver into the dma_ops
 * interface.
 */
int __init amd_iommu_init_dma_ops(void)
{
        struct amd_iommu *iommu;
        int order = amd_iommu_aperture_order;
        int ret;

        /*
         * first allocate a default protection domain for every IOMMU we
         * found in the system. Devices not assigned to any other
         * protection domain will be assigned to the default one.
         */
        list_for_each_entry(iommu, &amd_iommu_list, list) {
                iommu->default_dom = dma_ops_domain_alloc(iommu, order);
                if (iommu->default_dom == NULL)
                        return -ENOMEM;
                ret = iommu_init_unity_mappings(iommu);
                if (ret)
                        goto free_domains;
        }

        /*
         * If device isolation is enabled, pre-allocate the protection
         * domains for each device.
         */
        if (amd_iommu_isolate)
                prealloc_protection_domains();

        iommu_detected = 1;
        force_iommu = 1;
        bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
        gart_iommu_aperture_disabled = 1;
        gart_iommu_aperture = 0;
#endif

        /* Make the driver finally visible to the drivers */
        dma_ops = &amd_iommu_dma_ops;

        return 0;

free_domains:

        list_for_each_entry(iommu, &amd_iommu_list, list) {
                if (iommu->default_dom)
                        dma_ops_domain_free(iommu->default_dom);
        }

        return ret;
}