/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/amd_iommu_types.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

static DEFINE_RWLOCK(amd_iommu_devtable_lock);
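
/* Command buffer entry: four 32-bit words; the type is encoded in data[1] */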
struct command {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);

static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & IOMMU_CAP_NPCACHE;
}
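
/*
 * Writes the command to the IOMMU command buffer and informs the hardware
 * about the new command by updating the tail pointer. The caller must hold
 * iommu->lock.
 */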
static int __iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = (iommu->cmd_buf + tail);
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}
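
/*
 * General queuing function for commands. Takes iommu->lock and calls the
 * internal __iommu_queue_command() function.
 */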
static int iommu_queue_command(struct amd_iommu *iommu, struct command *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
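
/*
 * Ensures that the IOMMU has completed execution of all queued commands by
 * sending a COMPLETION_WAIT command and polling the memory location the
 * hardware writes to when it is done.
 */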
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret;
	struct command cmd;
	volatile u64 ready = 0;
	unsigned long ready_phys = virt_to_phys(&ready);

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = LOW_U32(ready_phys) | CMD_COMPL_WAIT_STORE_MASK;
	cmd.data[1] = HIGH_U32(ready_phys);
	cmd.data[2] = 1; /* value written to 'ready' */
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	iommu->need_sync = 0;

	ret = iommu_queue_command(iommu, &cmd);
	if (ret)
		return ret;

	while (!ready)
		cpu_relax();

	return 0;
}
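
/* Queues a command to invalidate the device table entry for a device. */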
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct command cmd;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	iommu->need_sync = 1;

	return iommu_queue_command(iommu, &cmd);
}
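
/*
 * Generic command to invalidate translations the IOMMU has cached for
 * pages of a protection domain.
 */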
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct command cmd;

	memset(&cmd, 0, sizeof(cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
	cmd.data[1] |= domid;
	cmd.data[2] = LOW_U32(address);
	cmd.data[3] = HIGH_U32(address);
	if (s)
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde)
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

	iommu->need_sync = 1;

	return iommu_queue_command(iommu, &cmd);
}
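
/*
 * Flushes the IOMMU TLB for a range of pages in a domain by sending one
 * invalidation command per 4kb page.
 */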
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
		u64 address, size_t size)
{
	int i;
	unsigned pages = to_pages(address, size);

	address &= PAGE_MASK;

	for (i = 0; i < pages; ++i) {
		iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 0);
		address += PAGE_SIZE;
	}

	return 0;
}
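
/*
 * Generic mapping function. Maps one 4kb page at phys_addr to bus_addr in
 * the domain's page table, allocating intermediate page table pages as
 * needed.
 */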
static int iommu_map(struct protection_domain *dom,
		     unsigned long bus_addr,
		     unsigned long phys_addr,
		     int prot)
{
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	return 0;
}
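
/*
 * The next functions set up the unity mappings (1:1 translations) required
 * for specific devices. This helper checks whether a unity map entry is
 * handled by a given IOMMU.
 */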
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
	}

	return 0;
}

static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}
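
/* Converts a DMA mask to the number of pages addressable below it. */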
static unsigned long dma_mask_to_pages(unsigned long mask)
{
	return (mask >> PAGE_SHIFT) +
		(PAGE_ALIGN(mask & ~PAGE_MASK) >> PAGE_SHIFT);
}
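
/*
 * Address allocator core. Searches the aperture bitmap for a free range,
 * starting at next_bit and wrapping around once before giving up.
 */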
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages)
{
	unsigned long limit = dma_mask_to_pages(*dev->dma_mask);
	unsigned long address;
	unsigned long size = dom->aperture_size >> PAGE_SHIFT;
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;
	limit = limit < size ? limit : size;

	if (dom->next_bit >= limit)
		dom->next_bit = 0;

	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
			0, boundary_size, 0);
	if (address == -1)
		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
				0, boundary_size, 0);

	if (likely(address != -1)) {
		set_bit_string(dom->bitmap, address, pages);
		dom->next_bit = address + pages;
		address <<= PAGE_SHIFT;
	} else
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);
}
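
/* Allocates an unused protection domain id from the global bitmap. */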
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	set_bit_string(dom->bitmap, start_page, pages);
}
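
/*
 * Frees the page table of a dma_ops domain by walking all three levels
 * and freeing every present page table page.
 */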
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = dma_dom->domain.pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;

			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);
}
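
/* Frees a dma_ops domain and everything that was allocated for it. */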
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	dma_ops_free_pagetable(dom);

	kfree(dom->pte_pages);
	kfree(dom->bitmap);
	kfree(dom);
}
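
/*
 * Allocates a new dma_ops domain, including the 3-level page table, the
 * aperture bitmap and the pre-allocated page table pages for the aperture.
 */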
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
						   unsigned order)
{
	struct dma_ops_domain *dma_dom;
	unsigned i, num_pte_pages;
	u64 *l2_pde;
	u64 address;

	/*
	 * Currently the DMA aperture must be between 32 MB and 1GB in size
	 */
	if ((order < 25) || (order > 30))
		return NULL;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
	dma_dom->aperture_size = (1ULL << order);
	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
				  GFP_KERNEL);
	if (!dma_dom->bitmap)
		goto free_dma_dom;
	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->bitmap[0] = 1;
	dma_dom->next_bit = 0;

	if (iommu->exclusion_start &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = to_pages(iommu->exclusion_start,
				iommu->exclusion_length);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
			GFP_KERNEL);
	if (!dma_dom->pte_pages)
		goto free_dma_dom;

	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (l2_pde == NULL)
		goto free_dma_dom;

	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

	for (i = 0; i < num_pte_pages; ++i) {
		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!dma_dom->pte_pages[i])
			goto free_dma_dom;
		address = virt_to_phys(dma_dom->pte_pages[i]);
		l2_pde[i] = IOMMU_L1_PDE(address);
	}

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}
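
/* Returns the protection domain a device is currently bound to, if any. */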
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}
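
/*
 * Binds a device to a protection domain: writes the page table root, mode
 * and domain id into the device table entry and invalidates the old entry
 * in the hardware.
 */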
static void set_device_domain(struct amd_iommu *iommu,
			      struct protection_domain *domain,
			      u16 devid)
{
	unsigned long flags;
	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & 0x07) << 9;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | 2;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	amd_iommu_dev_table[devid].data[0] = pte_root;
	amd_iommu_dev_table[devid].data[1] = pte_root >> 32;
	amd_iommu_dev_table[devid].data[2] = domain->id;
	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}
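
/*
 * In the dma_ops path we only have the struct device. This function finds
 * the corresponding IOMMU, protection domain and requestor id for a device.
 * A device without a domain is bound to the default domain here. Returns 0
 * if the device is not handled by an IOMMU, 1 otherwise.
 */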
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	BUG_ON(!dev || dev->bus != &pci_bus_type || !dev->dma_mask);

	pcidev = to_pci_dev(dev);
	_bdf = (pcidev->bus->number << 8) | pcidev->devfn;

	if (_bdf >= amd_iommu_last_bdf) {
		*iommu = NULL;
		*domain = NULL;
		*bdf = 0xffff;
		return 0;
	}

	*bdf = amd_iommu_alias_table[_bdf];
	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	dma_dom = (*iommu)->default_dom;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		*domain = &dma_dom->domain;
		set_device_domain(*iommu, *domain, *bdf);
		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
		       "device ", (*domain)->id);
		print_devid(_bdf, 1);
	}

	return 1;
}
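
/*
 * Maps a single 4kb page into the aperture at the given address with the
 * read/write permissions derived from the DMA direction.
 */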
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	WARN_ON(address & 0xfffULL || address > dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}
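
/*
 * Common code for mapping a physically contiguous memory region into the
 * DMA address space. Expects the domain lock to be held by the caller.
 */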
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start;
	unsigned int pages;
	int i;

	pages = to_pages(paddr, size);
	paddr &= PAGE_MASK;

	address = dma_ops_alloc_addresses(dev, dma_dom, pages);
	if (unlikely(address == bad_dma_address))
		goto out;

	start = address;
	for (i = 0; i < pages; ++i) {
		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

out:
	return address;
}
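
/*
 * Does the reverse of __map_single: unmaps the pages and releases the
 * addresses back to the allocator.
 */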
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = to_pages(dma_addr, size);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	dma_ops_free_addresses(dma_dom, dma_addr, pages);
}
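
/*
 * The dma_ops map_single entry point. Devices not handled by an IOMMU get
 * an identity mapping (the physical address is returned unchanged).
 */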
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir);
	if (addr == bad_dma_address)
		goto out;

	if (iommu_has_npcache(iommu))
		iommu_flush_pages(iommu, domain->id, addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}
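
/* The dma_ops unmap_single entry point. */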
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	iommu_flush_pages(iommu, domain->id, dma_addr, size);

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}
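
/*
 * Special map_sg variant used for devices that are not handled by an
 * IOMMU in the system; it simply passes through the physical addresses.
 */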
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length = s->length;
	}

	return nelems;
}
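
/*
 * The dma_ops map_sg entry point. Maps each scatterlist element separately
 * and unwinds all mappings if one of them fails.
 */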
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
		if (iommu_has_npcache(iommu))
			iommu_flush_pages(iommu, domain->id, s->dma_address,
					  s->dma_length);
	}

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
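
/* The dma_ops unmap_sg entry point. */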
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		iommu_flush_pages(iommu, domain->id, s->dma_address,
				  s->dma_length);
		s->dma_address = s->dma_length = 0;
	}

	if (iommu->need_sync)
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}