/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <mach/iommu.h>
#include <mach/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    | iova/                   mapping           iommu_               page
 *    | da      pa      va      (d)-(p)-(v)       function             type
 * ---------------------------------------------------------------------------
 *  1 | c       c       c       1 - 1 - 1         _kmap() / _kunmap()     s
 *  2 | c       c,a     c       1 - 1 - 1         _kmalloc()/ _kfree()    s
 *  3 | c       d       c       1 - n - 1         _vmap() / _vunmap()     s
 *  4 | c       d,a     c       1 - n - 1         _vmalloc()/ _vfree()    n*
 *
 * 'iova': device iommu virtual address
 * 'da':   alias of 'iova'
 * 'pa':   physical address
 * 'va':   mpu virtual address
 *
 * 'c':    contiguous memory area
 * 'd':    discontiguous memory area
 * 'a':    anonymous memory allocation
 * '()':   optional feature
 *
 * 'n':    a normal page (4KB) size is used
 * 's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used
 *
 * '*':    not yet, but feasible
 */
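
/*
 * Usage sketch (illustrative, not part of the original file): it assumes
 * a client handle obtained with iommu_get()/iommu_put(), and the client
 * name "isp" is hypothetical. The calls show pattern 4 from the table
 * above, with da == 0 so that iovmm picks the device address:
 *
 *      struct iommu *obj = iommu_get("isp");
 *      u32 da;
 *
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *      if (!IS_ERR_VALUE(da)) {
 *              <device DMAs to/from 'da'>
 *              iommu_vfree(obj, da);
 *      }
 *      iommu_put(obj);
 */
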
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg_dma_len(sg);

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] is not iommu pagesize(%zx)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
        int i;
        unsigned int nr_entries;
        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %zx\n", __func__, bytes);
                return 0;
        }

        nr_entries = 0;
        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
                if (bytes >= pagesize[i]) {
                        nr_entries += (bytes / pagesize[i]);
                        bytes %= pagesize[i];
                }
        }
        BUG_ON(bytes);

        return nr_entries;
}
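
/*
 * Worked example (illustrative, not from the original source): for
 * bytes = 17MB + 64KB the greedy walk above yields
 *      SZ_16M: 1 entry  (1MB + 64KB left)
 *      SZ_1M : 1 entry  (64KB left)
 *      SZ_64K: 1 entry  (0 left)
 * i.e. nr_entries = 3, instead of the 4368 entries a plain 4KB split
 * would need.
 */
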
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        /* FIXME: IOVMF_DA_FIXED should support 'superpages' */
        if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
                nr_entries = sgtable_nents(bytes);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);     /* don't leak the header on failure */
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * find_iovm_area - find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) that fits the requested address and length
 * in the iovma mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (flags & IOVMF_DA_ANON) {
                /*
                 * Reserve the first page for NULL
                 */
                start = PAGE_SIZE;
                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((prev_end <= start) && (start + bytes < tmp->da_start))
                        goto found;

                if (flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (ULONG_MAX - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
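
/*
 * Worked example (illustrative, not from the original source): with two
 * existing iovmas [0x1000, 0x5000) and [0x10000, 0x20000), an anonymous
 * (IOVMF_DA_ANON) request for 0x2000 bytes walks the list as follows:
 * the candidate starts at the reserved NULL page boundary (0x1000), the
 * first iovma rejects it and advances the candidate to
 * roundup(0x5000, PAGE_SIZE) = 0x5000, and since 0x5000 + 0x2000 fits
 * below 0x10000 the new iovma is placed at [0x5000, 0x7000).
 */
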
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);

        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);

                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
        flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va;
        const size_t total = len;

        va = phys_to_virt(pa);

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = iopgsz_max(len);

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is contiguous (linear).
                 */
                pa += bytes;
                len -= bytes;
        }
        BUG_ON(len);

        /* clean the CPU dcache over the entire buffer */
        clean_dcache_area(va, total);
}
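
/*
 * Worked example (illustrative, not from the original source): for a
 * linear region of SZ_1M + SZ_64K, iopgsz_max() picks the largest
 * supported iommu page size each round, so the table is filled with
 * exactly two entries: one 1MB entry, then one 64KB entry.
 */
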
static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!obj || !new || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                int pgsz;
                size_t bytes;
                struct iotlb_entry e;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                flags &= ~IOVMF_PGSZ_MASK;
                pgsz = bytes_to_iopgsz(bytes);
                if (pgsz < 0) {
                        err = -EINVAL;
                        goto err_out;
                }
                flags |= pgsz;

                pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
                         i, da, pa, bytes);

                iotlb_init_entry(&e, da, pa, flags);
                err = iopgtable_store_entry(obj, &e);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, da);

                BUG_ON(!iopgsz_ok(bytes));

                da += bytes;
        }
        return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;

        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        while (total > 0) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, start);
                if (bytes == 0)
                        bytes = PAGE_SIZE;
                else
                        dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
                                __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
                            const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(obj, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
                               const struct sg_table *sgt, void *va,
                               size_t bytes, u32 flags)
{
        return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates a 1-n-1 mapping with given @sgt and returns @da.
 * Each @sgt element must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
               u32 flags)
{
        size_t bytes;
        void *va;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        va = vmap_sg(sgt);
        if (IS_ERR(va))
                return PTR_ERR(va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
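
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * caller-built sg_table of page-sized chunks at an iommu-chosen 'da'.
 * 'pages' and 'nr' are assumed to exist in the caller, and error
 * handling is elided:
 *
 *      struct sg_table *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *      struct scatterlist *sg;
 *      unsigned int i;
 *      u32 da;
 *
 *      sg_alloc_table(sgt, nr, GFP_KERNEL);
 *      for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *              sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *      da = iommu_vmap(obj, 0, sgt, 0);
 *      ...
 *      sgt = iommu_vunmap(obj, da);    (caller owns and frees 'sgt')
 */
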
/**
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated by the caller before 'iommu_vmap()' is called.
         * Just return 'sgt' to the caller to free.
         */
        sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);

        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);

        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                        size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, bytes);

        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}

/**
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @pa: contiguous physical memory
 * @bytes: mapping size
 * @flags: iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
               u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
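
/*
 * Usage sketch (illustrative, not part of the original file; the
 * physical address below is made up): exposing a physically contiguous
 * region to the device at an iommu-chosen 'da':
 *
 *      da = iommu_kmap(obj, 0, 0x9c000000, SZ_1M, 0);
 *      if (!IS_ERR_VALUE(da)) {
 *              ...
 *              iommu_kunmap(obj, da);
 *      }
 */
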
/**
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);

        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: bytes for allocation
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);

        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");