/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
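
/*
 * Illustrative examples: max_alignment() returns the largest supported
 * iommu page size whose low bits of 'addr' are all clear, e.g.
 * max_alignment(0x01000000) == SZ_16M (low 24 bits clear), while
 * max_alignment(0x01234000) == SZ_4K, since only bits [11:0] are zero.
 */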

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
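
/*
 * Illustrative example (assuming 4KB pages): for da == pa == 0x00100000 and
 * bytes == SZ_1M + SZ_64K, the first iteration emits one 1MB entry and the
 * second one 64KB entry, so sgtable_nents() returns 2 rather than the 272
 * PAGE_SIZE entries a naive per-page split would need.
 */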

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}
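
/*
 * Note: the BUG_ON() above means vmap_sg() only accepts tables whose
 * entries each cover exactly one PAGE_SIZE; the mpu-side view of an
 * IOVMF_MMIO area is therefore built page by page with ioremap_page().
 */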

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area - find iovma which includes @da
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
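
/*
 * Illustrative example of the first-fit walk above (assuming 4KB pages and
 * obj->da_start == 0): with existing areas [0x1000, 0x3000) and
 * [0x5000, 0x6000), a one-page request without IOVMF_DA_FIXED has 'start'
 * bumped to roundup(0x3000 + 1, PAGE_SIZE) == 0x4000, the first aligned
 * hole large enough, and the new iovma is placed there.
 */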
static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;
	int order;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		order = get_order(bytes);

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, order, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;
		order = get_order(bytes);

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, order);

		da += bytes;
	}
	return err;
}
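
/*
 * Note: with the iommu_map()/iommu_unmap() prototypes of this kernel
 * generation, the size argument is a get_order() value, i.e. log2 of the
 * size in pages; a 1MB section entry, for instance, maps with
 * order == get_order(SZ_1M) == 8 (assuming 4KB pages).
 */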

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i, err;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		int order;

		bytes = sg->length + sg->offset;
		order = get_order(bytes);

		err = iommu_unmap(domain, start, order);
		if (err < 0)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: iommu device virtual address
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
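
/*
 * Usage sketch (illustrative; 'dom', 'obj' and 'sgt' are assumed to come
 * from the caller, with 'dom' already attached to the device):
 *
 *	u32 da = omap_iommu_vmap(dom, obj, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = omap_iommu_vunmap(dom, obj, da);
 *
 * omap_iommu_vunmap() hands 'sgt' back; the caller remains responsible
 * for freeing it.
 */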

/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates the 1-n-1 mapping, and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
		   size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
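
/*
 * Usage sketch (illustrative; 'dom' and 'obj' as above): backing one
 * megabyte of device virtual space with fresh vmalloc() pages, then
 * tearing everything down again:
 *
 *	u32 da = omap_iommu_vmalloc(dom, obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	void *va = omap_da_to_va(obj, da);
 *	...
 *	omap_iommu_vfree(dom, obj, da);
 *
 * Unlike omap_iommu_vunmap(), omap_iommu_vfree() also frees the sg_table,
 * since it was allocated internally by omap_iommu_vmalloc().
 */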

/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
		      const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");