omap-iovmm.c

/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length;

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;

        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
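
/*
 * Illustrative note (not part of the original driver): max_alignment()
 * returns the largest supported iommu page size that the address is
 * aligned to.  For example max_alignment(0x01000000) == SZ_16M,
 * max_alignment(0x00110000) == SZ_64K, and max_alignment(0x00001234) == 0
 * because 0x1234 is not even 4KiB aligned.
 */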

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;

                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}
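
/*
 * Illustrative note (not part of the original driver): the element count
 * depends on how well da and pa are aligned.  For example,
 * sgtable_nents(SZ_2M, 0x10000000, 0x80000000) covers the region with two
 * 1MiB entries, while a poorly aligned da/pa pair falls back to many more
 * 64KiB or 4KiB entries.
 */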

/* allocate and initialize sg_table header(a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header(a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg->length;

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
                                            const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * omap_find_iovm_area - find iovma which includes @da
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                   obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
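
/*
 * Illustrative note (not part of the original driver): the search above is a
 * simple first-fit walk of the existing iovmas kept in ascending order.  With
 * IOVMF_LINEAR and, say, bytes = SZ_2M, the candidate start address is rounded
 * up to iopgsz_max(SZ_2M) == SZ_1M so that superpage mappings remain possible;
 * with IOVMF_DA_FIXED the requested da is used as-is and simply validated
 * against the device's da_start/da_end window and the existing areas.
 */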

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);

        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 * @va: mpu virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it exists only for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;
        int order;

        if (!domain || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                size_t bytes;

                pa = sg_phys(sg);
                bytes = sg->length;

                flags &= ~IOVMF_PGSZ_MASK;

                if (bytes_to_iopgsz(bytes) < 0) {
                        err = -EINVAL;
                        goto err_out;
                }

                order = get_order(bytes);

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, order, flags);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = sg->length;
                order = get_order(bytes);

                /* ignore failures.. we're already handling one */
                iommu_unmap(domain, da, order);

                da += bytes;
        }
        return err;
}
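
/*
 * Illustrative note (not part of the original driver): this file targets the
 * old iommu_map()/iommu_unmap() API, which takes a size expressed as a page
 * order rather than in bytes, hence the get_order() calls, e.g.
 * get_order(SZ_64K) == 4 with 4KiB pages.  With that API iommu_unmap()
 * returns the unmapped page order on success (or a negative errno); later
 * kernels changed both calls to take and return sizes in bytes.
 */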

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
                            struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        int i, err;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                int order;

                bytes = sg->length;
                order = get_order(bytes);

                err = iommu_unmap(domain, start, order);
                if (err < 0)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                        __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                      struct omap_iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
                            u32 da, const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(domain, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
             u32 da, const struct sg_table *sgt,
             void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
                    const struct sg_table *sgt, u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
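
/*
 * Illustrative usage sketch (not part of the original driver), assuming the
 * caller already owns an attached iommu_domain, an omap_iommu handle and a
 * page-aligned sg_table:
 *
 *      da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ... device uses the buffer at 'da' ...
 *      sgt = omap_iommu_vunmap(domain, obj, da);   (caller frees sgt)
 *
 * Passing da == 0 without IOVMF_DA_FIXED lets alloc_iovm_area() pick a free
 * device address inside the iommu's da_start/da_end window; the function
 * itself ORs in IOVMF_DISCONT | IOVMF_MMIO before recording the area.
 */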

/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free
         */
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                            IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns @da,
 * which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
                   size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
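
/*
 * Illustrative usage sketch (not part of the original driver): a typical
 * caller lets the allocator pick both the backing pages and the device
 * address, then releases everything with omap_iommu_vfree():
 *
 *      da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ... CPU accesses the buffer via omap_da_to_va(obj, da) ...
 *      omap_iommu_vfree(domain, obj, da);
 *
 * Unlike omap_iommu_vmap(), the sg_table here is created and freed by this
 * driver itself, so omap_iommu_vfree() disposes of it internally.
 */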

/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
                      const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                            IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");