/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"
/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT		16

/*
 * iovma: h/w flags derived from cam and ram attributes
 */
#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK		(3 << 0)
#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK	(1 << 9)
#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK		(3 << 7)
#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK	(1 << 6)
#define IOVMF_MIXED		MMU_RAM_MIXED

/*
 * iovma: s/w flags, used internally for mapping and unmapping.
 */
#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)

/* "superpages" are supported only with physically linear pages */
#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))
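
/*
 * Flag composition, as a minimal illustrative sketch (values here are
 * examples, not requirements): omap_iommu_vmalloc() tags its iovmas with
 * IOVMF_DISCONT | IOVMF_ALLOC, while omap_iommu_vmap() uses
 * IOVMF_DISCONT | IOVMF_MMIO. A caller that must keep the exact device
 * address it passed in additionally sets IOVMF_DA_FIXED:
 *
 *	u32 flags = IOVMF_DA_FIXED;
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0x20000000, SZ_1M, flags);
 *
 * The h/w bits in the lower half (IOVMF_PGSZ_*, IOVMF_ENDIAN_*,
 * IOVMF_ELSZ_*) mirror the cam/ram register layout; the page-size bits
 * are cleared per entry when the area is actually mapped (see
 * map_iovm_area() below).
 */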
static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%zu %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;

	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
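
/*
 * Worked example (illustrative only): 0x40110000 fails the 16M and 1M
 * alignment checks but satisfies (0x40110000 & (SZ_64K - 1)) == 0, so
 * max_alignment(0x40110000) returns SZ_64K. An address such as
 * 0x40110200 matches none of the entries and yields 0.
 */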
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08zx\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));

		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
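
/*
 * Worked example (illustrative only, assuming iopgsz_max() from
 * omap-iopgtable.h returns the largest supported iommu page size not
 * exceeding its argument): for bytes = SZ_16M + SZ_1M with
 * da = pa = 0x01000000, the first pass sees a 16M-aligned address and
 * iopgsz_max() allows SZ_16M, consuming 16M in one entry; the second
 * pass has SZ_1M left, so the entry is capped at SZ_1M.
 * sgtable_nents() therefore returns 2 instead of the 4352 entries a
 * plain 4K split would need.
 */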
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of 'superblock') */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}
/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}
static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area - find iovma which includes @da
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Find the existing iovma that includes @da.
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
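
/*
 * Usage sketch (hypothetical client code, not part of this driver): a
 * caller can check whether a device address is already backed before
 * reusing it:
 *
 *	struct iovm_struct *area = omap_find_iovm_area(dev, da);
 *
 *	if (area)
 *		dev_dbg(dev, "da %08x lives in %08x-%08x\n",
 *			da, area->da_start, area->da_end);
 *
 * Note that the area can disappear as soon as mmap_lock is dropped, so
 * the result is only a hint unless the caller serializes against unmaps.
 */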
/*
 * This finds the hole (free gap) which fits the requested address and
 * length in the iovma list of @obj, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {
		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
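
/*
 * Worked example of the hole fit above (illustrative only, assuming
 * obj->da_start is 0 and 4K pages): with existing iovmas
 * [0x1000,0x3000) and [0x8000,0x9000), a 0x4000-byte request without
 * IOVMF_DA_FIXED starts at 0x1000, collides with the first area and is
 * bumped to roundup(0x3001, PAGE_SIZE) = 0x4000, where the 0x4000-byte
 * gap before 0x8000 fits, so [0x4000,0x8000) is inserted between the
 * two areas.
 */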
static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}
/**
 * omap_da_to_va - convert (d) to (v)
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Returns the mpu virtual address which corresponds to a given
 * device virtual address.
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
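
/*
 * Usage sketch (hypothetical client code): a driver that shares a buffer
 * with a coprocessor can get a CPU pointer before touching the data:
 *
 *	void *va = omap_da_to_va(dev, da);
 *
 *	if (va)
 *		memset(va, 0, SZ_4K);
 *
 * Note that the returned pointer is the base 'va' of the containing
 * area, not va plus the offset of @da within it, and as with
 * omap_find_iovm_area() it is only stable while no one unmaps the area
 * underneath the caller.
 */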
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, just exists for
	 * consistency of the code readability.
	 */
	BUG_ON(!sgt);
}
/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0) {
			err = -EINVAL;
			goto err_out;
		}

		pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}
/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}
/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}
/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		vunmap_sg(va);
		return da;
	}

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
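
/*
 * Usage sketch (hypothetical client code, error handling elided): map a
 * caller-built scatterlist for the device and later release it, getting
 * the same sg_table back from the unmap so it can be freed:
 *
 *	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, IOVMF_MMIO);
 *
 *	if (!IS_ERR_VALUE(da)) {
 *		...use the buffer at device address 'da'...
 *		sgt = omap_iommu_vunmap(domain, dev, da);
 *	}
 *
 * Passing da == 0 without IOVMF_DA_FIXED lets alloc_iovm_area() pick a
 * free device address.
 */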
/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free.
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
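
/*
 * Usage sketch (hypothetical client code): allocate a 1 MB kernel buffer
 * that the device can reach, then tear it down when done:
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *
 *	if (!IS_ERR_VALUE(da)) {
 *		void *va = omap_da_to_va(dev, da);
 *		...fill the buffer via 'va', hand 'da' to the device...
 *		omap_iommu_vfree(domain, dev, da);
 *	}
 *
 * omap_iommu_vfree() frees both the sg_table and, via vfree(), the
 * underlying pages, so the caller must not free them again.
 */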
/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);
static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");