omap-iovmm.c

/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>	/* needed for the MODULE_* macros below */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/iopgtable.h>

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                     mapping          iommu_              page
 *    | da      pa      va       (d)-(p)-(v)      function            type
 *  ---------------------------------------------------------------------------
 *  1 | c       c       c         1 - 1 - 1       _kmap() / _kunmap()    s
 *  2 | c       c,a     c         1 - 1 - 1       _kmalloc()/ _kfree()   s
 *  3 | c       d       c         1 - n - 1       _vmap() / _vunmap()    s
 *  4 | c       d,a     c         1 - n - 1       _vmalloc()/ _vfree()   n*
 *
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *      '*':    not yet, but feasible.
 */

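/*
 * A minimal usage sketch for pattern 2 above (hypothetical caller code,
 * not part of this file; it assumes 'domain' and 'obj' have already been
 * set up by the platform's iommu attach path):
 *
 *      u32 da = iommu_kmalloc(domain, obj, 0, SZ_64K, 0);
 *
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      ...                             (device uses [da, da + SZ_64K))
 *      iommu_kfree(domain, obj, da);
 */
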
static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length;

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}

#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;

        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

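/*
 * E.g. max_alignment(0x10300000) is SZ_1M (the address is 1MB- but not
 * 16MB-aligned), while max_alignment(0x10300800) is 0, since that
 * address is not aligned even to the smallest iommu page size (4KB).
 */
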
/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));

                nr_entries++;

                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}

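/*
 * Worked example: sgtable_nents(0x101000, 0x100000, 0x100000) picks one
 * 1MB entry first (da|pa is 1MB-aligned and iopgsz_max() caps the entry
 * size at 1MB), then one 4KB entry for the remainder, returning 2.
 */
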
/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header (a kind of 'superblock') */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg->length;

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Find the existing iovma which includes @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                   obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}

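/*
 * The scan above is a first-fit walk over the ascending 'mmap' list.
 * E.g. with [0x1000, 0x3000) and [0x10000, 0x11000) already mapped (and
 * obj->da_start of 0), a 0x2000-byte request without IOVMF_DA_FIXED
 * lands at 0x4000: roundup(tmp->da_end + 1, ...) skips one extra page
 * past the previous area's exclusive end before the gap is tried.
 */
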
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
                                 size_t len)
{
        unsigned int i;
        struct scatterlist *sg;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                unsigned bytes;

                bytes = max_alignment(da | pa);
                bytes = min_t(unsigned, bytes, iopgsz_max(len));

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is contiguous (linear).
                 */
                pa += bytes;
                da += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}

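/*
 * Splitting example: pa = da = 0x80100000 with len = 0x140000 is filled
 * as one 1MB element followed by four 64KB elements, matching the entry
 * count sgtable_nents() computes for the same (da, pa, len) triple.
 */
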
static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err = -EINVAL;      /* also covers the bytes_to_iopgsz() bail-out */
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;
        int order;

        if (!domain || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                size_t bytes;

                pa = sg_phys(sg);
                bytes = sg->length;

                flags &= ~IOVMF_PGSZ_MASK;

                if (bytes_to_iopgsz(bytes) < 0)
                        goto err_out;

                order = get_order(bytes);

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, order, flags);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = sg->length;
                order = get_order(bytes);

                /* ignore failures.. we're already handling one */
                iommu_unmap(domain, da, order);

                da += bytes;
        }
        return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
                            struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        int i, err;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                int order;

                bytes = sg->length;
                order = get_order(bytes);

                err = iommu_unmap(domain, start, order);
                if (err)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                        __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                      struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
                            u32 da, const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(domain, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
                               u32 da, const struct sg_table *sgt,
                               void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
               const struct sg_table *sgt, u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *
iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free
         */
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                            IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly and creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
                  size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                            IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

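/*
 * Pattern 4 sketch (hypothetical caller, with 'domain' and 'obj' set up
 * elsewhere): the backing pages come from vmalloc(), so only 4KB iommu
 * pages are used, yet the device sees one contiguous 'da' range:
 *
 *      u32 da = iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *
 *      if (!IS_ERR_VALUE(da)) {
 *              ...                     (device works on [da, da + SZ_1M))
 *              iommu_vfree(domain, obj, da);
 *      }
 */
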
static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj,
                        u32 da, u32 pa, void *va, size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags, da, pa);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, da, bytes);

        da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @pa:         contiguous physical memory
 * @bytes:      mapping size
 * @flags:      iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa,
               size_t bytes, u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;

        da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

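/*
 * Pattern 1 sketch (hypothetical caller): map an already-contiguous
 * physical region, e.g. a carved-out buffer at 'buf_pa', at a fixed
 * device address chosen by the caller:
 *
 *      u32 da = iommu_kmap(domain, obj, 0x20000000, buf_pa, SZ_16M,
 *                          IOVMF_DA_FIXED);
 *
 *      if (!IS_ERR_VALUE(da))
 *              ...             (later: iommu_kunmap(domain, obj, da))
 */
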
/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      bytes for allocation
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly and creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
                  size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;

        da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");