/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/                 mapping         iommu_                page
 *    | da     pa     va     (d)-(p)-(v)     function              type
 *  ---------------------------------------------------------------------------
 *  1 | c      c      c       1 - 1 - 1      _kmap() / _kunmap()    s
 *  2 | c      c,a    c       1 - 1 - 1      _kmalloc()/ _kfree()   s
 *  3 | c      d      c       1 - n - 1      _vmap() / _vunmap()    s
 *  4 | c      d,a    c       1 - n - 1      _vmalloc()/ _vfree()   n*
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *      '*':    not yet, but feasible.
 */
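
/*
 * For instance, pattern 4 above is a single call pair. A minimal usage
 * sketch (not part of this file's build; it assumes a valid handle from
 * iommu_get(), and the name "isp" is a hypothetical user):
 *
 *      struct iommu *obj = iommu_get("isp");
 *      u32 da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *
 *      if (!IS_ERR_VALUE(da)) {
 *              ... use the 1MB discontiguous area mapped at 'da' ...
 *              iommu_vfree(obj, da);
 *      }
 *      iommu_put(obj);
 */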

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg_dma_len(sg);

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%zx)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned int sgtable_nents(size_t bytes)
{
        int i;
        unsigned int nr_entries;
        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08zx\n", __func__, bytes);
                return 0;
        }

        nr_entries = 0;
        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
                if (bytes >= pagesize[i]) {
                        nr_entries += (bytes / pagesize[i]);
                        bytes %= pagesize[i];
                }
        }
        BUG_ON(bytes);

        return nr_entries;
}
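
/*
 * Worked example (illustrative only): a 17MB + 64KB request decomposes
 * greedily from the largest superpage down,
 *
 *      0x1110000 bytes = 1 * SZ_16M + 1 * SZ_1M + 1 * SZ_64K
 *
 * so sgtable_nents() returns 3 entries, instead of the 4368 entries a
 * plain 4KB-page mapping of the same size would need.
 */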

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        /* FIXME: IOVMF_DA_FIXED should support 'superpages' */
        if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
                nr_entries = sgtable_nents(bytes);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%zx) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (flags & IOVMF_DA_ANON) {
                /* Reserve the first page for NULL */
                start = PAGE_SIZE;
                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {
                if (prev_end >= start)
                        break;

                if (start + bytes < tmp->da_start)
                        goto found;

                if (flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start > prev_end) && (ULONG_MAX - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%zx) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /* keep ascending order of iovmas */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%zx) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%zx) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va;

        va = phys_to_virt(pa);

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = iopgsz_max(len);

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is contiguous (linear).
                 */
                pa += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!obj || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                int pgsz;
                size_t bytes;
                struct iotlb_entry e;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                flags &= ~IOVMF_PGSZ_MASK;
                pgsz = bytes_to_iopgsz(bytes);
                if (pgsz < 0) {
                        /* don't return an uninitialized 'err' */
                        err = -EINVAL;
                        goto err_out;
                }
                flags |= pgsz;

                pr_debug("%s: [%d] %08x %08x(%zx)\n", __func__,
                         i, da, pa, bytes);

                iotlb_init_entry(&e, da, pa, flags);
                err = iopgtable_store_entry(obj, &e);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, da);

                BUG_ON(!iopgsz_ok(bytes));

                da += bytes;
        }
        return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;

        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        while (total > 0) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, start);
                if (bytes == 0)
                        bytes = PAGE_SIZE;
                else
                        dev_dbg(obj->dev, "%s: unmap %08x(%zx) %08x\n",
                                __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
              const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(obj, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%zx) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
                const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
                 u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
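
/*
 * A minimal pairing sketch for iommu_vmap() (hypothetical caller code;
 * error handling elided): the sg_table is owned by the caller, every
 * element must be an iommu page size (4KB/64KB/1MB/16MB), and
 * iommu_vunmap() below hands the same table back for the caller to free:
 *
 *      u32 da = iommu_vmap(obj, 0, sgt, 0);
 *
 *      if (!IS_ERR_VALUE(da)) {
 *              ... device uses the buffers at 'da' ...
 *              sgt = iommu_vunmap(obj, da);
 *      }
 */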

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free.
         */
        sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
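
/*
 * Sketch of the two 'da' modes (illustrative values): da == 0 selects
 * IOVMF_DA_ANON, so the allocator picks a page-aligned hole above the
 * reserved first page; a non-zero da selects IOVMF_DA_FIXED and
 * requests exactly that address:
 *
 *      u32 da_anon  = iommu_vmalloc(obj, 0, SZ_64K, 0);
 *      u32 da_fixed = iommu_vmalloc(obj, 0x20000000, SZ_64K, 0);
 */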

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                          size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, bytes);

        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @pa:         contiguous physical memory
 * @bytes:      mapping size
 * @flags:      iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
                 u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
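
/*
 * A minimal sketch for an already physically contiguous region (the
 * physical address below is hypothetical): iommu_kmap() ioremap()s it
 * for the mpu side and maps it 1-1-1 for the device, using superpages
 * where size and alignment allow:
 *
 *      u32 da = iommu_kmap(obj, 0, 0x5c000000, SZ_1M, 0);
 *
 *      if (!IS_ERR_VALUE(da))
 *              iommu_kunmap(obj, da);
 */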

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      bytes for allocation
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
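
/*
 * A minimal sketch (size illustrative): iommu_kmalloc() allocates a
 * physically contiguous buffer with kmalloc(GFP_KERNEL | GFP_DMA), so
 * it suits small buffers; iommu_kfree() both unmaps and kfree()s it:
 *
 *      u32 da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *
 *      if (!IS_ERR_VALUE(da))
 *              iommu_kfree(obj, da);
 */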

/**
 * iommu_kfree  -  release memory allocated by 'iommu_kmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");