iovmm.c

/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *      | iova/                     mapping           iommu_               page
 *      | da       pa      va       (d)-(p)-(v)       function             type
 * ---------------------------------------------------------------------------
 *    1 | c        c       c         1 - 1 - 1        _kmap()  / _kunmap()    s
 *    2 | c        c,a     c         1 - 1 - 1        _kmalloc()/ _kfree()    s
 *    3 | c        d       c         1 - n - 1        _vmap()  / _vunmap()    s
 *    4 | c        d,a     c         1 - n - 1        _vmalloc()/ _vfree()    n*
 *
 *
 *      'iova': device iommu virtual address
 *      'da':   alias of 'iova'
 *      'pa':   physical address
 *      'va':   mpu virtual address
 *
 *      'c':    contiguous memory area
 *      'd':    discontiguous memory area
 *      'a':    anonymous memory allocation
 *      '()':   optional feature
 *
 *      'n':    a normal page (4KB) size is used.
 *      's':    multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
 *
 *      '*':    not yet, but feasible.
 */
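
/*
 * Illustrative sketch (not part of the driver): a client driver would
 * typically pair the calls from the table above.  For pattern 4, for
 * example, it could allocate a discontiguous buffer and map it into the
 * device's address space with iommu_vmalloc(), then release it with
 * iommu_vfree().  The iommu name, size and error handling below are
 * hypothetical; the handle is assumed to come from iommu_get()/iommu_put().
 *
 *      struct iommu *obj = iommu_get("isp");   // hypothetical iommu name
 *      u32 da;
 *
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 *      da = iommu_vmalloc(obj, 0, SZ_1M, 0);   // da == 0 -> IOVMF_DA_ANON
 *      if (IS_ERR_VALUE(da)) {
 *              iommu_put(obj);
 *              return da;
 *      }
 *      ...                                     // device accesses 'da'
 *      iommu_vfree(obj, da);
 *      iommu_put(obj);
 */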

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg_dma_len(sg);

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;

                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}
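
/*
 * Worked example (illustrative, assuming iopgsz_max() returns the largest
 * supported iommu page size not exceeding its argument): with da = pa = 0
 * and bytes = 16MB + 1MB, the first iteration picks a 16MB superpage (both
 * addresses are 16MB aligned) and the second a 1MB section, so
 * sgtable_nents() returns 2.  Poorly aligned 'da'/'pa' values force smaller
 * entries, down to one 4KB page per iteration.
 */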

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and length
 * in the iommu's mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (flags & IOVMF_DA_ANON) {
                start = obj->da_start;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);

                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                   obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && flags & IOVMF_DA_ANON)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
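
/*
 * Illustrative example (hypothetical numbers): with obj->da_start = 0 and
 * existing iovmas [0x1000-0x3000) and [0x8000-0x9000), an IOVMF_DA_ANON
 * request for 0x2000 bytes walks the list, bumps 'start' past the first
 * area to roundup(0x3000 + 1, PAGE_SIZE) = 0x4000, finds the gap before
 * 0x8000 large enough, and returns the new iovma [0x4000-0x6000).
 */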

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;
        void *va_end;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);

                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }

        va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability.
         */
        BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
                                 size_t len)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va;

        va = phys_to_virt(pa);

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                unsigned bytes;

                bytes = max_alignment(da | pa);
                bytes = min_t(unsigned, bytes, iopgsz_max(len));

                BUG_ON(!iopgsz_ok(bytes));

                sg_set_buf(sg, phys_to_virt(pa), bytes);
                /*
                 * 'pa' is contiguous (linear).
                 */
                pa += bytes;
                da += bytes;
                len -= bytes;
        }
        BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all, just exists for
         * consistency of the code readability
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;

        if (!obj || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                int pgsz;
                size_t bytes;
                struct iotlb_entry e;

                pa = sg_phys(sg);
                bytes = sg_dma_len(sg);

                flags &= ~IOVMF_PGSZ_MASK;
                pgsz = bytes_to_iopgsz(bytes);
                if (pgsz < 0) {
                        err = -EINVAL;
                        goto err_out;
                }
                flags |= pgsz;

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                iotlb_init_entry(&e, da, pa, flags);
                err = iopgtable_store_entry(obj, &e);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, da);

                BUG_ON(!iopgsz_ok(bytes));

                da += bytes;
        }
        return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;

        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        while (total > 0) {
                size_t bytes;

                bytes = iopgtable_clear_entry(obj, start);
                if (bytes == 0)
                        bytes = PAGE_SIZE;
                else
                        dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                                __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
                            const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(obj, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
                               const struct sg_table *sgt, void *va,
                               size_t bytes, u32 flags)
{
        return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
               u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free
         */
        sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);
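
/*
 * Illustrative sketch (hypothetical driver code): iommu_vmap() takes an
 * sg_table built by the caller, each segment sized to an iommu page, and
 * iommu_vunmap() hands the same table back for the caller to free.
 * 'obj', my_build_sgt() and my_free_sgt() are hypothetical.
 *
 *      struct sg_table *sgt = my_build_sgt();  // caller-owned table
 *      u32 da;
 *
 *      da = iommu_vmap(obj, 0, sgt, 0);        // da == 0 -> IOVMF_DA_ANON
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ...                                     // device accesses 'da'
 *      sgt = iommu_vunmap(obj, da);            // same table comes back
 *      my_free_sgt(sgt);
 */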

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
                        size_t bytes, u32 flags)
{
        struct sg_table *sgt;

        sgt = sgtable_alloc(bytes, flags, da, pa);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        sgtable_fill_kmalloc(sgt, pa, da, bytes);

        da = map_iommu_region(obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da)) {
                sgtable_drain_kmalloc(sgt);
                sgtable_free(sgt);
        }

        return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @pa:         contiguous physical memory
 * @bytes:      size of the region to map
 * @flags:      iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
               u32 flags)
{
        void *va;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = ioremap(pa, bytes);
        if (!va)
                return -ENOMEM;

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_MMIO;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                iounmap(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;
        typedef void (*func_t)(const void *);

        sgt = unmap_vm_area(obj, da, (func_t)iounmap,
                            IOVMF_LINEAR | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
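
/*
 * Illustrative sketch (hypothetical addresses): iommu_kmap() maps an
 * already physically contiguous region, e.g. on-chip SRAM or an external
 * buffer, at a fixed or anonymous device address.  'obj' and 'buf_pa' are
 * hypothetical, and the fixed 'da' must lie inside the iommu's da range.
 *
 *      u32 da;
 *
 *      da = iommu_kmap(obj, 0x20000000, buf_pa, SZ_1M, 0);  // fixed da
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ...                                                   // device uses 'da'
 *      iommu_kunmap(obj, da);
 */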

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      bytes for allocation
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
        void *va;
        u32 pa;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
        if (!va)
                return -ENOMEM;
        pa = virt_to_phys(va);

        flags &= IOVMF_HW_MASK;
        flags |= IOVMF_LINEAR;
        flags |= IOVMF_ALLOC;
        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

        da = __iommu_kmap(obj, da, pa, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                kfree(va);

        return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree  -  release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
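
/*
 * Illustrative sketch: iommu_kmalloc() allocates the backing memory itself
 * (physically contiguous, GFP_DMA) and maps it, so the caller only deals
 * with the returned device address.  'obj' is a hypothetical iommu handle.
 *
 *      u32 da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *      if (IS_ERR_VALUE(da))
 *              return da;
 *      ...
 *      iommu_kfree(obj, da);
 */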

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");