/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

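/*
 * Illustrative sketch (not extra driver logic): the iterator above is
 * the standard way to scan the live TLB for valid entries, as the flush
 * and dump helpers later in this file do:
 *
 *	struct cr_regs cr;
 *	int i;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		... inspect cr.cam / cr.ram here ...
 *	}
 */
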
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

struct iotlb_lock {
	short base;
	short vict;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);

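/*
 * Sketch of how an architecture backend (e.g. omap-iommu2.c) registers
 * itself at init time; the callback list is abbreviated and the names
 * are shown only for illustration:
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= IOMMU_ARCH_VERSION,
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *		...
 *	};
 *
 *	omap_install_iommu_arch(&omap2_iommu_ops);
 */
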
/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

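/*
 * Usage sketch: a client whose power domain may pass through off-mode
 * brackets the transition with these two helpers, where client_dev is
 * the client device carrying the iommu handle in its archdata:
 *
 *	omap_iommu_save_ctx(client_dev);
 *	... power domain goes through off and comes back ...
 *	omap_iommu_restore_ctx(client_dev);
 */
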
/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	if (pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = arch_iommu->enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return;

	arch_iommu->disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 *	TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	pm_runtime_put_sync(obj->dev);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	pm_runtime_get_sync(obj->dev);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	pm_runtime_put_sync(obj->dev);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);

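/*
 * Usage sketch: the omap iommu debugfs code consumes this roughly as
 * below, formatting the TLB into a page-sized buffer:
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *	ssize_t bytes;
 *
 *	if (buf) {
 *		bytes = omap_dump_tlb_entries(obj, buf, PAGE_SIZE);
 *		... copy 'bytes' of formatted entries to userspace ...
 *		free_page((unsigned long)buf);
 *	}
 */
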
int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 *	H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);

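/*
 * Usage sketch: a caller maps one entry by packing a struct iotlb_entry
 * and storing it, much as omap_iommu_map() does below via
 * iotlb_init_entry(). The values are illustrative only, assuming the
 * MMU_RAM_* flag names from omap-iommu.h:
 *
 *	struct iotlb_entry e;
 *	u32 flags = MMU_CAM_PGSZ_4K | MMU_RAM_ENDIAN_LITTLE |
 *		    MMU_RAM_ELSZ_32;
 *
 *	iotlb_init_entry(&e, da, pa, flags);
 *	err = omap_iopgtable_store_entry(obj, &e);
 */
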
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		/* flush the 'nent' ptes just cleared */
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * walk the table to check whether it is still needed
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 *	Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return NULL;

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/*
 *	OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	kfree(obj);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
			    u32 flags)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= 1;
	/* FIXME: add OMAP1 support */
	e->pgsz		= flags & MMU_CAM_PGSZ_MASK;
	e->endian	= flags & MMU_RAM_ENDIAN_MASK;
	e->elsz		= flags & MMU_RAM_ELSZ_MASK;
	e->mixed	= flags & MMU_RAM_MIXED_MASK;

	return iopgsz_to_bytes(e->pgsz);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;
	u32 flags;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end	= (1ULL << 32) - 1;
	domain->geometry.force_aperture	= true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

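/*
 * With these ops registered on the platform bus (see omap_iommu_init()
 * below), a client such as omap3isp drives the hardware through the
 * generic iommu API; a minimal sketch, assuming 'dev' carries valid
 * omap iommu archdata:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int err = iommu_attach_device(domain, dev);
 *
 *	if (!err)
 *		err = iommu_map(domain, da, pa, SZ_4K,
 *				IOMMU_READ | IOMMU_WRITE);
 */
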
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");