  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License, version 2, as
  4. * published by the Free Software Foundation.
  5. *
  6. * This program is distributed in the hope that it will be useful,
  7. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. * GNU General Public License for more details.
  10. *
  11. * You should have received a copy of the GNU General Public License
  12. * along with this program; if not, write to the Free Software
  13. * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  14. *
  15. * Copyright (C) 2013 Freescale Semiconductor, Inc.
  16. *
  17. */
  18. #define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
  19. #include <linux/init.h>
  20. #include <linux/iommu.h>
  21. #include <linux/slab.h>
  22. #include <linux/module.h>
  23. #include <linux/types.h>
  24. #include <linux/mm.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/device.h>
  27. #include <linux/of_platform.h>
  28. #include <linux/bootmem.h>
  29. #include <linux/genalloc.h>
  30. #include <asm/io.h>
  31. #include <asm/bitops.h>
  32. #include <asm/fsl_guts.h>
  33. #include "fsl_pamu.h"
/* Indexes into the operation mapping table (OMT) for each supported scenario */
#define OMI_QMAN	0x00	/* QMAN portal accesses */
#define OMI_FMAN	0x01	/* FMAN accesses */
#define OMI_QMAN_PRIV	0x02	/* QMAN private-memory accesses */
#define OMI_CAAM	0x03	/* CAAM accesses */

/* Combine two 32-bit register halves into a single 64-bit value */
#define make64(high, low) (((u64)(high) << 32) | (low))

/* Context handed to the access-violation ISR: covers all PAMUs it services */
struct pamu_isr_data {
	void __iomem *pamu_reg_base;	/* Base address of PAMU regs */
	unsigned int count;		/* The number of PAMUs */
};
/*
 * PAMU translation tables: primary PAACT, secondary PAACT and the
 * operation mapping table (OMT). Set up once at init time.
 */
static struct paace *ppaact;
static struct paace *spaact;
static struct ome *omt;

/*
 * Table for matching compatible strings, for device tree
 * guts node, for QorIQ SOCs.
 * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
 * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
 * string would be used.
 */
static const struct of_device_id guts_device_ids[] = {
	{ .compatible = "fsl,qoriq-device-config-1.0", },
	{ .compatible = "fsl,qoriq-device-config-2.0", },
	{}
};

/*
 * Table for matching compatible strings, for device tree
 * L3 cache controller node.
 * "fsl,t4240-l3-cache-controller" corresponds to T4,
 * "fsl,b4860-l3-cache-controller" corresponds to B4 &
 * "fsl,p4080-l3-cache-controller" corresponds to other,
 * SOCs.
 */
static const struct of_device_id l3_device_ids[] = {
	{ .compatible = "fsl,t4240-l3-cache-controller", },
	{ .compatible = "fsl,b4860-l3-cache-controller", },
	{ .compatible = "fsl,p4080-l3-cache-controller", },
	{}
};

/* maximum subwindows permitted per liodn (read from PAMU_PC3 capabilities) */
static u32 max_subwindow_count;

/* Pool for fspi allocation: hands out contiguous runs of SPAACT entries */
struct gen_pool *spaace_pool;
  77. /**
  78. * pamu_get_max_subwin_cnt() - Return the maximum supported
  79. * subwindow count per liodn.
  80. *
  81. */
  82. u32 pamu_get_max_subwin_cnt()
  83. {
  84. return max_subwindow_count;
  85. }
  86. /**
  87. * pamu_get_ppaace() - Return the primary PACCE
  88. * @liodn: liodn PAACT index for desired PAACE
  89. *
  90. * Returns the ppace pointer upon success else return
  91. * null.
  92. */
  93. static struct paace *pamu_get_ppaace(int liodn)
  94. {
  95. if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
  96. pr_debug("PPAACT doesn't exist\n");
  97. return NULL;
  98. }
  99. return &ppaact[liodn];
  100. }
  101. /**
  102. * pamu_enable_liodn() - Set valid bit of PACCE
  103. * @liodn: liodn PAACT index for desired PAACE
  104. *
  105. * Returns 0 upon success else error code < 0 returned
  106. */
  107. int pamu_enable_liodn(int liodn)
  108. {
  109. struct paace *ppaace;
  110. ppaace = pamu_get_ppaace(liodn);
  111. if (!ppaace) {
  112. pr_debug("Invalid primary paace entry\n");
  113. return -ENOENT;
  114. }
  115. if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
  116. pr_debug("liodn %d not configured\n", liodn);
  117. return -EINVAL;
  118. }
  119. /* Ensure that all other stores to the ppaace complete first */
  120. mb();
  121. set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
  122. mb();
  123. return 0;
  124. }
  125. /**
  126. * pamu_disable_liodn() - Clears valid bit of PACCE
  127. * @liodn: liodn PAACT index for desired PAACE
  128. *
  129. * Returns 0 upon success else error code < 0 returned
  130. */
  131. int pamu_disable_liodn(int liodn)
  132. {
  133. struct paace *ppaace;
  134. ppaace = pamu_get_ppaace(liodn);
  135. if (!ppaace) {
  136. pr_debug("Invalid primary paace entry\n");
  137. return -ENOENT;
  138. }
  139. set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
  140. mb();
  141. return 0;
  142. }
  143. /* Derive the window size encoding for a particular PAACE entry */
  144. static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
  145. {
  146. /* Bug if not a power of 2 */
  147. BUG_ON(!is_power_of_2(addrspace_size));
  148. /* window size is 2^(WSE+1) bytes */
  149. return __ffs(addrspace_size) - 1;
  150. }
  151. /* Derive the PAACE window count encoding for the subwindow count */
  152. static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
  153. {
  154. /* window count is 2^(WCE+1) bytes */
  155. return __ffs(subwindow_cnt) - 1;
  156. }
  157. /*
  158. * Set the PAACE type as primary and set the coherency required domain
  159. * attribute
  160. */
  161. static void pamu_init_ppaace(struct paace *ppaace)
  162. {
  163. set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);
  164. set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
  165. PAACE_M_COHERENCE_REQ);
  166. }
  167. /*
  168. * Set the PAACE type as secondary and set the coherency required domain
  169. * attribute.
  170. */
  171. static void pamu_init_spaace(struct paace *spaace)
  172. {
  173. set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
  174. set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
  175. PAACE_M_COHERENCE_REQ);
  176. }
  177. /*
  178. * Return the spaace (corresponding to the secondary window index)
  179. * for a particular ppaace.
  180. */
  181. static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
  182. {
  183. u32 subwin_cnt;
  184. struct paace *spaace = NULL;
  185. subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
  186. if (wnum < subwin_cnt)
  187. spaace = &spaact[paace->fspi + wnum];
  188. else
  189. pr_debug("secondary paace out of bounds\n");
  190. return spaace;
  191. }
  192. /**
  193. * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
  194. * required for primary PAACE in the secondary
  195. * PAACE table.
  196. * @subwin_cnt: Number of subwindows to be reserved.
  197. *
  198. * A PPAACE entry may have a number of associated subwindows. A subwindow
  199. * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
  200. * the index (fspi) of the first SPAACE entry in the SPAACT table. This
  201. * function returns the index of the first SPAACE entry. The remaining
  202. * SPAACE entries are reserved contiguously from that index.
  203. *
  204. * Returns a valid fspi index in the range of 0 - SPAACE_NUMBER_ENTRIES on success.
  205. * If no SPAACE entry is available or the allocator can not reserve the required
  206. * number of contiguous entries function returns ULONG_MAX indicating a failure.
  207. *
  208. */
  209. static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
  210. {
  211. unsigned long spaace_addr;
  212. spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
  213. if (!spaace_addr)
  214. return ULONG_MAX;
  215. return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
  216. }
  217. /* Release the subwindows reserved for a particular LIODN */
  218. void pamu_free_subwins(int liodn)
  219. {
  220. struct paace *ppaace;
  221. u32 subwin_cnt, size;
  222. ppaace = pamu_get_ppaace(liodn);
  223. if (!ppaace) {
  224. pr_debug("Invalid liodn entry\n");
  225. return;
  226. }
  227. if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
  228. subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
  229. size = (subwin_cnt - 1) * sizeof(struct paace);
  230. gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
  231. set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
  232. }
  233. }
  234. /*
  235. * Function used for updating stash destination for the coressponding
  236. * LIODN.
  237. */
  238. int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
  239. {
  240. struct paace *paace;
  241. paace = pamu_get_ppaace(liodn);
  242. if (!paace) {
  243. pr_debug("Invalid liodn entry\n");
  244. return -ENOENT;
  245. }
  246. if (subwin) {
  247. paace = pamu_get_spaace(paace, subwin - 1);
  248. if (!paace) {
  249. return -ENOENT;
  250. }
  251. }
  252. set_bf(paace->impl_attr, PAACE_IA_CID, value);
  253. mb();
  254. return 0;
  255. }
  256. /* Disable a subwindow corresponding to the LIODN */
  257. int pamu_disable_spaace(int liodn, u32 subwin)
  258. {
  259. struct paace *paace;
  260. paace = pamu_get_ppaace(liodn);
  261. if (!paace) {
  262. pr_debug("Invalid liodn entry\n");
  263. return -ENOENT;
  264. }
  265. if (subwin) {
  266. paace = pamu_get_spaace(paace, subwin - 1);
  267. if (!paace) {
  268. return -ENOENT;
  269. }
  270. set_bf(paace->addr_bitfields, PAACE_AF_V,
  271. PAACE_V_INVALID);
  272. } else {
  273. set_bf(paace->addr_bitfields, PAACE_AF_AP,
  274. PAACE_AP_PERMS_DENIED);
  275. }
  276. mb();
  277. return 0;
  278. }
  279. /**
  280. * pamu_config_paace() - Sets up PPAACE entry for specified liodn
  281. *
  282. * @liodn: Logical IO device number
  283. * @win_addr: starting address of DSA window
  284. * @win-size: size of DSA window
  285. * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
  286. * @rpn: real (true physical) page number
  287. * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
  288. * stashid not defined
  289. * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
  290. * snoopid not defined
  291. * @subwin_cnt: number of sub-windows
  292. * @prot: window permissions
  293. *
  294. * Returns 0 upon success else error code < 0 returned
  295. */
  296. int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
  297. u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
  298. u32 subwin_cnt, int prot)
  299. {
  300. struct paace *ppaace;
  301. unsigned long fspi;
  302. if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
  303. pr_debug("window size too small or not a power of two %llx\n", win_size);
  304. return -EINVAL;
  305. }
  306. if (win_addr & (win_size - 1)) {
  307. pr_debug("window address is not aligned with window size\n");
  308. return -EINVAL;
  309. }
  310. ppaace = pamu_get_ppaace(liodn);
  311. if (!ppaace) {
  312. return -ENOENT;
  313. }
  314. /* window size is 2^(WSE+1) bytes */
  315. set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
  316. map_addrspace_size_to_wse(win_size));
  317. pamu_init_ppaace(ppaace);
  318. ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
  319. set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
  320. (win_addr >> PAMU_PAGE_SHIFT));
  321. /* set up operation mapping if it's configured */
  322. if (omi < OME_NUMBER_ENTRIES) {
  323. set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
  324. ppaace->op_encode.index_ot.omi = omi;
  325. } else if (~omi != 0) {
  326. pr_debug("bad operation mapping index: %d\n", omi);
  327. return -EINVAL;
  328. }
  329. /* configure stash id */
  330. if (~stashid != 0)
  331. set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
  332. /* configure snoop id */
  333. if (~snoopid != 0)
  334. ppaace->domain_attr.to_host.snpid = snoopid;
  335. if (subwin_cnt) {
  336. /* The first entry is in the primary PAACE instead */
  337. fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
  338. if (fspi == ULONG_MAX) {
  339. pr_debug("spaace indexes exhausted\n");
  340. return -EINVAL;
  341. }
  342. /* window count is 2^(WCE+1) bytes */
  343. set_bf(ppaace->impl_attr, PAACE_IA_WCE,
  344. map_subwindow_cnt_to_wce(subwin_cnt));
  345. set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
  346. ppaace->fspi = fspi;
  347. } else {
  348. set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
  349. ppaace->twbah = rpn >> 20;
  350. set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
  351. set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
  352. set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
  353. set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
  354. }
  355. mb();
  356. return 0;
  357. }
  358. /**
  359. * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
  360. *
  361. * @liodn: Logical IO device number
  362. * @subwin_cnt: number of sub-windows associated with dma-window
  363. * @subwin: subwindow index
  364. * @subwin_size: size of subwindow
  365. * @omi: Operation mapping index
  366. * @rpn: real (true physical) page number
  367. * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
  368. * snoopid not defined
  369. * @stashid: cache stash id for associated cpu
  370. * @enable: enable/disable subwindow after reconfiguration
  371. * @prot: sub window permissions
  372. *
  373. * Returns 0 upon success else error code < 0 returned
  374. */
  375. int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
  376. phys_addr_t subwin_size, u32 omi, unsigned long rpn,
  377. u32 snoopid, u32 stashid, int enable, int prot)
  378. {
  379. struct paace *paace;
  380. /* setup sub-windows */
  381. if (!subwin_cnt) {
  382. pr_debug("Invalid subwindow count\n");
  383. return -EINVAL;
  384. }
  385. paace = pamu_get_ppaace(liodn);
  386. if (subwin > 0 && subwin < subwin_cnt && paace) {
  387. paace = pamu_get_spaace(paace, subwin - 1);
  388. if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
  389. pamu_init_spaace(paace);
  390. set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
  391. }
  392. }
  393. if (!paace) {
  394. pr_debug("Invalid liodn entry\n");
  395. return -ENOENT;
  396. }
  397. if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
  398. pr_debug("subwindow size out of range, or not a power of 2\n");
  399. return -EINVAL;
  400. }
  401. if (rpn == ULONG_MAX) {
  402. pr_debug("real page number out of range\n");
  403. return -EINVAL;
  404. }
  405. /* window size is 2^(WSE+1) bytes */
  406. set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
  407. map_addrspace_size_to_wse(subwin_size));
  408. set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
  409. paace->twbah = rpn >> 20;
  410. set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
  411. set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
  412. /* configure snoop id */
  413. if (~snoopid != 0)
  414. paace->domain_attr.to_host.snpid = snoopid;
  415. /* set up operation mapping if it's configured */
  416. if (omi < OME_NUMBER_ENTRIES) {
  417. set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
  418. paace->op_encode.index_ot.omi = omi;
  419. } else if (~omi != 0) {
  420. pr_debug("bad operation mapping index: %d\n", omi);
  421. return -EINVAL;
  422. }
  423. if (~stashid != 0)
  424. set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
  425. smp_wmb();
  426. if (enable)
  427. set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
  428. mb();
  429. return 0;
  430. }
/**
 * get_ome_index() - Returns the index in the operation mapping table
 *                   for device.
 * @omi_index: pointer for storing the index value
 * @dev: device whose OF node is matched against known compatibles
 *
 * The checks run in order and the later match wins, so a node compatible
 * with both strings ends up with OMI_QMAN_PRIV. *omi_index is left
 * untouched when neither compatible matches.
 */
void get_ome_index(u32 *omi_index, struct device *dev)
{
	if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
		*omi_index = OMI_QMAN;
	/* deliberately not "else if" -- see the ordering note above */
	if (of_device_is_compatible(dev->of_node, "fsl,qman"))
		*omi_index = OMI_QMAN_PRIV;
}
  444. /**
  445. * get_stash_id - Returns stash destination id corresponding to a
  446. * cache type and vcpu.
  447. * @stash_dest_hint: L1, L2 or L3
  448. * @vcpu: vpcu target for a particular cache type.
  449. *
  450. * Returs stash on success or ~(u32)0 on failure.
  451. *
  452. */
  453. u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
  454. {
  455. const u32 *prop;
  456. struct device_node *node;
  457. u32 cache_level;
  458. int len, found = 0;
  459. int i;
  460. /* Fastpath, exit early if L3/CPC cache is target for stashing */
  461. if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
  462. node = of_find_matching_node(NULL, l3_device_ids);
  463. if (node) {
  464. prop = of_get_property(node, "cache-stash-id", 0);
  465. if (!prop) {
  466. pr_debug("missing cache-stash-id at %s\n", node->full_name);
  467. of_node_put(node);
  468. return ~(u32)0;
  469. }
  470. of_node_put(node);
  471. return be32_to_cpup(prop);
  472. }
  473. return ~(u32)0;
  474. }
  475. for_each_node_by_type(node, "cpu") {
  476. prop = of_get_property(node, "reg", &len);
  477. for (i = 0; i < len / sizeof(u32); i++) {
  478. if (be32_to_cpup(&prop[i]) == vcpu) {
  479. found = 1;
  480. goto found_cpu_node;
  481. }
  482. }
  483. }
  484. found_cpu_node:
  485. /* find the hwnode that represents the cache */
  486. for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
  487. if (stash_dest_hint == cache_level) {
  488. prop = of_get_property(node, "cache-stash-id", 0);
  489. if (!prop) {
  490. pr_debug("missing cache-stash-id at %s\n", node->full_name);
  491. of_node_put(node);
  492. return ~(u32)0;
  493. }
  494. of_node_put(node);
  495. return be32_to_cpup(prop);
  496. }
  497. prop = of_get_property(node, "next-level-cache", 0);
  498. if (!prop) {
  499. pr_debug("can't find next-level-cache at %s\n",
  500. node->full_name);
  501. of_node_put(node);
  502. return ~(u32)0; /* can't traverse any further */
  503. }
  504. of_node_put(node);
  505. /* advance to next node in cache hierarchy */
  506. node = of_find_node_by_phandle(*prop);
  507. if (!node) {
  508. pr_debug("Invalid node for cache hierarchy %s\n",
  509. node->full_name);
  510. return ~(u32)0;
  511. }
  512. }
  513. pr_debug("stash dest not found for %d on vcpu %d\n",
  514. stash_dest_hint, vcpu);
  515. return ~(u32)0;
  516. }
/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
#define QMAN_PAACE 1
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3

/**
 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 * clear the PAACE entry coherency attribute for them.
 */
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
	switch (paace_type) {
	case QMAN_PAACE:
		/* QMAN private memory: indexed operation mapping via OMT */
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
		/* setup QMAN Private data stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		/* private memory accesses need not be coherent */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	case QMAN_PORTAL_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN;
		/* Set DQRR and Frame stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		break;
	case BMAN_PAACE:
		/* BMAN private memory accesses need not be coherent */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	}
}
/**
 * Setup the operation mapping table for various devices. This is a static
 * table where each table index corresponds to a particular device. PAMU uses
 * this table to translate device transaction to appropriate corenet
 * transaction.
 */
static void __init setup_omt(struct ome *omt)
{
	struct ome *ome;

	/* Configure OMI_QMAN */
	ome = &omt[OMI_QMAN];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
	ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
	ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;
	ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
	ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;

	/* Configure OMI_FMAN */
	ome = &omt[OMI_FMAN];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;

	/* Configure OMI_QMAN private */
	ome = &omt[OMI_QMAN_PRIV];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
	ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
	ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;

	/* Configure OMI_CAAM */
	ome = &omt[OMI_CAAM];
	ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI;
	ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
}
  581. /*
  582. * Get the maximum number of PAACT table entries
  583. * and subwindows supported by PAMU
  584. */
  585. static void get_pamu_cap_values(unsigned long pamu_reg_base)
  586. {
  587. u32 pc_val;
  588. pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
  589. /* Maximum number of subwindows per liodn */
  590. max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
  591. }
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
		   phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
		   phys_addr_t omt_phys)
{
	u32 *pc;
	struct pamu_mmap_regs *pamu_regs;

	pc = (u32 *) (pamu_reg_base + PAMU_PC);
	pamu_regs = (struct pamu_mmap_regs *)
		(pamu_reg_base + PAMU_MMAP_REGS_BASE);

	/* set up pointers to corenet control blocks */

	/* Primary PAACT: base (high/low) followed by limit (high/low) */
	out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
	ppaact_phys = ppaact_phys + PAACT_SIZE;
	out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));

	/* Secondary PAACT: base and limit */
	out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
	spaact_phys = spaact_phys + SPAACT_SIZE;
	out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));

	/* Operation mapping table: base and limit */
	out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
	omt_phys = omt_phys + OMT_SIZE;
	out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));

	/*
	 * set PAMU enable bit,
	 * allow ppaact & omt to be cached
	 * & enable PAMU access violation interrupts.
	 */
	out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
		 PAMU_ACCESS_VIOLATION_ENABLE);
	/* Enable last, after all table pointers are programmed */
	out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
	return 0;
}
  628. /* Enable all device LIODNS */
  629. static void __init setup_liodns(void)
  630. {
  631. int i, len;
  632. struct paace *ppaace;
  633. struct device_node *node = NULL;
  634. const u32 *prop;
  635. for_each_node_with_property(node, "fsl,liodn") {
  636. prop = of_get_property(node, "fsl,liodn", &len);
  637. for (i = 0; i < len / sizeof(u32); i++) {
  638. int liodn;
  639. liodn = be32_to_cpup(&prop[i]);
  640. if (liodn >= PAACE_NUMBER_ENTRIES) {
  641. pr_debug("Invalid LIODN value %d\n", liodn);
  642. continue;
  643. }
  644. ppaace = pamu_get_ppaace(liodn);
  645. pamu_init_ppaace(ppaace);
  646. /* window size is 2^(WSE+1) bytes */
  647. set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
  648. ppaace->wbah = 0;
  649. set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
  650. set_bf(ppaace->impl_attr, PAACE_IA_ATM,
  651. PAACE_ATM_NO_XLATE);
  652. set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
  653. PAACE_AP_PERMS_ALL);
  654. if (of_device_is_compatible(node, "fsl,qman-portal"))
  655. setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
  656. if (of_device_is_compatible(node, "fsl,qman"))
  657. setup_qbman_paace(ppaace, QMAN_PAACE);
  658. if (of_device_is_compatible(node, "fsl,bman"))
  659. setup_qbman_paace(ppaace, BMAN_PAACE);
  660. mb();
  661. pamu_enable_liodn(liodn);
  662. }
  663. }
  664. }
/*
 * PAMU access-violation interrupt handler: dumps the violation registers
 * for each PAMU that flagged a violation, then either disables the
 * offending LIODN or (erratum A-003638) disables violation reporting.
 */
irqreturn_t pamu_av_isr(int irq, void *arg)
{
	struct pamu_isr_data *data = arg;
	phys_addr_t phys;
	unsigned int i, j, ret;

	pr_emerg("access violation interrupt\n");

	/* Each PAMU's register block is PAMU_OFFSET apart from the base */
	for (i = 0; i < data->count; i++) {
		void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
		u32 pics = in_be32(p + PAMU_PICS);

		if (pics & PAMU_ACCESS_VIOLATION_STAT) {
			u32 avs1 = in_be32(p + PAMU_AVS1);
			struct paace *paace;

			/* Dump the violation status/address registers */
			pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
			pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
			pr_emerg("AVS1=%08x\n", avs1);
			pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
			pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
							 in_be32(p + PAMU_AVAL)));
			pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
			pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
							  in_be32(p + PAMU_POEAL)));

			phys = make64(in_be32(p + PAMU_POEAH),
				      in_be32(p + PAMU_POEAL));

			/* Assume that POEA points to a PAACE */
			if (phys) {
				u32 *paace = phys_to_virt(phys);

				/* Only the first four words are relevant */
				for (j = 0; j < 4; j++)
					pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
			}

			/* clear access violation condition */
			out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
			/* AVS1 carries the offending LIODN in its upper bits */
			paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
			BUG_ON(!paace);
			/* check if we got a violation for a disabled LIODN */
			if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
				/*
				 * As per hardware erratum A-003638, access
				 * violation can be reported for a disabled
				 * LIODN. If we hit that condition, disable
				 * access violation reporting.
				 */
				pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
			} else {
				/* Disable the LIODN */
				ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
				BUG_ON(ret);
				pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
			}
			out_be32((p + PAMU_PICS), pics);
		}
	}

	return IRQ_HANDLED;
}
/* Local Access Window attribute register (LAWAR) fields */
#define LAWAR_EN		0x80000000	/* window enable bit */
#define LAWAR_TARGET_MASK	0x0FF00000	/* target interface id */
#define LAWAR_TARGET_SHIFT	20
#define LAWAR_SIZE_MASK		0x0000003F	/* log2-style size encoding */
#define LAWAR_CSDID_MASK	0x000FF000	/* coherence subdomain id */
#define LAWAR_CSDID_SHIFT	12

#define LAW_SIZE_4K		0xb	/* size encoding for a 4 KiB window */

/* One Local Access Window (LAW) register block in CCSR space */
struct ccsr_law {
	u32 lawbarh;	/* LAWn base address high */
	u32 lawbarl;	/* LAWn base address low */
	u32 lawar;	/* LAWn attributes */
	u32 reserved;
};
  732. /*
  733. * Create a coherence subdomain for a given memory block.
  734. */
  735. static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
  736. {
  737. struct device_node *np;
  738. const __be32 *iprop;
  739. void __iomem *lac = NULL; /* Local Access Control registers */
  740. struct ccsr_law __iomem *law;
  741. void __iomem *ccm = NULL;
  742. u32 __iomem *csdids;
  743. unsigned int i, num_laws, num_csds;
  744. u32 law_target = 0;
  745. u32 csd_id = 0;
  746. int ret = 0;
  747. np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
  748. if (!np)
  749. return -ENODEV;
  750. iprop = of_get_property(np, "fsl,num-laws", NULL);
  751. if (!iprop) {
  752. ret = -ENODEV;
  753. goto error;
  754. }
  755. num_laws = be32_to_cpup(iprop);
  756. if (!num_laws) {
  757. ret = -ENODEV;
  758. goto error;
  759. }
  760. lac = of_iomap(np, 0);
  761. if (!lac) {
  762. ret = -ENODEV;
  763. goto error;
  764. }
  765. /* LAW registers are at offset 0xC00 */
  766. law = lac + 0xC00;
  767. of_node_put(np);
  768. np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
  769. if (!np) {
  770. ret = -ENODEV;
  771. goto error;
  772. }
  773. iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
  774. if (!iprop) {
  775. ret = -ENODEV;
  776. goto error;
  777. }
  778. num_csds = be32_to_cpup(iprop);
  779. if (!num_csds) {
  780. ret = -ENODEV;
  781. goto error;
  782. }
  783. ccm = of_iomap(np, 0);
  784. if (!ccm) {
  785. ret = -ENOMEM;
  786. goto error;
  787. }
  788. /* The undocumented CSDID registers are at offset 0x600 */
  789. csdids = ccm + 0x600;
  790. of_node_put(np);
  791. np = NULL;
  792. /* Find an unused coherence subdomain ID */
  793. for (csd_id = 0; csd_id < num_csds; csd_id++) {
  794. if (!csdids[csd_id])
  795. break;
  796. }
  797. /* Store the Port ID in the (undocumented) proper CIDMRxx register */
  798. csdids[csd_id] = csd_port_id;
  799. /* Find the DDR LAW that maps to our buffer. */
  800. for (i = 0; i < num_laws; i++) {
  801. if (law[i].lawar & LAWAR_EN) {
  802. phys_addr_t law_start, law_end;
  803. law_start = make64(law[i].lawbarh, law[i].lawbarl);
  804. law_end = law_start +
  805. (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
  806. if (law_start <= phys && phys < law_end) {
  807. law_target = law[i].lawar & LAWAR_TARGET_MASK;
  808. break;
  809. }
  810. }
  811. }
  812. if (i == 0 || i == num_laws) {
  813. /* This should never happen*/
  814. ret = -ENOENT;
  815. goto error;
  816. }
  817. /* Find a free LAW entry */
  818. while (law[--i].lawar & LAWAR_EN) {
  819. if (i == 0) {
  820. /* No higher priority LAW slots available */
  821. ret = -ENOENT;
  822. goto error;
  823. }
  824. }
  825. law[i].lawbarh = upper_32_bits(phys);
  826. law[i].lawbarl = lower_32_bits(phys);
  827. wmb();
  828. law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
  829. (LAW_SIZE_4K + get_order(size));
  830. wmb();
  831. error:
  832. if (ccm)
  833. iounmap(ccm);
  834. if (lac)
  835. iounmap(lac);
  836. if (np)
  837. of_node_put(np);
  838. return ret;
  839. }
  840. /*
  841. * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
  842. * bit map of snoopers for a given range of memory mapped by a LAW.
  843. *
  844. * All future CoreNet-enabled SOCs will have this erratum(A-004510) fixed, so this
  845. * table should never need to be updated. SVRs are guaranteed to be unique, so
  846. * there is no worry that a future SOC will inadvertently have one of these
  847. * values.
  848. */
  849. static const struct {
  850. u32 svr;
  851. u32 port_id;
  852. } port_id_map[] = {
  853. {0x82100010, 0xFF000000}, /* P2040 1.0 */
  854. {0x82100011, 0xFF000000}, /* P2040 1.1 */
  855. {0x82100110, 0xFF000000}, /* P2041 1.0 */
  856. {0x82100111, 0xFF000000}, /* P2041 1.1 */
  857. {0x82110310, 0xFF000000}, /* P3041 1.0 */
  858. {0x82110311, 0xFF000000}, /* P3041 1.1 */
  859. {0x82010020, 0xFFF80000}, /* P4040 2.0 */
  860. {0x82000020, 0xFFF80000}, /* P4080 2.0 */
  861. {0x82210010, 0xFC000000}, /* P5010 1.0 */
  862. {0x82210020, 0xFC000000}, /* P5010 2.0 */
  863. {0x82200010, 0xFC000000}, /* P5020 1.0 */
  864. {0x82050010, 0xFF800000}, /* P5021 1.0 */
  865. {0x82040010, 0xFF800000}, /* P5040 1.0 */
  866. };
  867. #define SVR_SECURITY 0x80000 /* The Security (E) bit */
  868. static int __init fsl_pamu_probe(struct platform_device *pdev)
  869. {
  870. void __iomem *pamu_regs = NULL;
  871. struct ccsr_guts __iomem *guts_regs = NULL;
  872. u32 pamubypenr, pamu_counter;
  873. unsigned long pamu_reg_off;
  874. unsigned long pamu_reg_base;
  875. struct pamu_isr_data *data = NULL;
  876. struct device_node *guts_node;
  877. u64 size;
  878. struct page *p;
  879. int ret = 0;
  880. int irq;
  881. phys_addr_t ppaact_phys;
  882. phys_addr_t spaact_phys;
  883. phys_addr_t omt_phys;
  884. size_t mem_size = 0;
  885. unsigned int order = 0;
  886. u32 csd_port_id = 0;
  887. unsigned i;
  888. /*
  889. * enumerate all PAMUs and allocate and setup PAMU tables
  890. * for each of them,
  891. * NOTE : All PAMUs share the same LIODN tables.
  892. */
  893. pamu_regs = of_iomap(pdev->dev.of_node, 0);
  894. if (!pamu_regs) {
  895. dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
  896. return -ENOMEM;
  897. }
  898. of_get_address(pdev->dev.of_node, 0, &size, NULL);
  899. irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  900. if (irq == NO_IRQ) {
  901. dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
  902. goto error;
  903. }
  904. data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
  905. if (!data) {
  906. dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
  907. ret = -ENOMEM;
  908. goto error;
  909. }
  910. data->pamu_reg_base = pamu_regs;
  911. data->count = size / PAMU_OFFSET;
  912. /* The ISR needs access to the regs, so we won't iounmap them */
  913. ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
  914. if (ret < 0) {
  915. dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
  916. ret, irq);
  917. goto error;
  918. }
  919. guts_node = of_find_matching_node(NULL, guts_device_ids);
  920. if (!guts_node) {
  921. dev_err(&pdev->dev, "could not find GUTS node %s\n",
  922. pdev->dev.of_node->full_name);
  923. ret = -ENODEV;
  924. goto error;
  925. }
  926. guts_regs = of_iomap(guts_node, 0);
  927. of_node_put(guts_node);
  928. if (!guts_regs) {
  929. dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
  930. ret = -ENODEV;
  931. goto error;
  932. }
  933. /* read in the PAMU capability registers */
  934. get_pamu_cap_values((unsigned long)pamu_regs);
  935. /*
  936. * To simplify the allocation of a coherency domain, we allocate the
  937. * PAACT and the OMT in the same memory buffer. Unfortunately, this
  938. * wastes more memory compared to allocating the buffers separately.
  939. */
  940. /* Determine how much memory we need */
  941. mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
  942. (PAGE_SIZE << get_order(SPAACT_SIZE)) +
  943. (PAGE_SIZE << get_order(OMT_SIZE));
  944. order = get_order(mem_size);
  945. p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
  946. if (!p) {
  947. dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
  948. ret = -ENOMEM;
  949. goto error;
  950. }
  951. ppaact = page_address(p);
  952. ppaact_phys = page_to_phys(p);
  953. /* Make sure the memory is naturally aligned */
  954. if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
  955. dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
  956. ret = -ENOMEM;
  957. goto error;
  958. }
  959. spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
  960. omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
  961. dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
  962. (unsigned long long) ppaact_phys);
  963. /* Check to see if we need to implement the work-around on this SOC */
  964. /* Determine the Port ID for our coherence subdomain */
  965. for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
  966. if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
  967. csd_port_id = port_id_map[i].port_id;
  968. dev_dbg(&pdev->dev, "found matching SVR %08x\n",
  969. port_id_map[i].svr);
  970. break;
  971. }
  972. }
  973. if (csd_port_id) {
  974. dev_dbg(&pdev->dev, "creating coherency subdomain at address "
  975. "0x%llx, size %zu, port id 0x%08x", ppaact_phys,
  976. mem_size, csd_port_id);
  977. ret = create_csd(ppaact_phys, mem_size, csd_port_id);
  978. if (ret) {
  979. dev_err(&pdev->dev, "could not create coherence "
  980. "subdomain\n");
  981. return ret;
  982. }
  983. }
  984. spaact_phys = virt_to_phys(spaact);
  985. omt_phys = virt_to_phys(omt);
  986. spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
  987. if (!spaace_pool) {
  988. ret = -ENOMEM;
  989. dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
  990. goto error;
  991. }
  992. ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
  993. if (ret)
  994. goto error_genpool;
  995. pamubypenr = in_be32(&guts_regs->pamubypenr);
  996. for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
  997. pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
  998. pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
  999. setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
  1000. spaact_phys, omt_phys);
  1001. /* Disable PAMU bypass for this PAMU */
  1002. pamubypenr &= ~pamu_counter;
  1003. }
  1004. setup_omt(omt);
  1005. /* Enable all relevant PAMU(s) */
  1006. out_be32(&guts_regs->pamubypenr, pamubypenr);
  1007. iounmap(guts_regs);
  1008. /* Enable DMA for the LIODNs in the device tree*/
  1009. setup_liodns();
  1010. return 0;
  1011. error_genpool:
  1012. gen_pool_destroy(spaace_pool);
  1013. error:
  1014. if (irq != NO_IRQ)
  1015. free_irq(irq, data);
  1016. if (data) {
  1017. memset(data, 0, sizeof(struct pamu_isr_data));
  1018. kfree(data);
  1019. }
  1020. if (pamu_regs)
  1021. iounmap(pamu_regs);
  1022. if (guts_regs)
  1023. iounmap(guts_regs);
  1024. if (ppaact)
  1025. free_pages((unsigned long)ppaact, order);
  1026. ppaact = NULL;
  1027. return ret;
  1028. }
/* Device-tree compatible strings matched by this driver */
static const struct of_device_id fsl_of_pamu_ids[] = {
	{
		.compatible = "fsl,p4080-pamu",
	},
	{
		.compatible = "fsl,pamu",
	},
	{},
};
/*
 * Platform driver; the device it binds to is created manually in
 * fsl_pamu_init() rather than via normal OF matching.
 */
static struct platform_driver fsl_of_pamu_driver = {
	.driver = {
		.name = "fsl-of-pamu",
		.owner = THIS_MODULE,
	},
	.probe = fsl_pamu_probe,
};
  1045. static __init int fsl_pamu_init(void)
  1046. {
  1047. struct platform_device *pdev = NULL;
  1048. struct device_node *np;
  1049. int ret;
  1050. /*
  1051. * The normal OF process calls the probe function at some
  1052. * indeterminate later time, after most drivers have loaded. This is
  1053. * too late for us, because PAMU clients (like the Qman driver)
  1054. * depend on PAMU being initialized early.
  1055. *
  1056. * So instead, we "manually" call our probe function by creating the
  1057. * platform devices ourselves.
  1058. */
  1059. /*
  1060. * We assume that there is only one PAMU node in the device tree. A
  1061. * single PAMU node represents all of the PAMU devices in the SOC
  1062. * already. Everything else already makes that assumption, and the
  1063. * binding for the PAMU nodes doesn't allow for any parent-child
  1064. * relationships anyway. In other words, support for more than one
  1065. * PAMU node would require significant changes to a lot of code.
  1066. */
  1067. np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
  1068. if (!np) {
  1069. pr_err("could not find a PAMU node\n");
  1070. return -ENODEV;
  1071. }
  1072. ret = platform_driver_register(&fsl_of_pamu_driver);
  1073. if (ret) {
  1074. pr_err("could not register driver (err=%i)\n", ret);
  1075. goto error_driver_register;
  1076. }
  1077. pdev = platform_device_alloc("fsl-of-pamu", 0);
  1078. if (!pdev) {
  1079. pr_err("could not allocate device %s\n",
  1080. np->full_name);
  1081. ret = -ENOMEM;
  1082. goto error_device_alloc;
  1083. }
  1084. pdev->dev.of_node = of_node_get(np);
  1085. ret = pamu_domain_init();
  1086. if (ret)
  1087. goto error_device_add;
  1088. ret = platform_device_add(pdev);
  1089. if (ret) {
  1090. pr_err("could not add device %s (err=%i)\n",
  1091. np->full_name, ret);
  1092. goto error_device_add;
  1093. }
  1094. return 0;
  1095. error_device_add:
  1096. of_node_put(pdev->dev.of_node);
  1097. pdev->dev.of_node = NULL;
  1098. platform_device_put(pdev);
  1099. error_device_alloc:
  1100. platform_driver_unregister(&fsl_of_pamu_driver);
  1101. error_driver_register:
  1102. of_node_put(np);
  1103. return ret;
  1104. }
  1105. arch_initcall(fsl_pamu_init);