/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004,2005,2006 by Thiemo Seufer
 * Copyright (C) 2005 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <stdarg.h>

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/smp.h>
#include <asm/war.h>

static __init int __attribute__((unused)) r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __init int __attribute__((unused)) r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __init int __attribute__((unused)) bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static __init int __attribute__((unused)) r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple: it supports only a
 * subset of instructions and does not try to hide pipeline effects
 * like branch delay slots.
 */

enum fields
{
	RS	= 0x001,
	RT	= 0x002,
	RD	= 0x004,
	RE	= 0x008,
	SIMM	= 0x010,
	UIMM	= 0x020,
	BIMM	= 0x040,
	JIMM	= 0x080,
	FUNC	= 0x100,
	SET	= 0x200
};

/* The opcode and function fields are 6 bits wide. */
#define OP_MASK		0x3f
#define OP_SH		26
#define RS_MASK		0x1f
#define RS_SH		21
#define RT_MASK		0x1f
#define RT_SH		16
#define RD_MASK		0x1f
#define RD_SH		11
#define RE_MASK		0x1f
#define RE_SH		6
#define IMM_MASK	0xffff
#define IMM_SH		0
#define JIMM_MASK	0x3ffffff
#define JIMM_SH		0
#define FUNC_MASK	0x3f
#define FUNC_SH		0
#define SET_MASK	0x7
#define SET_SH		0

enum opcode {
	insn_invalid,
	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
	insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
	insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
	insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
	insn_tlbwr, insn_xor, insn_xori
};

struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)

static __initdata struct insn insn_table[] = {
	{ insn_addiu, M(addiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_addu, M(spec_op,0,0,0,0,addu_op), RS | RT | RD },
	{ insn_and, M(spec_op,0,0,0,0,and_op), RS | RT | RD },
	{ insn_andi, M(andi_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_beq, M(beq_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_beql, M(beql_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op,0,bgez_op,0,0,0), RS | BIMM },
	{ insn_bgezl, M(bcond_op,0,bgezl_op,0,0,0), RS | BIMM },
	{ insn_bltz, M(bcond_op,0,bltz_op,0,0,0), RS | BIMM },
	{ insn_bltzl, M(bcond_op,0,bltzl_op,0,0,0), RS | BIMM },
	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET },
	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET },
	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op,0,0,0,0,dsrl32_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
	{ insn_j, M(j_op,0,0,0,0,0), JIMM },
	{ insn_jal, M(jal_op,0,0,0,0,0), JIMM },
	{ insn_jr, M(spec_op,0,0,0,0,jr_op), RS },
	{ insn_ld, M(ld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_ll, M(ll_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET },
	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET },
	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_scd, M(scd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sd, M(sd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sll, M(spec_op,0,0,0,0,sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op,0,0,0,0,sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op,0,0,0,0,srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op,0,0,0,0,subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op,cop_op,0,0,0,tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op,cop_op,0,0,0,tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op,cop_op,0,0,0,tlbwr_op), 0 },
	{ insn_xor, M(spec_op,0,0,0,0,xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_invalid, 0, 0 }
};

#undef M

static __init u32 build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static __init u32 build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

static __init u32 build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static __init u32 build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

static __init u32 build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & 0xffff;
}

static __init u32 build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}

static __init u32 build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

static __init u32 build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

static __init u32 build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & FUNC_MASK;
}

static __init u32 build_set(u32 arg)
{
	if (arg & ~SET_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & SET_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __init build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip)
		panic("Unsupported TLB synthesizer instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}
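
/*
 * An illustrative sketch, not used by any handler (the function name
 * is made up for this example): build_insn() consumes its arguments
 * left to right, so for insn_addiu (RS | RT | SIMM) the call below
 * encodes `addiu $26, $26, 4', i.e.
 * (addiu_op << 26) | (26 << 21) | (26 << 16) | 4 == 0x275a0004.
 */
static void __init __attribute__((unused)) build_insn_example(u32 **buf)
{
	build_insn(buf, insn_addiu, 26, 26, 4);	/* addiu $26, $26, 4 */
}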

#define I_u1u2u3(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	unsigned int b, unsigned int c)				\
{								\
	build_insn(buf, insn##op, a, b, c);			\
}

#define I_u2u1u3(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	unsigned int b, unsigned int c)				\
{								\
	build_insn(buf, insn##op, b, a, c);			\
}

#define I_u3u1u2(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	unsigned int b, unsigned int c)				\
{								\
	build_insn(buf, insn##op, b, c, a);			\
}

#define I_u1u2s3(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	unsigned int b, signed int c)				\
{								\
	build_insn(buf, insn##op, a, b, c);			\
}

#define I_u2s3u1(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	signed int b, unsigned int c)				\
{								\
	build_insn(buf, insn##op, c, a, b);			\
}

#define I_u2u1s3(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	unsigned int b, signed int c)				\
{								\
	build_insn(buf, insn##op, b, a, c);			\
}

#define I_u1u2(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	unsigned int b)						\
{								\
	build_insn(buf, insn##op, a, b);			\
}

#define I_u1s2(op)						\
static inline void __init i##op(u32 **buf, unsigned int a,	\
	signed int b)						\
{								\
	build_insn(buf, insn##op, a, b);			\
}

#define I_u1(op)						\
static inline void __init i##op(u32 **buf, unsigned int a)	\
{								\
	build_insn(buf, insn##op, a);				\
}

#define I_0(op)							\
static inline void __init i##op(u32 **buf)			\
{								\
	build_insn(buf, insn##op);				\
}

I_u2u1s3(_addiu);
I_u3u1u2(_addu);
I_u2u1u3(_andi);
I_u3u1u2(_and);
I_u1u2s3(_beq);
I_u1u2s3(_beql);
I_u1s2(_bgez);
I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
I_u1u2u3(_dmfc0);
I_u1u2u3(_dmtc0);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u2u1u3(_dsll);
I_u2u1u3(_dsll32);
I_u2u1u3(_dsra);
I_u2u1u3(_dsrl);
I_u2u1u3(_dsrl32);
I_u3u1u2(_dsubu);
I_0(_eret);
I_u1(_j);
I_u1(_jal);
I_u1(_jr);
I_u2s3u1(_ld);
I_u2s3u1(_ll);
I_u2s3u1(_lld);
I_u1s2(_lui);
I_u2s3u1(_lw);
I_u1u2u3(_mfc0);
I_u1u2u3(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
I_u2s3u1(_sc);
I_u2s3u1(_scd);
I_u2s3u1(_sd);
I_u2u1u3(_sll);
I_u2u1u3(_sra);
I_u2u1u3(_srl);
I_u3u1u2(_subu);
I_u2s3u1(_sw);
I_0(_tlbp);
I_0(_tlbwi);
I_0(_tlbwr);
I_u3u1u2(_xor);
I_u2u1u3(_xori);

/*
 * handling labels
 */
enum label_id {
	label_invalid,
	label_second_part,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

struct label {
	u32 *addr;
	enum label_id lab;
};

static __init void build_label(struct label **lab, u32 *addr,
			       enum label_id l)
{
	(*lab)->addr = addr;
	(*lab)->lab = l;
	(*lab)++;
}

#define L_LA(lb)						\
static inline void l##lb(struct label **lab, u32 *addr)		\
{								\
	build_label(lab, addr, label##lb);			\
}

L_LA(_second_part)
L_LA(_leave)
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbw_hazard)
L_LA(_split)
L_LA(_nopage_tlbl)
L_LA(_nopage_tlbs)
L_LA(_nopage_tlbm)
L_LA(_smp_pgtable_change)
L_LA(_r3000_write_probe_fail)

/* convenience macros for instructions */
#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off)
#endif

#define i_b(buf, off) i_beq(buf, 0, 0, off)
#define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off)
#define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
/*
 * Note: i_bnel is not implemented in the opcode table above, so
 * i_bnezl must stay unused until an insn_bnel entry is added.
 */
#define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
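
/*
 * All three of the above are encoded as shifts of $zero into $zero:
 * sa == 0 is the architectural nop, sa == 1 (ssnop) forces single
 * issue on superscalar cores, and sa == 3 (ehb) is the MIPS32R2
 * execution hazard barrier. Cores that predate these semantics
 * simply execute ssnop and ehb as ordinary nops.
 */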

#ifdef CONFIG_64BIT
static __init int __attribute__((unused)) in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}

static __init int __attribute__((unused)) rel_highest(long val)
{
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int __attribute__((unused)) rel_higher(long val)
{
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}
#endif

static __init int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static __init int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
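
/*
 * A worked example (the address is made up for illustration): for
 * addr == 0x8002a000, rel_lo() sign-extends the low half to -0x6000
 * and rel_hi() absorbs the borrow, yielding 0x8003 as a 16-bit
 * field, so `lui rs, 0x8003; addiu rs, rs, -0x6000' rebuilds the
 * original address.
 */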

static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
#ifdef CONFIG_64BIT
	if (!in_compat_space_p(addr)) {
		i_lui(buf, rs, rel_highest(addr));
		if (rel_higher(addr))
			i_daddiu(buf, rs, rs, rel_higher(addr));
		if (rel_hi(addr)) {
			i_dsll(buf, rs, rs, 16);
			i_daddiu(buf, rs, rs, rel_hi(addr));
			i_dsll(buf, rs, rs, 16);
		} else
			i_dsll32(buf, rs, rs, 0);
	} else
#endif
		i_lui(buf, rs, rel_hi(addr));
}

static __init void __attribute__((unused)) i_LA(u32 **buf, unsigned int rs,
						long addr)
{
	i_LA_mostly(buf, rs, addr);
	if (rel_lo(addr))
		i_ADDIU(buf, rs, rs, rel_lo(addr));
}
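
/*
 * For a 32-bit or compat-space address i_LA() degenerates to the
 * classic lui/ADDIU pair. For a 64-bit address outside the compat
 * space the worst case is six instructions, a sketch of which (zero
 * fields are skipped, as coded above) is:
 *
 *	lui	rs, highest(addr)
 *	daddiu	rs, rs, higher(addr)
 *	dsll	rs, rs, 16
 *	daddiu	rs, rs, hi(addr)
 *	dsll	rs, rs, 16
 *	daddiu	rs, rs, lo(addr)
 */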

/*
 * handle relocations
 */
struct reloc {
	u32 *addr;
	unsigned int type;
	enum label_id lab;
};

static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
			       enum label_id l)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = l;
	(*rel)++;
}

static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported TLB synthesizer relocation %d",
		      rel->type);
	}
}

static __init void resolve_relocs(struct reloc *rel, struct label *lab)
{
	struct label *l;

	for (; rel->lab != label_invalid; rel++)
		for (l = lab; l->lab != label_invalid; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}
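
/*
 * For R_MIPS_PC16 the encoded field is the usual MIPS pc-relative
 * branch offset: a branch at raddr targeting laddr gets
 * build_bimm(laddr - (raddr + 4)), i.e. the signed distance from the
 * instruction after the branch, in units of 4 bytes.
 */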

static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
			       long off)
{
	for (; rel->lab != label_invalid; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}

static __init void move_labels(struct label *lab, u32 *first, u32 *end,
			       long off)
{
	for (; lab->lab != label_invalid; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}

static __init void copy_handler(struct reloc *rel, struct label *lab,
				u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	move_relocs(rel, first, end, off);
	move_labels(lab, first, end, off);
}

static __init int __attribute__((unused)) insn_has_bdelay(struct reloc *rel,
							  u32 *addr)
{
	for (; rel->lab != label_invalid; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}

/* convenience functions for labeled branches */
static void __init __attribute__((unused))
il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bltz(p, reg, 0);
}

static void __init __attribute__((unused)) il_b(u32 **p, struct reloc **r,
						enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_b(p, 0);
}

static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg,
			   enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqz(p, reg, 0);
}

static void __init __attribute__((unused))
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqzl(p, reg, 0);
}

static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg,
			   enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bnez(p, reg, 0);
}

static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
			    enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgezl(p, reg, 0);
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif

/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static __initdata u32 tlb_handler[128];

/* simply assume worst case size for labels and relocs */
static __initdata struct label labels[128];
static __initdata struct reloc relocs[128];

/*
 * The R3000 TLB handler is simple.
 */
static void __init build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	i_mfc0(&p, K0, C0_BADVADDR);
	i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
	i_lw(&p, K1, rel_lo(pgdc), K1);
	i_srl(&p, K0, K0, 22); /* load delay */
	i_sll(&p, K0, K0, 2);
	i_addu(&p, K1, K1, K0);
	i_mfc0(&p, K0, C0_CONTEXT);
	i_lw(&p, K1, 0, K1); /* cp0 delay */
	i_andi(&p, K0, K0, 0xffc); /* load delay */
	i_addu(&p, K1, K1, K0);
	i_lw(&p, K0, 0, K1);
	i_nop(&p); /* load delay */
	i_mtc0(&p, K0, C0_ENTRYLO0);
	i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	i_tlbwr(&p); /* cp0 delay */
	i_jr(&p, K1);
	i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_info("Synthesized TLB refill handler (%u instructions).\n",
		(unsigned int)(p - tlb_handler));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - tlb_handler); i++)
		pr_debug("\t.word 0x%08x\n", tlb_handler[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, tlb_handler, 0x80);
}

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static __initdata u32 final_handler[64];

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_data.cputype) {
	/* Found by experiment: R4600 v2.0 needs this, too. */
	case CPU_R4600:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		i_nop(p);
		i_tlbp(p);
		break;

	default:
		i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static __init void build_tlb_write_entry(u32 **p, struct label **l,
					 struct reloc **r,
					 enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = i_tlbwr; break;
	case tlb_indexed: tlbw = i_tlbwi; break;
	}

	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		i_nop(p);
		tlbw(p);
		i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_AU1000:
	case CPU_AU1100:
	case CPU_AU1500:
	case CPU_AU1550:
	case CPU_AU1200:
	case CPU_PR4450:
		i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
		tlbw(p);
		break;

	case CPU_NEVADA:
		i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		i_nop(p);
		i_nop(p);
		i_nop(p);
		i_nop(p);
		tlbw(p);
		break;

	case CPU_4KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		i_ehb(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		tlbw(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		i_nop(p);
		i_nop(p);
		tlbw(p);
		i_nop(p);
		i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		i_nop(p);
		i_nop(p);
		tlbw(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static __init void
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	i_dmfc0(p, tmp, C0_BADVADDR);
	il_bltz(p, r, tmp, label_vmalloc);
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsrl(p, ptr, ptr, 23);
# endif
	i_LA_mostly(p, tmp, pgdc);
	i_daddu(p, ptr, ptr, tmp);
	i_dmfc0(p, tmp, C0_BADVADDR);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

	l_vmalloc_done(l, *p);
	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	i_daddu(p, ptr, ptr, tmp);	/* add in pgd offset */
	i_dmfc0(p, tmp, C0_BADVADDR);	/* get faulting address */
	i_ld(p, ptr, 0, ptr);		/* get pmd pointer */
	i_dsrl(p, tmp, tmp, PMD_SHIFT - 3);	/* get pmd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	i_daddu(p, ptr, ptr, tmp);	/* add in pmd offset */
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static __init void
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

	l_vmalloc(l, *p);
	i_LA(p, ptr, VMALLOC_START);
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(swpd));
	} else {
		i_LA_mostly(p, ptr, swpd);
		il_b(p, r, label_vmalloc_done);
		i_daddiu(p, ptr, ptr, rel_lo(swpd));
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static __init void __attribute__((unused))
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
# endif
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
#endif
	i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	i_sll(p, tmp, tmp, PGD_T_LOG2);
	i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
#endif /* !CONFIG_64BIT */

static __init void build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_data.cputype) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		i_SRL(p, ctx, ctx, shift);
	i_andi(p, ctx, ctx, mask);
}
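
/*
 * A worked example, assuming 4K pages and 32-bit PTEs (PAGE_SHIFT ==
 * 12, PTE_T_LOG2 == 2): the CPU leaves BadVPN2 == badvaddr >> 13 at
 * bit 4 of the Context register. We want the byte offset of the even
 * PTE of the pair, (badvaddr >> 13) << 3, so shift evaluates to 1;
 * the andi then strips the PTEBase bits and keeps the offset within
 * one page table page.
 */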

static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_data.cputype) {
	case CPU_NEVADA:
		i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static __init void build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		i_ld(p, tmp, 0, ptep); /* get even pte */
		i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	i_LW(p, tmp, 0, ptep); /* get even pte */
	i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO0);
	i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO1);
	i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

static void __init build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct label *l = labels;
	struct reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	l_leave(&l, p);
	i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
#ifdef CONFIG_32BIT
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler into the TLB refill handler space.
	 */
#ifdef CONFIG_32BIT
	f = final_handler;
	/* Simplest case, just copy the handler. */
	copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		l_split(&l, final_handler);
		il_b(&f, &r, label_split);
		if (insn_has_bdelay(relocs, split))
			i_nop(&f);
		else {
			copy_handler(relocs, labels, split, split + 1, f);
			move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB refill handler (%u instructions).\n",
		final_len);

	f = final_handler;
#ifdef CONFIG_64BIT
	if (final_len > 32)
		final_len = 64;
	else
		f = final_handler + 32;
#endif /* CONFIG_64BIT */

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < final_len; i++)
		pr_debug("\t.word 0x%08x\n", f[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, final_handler, 0x100);
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime; the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

#define __tlb_handler_align \
	__attribute__((__aligned__(1 << CONFIG_MIPS_L1_CACHE_SHIFT)))

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 __tlb_handler_align handle_tlbl[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE];

static void __init
iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_lld(p, pte, 0, ptr);
	else
# endif
		i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_ld(p, pte, 0, ptr);
	else
# endif
		i_LW(p, pte, 0, ptr);
#endif
}
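
/*
 * On SMP the PTE is (re)loaded with a load-linked so that the
 * store-conditional in iPTE_SW below can detect a page table entry
 * that changed under us and branch back to label_smp_pgtable_change
 * to retry.
 */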

static void __init
iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_scd(p, pte, 0, ptr);
	else
# endif
		i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no i_nop needed */
		i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no i_nop needed */
		i_lw(p, pte, 0, ptr);
	} else
		i_nop(p);
# else
	i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_sd(p, pte, 0, ptr);
	else
# endif
		i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __init
build_pte_present(u32 **p, struct label **l, struct reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless,
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless,
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_WRITE);
	il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __init
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	i_tlbwi(p);
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __init
build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
			     unsigned int pte, unsigned int tmp)
{
	i_mfc0(p, tmp, C0_INDEX);
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* branch delay */
	i_tlbwi(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
	l_r3000_write_probe_fail(l, *p);
	i_tlbwr(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

static void __init
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	i_mfc0(p, pte, C0_BADVADDR);
	i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, pte, pte, 22); /* load delay */
	i_sll(p, pte, pte, 2);
	i_addu(p, ptr, ptr, pte);
	i_mfc0(p, pte, C0_CONTEXT);
	i_lw(p, ptr, 0, ptr); /* cp0 delay */
	i_andi(p, pte, pte, 0xffc); /* load delay */
	i_addu(p, ptr, ptr, pte);
	i_lw(p, pte, 0, ptr);
	i_tlbp(p); /* load delay */
}

static void __init build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbl); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
	pr_debug("\t.set pop\n");
}

static void __init build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbs); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
	pr_debug("\t.set pop\n");
}

static void __init build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbm); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbm[i]);
	pr_debug("\t.set pop\n");
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __init
build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
				   struct reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	i_MFC0(p, pte, C0_BADVADDR);
	i_LW(p, ptr, 0, ptr);
	i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	build_tlb_probe_entry(p);
}

static void __init
build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
				   struct reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	i_ori(p, ptr, ptr, sizeof(pte_t));
	i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	l_leave(l, *p);
	i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

static void __init build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbl); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
	pr_debug("\t.set pop\n");
}

static void __init build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbs); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
	pr_debug("\t.set pop\n");
}

static void __init build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbm); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbm[i]);
	pr_debug("\t.set pop\n");
}

void __init build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_data.cputype) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}

void __init flush_tlb_handlers(void)
{
	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}