/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004,2005,2006 by Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/bugs.h>
#include <asm/mmu_context.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/war.h>

static inline int r45k_bvahwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int r4k_250MHZhwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
        return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
        return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4Kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register. Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values. Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __init m4kc_tlbp_war(void)
{
        return (current_cpu_data.processor_id & 0xffff00) ==
               (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple, only supports a
 * subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 */

enum fields {
        RS   = 0x001,
        RT   = 0x002,
        RD   = 0x004,
        RE   = 0x008,
        SIMM = 0x010,
        UIMM = 0x020,
        BIMM = 0x040,
        JIMM = 0x080,
        FUNC = 0x100,
        SET  = 0x200
};

#define OP_MASK         0x3f
#define OP_SH           26
#define RS_MASK         0x1f
#define RS_SH           21
#define RT_MASK         0x1f
#define RT_SH           16
#define RD_MASK         0x1f
#define RD_SH           11
#define RE_MASK         0x1f
#define RE_SH           6
#define IMM_MASK        0xffff
#define IMM_SH          0
#define JIMM_MASK       0x3ffffff
#define JIMM_SH         0
#define FUNC_MASK       0x3f
#define FUNC_SH         0
#define SET_MASK        0x7
#define SET_SH          0

enum opcode {
        insn_invalid,
        insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
        insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
        insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
        insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
        insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
        insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
        insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
        insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
        insn_tlbwr, insn_xor, insn_xori
};

struct insn {
        enum opcode opcode;
        u32 match;
        enum fields fields;
};

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f) \
        ((a) << OP_SH \
         | (b) << RS_SH \
         | (c) << RT_SH \
         | (d) << RD_SH \
         | (e) << RE_SH \
         | (f) << FUNC_SH)

static struct insn insn_table[] __initdata = {
        { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
        { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
        { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
        { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
        { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
        { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
        { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
        { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
        { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
        { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
        { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
        { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
        { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
        { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
        { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
        { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
        { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
        { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
        { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
        { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
        { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
        { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
        { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
        { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
        { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
        { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
        { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
        { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
        { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
        { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
        { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
        { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
        { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
        { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
        { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
        { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
        { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
        { insn_invalid, 0, 0 }
};

#undef M

static u32 __init build_rs(u32 arg)
{
        if (arg & ~RS_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return (arg & RS_MASK) << RS_SH;
}

static u32 __init build_rt(u32 arg)
{
        if (arg & ~RT_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return (arg & RT_MASK) << RT_SH;
}

static u32 __init build_rd(u32 arg)
{
        if (arg & ~RD_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return (arg & RD_MASK) << RD_SH;
}

static u32 __init build_re(u32 arg)
{
        if (arg & ~RE_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return (arg & RE_MASK) << RE_SH;
}

static u32 __init build_simm(s32 arg)
{
        if (arg > 0x7fff || arg < -0x8000)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return arg & 0xffff;
}

static u32 __init build_uimm(u32 arg)
{
        if (arg & ~IMM_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return arg & IMM_MASK;
}

static u32 __init build_bimm(s32 arg)
{
        if (arg > 0x1ffff || arg < -0x20000)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        if (arg & 0x3)
                printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

        return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

static u32 __init build_jimm(u32 arg)
{
        if (arg & ~((JIMM_MASK) << 2))
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return (arg >> 2) & JIMM_MASK;
}

static u32 __init build_func(u32 arg)
{
        if (arg & ~FUNC_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return arg & FUNC_MASK;
}

static u32 __init build_set(u32 arg)
{
        if (arg & ~SET_MASK)
                printk(KERN_WARNING "TLB synthesizer field overflow\n");

        return arg & SET_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __init build_insn(u32 **buf, enum opcode opc, ...)
{
        struct insn *ip = NULL;
        unsigned int i;
        va_list ap;
        u32 op;

        for (i = 0; insn_table[i].opcode != insn_invalid; i++)
                if (insn_table[i].opcode == opc) {
                        ip = &insn_table[i];
                        break;
                }

        if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
                panic("Unsupported TLB synthesizer instruction %d", opc);

        op = ip->match;
        va_start(ap, opc);
        if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
        if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
        if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
        if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
        if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
        if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
        if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
        if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
        if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
        if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
        va_end(ap);

        **buf = op;
        (*buf)++;
}
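
/*
 * Worked example (for illustration only, not part of the generated
 * handlers): emitting "addu k1, k1, k0" goes through i_addu(&p, 27, 27, 26)
 * (defined below), which calls build_insn(&p, insn_addu, 27, 26, 27) with
 * fields RS | RT | RD, so the word assembled is
 *
 *      M(spec_op, 0, 0, 0, 0, addu_op)
 *        | build_rs(27) | build_rt(26) | build_rd(27)
 *      = (27 << 21) | (26 << 16) | (27 << 11) | 0x21
 *      = 0x037ad821
 *
 * and *buf is advanced by one instruction word.
 */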
#define I_u1u2u3(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                unsigned int b, unsigned int c) \
        { \
                build_insn(buf, insn##op, a, b, c); \
        }

#define I_u2u1u3(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                unsigned int b, unsigned int c) \
        { \
                build_insn(buf, insn##op, b, a, c); \
        }

#define I_u3u1u2(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                unsigned int b, unsigned int c) \
        { \
                build_insn(buf, insn##op, b, c, a); \
        }

#define I_u1u2s3(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                unsigned int b, signed int c) \
        { \
                build_insn(buf, insn##op, a, b, c); \
        }

#define I_u2s3u1(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                signed int b, unsigned int c) \
        { \
                build_insn(buf, insn##op, c, a, b); \
        }

#define I_u2u1s3(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                unsigned int b, signed int c) \
        { \
                build_insn(buf, insn##op, b, a, c); \
        }

#define I_u1u2(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                unsigned int b) \
        { \
                build_insn(buf, insn##op, a, b); \
        }

#define I_u1s2(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a, \
                signed int b) \
        { \
                build_insn(buf, insn##op, a, b); \
        }

#define I_u1(op) \
        static void __init __maybe_unused i##op(u32 **buf, unsigned int a) \
        { \
                build_insn(buf, insn##op, a); \
        }

#define I_0(op) \
        static void __init __maybe_unused i##op(u32 **buf) \
        { \
                build_insn(buf, insn##op); \
        }
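
/*
 * The I_* wrappers below give each opcode a C-callable emitter. The
 * digits in each macro name give the position of the corresponding C
 * argument in the instruction's field order; e.g. I_u3u1u2(_addu)
 * defines i_addu(buf, rd, rs, rt), passing its first argument as the
 * third field (RD) of build_insn().
 */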
I_u2u1s3(_addiu);
I_u3u1u2(_addu);
I_u2u1u3(_andi);
I_u3u1u2(_and);
I_u1u2s3(_beq);
I_u1u2s3(_beql);
I_u1s2(_bgez);
I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
I_u1u2u3(_dmfc0);
I_u1u2u3(_dmtc0);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u2u1u3(_dsll);
I_u2u1u3(_dsll32);
I_u2u1u3(_dsra);
I_u2u1u3(_dsrl);
I_u2u1u3(_dsrl32);
I_u3u1u2(_dsubu);
I_0(_eret);
I_u1(_j);
I_u1(_jal);
I_u1(_jr);
I_u2s3u1(_ld);
I_u2s3u1(_ll);
I_u2s3u1(_lld);
I_u1s2(_lui);
I_u2s3u1(_lw);
I_u1u2u3(_mfc0);
I_u1u2u3(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
I_u2s3u1(_sc);
I_u2s3u1(_scd);
I_u2s3u1(_sd);
I_u2u1u3(_sll);
I_u2u1u3(_sra);
I_u2u1u3(_srl);
I_u3u1u2(_subu);
I_u2s3u1(_sw);
I_0(_tlbp);
I_0(_tlbwi);
I_0(_tlbwr);
I_u3u1u2(_xor);
I_u2u1u3(_xori);
/*
 * handling labels
 */
enum label_id {
        label_invalid,
        label_second_part,
        label_leave,
#ifdef MODULE_START
        label_module_alloc,
#endif
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard,
        label_split,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
};

struct label {
        u32 *addr;
        enum label_id lab;
};

static void __init build_label(struct label **lab, u32 *addr,
                               enum label_id l)
{
        (*lab)->addr = addr;
        (*lab)->lab = l;
        (*lab)++;
}

#define L_LA(lb) \
        static inline void __init l##lb(struct label **lab, u32 *addr) \
        { \
                build_label(lab, addr, label##lb); \
        }

L_LA(_second_part)
L_LA(_leave)
#ifdef MODULE_START
L_LA(_module_alloc)
#endif
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbw_hazard)
L_LA(_split)
L_LA(_nopage_tlbl)
L_LA(_nopage_tlbs)
L_LA(_nopage_tlbm)
L_LA(_smp_pgtable_change)
L_LA(_r3000_write_probe_fail)

/* convenience macros for instructions */
#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off)
#endif

#define i_b(buf, off) i_beq(buf, 0, 0, off)
#define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off)
#define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
#define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
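
/*
 * Note for the reader: nop, ssnop and ehb are all encodings of
 * "sll $0, $0, sa" with sa = 0, 1 and 3 respectively, which is why
 * the three macros above reuse i_sll() on register zero.
 */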
static int __init __maybe_unused in_compat_space_p(long addr)
{
        /* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
        return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
#else
        return 1;
#endif
}

static int __init __maybe_unused rel_highest(long val)
{
#ifdef CONFIG_64BIT
        return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#else
        return 0;
#endif
}

static int __init __maybe_unused rel_higher(long val)
{
#ifdef CONFIG_64BIT
        return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#else
        return 0;
#endif
}

static int __init rel_hi(long val)
{
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int __init rel_lo(long val)
{
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
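
/*
 * Worked example (illustration only): for val = 0x12348765, rel_lo()
 * yields 0x8765 sign-extended to -0x789b, and rel_hi() yields 0x1235
 * because the "+ 0x8000" pre-compensates for that sign extension:
 * (0x1235 << 16) + (-0x789b) = 0x12348765.
 */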
static void __init i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
        if (!in_compat_space_p(addr)) {
                i_lui(buf, rs, rel_highest(addr));
                if (rel_higher(addr))
                        i_daddiu(buf, rs, rs, rel_higher(addr));
                if (rel_hi(addr)) {
                        i_dsll(buf, rs, rs, 16);
                        i_daddiu(buf, rs, rs, rel_hi(addr));
                        i_dsll(buf, rs, rs, 16);
                } else
                        i_dsll32(buf, rs, rs, 0);
        } else
                i_lui(buf, rs, rel_hi(addr));
}

static void __init __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr)
{
        i_LA_mostly(buf, rs, addr);
        if (rel_lo(addr)) {
                if (!in_compat_space_p(addr))
                        i_daddiu(buf, rs, rs, rel_lo(addr));
                else
                        i_addiu(buf, rs, rs, rel_lo(addr));
        }
}

/*
 * handle relocations
 */
struct reloc {
        u32 *addr;
        unsigned int type;
        enum label_id lab;
};

static void __init r_mips_pc16(struct reloc **rel, u32 *addr,
                               enum label_id l)
{
        (*rel)->addr = addr;
        (*rel)->type = R_MIPS_PC16;
        (*rel)->lab = l;
        (*rel)++;
}

static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
        long laddr = (long)lab->addr;
        long raddr = (long)rel->addr;

        switch (rel->type) {
        case R_MIPS_PC16:
                *rel->addr |= build_bimm(laddr - (raddr + 4));
                break;

        default:
                panic("Unsupported TLB synthesizer relocation %d",
                      rel->type);
        }
}

static void __init resolve_relocs(struct reloc *rel, struct label *lab)
{
        struct label *l;

        for (; rel->lab != label_invalid; rel++)
                for (l = lab; l->lab != label_invalid; l++)
                        if (rel->lab == l->lab)
                                __resolve_relocs(rel, l);
}

static void __init move_relocs(struct reloc *rel, u32 *first, u32 *end,
                               long off)
{
        for (; rel->lab != label_invalid; rel++)
                if (rel->addr >= first && rel->addr < end)
                        rel->addr += off;
}

static void __init move_labels(struct label *lab, u32 *first, u32 *end,
                               long off)
{
        for (; lab->lab != label_invalid; lab++)
                if (lab->addr >= first && lab->addr < end)
                        lab->addr += off;
}

static void __init copy_handler(struct reloc *rel, struct label *lab,
                                u32 *first, u32 *end, u32 *target)
{
        long off = (long)(target - first);

        memcpy(target, first, (end - first) * sizeof(u32));

        move_relocs(rel, first, end, off);
        move_labels(lab, first, end, off);
}

static int __init __maybe_unused insn_has_bdelay(struct reloc *rel,
                                                 u32 *addr)
{
        for (; rel->lab != label_invalid; rel++) {
                if (rel->addr == addr
                    && (rel->type == R_MIPS_PC16
                        || rel->type == R_MIPS_26))
                        return 1;
        }

        return 0;
}

/* convenience functions for labeled branches */
static void __init __maybe_unused
il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_bltz(p, reg, 0);
}

static void __init __maybe_unused il_b(u32 **p, struct reloc **r,
                                       enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_b(p, 0);
}

static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg,
                           enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_beqz(p, reg, 0);
}

static void __init __maybe_unused
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_beqzl(p, reg, 0);
}

static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg,
                           enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_bnez(p, reg, 0);
}

static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
                            enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_bgezl(p, reg, 0);
}

static void __init __maybe_unused
il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
        r_mips_pc16(r, *p, l);
        i_bgez(p, reg, 0);
}

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
        int i;

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");

        for (i = 0; i < count; i++)
                pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

        pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0              26
#define K1              27

/* Some CP0 registers */
#define C0_INDEX        0, 0
#define C0_ENTRYLO0     2, 0
#define C0_TCBIND       2, 2
#define C0_ENTRYLO1     3, 0
#define C0_CONTEXT      4, 0
#define C0_BADVADDR     8, 0
#define C0_ENTRYHI      10, 0
#define C0_EPC          14, 0
#define C0_XCONTEXT     20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif

/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __initdata;

/* simply assume worst case size for labels and relocs */
static struct label labels[128] __initdata;
static struct reloc relocs[128] __initdata;

/*
 * The R3000 TLB handler is simple.
 */
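/*
 * For reference, the sequence synthesized below corresponds roughly to:
 *
 *      mfc0    k0, c0_badvaddr
 *      lui     k1, %hi(pgd_current)
 *      lw      k1, %lo(pgd_current)(k1)
 *      srl     k0, k0, 22
 *      sll     k0, k0, 2
 *      addu    k1, k1, k0
 *      mfc0    k0, c0_context
 *      lw      k1, 0(k1)
 *      andi    k0, k0, 0xffc
 *      addu    k1, k1, k0
 *      lw      k0, 0(k1)
 *      nop
 *      mtc0    k0, c0_entrylo0
 *      mfc0    k1, c0_epc
 *      tlbwr
 *      jr      k1
 *      rfe
 */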
static void __init build_r3000_tlb_refill_handler(void)
{
        long pgdc = (long)pgd_current;
        u32 *p;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        p = tlb_handler;

        i_mfc0(&p, K0, C0_BADVADDR);
        i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
        i_lw(&p, K1, rel_lo(pgdc), K1);
        i_srl(&p, K0, K0, 22); /* load delay */
        i_sll(&p, K0, K0, 2);
        i_addu(&p, K1, K1, K0);
        i_mfc0(&p, K0, C0_CONTEXT);
        i_lw(&p, K1, 0, K1); /* cp0 delay */
        i_andi(&p, K0, K0, 0xffc); /* load delay */
        i_addu(&p, K1, K1, K0);
        i_lw(&p, K0, 0, K1);
        i_nop(&p); /* load delay */
        i_mtc0(&p, K0, C0_ENTRYLO0);
        i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
        i_tlbwr(&p); /* cp0 delay */
        i_jr(&p, K1);
        i_rfe(&p); /* branch delay */

        if (p > tlb_handler + 32)
                panic("TLB refill handler space exceeded");

        pr_info("Synthesized TLB refill handler (%u instructions).\n",
                (unsigned int)(p - tlb_handler));

        memcpy((void *)ebase, tlb_handler, 0x80);

        dump_handler((u32 *)ebase, 32);
}
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __initdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling
 * instruction can modify the address used to access the JTLB. The failure
 * symptom is that the TLBP instruction will use an address created for the
 * stalling instruction and not the address held in C0_ENHI and thus report
 * the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
{
        switch (current_cpu_type()) {
        /* Found by experiment: R4600 v2.0 needs this, too. */
        case CPU_R4600:
        case CPU_R5000:
        case CPU_R5000A:
        case CPU_NEVADA:
                i_nop(p);
                i_tlbp(p);
                break;

        default:
                i_tlbp(p);
                break;
        }
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __init build_tlb_write_entry(u32 **p, struct label **l,
                                         struct reloc **r,
                                         enum tlb_write_entry wmode)
{
        void (*tlbw)(u32 **) = NULL;

        switch (wmode) {
        case tlb_random: tlbw = i_tlbwr; break;
        case tlb_indexed: tlbw = i_tlbwi; break;
        }

        if (cpu_has_mips_r2) {
                i_ehb(p);
                tlbw(p);
                return;
        }

        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * two nops after the tlbw instruction.
                 */
                il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                l_tlbw_hazard(l, *p);
                i_nop(p);
                break;

        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_R5000A:
                i_nop(p);
                tlbw(p);
                i_nop(p);
                break;

        case CPU_R4300:
        case CPU_5KC:
        case CPU_TX49XX:
        case CPU_AU1000:
        case CPU_AU1100:
        case CPU_AU1500:
        case CPU_AU1550:
        case CPU_AU1200:
        case CPU_PR4450:
                i_nop(p);
                tlbw(p);
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_4KC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
        case CPU_20KC:
        case CPU_25KF:
        case CPU_BCM3302:
        case CPU_BCM4710:
        case CPU_LOONGSON2:
                if (m4kc_tlbp_war())
                        i_nop(p);
                tlbw(p);
                break;

        case CPU_NEVADA:
                i_nop(p); /* QED specifies 2 nops hazard */
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * a nop after the tlbw instruction.
                 */
                il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                l_tlbw_hazard(l, *p);
                break;

        case CPU_RM7000:
                i_nop(p);
                i_nop(p);
                i_nop(p);
                i_nop(p);
                tlbw(p);
                break;

        case CPU_RM9000:
                /*
                 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
                 * use of the JTLB for instructions should not occur for 4
                 * cpu cycles and use for data translations should not occur
                 * for 3 cpu cycles.
                 */
                i_ssnop(p);
                i_ssnop(p);
                i_ssnop(p);
                i_ssnop(p);
                tlbw(p);
                i_ssnop(p);
                i_ssnop(p);
                i_ssnop(p);
                i_ssnop(p);
                break;

        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                i_nop(p);
                i_nop(p);
                tlbw(p);
                i_nop(p);
                i_nop(p);
                break;

        case CPU_VR4131:
        case CPU_VR4133:
        case CPU_R5432:
                i_nop(p);
                i_nop(p);
                tlbw(p);
                break;

        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_data.cputype);
                break;
        }
}
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __init
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
                 unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /*
         * The vmalloc handling is not in the hotpath.
         */
        i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
        il_bltz(p, r, tmp, label_module_alloc);
#else
        il_bltz(p, r, tmp, label_vmalloc);
#endif
        /* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        i_mfc0(p, ptr, C0_TCBIND);
        i_dsrl(p, ptr, ptr, 19);
# else
        /*
         * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
         * stored in CONTEXT.
         */
        i_dmfc0(p, ptr, C0_CONTEXT);
        i_dsrl(p, ptr, ptr, 23);
# endif
        i_LA_mostly(p, tmp, pgdc);
        i_daddu(p, ptr, ptr, tmp);
        i_dmfc0(p, tmp, C0_BADVADDR);
        i_ld(p, ptr, rel_lo(pgdc), ptr);
#else
        i_LA_mostly(p, ptr, pgdc);
        i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

        l_vmalloc_done(l, *p);

        if (PGDIR_SHIFT - 3 < 32)               /* get pgd offset in bytes */
                i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
        else
                i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

        i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
        i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
        i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        i_ld(p, ptr, 0, ptr); /* get pmd pointer */
        i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
        i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
        i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
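
/*
 * A note on the index arithmetic above: shifting BadVAddr right by
 * PGDIR_SHIFT - 3 leaves the pgd index already multiplied by 8, the
 * size of a 64-bit entry, and the andi against (PTRS_PER_PGD - 1) << 3
 * keeps exactly that byte offset, so a single daddu produces the
 * address of the pgd slot. The pmd lookup repeats the same trick with
 * PMD_SHIFT.
 */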
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __init
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
                        unsigned int bvaddr, unsigned int ptr)
{
        long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
        long modd = (long)module_pg_dir;

        l_module_alloc(l, *p);
        /*
         * Assumption:
         *   VMALLOC_START >= 0xc000000000000000UL
         *   MODULE_START  >= 0xe000000000000000UL
         */
        i_SLL(p, ptr, bvaddr, 2);
        il_bgez(p, r, ptr, label_vmalloc);

        if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
                i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
        } else {
                /* unlikely configuration */
                i_nop(p); /* delay slot */
                i_LA(p, ptr, MODULE_START);
        }
        i_dsubu(p, bvaddr, bvaddr, ptr);

        if (in_compat_space_p(modd) && !rel_lo(modd)) {
                il_b(p, r, label_vmalloc_done);
                i_lui(p, ptr, rel_hi(modd));
        } else {
                i_LA_mostly(p, ptr, modd);
                il_b(p, r, label_vmalloc_done);
                if (in_compat_space_p(modd))
                        i_addiu(p, ptr, ptr, rel_lo(modd));
                else
                        i_daddiu(p, ptr, ptr, rel_lo(modd));
        }

        l_vmalloc(l, *p);
        if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
            MODULE_START << 32 == VMALLOC_START)
                i_dsll32(p, ptr, ptr, 0); /* typical case */
        else
                i_LA(p, ptr, VMALLOC_START);
#else
        l_vmalloc(l, *p);
        i_LA(p, ptr, VMALLOC_START);
#endif
        i_dsubu(p, bvaddr, bvaddr, ptr);

        if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
                il_b(p, r, label_vmalloc_done);
                i_lui(p, ptr, rel_hi(swpd));
        } else {
                i_LA_mostly(p, ptr, swpd);
                il_b(p, r, label_vmalloc_done);
                if (in_compat_space_p(swpd))
                        i_addiu(p, ptr, ptr, rel_lo(swpd));
                else
                        i_daddiu(p, ptr, ptr, rel_lo(swpd));
        }
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __init __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        i_mfc0(p, ptr, C0_TCBIND);
        i_LA_mostly(p, tmp, pgdc);
        i_srl(p, ptr, ptr, 19);
# else
        /*
         * smp_processor_id() << 3 is stored in CONTEXT.
         */
        i_mfc0(p, ptr, C0_CONTEXT);
        i_LA_mostly(p, tmp, pgdc);
        i_srl(p, ptr, ptr, 23);
# endif
        i_addu(p, ptr, tmp, ptr);
#else
        i_LA_mostly(p, ptr, pgdc);
#endif
        i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        i_lw(p, ptr, rel_lo(pgdc), ptr);
        i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        i_sll(p, tmp, tmp, PGD_T_LOG2);
        i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __init build_adjust_context(u32 **p, unsigned int ctx)
{
        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

        switch (current_cpu_type()) {
        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4131:
        case CPU_VR4181:
        case CPU_VR4181A:
        case CPU_VR4133:
                shift += 2;
                break;

        default:
                break;
        }

        if (shift)
                i_SRL(p, ctx, ctx, shift);
        i_andi(p, ctx, ctx, mask);
}
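
/*
 * Sketch of the adjustment (assuming a 32-bit kernel with 4K pages and
 * 4-byte ptes, so PTE_T_LOG2 = 2 and PTRS_PER_PTE = 1024): shift
 * becomes 4 - 3 + 0 = 1 and mask becomes (1024 / 2 - 1) << 3 = 0xff8,
 * i.e. the Context value is turned into the byte offset of an even/odd
 * pte pair within the page table.
 */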
static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
        /*
         * Bug workaround for the Nevada. It seems as if under certain
         * circumstances the move from cp0_context might produce a
         * bogus result when the mfc0 instruction and its consumer are
         * in different cachelines, or when a load instruction (probably
         * any memory reference) sits between them.
         */
        switch (current_cpu_type()) {
        case CPU_NEVADA:
                i_LW(p, ptr, 0, ptr);
                GET_CONTEXT(p, tmp); /* get context reg */
                break;

        default:
                GET_CONTEXT(p, tmp); /* get context reg */
                i_LW(p, ptr, 0, ptr);
                break;
        }

        build_adjust_context(p, tmp);
        i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __init build_update_entries(u32 **p, unsigned int tmp,
                                        unsigned int ptep)
{
        /*
         * 64bit address support (36bit on a 32bit CPU) in a 32bit
         * Kernel is a special case. Only a few CPUs use it.
         */
#ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits) {
                i_ld(p, tmp, 0, ptep); /* get even pte */
                i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
                i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
                i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
                i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);

                /* The pte entries are pre-shifted */
                i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
                i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
                i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
                i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
        }
#else
        i_LW(p, tmp, 0, ptep); /* get even pte */
        i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
        if (r4k_250MHZhwbug())
                i_mtc0(p, 0, C0_ENTRYLO0);
        i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
        i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
        if (r45k_bvahwbug())
                i_mfc0(p, tmp, C0_INDEX);
        if (r4k_250MHZhwbug())
                i_mtc0(p, 0, C0_ENTRYLO1);
        i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
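
/*
 * Background for the "convert to entrylo" shifts above: each R4000-style
 * TLB entry maps a pair of virtual pages, so the even and odd ptes are
 * loaded into EntryLo0 and EntryLo1 respectively, with the right shift
 * by 6 dropping the software-maintained page bits kept below the
 * hardware EntryLo fields.
 */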
static void __init build_r4000_tlb_refill_handler(void)
{
        u32 *p = tlb_handler;
        struct label *l = labels;
        struct reloc *r = relocs;
        u32 *f;
        unsigned int final_len;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
        memset(final_handler, 0, sizeof(final_handler));

        /*
         * create the plain linear handler
         */
        if (bcm1250_m3_war()) {
                i_MFC0(&p, K0, C0_BADVADDR);
                i_MFC0(&p, K1, C0_ENTRYHI);
                i_xor(&p, K0, K0, K1);
                i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                il_bnez(&p, &r, K0, label_leave);
                /* No need for i_nop */
        }

#ifdef CONFIG_64BIT
        build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
        build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

        build_get_ptep(&p, K0, K1);
        build_update_entries(&p, K0, K1);
        build_tlb_write_entry(&p, &l, &r, tlb_random);
        l_leave(&l, p);
        i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

        /*
         * Overflow check: For the 64bit handler, we need at least one
         * free instruction slot for the wrap-around branch. In worst
         * case, if the intended insertion point is a delay slot, we
         * need three, with the second nop'ed and the third being
         * unused.
         */
        /* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        if ((p - tlb_handler) > 64)
                panic("TLB refill handler space exceeded");
#else
        if (((p - tlb_handler) > 63)
            || (((p - tlb_handler) > 61)
                && insn_has_bdelay(relocs, tlb_handler + 29)))
                panic("TLB refill handler space exceeded");
#endif

        /*
         * Now fold the handler in the TLB refill handler space.
         */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        f = final_handler;
        /* Simplest case, just copy the handler. */
        copy_handler(relocs, labels, tlb_handler, p, f);
        final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
        f = final_handler + 32;
        if ((p - tlb_handler) <= 32) {
                /* Just copy the handler. */
                copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
                u32 *split = tlb_handler + 30;

                /*
                 * Find the split point.
                 */
                if (insn_has_bdelay(relocs, split - 1))
                        split--;

                /* Copy first part of the handler. */
                copy_handler(relocs, labels, tlb_handler, split, f);
                f += split - tlb_handler;

                /* Insert branch. */
                l_split(&l, final_handler);
                il_b(&f, &r, label_split);
                if (insn_has_bdelay(relocs, split))
                        i_nop(&f);
                else {
                        copy_handler(relocs, labels, split, split + 1, f);
                        move_labels(labels, f, f + 1, -1);
                        f++;
                        split++;
                }

                /* Copy the rest of the handler. */
                copy_handler(relocs, labels, split, p, final_handler);
                final_len = (f - (final_handler + 32)) + (p - split);
        }
#endif /* CONFIG_64BIT */

        resolve_relocs(relocs, labels);
        pr_info("Synthesized TLB refill handler (%u instructions).\n",
                final_len);

        memcpy((void *)ebase, final_handler, 0x100);

        dump_handler((u32 *)ebase, 64);
}
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __init
iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                i_lld(p, pte, 0, ptr);
        else
# endif
                i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                i_ld(p, pte, 0, ptr);
        else
# endif
                i_LW(p, pte, 0, ptr);
#endif
}

static void __init
iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
        unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

        i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                i_scd(p, pte, 0, ptr);
        else
# endif
                i_SC(p, pte, 0, ptr);

        if (r10000_llsc_war())
                il_beqzl(p, r, pte, label_smp_pgtable_change);
        else
                il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                /* no i_nop needed */
                i_ll(p, pte, sizeof(pte_t) / 2, ptr);
                i_ori(p, pte, pte, hwmode);
                i_sc(p, pte, sizeof(pte_t) / 2, ptr);
                il_beqz(p, r, pte, label_smp_pgtable_change);
                /* no i_nop needed */
                i_lw(p, pte, 0, ptr);
        } else
                i_nop(p);
# else
        i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                i_sd(p, pte, 0, ptr);
        else
# endif
                i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                i_lw(p, pte, sizeof(pte_t) / 2, ptr);
                i_ori(p, pte, pte, hwmode);
                i_sw(p, pte, sizeof(pte_t) / 2, ptr);
                i_lw(p, pte, 0, ptr);
        }
# endif
#endif
}
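
/*
 * On SMP the pte update above is an ll/sc sequence: if the sc fails
 * because another CPU touched the page table, the beqz/beqzl branches
 * back to label_smp_pgtable_change (placed before iPTE_LW in
 * build_r4000_tlbchange_handler_head below), and the fastpath retries
 * with a freshly loaded pte.
 */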
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __init
build_pte_present(u32 **p, struct label **l, struct reloc **r,
                  unsigned int pte, unsigned int ptr, enum label_id lid)
{
        i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
        i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
        il_bnez(p, r, pte, lid);
        iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
{
        i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        il_bnez(p, r, pte, lid);
        iPTE_LW(p, l, pte, ptr);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
                             | _PAGE_DIRTY);

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
                     unsigned int pte, unsigned int ptr, enum label_id lid)
{
        i_andi(p, pte, pte, _PAGE_WRITE);
        il_beqz(p, r, pte, lid);
        iPTE_LW(p, l, pte, ptr);
}
  1363. /*
  1364. * R3000 style TLB load/store/modify handlers.
  1365. */
  1366. /*
  1367. * This places the pte into ENTRYLO0 and writes it with tlbwi.
  1368. * Then it returns.
  1369. */
  1370. static void __init
  1371. build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
  1372. {
  1373. i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
  1374. i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
  1375. i_tlbwi(p);
  1376. i_jr(p, tmp);
  1377. i_rfe(p); /* branch delay */
  1378. }
  1379. /*
  1380. * This places the pte into ENTRYLO0 and writes it with tlbwi
  1381. * or tlbwr as appropriate. This is because the index register
  1382. * may have the probe fail bit set as a result of a trap on a
  1383. * kseg2 access, i.e. without refill. Then it returns.
  1384. */
  1385. static void __init
  1386. build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
  1387. unsigned int pte, unsigned int tmp)
  1388. {
  1389. i_mfc0(p, tmp, C0_INDEX);
  1390. i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
  1391. il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
  1392. i_mfc0(p, tmp, C0_EPC); /* branch delay */
  1393. i_tlbwi(p); /* cp0 delay */
  1394. i_jr(p, tmp);
  1395. i_rfe(p); /* branch delay */
  1396. l_r3000_write_probe_fail(l, *p);
  1397. i_tlbwr(p); /* cp0 delay */
  1398. i_jr(p, tmp);
  1399. i_rfe(p); /* branch delay */
  1400. }
  1401. static void __init
  1402. build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
  1403. unsigned int ptr)
  1404. {
  1405. long pgdc = (long)pgd_current;
  1406. i_mfc0(p, pte, C0_BADVADDR);
  1407. i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
  1408. i_lw(p, ptr, rel_lo(pgdc), ptr);
  1409. i_srl(p, pte, pte, 22); /* load delay */
  1410. i_sll(p, pte, pte, 2);
  1411. i_addu(p, ptr, ptr, pte);
  1412. i_mfc0(p, pte, C0_CONTEXT);
  1413. i_lw(p, ptr, 0, ptr); /* cp0 delay */
  1414. i_andi(p, pte, pte, 0xffc); /* load delay */
  1415. i_addu(p, ptr, ptr, pte);
  1416. i_lw(p, pte, 0, ptr);
  1417. i_tlbp(p); /* load delay */
  1418. }
  1419. static void __init build_r3000_tlb_load_handler(void)
  1420. {
  1421. u32 *p = handle_tlbl;
  1422. struct label *l = labels;
  1423. struct reloc *r = relocs;
  1424. memset(handle_tlbl, 0, sizeof(handle_tlbl));
  1425. memset(labels, 0, sizeof(labels));
  1426. memset(relocs, 0, sizeof(relocs));
  1427. build_r3000_tlbchange_handler_head(&p, K0, K1);
  1428. build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
  1429. i_nop(&p); /* load delay */
  1430. build_make_valid(&p, &r, K0, K1);
  1431. build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
  1432. l_nopage_tlbl(&l, p);
  1433. i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
  1434. i_nop(&p);
  1435. if ((p - handle_tlbl) > FASTPATH_SIZE)
  1436. panic("TLB load handler fastpath space exceeded");
  1437. resolve_relocs(relocs, labels);
  1438. pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
  1439. (unsigned int)(p - handle_tlbl));
  1440. dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
  1441. }

static void __init build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __init build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
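
/*
 * Locate the pte for the faulting address: fetch the pmd (64-bit)
 * or pgd (32-bit) into `ptr', index it with the pte offset derived
 * from BADVADDR and load the pte into `pte'.  The TLB is probed
 * here too, unless the M4KC workaround requires the probe to happen
 * after the pte checks instead.
 */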
static void __init
build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
				   struct reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	i_MFC0(p, pte, C0_BADVADDR);
	i_LW(p, ptr, 0, ptr);
	i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
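
/*
 * Finish a fastpath handler: align `ptr' back down to the even pte
 * of the even/odd pair (the ori/xori sequence clears the pte size
 * bit without needing a mask register), rewrite the EntryLo
 * registers, do an indexed TLB write and return from the exception.
 * On 64-bit kernels the out-of-line vmalloc handling is emitted
 * after the eret.
 */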
static void __init
build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
				   struct reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	i_ori(p, ptr, ptr, sizeof(pte_t));
	i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	l_leave(l, *p);
	i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
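
/*
 * As with the R3000 variants above, the three fastpaths share one
 * shape: head, pte check, pte update, tail.  Two workarounds are
 * folded in: M4KC cores defer the TLB probe until after the pte
 * check, and the load handler starts with the BCM1250 M3 check,
 * which compares the VPNs in BADVADDR and ENTRYHI and simply
 * returns if they disagree.
 */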
static void __init build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __init build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __init build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
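
/*
 * Main entry point, called once per CPU during bootstrap to
 * synthesize the handlers appropriate for its CPU type.
 */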
void __init build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it.  The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
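
/*
 * The handlers were stored through the data cache; flush the
 * corresponding icache ranges so instruction fetch sees the newly
 * synthesized code.
 */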
void __init flush_tlb_handlers(void)
{
	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}