/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>

#include "uasm.h"
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
#ifdef MODULE_START
	label_module_alloc,
#endif
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
#ifdef MODULE_START
UASM_L_LA(_module_alloc)
#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
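/*
 * For reference, the uasm calls above emit roughly the following
 * sequence (a sketch transcribed by hand from the calls, not dumped
 * from a running kernel; register and relocation spellings are
 * illustrative):
 *
 *	mfc0	k0, c0_badvaddr
 *	lui	k1, %hi(pgd_current)		# cp0 delay
 *	lw	k1, %lo(pgd_current)(k1)
 *	srl	k0, k0, 22			# load delay
 *	sll	k0, k0, 2
 *	addu	k1, k1, k0
 *	mfc0	k0, c0_context
 *	lw	k1, 0(k1)			# cp0 delay
 *	andi	k0, k0, 0xffc			# load delay
 *	addu	k1, k1, k0
 *	lw	k0, 0(k1)
 *	nop					# load delay
 *	mtc0	k0, c0_entrylo0
 *	mfc0	k1, c0_epc			# cp0 delay
 *	tlbwr					# cp0 delay
 *	jr	k1
 *	 rfe					# branch delay
 */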
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;
/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling
 * instruction and not the address held in C0_ENTRYHI and thus report the
 * wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it a NOP or some other instruction guaranteed not to stall.
 *
 * Erratum 2 will not be fixed; this erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BCM3302:
	case CPU_BCM4710:
	case CPU_LOONGSON2:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
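		/* fall through */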
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
#ifdef CONFIG_HUGETLB_PAGE
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	/* Reset default page size */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else {
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
	}
}
/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
	uasm_il_bnez(p, r, tmp, lid);
}
static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
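
	/*
	 * Worked example (a sketch, assuming 2 MB huge pages): each
	 * entrylo covers HPAGE_SIZE / 2, and entrylo values are ptes
	 * shifted right by 6, so the odd entrylo differs from the even
	 * one by (HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7 == 0x4000.
	 * That fits in 16 bits, so small_sequence is true and a single
	 * ADDIU below forms entrylo1 without clobbering tmp.
	 */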
	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
}
static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
	uasm_il_bltz(p, r, tmp, label_module_alloc);
#else
	uasm_il_bltz(p, r, tmp, label_vmalloc);
#endif
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
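/*
 * Worked example of the offset arithmetic above (a sketch; the exact
 * constants depend on the configured page size and table orders).
 * Assuming 4 KB pages with 8-byte entries: PMD_SHIFT == 21 and
 * PGDIR_SHIFT == 30, so for a faulting address va the pgd offset is
 * (va >> 30) << 3, computed here as va >> (PGDIR_SHIFT - 3) masked
 * with (PTRS_PER_PGD - 1) << 3, and likewise for the pmd level.
 * Shifting right by (SHIFT - 3) instead of shifting down and back up
 * saves an instruction per level.
 */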
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	uasm_l_module_alloc(l, *p);
	/*
	 * Assumption:
	 *   VMALLOC_START >= 0xc000000000000000UL
	 *   MODULE_START  >= 0xe000000000000000UL
	 */
	UASM_i_SLL(p, ptr, bvaddr, 2);
	uasm_il_bgez(p, r, ptr, label_vmalloc);

	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START)) {
		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		uasm_i_nop(p); /* delay slot */
		UASM_i_LA(p, ptr, MODULE_START);
	}
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
	} else {
		UASM_i_LA_mostly(p, ptr, modd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(modd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
	}

	uasm_l_vmalloc(l, *p);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		uasm_i_dsll32(p, ptr, ptr, 0);	/* typical case */
	else
		UASM_i_LA(p, ptr, VMALLOC_START);
#else
	uasm_l_vmalloc(l, *p);
	UASM_i_LA(p, ptr, VMALLOC_START);
#endif
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
# endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
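/*
 * Worked example (a sketch, assuming a 32-bit kernel with 4 KB pages
 * and 4-byte ptes, i.e. PTE_T_LOG2 == 2): shift == 4 - 3 + 12 - 12 == 1
 * and mask == (512 - 1) << 3 == 0xff8.  CONTEXT holds BadVPN2, i.e.
 * (badvaddr >> 13) << 4; one SRL by 1 turns that into the byte offset
 * of the 8-byte even/odd pte pair, and the mask confines it to the
 * page table's 4 KB.
 */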
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in different cache lines, or when a load instruction (probably
	 * any memory reference) sits between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32
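
/*
 * Illustrative layout of final_handler[] in the 64-bit case (a sketch
 * of how the code below uses it, not a hardware-defined format):
 *
 *	final_handler[ 0..31] -> ebase + 0x000: unused TLB refill
 *				 slots, available as overflow space
 *	final_handler[32..63] -> ebase + 0x080: XTLB refill vector,
 *				 where execution actually starts
 */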
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	UASM_i_LW(&p, K0, 0, K1);
	build_huge_update_entries(&p, K0, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler into the TLB refill handler space.
	 */
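	/*
	 * Sketch of the 64-bit folding below (informative only): the
	 * hot path is copied to final_handler + 32, i.e. the XTLB
	 * vector at ebase + 0x080.  If it doesn't fit in 32 slots, it
	 * is split at the first cold label (or two instructions from
	 * the end of the window) and the remainder is copied to
	 * final_handler[0], reached through the existing branch to
	 * that label or a branch inserted at the split point.
	 */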
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#elif defined(MODULE_START)
		const enum label_id ls = label_module_alloc;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
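/*
 * On SMP, iPTE_LW and iPTE_SW pair up as a ll/sc sequence.  In the
 * common configuration this emits roughly (hand-transcribed sketch of
 * the calls above, not authoritative disassembly):
 *
 *	ll	pte, 0(ptr)		# iPTE_LW: load pte, open link
 *	ori	pte, pte, mode		# set the new software bits
 *	sc	pte, 0(ptr)		# store iff nobody raced with us
 *	beqz	pte, smp_pgtable_change	# lost the race? redo from load
 *	 nop
 */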
/*
 * Check if PTE is present, if not then jump to LABEL.  PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
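/*
 * The andi/xori pair above tests that *both* bits are set without an
 * extra scratch register: andi keeps just the two flag bits, xori
 * then clears any that were set, so the result is nonzero (and the
 * bnez fires) exactly when at least one flag was missing.  E.g. a
 * present, readable pte yields 0 and falls through to the fastpath.
 */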
/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the _PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
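	/*
	 * Align ptr down to the even pte of the pair: ori sets the
	 * sizeof(pte_t) bit unconditionally, xori then clears it, so
	 * a pointer to the odd pte loses the bit and a pointer to the
	 * even pte is unchanged.
	 */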
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}