tlbex.c

  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Synthesize TLB refill handlers at runtime.
  7. *
  8. * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
  9. * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
  10. * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
  11. * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  12. * Copyright (C) 2011 MIPS Technologies, Inc.
  13. *
  14. * ... and the days got worse and worse and now you see
  15. * I've gone completely out of my mind.
  16. *
  17. * They're coming to take me a away haha
  18. * they're coming to take me a away hoho hihi haha
  19. * to the funny farm where code is beautiful all the time ...
  20. *
  21. * (Condolences to Napoleon XIV)
  22. */
  23. #include <linux/bug.h>
  24. #include <linux/kernel.h>
  25. #include <linux/types.h>
  26. #include <linux/smp.h>
  27. #include <linux/string.h>
  28. #include <linux/init.h>
  29. #include <linux/cache.h>
  30. #include <asm/cacheflush.h>
  31. #include <asm/pgtable.h>
  32. #include <asm/war.h>
  33. #include <asm/uasm.h>
  34. #include <asm/setup.h>
  35. /*
  36. * TLB load/store/modify handlers.
  37. *
  38. * Only the fastpath gets synthesized at runtime, the slowpath for
  39. * do_page_fault remains normal asm.
  40. */
  41. extern void tlb_do_page_fault_0(void);
  42. extern void tlb_do_page_fault_1(void);
  43. struct work_registers {
  44. int r1;
  45. int r2;
  46. int r3;
  47. };
  48. struct tlb_reg_save {
  49. unsigned long a;
  50. unsigned long b;
  51. } ____cacheline_aligned_in_smp;
  52. static struct tlb_reg_save handler_reg_save[NR_CPUS];
  53. static inline int r45k_bvahwbug(void)
  54. {
  55. /* XXX: We should probe for the presence of this bug, but we don't. */
  56. return 0;
  57. }
  58. static inline int r4k_250MHZhwbug(void)
  59. {
  60. /* XXX: We should probe for the presence of this bug, but we don't. */
  61. return 0;
  62. }
  63. static inline int __maybe_unused bcm1250_m3_war(void)
  64. {
  65. return BCM1250_M3_WAR;
  66. }
  67. static inline int __maybe_unused r10000_llsc_war(void)
  68. {
  69. return R10000_LLSC_WAR;
  70. }
  71. static int use_bbit_insns(void)
  72. {
  73. switch (current_cpu_type()) {
  74. case CPU_CAVIUM_OCTEON:
  75. case CPU_CAVIUM_OCTEON_PLUS:
  76. case CPU_CAVIUM_OCTEON2:
  77. return 1;
  78. default:
  79. return 0;
  80. }
  81. }
  82. static int use_lwx_insns(void)
  83. {
  84. switch (current_cpu_type()) {
  85. case CPU_CAVIUM_OCTEON2:
  86. return 1;
  87. default:
  88. return 0;
  89. }
  90. }
  91. #if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
  92. CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
  93. static bool scratchpad_available(void)
  94. {
  95. return true;
  96. }
  97. static int scratchpad_offset(int i)
  98. {
  99. /*
  100. * CVMSEG starts at address -32768 and extends for
  101. * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
  102. */
  103. i += 1; /* Kernel use starts at the top and works down. */
  104. return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
  105. }
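/*
 * For example, with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE = 2 the region
 * spans [-32768, -32512); scratchpad_offset(0) = 2*128 - 8 - 32768
 * = -32520, the topmost 8-byte slot, and each higher index moves one
 * 8-byte slot further down.
 */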
  106. #else
  107. static bool scratchpad_available(void)
  108. {
  109. return false;
  110. }
  111. static int scratchpad_offset(int i)
  112. {
  113. BUG();
  114. /* Really unreachable, but evidently some GCC versions want this. */
  115. return 0;
  116. }
  117. #endif
  118. /*
  119. * Found by experiment: At least some revisions of the 4kc, under some
  120. * circumstances, throw a machine check exception triggered by invalid
  121. * values in the index register. Delaying the tlbp instruction until
  122. * after the next branch, plus adding an additional nop in front of
  123. * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
  124. * why; it's not an issue caused by the core RTL.
  125. *
  126. */
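/*
 * All 4kc revisions are treated as affected: the 0xffff00 mask in
 * m4kc_tlbp_war() keeps the PRId company and implementation fields
 * but ignores the revision field.
 */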
  127. static int __cpuinit m4kc_tlbp_war(void)
  128. {
  129. return (current_cpu_data.processor_id & 0xffff00) ==
  130. (PRID_COMP_MIPS | PRID_IMP_4KC);
  131. }
  132. /* Handle labels (which must be positive integers). */
  133. enum label_id {
  134. label_second_part = 1,
  135. label_leave,
  136. label_vmalloc,
  137. label_vmalloc_done,
  138. label_tlbw_hazard_0,
  139. label_split = label_tlbw_hazard_0 + 8,
  140. label_tlbl_goaround1,
  141. label_tlbl_goaround2,
  142. label_nopage_tlbl,
  143. label_nopage_tlbs,
  144. label_nopage_tlbm,
  145. label_smp_pgtable_change,
  146. label_r3000_write_probe_fail,
  147. label_large_segbits_fault,
  148. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  149. label_tlb_huge_update,
  150. #endif
  151. };
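/*
 * Eight consecutive label values are reserved starting at
 * label_tlbw_hazard_0; uasm_bgezl_hazard()/uasm_bgezl_label() below
 * hand out one per call so that each R4000-style tlbw workaround
 * branch gets its own label.
 */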
  152. UASM_L_LA(_second_part)
  153. UASM_L_LA(_leave)
  154. UASM_L_LA(_vmalloc)
  155. UASM_L_LA(_vmalloc_done)
  156. /* _tlbw_hazard_x is handled differently. */
  157. UASM_L_LA(_split)
  158. UASM_L_LA(_tlbl_goaround1)
  159. UASM_L_LA(_tlbl_goaround2)
  160. UASM_L_LA(_nopage_tlbl)
  161. UASM_L_LA(_nopage_tlbs)
  162. UASM_L_LA(_nopage_tlbm)
  163. UASM_L_LA(_smp_pgtable_change)
  164. UASM_L_LA(_r3000_write_probe_fail)
  165. UASM_L_LA(_large_segbits_fault)
  166. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  167. UASM_L_LA(_tlb_huge_update)
  168. #endif
  169. static int __cpuinitdata hazard_instance;
  170. static void __cpuinit uasm_bgezl_hazard(u32 **p,
  171. struct uasm_reloc **r,
  172. int instance)
  173. {
  174. switch (instance) {
  175. case 0 ... 7:
  176. uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
  177. return;
  178. default:
  179. BUG();
  180. }
  181. }
  182. static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
  183. u32 **p,
  184. int instance)
  185. {
  186. switch (instance) {
  187. case 0 ... 7:
  188. uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
  189. break;
  190. default:
  191. BUG();
  192. }
  193. }
  194. /*
  195. * pgtable bits are assigned dynamically depending on processor feature
  196. * and statically based on kernel configuration. This spits out the actual
  197. * values the kernel is using. Required to make sense of the disassembled
  198. * TLB exception handlers.
  199. */
  200. static void output_pgtable_bits_defines(void)
  201. {
  202. #define pr_define(fmt, ...) \
  203. pr_debug("#define " fmt, ##__VA_ARGS__)
  204. pr_debug("#include <asm/asm.h>\n");
  205. pr_debug("#include <asm/regdef.h>\n");
  206. pr_debug("\n");
  207. pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
  208. pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
  209. pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
  210. pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
  211. pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
  212. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  213. pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
  214. pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
  215. #endif
  216. if (cpu_has_rixi) {
  217. #ifdef _PAGE_NO_EXEC_SHIFT
  218. pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
  219. #endif
  220. #ifdef _PAGE_NO_READ_SHIFT
  221. pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
  222. #endif
  223. }
  224. pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
  225. pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
  226. pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
  227. pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
  228. pr_debug("\n");
  229. }
  230. static inline void dump_handler(const char *symbol, const u32 *handler, int count)
  231. {
  232. int i;
  233. pr_debug("LEAF(%s)\n", symbol);
  234. pr_debug("\t.set push\n");
  235. pr_debug("\t.set noreorder\n");
  236. for (i = 0; i < count; i++)
  237. pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
  238. pr_debug("\t.set\tpop\n");
  239. pr_debug("\tEND(%s)\n", symbol);
  240. }
  241. /* The only general purpose registers allowed in TLB handlers. */
  242. #define K0 26
  243. #define K1 27
  244. /* Some CP0 registers */
  245. #define C0_INDEX 0, 0
  246. #define C0_ENTRYLO0 2, 0
  247. #define C0_TCBIND 2, 2
  248. #define C0_ENTRYLO1 3, 0
  249. #define C0_CONTEXT 4, 0
  250. #define C0_PAGEMASK 5, 0
  251. #define C0_BADVADDR 8, 0
  252. #define C0_ENTRYHI 10, 0
  253. #define C0_EPC 14, 0
  254. #define C0_XCONTEXT 20, 0
  255. #ifdef CONFIG_64BIT
  256. # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
  257. #else
  258. # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
  259. #endif
  260. /* The worst case length of the handler is around 18 instructions for
  261. * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
  262. * Maximum space available is 32 instructions for R3000 and 64
  263. * instructions for R4000.
  264. *
  265. * We deliberately chose a buffer size of 128, so we won't scribble
  266. * over anything important on overflow before we panic.
  267. */
  268. static u32 tlb_handler[128] __cpuinitdata;
  269. /* simply assume worst case size for labels and relocs */
  270. static struct uasm_label labels[128] __cpuinitdata;
  271. static struct uasm_reloc relocs[128] __cpuinitdata;
  275. static int check_for_high_segbits __cpuinitdata;
  276. static unsigned int kscratch_used_mask __cpuinitdata;
  277. static int __cpuinit allocate_kscratch(void)
  278. {
  279. int r;
  280. unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
  281. r = ffs(a);
  282. if (r == 0)
  283. return -1;
  284. r--; /* make it zero based */
  285. kscratch_used_mask |= (1 << r);
  286. return r;
  287. }
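/*
 * For example, if cpu_data[0].kscratch_mask is 0x0c (KScratch selects
 * 2 and 3 implemented), the first call returns 2 and marks it used,
 * the second returns 3, and any further call returns -1.
 */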
  288. static int scratch_reg __cpuinitdata;
  289. static int pgd_reg __cpuinitdata;
  290. enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
  291. static struct work_registers __cpuinit build_get_work_registers(u32 **p)
  292. {
  293. struct work_registers r;
  294. int smp_processor_id_reg;
  295. int smp_processor_id_sel;
  296. int smp_processor_id_shift;
  297. if (scratch_reg > 0) {
  298. /* Save in CPU local C0_KScratch? */
  299. UASM_i_MTC0(p, 1, 31, scratch_reg);
  300. r.r1 = K0;
  301. r.r2 = K1;
  302. r.r3 = 1;
  303. return r;
  304. }
  305. if (num_possible_cpus() > 1) {
  306. #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
  307. smp_processor_id_shift = 51;
  308. smp_processor_id_reg = 20; /* XContext */
  309. smp_processor_id_sel = 0;
  310. #else
  311. # ifdef CONFIG_32BIT
  312. smp_processor_id_shift = 25;
  313. smp_processor_id_reg = 4; /* Context */
  314. smp_processor_id_sel = 0;
  315. # endif
  316. # ifdef CONFIG_64BIT
  317. smp_processor_id_shift = 26;
  318. smp_processor_id_reg = 4; /* Context */
  319. smp_processor_id_sel = 0;
  320. # endif
  321. #endif
  322. /* Get smp_processor_id */
  323. UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
  324. UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
  325. /* handler_reg_save index in K0 */
  326. UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
  327. UASM_i_LA(p, K1, (long)&handler_reg_save);
  328. UASM_i_ADDU(p, K0, K0, K1);
  329. } else {
  330. UASM_i_LA(p, K0, (long)&handler_reg_save);
  331. }
  332. /* K0 now points to save area, save $1 and $2 */
  333. UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
  334. UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
  335. r.r1 = K1;
  336. r.r2 = 1;
  337. r.r3 = 2;
  338. return r;
  339. }
  340. static void __cpuinit build_restore_work_registers(u32 **p)
  341. {
  342. if (scratch_reg > 0) {
  343. UASM_i_MFC0(p, 1, 31, scratch_reg);
  344. return;
  345. }
  346. /* K0 already points to save area, restore $1 and $2 */
  347. UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
  348. UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
  349. }
  350. #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
  351. /*
  352. * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
  353. * we cannot do r3000 under these circumstances.
  354. *
  355. * Declare pgd_current here instead of including mmu_context.h to avoid type
  356. * conflicts for tlbmiss_handler_setup_pgd
  357. */
  358. extern unsigned long pgd_current[];
  359. /*
  360. * The R3000 TLB handler is simple.
  361. */
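/*
 * It is a plain two-level walk done entirely in K0/K1: BadVAddr bits
 * 31..22 index the pgd (1024 four-byte entries with the R3000's 4 KB
 * pages), the low bits of c0_context supply the PTE offset, and the
 * PTE goes straight into EntryLo0 before tlbwr.  The nops and the
 * instruction ordering only exist to satisfy the R3000 cp0 and load
 * delay slots noted below.
 */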
  362. static void __cpuinit build_r3000_tlb_refill_handler(void)
  363. {
  364. long pgdc = (long)pgd_current;
  365. u32 *p;
  366. memset(tlb_handler, 0, sizeof(tlb_handler));
  367. p = tlb_handler;
  368. uasm_i_mfc0(&p, K0, C0_BADVADDR);
  369. uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
  370. uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
  371. uasm_i_srl(&p, K0, K0, 22); /* load delay */
  372. uasm_i_sll(&p, K0, K0, 2);
  373. uasm_i_addu(&p, K1, K1, K0);
  374. uasm_i_mfc0(&p, K0, C0_CONTEXT);
  375. uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
  376. uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
  377. uasm_i_addu(&p, K1, K1, K0);
  378. uasm_i_lw(&p, K0, 0, K1);
  379. uasm_i_nop(&p); /* load delay */
  380. uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
  381. uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
  382. uasm_i_tlbwr(&p); /* cp0 delay */
  383. uasm_i_jr(&p, K1);
  384. uasm_i_rfe(&p); /* branch delay */
  385. if (p > tlb_handler + 32)
  386. panic("TLB refill handler space exceeded");
  387. pr_debug("Wrote TLB refill handler (%u instructions).\n",
  388. (unsigned int)(p - tlb_handler));
  389. memcpy((void *)ebase, tlb_handler, 0x80);
  390. dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
  391. }
  392. #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
  393. /*
  394. * The R4000 TLB handler is much more complicated. We have two
  395. * consecutive handler areas with 32 instructions space each.
  396. * Since they aren't used at the same time, we can overflow into the
  397. * other one. To keep things simple, we first assume linear space,
  398. * then we relocate it to the final handler layout as needed.
  399. */
  400. static u32 final_handler[64] __cpuinitdata;
  401. /*
  402. * Hazards
  403. *
  404. * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
  405. * 2. A timing hazard exists for the TLBP instruction.
  406. *
  407. * stalling_instruction
  408. * TLBP
  409. *
  410. * The JTLB is being read for the TLBP throughout the stall generated by the
  411. * previous instruction. This is not really correct as the stalling instruction
  412. * can modify the address used to access the JTLB. The failure symptom is that
  413. * the TLBP instruction will use an address created for the stalling instruction
  414. * and not the address held in C0_ENHI and thus report the wrong results.
  415. *
  416. * The software work-around is to not allow the instruction preceding the TLBP
  417. * to stall - make it an NOP or some other instruction guaranteed not to stall.
  418. *
  419. * Errata 2 will not be fixed. This errata is also on the R5000.
  420. *
  421. * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  422. */
  423. static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
  424. {
  425. switch (current_cpu_type()) {
  426. /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
  427. case CPU_R4600:
  428. case CPU_R4700:
  429. case CPU_R5000:
  430. case CPU_NEVADA:
  431. uasm_i_nop(p);
  432. uasm_i_tlbp(p);
  433. break;
  434. default:
  435. uasm_i_tlbp(p);
  436. break;
  437. }
  438. }
  439. /*
  440. * Write random or indexed TLB entry, and care about the hazards from
  441. * the preceding mtc0 and for the following eret.
  442. */
  443. enum tlb_write_entry { tlb_random, tlb_indexed };
  444. static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
  445. struct uasm_reloc **r,
  446. enum tlb_write_entry wmode)
  447. {
  448. void(*tlbw)(u32 **) = NULL;
  449. switch (wmode) {
  450. case tlb_random: tlbw = uasm_i_tlbwr; break;
  451. case tlb_indexed: tlbw = uasm_i_tlbwi; break;
  452. }
  453. if (cpu_has_mips_r2) {
  454. /*
  455. * The architecture spec says an ehb is required here,
  456. * but a number of cores do not have the hazard and
  457. * using an ehb causes an expensive pipeline stall.
  458. */
  459. switch (current_cpu_type()) {
  460. case CPU_M14KC:
  461. case CPU_74K:
  462. break;
  463. default:
  464. uasm_i_ehb(p);
  465. break;
  466. }
  467. tlbw(p);
  468. return;
  469. }
  470. switch (current_cpu_type()) {
  471. case CPU_R4000PC:
  472. case CPU_R4000SC:
  473. case CPU_R4000MC:
  474. case CPU_R4400PC:
  475. case CPU_R4400SC:
  476. case CPU_R4400MC:
  477. /*
  478. * This branch uses up a mtc0 hazard nop slot and saves
  479. * two nops after the tlbw instruction.
  480. */
  481. uasm_bgezl_hazard(p, r, hazard_instance);
  482. tlbw(p);
  483. uasm_bgezl_label(l, p, hazard_instance);
  484. hazard_instance++;
  485. uasm_i_nop(p);
  486. break;
  487. case CPU_R4600:
  488. case CPU_R4700:
  489. uasm_i_nop(p);
  490. tlbw(p);
  491. uasm_i_nop(p);
  492. break;
  493. case CPU_R5000:
  494. case CPU_NEVADA:
  495. uasm_i_nop(p); /* QED specifies 2 nops hazard */
  496. uasm_i_nop(p); /* QED specifies 2 nops hazard */
  497. tlbw(p);
  498. break;
  499. case CPU_R4300:
  500. case CPU_5KC:
  501. case CPU_TX49XX:
  502. case CPU_PR4450:
  503. case CPU_XLR:
  504. uasm_i_nop(p);
  505. tlbw(p);
  506. break;
  507. case CPU_R10000:
  508. case CPU_R12000:
  509. case CPU_R14000:
  510. case CPU_4KC:
  511. case CPU_4KEC:
  512. case CPU_M14KC:
  513. case CPU_SB1:
  514. case CPU_SB1A:
  515. case CPU_4KSC:
  516. case CPU_20KC:
  517. case CPU_25KF:
  518. case CPU_BMIPS32:
  519. case CPU_BMIPS3300:
  520. case CPU_BMIPS4350:
  521. case CPU_BMIPS4380:
  522. case CPU_BMIPS5000:
  523. case CPU_LOONGSON2:
  524. case CPU_R5500:
  525. if (m4kc_tlbp_war())
  526. uasm_i_nop(p);
  527. case CPU_ALCHEMY:
  528. tlbw(p);
  529. break;
  530. case CPU_RM7000:
  531. uasm_i_nop(p);
  532. uasm_i_nop(p);
  533. uasm_i_nop(p);
  534. uasm_i_nop(p);
  535. tlbw(p);
  536. break;
  537. case CPU_VR4111:
  538. case CPU_VR4121:
  539. case CPU_VR4122:
  540. case CPU_VR4181:
  541. case CPU_VR4181A:
  542. uasm_i_nop(p);
  543. uasm_i_nop(p);
  544. tlbw(p);
  545. uasm_i_nop(p);
  546. uasm_i_nop(p);
  547. break;
  548. case CPU_VR4131:
  549. case CPU_VR4133:
  550. case CPU_R5432:
  551. uasm_i_nop(p);
  552. uasm_i_nop(p);
  553. tlbw(p);
  554. break;
  555. case CPU_JZRISC:
  556. tlbw(p);
  557. uasm_i_nop(p);
  558. break;
  559. default:
  560. panic("No TLB refill handler yet (CPU type: %d)",
  561. current_cpu_data.cputype);
  562. break;
  563. }
  564. }
  565. static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
  566. unsigned int reg)
  567. {
  568. if (cpu_has_rixi) {
  569. UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
  570. } else {
  571. #ifdef CONFIG_64BIT_PHYS_ADDR
  572. uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
  573. #else
  574. UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
  575. #endif
  576. }
  577. }
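/*
 * With RIXI the software PTE keeps _PAGE_NO_EXEC/_PAGE_NO_READ just
 * below _PAGE_GLOBAL, so rotating right by ilog2(_PAGE_GLOBAL) puts
 * the global bit at EntryLo bit 0 and wraps those two bits around
 * into the RI/XI positions at the top of EntryLo.  Without RIXI a
 * plain right shift is enough.
 */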
  578. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  579. static __cpuinit void build_restore_pagemask(u32 **p,
  580. struct uasm_reloc **r,
  581. unsigned int tmp,
  582. enum label_id lid,
  583. int restore_scratch)
  584. {
  585. if (restore_scratch) {
  586. /* Reset default page size */
  587. if (PM_DEFAULT_MASK >> 16) {
  588. uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
  589. uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
  590. uasm_i_mtc0(p, tmp, C0_PAGEMASK);
  591. uasm_il_b(p, r, lid);
  592. } else if (PM_DEFAULT_MASK) {
  593. uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
  594. uasm_i_mtc0(p, tmp, C0_PAGEMASK);
  595. uasm_il_b(p, r, lid);
  596. } else {
  597. uasm_i_mtc0(p, 0, C0_PAGEMASK);
  598. uasm_il_b(p, r, lid);
  599. }
  600. if (scratch_reg > 0)
  601. UASM_i_MFC0(p, 1, 31, scratch_reg);
  602. else
  603. UASM_i_LW(p, 1, scratchpad_offset(0), 0);
  604. } else {
  605. /* Reset default page size */
  606. if (PM_DEFAULT_MASK >> 16) {
  607. uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
  608. uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
  609. uasm_il_b(p, r, lid);
  610. uasm_i_mtc0(p, tmp, C0_PAGEMASK);
  611. } else if (PM_DEFAULT_MASK) {
  612. uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
  613. uasm_il_b(p, r, lid);
  614. uasm_i_mtc0(p, tmp, C0_PAGEMASK);
  615. } else {
  616. uasm_il_b(p, r, lid);
  617. uasm_i_mtc0(p, 0, C0_PAGEMASK);
  618. }
  619. }
  620. }
  621. static __cpuinit void build_huge_tlb_write_entry(u32 **p,
  622. struct uasm_label **l,
  623. struct uasm_reloc **r,
  624. unsigned int tmp,
  625. enum tlb_write_entry wmode,
  626. int restore_scratch)
  627. {
  628. /* Set huge page tlb entry size */
  629. uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
  630. uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
  631. uasm_i_mtc0(p, tmp, C0_PAGEMASK);
  632. build_tlb_write_entry(p, l, r, wmode);
  633. build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
  634. }
  635. /*
  636. * Check if Huge PTE is present, if so then jump to LABEL.
  637. */
  638. static void __cpuinit
  639. build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
  640. unsigned int pmd, int lid)
  641. {
  642. UASM_i_LW(p, tmp, 0, pmd);
  643. if (use_bbit_insns()) {
  644. uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
  645. } else {
  646. uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
  647. uasm_il_bnez(p, r, tmp, lid);
  648. }
  649. }
  650. static __cpuinit void build_huge_update_entries(u32 **p,
  651. unsigned int pte,
  652. unsigned int tmp)
  653. {
  654. int small_sequence;
  655. /*
  656. * A huge PTE describes an area the size of the
  657. * configured huge page size. This is twice the
  658. * size of the large TLB entry we intend to use.
  659. * A TLB entry half the size of the configured
  660. * huge page size is configured into entrylo0
  661. * and entrylo1 to cover the contiguous huge PTE
  662. * address space.
  663. */
  664. small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
  665. /* We can clobber tmp. It isn't used after this.*/
  666. if (!small_sequence)
  667. uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
  668. build_convert_pte_to_entrylo(p, pte);
  669. UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
  670. /* convert to entrylo1 */
  671. if (small_sequence)
  672. UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
  673. else
  674. UASM_i_ADDU(p, pte, pte, tmp);
  675. UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
  676. }
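/*
 * For example, with 4 KB base pages and 2 MB huge pages,
 * HPAGE_SIZE >> 7 = 0x4000: half the huge page expressed in EntryLo
 * PFN units (the PFN field starts at bit 6 and counts 4 KB frames).
 * It fits in 16 bits, so the single-addiu small_sequence path is used
 * and entrylo1 = entrylo0 + 0x4000.
 */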
  677. static __cpuinit void build_huge_handler_tail(u32 **p,
  678. struct uasm_reloc **r,
  679. struct uasm_label **l,
  680. unsigned int pte,
  681. unsigned int ptr)
  682. {
  683. #ifdef CONFIG_SMP
  684. UASM_i_SC(p, pte, 0, ptr);
  685. uasm_il_beqz(p, r, pte, label_tlb_huge_update);
  686. UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
  687. #else
  688. UASM_i_SW(p, pte, 0, ptr);
  689. #endif
  690. build_huge_update_entries(p, pte, ptr);
  691. build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
  692. }
  693. #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
  694. #ifdef CONFIG_64BIT
  695. /*
  696. * TMP and PTR are scratch.
  697. * TMP will be clobbered, PTR will hold the pmd entry.
  698. */
  699. static void __cpuinit
  700. build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  701. unsigned int tmp, unsigned int ptr)
  702. {
  703. #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
  704. long pgdc = (long)pgd_current;
  705. #endif
  706. /*
  707. * The vmalloc handling is not in the hotpath.
  708. */
  709. uasm_i_dmfc0(p, tmp, C0_BADVADDR);
  710. if (check_for_high_segbits) {
  711. /*
  712. * The kernel currently implicitly assumes that the
  713. * MIPS SEGBITS parameter for the processor is
  714. * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
  715. * allocate virtual addresses outside the maximum
  716. * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
  717. * that doesn't prevent user code from accessing the
  718. * higher xuseg addresses. Here, we make sure that
  719. * everything but the lower xuseg addresses goes down
  720. * the module_alloc/vmalloc path.
  721. */
  722. uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
  723. uasm_il_bnez(p, r, ptr, label_vmalloc);
  724. } else {
  725. uasm_il_bltz(p, r, tmp, label_vmalloc);
  726. }
  727. /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
  728. #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
  729. if (pgd_reg != -1) {
  730. /* pgd is in pgd_reg */
  731. UASM_i_MFC0(p, ptr, 31, pgd_reg);
  732. } else {
  733. /*
  734. * &pgd << 11 stored in CONTEXT [23..63].
  735. */
  736. UASM_i_MFC0(p, ptr, C0_CONTEXT);
  737. /* Clear lower 23 bits of context. */
  738. uasm_i_dins(p, ptr, 0, 0, 23);
  739. /* 1 0 1 0 1 << 6 xkphys cached */
  740. uasm_i_ori(p, ptr, ptr, 0x540);
  741. uasm_i_drotr(p, ptr, ptr, 11);
  742. }
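/*
 * CONTEXT here holds the pgd pointer (converted to a physical address
 * by the setup_pgd helper) shifted left by 11.  After the low 23 bits
 * are cleared, ori 0x540 sets bits 6, 8 and 10; drotr by 11 moves them
 * to bits 63, 61 and 59 ("1 0 1 0 1") while shifting the pointer back
 * down, so ptr ends up with the pgd's xkphys cached address without
 * any memory access.
 */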
  743. #elif defined(CONFIG_SMP)
  744. # ifdef CONFIG_MIPS_MT_SMTC
  745. /*
  746. * SMTC uses TCBind value as "CPU" index
  747. */
  748. uasm_i_mfc0(p, ptr, C0_TCBIND);
  749. uasm_i_dsrl_safe(p, ptr, ptr, 19);
  750. # else
  751. /*
  752. * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
  753. * stored in CONTEXT.
  754. */
  755. uasm_i_dmfc0(p, ptr, C0_CONTEXT);
  756. uasm_i_dsrl_safe(p, ptr, ptr, 23);
  757. # endif
  758. UASM_i_LA_mostly(p, tmp, pgdc);
  759. uasm_i_daddu(p, ptr, ptr, tmp);
  760. uasm_i_dmfc0(p, tmp, C0_BADVADDR);
  761. uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
  762. #else
  763. UASM_i_LA_mostly(p, ptr, pgdc);
  764. uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
  765. #endif
  766. uasm_l_vmalloc_done(l, *p);
  767. /* get pgd offset in bytes */
  768. uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);
  769. uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
  770. uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
  771. #ifndef __PAGETABLE_PMD_FOLDED
  772. uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
  773. uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
  774. uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
  775. uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
  776. uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
  777. #endif
  778. }
  779. /*
  780. * BVADDR is the faulting address, PTR is scratch.
  781. * PTR will hold the pgd for vmalloc.
  782. */
  783. static void __cpuinit
  784. build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  785. unsigned int bvaddr, unsigned int ptr,
  786. enum vmalloc64_mode mode)
  787. {
  788. long swpd = (long)swapper_pg_dir;
  789. int single_insn_swpd;
  790. int did_vmalloc_branch = 0;
  791. single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);
  792. uasm_l_vmalloc(l, *p);
  793. if (mode != not_refill && check_for_high_segbits) {
  794. if (single_insn_swpd) {
  795. uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
  796. uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
  797. did_vmalloc_branch = 1;
  798. /* fall through */
  799. } else {
  800. uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
  801. }
  802. }
  803. if (!did_vmalloc_branch) {
  804. if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
  805. uasm_il_b(p, r, label_vmalloc_done);
  806. uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
  807. } else {
  808. UASM_i_LA_mostly(p, ptr, swpd);
  809. uasm_il_b(p, r, label_vmalloc_done);
  810. if (uasm_in_compat_space_p(swpd))
  811. uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
  812. else
  813. uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
  814. }
  815. }
  816. if (mode != not_refill && check_for_high_segbits) {
  817. uasm_l_large_segbits_fault(l, *p);
  818. /*
  819. * We get here if we are an xsseg address, or if we are
  820. * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
  821. *
  822. * Ignoring xsseg (assume it is disabled, so it would generate
  823. * address errors), the only remaining possibility
  824. * is the upper xuseg addresses. On processors with
  825. * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
  826. * addresses would have taken an address error. We try
  827. * to mimic that here by taking a load/istream page
  828. * fault.
  829. */
  830. UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
  831. uasm_i_jr(p, ptr);
  832. if (mode == refill_scratch) {
  833. if (scratch_reg > 0)
  834. UASM_i_MFC0(p, 1, 31, scratch_reg);
  835. else
  836. UASM_i_LW(p, 1, scratchpad_offset(0), 0);
  837. } else {
  838. uasm_i_nop(p);
  839. }
  840. }
  841. }
  842. #else /* !CONFIG_64BIT */
  843. /*
  844. * TMP and PTR are scratch.
  845. * TMP will be clobbered, PTR will hold the pgd entry.
  846. */
  847. static void __cpuinit __maybe_unused
  848. build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
  849. {
  850. long pgdc = (long)pgd_current;
  851. /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
  852. #ifdef CONFIG_SMP
  853. #ifdef CONFIG_MIPS_MT_SMTC
  854. /*
  855. * SMTC uses TCBind value as "CPU" index
  856. */
  857. uasm_i_mfc0(p, ptr, C0_TCBIND);
  858. UASM_i_LA_mostly(p, tmp, pgdc);
  859. uasm_i_srl(p, ptr, ptr, 19);
  860. #else
  861. /*
  862. * smp_processor_id() << 3 is stored in CONTEXT.
  863. */
  864. uasm_i_mfc0(p, ptr, C0_CONTEXT);
  865. UASM_i_LA_mostly(p, tmp, pgdc);
  866. uasm_i_srl(p, ptr, ptr, 23);
  867. #endif
  868. uasm_i_addu(p, ptr, tmp, ptr);
  869. #else
  870. UASM_i_LA_mostly(p, ptr, pgdc);
  871. #endif
  872. uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
  873. uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
  874. if (cpu_has_mips_r2) {
  875. uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
  876. uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
  877. return;
  878. }
  879. uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
  880. uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
  881. uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
  882. }
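/*
 * On MIPS R2 the ext/ins pair above builds the pgd entry address in
 * two instructions: ext pulls the pgd index out of BadVAddr and ins
 * writes it into ptr at bit PGD_T_LOG2, i.e. effectively
 * ptr = pgd_base + index * sizeof(pgd_t), relying on the pgd being
 * aligned so that the inserted field only overwrites zero bits.
 */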
  883. #endif /* !CONFIG_64BIT */
  884. static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
  885. {
  886. unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
  887. unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
  888. switch (current_cpu_type()) {
  889. case CPU_VR41XX:
  890. case CPU_VR4111:
  891. case CPU_VR4121:
  892. case CPU_VR4122:
  893. case CPU_VR4131:
  894. case CPU_VR4181:
  895. case CPU_VR4181A:
  896. case CPU_VR4133:
  897. shift += 2;
  898. break;
  899. default:
  900. break;
  901. }
  902. if (shift)
  903. UASM_i_SRL(p, ctx, ctx, shift);
  904. uasm_i_andi(p, ctx, ctx, mask);
  905. }
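/*
 * Context.BadVPN2 starts at bit 4 and counts even/odd page pairs, and
 * the byte offset of a PTE pair is pair_index * 2 * sizeof(pte_t).
 * With 4 KB pages and 32-bit PTEs (PTE_T_LOG2 == 2) this gives
 * shift = 4 - 3 + 12 - 12 = 1 and mask = 0xff8, i.e. effectively
 * ctx = (c0_context >> 1) & 0xff8.
 */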
  906. static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
  907. {
  908. if (cpu_has_mips_r2) {
  909. /* PTE ptr offset is obtained from BadVAddr */
  910. UASM_i_MFC0(p, tmp, C0_BADVADDR);
  911. UASM_i_LW(p, ptr, 0, ptr);
  912. uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
  913. uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
  914. return;
  915. }
  916. /*
  917. * Bug workaround for the Nevada. It seems as if under certain
  918. * circumstances the move from cp0_context might produce a
  919. * bogus result when the mfc0 instruction and its consumer are
  920. * in a different cacheline or a load instruction, probably any
  921. * memory reference, is between them.
  922. */
  923. switch (current_cpu_type()) {
  924. case CPU_NEVADA:
  925. UASM_i_LW(p, ptr, 0, ptr);
  926. GET_CONTEXT(p, tmp); /* get context reg */
  927. break;
  928. default:
  929. GET_CONTEXT(p, tmp); /* get context reg */
  930. UASM_i_LW(p, ptr, 0, ptr);
  931. break;
  932. }
  933. build_adjust_context(p, tmp);
  934. UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
  935. }
  936. static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
  937. unsigned int ptep)
  938. {
  939. /*
  940. * 64bit address support (36bit on a 32bit CPU) in a 32bit
  941. * kernel is a special case. Only a few CPUs use it.
  942. */
  943. #ifdef CONFIG_64BIT_PHYS_ADDR
  944. if (cpu_has_64bits) {
  945. uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
  946. uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
  947. if (cpu_has_rixi) {
  948. UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
  949. UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
  950. UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
  951. } else {
  952. uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
  953. UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
  954. uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
  955. }
  956. UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
  957. } else {
  958. int pte_off_even = sizeof(pte_t) / 2;
  959. int pte_off_odd = pte_off_even + sizeof(pte_t);
  960. /* The pte entries are pre-shifted */
  961. uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
  962. UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
  963. uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
  964. UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
  965. }
  966. #else
  967. UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
  968. UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
  969. if (r45k_bvahwbug())
  970. build_tlb_probe_entry(p);
  971. if (cpu_has_rixi) {
  972. UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
  973. if (r4k_250MHZhwbug())
  974. UASM_i_MTC0(p, 0, C0_ENTRYLO0);
  975. UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
  976. UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
  977. } else {
  978. UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
  979. if (r4k_250MHZhwbug())
  980. UASM_i_MTC0(p, 0, C0_ENTRYLO0);
  981. UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
  982. UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
  983. if (r45k_bvahwbug())
  984. uasm_i_mfc0(p, tmp, C0_INDEX);
  985. }
  986. if (r4k_250MHZhwbug())
  987. UASM_i_MTC0(p, 0, C0_ENTRYLO1);
  988. UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
  989. #endif
  990. }
  991. struct mips_huge_tlb_info {
  992. int huge_pte;
  993. int restore_scratch;
  994. };
  995. static struct mips_huge_tlb_info __cpuinit
  996. build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
  997. struct uasm_reloc **r, unsigned int tmp,
  998. unsigned int ptr, int c0_scratch)
  999. {
  1000. struct mips_huge_tlb_info rv;
  1001. unsigned int even, odd;
  1002. int vmalloc_branch_delay_filled = 0;
  1003. const int scratch = 1; /* Our extra working register */
  1004. rv.huge_pte = scratch;
  1005. rv.restore_scratch = 0;
  1006. if (check_for_high_segbits) {
  1007. UASM_i_MFC0(p, tmp, C0_BADVADDR);
  1008. if (pgd_reg != -1)
  1009. UASM_i_MFC0(p, ptr, 31, pgd_reg);
  1010. else
  1011. UASM_i_MFC0(p, ptr, C0_CONTEXT);
  1012. if (c0_scratch >= 0)
  1013. UASM_i_MTC0(p, scratch, 31, c0_scratch);
  1014. else
  1015. UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
  1016. uasm_i_dsrl_safe(p, scratch, tmp,
  1017. PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
  1018. uasm_il_bnez(p, r, scratch, label_vmalloc);
  1019. if (pgd_reg == -1) {
  1020. vmalloc_branch_delay_filled = 1;
  1021. /* Clear lower 23 bits of context. */
  1022. uasm_i_dins(p, ptr, 0, 0, 23);
  1023. }
  1024. } else {
  1025. if (pgd_reg != -1)
  1026. UASM_i_MFC0(p, ptr, 31, pgd_reg);
  1027. else
  1028. UASM_i_MFC0(p, ptr, C0_CONTEXT);
  1029. UASM_i_MFC0(p, tmp, C0_BADVADDR);
  1030. if (c0_scratch >= 0)
  1031. UASM_i_MTC0(p, scratch, 31, c0_scratch);
  1032. else
  1033. UASM_i_SW(p, scratch, scratchpad_offset(0), 0);
  1034. if (pgd_reg == -1)
  1035. /* Clear lower 23 bits of context. */
  1036. uasm_i_dins(p, ptr, 0, 0, 23);
  1037. uasm_il_bltz(p, r, tmp, label_vmalloc);
  1038. }
  1039. if (pgd_reg == -1) {
  1040. vmalloc_branch_delay_filled = 1;
  1041. /* 1 0 1 0 1 << 6 xkphys cached */
  1042. uasm_i_ori(p, ptr, ptr, 0x540);
  1043. uasm_i_drotr(p, ptr, ptr, 11);
  1044. }
  1045. #ifdef __PAGETABLE_PMD_FOLDED
  1046. #define LOC_PTEP scratch
  1047. #else
  1048. #define LOC_PTEP ptr
  1049. #endif
  1050. if (!vmalloc_branch_delay_filled)
  1051. /* get pgd offset in bytes */
  1052. uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
  1053. uasm_l_vmalloc_done(l, *p);
  1054. /*
  1055. * tmp ptr
  1056. * fall-through case = badvaddr *pgd_current
  1057. * vmalloc case = badvaddr swapper_pg_dir
  1058. */
  1059. if (vmalloc_branch_delay_filled)
  1060. /* get pgd offset in bytes */
  1061. uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);
  1062. #ifdef __PAGETABLE_PMD_FOLDED
  1063. GET_CONTEXT(p, tmp); /* get context reg */
  1064. #endif
  1065. uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);
  1066. if (use_lwx_insns()) {
  1067. UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
  1068. } else {
  1069. uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
  1070. uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
  1071. }
  1072. #ifndef __PAGETABLE_PMD_FOLDED
  1073. /* get pmd offset in bytes */
  1074. uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
  1075. uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
  1076. GET_CONTEXT(p, tmp); /* get context reg */
  1077. if (use_lwx_insns()) {
  1078. UASM_i_LWX(p, scratch, scratch, ptr);
  1079. } else {
  1080. uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
  1081. UASM_i_LW(p, scratch, 0, ptr);
  1082. }
  1083. #endif
  1084. /* Adjust the context during the load latency. */
  1085. build_adjust_context(p, tmp);
  1086. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  1087. uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
  1088. /*
  1089. * In the LWX case we don't want to do the load in the
  1090. * delay slot. It cannot issue in the same cycle and may be
  1091. * speculative and unneeded.
  1092. */
  1093. if (use_lwx_insns())
  1094. uasm_i_nop(p);
  1095. #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
  1096. /* build_update_entries */
  1097. if (use_lwx_insns()) {
  1098. even = ptr;
  1099. odd = tmp;
  1100. UASM_i_LWX(p, even, scratch, tmp);
  1101. UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
  1102. UASM_i_LWX(p, odd, scratch, tmp);
  1103. } else {
  1104. UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
  1105. even = tmp;
  1106. odd = ptr;
  1107. UASM_i_LW(p, even, 0, ptr); /* get even pte */
  1108. UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
  1109. }
  1110. if (cpu_has_rixi) {
  1111. uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
  1112. UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
  1113. uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
  1114. } else {
  1115. uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
  1116. UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
  1117. uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
  1118. }
  1119. UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */
  1120. if (c0_scratch >= 0) {
  1121. UASM_i_MFC0(p, scratch, 31, c0_scratch);
  1122. build_tlb_write_entry(p, l, r, tlb_random);
  1123. uasm_l_leave(l, *p);
  1124. rv.restore_scratch = 1;
  1125. } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
  1126. build_tlb_write_entry(p, l, r, tlb_random);
  1127. uasm_l_leave(l, *p);
  1128. UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
  1129. } else {
  1130. UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
  1131. build_tlb_write_entry(p, l, r, tlb_random);
  1132. uasm_l_leave(l, *p);
  1133. rv.restore_scratch = 1;
  1134. }
  1135. uasm_i_eret(p); /* return from trap */
  1136. return rv;
  1137. }
  1138. /*
  1139. * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
  1140. * because EXL == 0. If we wrap, we can also use the 32 instruction
  1141. * slots before the XTLB refill exception handler which belong to the
  1142. * unused TLB refill exception.
  1143. */
  1144. #define MIPS64_REFILL_INSNS 32
  1145. static void __cpuinit build_r4000_tlb_refill_handler(void)
  1146. {
  1147. u32 *p = tlb_handler;
  1148. struct uasm_label *l = labels;
  1149. struct uasm_reloc *r = relocs;
  1150. u32 *f;
  1151. unsigned int final_len;
  1152. struct mips_huge_tlb_info htlb_info __maybe_unused;
  1153. enum vmalloc64_mode vmalloc_mode __maybe_unused;
  1154. memset(tlb_handler, 0, sizeof(tlb_handler));
  1155. memset(labels, 0, sizeof(labels));
  1156. memset(relocs, 0, sizeof(relocs));
  1157. memset(final_handler, 0, sizeof(final_handler));
  1158. if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
  1159. htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
  1160. scratch_reg);
  1161. vmalloc_mode = refill_scratch;
  1162. } else {
  1163. htlb_info.huge_pte = K0;
  1164. htlb_info.restore_scratch = 0;
  1165. vmalloc_mode = refill_noscratch;
  1166. /*
  1167. * create the plain linear handler
  1168. */
  1169. if (bcm1250_m3_war()) {
  1170. unsigned int segbits = 44;
  1171. uasm_i_dmfc0(&p, K0, C0_BADVADDR);
  1172. uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
  1173. uasm_i_xor(&p, K0, K0, K1);
  1174. uasm_i_dsrl_safe(&p, K1, K0, 62);
  1175. uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
  1176. uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
  1177. uasm_i_or(&p, K0, K0, K1);
  1178. uasm_il_bnez(&p, &r, K0, label_leave);
  1179. /* No need for uasm_i_nop */
  1180. }
  1181. #ifdef CONFIG_64BIT
  1182. build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
  1183. #else
  1184. build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
  1185. #endif
  1186. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  1187. build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
  1188. #endif
  1189. build_get_ptep(&p, K0, K1);
  1190. build_update_entries(&p, K0, K1);
  1191. build_tlb_write_entry(&p, &l, &r, tlb_random);
  1192. uasm_l_leave(&l, p);
  1193. uasm_i_eret(&p); /* return from trap */
  1194. }
  1195. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  1196. uasm_l_tlb_huge_update(&l, p);
  1197. build_huge_update_entries(&p, htlb_info.huge_pte, K1);
  1198. build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
  1199. htlb_info.restore_scratch);
  1200. #endif
  1201. #ifdef CONFIG_64BIT
  1202. build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
  1203. #endif
  1204. /*
  1205. * Overflow check: For the 64bit handler, we need at least one
  1206. * free instruction slot for the wrap-around branch. In the worst
  1207. * case, if the intended insertion point is a delay slot, we
  1208. * need three, with the second nop'ed and the third being
  1209. * unused.
  1210. */
  1211. /* Loongson2 ebase is different from r4k's, so we have more space */
  1212. #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
  1213. if ((p - tlb_handler) > 64)
  1214. panic("TLB refill handler space exceeded");
  1215. #else
  1216. if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
  1217. || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
  1218. && uasm_insn_has_bdelay(relocs,
  1219. tlb_handler + MIPS64_REFILL_INSNS - 3)))
  1220. panic("TLB refill handler space exceeded");
  1221. #endif
  1222. /*
  1223. * Now fold the handler in the TLB refill handler space.
  1224. */
  1225. #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
  1226. f = final_handler;
  1227. /* Simplest case, just copy the handler. */
  1228. uasm_copy_handler(relocs, labels, tlb_handler, p, f);
  1229. final_len = p - tlb_handler;
  1230. #else /* CONFIG_64BIT */
  1231. f = final_handler + MIPS64_REFILL_INSNS;
  1232. if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
  1233. /* Just copy the handler. */
  1234. uasm_copy_handler(relocs, labels, tlb_handler, p, f);
  1235. final_len = p - tlb_handler;
  1236. } else {
  1237. #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
  1238. const enum label_id ls = label_tlb_huge_update;
  1239. #else
  1240. const enum label_id ls = label_vmalloc;
  1241. #endif
  1242. u32 *split;
  1243. int ov = 0;
  1244. int i;
  1245. for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
  1246. ;
  1247. BUG_ON(i == ARRAY_SIZE(labels));
  1248. split = labels[i].addr;
  1249. /*
  1250. * See if we have overflown one way or the other.
  1251. */
  1252. if (split > tlb_handler + MIPS64_REFILL_INSNS ||
  1253. split < p - MIPS64_REFILL_INSNS)
  1254. ov = 1;
  1255. if (ov) {
  1256. /*
  1257. * Split two instructions before the end. One
  1258. * for the branch and one for the instruction
  1259. * in the delay slot.
  1260. */
  1261. split = tlb_handler + MIPS64_REFILL_INSNS - 2;
  1262. /*
  1263. * If the branch would fall in a delay slot,
  1264. * we must back up an additional instruction
  1265. * so that it is no longer in a delay slot.
  1266. */
  1267. if (uasm_insn_has_bdelay(relocs, split - 1))
  1268. split--;
  1269. }
  1270. /* Copy first part of the handler. */
  1271. uasm_copy_handler(relocs, labels, tlb_handler, split, f);
  1272. f += split - tlb_handler;
  1273. if (ov) {
  1274. /* Insert branch. */
  1275. uasm_l_split(&l, final_handler);
  1276. uasm_il_b(&f, &r, label_split);
  1277. if (uasm_insn_has_bdelay(relocs, split))
  1278. uasm_i_nop(&f);
  1279. else {
  1280. uasm_copy_handler(relocs, labels,
  1281. split, split + 1, f);
  1282. uasm_move_labels(labels, f, f + 1, -1);
  1283. f++;
  1284. split++;
  1285. }
  1286. }
  1287. /* Copy the rest of the handler. */
  1288. uasm_copy_handler(relocs, labels, split, p, final_handler);
  1289. final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
  1290. (p - split);
  1291. }
  1292. #endif /* CONFIG_64BIT */
  1293. uasm_resolve_relocs(relocs, labels);
  1294. pr_debug("Wrote TLB refill handler (%u instructions).\n",
  1295. final_len);
  1296. memcpy((void *)ebase, final_handler, 0x100);
  1297. dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
  1298. }
  1299. /*
  1300. * 128 instructions for the fastpath handler is generous and should
  1301. * never be exceeded.
  1302. */
  1303. #define FASTPATH_SIZE 128
  1304. u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
  1305. u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
  1306. u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
  1307. #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
  1308. u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
  1309. static void __cpuinit build_r4000_setup_pgd(void)
  1310. {
  1311. const int a0 = 4;
  1312. const int a1 = 5;
  1313. u32 *p = tlbmiss_handler_setup_pgd;
  1314. struct uasm_label *l = labels;
  1315. struct uasm_reloc *r = relocs;
  1316. memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
  1317. memset(labels, 0, sizeof(labels));
  1318. memset(relocs, 0, sizeof(relocs));
  1319. pgd_reg = allocate_kscratch();
  1320. if (pgd_reg == -1) {
  1321. /* PGD << 11 in c0_Context */
  1322. /*
  1323. * If it is a ckseg0 address, convert to a physical
  1324. * address. Shifting right by 29 and adding 4 will
  1325. * result in zero for these addresses.
  1326. *
  1327. */
  1328. UASM_i_SRA(&p, a1, a0, 29);
  1329. UASM_i_ADDIU(&p, a1, a1, 4);
  1330. uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
  1331. uasm_i_nop(&p);
  1332. uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
  1333. uasm_l_tlbl_goaround1(&l, p);
  1334. UASM_i_SLL(&p, a0, a0, 11);
  1335. uasm_i_jr(&p, 31);
  1336. UASM_i_MTC0(&p, a0, C0_CONTEXT);
  1337. } else {
  1338. /* PGD in c0_KScratch */
  1339. uasm_i_jr(&p, 31);
  1340. UASM_i_MTC0(&p, a0, 31, pgd_reg);
  1341. }
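/*
 * In the c0_Context case above: for a CKSEG0 pointer the arithmetic
 * shift by 29 yields -4, the addiu makes it 0 and the branch falls
 * through, so dinsm clears bits 63..29 and leaves the physical address
 * before it is shifted left by 11 into c0_Context.  Any other address
 * branches around the conversion.
 */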
  1342. if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
  1343. panic("tlbmiss_handler_setup_pgd space exceeded");
  1344. uasm_resolve_relocs(relocs, labels);
  1345. pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
  1346. (unsigned int)(p - tlbmiss_handler_setup_pgd));
  1347. dump_handler("tlbmiss_handler",
  1348. tlbmiss_handler_setup_pgd,
  1349. ARRAY_SIZE(tlbmiss_handler_setup_pgd));
  1350. }
  1351. #endif
  1352. static void __cpuinit
  1353. iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
  1354. {
  1355. #ifdef CONFIG_SMP
  1356. # ifdef CONFIG_64BIT_PHYS_ADDR
  1357. if (cpu_has_64bits)
  1358. uasm_i_lld(p, pte, 0, ptr);
  1359. else
  1360. # endif
  1361. UASM_i_LL(p, pte, 0, ptr);
  1362. #else
  1363. # ifdef CONFIG_64BIT_PHYS_ADDR
  1364. if (cpu_has_64bits)
  1365. uasm_i_ld(p, pte, 0, ptr);
  1366. else
  1367. # endif
  1368. UASM_i_LW(p, pte, 0, ptr);
  1369. #endif
  1370. }
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL.  PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  int pte, int ptr, int scratch, enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	if (cpu_has_rixi) {
		if (use_bbit_insns()) {
			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
			uasm_i_nop(p);
		} else {
			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
			uasm_il_beqz(p, r, t, lid);
			if (pte == t)
				/* You lose the SMP race :-( */
				iPTE_LW(p, pte, ptr);
		}
	} else {
		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL.  Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, int scratch,
		   enum label_id lid)
{
	int t = scratch >= 0 ? scratch : pte;

	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, t, lid);
	if (pte == t)
		/* You lose the SMP race :-( */
		iPTE_LW(p, pte, ptr);
	else
		uasm_i_nop(p);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL.  Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, int scratch,
		     enum label_id lid)
{
	if (use_bbit_insns()) {
		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
		uasm_i_nop(p);
	} else {
		int t = scratch >= 0 ? scratch : pte;
		uasm_i_andi(p, t, pte, _PAGE_WRITE);
		uasm_il_beqz(p, r, t, lid);
		if (pte == t)
			/* You lose the SMP race :-( */
			iPTE_LW(p, pte, ptr);
	}
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

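/*
 * Common head for the R3000 fastpath handlers: walk the page tables
 * by hand (pgd from pgd_current indexed by BadVAddr, pte slot from
 * the low bits of c0_Context), leaving the pte value in PTE and its
 * address in PTR, then probe the TLB so the reload helpers above can
 * use tlbwi.
 */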
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * R4000 style TLB load/store/modify handlers.
 */

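/*
 * Common head for the R4000 fastpath handlers: pick work registers,
 * walk down to the PTE for the faulting address (huge pmds branch off
 * to label_tlb_huge_update), load the PTE and probe the TLB; the TLBP
 * is deferred to the caller when the M4Kc TLBP workaround is in
 * effect.  Returns the work register set for the caller.
 */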
static struct work_registers __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r)
{
	struct work_registers wr = build_get_work_registers(p);

#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
#else
	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
	UASM_i_LW(p, wr.r2, 0, wr.r2);
	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
	return wr;
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
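	/*
	 * Clear the low pte_t bit of ptr (ori then xori) so it points
	 * at the even PTE of the even/odd pair that
	 * build_update_entries() loads into EntryLo0/EntryLo1.
	 */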
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	build_restore_work_registers(p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

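	/*
	 * BCM1250 pass 1 "M3" erratum workaround (see bcm1250_m3_war()):
	 * if the VPN bits of c0_BadVAddr and c0_EntryHi disagree, the
	 * exception is treated as spurious and the handler simply
	 * returns via label_leave without touching the TLB.
	 */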
	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (cpu_has_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround1);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
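		/*
		 * The 8-byte branch offset skips the ENTRYLO1 read
		 * below: for an even PTE only the ENTRYLO0 read in the
		 * delay slot executes, for an odd PTE both execute and
		 * the ENTRYLO1 value wins.
		 */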
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
			uasm_i_nop(&p);
			uasm_l_tlbl_goaround1(&l, p);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
			uasm_i_nop(&p);
		}
		uasm_l_tlbl_goaround1(&l, p);
	}
	build_make_valid(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (cpu_has_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test..
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
				      label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		if (use_bbit_insns()) {
			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
			uasm_i_beqz(&p, wr.r3, 8);
		}
		/* load it in the delay slot */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
		/* load it if ptr is odd */
		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
		/*
		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		if (use_bbit_insns()) {
			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
		} else {
			uasm_i_andi(&p, wr.r3, wr.r3, 2);
			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
		}
		if (PM_DEFAULT_MASK == 0)
			uasm_i_nop(&p);
		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbl(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

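/*
 * The store handler follows the same pattern as the load handler
 * above, minus the RIXI checks: verify the PTE is present and
 * writable, then mark it valid/dirty and rewrite the TLB entry.
 */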
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbs(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

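/*
 * The modify handler runs when a store hits a TLB entry that is
 * valid but not yet dirty; it only needs to check the software write
 * bit and then set the accessed/modified/valid/dirty bits.
 */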
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	struct work_registers wr;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, wr.r1, wr.r2);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, wr.r1, wr.r2);
	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, wr.r1, wr.r1,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
#endif

	uasm_l_nopage_tlbm(&l, p);
	build_restore_work_registers(&p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it.  The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	output_pgtable_bits_defines();

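	/*
	 * Roughly: note whether the CPU implements more virtual
	 * address bits than the page tables cover, so the 64-bit
	 * refill handler must catch accesses above the mapped range.
	 */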
#ifdef CONFIG_64BIT
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		if (!run_once) {
			scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
			build_r4000_setup_pgd();
#endif
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
		build_r4000_tlb_refill_handler();
	}
}

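/*
 * The handlers above are assembled into ordinary kernel data arrays,
 * so their cache lines must be written back and the corresponding
 * I-cache lines invalidated before the CPU may execute them.
 */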
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
			   (unsigned long)tlbmiss_handler_setup_pgd +
			   sizeof(tlbmiss_handler_setup_pgd));
#endif
}