@@ -223,7 +223,11 @@ _GLOBAL(slb_allocate_user)
  */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
-	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_256M need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
 
 	/* r3 = EA, r11 = VSID data */
 	/*
@@ -287,7 +291,11 @@ _GLOBAL(slb_compare_rr_to_size)
 slb_finish_load_1T:
 	srdi	r10,r10,40-28		/* get 1T ESID */
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
-	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
+	/*
+	 * bits above VSID_BITS_1T need to be ignored from r10
+	 * also combine VSID and flags
+	 */
+	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
 	li	r10,MMU_SEGSIZE_1T
 	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */