@@ -1165,7 +1165,22 @@ static inline unsigned long brk_rnd(void)

 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
+	unsigned long base = mm->brk;
+	unsigned long ret;
+
+#ifdef CONFIG_PPC64
+	/*
+	 * If we are using 1TB segments and we are allowed to randomise
+	 * the heap, we can put it above 1TB so it is backed by a 1TB
+	 * segment. Otherwise the heap will be in the bottom 1TB
+	 * which always uses 256MB segments and this may result in a
+	 * performance penalty.
+	 */
+	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+#endif
+
+	ret = PAGE_ALIGN(base + brk_rnd());

 	if (ret < mm->brk)
 		return mm->brk;