Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] __per_cpu_idtrs[] is a memory hog
  [IA64] sanity in #include files.  Move fnptr to types.h
  [IA64] use helpers for rlimits
  [IA64] cpumask_of_node() should handle -1 as a node
Linus Torvalds, 15 years ago, commit a07f523f26

+ 0 - 1
arch/ia64/include/asm/ftrace.h

@@ -8,7 +8,6 @@
 extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
 #define mcount _mcount
 
-#include <asm/kprobes.h>
 /* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
 #define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
 #define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
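
The comment above is the key constraint: on ia64 a function symbol names a descriptor, not code, so the value of MCOUNT_ADDR is only fixed once the descriptor is filled in at link time. A minimal sketch of the consequence (cached_mcount_addr and cache_mcount_addr() are hypothetical, for illustration only):

#include <linux/init.h>
#include <asm/ftrace.h>

static unsigned long cached_mcount_addr;

static int __init cache_mcount_addr(void)
{
	/* MCOUNT_ADDR expands to ((struct fnptr *)mcount)->ip: a load
	 * through a pointer, not a C constant expression, so it cannot
	 * appear in a static initializer and must be read at run time. */
	cached_mcount_addr = MCOUNT_ADDR;
	return 0;
}
early_initcall(cache_mcount_addr);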

+ 0 - 5
arch/ia64/include/asm/kprobes.h

@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
 	bundle_t bundle;
 } kprobe_opcode_t;
 
-struct fnptr {
-	unsigned long ip;
-	unsigned long gp;
-};
-
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */

+ 1 - 1
arch/ia64/include/asm/tlb.h

@@ -74,7 +74,7 @@ struct ia64_tr_entry {
 extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 extern void ia64_ptr_entry(u64 target_mask, int slot);
 
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  region register macros
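
For scale: the old static array is resident on every configuration whether any dynamic translation register is ever used, while the new scheme costs one pointer per possible CPU plus a table allocated on first use. A standalone back-of-the-envelope comparison, where the constants are assumptions chosen only for illustration:

#include <stdio.h>

/* Assumed values, for illustration only. */
#define NR_CPUS           4096
#define IA64_TR_ALLOC_MAX 64

struct ia64_tr_entry { unsigned long ifa, itir, pte, rr; }; /* 32 bytes */

int main(void)
{
	/* Old: one static 3-D array, resident whether used or not
	 * (16 MB with the numbers above). */
	printf("static array:  %zu bytes\n",
	       sizeof(struct ia64_tr_entry[NR_CPUS][2][IA64_TR_ALLOC_MAX]));
	/* New: a pointer per possible CPU (32 KB here); each per-CPU
	 * table is kmalloc'ed lazily on first use. */
	printf("pointer table: %zu bytes\n",
	       sizeof(struct ia64_tr_entry *[NR_CPUS]));
	return 0;
}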

+ 3 - 1
arch/ia64/include/asm/topology.h

@@ -33,7 +33,9 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ?				\
+			       cpu_all_mask :				\
+			       &node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
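
Generic code passes -1 as the node for objects with no NUMA affinity, e.g. dev_to_node() on a device whose node is unknown, and indexing node_to_cpu_mask[-1] would be an out-of-bounds read. A usage sketch (pick_queue_cpu is hypothetical, shown only to motivate the -1 case):

#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static int pick_queue_cpu(struct device *dev)
{
	int node = dev_to_node(dev);	/* may be -1: no NUMA affinity */

	/* With the change above, -1 yields cpu_all_mask instead of an
	 * out-of-bounds read of node_to_cpu_mask[]. */
	return cpumask_first(cpumask_of_node(node));
}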

+ 5 - 0
arch/ia64/include/asm/types.h

@@ -30,6 +30,11 @@
 
 typedef unsigned int umode_t;
 
+struct fnptr {
+	unsigned long ip;
+	unsigned long gp;
+};
+
 /*
  * These aren't exported outside the kernel to avoid name space clashes
  */
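
On ia64 a function symbol denotes this descriptor pair: ip is the code entry address and gp the global-pointer value the callee expects its data to be addressed through. Hosting the struct in types.h lets generic users such as ftrace.h name both halves without the unrelated kprobes.h dependency removed above. A small sketch, with hypothetical helper names:

#include <asm/types.h>

/* Hypothetical helpers, not part of the patch. */
static inline unsigned long descriptor_ip(const void *func)
{
	return ((const struct fnptr *)func)->ip;  /* code entry point */
}

static inline unsigned long descriptor_gp(const void *func)
{
	return ((const struct fnptr *)func)->gp;  /* callee's global pointer */
}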

+ 4 - 1
arch/ia64/kernel/mca.c

@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
 	unsigned long psr;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu])
+		return;
+
 	psr = ia64_clear_ic();
 	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
-		p = &__per_cpu_idtrs[cpu][iord-1][i];
+		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX + i;
 		if (p->pte & 0x1) {
 			old_rr = ia64_get_rr(p->ifa);
 			if (old_rr != p->rr) {
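
Here iord is 1 for instruction TRs and 2 for data TRs, so (iord - 1) selects which bank of IA64_TR_ALLOC_MAX entries to walk inside the flat per-CPU table, and i picks the slot within that bank. Spelled out as a hypothetical helper (not part of the commit):

static struct ia64_tr_entry *tr_slot(int cpu, u64 iord, int i)
{
	/* Bank 0 holds the instruction TRs, bank 1 the data TRs. */
	return ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX + i;
}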

+ 1 - 1
arch/ia64/kernel/perfmon.c

@@ -2293,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
 	 * 	return -ENOMEM;
 	 */
-	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
 		return -ENOMEM;
 
 	/*
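
task_rlimit() reads another task's current (soft) limit; at the time it was a thin wrapper in <linux/sched.h> over exactly the field access it replaces here. A sketch of its shape (treat the precise definition as an assumption; the in-tree version also guards the read, e.g. with ACCESS_ONCE()):

static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return tsk->signal->rlim[limit].rlim_cur;
}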

+ 1 - 1
arch/ia64/mm/init.c

@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
 inline void
 ia64_set_rbs_bot (void)
 {
-	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
 	if (stack_size > MAX_USER_STACK_SIZE)
 		stack_size = MAX_USER_STACK_SIZE;
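
rlimit_max() is the matching shorthand for the hard limit of the current task, which is why the open-coded current->signal->rlim[RLIMIT_STACK].rlim_max can disappear. Roughly (again a sketch, not the exact source):

static inline unsigned long rlimit_max(unsigned int limit)
{
	return current->signal->rlim[limit].rlim_max;
}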

+ 19 - 13
arch/ia64/mm/tlb.c

@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+				sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-				!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
@@ -488,7 +494,7 @@ found:
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-				(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;
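
As the hunks above show, the old __per_cpu_idtrs[cpu][0][i] and [cpu][1][i] references map onto the flat table as base + i and base + IA64_TR_ALLOC_MAX + i. Hypothetical accessors (not in the patch) that make the layout explicit:

static inline struct ia64_tr_entry *itr_entry(int cpu, int i)
{
	return ia64_idtrs[cpu] + i;                      /* was [cpu][0][i] */
}

static inline struct ia64_tr_entry *dtr_entry(int cpu, int i)
{
	return ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;  /* was [cpu][1][i] */
}

One behavioural trade-off: the table is now allocated with kmalloc(GFP_KERNEL) on a CPU's first TR insertion, so ia64_itr_entry() gains a new -ENOMEM failure path that callers have to handle.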