@@ -6,7 +6,6 @@
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/security.h>
@@ -22,149 +21,6 @@
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
 
-/*
- * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
- */
-
-cpumask_t mt_fpu_cpumask;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-
-#include <linux/cpu.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-
-unsigned long mt_fpemul_threshold = 0;
-
-/*
- * Replacement functions for the sys_sched_setaffinity() and
- * sys_sched_getaffinity() system calls, so that we can integrate
- * FPU affinity with the user's requested processor affinity.
- * This code is 98% identical with the sys_sched_setaffinity()
- * and sys_sched_getaffinity() system calls, and should be
- * updated when kernel/sched.c changes.
- */
-
-/*
- * find_process_by_pid - find a process with a matching PID value.
- * used in sys_sched_set/getaffinity() in kernel/sched.c, so
- * cloned here.
- */
-static inline struct task_struct *find_process_by_pid(pid_t pid)
-{
-	return pid ? find_task_by_pid(pid) : current;
-}
-
-
-/*
- * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
-{
-	cpumask_t new_mask;
-	cpumask_t effective_mask;
-	int retval;
-	struct task_struct *p;
-
-	if (len < sizeof(new_mask))
-		return -EINVAL;
-
-	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
-		return -EFAULT;
-
-	lock_cpu_hotplug();
-	read_lock(&tasklist_lock);
-
-	p = find_process_by_pid(pid);
-	if (!p) {
-		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
-		return -ESRCH;
-	}
-
-	/*
-	 * It is not safe to call set_cpus_allowed with the
-	 * tasklist_lock held. We will bump the task_struct's
-	 * usage count and drop tasklist_lock before invoking
-	 * set_cpus_allowed.
-	 */
-	get_task_struct(p);
-
-	retval = -EPERM;
-	if ((current->euid != p->euid) && (current->euid != p->uid) &&
-			!capable(CAP_SYS_NICE)) {
-		read_unlock(&tasklist_lock);
-		goto out_unlock;
-	}
-
-	retval = security_task_setscheduler(p, 0, NULL);
-	if (retval)
-		goto out_unlock;
-
-	/* Record new user-specified CPU set for future reference */
-	p->thread.user_cpus_allowed = new_mask;
-
-	/* Unlock the task list */
-	read_unlock(&tasklist_lock);
-
-	/* Compute new global allowed CPU set if necessary */
-	if ((p->thread.mflags & MF_FPUBOUND)
-	&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
-		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
-		retval = set_cpus_allowed(p, effective_mask);
-	} else {
-		p->thread.mflags &= ~MF_FPUBOUND;
-		retval = set_cpus_allowed(p, new_mask);
-	}
-
-
-out_unlock:
-	put_task_struct(p);
-	unlock_cpu_hotplug();
-	return retval;
-}
-
-/*
- * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
- */
-asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
-{
-	unsigned int real_len;
-	cpumask_t mask;
-	int retval;
-	struct task_struct *p;
-
-	real_len = sizeof(mask);
-	if (len < real_len)
-		return -EINVAL;
-
-	lock_cpu_hotplug();
-	read_lock(&tasklist_lock);
-
-	retval = -ESRCH;
-	p = find_process_by_pid(pid);
-	if (!p)
-		goto out_unlock;
-	retval = security_task_getscheduler(p);
-	if (retval)
-		goto out_unlock;
-
-	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
-
-out_unlock:
-	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
-	if (retval)
-		return retval;
-	if (copy_to_user(user_mask_ptr, &mask, real_len))
-		return -EFAULT;
-	return real_len;
-}
-
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
 /*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
@@ -314,17 +170,6 @@ static int __init ndflush(char *s)
 	return 1;
 }
 __setup("ndflush=", ndflush);
-#ifdef CONFIG_MIPS_MT_FPAFF
-static int fpaff_threshold = -1;
-
-static int __init fpaff_thresh(char *str)
-{
-	get_option(&str, &fpaff_threshold);
-	return 1;
-}
-
-__setup("fpaff=", fpaff_thresh);
-#endif /* CONFIG_MIPS_MT_FPAFF */
 
 static unsigned int itc_base = 0;
 
@@ -380,20 +225,6 @@ void mips_mt_set_cpuoptions(void)
 	if (mt_n_dflushes != 1)
 		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
 
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* FPU Use Factor empirically derived from experiments on 34K */
-#define FPUSEFACTOR 333
-
-	if (fpaff_threshold >= 0) {
-		mt_fpemul_threshold = fpaff_threshold;
-	} else {
-		mt_fpemul_threshold =
-			(FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
-	}
-	printk("FPU Affinity set after %ld emulations\n",
-			mt_fpemul_threshold);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
 	if (itc_base != 0) {
 		/*
 		 * Configure ITC mapping. This code is very
|