@@ -44,17 +44,18 @@
 #include <asm/spu_priv1.h>
 #include "spufs.h"
 
-#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)
+#define SPU_TIMESLICE	(HZ)
 
-#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
 struct spu_prio_array {
-	unsigned long bitmap[SPU_BITMAP_SIZE];
-	wait_queue_head_t waitq[MAX_PRIO];
+	DECLARE_BITMAP(bitmap, MAX_PRIO);
+	struct list_head runq[MAX_PRIO];
+	spinlock_t runq_lock;
 	struct list_head active_list[MAX_NUMNODES];
 	struct mutex active_mutex[MAX_NUMNODES];
 };
 
 static struct spu_prio_array *spu_prio;
+static struct workqueue_struct *spu_sched_wq;
 
 static inline int node_allowed(int node)
 {
@@ -68,6 +69,64 @@ static inline int node_allowed(int node)
 	return 1;
 }
 
+void spu_start_tick(struct spu_context *ctx)
+{
+	if (ctx->policy == SCHED_RR)
+		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
+}
+
+void spu_stop_tick(struct spu_context *ctx)
+{
+	if (ctx->policy == SCHED_RR)
+		cancel_delayed_work(&ctx->sched_work);
+}
+
+void spu_sched_tick(struct work_struct *work)
+{
+	struct spu_context *ctx =
+		container_of(work, struct spu_context, sched_work.work);
+	struct spu *spu;
+	int rearm = 1;
+
+	mutex_lock(&ctx->state_mutex);
+	spu = ctx->spu;
+	if (spu) {
+		int best = sched_find_first_bit(spu_prio->bitmap);
+		if (best <= ctx->prio) {
+			spu_deactivate(ctx);
+			rearm = 0;
+		}
+	}
+	mutex_unlock(&ctx->state_mutex);
+
+	if (rearm)
+		spu_start_tick(ctx);
+}
+
+/**
+ * spu_add_to_active_list - add spu to active list
+ * @spu: spu to add to the active list
+ */
+static void spu_add_to_active_list(struct spu *spu)
+{
+	mutex_lock(&spu_prio->active_mutex[spu->node]);
+	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
+	mutex_unlock(&spu_prio->active_mutex[spu->node]);
+}
+
+/**
+ * spu_remove_from_active_list - remove spu from active list
+ * @spu: spu to remove from the active list
+ */
+static void spu_remove_from_active_list(struct spu *spu)
+{
+	int node = spu->node;
+
+	mutex_lock(&spu_prio->active_mutex[node]);
+	list_del_init(&spu->list);
+	mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
 static inline void mm_needs_global_tlbie(struct mm_struct *mm)
 {
 	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
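The tick handler above carries the actual round-robin decision: a SCHED_RR context gives up its spu at the end of a timeslice only if a context of equal or higher priority is waiting, where a smaller prio value means higher priority. A minimal userspace sketch of just that comparison (not kernel code; the helper name is illustrative, and it relies on the sentinel bit that spu_sched_init() sets further down so an empty bitmap yields MAX_PRIO):

/* tick_decision.c - sketch of the spu_sched_tick() preemption test */
#include <stdio.h>

#define MAX_PRIO 140	/* matches the kernel's priority range */

/*
 * Returns 1 if the running context at my_prio should be deactivated.
 * With nobody waiting, sched_find_first_bit() returns MAX_PRIO (the
 * sentinel bit), the test is false and the tick is simply rearmed.
 */
static int should_deactivate(int best_waiting_prio, int my_prio)
{
	return best_waiting_prio <= my_prio;
}

int main(void)
{
	printf("%d\n", should_deactivate(MAX_PRIO, 120)); /* 0: rearm */
	printf("%d\n", should_deactivate(100, 120));      /* 1: higher prio waits */
	printf("%d\n", should_deactivate(120, 120));      /* 1: round-robin peer */
	return 0;
}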
@@ -94,8 +153,12 @@ int spu_switch_event_unregister(struct notifier_block * n)
 	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
 }
 
-
-static inline void bind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_bind_context - bind spu context to physical spu
+ * @spu: physical spu to bind to
+ * @ctx: context to bind
+ */
+static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
 		 spu->number, spu->node);
@@ -104,7 +167,6 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	ctx->spu = spu;
 	ctx->ops = &spu_hw_ops;
 	spu->pid = current->pid;
-	spu->prio = current->prio;
 	spu->mm = ctx->owner;
 	mm_needs_global_tlbie(spu->mm);
 	spu->ibox_callback = spufs_ibox_callback;
@@ -118,12 +180,21 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
 	spu->timestamp = jiffies;
 	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
+	spu_add_to_active_list(spu);
+	ctx->state = SPU_STATE_RUNNABLE;
 }
 
-static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unbind_context - unbind spu context from physical spu
+ * @spu: physical spu to unbind from
+ * @ctx: context to unbind
+ */
+static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
 		 spu->pid, spu->number, spu->node);
+
+	spu_remove_from_active_list(spu);
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
 	spu_save(&ctx->csa, spu);
@@ -136,95 +207,98 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu->dma_callback = NULL;
 	spu->mm = NULL;
 	spu->pid = 0;
-	spu->prio = MAX_PRIO;
 	ctx->ops = &spu_backing_ops;
 	ctx->spu = NULL;
 	spu->flags = 0;
 	spu->ctx = NULL;
 }
 
-static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-			      int prio)
+/**
+ * spu_add_to_rq - add a context to the runqueue
+ * @ctx: context to add
+ */
+static void spu_add_to_rq(struct spu_context *ctx)
 {
-	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
-	set_bit(prio, spu_prio->bitmap);
+	spin_lock(&spu_prio->runq_lock);
+	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+	set_bit(ctx->prio, spu_prio->bitmap);
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
-			      int prio)
+/**
+ * spu_del_from_rq - remove a context from the runqueue
+ * @ctx: context to remove
+ */
+static void spu_del_from_rq(struct spu_context *ctx)
 {
-	u64 flags;
-
-	__set_current_state(TASK_RUNNING);
-
-	spin_lock_irqsave(&wq->lock, flags);
+	spin_lock(&spu_prio->runq_lock);
+	list_del_init(&ctx->rq);
+	if (list_empty(&spu_prio->runq[ctx->prio]))
+		clear_bit(ctx->prio, spu_prio->bitmap);
+	spin_unlock(&spu_prio->runq_lock);
+}
 
-	remove_wait_queue_locked(wq, wait);
-	if (list_empty(&wq->task_list))
-		clear_bit(prio, spu_prio->bitmap);
+/**
+ * spu_grab_context - select one context from the runqueue
+ * @prio: priority of the context to be selected
+ *
+ * This function selects one context from the runqueue for priority @prio.
+ * If there is more than one context with the given priority the first
+ * task on the runqueue will be taken.
+ *
+ * Returns the selected spu_context; the woken thread removes itself from
+ * the runqueue via spu_del_from_rq().
+ *
+ * Must be called with spu_prio->runq_lock held.
+ */
+static struct spu_context *spu_grab_context(int prio)
+{
+	struct list_head *rq = &spu_prio->runq[prio];
 
-	spin_unlock_irqrestore(&wq->lock, flags);
+	if (list_empty(rq))
+		return NULL;
+	return list_entry(rq->next, struct spu_context, rq);
 }
 
-static void spu_prio_wait(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx)
 {
-	int prio = current->prio;
-	wait_queue_head_t *wq = &spu_prio->waitq[prio];
 	DEFINE_WAIT(wait);
 
-	if (ctx->spu)
-		return;
-
-	spu_add_wq(wq, &wait, prio);
-
+	set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
+	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
-		up_write(&ctx->state_sema);
-		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
-			 current->pid, current->prio);
+		mutex_unlock(&ctx->state_mutex);
 		schedule();
-		down_write(&ctx->state_sema);
+		mutex_lock(&ctx->state_mutex);
 	}
-
-	spu_del_wq(wq, &wait, prio);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&ctx->stop_wq, &wait);
+	clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
-static void spu_prio_wakeup(void)
+/**
+ * spu_reschedule - try to find a runnable context for a spu
+ * @spu: spu available
+ *
+ * This function is called whenever a spu becomes idle.  It looks for the
+ * most suitable runnable spu context and schedules it for execution.
+ */
+static void spu_reschedule(struct spu *spu)
 {
-	int best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < MAX_PRIO) {
-		wait_queue_head_t *wq = &spu_prio->waitq[best];
-		wake_up_interruptible_nr(wq, 1);
-	}
-}
+	int best;
 
-static int get_active_spu(struct spu *spu)
-{
-	int node = spu->node;
-	struct spu *tmp;
-	int rc = 0;
+	spu_free(spu);
 
-	mutex_lock(&spu_prio->active_mutex[node]);
-	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
-		if (tmp == spu) {
-			list_del_init(&spu->list);
-			rc = 1;
-			break;
-		}
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < MAX_PRIO) {
+		struct spu_context *ctx = spu_grab_context(best);
+		if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+			wake_up(&ctx->stop_wq);
 	}
-	mutex_unlock(&spu_prio->active_mutex[node]);
-	return rc;
-}
-
-static void put_active_spu(struct spu *spu)
-{
-	int node = spu->node;
-
-	mutex_lock(&spu_prio->active_mutex[node]);
-	list_add_tail(&spu->list, &spu_prio->active_list[node]);
-	mutex_unlock(&spu_prio->active_mutex[node]);
+	spin_unlock(&spu_prio->runq_lock);
 }
 
-static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
 	int node = cpu_to_node(raw_smp_processor_id());
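The hunk above replaces the per-priority wait queues with an O(1)-style runqueue: one list per priority plus a bitmap, so the best runnable context falls out of a single find-first-bit scan. A self-contained userspace sketch of the same idea (a sketch, not kernel code: a plain scan loop stands in for sched_find_first_bit(), a LIFO push stands in for list_add_tail(), and dequeueing is folded into the grab step, which the patch splits between spu_grab_context() and spu_del_from_rq()):

/* runq_sketch.c - userspace model of the prio bitmap + runqueue */
#include <stdio.h>

#define MAX_PRIO 140

struct context {
	int prio;
	struct context *next;	/* singly linked for brevity */
};

static struct context *runq[MAX_PRIO];
static unsigned long long bitmap[(MAX_PRIO + 63) / 64];

static void add_to_rq(struct context *ctx)
{
	ctx->next = runq[ctx->prio];
	runq[ctx->prio] = ctx;
	bitmap[ctx->prio / 64] |= 1ULL << (ctx->prio % 64);
}

static struct context *grab_context(int prio)
{
	struct context *ctx = runq[prio];

	if (ctx) {
		runq[prio] = ctx->next;
		if (!runq[prio])	/* last one: clear the prio bit */
			bitmap[prio / 64] &= ~(1ULL << (prio % 64));
	}
	return ctx;
}

static int find_first_bit(void)	/* stands in for sched_find_first_bit() */
{
	int i;

	for (i = 0; i < MAX_PRIO; i++)
		if (bitmap[i / 64] & (1ULL << (i % 64)))
			return i;
	return MAX_PRIO;	/* empty, like the sentinel-bit case */
}

int main(void)
{
	struct context a = { 120, NULL }, b = { 35, NULL };

	add_to_rq(&a);
	add_to_rq(&b);
	printf("best prio: %d\n", find_first_bit());	/* 35: lower value wins */
	grab_context(find_first_bit());
	printf("best prio: %d\n", find_first_bit());	/* 120 */
	return 0;
}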
@@ -241,87 +315,154 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
 	return spu;
 }
 
-static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
+/**
+ * find_victim - find a lower priority context to preempt
+ * @ctx: candidate context for running
+ *
+ * Returns the freed physical spu to run the new context on.
+ */
+static struct spu *find_victim(struct spu_context *ctx)
 {
-	/* Future: spu_get_idle() if possible,
-	 * otherwise try to preempt an active
-	 * context.
+	struct spu_context *victim = NULL;
+	struct spu *spu;
+	int node, n;
+
+	/*
+	 * Look for a possible preemption candidate on the local node first.
+	 * If there is no candidate look at the other nodes.  This isn't
+	 * exactly fair, but so far the whole spu scheduler tries to keep
+	 * a strong node affinity.  We might want to fine-tune this in
+	 * the future.
 	 */
-	return spu_get_idle(ctx, flags);
+ restart:
+	node = cpu_to_node(raw_smp_processor_id());
+	for (n = 0; n < MAX_NUMNODES; n++, node++) {
+		node = (node < MAX_NUMNODES) ? node : 0;
+		if (!node_allowed(node))
+			continue;
+
+		mutex_lock(&spu_prio->active_mutex[node]);
+		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+			struct spu_context *tmp = spu->ctx;
+
+			if (tmp->rt_priority < ctx->rt_priority &&
+			    (!victim || tmp->rt_priority < victim->rt_priority))
+				victim = spu->ctx;
+		}
+		mutex_unlock(&spu_prio->active_mutex[node]);
+
+		if (victim) {
+			/*
+			 * This nests ctx->state_mutex, but we always lock
+			 * higher priority contexts before lower priority
+			 * ones, so this is safe until we introduce
+			 * priority inheritance schemes.
+			 */
+			if (!mutex_trylock(&victim->state_mutex)) {
+				victim = NULL;
+				goto restart;
+			}
+
+			spu = victim->spu;
+			if (!spu) {
+				/*
+				 * This race can happen because we've dropped
+				 * the active list mutex.  Not a problem, just
+				 * restart the search.
+				 */
+				mutex_unlock(&victim->state_mutex);
+				victim = NULL;
+				goto restart;
+			}
+			spu_unbind_context(spu, victim);
+			mutex_unlock(&victim->state_mutex);
+			return spu;
+		}
+	}
+
+	return NULL;
 }
 
-/* The three externally callable interfaces
- * for the scheduler begin here.
+/**
+ * spu_activate - find a free spu for a context and execute it
+ * @ctx: spu context to schedule
+ * @flags: flags (SPU_ACTIVATE_NOWAKE: add to the runqueue without waiting)
  *
- * spu_activate - bind a context to SPU, waiting as needed.
- * spu_deactivate - unbind a context from its SPU.
- * spu_yield - yield an SPU if others are waiting.
+ * Tries to find a free spu to run @ctx.  If no free spu is available,
+ * add the context to the runqueue so it gets woken up once an spu
+ * is available.
  */
-
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-	struct spu *spu;
-	int ret = 0;
 
-	for (;;) {
-		if (ctx->spu)
+	if (ctx->spu)
+		return 0;
+
+	do {
+		struct spu *spu;
+
+		spu = spu_get_idle(ctx);
+		/*
+		 * If this is a realtime thread we try to get it running by
+		 * preempting a lower priority thread.
+		 */
+		if (!spu && ctx->rt_priority)
+			spu = find_victim(ctx);
+		if (spu) {
+			spu_bind_context(spu, ctx);
 			return 0;
-		spu = spu_get(ctx, flags);
-		if (spu != NULL) {
-			if (ctx->spu != NULL) {
-				spu_free(spu);
-				spu_prio_wakeup();
-				break;
-			}
-			bind_context(spu, ctx);
-			put_active_spu(spu);
-			break;
 		}
-		spu_prio_wait(ctx, flags);
-		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			spu_prio_wakeup();
-			break;
-		}
-	}
-	return ret;
+
+		spu_add_to_rq(ctx);
+		if (!(flags & SPU_ACTIVATE_NOWAKE))
+			spu_prio_wait(ctx);
+		spu_del_from_rq(ctx);
+	} while (!signal_pending(current));
+
+	return -ERESTARTSYS;
 }
 
+/**
+ * spu_deactivate - unbind a context from its physical spu
+ * @ctx: spu context to unbind
+ *
+ * Unbind @ctx from the physical spu it is running on and schedule
+ * the highest priority context to run on the freed physical spu.
+ */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu;
-	int needs_idle;
+	struct spu *spu = ctx->spu;
 
-	spu = ctx->spu;
-	if (!spu)
-		return;
-	needs_idle = get_active_spu(spu);
-	unbind_context(spu, ctx);
-	if (needs_idle) {
-		spu_free(spu);
-		spu_prio_wakeup();
+	if (spu) {
+		spu_unbind_context(spu, ctx);
+		spu_reschedule(spu);
 	}
 }
 
+/**
+ * spu_yield - yield a physical spu if others are waiting
+ * @ctx: spu context to yield
+ *
+ * Check if there is a higher priority context waiting and if so
+ * unbind @ctx from the physical spu and schedule the highest
+ * priority context to run on the freed physical spu instead.
+ */
 void spu_yield(struct spu_context *ctx)
 {
 	struct spu *spu;
 	int need_yield = 0;
 
-	if (down_write_trylock(&ctx->state_sema)) {
+	if (mutex_trylock(&ctx->state_mutex)) {
 		if ((spu = ctx->spu) != NULL) {
 			int best = sched_find_first_bit(spu_prio->bitmap);
 			if (best < MAX_PRIO) {
 				pr_debug("%s: yielding SPU %d NODE %d\n",
 					 __FUNCTION__, spu->number, spu->node);
 				spu_deactivate(ctx);
-				ctx->state = SPU_STATE_SAVED;
 				need_yield = 1;
-			} else {
-				spu->prio = MAX_PRIO;
 			}
 		}
-		up_write(&ctx->state_sema);
+		mutex_unlock(&ctx->state_mutex);
 	}
 	if (unlikely(need_yield))
 		yield();
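find_victim() above scans the active lists for the weakest context that still ranks below the candidate's realtime priority; note that rt_priority runs the other way from the prio bitmap, a larger value meaning a more important realtime task. A small userspace sketch of just that selection rule (the helper name, the running array, and the sample values are illustrative, not from the patch):

/* victim_sketch.c - model of find_victim()'s selection rule */
#include <stdio.h>

struct context {
	const char *name;
	int rt_priority;	/* higher value = higher realtime priority */
};

static struct context *pick_victim(struct context *running, int nr_running,
				   int candidate_rt_prio)
{
	struct context *victim = NULL;
	int i;

	for (i = 0; i < nr_running; i++) {
		struct context *tmp = &running[i];

		/* weakest context below the candidate, as in the patch */
		if (tmp->rt_priority < candidate_rt_prio &&
		    (!victim || tmp->rt_priority < victim->rt_priority))
			victim = tmp;
	}
	return victim;	/* NULL if nobody can be preempted */
}

int main(void)
{
	struct context running[] = {
		{ "ctx-a", 10 }, { "ctx-b", 50 }, { "ctx-c", 30 },
	};
	struct context *victim = pick_victim(running, 3, 40);

	/* prints ctx-a: lowest rt_priority below the candidate's 40 */
	printf("victim: %s\n", victim ? victim->name : "none");
	return 0;
}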
@@ -331,14 +472,19 @@ int __init spu_sched_init(void)
 {
 	int i;
 
+	spu_sched_wq = create_singlethread_workqueue("spusched");
+	if (!spu_sched_wq)
+		return 1;
+
 	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
 	if (!spu_prio) {
 		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
 		       __FUNCTION__);
+		destroy_workqueue(spu_sched_wq);
 		return 1;
 	}
 	for (i = 0; i < MAX_PRIO; i++) {
-		init_waitqueue_head(&spu_prio->waitq[i]);
+		INIT_LIST_HEAD(&spu_prio->runq[i]);
 		__clear_bit(i, spu_prio->bitmap);
 	}
 	__set_bit(MAX_PRIO, spu_prio->bitmap);
@@ -346,6 +492,7 @@ int __init spu_sched_init(void)
 		mutex_init(&spu_prio->active_mutex[i]);
 		INIT_LIST_HEAD(&spu_prio->active_list[i]);
 	}
+	spin_lock_init(&spu_prio->runq_lock);
 	return 0;
 }
@@ -364,4 +511,5 @@ void __exit spu_sched_exit(void)
 		mutex_unlock(&spu_prio->active_mutex[node]);
 	}
 	kfree(spu_prio);
+	destroy_workqueue(spu_sched_wq);
 }