sched.c

/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO + BITS_PER_LONG) / BITS_PER_LONG) + 1)

struct spu_prio_array {
        unsigned long bitmap[SPU_BITMAP_SIZE];
        struct list_head runq[MAX_PRIO];
        spinlock_t runq_lock;
        struct list_head active_list[MAX_NUMNODES];
        struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
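
/**
 * node_allowed - check whether the current task may run on a node
 * @node: NUMA node to check
 *
 * Returns 1 if @node has any CPUs and the calling task's CPU affinity
 * mask intersects with that node, 0 otherwise.
 */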
static inline int node_allowed(int node)
{
        cpumask_t mask;

        if (!nr_cpus_node(node))
                return 0;
        mask = node_to_cpumask(node);
        if (!cpus_intersects(mask, current->cpus_allowed))
                return 0;
        return 1;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu: spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
        mutex_lock(&spu_prio->active_mutex[spu->node]);
        list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
        mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu: spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
        int node = spu->node;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_del_init(&spu->list);
        mutex_unlock(&spu_prio->active_mutex[node]);
}
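
/**
 * mm_needs_global_tlbie - force broadcast TLB invalidates for an mm
 * @mm: address space that is now also referenced by an SPE
 *
 * Setting every CPU in cpu_vm_mask makes the mm appear in use on all
 * CPUs, so TLB invalidations are broadcast (tlbie) rather than done
 * locally, which is needed once SPEs map this address space.
 */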
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}
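
/*
 * Notifier chain used to report SPU context switches to registered
 * listeners; the chain is called with the context's object_id (or 0
 * when a context is unbound) and the spu involved.
 */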
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
        blocking_notifier_call_chain(&spu_switch_notifier,
                                     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
        return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
        return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
                 spu->number, spu->node);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->mm = ctx->owner;
        mm_needs_global_tlbie(spu->mm);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        spu->dma_callback = spufs_dma_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
        spu_add_to_active_list(spu);
        ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
                 spu->pid, spu->number, spu->node);
        spu_remove_from_active_list(spu);
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->dma_callback = NULL;
        spu->mm = NULL;
        spu->pid = 0;
        ctx->ops = &spu_backing_ops;
        ctx->spu = NULL;
        spu->flags = 0;
        spu->ctx = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void spu_add_to_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
        set_bit(ctx->prio, spu_prio->bitmap);
        spin_unlock(&spu_prio->runq_lock);
}

/**
 * spu_del_from_rq - remove a context from the runqueue
 * @ctx: context to remove
 */
static void spu_del_from_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        list_del_init(&ctx->rq);
        if (list_empty(&spu_prio->runq[ctx->prio]))
                clear_bit(ctx->prio, spu_prio->bitmap);
        spin_unlock(&spu_prio->runq_lock);
}

/**
 * spu_grab_context - remove one context from the runqueue
 * @prio: priority of the context to be removed
 *
 * This function removes one context from the runqueue for priority @prio.
 * If there is more than one context with the given priority, the first
 * one on the runqueue will be taken.
 *
 * Returns the spu_context it just removed.
 *
 * Must be called with spu_prio->runq_lock held.
 */
static struct spu_context *spu_grab_context(int prio)
{
        struct list_head *rq = &spu_prio->runq[prio];

        if (list_empty(rq))
                return NULL;
        return list_entry(rq->next, struct spu_context, rq);
}
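
/**
 * spu_prio_wait - wait until this context is woken by spu_reschedule
 * @ctx: context to wait on
 *
 * Sleeps on ctx->stop_wq, dropping ctx->state_mutex around the call to
 * schedule() and re-acquiring it afterwards.  The wait is interruptible:
 * if a signal is pending the function returns without sleeping.
 */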
static void spu_prio_wait(struct spu_context *ctx)
{
        DEFINE_WAIT(wait);

        set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
        prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
        if (!signal_pending(current)) {
                mutex_unlock(&ctx->state_mutex);
                schedule();
                mutex_lock(&ctx->state_mutex);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&ctx->stop_wq, &wait);
        clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
}

/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu: spu available
 *
 * This function is called whenever a spu becomes idle.  It looks for the
 * most suitable runnable spu context and schedules it for execution.
 */
static void spu_reschedule(struct spu *spu)
{
        int best;

        spu_free(spu);

        spin_lock(&spu_prio->runq_lock);
        best = sched_find_first_bit(spu_prio->bitmap);
        if (best < MAX_PRIO) {
                struct spu_context *ctx = spu_grab_context(best);
                if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
                        wake_up(&ctx->stop_wq);
        }
        spin_unlock(&spu_prio->runq_lock);
}
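
/**
 * spu_get_idle - try to allocate an idle spu on an allowed node
 * @ctx: spu context to find an spu for
 *
 * Walks the NUMA nodes starting from the local one and returns the
 * first idle spu found on a node the current task may run on, or NULL
 * if no such spu is available.
 */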
static struct spu *spu_get_idle(struct spu_context *ctx)
{
        struct spu *spu = NULL;
        int node = cpu_to_node(raw_smp_processor_id());
        int n;

        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(node))
                        continue;
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }
        return spu;
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */
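
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how a caller such as the spufs run path is assumed to drive these
 * entry points.  The caller holds ctx->state_mutex around spu_activate(),
 * while spu_yield() takes the mutex itself via mutex_trylock():
 *
 *	mutex_lock(&ctx->state_mutex);
 *	ret = spu_activate(ctx, 0);
 *	... load the program, start the SPU, wait for it to stop ...
 *	mutex_unlock(&ctx->state_mutex);
 *	spu_yield(ctx);
 */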

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
        if (ctx->spu)
                return 0;

        do {
                struct spu *spu;

                spu = spu_get_idle(ctx);
                if (spu) {
                        spu_bind_context(spu, ctx);
                        return 0;
                }
                spu_add_to_rq(ctx);
                if (!(flags & SPU_ACTIVATE_NOWAKE))
                        spu_prio_wait(ctx);
                spu_del_from_rq(ctx);
        } while (!signal_pending(current));

        return -ERESTARTSYS;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;

        if (spu) {
                spu_unbind_context(spu, ctx);
                spu_reschedule(spu);
        }
}
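
/**
 * spu_yield - yield an spu if other contexts are waiting
 * @ctx: spu context to yield
 *
 * If any context is waiting on the runqueue, deactivate @ctx so its spu
 * can be rescheduled, then call yield().  Does nothing if the context's
 * state_mutex cannot be taken without blocking.
 */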
void spu_yield(struct spu_context *ctx)
{
        struct spu *spu;
        int need_yield = 0;

        if (mutex_trylock(&ctx->state_mutex)) {
                if ((spu = ctx->spu) != NULL) {
                        int best = sched_find_first_bit(spu_prio->bitmap);
                        if (best < MAX_PRIO) {
                                pr_debug("%s: yielding SPU %d NODE %d\n",
                                         __FUNCTION__, spu->number, spu->node);
                                spu_deactivate(ctx);
                                need_yield = 1;
                        }
                }
                mutex_unlock(&ctx->state_mutex);
        }
        if (unlikely(need_yield))
                yield();
}
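
/**
 * spu_sched_init - initialize the spu scheduler
 *
 * Allocates the spu_prio array and sets up the per-priority run lists,
 * the priority bitmap (with bit MAX_PRIO set as a sentinel), and the
 * per-node active lists with their mutexes.  Returns 0 on success or 1
 * if the allocation fails.
 */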
int __init spu_sched_init(void)
{
        int i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio) {
                printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
                       __FUNCTION__);
                return 1;
        }
        for (i = 0; i < MAX_PRIO; i++) {
                INIT_LIST_HEAD(&spu_prio->runq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        __set_bit(MAX_PRIO, spu_prio->bitmap);
        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&spu_prio->active_mutex[i]);
                INIT_LIST_HEAD(&spu_prio->active_list[i]);
        }
        spin_lock_init(&spu_prio->runq_lock);
        return 0;
}
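
/**
 * spu_sched_exit - tear down the spu scheduler
 *
 * Releases any spus still sitting on the per-node active lists and
 * frees the spu_prio array.
 */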
void __exit spu_sched_exit(void)
{
        struct spu *spu, *tmp;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&spu_prio->active_mutex[node]);
                list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
                                         list) {
                        list_del_init(&spu->list);
                        spu_free(spu);
                }
                mutex_unlock(&spu_prio->active_mutex[node]);
        }
        kfree(spu_prio);
}