sched.c

/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)

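/*
 * One global priority array for all SPU contexts: a bitmap of priority
 * levels that have waiters queued, one wait queue per priority level, and
 * a per-node list (protected by a mutex) of SPUs that currently have a
 * context loaded.
 */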
struct spu_prio_array {
        unsigned long bitmap[SPU_BITMAP_SIZE];
        wait_queue_head_t waitq[MAX_PRIO];
        struct list_head active_list[MAX_NUMNODES];
        struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;

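/*
 * Check whether the calling task may use SPUs on the given NUMA node;
 * nodes without CPUs, or outside current->cpus_allowed, are skipped when
 * looking for an SPU.
 */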
static inline int node_allowed(int node)
{
        cpumask_t mask;

        if (!nr_cpus_node(node))
                return 0;
        mask = node_to_cpumask(node);
        if (!cpus_intersects(mask, current->cpus_allowed))
                return 0;
        return 1;
}

static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
        blocking_notifier_call_chain(&spu_switch_notifier,
                                     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
        return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
        return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

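/*
 * Load a saved context onto an idle SPU: hook up the spufs callbacks,
 * restore the saved register state and mark the context runnable.
 */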
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
                 spu->number, spu->node);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->prio = current->prio;
        spu->mm = ctx->owner;
        mm_needs_global_tlbie(spu->mm);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        spu->dma_callback = spufs_dma_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
        ctx->state = SPU_STATE_RUNNABLE;
}

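/*
 * Reverse of bind_context: save the context state off the SPU and detach
 * the callbacks so the hardware can be handed to another context.
 */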
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
                 spu->pid, spu->number, spu->node);
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->dma_callback = NULL;
        spu->mm = NULL;
        spu->pid = 0;
        spu->prio = MAX_PRIO;
        ctx->ops = &spu_backing_ops;
        ctx->spu = NULL;
        spu->flags = 0;
        spu->ctx = NULL;
}

static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
                              int prio)
{
        prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
        set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
                              int prio)
{
        u64 flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&wq->lock, flags);

        remove_wait_queue_locked(wq, wait);
        if (list_empty(&wq->task_list))
                clear_bit(prio, spu_prio->bitmap);

        spin_unlock_irqrestore(&wq->lock, flags);
}

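/*
 * Sleep on the wait queue for the caller's priority until an SPU is
 * released or a signal arrives; ctx->state_sema is dropped across the
 * actual sleep.
 */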
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
        int prio = current->prio;
        wait_queue_head_t *wq = &spu_prio->waitq[prio];
        DEFINE_WAIT(wait);

        if (ctx->spu)
                return;

        spu_add_wq(wq, &wait, prio);

        if (!signal_pending(current)) {
                up_write(&ctx->state_sema);
                pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
                         current->pid, current->prio);
                schedule();
                down_write(&ctx->state_sema);
        }

        spu_del_wq(wq, &wait, prio);
}

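/* Wake exactly one waiter at the best (lowest numbered) waiting priority. */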
static void spu_prio_wakeup(void)
{
        int best = sched_find_first_bit(spu_prio->bitmap);
        if (best < MAX_PRIO) {
                wait_queue_head_t *wq = &spu_prio->waitq[best];
                wake_up_interruptible_nr(wq, 1);
        }
}

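/*
 * get_active_spu/put_active_spu maintain the per-node list of SPUs that
 * currently have a context loaded.  get_active_spu() removes @spu from
 * its node's list and returns 1 if it was found there.
 */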
static int get_active_spu(struct spu *spu)
{
        int node = spu->node;
        struct spu *tmp;
        int rc = 0;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
                if (tmp == spu) {
                        list_del_init(&spu->list);
                        rc = 1;
                        break;
                }
        }
        mutex_unlock(&spu_prio->active_mutex[node]);
        return rc;
}

static void put_active_spu(struct spu *spu)
{
        int node = spu->node;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_add_tail(&spu->list, &spu_prio->active_list[node]);
        mutex_unlock(&spu_prio->active_mutex[node]);
}

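/*
 * Look for an idle SPU, starting with the local NUMA node and then trying
 * the other nodes the caller is allowed to run on.
 */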
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
        struct spu *spu = NULL;
        int node = cpu_to_node(raw_smp_processor_id());
        int n;

        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(node))
                        continue;
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }
        return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
        /* Future: spu_get_idle() if possible,
         * otherwise try to preempt an active
         * context.
         */
        return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */

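/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller, e.g. the spufs run path, would be expected to use these
 * interfaces roughly as follows:
 *
 *	if (spu_activate(ctx, 0))	   may sleep; fails with -ERESTARTSYS
 *		return -ERESTARTSYS;	   when a signal interrupts the wait
 *	... run the program through ctx->ops ...
 *	spu_yield(ctx);			   give the SPU up if someone waits
 *
 * spu_deactivate() is called when the context is destroyed or must be
 * scheduled out completely.
 */
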
int spu_activate(struct spu_context *ctx, u64 flags)
{
        struct spu *spu;
        int ret = 0;

        for (;;) {
                if (ctx->spu)
                        return 0;
                spu = spu_get(ctx, flags);
                if (spu != NULL) {
                        if (ctx->spu != NULL) {
                                spu_free(spu);
                                spu_prio_wakeup();
                                break;
                        }
                        bind_context(spu, ctx);
                        put_active_spu(spu);
                        break;
                }
                spu_prio_wait(ctx, flags);
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        spu_prio_wakeup();
                        break;
                }
        }
        return ret;
}

void spu_deactivate(struct spu_context *ctx)
{
        struct spu *spu;
        int needs_idle;

        spu = ctx->spu;
        if (!spu)
                return;

        needs_idle = get_active_spu(spu);
        unbind_context(spu, ctx);
        if (needs_idle) {
                spu_free(spu);
                spu_prio_wakeup();
        }
}

void spu_yield(struct spu_context *ctx)
{
        struct spu *spu;
        int need_yield = 0;

        if (down_write_trylock(&ctx->state_sema)) {
                if ((spu = ctx->spu) != NULL) {
                        int best = sched_find_first_bit(spu_prio->bitmap);
                        if (best < MAX_PRIO) {
                                pr_debug("%s: yielding SPU %d NODE %d\n",
                                         __FUNCTION__, spu->number, spu->node);
                                spu_deactivate(ctx);
                                need_yield = 1;
                        } else {
                                spu->prio = MAX_PRIO;
                        }
                }
                up_write(&ctx->state_sema);
        }
        if (unlikely(need_yield))
                yield();
}

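/*
 * Allocate and initialize the global priority array at module init time.
 * The MAX_PRIO bit stays set as a sentinel so that sched_find_first_bit()
 * returns MAX_PRIO when no waiter is queued.
 */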
int __init spu_sched_init(void)
{
        int i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio) {
                printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
                       __FUNCTION__);
                return 1;
        }
        for (i = 0; i < MAX_PRIO; i++) {
                init_waitqueue_head(&spu_prio->waitq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        __set_bit(MAX_PRIO, spu_prio->bitmap);
        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&spu_prio->active_mutex[i]);
                INIT_LIST_HEAD(&spu_prio->active_list[i]);
        }
        return 0;
}

void __exit spu_sched_exit(void)
{
        struct spu *spu, *tmp;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&spu_prio->active_mutex[node]);
                list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
                                         list) {
                        list_del_init(&spu->list);
                        spu_free(spu);
                }
                mutex_unlock(&spu_prio->active_mutex[node]);
        }
        kfree(spu_prio);
}