sched.c

/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE	(((MAX_PRIO + BITS_PER_LONG) / BITS_PER_LONG) + 1)

struct spu_prio_array {
        unsigned long bitmap[SPU_BITMAP_SIZE];
        wait_queue_head_t waitq[MAX_PRIO];
        struct list_head active_list[MAX_NUMNODES];
        struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
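
/*
 * node_allowed - check whether the calling task may run on @node: the
 * node must have CPUs and they must intersect current->cpus_allowed.
 */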
static inline int node_allowed(int node)
{
        cpumask_t mask;

        if (!nr_cpus_node(node))
                return 0;
        mask = node_to_cpumask(node);
        if (!cpus_intersects(mask, current->cpus_allowed))
                return 0;
        return 1;
}

static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
        blocking_notifier_call_chain(&spu_switch_notifier,
                                     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
        return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
        return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
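
/*
 * bind_context - attach @ctx to @spu: record ownership, install the
 * spufs callbacks, restore the saved context state onto the SPU and
 * notify switch-event listeners.
 */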
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
                 spu->number, spu->node);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->prio = current->prio;
        spu->mm = ctx->owner;
        mm_needs_global_tlbie(spu->mm);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        spu->dma_callback = spufs_dma_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
}
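
/*
 * unbind_context - detach @ctx from @spu: save the SPU state back into
 * the context save area, clear the callbacks and ownership fields, and
 * mark the context as saved.
 */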
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
                 spu->pid, spu->number, spu->node);
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->dma_callback = NULL;
        spu->mm = NULL;
        spu->pid = 0;
        spu->prio = MAX_PRIO;
        ctx->ops = &spu_backing_ops;
        ctx->spu = NULL;
        spu->flags = 0;
        spu->ctx = NULL;
}
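
/*
 * spu_add_wq/spu_del_wq - enqueue or dequeue the caller on the wait
 * queue for @prio and keep the priority bitmap in sync: the bit is set
 * when a waiter is added and cleared once the queue drains.
 */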
static inline void spu_add_wq(wait_queue_head_t *wq, wait_queue_t *wait,
                              int prio)
{
        prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
        set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t *wq, wait_queue_t *wait,
                              int prio)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&wq->lock, flags);

        remove_wait_queue_locked(wq, wait);
        if (list_empty(&wq->task_list))
                clear_bit(prio, spu_prio->bitmap);

        spin_unlock_irqrestore(&wq->lock, flags);
}
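
/*
 * spu_prio_wait - sleep on the wait queue that matches the caller's
 * priority until an SPU becomes available or a signal arrives.  The
 * context's state semaphore is released across schedule() and
 * reacquired afterwards.
 */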
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
        int prio = current->prio;
        wait_queue_head_t *wq = &spu_prio->waitq[prio];
        DEFINE_WAIT(wait);

        if (ctx->spu)
                return;

        spu_add_wq(wq, &wait, prio);
        if (!signal_pending(current)) {
                up_write(&ctx->state_sema);
                pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
                         current->pid, current->prio);
                schedule();
                down_write(&ctx->state_sema);
        }
        spu_del_wq(wq, &wait, prio);
}
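
/*
 * spu_prio_wakeup - wake exactly one waiter at the highest waiting
 * priority (lowest numerical value) currently set in the bitmap.
 */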
static void spu_prio_wakeup(void)
{
        int best = sched_find_first_bit(spu_prio->bitmap);

        if (best < MAX_PRIO) {
                wait_queue_head_t *wq = &spu_prio->waitq[best];
                wake_up_interruptible_nr(wq, 1);
        }
}
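
/*
 * get_active_spu/put_active_spu - take an SPU off or put it onto its
 * node's active list.  get_active_spu returns 1 if the SPU was found
 * on the list (and removed), 0 otherwise.
 */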
static int get_active_spu(struct spu *spu)
{
        int node = spu->node;
        struct spu *tmp;
        int rc = 0;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
                if (tmp == spu) {
                        list_del_init(&spu->list);
                        rc = 1;
                        break;
                }
        }
        mutex_unlock(&spu_prio->active_mutex[node]);
        return rc;
}

static void put_active_spu(struct spu *spu)
{
        int node = spu->node;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_add_tail(&spu->list, &spu_prio->active_list[node]);
        mutex_unlock(&spu_prio->active_mutex[node]);
}
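
/*
 * spu_get_idle - try to allocate an idle SPU, starting on the NUMA node
 * of the current CPU and then walking the remaining allowed nodes.
 */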
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
        struct spu *spu = NULL;
        int node = cpu_to_node(raw_smp_processor_id());
        int n;

        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(node))
                        continue;
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }
        return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
        /* Future: spu_get_idle() if possible,
         * otherwise try to preempt an active
         * context.
         */
        return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */

int spu_activate(struct spu_context *ctx, u64 flags)
{
        struct spu *spu;
        int ret = 0;

        for (;;) {
                if (ctx->spu)
                        return 0;
                spu = spu_get(ctx, flags);
                if (spu != NULL) {
                        if (ctx->spu != NULL) {
                                spu_free(spu);
                                spu_prio_wakeup();
                                break;
                        }
                        bind_context(spu, ctx);
                        put_active_spu(spu);
                        break;
                }
                spu_prio_wait(ctx, flags);
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        spu_prio_wakeup();
                        break;
                }
        }
        return ret;
}

void spu_deactivate(struct spu_context *ctx)
{
        struct spu *spu;
        int needs_idle;

        spu = ctx->spu;
        if (!spu)
                return;

        needs_idle = get_active_spu(spu);
        unbind_context(spu, ctx);
        if (needs_idle) {
                spu_free(spu);
                spu_prio_wakeup();
        }
}
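
/*
 * spu_yield - if another context is waiting on the priority queues,
 * deactivate this context and call yield(); otherwise just reset the
 * SPU's recorded priority to MAX_PRIO.
 */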
void spu_yield(struct spu_context *ctx)
{
        struct spu *spu;
        int need_yield = 0;

        if (down_write_trylock(&ctx->state_sema)) {
                if ((spu = ctx->spu) != NULL) {
                        int best = sched_find_first_bit(spu_prio->bitmap);
                        if (best < MAX_PRIO) {
                                pr_debug("%s: yielding SPU %d NODE %d\n",
                                         __FUNCTION__, spu->number, spu->node);
                                spu_deactivate(ctx);
                                ctx->state = SPU_STATE_SAVED;
                                need_yield = 1;
                        } else {
                                spu->prio = MAX_PRIO;
                        }
                }
                up_write(&ctx->state_sema);
        }
        if (unlikely(need_yield))
                yield();
}

int __init spu_sched_init(void)
{
        int i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio) {
                printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
                       __FUNCTION__);
                return 1;
        }
        for (i = 0; i < MAX_PRIO; i++) {
                init_waitqueue_head(&spu_prio->waitq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        __set_bit(MAX_PRIO, spu_prio->bitmap);
        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&spu_prio->active_mutex[i]);
                INIT_LIST_HEAD(&spu_prio->active_list[i]);
        }
        return 0;
}
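
/*
 * spu_sched_exit - at module unload, pull any SPUs still on the active
 * lists back into the free pool and release the priority array.
 */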
void __exit spu_sched_exit(void)
{
        struct spu *spu, *tmp;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&spu_prio->active_mutex[node]);
                list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
                                         list) {
                        list_del_init(&spu->list);
                        spu_free(spu);
                }
                mutex_unlock(&spu_prio->active_mutex[node]);
        }
        kfree(spu_prio);
}