/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31   NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE (100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO + BITS_PER_LONG) / BITS_PER_LONG) + 1)
struct spu_prio_array {
        unsigned long bitmap[SPU_BITMAP_SIZE];
        wait_queue_head_t waitq[MAX_PRIO];
        struct list_head active_list[MAX_NUMNODES];
        struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;

static inline int node_allowed(int node)
{
        cpumask_t mask;

        if (!nr_cpus_node(node))
                return 0;
        mask = node_to_cpumask(node);
        if (!cpus_intersects(mask, current->cpus_allowed))
                return 0;
        return 1;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
        mutex_lock(&spu_prio->active_mutex[spu->node]);
        list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
        mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 *
 * This function removes an spu from the active list.  If the spu was
 * found on the active list the function returns 1, else it doesn't do
 * anything and returns 0.
 */
static int spu_remove_from_active_list(struct spu *spu)
{
        int node = spu->node;
        struct spu *tmp;
        int rc = 0;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
                if (tmp == spu) {
                        list_del_init(&spu->list);
                        rc = 1;
                        break;
                }
        }
        mutex_unlock(&spu_prio->active_mutex[node]);
        return rc;
}
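
/*
 * With SPEs attached to an mm, translation invalidations must be
 * broadcast with global tlbie rather than handled CPU-locally, because
 * the SPE MMUs also cache translations for this mm.  Setting every bit
 * in mm->cpu_vm_mask forces the global form.
 */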
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}
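
/*
 * Notifier chain for reporting SPU context switch events.  Subscribers
 * register via spu_switch_event_register() and are called with the
 * context's object_id (or 0 when a context is unbound) and the spu.
 */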
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
        blocking_notifier_call_chain(&spu_switch_notifier,
                                     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
        return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
        return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
                 spu->number, spu->node);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->prio = current->prio;
        spu->mm = ctx->owner;
        mm_needs_global_tlbie(spu->mm);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        spu->dma_callback = spufs_dma_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
        spu_add_to_active_list(spu);
        ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 *
 * If the spu was on the active list the function returns 1, else 0.
 */
static int spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
        int was_active = spu_remove_from_active_list(spu);

        pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
                 spu->pid, spu->number, spu->node);
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->dma_callback = NULL;
        spu->mm = NULL;
        spu->pid = 0;
        spu->prio = MAX_PRIO;
        ctx->ops = &spu_backing_ops;
        ctx->spu = NULL;
        spu->flags = 0;
        spu->ctx = NULL;

        return was_active;
}
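
/*
 * Helpers for the per-priority wait queues.  spu_add_wq() queues the
 * current thread on the wait queue for its priority and marks that
 * priority as waiting in the bitmap; spu_del_wq() removes it again and
 * clears the bitmap bit once no waiters of that priority remain.
 */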
static inline void spu_add_wq(wait_queue_head_t *wq, wait_queue_t *wait,
                              int prio)
{
        prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
        set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t *wq, wait_queue_t *wait,
                              int prio)
{
        u64 flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&wq->lock, flags);

        remove_wait_queue_locked(wq, wait);
        if (list_empty(&wq->task_list))
                clear_bit(prio, spu_prio->bitmap);

        spin_unlock_irqrestore(&wq->lock, flags);
}
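
/*
 * spu_prio_wait - sleep until an SPU may become available.
 *
 * Queues the current thread on the wait queue for its priority and
 * schedules.  ctx->state_mutex is dropped across the sleep and retaken
 * afterwards.  The function returns without sleeping if the context is
 * already bound or a signal is pending.
 */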
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
        int prio = current->prio;
        wait_queue_head_t *wq = &spu_prio->waitq[prio];
        DEFINE_WAIT(wait);

        if (ctx->spu)
                return;

        spu_add_wq(wq, &wait, prio);
        if (!signal_pending(current)) {
                mutex_unlock(&ctx->state_mutex);
                pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
                         current->pid, current->prio);
                schedule();
                mutex_lock(&ctx->state_mutex);
        }
        spu_del_wq(wq, &wait, prio);
}
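
/*
 * spu_prio_wakeup - wake the best waiter, if any.
 *
 * The priority bitmap tells us which wait queues are non-empty; wake
 * exactly one waiter from the best (lowest numbered) priority queue.
 */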
static void spu_prio_wakeup(void)
{
        int best = sched_find_first_bit(spu_prio->bitmap);

        if (best < MAX_PRIO) {
                wait_queue_head_t *wq = &spu_prio->waitq[best];
                wake_up_interruptible_nr(wq, 1);
        }
}
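
/*
 * spu_get_idle - try to allocate an idle SPU.
 *
 * Starts on the node the current CPU belongs to and walks the other
 * NUMA nodes round-robin, skipping nodes the current task's CPU
 * affinity does not allow.  Returns NULL if no idle SPU is available.
 */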
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
        struct spu *spu = NULL;
        int node = cpu_to_node(raw_smp_processor_id());
        int n;

        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(node))
                        continue;
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }
        return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
        /* Future: spu_get_idle() if possible,
         * otherwise try to preempt an active
         * context.
         */
        return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */
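
/**
 * spu_activate - find a free spu for a context and bind it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Loops trying to allocate an idle SPU and bind @ctx to it, sleeping
 * on the priority wait queue between attempts.  Returns 0 on success
 * or -ERESTARTSYS if interrupted by a signal.
 */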
int spu_activate(struct spu_context *ctx, u64 flags)
{
        struct spu *spu;
        int ret = 0;

        for (;;) {
                if (ctx->spu)
                        return 0;
                spu = spu_get(ctx, flags);
                if (spu != NULL) {
                        if (ctx->spu != NULL) {
                                spu_free(spu);
                                spu_prio_wakeup();
                                break;
                        }
                        spu_bind_context(spu, ctx);
                        break;
                }
                spu_prio_wait(ctx, flags);
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        spu_prio_wakeup();
                        break;
                }
        }
        return ret;
}
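
/**
 * spu_deactivate - unbind a context from its spu
 * @ctx:	spu context to unbind
 *
 * Unbinds @ctx from its SPU.  If the SPU was still on the active list,
 * it is returned to the allocator and the best waiter is woken.
 */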
void spu_deactivate(struct spu_context *ctx)
{
        struct spu *spu;
        int was_active;

        spu = ctx->spu;
        if (!spu)
                return;
        was_active = spu_unbind_context(spu, ctx);
        if (was_active) {
                spu_free(spu);
                spu_prio_wakeup();
        }
}
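
/**
 * spu_yield - yield an SPU if other contexts are waiting
 * @ctx:	spu context to yield
 *
 * If any context is waiting on the priority wait queues, deactivate
 * @ctx and yield the CPU; otherwise just lower the SPU's priority to
 * MAX_PRIO.  Does nothing if the state mutex cannot be taken without
 * blocking.
 */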
void spu_yield(struct spu_context *ctx)
{
        struct spu *spu;
        int need_yield = 0;

        if (mutex_trylock(&ctx->state_mutex)) {
                if ((spu = ctx->spu) != NULL) {
                        int best = sched_find_first_bit(spu_prio->bitmap);
                        if (best < MAX_PRIO) {
                                pr_debug("%s: yielding SPU %d NODE %d\n",
                                         __FUNCTION__, spu->number, spu->node);
                                spu_deactivate(ctx);
                                need_yield = 1;
                        } else {
                                spu->prio = MAX_PRIO;
                        }
                }
                mutex_unlock(&ctx->state_mutex);
        }
        if (unlikely(need_yield))
                yield();
}
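
/*
 * spu_sched_init - allocate and initialize the scheduler state.
 *
 * Sets up the priority bitmap (with the MAX_PRIO sentinel bit expected
 * by sched_find_first_bit), the per-priority wait queues and the
 * per-node active lists.  Returns 0 on success, 1 if the allocation
 * fails.
 */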
int __init spu_sched_init(void)
{
        int i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio) {
                printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
                       __FUNCTION__);
                return 1;
        }
        for (i = 0; i < MAX_PRIO; i++) {
                init_waitqueue_head(&spu_prio->waitq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        __set_bit(MAX_PRIO, spu_prio->bitmap);
        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&spu_prio->active_mutex[i]);
                INIT_LIST_HEAD(&spu_prio->active_list[i]);
        }
        return 0;
}
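
/*
 * spu_sched_exit - tear down the scheduler.
 *
 * Any SPUs still on the active lists are freed back to the allocator
 * before the scheduler state itself is released.
 */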
void __exit spu_sched_exit(void)
{
        struct spu *spu, *tmp;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&spu_prio->active_mutex[node]);
                list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
                                         list) {
                        list_del_init(&spu->list);
                        spu_free(spu);
                }
                mutex_unlock(&spu_prio->active_mutex[node]);
        }
        kfree(spu_prio);
}