/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
        unsigned long bitmap[SPU_BITMAP_SIZE];
        struct list_head runq[MAX_PRIO];
        spinlock_t runq_lock;
        struct list_head active_list[MAX_NUMNODES];
        struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
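
/**
 * node_allowed - check whether the current task may run on a node
 * @node: NUMA node to check
 *
 * Returns 1 if @node has cpus and the calling task's cpu affinity mask
 * intersects with that node, otherwise 0.
 */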
static inline int node_allowed(int node)
{
        cpumask_t mask;

        if (!nr_cpus_node(node))
                return 0;
        mask = node_to_cpumask(node);
        if (!cpus_intersects(mask, current->cpus_allowed))
                return 0;
        return 1;
}

/**
 * spu_add_to_active_list - add spu to active list
 * @spu: spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
        mutex_lock(&spu_prio->active_mutex[spu->node]);
        list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
        mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu: spu to remove from the active list
 *
 * This function removes an spu from the active list.  If the spu was
 * found on the active list the function returns 1, else it doesn't do
 * anything and returns 0.
 */
static int spu_remove_from_active_list(struct spu *spu)
{
        int node = spu->node;
        struct spu *tmp;
        int rc = 0;

        mutex_lock(&spu_prio->active_mutex[node]);
        list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
                if (tmp == spu) {
                        list_del_init(&spu->list);
                        rc = 1;
                        break;
                }
        }
        mutex_unlock(&spu_prio->active_mutex[node]);
        return rc;
}

static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}
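
/*
 * Notifier chain for informing interested subsystems about spu context
 * switches.  Notifiers are called with the context's object_id (or 0 on
 * unbind) and the physical spu.
 */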
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
        blocking_notifier_call_chain(&spu_switch_notifier,
                                     ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
        return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
        return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
        pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
                 spu->number, spu->node);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->mm = ctx->owner;
        mm_needs_global_tlbie(spu->mm);
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        spu->dma_callback = spufs_dma_callback;
        mb();
        spu_unmap_mappings(ctx);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_cpu_affinity_set(spu, raw_smp_processor_id());
        spu_switch_notify(spu, ctx);
        spu_add_to_active_list(spu);
        ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 *
 * If the spu was on the active list the function returns 1, else 0.
 */
static int spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
        int was_active = spu_remove_from_active_list(spu);

        pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
                 spu->pid, spu->number, spu->node);
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->dma_callback = NULL;
        spu->mm = NULL;
        spu->pid = 0;
        ctx->ops = &spu_backing_ops;
        ctx->spu = NULL;
        spu->flags = 0;
        spu->ctx = NULL;
        return was_active;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void spu_add_to_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
        set_bit(ctx->prio, spu_prio->bitmap);
        spin_unlock(&spu_prio->runq_lock);
}

/**
 * spu_del_from_rq - remove a context from the runqueue
 * @ctx: context to remove
 */
static void spu_del_from_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        list_del_init(&ctx->rq);
        if (list_empty(&spu_prio->runq[ctx->prio]))
                clear_bit(ctx->prio, spu_prio->bitmap);
        spin_unlock(&spu_prio->runq_lock);
}

/**
 * spu_grab_context - find one context on the runqueue
 * @prio: priority of the context to look for
 *
 * This function returns the first context queued at priority @prio.
 * It does not take the context off the runqueue; the caller is
 * responsible for dequeuing it with spu_del_from_rq().
 *
 * Returns the spu_context, or NULL if the runqueue for @prio is empty.
 *
 * Must be called with spu_prio->runq_lock held.
 */
static struct spu_context *spu_grab_context(int prio)
{
        struct list_head *rq = &spu_prio->runq[prio];

        if (list_empty(rq))
                return NULL;
        return list_entry(rq->next, struct spu_context, rq);
}
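
/**
 * spu_prio_wait - wait for a spu to become available
 * @ctx: context waiting for a spu
 *
 * Sleeps on @ctx->stop_wq until spu_reschedule wakes the context up or a
 * signal is pending.  ctx->state_mutex is dropped while sleeping and
 * reacquired before returning.
 */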
static void spu_prio_wait(struct spu_context *ctx)
{
        DEFINE_WAIT(wait);

        set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
        prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
        if (!signal_pending(current)) {
                mutex_unlock(&ctx->state_mutex);
                schedule();
                mutex_lock(&ctx->state_mutex);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&ctx->stop_wq, &wait);
        clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
}

/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu: spu available
 *
 * This function is called whenever a spu becomes idle.  It looks for the
 * most suitable runnable spu context and schedules it for execution.
 */
static void spu_reschedule(struct spu *spu)
{
        int best;

        spu_free(spu);

        spin_lock(&spu_prio->runq_lock);
        best = sched_find_first_bit(spu_prio->bitmap);
        if (best < MAX_PRIO) {
                struct spu_context *ctx = spu_grab_context(best);
                if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
                        wake_up(&ctx->stop_wq);
        }
        spin_unlock(&spu_prio->runq_lock);
}
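
/**
 * spu_get_idle - find an idle spu for a context
 * @ctx: spu context to schedule
 *
 * Walks the NUMA nodes starting with the local one and returns an idle
 * spu from the first allowed node that has one, or NULL if no spu is
 * available.
 */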
static struct spu *spu_get_idle(struct spu_context *ctx)
{
        struct spu *spu = NULL;
        int node = cpu_to_node(raw_smp_processor_id());
        int n;

        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(node))
                        continue;
                spu = spu_alloc_node(node);
                if (spu)
                        break;
        }
        return spu;
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (SPU_ACTIVATE_NOWAKE means don't sleep waiting for a spu)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available,
 * the context is added to the runqueue so it gets woken up once an spu
 * becomes available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
        if (ctx->spu)
                return 0;

        do {
                struct spu *spu;

                spu = spu_get_idle(ctx);
                if (spu) {
                        spu_bind_context(spu, ctx);
                        return 0;
                }

                spu_add_to_rq(ctx);
                if (!(flags & SPU_ACTIVATE_NOWAKE))
                        spu_prio_wait(ctx);
                spu_del_from_rq(ctx);
        } while (!signal_pending(current));

        return -ERESTARTSYS;
}
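
/**
 * spu_deactivate - unbind a context from its spu
 * @ctx: spu context to unbind
 *
 * Unbinds @ctx from the physical spu it is running on, if any, frees that
 * spu and wakes up the highest-priority waiting context on the runqueue.
 */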
void spu_deactivate(struct spu_context *ctx)
{
        struct spu *spu;
        int was_active;

        spu = ctx->spu;
        if (!spu)
                return;
        was_active = spu_unbind_context(spu, ctx);
        if (was_active)
                spu_reschedule(spu);
}
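
/**
 * spu_yield - yield the spu of a context if others are waiting
 * @ctx: spu context to yield
 *
 * If any context is waiting on the runqueue, deactivate @ctx so the
 * waiter can take over the spu, then yield the cpu as well.
 */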
void spu_yield(struct spu_context *ctx)
{
        struct spu *spu;
        int need_yield = 0;

        if (mutex_trylock(&ctx->state_mutex)) {
                if ((spu = ctx->spu) != NULL) {
                        int best = sched_find_first_bit(spu_prio->bitmap);
                        if (best < MAX_PRIO) {
                                pr_debug("%s: yielding SPU %d NODE %d\n",
                                         __FUNCTION__, spu->number, spu->node);
                                spu_deactivate(ctx);
                                need_yield = 1;
                        }
                }
                mutex_unlock(&ctx->state_mutex);
        }
        if (unlikely(need_yield))
                yield();
}
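
/**
 * spu_sched_init - initialize the spu scheduler state
 *
 * Allocates the priority array and sets up the runqueues, the priority
 * bitmap and the per-node active lists.  Returns 0 on success, 1 if the
 * priority array could not be allocated.
 */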
int __init spu_sched_init(void)
{
        int i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio) {
                printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
                       __FUNCTION__);
                return 1;
        }
        for (i = 0; i < MAX_PRIO; i++) {
                INIT_LIST_HEAD(&spu_prio->runq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        __set_bit(MAX_PRIO, spu_prio->bitmap);
        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&spu_prio->active_mutex[i]);
                INIT_LIST_HEAD(&spu_prio->active_list[i]);
        }
        spin_lock_init(&spu_prio->runq_lock);
        return 0;
}
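
/**
 * spu_sched_exit - tear down the spu scheduler state
 *
 * Removes any spus still on the per-node active lists, returns them to
 * the allocator and frees the priority array.
 */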
void __exit spu_sched_exit(void)
{
        struct spu *spu, *tmp;
        int node;

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&spu_prio->active_mutex[node]);
                list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
                                         list) {
                        list_del_init(&spu->list);
                        spu_free(spu);
                }
                mutex_unlock(&spu_prio->active_mutex[node]);
        }
        kfree(spu_prio);
}