/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

#define SPU_TIMESLICE	(HZ)
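
/*
 * Runqueue state: one list per priority level plus a bitmap of non-empty
 * priorities, and per-NUMA-node lists of spus currently running a context.
 */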
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
static struct workqueue_struct *spu_sched_wq;
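
/*
 * node_allowed - check whether the current task may use spus on @node,
 * i.e. whether its cpu affinity mask intersects the cpus of that node.
 */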
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}
void spu_start_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * Make sure the exiting bit is cleared.
		 */
		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
	}
}

void spu_stop_tick(struct spu_context *ctx)
{
	if (ctx->policy == SCHED_RR) {
		/*
		 * The work normally rearms itself; setting this flag
		 * makes sure it does not rearm anymore.
		 */
		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
		mb();
		cancel_delayed_work(&ctx->sched_work);
	}
}
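
/*
 * spu_sched_tick - timeslice tick for SCHED_RR contexts: preempt the
 * context if a context of equal or higher priority is waiting on the
 * runqueue, otherwise rearm the tick.
 */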
void spu_sched_tick(struct work_struct *work)
{
	struct spu_context *ctx =
		container_of(work, struct spu_context, sched_work.work);
	struct spu *spu;
	int preempted = 0;

	/*
	 * If this context is being stopped avoid rescheduling from the
	 * scheduler tick because we would block on the state_mutex.
	 * The caller will yield the spu later on anyway.
	 */
	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
		return;

	mutex_lock(&ctx->state_mutex);
	spu = ctx->spu;
	if (spu) {
		int best = sched_find_first_bit(spu_prio->bitmap);
		if (best <= ctx->prio) {
			spu_deactivate(ctx);
			preempted = 1;
		}
	}
	mutex_unlock(&ctx->state_mutex);

	if (preempted) {
		/*
		 * We need to break out of the wait loop in spu_run manually
		 * to ensure this context gets put on the runqueue again
		 * ASAP.
		 */
		wake_up(&ctx->stop_wq);
	} else
		spu_start_tick(ctx);
}
/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}

/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 */
static void spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_del_init(&spu->list);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block *n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block *n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spu_remove_from_active_list(spu);
	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}
/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
	set_bit(prio, spu_prio->bitmap);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq))
		list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[prio]))
		clear_bit(prio, spu_prio->bitmap);
}
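
/*
 * spu_prio_wait - put @ctx on the runqueue and sleep until it is woken by
 * spu_reschedule() or a signal arrives.  The state_mutex is dropped while
 * sleeping and reacquired before returning.
 */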
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu:	spu available
 *
 * This function is called whenever a spu becomes idle.  It frees the spu
 * and wakes up the highest priority context waiting on the runqueue so it
 * can be scheduled for execution.
 */
static void spu_reschedule(struct spu *spu)
{
	int best;

	spu_free(spu);

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		struct list_head *rq = &spu_prio->runq[best];
		struct spu_context *ctx;

		BUG_ON(list_empty(rq));

		ctx = list_entry(rq->next, struct spu_context, rq);
		__spu_del_from_rq(ctx);
		wake_up(&ctx->stop_wq);
	}
	spin_unlock(&spu_prio->runq_lock);
}
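
/*
 * spu_get_idle - try to allocate an idle spu, starting at the local NUMA
 * node and falling back to the other nodes the current task may run on.
 */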
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}
/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;

		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry(spu, &spu_prio->active_list[node], list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp->rt_priority < ctx->rt_priority &&
			    (!victim || tmp->rt_priority < victim->rt_priority))
				victim = spu->ctx;
		}
		mutex_unlock(&spu_prio->active_mutex[node]);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}
			spu_unbind_context(spu, victim);
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && ctx->rt_priority)
			spu = find_victim(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;

	if (spu) {
		spu_unbind_context(spu, ctx);
		spu_reschedule(spu);
	}
}
/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is another context waiting on the runqueue and if yes
 * unbind @ctx from the physical spu and schedule the highest priority
 * context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;

	if (mutex_trylock(&ctx->state_mutex)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
}
int __init spu_sched_init(void)
{
	int i;

	spu_sched_wq = create_singlethread_workqueue("spusched");
	if (!spu_sched_wq)
		return 1;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		destroy_workqueue(spu_sched_wq);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	/*
	 * Sentinel: keep bit MAX_PRIO set so sched_find_first_bit() returns
	 * MAX_PRIO when the runqueue is empty.
	 */
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);
	return 0;
}
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
	destroy_workqueue(spu_sched_wq);
}