/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};
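
/*
 * The runqueue is an array of lists, one per priority level, plus a
 * bitmap with one bit set for every non-empty list.  Picking the best
 * waiting context is a find_first_bit() over the bitmap followed by
 * taking the head of that list; see __spu_add_to_rq() and
 * grab_runnable_context() below.
 */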

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
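
/*
 * Worked example (assuming the usual kernel values MAX_PRIO == 140 and
 * MAX_RT_PRIO == 100, i.e. MAX_USER_PRIO == 40, and writing DEF for
 * DEF_SPU_TIMESLICE):
 *
 *	nice -20 (prio 100):	SCALE_PRIO(4 * DEF, 100) = 8 * DEF  ~ 800 msecs
 *	nice   0 (prio 120):	SCALE_PRIO(DEF, 120)     = 1 * DEF  ~ 100 msecs
 *	nice +19 (prio 139):	SCALE_PRIO(DEF, 139)     = DEF / 20 ~   5 msecs
 */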

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu: physical spu to bind to
 * @ctx: context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
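
/*
 * After aff_set_offsets() every context in the gang carries its distance
 * from the affinity reference context: contexts linked before the
 * reference get -1, -2, ..., while the reference itself and the contexts
 * after it get 0, 1, 2, ...  For example, in a hypothetical gang A-B-R-C
 * with reference context R the offsets end up as A=-2, B=-1, R=0, C=1.
 */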

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 * used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns true once the gang's affinity reference spu has been
 * determined, computing it (and the per-context offsets) on first use.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu: physical spu to unbind from
 * @ctx: context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx: context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
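
/*
 * spu_get_idle - find and reserve an idle spu for a context
 *
 * If the context belongs to a gang with spu affinity, try the spu at the
 * context's offset from the gang's affinity reference spu first.
 * Otherwise scan the nodes round-robin, starting with the node of the
 * CPU we are currently running on, and claim the first free spu found.
 */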
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx: candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}

static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx: spu context to schedule
 * @flags: flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx: spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}
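
/*
 * spusched_tick - scheduler tick for one loaded context
 *
 * Called from the spusched kernel thread, which is woken every
 * SPUSCHED_TICK jiffies, for each context that is currently bound to an
 * spu.  NOSCHED and SCHED_FIFO contexts are never time-sliced; everything
 * else has its time slice decremented and, once it reaches zero, is
 * preempted if a context of equal or higher priority is waiting on the
 * runqueue.
 */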
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
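
/*
 * CALC_LOAD comes from <linux/sched.h> and computes an exponentially
 * decaying average in FSHIFT-bit fixed point, essentially:
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * where exp is EXP_1, EXP_5 or EXP_15 for the 1/5/15 minute averages and
 * n is the instantaneous context count scaled by FIXED_1, so the three
 * spu_avenrun[] values track the SPU runqueue the same way avenrun[]
 * tracks the CPU runqueue.
 */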

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}
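
/*
 * spusched_thread - main loop of the spu scheduler thread
 *
 * Woken by spusched_wake() every SPUSCHED_TICK jiffies; walks the spus of
 * every node and runs spusched_tick() for each context that is currently
 * loaded.  The list_mutex is dropped around the tick because
 * spusched_tick() may sleep while taking the context's state_mutex.
 */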
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
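
/*
 * LOAD_INT/LOAD_FRAC split a fixed-point avenrun value into its integer
 * part and a two-digit fraction for printing.  For example, with the
 * usual FSHIFT == 11 (FIXED_1 == 2048) a value of 1536 prints as "0.75".
 */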
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

out_stop_kthread:
	kthread_stop(spusched_task);
out_free_spu_prio:
	kfree(spu_prio);
out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}