/* run.c */
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"
/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
        struct spu_context *ctx = spu->ctx;

        /*
         * It should be impossible to preempt a context while an exception
         * is being processed, since the context switch code is specially
         * coded to deal with interrupts ... But, just in case, sanity check
         * the context pointer. It is OK to return doing nothing since
         * the exception will be regenerated when the context is resumed.
         */
        if (ctx) {
                /* Copy exception arguments into module specific structure */
                switch (irq) {
                case 0:
                        ctx->csa.class_0_pending = spu->class_0_pending;
                        ctx->csa.class_0_dar = spu->class_0_dar;
                        break;
                case 1:
                        ctx->csa.class_1_dsisr = spu->class_1_dsisr;
                        ctx->csa.class_1_dar = spu->class_1_dar;
                        break;
                case 2:
                        break;
                }

                /* ensure that the exception status has hit memory before a
                 * thread waiting on the context's stop queue is woken */
                smp_wmb();

                wake_up_all(&ctx->stop_wq);
        }
}
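
/*
 * Return non-zero once the SPU has stopped (or was never running), or when
 * there is pending class 0 or class 1 exception state that needs servicing.
 * Used as the wait condition for spufs_wait() in spufs_run_spu() below.
 */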
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
        u64 dsisr;
        u32 stopped;

        stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
                SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
        *stat = ctx->ops->status_read(ctx);
        if (*stat & stopped) {
                /*
                 * If the spu hasn't finished stopping, we need to
                 * re-read the register to get the stopped value.
                 */
                if (*stat & SPU_STATUS_RUNNING)
                        goto top;
                return 1;
        }

        if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
                return 1;

        dsisr = ctx->csa.class_1_dsisr;
        if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
                return 1;

        if (ctx->csa.class_0_pending)
                return 1;

        return 0;
}
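
/*
 * Load the isolated-mode loader into the SPU: purge the MFC DMA queue,
 * temporarily drop the SPE into privileged (kernel) mode, point the signal
 * notification registers at the loader image and start it in isolate mode,
 * then restore problem-state mode once the loader has left the LOAD phase.
 */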
static int spu_setup_isolated(struct spu_context *ctx)
{
        int ret;
        u64 __iomem *mfc_cntl;
        u64 sr1;
        u32 status;
        unsigned long timeout;
        const u32 status_loading = SPU_STATUS_RUNNING
                | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

        ret = -ENODEV;
        if (!isolated_loader)
                goto out;

        /*
         * We need to exclude userspace access to the context.
         *
         * To protect against memory access we invalidate all ptes
         * and make sure the pagefault handlers block on the mutex.
         */
        spu_unmap_mappings(ctx);

        mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

        /* purge the MFC DMA queue to ensure no spurious accesses before we
         * enter kernel mode */
        timeout = jiffies + HZ;
        out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
        while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
                        != MFC_CNTL_PURGE_DMA_COMPLETE) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
                                        __func__);
                        ret = -EIO;
                        goto out;
                }
                cond_resched();
        }

        /* put the SPE in kernel mode to allow access to the loader */
        sr1 = spu_mfc_sr1_get(ctx->spu);
        sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

        /* start the loader */
        ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
        ctx->ops->signal2_write(ctx,
                        (unsigned long)isolated_loader & 0xffffffff);

        ctx->ops->runcntl_write(ctx,
                        SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

        ret = 0;
        timeout = jiffies + HZ;
        while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
                        status_loading) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout waiting for loader\n",
                                        __func__);
                        ret = -EIO;
                        goto out_drop_priv;
                }
                cond_resched();
        }

        if (!(status & SPU_STATUS_RUNNING)) {
                /* If isolated LOAD has failed: run SPU, we will get a stop-and
                 * signal later. */
                pr_debug("%s: isolated LOAD failed\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
                ret = -EACCES;
                goto out_drop_priv;
        }

        if (!(status & SPU_STATUS_ISOLATED_STATE)) {
                /* This isn't allowed by the CBEA, but check anyway */
                pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
                ret = -EINVAL;
                goto out_drop_priv;
        }

out_drop_priv:
        /* Finished accessing the loader. Drop kernel mode */
        sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

out:
        return ret;
}
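
/*
 * Prepare a context for running: perform isolated-mode setup if requested,
 * program the privileged control and NPC registers, start the SPU, and make
 * sure the context is (or will be) activated on a physical SPU.
 */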
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
        unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
        int ret;

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        /*
         * NOSCHED is synchronous scheduling with respect to the caller.
         * The caller waits for the context to be loaded.
         */
        if (ctx->flags & SPU_CREATE_NOSCHED) {
                if (ctx->state == SPU_STATE_SAVED) {
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                }
        }

        /*
         * Apply special setup as required.
         */
        if (ctx->flags & SPU_CREATE_ISOLATE) {
                if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
                        ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }

                /*
                 * If userspace has set the runcntrl register (eg, to
                 * issue an isolated exit), we need to re-set it here
                 */
                runcntl = ctx->ops->runcntl_read(ctx) &
                        (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                if (runcntl == 0)
                        runcntl = SPU_RUNCNTL_RUNNABLE;
        } else {
                unsigned long privcntl;

                if (test_thread_flag(TIF_SINGLESTEP))
                        privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
                else
                        privcntl = SPU_PRIVCNTL_MODE_NORMAL;

                ctx->ops->privcntl_write(ctx, privcntl);
                ctx->ops->npc_write(ctx, *npc);
        }

        ctx->ops->runcntl_write(ctx, runcntl);

        if (ctx->flags & SPU_CREATE_NOSCHED) {
                spuctx_switch_state(ctx, SPU_UTIL_USER);
        } else {
                if (ctx->state == SPU_STATE_SAVED) {
                        ret = spu_activate(ctx, 0);
                        if (ret)
                                return ret;
                } else {
                        spuctx_switch_state(ctx, SPU_UTIL_USER);
                }
        }

        set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
        return 0;
}
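
/*
 * Tear down after a run: read back status and NPC for the caller, drop the
 * context from the run queue and release it, reporting -ERESTARTSYS if a
 * signal is pending.
 */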
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
                        u32 *status)
{
        int ret = 0;

        spu_del_from_rq(ctx);

        *status = ctx->ops->status_read(ctx);
        *npc = ctx->ops->npc_read(ctx);

        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
        spu_release(ctx);

        if (signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
}
/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
                                 unsigned int *npc)
{
        int ret;

        switch (*spu_ret) {
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
                /*
                 * Enter the regular syscall restarting for
                 * sys_spu_run, then restart the SPU syscall
                 * callback.
                 */
                *npc -= 8;
                ret = -ERESTARTSYS;
                break;
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * Restart block is too hard for now, just return -EINTR
                 * to the SPU.
                 * ERESTARTNOHAND comes from sys_pause, we also return
                 * -EINTR from there.
                 * Assume that we need to be restarted ourselves though.
                 */
                *spu_ret = -EINTR;
                ret = -ERESTARTSYS;
                break;
        default:
                printk(KERN_WARNING "%s: unexpected return code %ld\n",
                        __func__, *spu_ret);
                ret = 0;
        }
        return ret;
}
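
/*
 * Handle a 0x2104 stop-and-signal: fetch the syscall block from the SPU
 * local store, run the PPE-side system call with the SPU released, then
 * write the result back into local store and restart the SPU past the
 * callback instruction.
 */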
static int spu_process_callback(struct spu_context *ctx)
{
        struct spu_syscall_block s;
        u32 ls_pointer, npc;
        void __iomem *ls;
        long spu_ret;
        int ret;

        /* get syscall block from local store */
        npc = ctx->ops->npc_read(ctx) & ~3;
        ls = (void __iomem *)ctx->ops->get_ls(ctx);
        ls_pointer = in_be32(ls + npc);
        if (ls_pointer > (LS_SIZE - sizeof(s)))
                return -EFAULT;
        memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

        /* do actual syscall without pinning the spu */
        ret = 0;
        spu_ret = -ENOSYS;
        npc += 4;

        if (s.nr_ret < __NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
                if (spu_ret <= -ERESTARTSYS) {
                        ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
                }
                mutex_lock(&ctx->state_mutex);
                if (ret == -ERESTARTSYS)
                        return ret;
        }

        /* need to re-get the ls, as it may have changed when we released the
         * spu */
        ls = (void __iomem *)ctx->ops->get_ls(ctx);

        /* write result, jump over indirect pointer */
        memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
        ctx->ops->npc_write(ctx, npc);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        return ret;
}
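
/*
 * Main body of the spu_run system call: start the context, then loop
 * handling syscall callbacks, class 0/1 exceptions and pending signals
 * until the SPU stops, halts or completes a single step.
 */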
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
        int ret;
        struct spu *spu;
        u32 status;

        if (mutex_lock_interruptible(&ctx->run_mutex))
                return -ERESTARTSYS;

        ctx->event_return = 0;

        ret = spu_acquire(ctx);
        if (ret)
                goto out_unlock;

        spu_enable_spu(ctx);

        spu_update_sched_info(ctx);

        ret = spu_run_init(ctx, npc);
        if (ret) {
                spu_release(ctx);
                goto out;
        }

        do {
                ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
                if (unlikely(ret)) {
                        /*
                         * This is nasty: we need the state_mutex for all the
                         * bookkeeping even if the syscall was interrupted by
                         * a signal. ewww.
                         */
                        mutex_lock(&ctx->state_mutex);
                        break;
                }
                spu = ctx->spu;
                if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
                                                &ctx->sched_flags))) {
                        if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
                                spu_switch_notify(spu, ctx);
                                continue;
                        }
                }

                spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

                if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
                    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
                        ret = spu_process_callback(ctx);
                        if (ret)
                                break;
                        status &= ~SPU_STATUS_STOPPED_BY_STOP;
                }
                ret = spufs_handle_class1(ctx);
                if (ret)
                        break;

                ret = spufs_handle_class0(ctx);
                if (ret)
                        break;

                if (signal_pending(current))
                        ret = -ERESTARTSYS;
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                     SPU_STATUS_STOPPED_BY_HALT |
                                     SPU_STATUS_SINGLE_STEP)));

        spu_disable_spu(ctx);
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);

        spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);

        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
                ctx->stats.libassist++;

        if ((ret == 0) ||
            ((ret == -ERESTARTSYS) &&
             ((status & SPU_STATUS_STOPPED_BY_HALT) ||
              (status & SPU_STATUS_SINGLE_STEP) ||
              ((status & SPU_STATUS_STOPPED_BY_STOP) &&
               (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
                ret = status;
        /* Note: we don't need to force_sig SIGTRAP on single-step
         * since we have TIF_SINGLESTEP set, thus the kernel will do
         * it upon return from the syscall anyway.
         */
        if (unlikely(status & SPU_STATUS_SINGLE_STEP))
                ret = -ERESTARTSYS;
        else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
                force_sig(SIGTRAP, current);
                ret = -ERESTARTSYS;
        }

out:
        *event = ctx->event_return;
out_unlock:
        mutex_unlock(&ctx->run_mutex);
        return ret;
}