run.c

#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
        struct spu_context *ctx = spu->ctx;

        wake_up_all(&ctx->stop_wq);
}
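/*
 * Tell spufs_wait() whether the SPU has stopped: true if the context has
 * been descheduled, the SPU has left the running state, a page-table
 * fault is pending in the DSISR, or a class 0 error interrupt is
 * outstanding.
 */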
static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
{
        struct spu *spu;
        u64 pte_fault;

        *stat = ctx->ops->status_read(ctx);
        if (ctx->state != SPU_STATE_RUNNABLE)
                return 1;
        spu = ctx->spu;
        pte_fault = spu->dsisr &
                    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
        return (!(*stat & SPU_STATUS_RUNNING) || pte_fault ||
                spu->class_0_pending) ? 1 : 0;
}
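/*
 * Set up an isolation-enabled context: purge the MFC DMA queue, put the
 * SPE into privileged state, pass the address of the isolated loader to
 * the SPU through the signal notification registers, and wait for the
 * loader to finish before problem state is restored.
 */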
static int spu_setup_isolated(struct spu_context *ctx)
{
        int ret;
        u64 __iomem *mfc_cntl;
        u64 sr1;
        u32 status;
        unsigned long timeout;
        const u32 status_loading = SPU_STATUS_RUNNING
                | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

        ret = -ENODEV;
        if (!isolated_loader)
                goto out;

        /*
         * We need to exclude userspace access to the context.
         *
         * To protect against memory access we invalidate all ptes
         * and make sure the pagefault handlers block on the mutex.
         */
        spu_unmap_mappings(ctx);

        mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

        /* purge the MFC DMA queue to ensure no spurious accesses before we
         * enter kernel mode */
        timeout = jiffies + HZ;
        out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
        while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
                        != MFC_CNTL_PURGE_DMA_COMPLETE) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
                                        __func__);
                        ret = -EIO;
                        goto out;
                }
                cond_resched();
        }

        /* put the SPE in kernel mode to allow access to the loader */
        sr1 = spu_mfc_sr1_get(ctx->spu);
        sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

        /* start the loader */
        ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
        ctx->ops->signal2_write(ctx,
                        (unsigned long)isolated_loader & 0xffffffff);

        ctx->ops->runcntl_write(ctx,
                        SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

        ret = 0;
        timeout = jiffies + HZ;
        while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
                        status_loading) {
                if (time_after(jiffies, timeout)) {
                        printk(KERN_ERR "%s: timeout waiting for loader\n",
                                        __func__);
                        ret = -EIO;
                        goto out_drop_priv;
                }
                cond_resched();
        }

        if (!(status & SPU_STATUS_RUNNING)) {
                /* If isolated LOAD has failed: run SPU, we will get a
                 * stop-and-signal later. */
                pr_debug("%s: isolated LOAD failed\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
                ret = -EACCES;
                goto out_drop_priv;
        }

        if (!(status & SPU_STATUS_ISOLATED_STATE)) {
                /* This isn't allowed by the CBEA, but check anyway */
                pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
                ret = -EINVAL;
                goto out_drop_priv;
        }

out_drop_priv:
        /* Finished accessing the loader. Drop kernel mode */
        sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
        spu_mfc_sr1_set(ctx->spu, sr1);

out:
        return ret;
}
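/*
 * Kick off a run: enter isolated mode if the context was created with
 * SPU_CREATE_ISOLATE, otherwise load the entry point into the NPC,
 * select normal or single-step mode, and make the SPU runnable.
 */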
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        if (ctx->flags & SPU_CREATE_ISOLATE) {
                unsigned long runcntl;

                if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
                        int ret = spu_setup_isolated(ctx);
                        if (ret)
                                return ret;
                }

                /* if userspace has set the runcntl register (eg, to issue an
                 * isolated exit), we need to re-set it here */
                runcntl = ctx->ops->runcntl_read(ctx) &
                        (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
                if (runcntl == 0)
                        runcntl = SPU_RUNCNTL_RUNNABLE;
                ctx->ops->runcntl_write(ctx, runcntl);
        } else {
                unsigned long mode = SPU_PRIVCNTL_MODE_NORMAL;

                ctx->ops->npc_write(ctx, *npc);
                if (test_thread_flag(TIF_SINGLESTEP))
                        mode = SPU_PRIVCNTL_MODE_SINGLE_STEP;
                out_be64(&ctx->spu->priv2->spu_privcntl_RW, mode);
                ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        }

        spuctx_switch_state(ctx, SPU_UTIL_USER);

        return 0;
}
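/*
 * Wind down a run: read back the final status and NPC, release the
 * context, and report -ERESTARTSYS if a signal arrived while the SPU
 * was running.
 */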
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
                        u32 *status)
{
        int ret = 0;

        *status = ctx->ops->status_read(ctx);
        *npc = ctx->ops->npc_read(ctx);
        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        spu_release(ctx);

        if (signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
}
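/*
 * The context was descheduled while we waited for it to stop: finish
 * the current run, then reacquire a physical SPU and restart the
 * context, unless it already stopped or halted on its own.
 */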
static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
                                  u32 *status)
{
        int ret;

        ret = spu_run_fini(ctx, npc, status);
        if (ret)
                return ret;

        if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
                return *status;

        ret = spu_acquire_runnable(ctx, 0);
        if (ret)
                return ret;

        ret = spu_run_init(ctx, npc);
        if (ret) {
                spu_release(ctx);
                return ret;
        }
        return 0;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here, the handler instead runs in PowerPC user space code
 * while the system call was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
                          unsigned int *npc)
{
        int ret;

        switch (*spu_ret) {
        case -ERESTARTSYS:
        case -ERESTARTNOINTR:
                /*
                 * Enter the regular syscall restarting for
                 * sys_spu_run, then restart the SPU syscall
                 * callback.
                 */
                *npc -= 8;
                ret = -ERESTARTSYS;
                break;
        case -ERESTARTNOHAND:
        case -ERESTART_RESTARTBLOCK:
                /*
                 * Restart block is too hard for now, just return -EINTR
                 * to the SPU.
                 * ERESTARTNOHAND comes from sys_pause, we also return
                 * -EINTR from there.
                 * Assume that we need to be restarted ourselves though.
                 */
                *spu_ret = -EINTR;
                ret = -ERESTARTSYS;
                break;
        default:
                printk(KERN_WARNING "%s: unexpected return code %ld\n",
                                __func__, *spu_ret);
                ret = 0;
        }
        return ret;
}
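/*
 * Handle a 0x2104 stop-and-signal: fetch the syscall block through the
 * indirect pointer in local store, perform the system call on the PPE
 * side with the context released, then write the result back and let
 * the SPU continue.
 */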
int spu_process_callback(struct spu_context *ctx)
{
        struct spu_syscall_block s;
        u32 ls_pointer, npc;
        void __iomem *ls;
        long spu_ret;
        int ret;

        /* get syscall block from local store */
        npc = ctx->ops->npc_read(ctx) & ~3;
        ls = (void __iomem *)ctx->ops->get_ls(ctx);
        ls_pointer = in_be32(ls + npc);
        if (ls_pointer > (LS_SIZE - sizeof(s)))
                return -EFAULT;
        memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

        /* do actual syscall without pinning the spu */
        ret = 0;
        spu_ret = -ENOSYS;
        npc += 4;

        if (s.nr_ret < __NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
                if (spu_ret <= -ERESTARTSYS) {
                        ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
                }
                spu_acquire(ctx);
                if (ret == -ERESTARTSYS)
                        return ret;
        }

        /* write result, jump over indirect pointer */
        memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
        ctx->ops->npc_write(ctx, npc);
        ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
        return ret;
}
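/*
 * Run the bottom half for a pending class 0 (error) interrupt and check
 * for pending signals.
 */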
static inline int spu_process_events(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        int ret = 0;

        if (spu->class_0_pending)
                ret = spu_irq_class_0_bottom(spu);

        if (!ret && signal_pending(current))
                ret = -ERESTARTSYS;

        return ret;
}
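/*
 * Back end of the spu_run system call: start the SPU and loop until it
 * stops for good, servicing 0x2104 callbacks, class 1 page faults and
 * scheduler preemption along the way.
 */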
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
        int ret;
        u32 status;

        if (mutex_lock_interruptible(&ctx->run_mutex))
                return -ERESTARTSYS;

        ctx->ops->master_start(ctx);
        ctx->event_return = 0;

        spu_acquire(ctx);
        if (ctx->state == SPU_STATE_SAVED) {
                __spu_update_sched_info(ctx);

                ret = spu_activate(ctx, 0);
                if (ret) {
                        spu_release(ctx);
                        goto out;
                }
        } else {
                /*
                 * We have to update the scheduling priority under active_mutex
                 * to protect against find_victim().
                 */
                spu_update_sched_info(ctx);
        }

        ret = spu_run_init(ctx, npc);
        if (ret) {
                spu_release(ctx);
                goto out;
        }

        do {
                ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
                if (unlikely(ret))
                        break;

                spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

                if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
                    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
                        ret = spu_process_callback(ctx);
                        if (ret)
                                break;
                        status &= ~SPU_STATUS_STOPPED_BY_STOP;
                }
                ret = spufs_handle_class1(ctx);
                if (ret)
                        break;

                if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                        ret = spu_reacquire_runnable(ctx, npc, &status);
                        if (ret)
                                goto out2;
                        continue;
                }
                ret = spu_process_events(ctx);

        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                     SPU_STATUS_STOPPED_BY_HALT |
                                     SPU_STATUS_SINGLE_STEP)));

        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
            (ctx->state == SPU_STATE_RUNNABLE))
                ctx->stats.libassist++;

        ctx->ops->master_stop(ctx);
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);

out2:
        if ((ret == 0) ||
            ((ret == -ERESTARTSYS) &&
             ((status & SPU_STATUS_STOPPED_BY_HALT) ||
              (status & SPU_STATUS_SINGLE_STEP) ||
              ((status & SPU_STATUS_STOPPED_BY_STOP) &&
               (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
                ret = status;

        /* Note: we don't need to force_sig SIGTRAP on single-step
         * since we have TIF_SINGLESTEP set, thus the kernel will do
         * it upon return from the syscall anyway.
         */
        if ((status & SPU_STATUS_STOPPED_BY_STOP)
            && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
                force_sig(SIGTRAP, current);
                ret = -ERESTARTSYS;
        }

out:
        *event = ctx->event_return;
        mutex_unlock(&ctx->run_mutex);
        return ret;
}