/*
 * run.c - SPU context run control for spufs.
 */
  1. #define DEBUG
  2. #include <linux/wait.h>
  3. #include <linux/ptrace.h>
  4. #include <asm/spu.h>
  5. #include <asm/unistd.h>
  6. #include "spufs.h"
  7. /* interrupt-level stop callback function. */
  8. void spufs_stop_callback(struct spu *spu)
  9. {
  10. struct spu_context *ctx = spu->ctx;
  11. wake_up_all(&ctx->stop_wq);
  12. }
/*
 * Interrupt-level DMA-error callback.
 *
 * If the context was created with SPU_CREATE_EVENTS_ENABLED, the error
 * is reported asynchronously: the event bit is accumulated into
 * ctx->event_return and the sleeper in spufs_run_spu() is woken so it
 * can pass the event back to user space.  Otherwise the error is turned
 * directly into a signal on the current (controlling) thread.
 */
void spufs_dma_callback(struct spu *spu, int type)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		switch (type) {
		case SPE_EVENT_DMA_ALIGNMENT:
		case SPE_EVENT_SPE_DATA_STORAGE:
		case SPE_EVENT_INVALID_DMA:
			/* invalid or misaligned DMA maps to a bus error */
			force_sig(SIGBUS, /* info, */ current);
			break;
		case SPE_EVENT_SPE_ERROR:
			force_sig(SIGILL, /* info */ current);
			break;
		}
	}
}
  32. static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
  33. {
  34. struct spu *spu;
  35. u64 pte_fault;
  36. *stat = ctx->ops->status_read(ctx);
  37. if (ctx->state != SPU_STATE_RUNNABLE)
  38. return 1;
  39. spu = ctx->spu;
  40. pte_fault = spu->dsisr &
  41. (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
  42. return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
  43. }
  44. static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
  45. {
  46. int ret;
  47. unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
  48. if ((ret = spu_acquire_runnable(ctx)) != 0)
  49. return ret;
  50. /* if we're in isolated mode, we would have started the SPU
  51. * earlier, so don't do it again now. */
  52. if (!(ctx->flags & SPU_CREATE_ISOLATE)) {
  53. ctx->ops->npc_write(ctx, *npc);
  54. ctx->ops->runcntl_write(ctx, runcntl);
  55. }
  56. return 0;
  57. }
  58. static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
  59. u32 * status)
  60. {
  61. int ret = 0;
  62. *status = ctx->ops->status_read(ctx);
  63. *npc = ctx->ops->npc_read(ctx);
  64. spu_release(ctx);
  65. if (signal_pending(current))
  66. ret = -ERESTARTSYS;
  67. return ret;
  68. }
  69. static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
  70. u32 *status)
  71. {
  72. int ret;
  73. if ((ret = spu_run_fini(ctx, npc, status)) != 0)
  74. return ret;
  75. if (*status & (SPU_STATUS_STOPPED_BY_STOP |
  76. SPU_STATUS_STOPPED_BY_HALT)) {
  77. return *status;
  78. }
  79. if ((ret = spu_run_init(ctx, npc)) != 0)
  80. return ret;
  81. return 0;
  82. }
  83. /*
  84. * SPU syscall restarting is tricky because we violate the basic
  85. * assumption that the signal handler is running on the interrupted
  86. * thread. Here instead, the handler runs on PowerPC user space code,
  87. * while the syscall was called from the SPU.
  88. * This means we can only do a very rough approximation of POSIX
  89. * signal semantics.
  90. */
  91. int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
  92. unsigned int *npc)
  93. {
  94. int ret;
  95. switch (*spu_ret) {
  96. case -ERESTARTSYS:
  97. case -ERESTARTNOINTR:
  98. /*
  99. * Enter the regular syscall restarting for
  100. * sys_spu_run, then restart the SPU syscall
  101. * callback.
  102. */
  103. *npc -= 8;
  104. ret = -ERESTARTSYS;
  105. break;
  106. case -ERESTARTNOHAND:
  107. case -ERESTART_RESTARTBLOCK:
  108. /*
  109. * Restart block is too hard for now, just return -EINTR
  110. * to the SPU.
  111. * ERESTARTNOHAND comes from sys_pause, we also return
  112. * -EINTR from there.
  113. * Assume that we need to be restarted ourselves though.
  114. */
  115. *spu_ret = -EINTR;
  116. ret = -ERESTARTSYS;
  117. break;
  118. default:
  119. printk(KERN_WARNING "%s: unexpected return code %ld\n",
  120. __FUNCTION__, *spu_ret);
  121. ret = 0;
  122. }
  123. return ret;
  124. }
/*
 * Handle an SPU-initiated syscall: the SPU stopped (code 0x2104, see
 * spufs_run_spu) after placing, in the word at its stop address, a
 * local-store pointer to a struct spu_syscall_block.  Run the requested
 * PowerPC syscall on the SPU's behalf, write the result back into local
 * store, and restart the SPU past the stop instruction.
 *
 * Returns 0 on success, -EFAULT for an out-of-range local-store
 * pointer, or -ERESTARTSYS when sys_spu_run must be restarted.
 */
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	/* NOTE(review): npc itself is not range-checked before this
	 * dereference — presumably the hardware constrains the NPC to
	 * local-store size; confirm. */
	ls_pointer = *(u32*)(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	/* copy the block out before the context lock is dropped below */
	memcpy(&s, ls + ls_pointer, sizeof (s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;	/* resume past the stop-and-signal instruction */

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		/* NOTE(review): ls was fetched before spu_release(); if the
		 * context can migrate to another SPU while unlocked, the
		 * mapping below may be stale — verify. */
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
  160. static inline int spu_process_events(struct spu_context *ctx)
  161. {
  162. struct spu *spu = ctx->spu;
  163. u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
  164. int ret = 0;
  165. if (spu->dsisr & pte_fault)
  166. ret = spu_irq_class_1_bottom(spu);
  167. if (spu->class_0_pending)
  168. ret = spu_irq_class_0_bottom(spu);
  169. if (!ret && signal_pending(current))
  170. ret = -ERESTARTSYS;
  171. return ret;
  172. }
/*
 * Back end of the spu_run system call: start the SPU at *npc and loop
 * until it stops or halts for a reason the caller must see.  On return,
 * *npc holds the final SPU program counter, *event the accumulated
 * asynchronous events, and the return value is either the SPU status
 * word or a negative errno.
 */
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	/* only one thread may run a given context at a time */
	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ctx->event_return = 0;
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		/* sleep until spu_stopped() reports something to do */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		/* stop code 0x2104 is the SPU-to-PPU syscall convention
		 * handled by spu_process_callback() */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			/* the syscall was serviced; this stop is not final */
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			/* context was scheduled away; put it back on an SPU */
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out2;
			continue;
		}
		ret = spu_process_events(ctx);
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->runcntl_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

out2:
	/*
	 * On a clean exit — or a signal interruption at a stop/halt that
	 * was not the 0x2104 syscall stop — report the raw status word to
	 * the caller instead of an errno.
	 */
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* under ptrace, stop code 0x3fff raises SIGTRAP — presumably the
	 * debugger breakpoint convention; confirm against the debugger */
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if ((status & SPU_STATUS_STOPPED_BY_STOP)
		    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
			force_sig(SIGTRAP, current);
			ret = -ERESTARTSYS;
		}
	}

out:
	*event = ctx->event_return;
	up(&ctx->run_sema);
	return ret;
}