/*
 * run.c — spufs SPU context run support: start/stop an SPU context,
 * wait for it to stop, and service syscall callbacks and fault events.
 */
  1. #define DEBUG
  2. #include <linux/wait.h>
  3. #include <linux/ptrace.h>
  4. #include <asm/spu.h>
  5. #include <asm/unistd.h>
  6. #include "spufs.h"
  7. /* interrupt-level stop callback function. */
  8. void spufs_stop_callback(struct spu *spu)
  9. {
  10. struct spu_context *ctx = spu->ctx;
  11. wake_up_all(&ctx->stop_wq);
  12. }
/*
 * Interrupt-level DMA-error callback.
 *
 * If the context was created with SPU_CREATE_EVENTS_ENABLED, accumulate
 * the event type in ctx->event_return (reported to userspace by
 * spufs_run_spu()) and wake the stop-queue waiters.  Otherwise translate
 * the error directly into a signal on the current task: SIGBUS for DMA
 * alignment/invalid-DMA errors, SIGILL for internal SPE errors.
 */
void spufs_dma_callback(struct spu *spu, int type)
{
	struct spu_context *ctx = spu->ctx;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
	} else {
		switch (type) {
		case SPE_EVENT_DMA_ALIGNMENT:
		case SPE_EVENT_INVALID_DMA:
			force_sig(SIGBUS, /* info, */ current);
			break;
		case SPE_EVENT_SPE_ERROR:
			force_sig(SIGILL, /* info */ current);
			break;
		}
		/* other event types are silently ignored here */
	}
}
  31. static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
  32. {
  33. struct spu *spu;
  34. u64 pte_fault;
  35. *stat = ctx->ops->status_read(ctx);
  36. if (ctx->state != SPU_STATE_RUNNABLE)
  37. return 1;
  38. spu = ctx->spu;
  39. pte_fault = spu->dsisr &
  40. (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
  41. return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
  42. }
  43. static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
  44. {
  45. int ret;
  46. unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
  47. if ((ret = spu_acquire_runnable(ctx)) != 0)
  48. return ret;
  49. /* if we're in isolated mode, we would have started the SPU
  50. * earlier, so don't do it again now. */
  51. if (!(ctx->flags & SPU_CREATE_ISOLATE)) {
  52. ctx->ops->npc_write(ctx, *npc);
  53. ctx->ops->runcntl_write(ctx, runcntl);
  54. }
  55. return 0;
  56. }
  57. static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
  58. u32 * status)
  59. {
  60. int ret = 0;
  61. *status = ctx->ops->status_read(ctx);
  62. *npc = ctx->ops->npc_read(ctx);
  63. spu_release(ctx);
  64. if (signal_pending(current))
  65. ret = -ERESTARTSYS;
  66. if (unlikely(current->ptrace & PT_PTRACED)) {
  67. if ((*status & SPU_STATUS_STOPPED_BY_STOP)
  68. && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
  69. force_sig(SIGTRAP, current);
  70. ret = -ERESTARTSYS;
  71. }
  72. }
  73. return ret;
  74. }
  75. static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
  76. u32 *status)
  77. {
  78. int ret;
  79. if ((ret = spu_run_fini(ctx, npc, status)) != 0)
  80. return ret;
  81. if (*status & (SPU_STATUS_STOPPED_BY_STOP |
  82. SPU_STATUS_STOPPED_BY_HALT)) {
  83. return *status;
  84. }
  85. if ((ret = spu_run_init(ctx, npc)) != 0)
  86. return ret;
  87. return 0;
  88. }
  89. /*
  90. * SPU syscall restarting is tricky because we violate the basic
  91. * assumption that the signal handler is running on the interrupted
  92. * thread. Here instead, the handler runs on PowerPC user space code,
  93. * while the syscall was called from the SPU.
  94. * This means we can only do a very rough approximation of POSIX
  95. * signal semantics.
  96. */
  97. int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
  98. unsigned int *npc)
  99. {
  100. int ret;
  101. switch (*spu_ret) {
  102. case -ERESTARTSYS:
  103. case -ERESTARTNOINTR:
  104. /*
  105. * Enter the regular syscall restarting for
  106. * sys_spu_run, then restart the SPU syscall
  107. * callback.
  108. */
  109. *npc -= 8;
  110. ret = -ERESTARTSYS;
  111. break;
  112. case -ERESTARTNOHAND:
  113. case -ERESTART_RESTARTBLOCK:
  114. /*
  115. * Restart block is too hard for now, just return -EINTR
  116. * to the SPU.
  117. * ERESTARTNOHAND comes from sys_pause, we also return
  118. * -EINTR from there.
  119. * Assume that we need to be restarted ourselves though.
  120. */
  121. *spu_ret = -EINTR;
  122. ret = -ERESTARTSYS;
  123. break;
  124. default:
  125. printk(KERN_WARNING "%s: unexpected return code %ld\n",
  126. __FUNCTION__, *spu_ret);
  127. ret = 0;
  128. }
  129. return ret;
  130. }
/*
 * Service a syscall request from SPU code.  The SPU stopped after
 * storing, at the stop location, a 32-bit local-store pointer to a
 * struct spu_syscall_block describing the call.  The syscall runs on
 * the PowerPC side with the context released, so the SPU may be
 * scheduled away meanwhile.
 *
 * Returns 0 on success (SPU restarted past the pointer word), -EFAULT
 * for an out-of-bounds block pointer, or -ERESTARTSYS when the whole
 * sys_spu_run must be restarted.
 */
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	/* NOTE(review): npc is read from the SPU NPC register and assumed to
	 * lie within local store bounds — confirm the hardware guarantees
	 * this before the 4-byte read below. */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	ls_pointer = *(u32*)(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy(&s, ls + ls_pointer, sizeof (s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4; /* step past the word holding the block pointer */

	if (s.nr_ret < __NR_syscalls) {
		/* drop the context so the SPU can be scheduled away while
		 * the (possibly blocking) syscall runs */
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
  166. static inline int spu_process_events(struct spu_context *ctx)
  167. {
  168. struct spu *spu = ctx->spu;
  169. u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
  170. int ret = 0;
  171. if (spu->dsisr & pte_fault)
  172. ret = spu_irq_class_1_bottom(spu);
  173. if (spu->class_0_pending)
  174. ret = spu_irq_class_0_bottom(spu);
  175. if (!ret && signal_pending(current))
  176. ret = -ERESTARTSYS;
  177. return ret;
  178. }
/*
 * Run SPU code in the given context until it stops or halts, servicing
 * syscall callbacks (stop code 0x2104), faults, and rescheduling along
 * the way.  On return *npc holds the final SPU program counter and
 * *event the accumulated event mask.  Returns the SPU status word on a
 * normal stop, or a negative errno.
 */
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	/* serialize: only one thread may run a given context at a time */
	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ctx->event_return = 0;
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		/* sleep until the SPU stops, faults, or is scheduled away */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;

		/* stop code 0x2104 is a syscall request from SPU code */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			/* consume the stop so the loop condition keeps going */
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}

		/* lost the physical SPU: finish this run and start a new one */
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out;
			continue;
		}

		ret = spu_process_events(ctx);
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->runcntl_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	/* on success, report the raw status word to userspace */
	if (!ret)
		ret = status;
	spu_yield(ctx);

out:
	*event = ctx->event_return;
	up(&ctx->run_sema);
	return ret;
}