/* run.c — SPU context run loop for spufs */
#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/unistd.h>

#include "spufs.h"
  6. /* interrupt-level stop callback function. */
  7. void spufs_stop_callback(struct spu *spu)
  8. {
  9. struct spu_context *ctx = spu->ctx;
  10. wake_up_all(&ctx->stop_wq);
  11. }
  12. void spufs_dma_callback(struct spu *spu, int type)
  13. {
  14. struct spu_context *ctx = spu->ctx;
  15. if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
  16. ctx->event_return |= type;
  17. wake_up_all(&ctx->stop_wq);
  18. } else {
  19. switch (type) {
  20. case SPE_EVENT_DMA_ALIGNMENT:
  21. case SPE_EVENT_INVALID_DMA:
  22. force_sig(SIGBUS, /* info, */ current);
  23. break;
  24. case SPE_EVENT_SPE_ERROR:
  25. force_sig(SIGILL, /* info */ current);
  26. break;
  27. }
  28. }
  29. }
  30. static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
  31. {
  32. struct spu *spu;
  33. u64 pte_fault;
  34. *stat = ctx->ops->status_read(ctx);
  35. if (ctx->state != SPU_STATE_RUNNABLE)
  36. return 1;
  37. spu = ctx->spu;
  38. pte_fault = spu->dsisr &
  39. (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
  40. return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
  41. }
  42. static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
  43. {
  44. int ret;
  45. if ((ret = spu_acquire_runnable(ctx)) != 0)
  46. return ret;
  47. ctx->ops->npc_write(ctx, *npc);
  48. ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
  49. return 0;
  50. }
  51. static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
  52. u32 * status)
  53. {
  54. int ret = 0;
  55. *status = ctx->ops->status_read(ctx);
  56. *npc = ctx->ops->npc_read(ctx);
  57. spu_release(ctx);
  58. if (signal_pending(current))
  59. ret = -ERESTARTSYS;
  60. if (unlikely(current->ptrace & PT_PTRACED)) {
  61. if ((*status & SPU_STATUS_STOPPED_BY_STOP)
  62. && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
  63. force_sig(SIGTRAP, current);
  64. ret = -ERESTARTSYS;
  65. }
  66. }
  67. return ret;
  68. }
  69. static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
  70. u32 *status)
  71. {
  72. int ret;
  73. if ((ret = spu_run_fini(ctx, npc, status)) != 0)
  74. return ret;
  75. if (*status & (SPU_STATUS_STOPPED_BY_STOP |
  76. SPU_STATUS_STOPPED_BY_HALT)) {
  77. return *status;
  78. }
  79. if ((ret = spu_run_init(ctx, npc)) != 0)
  80. return ret;
  81. return 0;
  82. }
/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread.  Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
  91. int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
  92. unsigned int *npc)
  93. {
  94. int ret;
  95. switch (*spu_ret) {
  96. case -ERESTARTSYS:
  97. case -ERESTARTNOINTR:
  98. /*
  99. * Enter the regular syscall restarting for
  100. * sys_spu_run, then restart the SPU syscall
  101. * callback.
  102. */
  103. *npc -= 8;
  104. ret = -ERESTARTSYS;
  105. break;
  106. case -ERESTARTNOHAND:
  107. case -ERESTART_RESTARTBLOCK:
  108. /*
  109. * Restart block is too hard for now, just return -EINTR
  110. * to the SPU.
  111. * ERESTARTNOHAND comes from sys_pause, we also return
  112. * -EINTR from there.
  113. * Assume that we need to be restarted ourselves though.
  114. */
  115. *spu_ret = -EINTR;
  116. ret = -ERESTARTSYS;
  117. break;
  118. default:
  119. printk(KERN_WARNING "%s: unexpected return code %ld\n",
  120. __FUNCTION__, *spu_ret);
  121. ret = 0;
  122. }
  123. return ret;
  124. }
/*
 * Execute a host-side system call on behalf of the SPU program.
 *
 * The SPU stopped at a stop-and-signal whose operand word (at the
 * current npc in local store) holds an offset to a struct
 * spu_syscall_block, also in local store.  Copy that block out, run the
 * syscall on the PPU with the context released, write the result back,
 * and restart the SPU just past the operand word.
 *
 * Returns 0 normally, -EFAULT for an out-of-bounds block pointer, or
 * -ERESTARTSYS when the syscall must be restarted (in which case the
 * SPU is left stopped and the result is not written back).
 */
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	/* the word at npc is an LS offset to the syscall block */
	ls_pointer = *(u32*)(ls + npc);
	/* whole block must lie inside local store */
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy(&s, ls + ls_pointer, sizeof (s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	/* resume one word past the stop instruction's operand */
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		/* drop the context so the SPU can be scheduled meanwhile */
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		/* restart requested: leave SPU stopped, caller re-enters */
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
  160. static inline int spu_process_events(struct spu_context *ctx)
  161. {
  162. struct spu *spu = ctx->spu;
  163. u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
  164. int ret = 0;
  165. if (spu->dsisr & pte_fault)
  166. ret = spu_irq_class_1_bottom(spu);
  167. if (spu->class_0_pending)
  168. ret = spu_irq_class_0_bottom(spu);
  169. if (!ret && signal_pending(current))
  170. ret = -ERESTARTSYS;
  171. return ret;
  172. }
/*
 * Main loop behind sys_spu_run: start the SPU and keep it running until
 * it stops or halts for good, servicing SPU-side syscalls, context
 * switches, faults, and signals along the way.
 *
 * On return, *npc holds the final SPU program counter and *event the
 * accumulated event bits (see spufs_dma_callback).  Returns the final
 * SPU status word on success or a negative errno.
 */
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		   u32 *npc, u32 *event)
{
	int ret;
	u32 status;

	/* only one thread may run a given context at a time */
	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ctx->event_return = 0;
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		/* sleep until spu_stopped() says the SPU needs attention */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret))
			break;
		/*
		 * Stop code 0x2104 appears to be the SPU-side syscall
		 * convention handled by spu_process_callback — confirm
		 * against the SPE ABI.
		 */
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			/* syscall serviced; this stop no longer terminates */
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		/* context was scheduled away: finish and restart the run */
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, &status);
			if (ret)
				goto out;
			continue;
		}
		ret = spu_process_events(ctx);

	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->runcntl_stop(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	/* no error: report the SPU's final status word to the caller */
	if (!ret)
		ret = status;
	spu_yield(ctx);

out:
	*event = ctx->event_return;
	up(&ctx->run_sema);
	return ret;
}