run.c 5.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223
  1. #include <linux/wait.h>
  2. #include <linux/ptrace.h>
  3. #include <asm/spu.h>
  4. #include <asm/unistd.h>
  5. #include "spufs.h"
  6. /* interrupt-level stop callback function. */
  7. void spufs_stop_callback(struct spu *spu)
  8. {
  9. struct spu_context *ctx = spu->ctx;
  10. wake_up_all(&ctx->stop_wq);
  11. }
  12. static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
  13. {
  14. struct spu *spu;
  15. u64 pte_fault;
  16. *stat = ctx->ops->status_read(ctx);
  17. if (ctx->state != SPU_STATE_RUNNABLE)
  18. return 1;
  19. spu = ctx->spu;
  20. pte_fault = spu->dsisr &
  21. (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
  22. return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
  23. }
  24. static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
  25. u32 * status)
  26. {
  27. int ret;
  28. if ((ret = spu_acquire_runnable(ctx)) != 0)
  29. return ret;
  30. ctx->ops->npc_write(ctx, *npc);
  31. ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
  32. return 0;
  33. }
  34. static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
  35. u32 * status)
  36. {
  37. int ret = 0;
  38. *status = ctx->ops->status_read(ctx);
  39. *npc = ctx->ops->npc_read(ctx);
  40. spu_release(ctx);
  41. if (signal_pending(current))
  42. ret = -ERESTARTSYS;
  43. if (unlikely(current->ptrace & PT_PTRACED)) {
  44. if ((*status & SPU_STATUS_STOPPED_BY_STOP)
  45. && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
  46. force_sig(SIGTRAP, current);
  47. ret = -ERESTARTSYS;
  48. }
  49. }
  50. return ret;
  51. }
  52. static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
  53. u32 *status)
  54. {
  55. int ret;
  56. if ((ret = spu_run_fini(ctx, npc, status)) != 0)
  57. return ret;
  58. if (*status & (SPU_STATUS_STOPPED_BY_STOP |
  59. SPU_STATUS_STOPPED_BY_HALT)) {
  60. return *status;
  61. }
  62. if ((ret = spu_run_init(ctx, npc, status)) != 0)
  63. return ret;
  64. return 0;
  65. }
  66. /*
  67. * SPU syscall restarting is tricky because we violate the basic
  68. * assumption that the signal handler is running on the interrupted
  69. * thread. Here instead, the handler runs on PowerPC user space code,
  70. * while the syscall was called from the SPU.
  71. * This means we can only do a very rough approximation of POSIX
  72. * signal semantics.
  73. */
  74. int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
  75. unsigned int *npc)
  76. {
  77. int ret;
  78. switch (*spu_ret) {
  79. case -ERESTARTSYS:
  80. case -ERESTARTNOINTR:
  81. /*
  82. * Enter the regular syscall restarting for
  83. * sys_spu_run, then restart the SPU syscall
  84. * callback.
  85. */
  86. *npc -= 8;
  87. ret = -ERESTARTSYS;
  88. break;
  89. case -ERESTARTNOHAND:
  90. case -ERESTART_RESTARTBLOCK:
  91. /*
  92. * Restart block is too hard for now, just return -EINTR
  93. * to the SPU.
  94. * ERESTARTNOHAND comes from sys_pause, we also return
  95. * -EINTR from there.
  96. * Assume that we need to be restarted ourselves though.
  97. */
  98. *spu_ret = -EINTR;
  99. ret = -ERESTARTSYS;
  100. break;
  101. default:
  102. printk(KERN_WARNING "%s: unexpected return code %ld\n",
  103. __FUNCTION__, *spu_ret);
  104. ret = 0;
  105. }
  106. return ret;
  107. }
/*
 * Service a stopped SPU that requested a PPE-side system call: fetch
 * the syscall block from local store, run the syscall on the PPE with
 * the context unlocked, write the result back, and restart the SPU.
 * Returns 0 or -ERESTARTSYS/-EFAULT.
 */
int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	char *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx);
	ls = ctx->ops->get_ls(ctx);
	/* the word at the stop address is an LS offset to the block */
	ls_pointer = *(u32*)(ls + npc);
	/* reject offsets whose block would run past the end of local store */
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy(&s, ls + ls_pointer, sizeof (s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	/* advance past the stop-and-signal instruction (4 bytes) */
	npc += 4;
	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		spu_acquire(ctx);
		/* NOTE(review): 'ls' was obtained before spu_release(); if
		 * the local-store mapping can change while the context is
		 * unlocked, it may be stale below -- confirm get_ls()
		 * semantics. */
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* write result, jump over indirect pointer */
	memcpy(ls + ls_pointer, &spu_ret, sizeof (spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
  143. static inline int spu_process_events(struct spu_context *ctx)
  144. {
  145. struct spu *spu = ctx->spu;
  146. u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
  147. int ret = 0;
  148. if (spu->dsisr & pte_fault)
  149. ret = spu_irq_class_1_bottom(spu);
  150. if (spu->class_0_pending)
  151. ret = spu_irq_class_0_bottom(spu);
  152. if (!ret && signal_pending(current))
  153. ret = -ERESTARTSYS;
  154. return ret;
  155. }
/*
 * Run an SPU program: start the SPU at *npc and sleep until it stops,
 * servicing syscall callbacks, faults and signals along the way.
 * On return *npc and *status hold the final program counter and
 * status register; returns the status, or a negative error code.
 */
long spufs_run_spu(struct file *file, struct spu_context *ctx,
		u32 * npc, u32 * status)
{
	int ret;

	/* only one thread may run a given context at a time */
	if (down_interruptible(&ctx->run_sema))
		return -ERESTARTSYS;

	ret = spu_run_init(ctx, npc, status);
	if (ret)
		goto out;

	do {
		/* sleep until the SPU stops or needs service */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
		if (unlikely(ret))
			break;
		/* stop code 0x2104 marks a PPE syscall callback request */
		if ((*status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (*status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			/* callback handled; don't treat it as a real stop */
			*status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		/* scheduled away while asleep: reacquire and restart */
		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
			ret = spu_reacquire_runnable(ctx, npc, status);
			if (ret)
				goto out;
			continue;
		}
		ret = spu_process_events(ctx);
	} while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT)));

	ctx->ops->runcntl_stop(ctx);
	ret = spu_run_fini(ctx, npc, status);
	/* on clean completion, report the final status word to the caller */
	if (!ret)
		ret = *status;
	spu_yield(ctx);

out:
	up(&ctx->run_sema);
	return ret;
}