/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/hardirq.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/stack.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>

/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int no_idle_nap;
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads.\n");
                no_idle_nap = 1;
        } else if (!strcmp(str, "halt"))
                no_idle_nap = 0;
        else
                return -1;

        return 0;
}
early_param("idle", idle_setup);
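
/*
 * For example, booting with "idle=poll" on the kernel command line
 * selects the busy-wait loop in cpu_idle() below, while "idle=halt"
 * (matching the default no_idle_nap == 0) lets idle cores nap via
 * _cpu_idle().
 */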

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        current_thread_info()->status |= TS_POLLING;

        if (no_idle_nap) {
                while (1) {
                        while (!need_resched())
                                cpu_relax();
                        schedule();
                }
        }

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
                while (!need_resched()) {
                        if (cpu_is_offline(cpu))
                                BUG();  /* no HOTPLUG_CPU */

                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        current_thread_info()->status &= ~TS_POLLING;
                        /*
                         * TS_POLLING-cleared state must be visible before we
                         * test NEED_RESCHED:
                         */
                        smp_mb();
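                        /*
                         * While TS_POLLING is set, a remote CPU setting
                         * NEED_RESCHED may skip the reschedule IPI on the
                         * assumption that we are polling the flag; clearing
                         * TS_POLLING and issuing the full barrier above is
                         * what makes the final need_resched() check below
                         * safe to rely on before napping.
                         */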
                        if (!need_resched())
                                _cpu_idle();
                        else
                                local_irq_enable();
                        current_thread_info()->status |= TS_POLLING;
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

struct thread_info *alloc_thread_info_node(struct task_struct *task, int node)
{
        struct page *page;
        gfp_t flags = GFP_KERNEL;

#ifdef CONFIG_DEBUG_STACK_USAGE
        flags |= __GFP_ZERO;
#endif

        page = alloc_pages_node(node, flags, THREAD_SIZE_ORDER);
        if (!page)
                return NULL;

        return (struct thread_info *)page_address(page);
}

/*
 * Free a thread_info node, and all of its derivative
 * data structures.
 */
void free_thread_info(struct thread_info *info)
{
        struct single_step_state *step_state = info->step_state;

#ifdef CONFIG_HARDWALL
        /*
         * We free a thread_info from the context of the task that has
         * been scheduled next, so the original task is already dead.
         * Calling deactivate here just frees up the data structures.
         * If the task we're freeing held the last reference to a
         * hardwall fd, it would have been released prior to this point
         * anyway via exit_files(), and "hardwall" would be NULL by now.
         */
        if (info->task->thread.hardwall)
                hardwall_deactivate(info->task);
#endif

        if (step_state) {
                /*
                 * FIXME: we don't munmap step_state->buffer
                 * because the mm_struct for this process (info->task->mm)
                 * has already been zeroed in exit_mm().  Keeping a
                 * reference to it here seems like a bad move, so this
                 * means we can't munmap() the buffer, and therefore if we
                 * ptrace multiple threads in a process, we will slowly
                 * leak user memory.  (Note that as soon as the last
                 * thread in a process dies, we will reclaim all user
                 * memory including single-step buffers in the usual way.)
                 * We should either assign a kernel VA to this buffer
                 * somehow, or we should associate the buffer(s) with the
                 * mm itself so we can clean them up that way.
                 */
                kfree(step_state);
        }

        free_pages((unsigned long)info, THREAD_SIZE_ORDER);
}

static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long stack_size,
                struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        unsigned long ksp;

        /*
         * When creating a new kernel thread we pass sp as zero.
         * Assign it to a reasonable value now that we have the stack.
         */
        if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
                sp = KSTK_TOP(p);

        /*
         * Do not clone step state from the parent; each thread
         * must make its own lazily.
         */
        task_thread_info(p)->step_state = NULL;

        /*
         * Start new thread in ret_from_fork so it schedules properly
         * and then return from interrupt like the parent.
         */
        p->thread.pc = (unsigned long) ret_from_fork;

        /* Save user stack top pointer so we can ID the stack vm area later. */
        p->thread.usp0 = sp;

        /* Record the pid of the process that created this one. */
        p->thread.creator_pid = current->pid;

        /*
         * Copy the registers onto the kernel stack so the
         * return-from-interrupt code will reload it into registers.
         */
        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->regs[0] = 0;  /* return value is zero */
        childregs->sp = sp;      /* override with new user stack pointer */

        /*
         * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
         * which is passed in as arg #5 to sys_clone().
         */
        if (clone_flags & CLONE_SETTLS)
                childregs->tp = regs->regs[4];

        /*
         * Copy the callee-saved registers from the passed pt_regs struct
         * into the context-switch callee-saved registers area.
         * This way when we start the interrupt-return sequence, the
         * callee-save registers will be correctly in registers, which
         * is how we assume the compiler leaves them as we start doing
         * the normal return-from-interrupt path after calling C code.
         * Zero out the C ABI save area to mark the top of the stack.
         */
        ksp = (unsigned long) childregs;
        ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
        ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
        ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
        memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
               CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
        ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
        ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
        p->thread.ksp = ksp;
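
        /*
         * The child's kernel stack is now laid out like this, from
         * higher to lower addresses:
         *
         *   task_pt_regs(p)             full pt_regs copy (childregs)
         *   C ABI save area (zeroed)    interrupt-entry frame
         *   callee-saved registers      copied from the parent's regs
         *   C ABI save area (zeroed)    __switch_to() frame
         *   p->thread.ksp  -----------> bottom of the frames above
         */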

#if CHIP_HAS_TILE_DMA()
        /*
         * No DMA in the new thread.  We model this on the fact that
         * fork() clears the pending signals, alarms, and aio for the child.
         */
        memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
        memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
        /* Likewise, the new thread is not running static processor code. */
        p->thread.sn_proc_running = 0;
        memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
        /* New thread has its miscellaneous processor state bits clear. */
        p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
        /* New thread does not own any networks. */
        p->thread.hardwall = NULL;
#endif

        /*
         * Start the new thread with the current architecture state
         * (user interrupt masks, etc.).
         */
        save_arch_state(&p->thread);

        return 0;
}

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
        static struct task_struct corrupt = { .comm = "<corrupt>" };
        struct task_struct *tsk = current;

        if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
                     (void *)tsk > high_memory ||
                     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
                pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
                tsk = &corrupt;
        }

        return tsk;
}

/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
        struct task_struct *tsk = current;

        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
                     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
                     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));

        return prev;
}
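
/*
 * Fill in the ELF register set for "tsk" from its saved pt_regs;
 * the nonzero return tells the core-dump code the registers are valid.
 */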
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *ptregs = task_pt_regs(tsk);

        elf_core_copy_regs(regs, ptregs);

        return 1;
}

#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}

/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
        unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
        unsigned long post_suspend_state;

        /* If we're running, suspend the engine. */
        if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

        /*
         * Wait for the engine to idle, then save regs.  Note that we
         * want to record the "running" bit from before suspension,
         * and the "done" bit from after, so that we can properly
         * distinguish a case where the user suspended the engine from
         * the case where the kernel suspended as part of the context
         * swap.
         */
        do {
                post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
        } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

        dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
        dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
        dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
        dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
        dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
        dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
        dma->byte = __insn_mfspr(SPR_DMA_BYTE);
        dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
                (post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}

/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
        const struct tile_dma_state *dma = &t->tile_dma_state;

        /*
         * The only way to restore the done bit is to run a zero
         * length transaction.
         */
        if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
            !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
                __insn_mtspr(SPR_DMA_BYTE, 0);
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                while (__insn_mfspr(SPR_DMA_USER_STATUS) &
                       SPR_DMA_STATUS__BUSY_MASK)
                        ;
        }

        __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
        __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
        __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
        __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
        __insn_mtspr(SPR_DMA_STRIDE, dma->strides);
        __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
        __insn_mtspr(SPR_DMA_BYTE, dma->byte);

        /*
         * Restart the engine if we were running and not done.
         * Clear a pending async DMA fault that we were waiting on return
         * to user space to execute, since we expect the DMA engine
         * to regenerate those faults for us now.  Note that we don't
         * try to clear the TIF_ASYNC_TLB flag, since it's relatively
         * harmless if set, and it covers both DMA and the SN processor.
         */
        if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
                t->dma_async_tlb.fault_num = 0;
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
        }
}

#endif

static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
        t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
                ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
        t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
        t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
        t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
        t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
        t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
        t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
        t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
        t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
        t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
        t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#endif
#if CHIP_HAS_DSTREAM_PF()
        t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}

static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
        __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
        __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
        __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
        __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
        __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
        __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
        __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
        __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#endif
#if CHIP_HAS_DSTREAM_PF()
        __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
        int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
        struct tile_dma_state *dma = &current->thread.tile_dma_state;
        if (dma->enabled)
                save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
        /*
         * Suspend the static network processor if it was running.
         * We do not suspend the fabric itself, just like we don't
         * try to suspend the UDN.
         */
        snctl = __insn_mfspr(SPR_SNCTL);
        current->thread.sn_proc_running =
                (snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
        if (current->thread.sn_proc_running)
                __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}

struct task_struct *__sched _switch_to(struct task_struct *prev,
                                       struct task_struct *next)
{
        /* DMA state is already saved; save off other arch state. */
        save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
        /*
         * Restore DMA in new task if desired.
         * Note that it is only safe to restart here since interrupts
         * are disabled, so we can't take any DMATLB miss or access
         * interrupts before we have finished switching stacks.
         */
        if (next->thread.tile_dma_state.enabled) {
                restore_tile_dma_state(&next->thread);
                grant_dma_mpls();
        } else {
                restrict_dma_mpls();
        }
#endif

        /* Restore other arch state. */
        restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
        /*
         * Restart static network processor in the new process
         * if it was running before.
         */
        if (next->thread.sn_proc_running) {
                int snctl = __insn_mfspr(SPR_SNCTL);
                __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
        }
#endif

#ifdef CONFIG_HARDWALL
        /* Enable or disable access to the network registers appropriately. */
        if (prev->thread.hardwall != NULL) {
                if (next->thread.hardwall == NULL)
                        restrict_network_mpls();
        } else if (next->thread.hardwall != NULL) {
                grant_network_mpls();
        }
#endif

        /*
         * Switch kernel SP, PC, and callee-saved registers.
         * In the context of the new task, return the old task pointer
         * (i.e. the task that actually called __switch_to).
         * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
         */
        return __switch_to(prev, next, next_current_ksp0(next));
}

/* Note there is an implicit fifth argument if (clone_flags & CLONE_SETTLS). */
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                void __user *, parent_tidptr, void __user *, child_tidptr,
                struct pt_regs *, regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0,
                       parent_tidptr, child_tidptr);
}
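
/*
 * The four explicit arguments above arrive in r0 through r3
 * (clone_flags, newsp, parent_tidptr, child_tidptr); the implicit
 * fifth argument, the new TLS value, arrives in r4 and is consumed
 * by copy_thread() when CLONE_SETTLS is set.
 */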

/*
 * sys_execve() executes a new program.
 */
SYSCALL_DEFINE4(execve, const char __user *, path,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp,
                struct pt_regs *, regs)
{
        long error;
        char *filename;

        filename = getname(path);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename, argv, envp, regs);
        putname(filename);
        if (error == 0)
                single_step_execve();
out:
        return error;
}

#ifdef CONFIG_COMPAT
long compat_sys_execve(const char __user *path,
                       const compat_uptr_t __user *argv,
                       const compat_uptr_t __user *envp,
                       struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(path);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = compat_do_execve(filename, argv, envp, regs);
        putname(filename);
        if (error == 0)
                single_step_execve();
out:
        return error;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
        struct KBacktraceIterator kbt;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        for (KBacktraceIterator_init(&kbt, p, NULL);
             !KBacktraceIterator_end(&kbt);
             KBacktraceIterator_next(&kbt)) {
                if (!in_sched_functions(kbt.it.pc))
                        return kbt.it.pc;
        }

        return 0;
}

/*
 * We pass in lr as zero (cleared in kernel_thread) and the caller
 * part of the backtrace ABI on the stack also zeroed (in copy_thread)
 * so that backtraces will stop with this function.
 * Note that we don't use r0, since copy_thread() clears it.
 */
static void start_kernel_thread(int dummy, int (*fn)(int), int arg)
{
        do_exit(fn(arg));
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));
        regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0);  /* run at kernel PL, no ICS */
        regs.pc = (long) start_kernel_thread;
        regs.flags = PT_FLAGS_CALLER_SAVES;   /* need to restore r1 and r2 */
        regs.regs[1] = (long) fn;             /* function pointer */
        regs.regs[2] = (long) arg;            /* parameter register */

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs,
                       0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
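
/*
 * A minimal sketch of a typical caller ("worker_fn" is a placeholder,
 * not a function defined in this file):
 *
 *      static int worker_fn(void *arg)
 *      {
 *              return 0;       // do some work, then exit via do_exit()
 *      }
 *
 *      pid_t pid = kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * The new thread enters start_kernel_thread(), which calls worker_fn()
 * and passes its return value to do_exit().
 */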

/* Flush thread state. */
void flush_thread(void)
{
        /* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        /* Nothing */
}

void show_regs(struct pt_regs *regs)
{
        struct task_struct *tsk = validate_current();
        int i;

        pr_err("\n");
        pr_err(" Pid: %d, comm: %20s, CPU: %d\n",
               tsk->pid, tsk->comm, smp_processor_id());
#ifdef __tilegx__
        for (i = 0; i < 51; i += 3)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+1, regs->regs[i+1],
                       i+2, regs->regs[i+2]);
        pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n",
               regs->regs[51], regs->regs[52], regs->tp);
        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
        for (i = 0; i < 52; i += 4)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+1, regs->regs[i+1],
                       i+2, regs->regs[i+2], i+3, regs->regs[i+3]);
        pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
               regs->regs[52], regs->tp, regs->sp, regs->lr);
#endif
        pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
               regs->pc, regs->ex1, regs->faultnum);
        dump_stack_regs(regs);
}