  1. // TODO verify coprocessor handling
  2. /*
  3. * arch/xtensa/kernel/process.c
  4. *
  5. * Xtensa Processor version.
  6. *
  7. * This file is subject to the terms and conditions of the GNU General Public
  8. * License. See the file "COPYING" in the main directory of this archive
  9. * for more details.
  10. *
  11. * Copyright (C) 2001 - 2005 Tensilica Inc.
  12. *
  13. * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  14. * Chris Zankel <chris@zankel.net>
  15. * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
  16. * Kevin Chea
  17. */
  18. #include <linux/errno.h>
  19. #include <linux/sched.h>
  20. #include <linux/kernel.h>
  21. #include <linux/mm.h>
  22. #include <linux/smp.h>
  23. #include <linux/smp_lock.h>
  24. #include <linux/stddef.h>
  25. #include <linux/unistd.h>
  26. #include <linux/ptrace.h>
  27. #include <linux/slab.h>
  28. #include <linux/elf.h>
  29. #include <linux/init.h>
  30. #include <linux/prctl.h>
  31. #include <linux/init_task.h>
  32. #include <linux/module.h>
  33. #include <linux/mqueue.h>
  34. #include <asm/pgtable.h>
  35. #include <asm/uaccess.h>
  36. #include <asm/system.h>
  37. #include <asm/io.h>
  38. #include <asm/processor.h>
  39. #include <asm/platform.h>
  40. #include <asm/mmu.h>
  41. #include <asm/irq.h>
  42. #include <asm/atomic.h>
  43. #include <asm/asm-offsets.h>
  44. #include <asm/coprocessor.h>
/* Assembly entry point that newly-forked tasks return through (entry.S). */
extern void ret_from_fork(void);

/* Static state for the initial (swapper, pid 0) task: filesystem info,
 * open files, signal state, and the initial mm. */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

/* The initial thread_info + kernel stack must live in the dedicated
 * .data.init_task section so boot code can find the first stack. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

/* Per-CPU pointer to the currently running task. */
struct task_struct *current_set[NR_CPUS] = {&init_task, };

/* Platform power-off hook; NULL when the platform provides none. */
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
#if XCHAL_CP_NUM > 0

/*
 * Coprocessor ownership.
 *
 * One entry per possible coprocessor (0..7).  Each entry pairs the
 * owning task (0 = unowned) with the offset of that coprocessor's
 * save area inside a task's thread.cp_save buffer (offsets come from
 * <asm/coprocessor.h>).
 */

coprocessor_info_t coprocessor_info[] = {
	{ 0, XTENSA_CPE_CP0_OFFSET },
	{ 0, XTENSA_CPE_CP1_OFFSET },
	{ 0, XTENSA_CPE_CP2_OFFSET },
	{ 0, XTENSA_CPE_CP3_OFFSET },
	{ 0, XTENSA_CPE_CP4_OFFSET },
	{ 0, XTENSA_CPE_CP5_OFFSET },
	{ 0, XTENSA_CPE_CP6_OFFSET },
	{ 0, XTENSA_CPE_CP7_OFFSET },
};
#endif
  75. /*
  76. * Powermanagement idle function, if any is provided by the platform.
  77. */
  78. void cpu_idle(void)
  79. {
  80. local_irq_enable();
  81. /* endless idle loop with no priority at all */
  82. while (1) {
  83. while (!need_resched())
  84. platform_idle();
  85. preempt_enable_no_resched();
  86. schedule();
  87. preempt_disable();
  88. }
  89. }
/*
 * Free current thread data structures etc..
 *
 * On Xtensa the only arch-private per-thread resource is coprocessor
 * ownership, which is dropped here.
 */
void exit_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
/* Reset arch-private thread state: drop any coprocessor ownership held
 * by the current task so it starts from a clean coprocessor state. */
void flush_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *	+------------------------+ <- sp in childregs (= tos)
 *	|       childregs        |
 *	+------------------------+ <- thread.sp = sp in dummy-frame
 *	|      dummy-frame       |    (saved in dummy-frame spill-area)
 *	+------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 * childregs.
 *
 * Returns 0 (this arch's copy_thread cannot fail).
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs *childregs;
	unsigned long tos;
	/* NOTE(review): local shadows the user_mode() macro name; the later
	 * `if (user_mode(regs))` re-expands the macro rather than reading
	 * this variable — same value, but confusing. */
	int user_mode = user_mode(regs);

	/* Set up new TSS. */
	tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	if (user_mode)
		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs*)tos - 1;

	/* Child starts from a copy of the parent's exception frame. */
	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int*)childregs - 3) = (unsigned long)childregs;
	*((int*)childregs - 4) = 0;

	childregs->areg[1] = tos;
	childregs->areg[2] = 0;
	p->set_child_tid = p->clear_child_tid = NULL;
	/* Resume in ret_from_fork as if reached by a call4 (window inc 1). */
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;
	if (user_mode(regs)) {
		/* Copy the caller-frame registers saved at the top of the
		 * areg[] array; wmask>>4 counts saved 4-register frames,
		 * so (wmask & ~0xf) is presumably that count * 16 bytes —
		 * TODO confirm against pt_regs/wmask definition. */
		int len = childregs->wmask & ~0xf;
		childregs->areg[1] = usp;
		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);

		/* New TLS value was passed in a6 by the clone caller. */
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];
	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
	}
	return 0;
}
/*
 * Create a kernel thread
 *
 * Issues a raw clone(2) syscall with CLONE_VM or'ed into @flags.  Both
 * parent and child return from the syscall; they are distinguished by
 * comparing a3 (the parent's sp, saved before the syscall) against the
 * live sp afterwards: the parent's sp is unchanged so it branches to
 * label 1 and returns clone()'s result, while the child runs on a new
 * stack, falls through, calls fn(arg) via callx4, and then invokes
 * exit(2) with fn's return value.
 *
 * Returns the clone() result in the parent (child pid or negative errno);
 * the child never returns from this function.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;
	__asm__ __volatile__
		("mov a5, %4\n\t" /* preserve fn in a5 */
		 "mov a6, %3\n\t" /* preserve and setup arg in a6 */
		 "movi a2, %1\n\t" /* load __NR_clone for syscall*/
		 "mov a3, sp\n\t" /* sp check and sys_clone */
		 "mov a4, %5\n\t" /* load flags for syscall */
		 "syscall\n\t"
		 "beq a3, sp, 1f\n\t" /* branch if parent */
		 "callx4 a5\n\t" /* call fn */
		 "movi a2, %2\n\t" /* load __NR_exit for syscall */
		 "mov a3, a6\n\t" /* load fn return value */
		 "syscall\n"
		 "1:\n\t"
		 "mov %0, a2\n\t" /* parent returns zero */
		 :"=r" (retval)
		 :"i" (__NR_clone), "i" (__NR_exit),
		  "r" (arg), "r" (fn),
		  "r" (flags | CLONE_VM)
		 : "a2", "a3", "a4", "a5", "a6" );
	return retval;
}
  182. /*
  183. * These bracket the sleeping functions..
  184. */
  185. unsigned long get_wchan(struct task_struct *p)
  186. {
  187. unsigned long sp, pc;
  188. unsigned long stack_page = (unsigned long) task_stack_page(p);
  189. int count = 0;
  190. if (!p || p == current || p->state == TASK_RUNNING)
  191. return 0;
  192. sp = p->thread.sp;
  193. pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
  194. do {
  195. if (sp < stack_page + sizeof(struct task_struct) ||
  196. sp >= (stack_page + THREAD_SIZE) ||
  197. pc == 0)
  198. return 0;
  199. if (!in_sched_functions(pc))
  200. return pc;
  201. /* Stack layout: sp-4: ra, sp-3: sp' */
  202. pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
  203. sp = *(unsigned long *)sp - 3;
  204. } while (count++ < 16);
  205. return 0;
  206. }
/*
 * do_copy_regs() gathers information from 'struct pt_regs' and
 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
 * structure.
 *
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers. Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not. Exception handling (primarily) uses
 * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
 *
 */
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		   struct task_struct *tsk)
{
	int i, n, wb_offset;

	/* Record the hardware configuration identity. */
	elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
	elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;

	/* Read special registers 176 and 208 into cpux/cpuy.
	 * NOTE(review): the meaning of these SR numbers is not visible
	 * here — presumably CPU identification; confirm against the
	 * Xtensa ISA manual for this core. */
	__asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
	elfregs->cpux = i;
	__asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
	elfregs->cpuy = i;

	/* Note: PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */
	elfregs->pc = regs->pc;
	elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
	elfregs->exccause = regs->exccause;
	elfregs->excvaddr = regs->excvaddr;
	elfregs->windowbase = regs->windowbase;
	elfregs->windowstart = regs->windowstart;
	elfregs->lbeg = regs->lbeg;
	elfregs->lend = regs->lend;
	elfregs->lcount = regs->lcount;
	elfregs->sar = regs->sar;
	elfregs->syscall = regs->syscall;

	/* Copy register file.
	 * The layout looks like this:
	 *
	 * |  a0 ... a15  | Z ... Z |  arX ... arY  |
	 *    current window  unused    saved frames
	 *
	 * Registers are rotated into physical AR positions using
	 * windowbase; unused slots stay zero from the memset.
	 */
	memset (elfregs->ar, 0, sizeof(elfregs->ar));

	wb_offset = regs->windowbase * 4;

	/* Live window size: low wmask bits select 4/8/12/16 registers. */
	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];

	/* Saved caller frames: wmask>>4 counts 4-register frames stored
	 * at the top of areg[]. */
	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}
  258. void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
  259. {
  260. do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
  261. }
/* The inverse of do_copy_regs(): write an xtensa_gregset_t back into a
 * live 'struct pt_regs'. No error or sanity checking. */
void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		      struct task_struct *tsk)
{
	int i, n, wb_offset;

	/* Note: PS.EXCM is not set while user task is running; it
	 * needs to be set in regs->ps is for exception handling convenience.
	 */
	regs->pc = elfregs->pc;
	regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
	regs->exccause = elfregs->exccause;
	regs->excvaddr = elfregs->excvaddr;
	regs->windowbase = elfregs->windowbase;
	regs->windowstart = elfregs->windowstart;
	regs->lbeg = elfregs->lbeg;
	regs->lend = elfregs->lend;
	regs->lcount = elfregs->lcount;
	regs->sar = elfregs->sar;
	regs->syscall = elfregs->syscall;

	/* Clear everything. */
	memset (regs->areg, 0, sizeof(regs->areg));

	/* Copy regs from live window frame.
	 * NOTE(review): n is derived from regs->wmask, which is NOT
	 * restored from elfregs above — this assumes the caller's
	 * pt_regs already carries a valid wmask; confirm at call sites.
	 */
	wb_offset = regs->windowbase * 4;

	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];

	/* Saved caller frames at the top of areg[] (wmask>>4 frames). */
	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
}
/*
 * do_save_fpregs() gathers information from 'struct pt_regs' and
 * 'current->thread' to fill in the elf_fpregset_t structure.
 *
 * Core files and ptrace use elf_fpregset_t.
 *
 * The output buffer holds the register-info layout table followed by
 * the raw coprocessor/extra save area.  No-op when the core has no
 * coprocessors (XCHAL_HAVE_CP unset).
 */
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		     struct task_struct *tsk)
{
#if XCHAL_HAVE_CP
	extern unsigned char _xtensa_reginfo_tables[];
	extern unsigned _xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Before dumping coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first saved to memory:
	 */
	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			/* CP must be enabled to access its registers. */
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now dump coprocessor & extra state: layout table first, then
	 * the task's save area. */
	memcpy((unsigned char*)fpregs,
	       _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
	memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}
/*
 * The inverse of do_save_fpregs().
 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
 * Returns 0 on success, non-zero if layout doesn't match.
 */
int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		       struct task_struct *tsk)
{
#if XCHAL_HAVE_CP
	extern unsigned char _xtensa_reginfo_tables[];
	extern unsigned _xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Make sure save area layouts match.
	 * FIXME: in the future we could allow restoring from
	 * a different layout of the same registers, by comparing
	 * fpregs' table with _xtensa_reginfo_tables and matching
	 * entries and copying registers one at a time.
	 * Not too sure yet whether that's very useful.
	 */
	if( memcmp((unsigned char*)fpregs,
		   _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
		return -1;
	}

	/* Before restoring coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first invalidated.
	 * NOTE(review): the live contents are also saved to cp_save here
	 * before being overwritten below — presumably harmless since the
	 * memcpy replaces them; confirm intent.
	 */
	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			/* Drop ownership so stale live state is not
			 * lazily restored over the new contents. */
			coprocessor_info[i].owner = 0;
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now restore coprocessor & extra state: */
	memcpy(tsk->thread.cp_save,
	       (unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       XTENSA_CP_EXTRA_SIZE);
#endif
	return 0;
}
/*
 * Fill in the CP structure for a core dump for a particular task.
 *
 * Returns a non-zero mask when coprocessor/extra state was dumped into
 * @r, or 0 when this core has no such state to dump.
 */
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
	/* see asm/coprocessor.h for this magic number 16 */
#if XTENSA_CP_EXTRA_SIZE > 16
	do_save_fpregs (r, regs, task);

	/* For now, bit 16 means some extra state may be present: */
	// FIXME!! need to track to return more accurate mask
	return 0x10000 | XCHAL_CP_MASK;
#else
	return 0;	/* no coprocessors active on this processor */
#endif
}
/*
 * Fill in the CP structure for a core dump.
 * This includes any FPU coprocessor.
 * Here, we dump all coprocessors, and other ("extra") custom state.
 *
 * This function is called by elf_core_dump() in fs/binfmt_elf.c
 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
 *
 * Thin wrapper: delegates to dump_task_fpu() for the current task and
 * returns its mask (0 when there is nothing to dump).
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	return dump_task_fpu(regs, current, r);
}