// TODO verify coprocessor handling
/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/coprocessor.h>
/* Assembly entry point a freshly-forked task returns through. */
extern void ret_from_fork(void);

/* Static bootstrap instances backing the init task (PID 0). */
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/* Initial thread_info + kernel stack; placed in its own section so the
 * linker script can locate it at a properly aligned address. */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);

struct task_struct *current_set[NR_CPUS] = {&init_task, };

/* Optional platform hook invoked to power the machine off. */
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

#if XCHAL_CP_NUM > 0

/*
 * Coprocessor ownership.
 * One entry per possible coprocessor: the owning task (0 = unowned) and
 * the offset of its save area within thread.cp_save.
 */

coprocessor_info_t coprocessor_info[] = {
	{ 0, XTENSA_CPE_CP0_OFFSET },
	{ 0, XTENSA_CPE_CP1_OFFSET },
	{ 0, XTENSA_CPE_CP2_OFFSET },
	{ 0, XTENSA_CPE_CP3_OFFSET },
	{ 0, XTENSA_CPE_CP4_OFFSET },
	{ 0, XTENSA_CPE_CP5_OFFSET },
	{ 0, XTENSA_CPE_CP6_OFFSET },
	{ 0, XTENSA_CPE_CP7_OFFSET },
};
#endif
  76. /*
  77. * Powermanagement idle function, if any is provided by the platform.
  78. */
  79. void cpu_idle(void)
  80. {
  81. local_irq_enable();
  82. /* endless idle loop with no priority at all */
  83. while (1) {
  84. while (!need_resched())
  85. platform_idle();
  86. preempt_enable_no_resched();
  87. schedule();
  88. preempt_disable();
  89. }
  90. }
/*
 * Free current thread data structures etc..
 */

void exit_thread(void)
{
	/* Drop any coprocessors this task still owns so their state is not
	 * lazily saved into a dying task. */
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
/* Reset thread state on exec: any coprocessor contents belong to the old
 * program image and must be discarded. */
void flush_thread(void)
{
	release_coprocessors(current);	/* Empty macro if no CPs are defined */
}
/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *	+------------------------+ <- sp in childregs (= tos)
 *	|       childregs        |
 *	+------------------------+ <- thread.sp = sp in dummy-frame
 *	|      dummy-frame       |    (saved in dummy-frame spill-area)
 *	+------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 */

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs *childregs;
	unsigned long tos;
	/* Deliberately shadows the user_mode() macro name; the function-like
	 * macro still expands when called with parentheses below. */
	int user_mode = user_mode(regs);

	/* Set up new TSS. */
	tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* User threads keep a fixed-size pt_regs area at the stack top;
	 * kernel threads need only the pt_regs structure itself. */
	if (user_mode)
		childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
	else
		childregs = (struct pt_regs*)tos - 1;

	*childregs = *regs;

	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
	*((int*)childregs - 3) = (unsigned long)childregs;
	*((int*)childregs - 4) = 0;

	childregs->areg[1] = tos;
	childregs->areg[2] = 0;		/* child's fork() return value */
	p->set_child_tid = p->clear_child_tid = NULL;
	p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
	p->thread.sp = (unsigned long)childregs;
	if (user_mode(regs)) {

		/* wmask bits >= 4 encode caller frames still live in
		 * pt_regs; len is their size in bytes. */
		int len = childregs->wmask & ~0xf;
		childregs->areg[1] = usp;
		memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
		       &regs->areg[XCHAL_NUM_AREGS - len/4], len);
		/* NOTE(review): assumes the clone() caller passed the new
		 * TLS pointer in a6 -- confirm against the syscall entry
		 * ABI before relying on this. */
		if (clone_flags & CLONE_SETTLS)
			childregs->areg[2] = childregs->areg[6];
	} else {
		/* In kernel space, we start a new thread with a new stack. */
		childregs->wmask = 1;
	}
	return 0;
}
/*
 * Create a kernel thread
 *
 * Issues the clone syscall directly from inline assembly.  Both parent
 * and child return from the syscall; they are distinguished by comparing
 * a3 (the parent's sp, captured before the syscall) with the live sp:
 * the child runs on a different stack, so it calls fn(arg) and then
 * invokes exit with fn's return value, while the parent falls through.
 * NOTE(review): relies on the xtensa syscall ABI (syscall number and
 * return value in a2) -- confirm against the kernel's syscall entry code.
 */

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;
	__asm__ __volatile__
		("mov a5, %4\n\t" /* preserve fn in a5 */
		 "mov a6, %3\n\t" /* preserve and setup arg in a6 */
		 "movi a2, %1\n\t" /* load __NR_clone for syscall*/
		 "mov a3, sp\n\t" /* sp check and sys_clone */
		 "mov a4, %5\n\t" /* load flags for syscall */
		 "syscall\n\t"
		 "beq a3, sp, 1f\n\t" /* branch if parent */
		 "callx4 a5\n\t" /* call fn */
		 "movi a2, %2\n\t" /* load __NR_exit for syscall */
		 "mov a3, a6\n\t" /* load fn return value */
		 "syscall\n"
		 "1:\n\t"
		 "mov %0, a2\n\t" /* parent returns zero */
		 :"=r" (retval)
		 :"i" (__NR_clone), "i" (__NR_exit),
		  "r" (arg), "r" (fn),
		  "r" (flags | CLONE_VM)
		 : "a2", "a3", "a4", "a5", "a6" );
	return retval;
}
  183. /*
  184. * These bracket the sleeping functions..
  185. */
  186. unsigned long get_wchan(struct task_struct *p)
  187. {
  188. unsigned long sp, pc;
  189. unsigned long stack_page = (unsigned long) task_stack_page(p);
  190. int count = 0;
  191. if (!p || p == current || p->state == TASK_RUNNING)
  192. return 0;
  193. sp = p->thread.sp;
  194. pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
  195. do {
  196. if (sp < stack_page + sizeof(struct task_struct) ||
  197. sp >= (stack_page + THREAD_SIZE) ||
  198. pc == 0)
  199. return 0;
  200. if (!in_sched_functions(pc))
  201. return pc;
  202. /* Stack layout: sp-4: ra, sp-3: sp' */
  203. pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
  204. sp = *(unsigned long *)sp - 3;
  205. } while (count++ < 16);
  206. return 0;
  207. }
/*
 * do_copy_regs() gathers information from 'struct pt_regs' and
 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
 * structure.
 *
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers.  Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not.  Exception handling (primarily) uses
 * 'struct pt_regs'.  Core files and ptrace use xtensa_gregset_t.
 *
 */

void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		   struct task_struct *tsk)
{
	int i, n, wb_offset;

	elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
	elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;

	/* Read special registers by raw number.
	 * NOTE(review): SR indices 176 and 208 are undocumented here;
	 * confirm they are the registers backing the cpux/cpuy fields. */
	__asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
	elfregs->cpux = i;
	__asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
	elfregs->cpuy = i;

	/* Note:  PS.EXCM is not set while user task is running; its
	 * being set in regs->ps is for exception handling convenience.
	 */

	elfregs->pc = regs->pc;
	elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
	elfregs->exccause = regs->exccause;
	elfregs->excvaddr = regs->excvaddr;
	elfregs->windowbase = regs->windowbase;
	elfregs->windowstart = regs->windowstart;
	elfregs->lbeg = regs->lbeg;
	elfregs->lend = regs->lend;
	elfregs->lcount = regs->lcount;
	elfregs->sar = regs->sar;
	elfregs->syscall = regs->syscall;

	/* Copy register file.
	 * The layout looks like this:
	 *
	 * |  a0 ... a15  | Z ... Z |  arX ... arY  |
	 *  current window  unused    saved frames
	 */

	memset (elfregs->ar, 0, sizeof(elfregs->ar));

	wb_offset = regs->windowbase * 4;

	/* Low wmask bits select how many 4-register groups of the current
	 * window are live: 4, 8, 12 or 16 registers. */
	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];

	/* wmask >> 4 counts additional saved 4-register frames stored at
	 * the top end of the areg[] array. */
	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}
  259. void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
  260. {
  261. do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
  262. }
/* The inverse of do_copy_regs().  No error or sanity checking. */

void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
		      struct task_struct *tsk)
{
	int i, n, wb_offset;

	/* Note:  PS.EXCM is not set while user task is running; it
	 * needs to be set in regs->ps is for exception handling convenience.
	 */

	regs->pc = elfregs->pc;
	regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
	regs->exccause = elfregs->exccause;
	regs->excvaddr = elfregs->excvaddr;
	regs->windowbase = elfregs->windowbase;
	regs->windowstart = elfregs->windowstart;
	regs->lbeg = elfregs->lbeg;
	regs->lend = elfregs->lend;
	regs->lcount = elfregs->lcount;
	regs->sar = elfregs->sar;
	regs->syscall = elfregs->syscall;

	/* Clear everything. */

	memset (regs->areg, 0, sizeof(regs->areg));

	/* Copy regs from live window frame. */
	/* NOTE(review): regs->wmask is consumed here but is never restored
	 * from elfregs, so the frame counts come from the pt_regs being
	 * overwritten.  Also the ar[]/areg[] index roles are swapped
	 * relative to do_copy_regs() (source and destination indices are
	 * not mirror images) -- verify against the ptrace/core consumers
	 * before changing either side. */

	wb_offset = regs->windowbase * 4;

	n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

	for (i = 0; i < n; i++)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];

	n = (regs->wmask >> 4) * 4;

	for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
		regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
}
/*
 * do_save_fpregs() gathers information from 'struct pt_regs' and
 * 'current->thread' to fill in the elf_fpregset_t structure.
 *
 * Core files and ptrace use elf_fpregset_t.
 */

void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		     struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

	extern unsigned char _xtensa_reginfo_tables[];
	extern unsigned _xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Before dumping coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first saved to memory:
	 */

	/* IRQs off so coprocessor ownership cannot change mid-save. */
	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now dump coprocessor & extra state: */
	/* Output layout: reginfo description table first, then the raw
	 * per-task coprocessor save area. */
	memcpy((unsigned char*)fpregs,
	       _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
	memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}
/*
 * The inverse of do_save_fpregs().
 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
 * Returns 0 on success, non-zero if layout doesn't match.
 */

int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
		       struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

	extern unsigned char _xtensa_reginfo_tables[];
	extern unsigned _xtensa_reginfo_table_size;
	int i;
	unsigned long flags;

	/* Make sure save area layouts match.
	 * FIXME:  in the future we could allow restoring from
	 * a different layout of the same registers, by comparing
	 * fpregs' table with _xtensa_reginfo_tables and matching
	 * entries and copying registers one at a time.
	 * Not too sure yet whether that's very useful.
	 */

	if( memcmp((unsigned char*)fpregs,
		   _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
		return -1;
	}

	/* Before restoring coprocessor state from memory,
	 * ensure any live coprocessor contents for this
	 * task are first invalidated.
	 */

	/* IRQs off while the live state is flushed and ownership dropped,
	 * so the memcpy below cannot race with a lazy CP save. */
	local_irq_save(flags);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (tsk == coprocessor_info[i].owner) {
			enable_coprocessor(i);
			save_coprocessor_registers(
			    tsk->thread.cp_save+coprocessor_info[i].offset,i);
			coprocessor_info[i].owner = 0;	/* invalidate */
			disable_coprocessor(i);
		}
	}

	local_irq_restore(flags);

	/* Now restore coprocessor & extra state: */
	memcpy(tsk->thread.cp_save,
	       (unsigned char*)fpregs + _xtensa_reginfo_table_size,
	       XTENSA_CP_EXTRA_SIZE);
#endif
	return 0;
}
/*
 * Fill in the CP structure for a core dump for a particular task.
 * Returns a mask describing which state was dumped (0 = none).
 */

int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
	/* see asm/coprocessor.h for this magic number 16 */
#if XTENSA_CP_EXTRA_SIZE > 16
	do_save_fpregs (r, regs, task);

	/* For now, bit 16 means some extra state may be present: */
	// FIXME!! need to track to return more accurate mask
	return 0x10000 | XCHAL_CP_MASK;
#else
	return 0;	/* no coprocessors active on this processor */
#endif
}
/*
 * Fill in the CP structure for a core dump.
 * This includes any FPU coprocessor.
 * Here, we dump all coprocessors, and other ("extra") custom state.
 *
 * This function is called by elf_core_dump() in fs/binfmt_elf.c
 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
 */

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
	/* Thin wrapper: dump coprocessor state for the current task. */
	return dump_task_fpu(regs, current, r);
}