/*
 * File:         arch/blackfin/kernel/process.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:  Blackfin architecture-dependent process handling.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>

asmlinkage void ret_from_fork(void);
/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using a L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;
/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif

void default_idle(void)
{
	while (!need_resched()) {
		local_irq_disable();
		if (likely(!need_resched()))
			idle_with_irq_disabled();
		local_irq_enable();
	}
}

void (*idle)(void) = default_idle;

/*
 * The idle thread.  There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		idle();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
{
	return 1;
}
/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args".  Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
	".align 4\n"
	"_kernel_thread_helper:\n\t"
	"\tsp += -12;\n\t"
	"\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous");

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.r1 = (unsigned long)arg;
	regs.p1 = (unsigned long)fn;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.orig_p0 = -1;
	/* Set bit 2 to tell ret_from_fork we should be returning to kernel
	   mode. */
	regs.ipend = 0x8002;
	__asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);

	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
		       NULL);
}
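
/*
 * Usage sketch (hypothetical names, illustration only): a driver would
 * start a kernel thread roughly like this; when the thread function
 * returns, the helper above falls through into do_exit():
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		printk(KERN_INFO "example thread running, arg=%p\n", arg);
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		printk(KERN_ERR "kernel_thread failed: %d\n", (int)pid);
 */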

void flush_thread(void)
{
}
asmlinkage int bfin_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
		       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

	/* syscall2 puts clone_flags in r0 and usp in r1 */
	clone_flags = regs->r0;
	newsp = regs->r1;
	if (!newsp)
		newsp = rdusp();
	else
		newsp -= 12;
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}
int
copy_thread(int nr, unsigned long clone_flags,
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	*childregs = *regs;
	childregs->r0 = 0;

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childregs;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *name, char **argv, char **envp)
{
	int error;
	char *filename;
	struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

	lock_kernel();
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
 out:
	unlock_kernel();
	return error;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *)fp;
	}
	while (count++ < 16);
	return 0;
}

/*
 * The userspace atomic sequences live at fixed addresses between
 * ATOMIC_SEQS_START and ATOMIC_SEQS_END (see asm/fixed_code.h).  If a task
 * was interrupted part-way through one of them, complete the remaining
 * load/modify/store steps on its behalf and advance the PC past the
 * sequence, so the operation still appears atomic to userspace.
 */
void finish_atomic_sections(struct pt_regs *regs)
{
	if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
		return;

	switch (regs->pc) {
	case ATOMIC_XCHG32 + 2:
		put_user(regs->r1, (int *)regs->p0);
		regs->pc += 2;
		break;

	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
			put_user(regs->r2, (int *)regs->p0);
		regs->pc = ATOMIC_CAS32 + 8;
		break;
	case ATOMIC_CAS32 + 6:
		put_user(regs->r2, (int *)regs->p0);
		regs->pc += 2;
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
		put_user(regs->r0, (int *)regs->p0);
		regs->pc = ATOMIC_ADD32 + 6;
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
		put_user(regs->r0, (int *)regs->p0);
		regs->pc = ATOMIC_SUB32 + 6;
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
		put_user(regs->r0, (int *)regs->p0);
		regs->pc = ATOMIC_IOR32 + 6;
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
		put_user(regs->r0, (int *)regs->p0);
		regs->pc = ATOMIC_AND32 + 6;
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
		put_user(regs->r0, (int *)regs->p0);
		regs->pc = ATOMIC_XOR32 + 6;
		break;
	}
}
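
/*
 * For reference, the emulation above mirrors the layout of the fixed-code
 * sequences themselves.  Inferred from the ATOMIC_ADD32 cases (an
 * illustrative sketch only; the authoritative definitions live in the
 * fixed-code sources), the add sequence is laid out as:
 *
 *	ATOMIC_ADD32 + 0:	R1 = [P0];	load the current value
 *	ATOMIC_ADD32 + 2:	R0 = R1 + R0;	compute the new value
 *	ATOMIC_ADD32 + 4:	[P0] = R0;	store it back
 *	ATOMIC_ADD32 + 6:	RTS;		return, new value in R0
 *
 * so a PC of "+ 2" means only the load has run, while "+ 4" means the add
 * has also run and only the store is outstanding.
 */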

#if defined(CONFIG_ACCESS_CHECK)
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
	if (size == 0)
		return 1;
	if (addr > (addr + size))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
#ifdef CONFIG_MTD_UCLINUX
	if (addr >= memory_start && (addr + size) <= memory_end)
		return 1;
	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
		return 1;
#else
	if (addr >= memory_start && (addr + size) <= physical_mem_end)
		return 1;
#endif
	if (addr >= (unsigned long)__init_begin &&
	    addr + size <= (unsigned long)__init_end)
		return 1;
	if (addr >= L1_SCRATCH_START
	    && addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH)
		return 1;
#if L1_CODE_LENGTH != 0
	if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1)
	    && addr + size <= L1_CODE_START + L1_CODE_LENGTH)
		return 1;
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1)
	    && addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH)
		return 1;
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= L1_DATA_B_START
	    && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif				/* CONFIG_ACCESS_CHECK */
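
/*
 * Usage sketch (hypothetical, illustration only): callers do not normally
 * invoke _access_ok() directly; it backs the generic access_ok() check used
 * by the uaccess helpers, e.g. in a driver's read() path:
 *
 *	if (!access_ok(VERIFY_WRITE, buf, count))
 *		return -EFAULT;
 *	if (__copy_to_user(buf, kernel_buf, count))
 *		return -EFAULT;
 */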