/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>
#include <asm/irq.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using an L1 stack area. The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void) __attribute__((l1_text));
void cpu_idle(void) __attribute__((l1_text));
#endif

/*
 * This is our default idle handler. We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	hard_local_irq_disable();
	if (!need_resched())
		idle_with_irq_disabled();
	hard_local_irq_enable();
}

/*
 * The idle thread. We try to conserve power, while trying to keep
 * overall latency low. The architecture-specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
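		/* Snapshot pm_idle into a local so a concurrent update of the
		 * hook cannot change handlers between the NULL check and the
		 * call below. */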
		void (*idle)(void) = pm_idle;
#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched())
			idle();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	regs->pc = new_ip;
	if (current->mm)
		regs->p5 = current->mm->start_data;
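
	/* On UP, mirror this task's L1 stack bookkeeping into the fixed
	 * scratchpad slot, presumably so the context-switch code can find
	 * it at a known address. */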
#ifndef CONFIG_SMP
	task_thread_info(current)->l1_task_info.stack_start =
		(void *)current->mm->context.stack_start;
	task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
	       sizeof(*L1_SCRATCH_TASK_INFO));
#endif
	wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
		       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
	if (current->nr_cpus_allowed == num_possible_cpus())
		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif

	/* syscall2 puts clone_flags in r0 and usp in r1 */
	clone_flags = regs->r0;
	newsp = regs->r1;
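
	/* No stack given means the child reuses the parent's user SP;
	 * otherwise back off 12 bytes, presumably to skip the words the
	 * userspace clone() wrapper parked on the new stack. */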
	if (!newsp)
		newsp = rdusp();
	else
		newsp -= 12;
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

int
copy_thread(unsigned long clone_flags,
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long *v;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	v = ((unsigned long *)childregs) - 2;
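
	/* A NULL regs is how kernel threads arrive here: build a fresh
	 * register frame rather than copying a userspace one. */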
	if (unlikely(!regs)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		v[0] = usp;
		v[1] = topstk;
		childregs->orig_p0 = -1;
		childregs->ipend = 0x8000;
		__asm__ __volatile__("%0 = syscfg;" : "=da"(childregs->syscfg) :);
		p->thread.usp = 0;
	} else {
		*childregs = *regs;
		childregs->r0 = 0;
		p->thread.usp = usp;
		v[0] = v[1] = 0;
	}

	p->thread.ksp = (unsigned long)v;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
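		/* Keep the walk inside this task's stack: above the
		 * thread_info at the bottom and below the top of the page
		 * (8184 is presumably THREAD_SIZE - 8 for 8 KiB stacks). */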
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *)fp;
	} while (count++ < 16);
	return 0;
}

void finish_atomic_sections(struct pt_regs *regs)
{
	int __user *up0 = (int __user *)regs->p0;
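
	/* The interrupted PC tells us how far into one of the fixed-code
	 * atomic sequences the task got; emulate the remaining steps so
	 * the sequence completes atomically from userspace's view. */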
	switch (regs->pc) {
	default:
		/* not in middle of an atomic step, so resume like normal */
		return;

	case ATOMIC_XCHG32 + 2:
		put_user(regs->r1, up0);
		break;
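
	/* Note the case label nested inside the if below: a failed
	 * compare skips the store yet still reaches the common break. */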
	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
	case ATOMIC_CAS32 + 6:
			put_user(regs->r2, up0);
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
		put_user(regs->r0, up0);
		break;
	}

	/*
	 * We've finished the atomic section, and the only thing left for
	 * userspace is to do an RTS, so we might as well handle that too
	 * since we need to update the PC anyway.
	 */
	regs->pc = regs->rets;
}
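
/* Range helpers: the access [addr, addr + size) must lie entirely inside
 * the region. The *_const variants short-circuit on const_size, so a
 * compile-time zero length lets the optimizer drop the whole test. */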
static inline
int in_mem(unsigned long addr, unsigned long size,
	   unsigned long start, unsigned long end)
{
	return addr >= start && addr + size <= end;
}

static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
		     unsigned long const_addr, unsigned long const_size)
{
	return const_size &&
	       in_mem(addr, size, const_addr + off, const_addr + const_size);
}

static inline
int in_mem_const(unsigned long addr, unsigned long size,
		 unsigned long const_addr, unsigned long const_size)
{
	return in_mem_const_off(addr, size, 0, const_addr, const_size);
}

#ifdef CONFIG_BF60x
#define ASYNC_ENABLED(bnum, bctlnum)	1
#else
#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
	1; \
})
#endif

/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space. Make sure we validate accesses
 * that cross async banks too.
 * 0 - found, but unusable
 * 1 - found & usable
 * 2 - not found
 */
static
int in_async(unsigned long addr, unsigned long size)
{
	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
		if (!ASYNC_ENABLED(0, 0))
			return 0;
		if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
			return 1;
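		/* The access spills past this bank: clip off the part that
		 * fits here and keep validating the rest in the next bank. */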
		size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
		addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
	}
	if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
		if (!ASYNC_ENABLED(1, 0))
			return 0;
		if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
			return 1;
		size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
		addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
	}
	if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
		if (!ASYNC_ENABLED(2, 1))
			return 0;
		if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
			return 1;
		size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
		addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
	}
	if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
		if (!ASYNC_ENABLED(3, 1))
			return 0;
		if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
			return 1;
		return 0;
	}

	/* not within async bounds */
	return 2;
}

int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
	int cpu = raw_smp_processor_id();

	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return -EFAULT;
	if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
		return BFIN_MEM_ACCESS_CORE;
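
	/* The owning core reaches its L1 instruction SRAM through the
	 * ITEST interface; any other master has to use DMA, hence the
	 * per-cpu checks below. */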
	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
		return BFIN_MEM_ACCESS_CORE;
	if (addr >= SYSMMR_BASE)
		return BFIN_MEM_ACCESS_CORE_ONLY;
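
	/* Async banks need a dedicated probe: a range touching a disabled
	 * bank is unusable, while a miss falls through to the ROMs below. */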
	switch (in_async(addr, size)) {
	case 0: return -EFAULT;
	case 1: return BFIN_MEM_ACCESS_CORE;
	case 2: /* fall through */;
	}
	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return BFIN_MEM_ACCESS_CORE;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return BFIN_MEM_ACCESS_DMA;
	return -EFAULT;
}

#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
	int aret;

	if (size == 0)
		return 1;
	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
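
	/* Compile-time selection via if (1)/if (0): both branches stay
	 * syntax-checked while the optimizer discards the dead one. */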
#ifdef CONFIG_MTD_UCLINUX
	if (1)
#else
	if (0)
#endif
	{
		if (in_mem(addr, size, memory_start, memory_end))
			return 1;
		if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
			return 1;
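		/* Same trick: without ROMFS-on-MTD the XIP window check
		 * below is compiled out by the if (0). */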
# ifndef CONFIG_ROMFS_ON_MTD
		if (0)
# endif
		/* For XIP, allow user space to use pointers within the ROMFS. */
		if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
			return 1;
	} else {
		if (in_mem(addr, size, memory_start, physical_mem_end))
			return 1;
	}

	if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
		return 1;

	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return 1;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return 1;
#endif
#ifndef CONFIG_EXCEPTION_L1_SCRATCH
	if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
		return 1;
#endif
	aret = in_async(addr, size);
	if (aret < 2)
		return aret;

	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
		return 1;

	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return 1;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return 1;

	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */