  1. /*
  2. * linux/arch/m68k/kernel/sys_m68k.c
  3. *
  4. * This file contains various random system calls that
  5. * have a non-standard calling sequence on the Linux/m68k
  6. * platform.
  7. */
  8. #include <linux/capability.h>
  9. #include <linux/errno.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/fs.h>
  13. #include <linux/smp.h>
  14. #include <linux/sem.h>
  15. #include <linux/msg.h>
  16. #include <linux/shm.h>
  17. #include <linux/stat.h>
  18. #include <linux/syscalls.h>
  19. #include <linux/mman.h>
  20. #include <linux/file.h>
  21. #include <linux/ipc.h>
  22. #include <asm/setup.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/cachectl.h>
  25. #include <asm/traps.h>
  26. #include <asm/page.h>
  27. #include <asm/unistd.h>
  28. #include <linux/elf.h>
  29. #include <asm/tlb.h>
  30. asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
  31. unsigned long error_code);
  32. asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
  33. unsigned long prot, unsigned long flags,
  34. unsigned long fd, unsigned long pgoff)
  35. {
  36. /*
  37. * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
  38. * so we need to shift the argument down by 1; m68k mmap64(3)
  39. * (in libc) expects the last argument of mmap2 in 4Kb units.
  40. */
  41. return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
  42. }
/*
 * Convert virtual (user) address VADDR to physical address PADDR.
 *
 * Uses the 68040 PTESTR instruction to make the MMU search the
 * translation tables for VADDR and latch the result into %mmusr.
 * Evaluates to the page-aligned physical address if the page is
 * resident (MMU_R_040 set), or 0 if the page is not mapped.
 */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	/* Resident bit set -> keep page frame bits, else unmapped.  */ \
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
/*
 * Flush/invalidate the 68040 caches over a user-specified range.
 *
 * @addr:  virtual start address of the range
 * @scope: FLUSH_SCOPE_ALL, FLUSH_SCOPE_LINE or FLUSH_SCOPE_PAGE
 * @cache: FLUSH_CACHE_DATA, FLUSH_CACHE_INSN or FLUSH_CACHE_BOTH
 * @len:   length of the range in bytes
 *
 * CPUSHL/CPUSHP operate on physical addresses, so for LINE and PAGE
 * scope each page is translated with virt_to_phys_040() first;
 * unmapped pages are simply skipped.  Cache lines are 16 bytes.
 *
 * Always returns 0.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		/* CPUSHA pushes/invalidates the whole selected cache(s). */
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040.  */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			/* Keep the in-page offset, rounded down to a line. */
			paddr += addr & ~(PAGE_MASK | 15);
			/* Convert len to a count of 16-byte cache lines. */
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			/* First page unmapped: advance page by page until a
			   mapped one is found or the range is exhausted. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		/* Lines remaining in the current physical page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages, then push each mapped
		   page with CPUSHP; unmapped pages are skipped. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/*
 * Convert virtual (user) address VADDR to physical address on the
 * 68060 using the PLPAR (load physical address, read access)
 * instruction.  NOTE(review): unlike virt_to_phys_040() there is no
 * explicit "unmapped" result decoded here (the XXX below is
 * original); callers treat a zero value as unmapped.
 */
#define virt_to_phys_060(vaddr)						\
({									\
	unsigned long paddr;						\
	__asm__ __volatile__ (".chip 68060\n\t"				\
			      "plpar (%0)\n\t"				\
			      ".chip 68k"				\
			      : "=a" (paddr)				\
			      : "0" (vaddr));				\
	(paddr); /* XXX */						\
})
/*
 * Flush/invalidate the 68060 caches over a user-specified range.
 * Same contract as cache_flush_040(): translate each page with
 * virt_to_phys_060(), skip unmapped pages, always return 0.
 * Cache lines are 16 bytes.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;	/* account for the sub-line offset */
		addr &= -16;		/* align down to a cache line */
		if (!(paddr = virt_to_phys_060(addr))) {
			/* First page unmapped: scan forward for a mapped
			   page, giving up when the range runs out. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		/* len is now a count of 16-byte cache lines. */
		len = (len + 15) >> 4;
		/* Lines remaining in the current physical page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
  353. /* sys_cacheflush -- flush (part of) the processor cache. */
  354. asmlinkage int
  355. sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
  356. {
  357. struct vm_area_struct *vma;
  358. int ret = -EINVAL;
  359. if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
  360. cache & ~FLUSH_CACHE_BOTH)
  361. goto out;
  362. if (scope == FLUSH_SCOPE_ALL) {
  363. /* Only the superuser may explicitly flush the whole cache. */
  364. ret = -EPERM;
  365. if (!capable(CAP_SYS_ADMIN))
  366. goto out;
  367. } else {
  368. /*
  369. * Verify that the specified address region actually belongs
  370. * to this process.
  371. */
  372. vma = find_vma (current->mm, addr);
  373. ret = -EINVAL;
  374. /* Check for overflow. */
  375. if (addr + len < addr)
  376. goto out;
  377. if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
  378. goto out;
  379. }
  380. if (CPU_IS_020_OR_030) {
  381. if (scope == FLUSH_SCOPE_LINE && len < 256) {
  382. unsigned long cacr;
  383. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  384. if (cache & FLUSH_CACHE_INSN)
  385. cacr |= 4;
  386. if (cache & FLUSH_CACHE_DATA)
  387. cacr |= 0x400;
  388. len >>= 2;
  389. while (len--) {
  390. __asm__ __volatile__ ("movec %1, %%caar\n\t"
  391. "movec %0, %%cacr"
  392. : /* no outputs */
  393. : "r" (cacr), "r" (addr));
  394. addr += 4;
  395. }
  396. } else {
  397. /* Flush the whole cache, even if page granularity requested. */
  398. unsigned long cacr;
  399. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  400. if (cache & FLUSH_CACHE_INSN)
  401. cacr |= 8;
  402. if (cache & FLUSH_CACHE_DATA)
  403. cacr |= 0x800;
  404. __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
  405. }
  406. ret = 0;
  407. goto out;
  408. } else {
  409. /*
  410. * 040 or 060: don't blindly trust 'scope', someone could
  411. * try to flush a few megs of memory.
  412. */
  413. if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
  414. scope=FLUSH_SCOPE_PAGE;
  415. if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
  416. scope=FLUSH_SCOPE_ALL;
  417. if (CPU_IS_040) {
  418. ret = cache_flush_040 (addr, scope, cache, len);
  419. } else if (CPU_IS_060) {
  420. ret = cache_flush_060 (addr, scope, cache, len);
  421. }
  422. }
  423. out:
  424. return ret;
  425. }
  426. asmlinkage int sys_getpagesize(void)
  427. {
  428. return PAGE_SIZE;
  429. }
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 *
 * m68k syscall convention: syscall number in %d0, arguments in
 * %d1..%d3, 'trap #0' enters the kernel, result comes back in %d0.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	/* "+d" ties %d0 as both syscall number in and return value out. */
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
  446. asmlinkage unsigned long sys_get_thread_area(void)
  447. {
  448. return current_thread_info()->tp_value;
  449. }
  450. asmlinkage int sys_set_thread_area(unsigned long tp)
  451. {
  452. current_thread_info()->tp_value = tp;
  453. return 0;
  454. }
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
/*
 * Userspace compare-and-exchange helper for CPUs without a usable
 * CAS instruction in user mode: atomically (w.r.t. this UP kernel)
 * replace *mem with newval if *mem == oldval, returning the value
 * read from *mem.  d3..d5 are unused register-slot padding imposed
 * by the syscall entry convention.
 */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		/* Walk the page tables by hand so we can verify the page is
		   present, dirty and writable before touching it directly. */
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/* Page is mapped writable: do the compare-and-swap under
		   the PTE lock.  NOTE(review): *mem is dereferenced
		   directly rather than via get_user/put_user — relies on
		   the checks above; presumably safe only on nommu-style
		   direct-mapped user space of this platform. */
		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
/*
 * Memory-barrier syscall paired with sys_atomic_cmpxchg_32.
 * On this uniprocessor configuration no barrier instruction is
 * needed, so it is a no-op that reports success.
 */
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}