  1. /*
  2. * linux/arch/m68k/kernel/sys_m68k.c
  3. *
  4. * This file contains various random system calls that
  5. * have a non-standard calling sequence on the Linux/m68k
  6. * platform.
  7. */
  8. #include <linux/capability.h>
  9. #include <linux/errno.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/fs.h>
  13. #include <linux/smp.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/sem.h>
  16. #include <linux/msg.h>
  17. #include <linux/shm.h>
  18. #include <linux/stat.h>
  19. #include <linux/syscalls.h>
  20. #include <linux/mman.h>
  21. #include <linux/file.h>
  22. #include <linux/ipc.h>
  23. #include <asm/setup.h>
  24. #include <asm/uaccess.h>
  25. #include <asm/cachectl.h>
  26. #include <asm/traps.h>
  27. #include <asm/page.h>
  28. #include <asm/unistd.h>
  29. #include <linux/elf.h>
  30. #include <asm/tlb.h>
  31. asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
  32. unsigned long error_code);
  33. asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
  34. unsigned long prot, unsigned long flags,
  35. unsigned long fd, unsigned long pgoff)
  36. {
  37. /*
  38. * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
  39. * so we need to shift the argument down by 1; m68k mmap64(3)
  40. * (in libc) expects the last argument of mmap2 in 4Kb units.
  41. */
  42. return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
  43. }
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

/*
 * User-space argument block for the legacy one-pointer mmap() call.
 * Layout is ABI: old_mmap() copies it in with copy_from_user(), so
 * field order and types must not change.
 */
struct mmap_arg_struct {
	unsigned long addr;	/* requested mapping address */
	unsigned long len;	/* length of the mapping in bytes */
	unsigned long prot;	/* PROT_* protection bits */
	unsigned long flags;	/* MAP_* flags */
	unsigned long fd;	/* file descriptor to map */
	unsigned long offset;	/* byte offset into the file (page aligned) */
};
  58. asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
  59. {
  60. struct mmap_arg_struct a;
  61. int error = -EFAULT;
  62. if (copy_from_user(&a, arg, sizeof(a)))
  63. goto out;
  64. error = -EINVAL;
  65. if (a.offset & ~PAGE_MASK)
  66. goto out;
  67. error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
  68. a.offset >> PAGE_SHIFT);
  69. out:
  70. return error;
  71. }
/*
 * User-space argument block for the legacy one-pointer select() call.
 * Layout is ABI: old_select() copies it in with copy_from_user().
 */
struct sel_arg_struct {
	unsigned long n;			/* highest fd number + 1 */
	fd_set __user *inp, *outp, *exp;	/* read / write / except sets */
	struct timeval __user *tvp;		/* timeout, or NULL */
};
  77. asmlinkage int old_select(struct sel_arg_struct __user *arg)
  78. {
  79. struct sel_arg_struct a;
  80. if (copy_from_user(&a, arg, sizeof(a)))
  81. return -EFAULT;
  82. /* sys_select() does the appropriate kernel locking */
  83. return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
  84. }
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * 'call' encodes the interface version in its high 16 bits and the
 * actual operation (SEMOP..SHMCTL) in its low 16 bits; the remaining
 * arguments are interpreted per operation.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	/* Semaphore operations occupy the lowest call numbers. */
	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			/* ptr points at the user's semun; fetch it as a
			   plain pointer via the __pad member. */
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -ENOSYS;
		}

	/* Message-queue operations. */
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old interface: msgp and msgtyp arrive
				   packed in a user-space ipc_kludge. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	/* Shared-memory operations. */
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				/* 'third' is the user address where the
				   attach address is stored. */
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	/* Call number above SHMCTL: not an IPC operation at all. */
	return -EINVAL;
}
/* Convert virtual (user) address VADDR to physical address PADDR.
   Uses the 68040 PTESTR instruction to probe the MMU tables and reads
   the result from %mmusr.  Evaluates to 0 when the page is not
   resident (MMU_R_040 clear), so callers can use it to skip unmapped
   pages. */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
/*
 * Flush the 68040 caches over the requested range.
 *
 * @addr:  virtual start address (user space)
 * @scope: FLUSH_SCOPE_ALL / FLUSH_SCOPE_LINE / FLUSH_SCOPE_PAGE
 * @cache: FLUSH_CACHE_DATA / FLUSH_CACHE_INSN / FLUSH_CACHE_BOTH
 * @len:   length of the range in bytes
 *
 * For LINE and PAGE scope the virtual range is translated page by page
 * with virt_to_phys_040(); pages that are not resident are skipped.
 * Always returns 0.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040.  */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			/* Keep the sub-page offset, rounded down to a
			   16-byte cache line. */
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			/* First page unmapped: advance page by page until
			   a resident one is found or the range is used up. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			/* Convert the remaining byte count to whole
			   16-byte cache lines, rounding up. */
			len = (len + 15) >> 4;
		}
		/* i = number of cache lines left in the current page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages, then push each
		   resident page with CPUSHP. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* Convert virtual (user) address VADDR to physical address using the
   68060 PLPAR instruction, which translates the address in place in
   the register.  NOTE(review): unlike virt_to_phys_040() there is no
   explicit "not resident" result decoded here (see the XXX below). */
#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr); /* XXX */					\
})
/*
 * Flush the 68060 caches over the requested range.
 *
 * @addr:  virtual start address (user space)
 * @scope: FLUSH_SCOPE_ALL / FLUSH_SCOPE_LINE / FLUSH_SCOPE_PAGE
 * @cache: FLUSH_CACHE_DATA / FLUSH_CACHE_INSN / FLUSH_CACHE_BOTH
 * @len:   length of the range in bytes
 *
 * Same structure as cache_flush_040(), but uses the 68060
 * PLPAR-based translation and needs no NOP workaround.
 * Always returns 0.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		/* Align the start down to a 16-byte cache line first. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			/* First page unmapped: advance page by page until
			   a resident one is found or the range is used up. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		/* Convert byte count to whole 16-byte cache lines. */
		len = (len + 15) >> 4;
		/* i = number of cache lines left in the current page. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages, then push each
		   resident page with CPUSHP. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
  472. /* sys_cacheflush -- flush (part of) the processor cache. */
  473. asmlinkage int
  474. sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
  475. {
  476. struct vm_area_struct *vma;
  477. int ret = -EINVAL;
  478. lock_kernel();
  479. if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
  480. cache & ~FLUSH_CACHE_BOTH)
  481. goto out;
  482. if (scope == FLUSH_SCOPE_ALL) {
  483. /* Only the superuser may explicitly flush the whole cache. */
  484. ret = -EPERM;
  485. if (!capable(CAP_SYS_ADMIN))
  486. goto out;
  487. } else {
  488. /*
  489. * Verify that the specified address region actually belongs
  490. * to this process.
  491. */
  492. vma = find_vma (current->mm, addr);
  493. ret = -EINVAL;
  494. /* Check for overflow. */
  495. if (addr + len < addr)
  496. goto out;
  497. if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
  498. goto out;
  499. }
  500. if (CPU_IS_020_OR_030) {
  501. if (scope == FLUSH_SCOPE_LINE && len < 256) {
  502. unsigned long cacr;
  503. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  504. if (cache & FLUSH_CACHE_INSN)
  505. cacr |= 4;
  506. if (cache & FLUSH_CACHE_DATA)
  507. cacr |= 0x400;
  508. len >>= 2;
  509. while (len--) {
  510. __asm__ __volatile__ ("movec %1, %%caar\n\t"
  511. "movec %0, %%cacr"
  512. : /* no outputs */
  513. : "r" (cacr), "r" (addr));
  514. addr += 4;
  515. }
  516. } else {
  517. /* Flush the whole cache, even if page granularity requested. */
  518. unsigned long cacr;
  519. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  520. if (cache & FLUSH_CACHE_INSN)
  521. cacr |= 8;
  522. if (cache & FLUSH_CACHE_DATA)
  523. cacr |= 0x800;
  524. __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
  525. }
  526. ret = 0;
  527. goto out;
  528. } else {
  529. /*
  530. * 040 or 060: don't blindly trust 'scope', someone could
  531. * try to flush a few megs of memory.
  532. */
  533. if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
  534. scope=FLUSH_SCOPE_PAGE;
  535. if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
  536. scope=FLUSH_SCOPE_ALL;
  537. if (CPU_IS_040) {
  538. ret = cache_flush_040 (addr, scope, cache, len);
  539. } else if (CPU_IS_060) {
  540. ret = cache_flush_060 (addr, scope, cache, len);
  541. }
  542. }
  543. out:
  544. unlock_kernel();
  545. return ret;
  546. }
/* Report the kernel's MMU page size to userspace. */
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 *
 * Issues "trap #0" with __NR_execve in %d0 and the three arguments in
 * %d1-%d3, exactly as userspace would; the explicit register asm
 * bindings below are what make this work, so keep them intact.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap  #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
/* Return the thread-pointer value stored by sys_set_thread_area(). */
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}
/* Store the thread-pointer value for the current task; retrieved
   later via sys_get_thread_area().  Always succeeds. */
asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).

   Compare-and-exchange for userspace: if *mem == oldval, store newval;
   either way return the value *mem held before the operation (or
   0xdeadbeef if the address could not be faulted in). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		/* Walk the page tables by hand so we can verify the page
		   is present, writable and dirty before touching it. */
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		/* Take the PTE lock so the mapping cannot change under us. */
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/* Plain load/compare/store; atomicity versus other user
		   threads relies on holding the PTE lock here --
		   NOTE(review): appears to assume a uniprocessor system. */
		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
/* Memory barrier syscall paired with sys_atomic_cmpxchg_32. */
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}