  1. /*
  2. * linux/arch/m68k/kernel/sys_m68k.c
  3. *
  4. * This file contains various random system calls that
  5. * have a non-standard calling sequence on the Linux/m68k
  6. * platform.
  7. */
  8. #include <linux/capability.h>
  9. #include <linux/errno.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/smp.h>
  13. #include <linux/smp_lock.h>
  14. #include <linux/sem.h>
  15. #include <linux/msg.h>
  16. #include <linux/shm.h>
  17. #include <linux/stat.h>
  18. #include <linux/syscalls.h>
  19. #include <linux/mman.h>
  20. #include <linux/file.h>
  21. #include <linux/utsname.h>
  22. #include <asm/setup.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/cachectl.h>
  25. #include <asm/traps.h>
  26. #include <asm/ipc.h>
  27. #include <asm/page.h>
  28. #include <asm/unistd.h>
  29. /*
  30. * sys_pipe() is the normal C calling standard for creating
  31. * a pipe. It's not the way unix traditionally does this, though.
  32. */
  33. asmlinkage int sys_pipe(unsigned long __user * fildes)
  34. {
  35. int fd[2];
  36. int error;
  37. error = do_pipe(fd);
  38. if (!error) {
  39. if (copy_to_user(fildes, fd, 2*sizeof(int)))
  40. error = -EFAULT;
  41. }
  42. return error;
  43. }
  44. /* common code for old and new mmaps */
  45. static inline long do_mmap2(
  46. unsigned long addr, unsigned long len,
  47. unsigned long prot, unsigned long flags,
  48. unsigned long fd, unsigned long pgoff)
  49. {
  50. int error = -EBADF;
  51. struct file * file = NULL;
  52. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  53. if (!(flags & MAP_ANONYMOUS)) {
  54. file = fget(fd);
  55. if (!file)
  56. goto out;
  57. }
  58. down_write(&current->mm->mmap_sem);
  59. error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
  60. up_write(&current->mm->mmap_sem);
  61. if (file)
  62. fput(file);
  63. out:
  64. return error;
  65. }
/*
 * mmap2() entry point: the offset argument is already expressed in
 * pages (pgoff) and is passed straight through to do_mmap2().
 */
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */
struct mmap_arg_struct {
	unsigned long addr;	/* requested mapping address */
	unsigned long len;	/* length of the mapping in bytes */
	unsigned long prot;	/* PROT_* protection bits */
	unsigned long flags;	/* MAP_* flags */
	unsigned long fd;	/* file descriptor (ignored for MAP_ANONYMOUS) */
	unsigned long offset;	/* file offset in bytes; must be page-aligned */
};
  86. asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
  87. {
  88. struct mmap_arg_struct a;
  89. int error = -EFAULT;
  90. if (copy_from_user(&a, arg, sizeof(a)))
  91. goto out;
  92. error = -EINVAL;
  93. if (a.offset & ~PAGE_MASK)
  94. goto out;
  95. a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  96. error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
  97. out:
  98. return error;
  99. }
#if 0	/* Compiled out: mmap64 variant kept in the source for reference only. */
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset;	/* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	/* The 64-bit byte offset must still be page-aligned. */
	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	/* Reject offsets whose page number does not fit in unsigned long. */
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif
/*
 * Argument block for the old select() calling convention: userspace
 * passes one pointer to this struct instead of five separate arguments.
 */
struct sel_arg_struct {
	unsigned long n;			/* first argument to sys_select() */
	fd_set __user *inp, *outp, *exp;	/* read/write/except fd sets */
	struct timeval __user *tvp;		/* timeout, or NULL */
};
  143. asmlinkage int old_select(struct sel_arg_struct __user *arg)
  144. {
  145. struct sel_arg_struct a;
  146. if (copy_from_user(&a, arg, sizeof(a)))
  147. return -EFAULT;
  148. /* sys_select() does the appropriate kernel locking */
  149. return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
  150. }
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	/* The upper 16 bits of 'call' carry an interface version number. */
	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	/* Semaphore operations. */
	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			/* 'ptr' points at the union semun argument in
			   user memory; fetch its pointer member. */
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}

	/* Message-queue operations. */
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old interface: msgp and msgtyp arrive packed
				   in a struct ipc_kludge at 'ptr'. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	/* Shared-memory operations. */
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				/* On success the attach address is written
				   back through the user pointer in 'third'. */
				return put_user (raddr, (ulong __user *) third);
				}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	/* ptestr asks the 68040 MMU to translate vaddr for a read	\
	   access; the translation status lands in %mmusr.  */		\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	/* Resident (R) bit set -> page frame address; else 0 so	\
	   callers can detect an unmapped page.  */			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
/*
 * Push/invalidate the selected 68040 caches over [addr, addr + len).
 * 'scope' selects all/line/page granularity, 'cache' selects the data
 * cache, instruction cache, or both.  Always returns 0; unmapped pages
 * inside the range are silently skipped.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040.  */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;
	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;	/* bytes -> 16-byte lines */
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;	/* whole range lies in the unmapped page */
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;	/* lines left in this page */
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;
	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages, then push page by page,
		   skipping pages that are not mapped.  */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/*
 * 68060 virtual -> physical translation via the PLPAR instruction
 * (load physical address, read access).
 *
 * NOTE(review): unlike virt_to_phys_040() there is no explicit
 * "unmapped" status check here, yet callers compare the result
 * against 0 — presumably what the XXX below refers to; confirm
 * against the 68060 manual's PLPA behavior for invalid mappings.
 */
#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr); /* XXX */					\
})
/*
 * 68060 counterpart of cache_flush_040(): push/invalidate the selected
 * caches over [addr, addr + len) at the requested granularity.
 * Always returns 0; unmapped pages are skipped.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;
	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;	/* widen to the enclosing 16-byte line */
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;	/* entire range is unmapped */
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;	/* bytes -> cache lines */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;	/* lines left in this page */
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;
	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages and push page by page. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
  538. /* sys_cacheflush -- flush (part of) the processor cache. */
  539. asmlinkage int
  540. sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
  541. {
  542. struct vm_area_struct *vma;
  543. int ret = -EINVAL;
  544. lock_kernel();
  545. if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
  546. cache & ~FLUSH_CACHE_BOTH)
  547. goto out;
  548. if (scope == FLUSH_SCOPE_ALL) {
  549. /* Only the superuser may explicitly flush the whole cache. */
  550. ret = -EPERM;
  551. if (!capable(CAP_SYS_ADMIN))
  552. goto out;
  553. } else {
  554. /*
  555. * Verify that the specified address region actually belongs
  556. * to this process.
  557. */
  558. vma = find_vma (current->mm, addr);
  559. ret = -EINVAL;
  560. /* Check for overflow. */
  561. if (addr + len < addr)
  562. goto out;
  563. if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
  564. goto out;
  565. }
  566. if (CPU_IS_020_OR_030) {
  567. if (scope == FLUSH_SCOPE_LINE && len < 256) {
  568. unsigned long cacr;
  569. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  570. if (cache & FLUSH_CACHE_INSN)
  571. cacr |= 4;
  572. if (cache & FLUSH_CACHE_DATA)
  573. cacr |= 0x400;
  574. len >>= 2;
  575. while (len--) {
  576. __asm__ __volatile__ ("movec %1, %%caar\n\t"
  577. "movec %0, %%cacr"
  578. : /* no outputs */
  579. : "r" (cacr), "r" (addr));
  580. addr += 4;
  581. }
  582. } else {
  583. /* Flush the whole cache, even if page granularity requested. */
  584. unsigned long cacr;
  585. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  586. if (cache & FLUSH_CACHE_INSN)
  587. cacr |= 8;
  588. if (cache & FLUSH_CACHE_DATA)
  589. cacr |= 0x800;
  590. __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
  591. }
  592. ret = 0;
  593. goto out;
  594. } else {
  595. /*
  596. * 040 or 060: don't blindly trust 'scope', someone could
  597. * try to flush a few megs of memory.
  598. */
  599. if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
  600. scope=FLUSH_SCOPE_PAGE;
  601. if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
  602. scope=FLUSH_SCOPE_ALL;
  603. if (CPU_IS_040) {
  604. ret = cache_flush_040 (addr, scope, cache, len);
  605. } else if (CPU_IS_060) {
  606. ret = cache_flush_060 (addr, scope, cache, len);
  607. }
  608. }
  609. out:
  610. unlock_kernel();
  611. return ret;
  612. }
/* Report the kernel's page size to userspace. */
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	/* Hand-rolled syscall: the syscall number goes in %d0 and the
	   three arguments in %d1-%d3; "trap #0" enters the kernel's
	   normal syscall path, which builds the pt_regs frame. */
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	/* The trap leaves the syscall's return value in %d0 (__res). */
	return __res;
}