/*
 *  linux/arch/m68k/kernel/sys_m68k.c
 *
 *  This file contains various random system calls that
 *  have a non-standard calling sequence on the Linux/m68k
 *  platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long __user * fildes)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (!error) {
                if (copy_to_user(fildes, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
        return error;
}
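/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical userspace caller gets both descriptors back through the
 * array it passes in, rather than in registers as traditional unices
 * returned them:
 *
 *	int fds[2];
 *	if (pipe(fds) == 0) {
 *		... fds[0] is the read end, fds[1] the write end ...
 *	}
 */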
/* common code for old and new mmaps */
static inline long do_mmap2(
        unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file * file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */

struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
        struct mmap_arg_struct a;
        int error = -EFAULT;

        if (copy_from_user(&a, arg, sizeof(a)))
                goto out;

        error = -EINVAL;
        if (a.offset & ~PAGE_MASK)
                goto out;

        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
        return error;
}
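/*
 * Illustration (hypothetical caller, not part of the original file):
 * the old mmap entry point takes a pointer to a parameter block rather
 * than six separate arguments, and its offset is in bytes (and must be
 * page aligned), whereas sys_mmap2() takes an offset in pages.  The
 * 'length' value and the use of syscall() below are assumptions:
 *
 *	struct mmap_arg_struct a = {
 *		.addr   = 0,
 *		.len    = length,
 *		.prot   = PROT_READ | PROT_WRITE,
 *		.flags  = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd     = -1,
 *		.offset = 0,
 *	};
 *	ret = syscall(__NR_mmap, &a);
 */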
#if 0
struct mmap_arg_struct64 {
        __u32 addr;
        __u32 len;
        __u32 prot;
        __u32 flags;
        __u64 offset; /* 64 bits */
        __u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
        int error = -EFAULT;
        struct file * file = NULL;
        struct mmap_arg_struct64 a;
        unsigned long pgoff;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if ((long)a.offset & ~PAGE_MASK)
                return -EINVAL;

        pgoff = a.offset >> PAGE_SHIFT;
        if ((a.offset >> PAGE_SHIFT) != pgoff)
                return -EINVAL;

        if (!(a.flags & MAP_ANONYMOUS)) {
                error = -EBADF;
                file = fget(a.fd);
                if (!file)
                        goto out;
        }
        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
        up_write(&current->mm->mmap_sem);
        if (file)
                fput(file);
out:
        return error;
}
#endif

struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        /* sys_select() does the appropriate kernel locking */
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
                        int third, void __user *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void __user *__user *) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -ENOSYS;
                }
        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, ptr, second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
                                if (copy_from_user (&tmp, ptr, sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
                                return sys_msgrcv (first, ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }
        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = do_shmat (first, ptr, second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong __user *) third);
                                }
                        }
                case SHMDT:
                        return sys_shmdt (ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }

        return -EINVAL;
}
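/*
 * Illustration (hypothetical libc-side sketch, not part of the
 * original file): wrappers such as semget() funnel through this single
 * entry point, putting the sub-call number in 'call' and the interface
 * version in its upper 16 bits, e.g.
 *
 *	return ipc(SEMGET, key, nsems, semflg, NULL, 0);
 *
 * For the old (version 0) msgrcv() interface, msgp and msgtyp are
 * packed into a struct ipc_kludge whose address is passed in 'ptr',
 * which is why the MSGRCV case above dispatches on 'version'.
 */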
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
        unsigned long paddr, i;

        switch (scope)
        {
        case FLUSH_SCOPE_ALL:
                switch (cache)
                {
                case FLUSH_CACHE_DATA:
                        /* This nop is needed for some broken versions of the 68040. */
                        __asm__ __volatile__ ("nop\n\t"
                                              ".chip 68040\n\t"
                                              "cpusha %dc\n\t"
                                              ".chip 68k");
                        break;
                case FLUSH_CACHE_INSN:
                        __asm__ __volatile__ ("nop\n\t"
                                              ".chip 68040\n\t"
                                              "cpusha %ic\n\t"
                                              ".chip 68k");
                        break;
                default:
                case FLUSH_CACHE_BOTH:
                        __asm__ __volatile__ ("nop\n\t"
                                              ".chip 68040\n\t"
                                              "cpusha %bc\n\t"
                                              ".chip 68k");
                        break;
                }
                break;

        case FLUSH_SCOPE_LINE:
                /* Find the physical address of the first mapped page in the
                   address range. */
                if ((paddr = virt_to_phys_040(addr))) {
                        paddr += addr & ~(PAGE_MASK | 15);
                        len = (len + (addr & 15) + 15) >> 4;
                } else {
                        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

                        if (len <= tmp)
                                return 0;
                        addr += tmp;
                        len -= tmp;
                        tmp = PAGE_SIZE;
                        for (;;)
                        {
                                if ((paddr = virt_to_phys_040(addr)))
                                        break;
                                if (len <= tmp)
                                        return 0;
                                addr += tmp;
                                len -= tmp;
                        }
                        len = (len + 15) >> 4;
                }
                i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
                while (len--)
                {
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushl %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushl %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushl %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                        if (!--i && len)
                        {
                                /*
                                 * No need to page align here since it is done by
                                 * virt_to_phys_040().
                                 */
                                addr += PAGE_SIZE;
                                i = PAGE_SIZE / 16;
                                /* Recompute physical address when crossing a page
                                   boundary. */
                                for (;;)
                                {
                                        if ((paddr = virt_to_phys_040(addr)))
                                                break;
                                        if (len <= i)
                                                return 0;
                                        len -= i;
                                        addr += PAGE_SIZE;
                                }
                        }
                        else
                                paddr += 16;
                }
                break;

        default:
        case FLUSH_SCOPE_PAGE:
                len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
                for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
                {
                        if (!(paddr = virt_to_phys_040(addr)))
                                continue;
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushp %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushp %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ ("nop\n\t"
                                                      ".chip 68040\n\t"
                                                      "cpushp %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                }
                break;
        }
        return 0;
}
#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
        unsigned long paddr, i;

        /*
         * 68060 manual says:
         *  cpush %dc : flush DC, remains valid (with our %cacr setup)
         *  cpush %ic : invalidate IC
         *  cpush %bc : flush DC + invalidate IC
         */
        switch (scope)
        {
        case FLUSH_SCOPE_ALL:
                switch (cache)
                {
                case FLUSH_CACHE_DATA:
                        __asm__ __volatile__ (".chip 68060\n\t"
                                              "cpusha %dc\n\t"
                                              ".chip 68k");
                        break;
                case FLUSH_CACHE_INSN:
                        __asm__ __volatile__ (".chip 68060\n\t"
                                              "cpusha %ic\n\t"
                                              ".chip 68k");
                        break;
                default:
                case FLUSH_CACHE_BOTH:
                        __asm__ __volatile__ (".chip 68060\n\t"
                                              "cpusha %bc\n\t"
                                              ".chip 68k");
                        break;
                }
                break;

        case FLUSH_SCOPE_LINE:
                /* Find the physical address of the first mapped page in the
                   address range. */
                len += addr & 15;
                addr &= -16;
                if (!(paddr = virt_to_phys_060(addr))) {
                        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

                        if (len <= tmp)
                                return 0;
                        addr += tmp;
                        len -= tmp;
                        tmp = PAGE_SIZE;
                        for (;;)
                        {
                                if ((paddr = virt_to_phys_060(addr)))
                                        break;
                                if (len <= tmp)
                                        return 0;
                                addr += tmp;
                                len -= tmp;
                        }
                }
                len = (len + 15) >> 4;
                i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
                while (len--)
                {
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushl %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushl %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushl %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                        if (!--i && len)
                        {
                                /*
                                 * We just want to jump to the first cache line
                                 * in the next page.
                                 */
                                addr += PAGE_SIZE;
                                addr &= PAGE_MASK;
                                i = PAGE_SIZE / 16;
                                /* Recompute physical address when crossing a page
                                   boundary. */
                                for (;;)
                                {
                                        if ((paddr = virt_to_phys_060(addr)))
                                                break;
                                        if (len <= i)
                                                return 0;
                                        len -= i;
                                        addr += PAGE_SIZE;
                                }
                        }
                        else
                                paddr += 16;
                }
                break;

        default:
        case FLUSH_SCOPE_PAGE:
                len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
                addr &= PAGE_MASK;      /* Workaround for bug in some
                                           revisions of the 68060 */
                for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
                {
                        if (!(paddr = virt_to_phys_060(addr)))
                                continue;
                        switch (cache)
                        {
                        case FLUSH_CACHE_DATA:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushp %%dc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        case FLUSH_CACHE_INSN:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushp %%ic,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        default:
                        case FLUSH_CACHE_BOTH:
                                __asm__ __volatile__ (".chip 68060\n\t"
                                                      "cpushp %%bc,(%0)\n\t"
                                                      ".chip 68k"
                                                      : : "a" (paddr));
                                break;
                        }
                }
                break;
        }
        return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow. */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */
                if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out:
        unlock_kernel();
        return ret;
}
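/*
 * Userspace usage sketch (illustrative only, not part of the original
 * file): after writing instructions into a buffer, flush both caches
 * over just that range before jumping to it.  The scope and cache
 * constants come from <asm/cachectl.h>; the 'cacheflush' wrapper name
 * and 'code_len' are assumptions, the call can equally be made via
 * syscall():
 *
 *	char *code = ...;	(buffer filled with machine code)
 *	cacheflush((unsigned long) code, FLUSH_SCOPE_LINE,
 *		   FLUSH_CACHE_BOTH, code_len);
 *
 * Note that FLUSH_SCOPE_ALL is rejected with -EPERM above unless the
 * caller has CAP_SYS_ADMIN, and that large ranges are silently widened
 * to page or whole-cache scope on the 040/060.
 */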
asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        register long __res asm ("%d0") = __NR_execve;
        register long __a asm ("%d1") = (long)(filename);
        register long __b asm ("%d2") = (long)(argv);
        register long __c asm ("%d3") = (long)(envp);
        asm volatile ("trap #0" : "+d" (__res)
                        : "d" (__a), "d" (__b), "d" (__c));
        return __res;
}
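/*
 * For illustration only (not part of the original file): the same
 * trap #0 convention generalizes to any three-argument system call;
 * d0 carries the syscall number in and the result out, while d1-d3
 * carry the arguments.  A minimal sketch, kept disabled, assuming the
 * standard Linux/m68k trap interface; the helper name is hypothetical.
 */
#if 0
static inline long m68k_syscall3(long nr, long a, long b, long c)
{
        register long res asm ("%d0") = nr;     /* syscall number / return value */
        register long d1 asm ("%d1") = a;
        register long d2 asm ("%d2") = b;
        register long d3 asm ("%d3") = c;

        asm volatile ("trap #0"
                      : "+d" (res)
                      : "d" (d1), "d" (d2), "d" (d3));
        return res;
}
#endif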