  1. /*
  2. * linux/arch/m68k/kernel/sys_m68k.c
  3. *
  4. * This file contains various random system calls that
  5. * have a non-standard calling sequence on the Linux/m68k
  6. * platform.
  7. */
  8. #include <linux/capability.h>
  9. #include <linux/errno.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/fs.h>
  13. #include <linux/smp.h>
  14. #include <linux/smp_lock.h>
  15. #include <linux/sem.h>
  16. #include <linux/msg.h>
  17. #include <linux/shm.h>
  18. #include <linux/stat.h>
  19. #include <linux/syscalls.h>
  20. #include <linux/mman.h>
  21. #include <linux/file.h>
  22. #include <linux/ipc.h>
  23. #include <asm/setup.h>
  24. #include <asm/uaccess.h>
  25. #include <asm/cachectl.h>
  26. #include <asm/traps.h>
  27. #include <asm/page.h>
  28. #include <asm/unistd.h>
  29. /* common code for old and new mmaps */
  30. static inline long do_mmap2(
  31. unsigned long addr, unsigned long len,
  32. unsigned long prot, unsigned long flags,
  33. unsigned long fd, unsigned long pgoff)
  34. {
  35. int error = -EBADF;
  36. struct file * file = NULL;
  37. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  38. if (!(flags & MAP_ANONYMOUS)) {
  39. file = fget(fd);
  40. if (!file)
  41. goto out;
  42. }
  43. down_write(&current->mm->mmap_sem);
  44. error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
  45. up_write(&current->mm->mmap_sem);
  46. if (file)
  47. fput(file);
  48. out:
  49. return error;
  50. }
/*
 * New-style mmap(2) entry point: @pgoff is already expressed in page
 * units, so it is handed straight to do_mmap2().
 */
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

/* Argument block passed from user space by the legacy mmap() call. */
struct mmap_arg_struct {
	unsigned long addr;	/* requested start address */
	unsigned long len;	/* length of the mapping in bytes */
	unsigned long prot;	/* PROT_* protection bits */
	unsigned long flags;	/* MAP_* flags */
	unsigned long fd;	/* file descriptor to map */
	unsigned long offset;	/* byte offset into the file */
};
  71. asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
  72. {
  73. struct mmap_arg_struct a;
  74. int error = -EFAULT;
  75. if (copy_from_user(&a, arg, sizeof(a)))
  76. goto out;
  77. error = -EINVAL;
  78. if (a.offset & ~PAGE_MASK)
  79. goto out;
  80. a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  81. error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
  82. out:
  83. return error;
  84. }
#if 0	/* Dead code: 64-bit-offset mmap variant, compiled out. */
/* Argument block for a would-be mmap64() with a 64-bit file offset. */
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset; /* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	/* Byte offset must be page aligned. */
	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	/* Reject offsets whose page number does not fit in unsigned long. */
	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif
/* Packed argument block passed from user space by the old select(2). */
struct sel_arg_struct {
	unsigned long n;			/* highest fd + 1 */
	fd_set __user *inp, *outp, *exp;	/* read/write/except sets */
	struct timeval __user *tvp;		/* optional timeout */
};
  128. asmlinkage int old_select(struct sel_arg_struct __user *arg)
  129. {
  130. struct sel_arg_struct a;
  131. if (copy_from_user(&a, arg, sizeof(a)))
  132. return -EFAULT;
  133. /* sys_select() does the appropriate kernel locking */
  134. return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
  135. }
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * The meaning of @first..@fifth and @ptr depends on the selected
 * @call; @version (the top 16 bits of @call) selects between old and
 * new argument layouts for some calls.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	/* Semaphore operations. */
	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			/* ptr points at a user pointer holding the 4th arg. */
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -ENOSYS;
		}

	/* Message-queue operations. */
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				/* Old ABI: msgp and msgtyp packed in a kludge struct. */
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	/* Shared-memory operations. */
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				/* Attach address is returned through *third. */
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
/*
 * Convert virtual (user) address VADDR to physical address PADDR.
 * Uses the 68040 PTESTR instruction and reads the result back from
 * the MMU status register; evaluates to 0 when the MMU_R_040 bit is
 * not set (i.e. the page is not mapped).
 */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
/*
 * Flush the 68040 caches over a user address range.
 * @addr/@len select the range, @scope is one of FLUSH_SCOPE_{LINE,
 * PAGE,ALL} and @cache selects FLUSH_CACHE_{DATA,INSN,BOTH}.
 * Unmapped pages in the range are skipped.  Always returns 0.
 */
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		/* Whole-cache flush: the address range is ignored. */
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040.  */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			/* Keep the sub-page offset, rounded down to a line. */
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			/* First page unmapped: advance page by page until a
			   mapped one is found or the range is exhausted. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;	/* bytes -> 16-byte lines */
		}
		/* Lines remaining before the next page boundary. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages and flush per page. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;	/* skip unmapped pages */
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/*
 * Convert virtual (user) address VADDR to physical address PADDR on
 * the 68060 via the PLPAR (load physical address) instruction.
 */
#define virt_to_phys_060(vaddr)				\
({							\
	unsigned long paddr;				\
	__asm__ __volatile__ (".chip 68060\n\t"		\
			      "plpar (%0)\n\t"		\
			      ".chip 68k"		\
			      : "=a" (paddr)		\
			      : "0" (vaddr));		\
	(paddr); /* XXX */				\
})
/*
 * Flush the 68060 caches over a user address range.
 * Same interface as cache_flush_040(); unmapped pages are skipped.
 * Always returns 0.
 */
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		/* Whole-cache flush: the address range is ignored. */
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;	/* widen range over the partial first line */
		addr &= -16;		/* align down to a 16-byte cache line */
		if (!(paddr = virt_to_phys_060(addr))) {
			/* First page unmapped: scan forward page by page. */
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;	/* bytes -> 16-byte lines */
		/* Lines remaining before the next page boundary. */
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		/* Round the range out to whole pages and flush per page. */
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;	/* skip unmapped pages */
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/*
 * sys_cacheflush -- flush (part of) the processor cache.
 *
 * @addr/@len: user address range (ignored for FLUSH_SCOPE_ALL).
 * @scope: FLUSH_SCOPE_LINE, FLUSH_SCOPE_PAGE or FLUSH_SCOPE_ALL.
 * @cache: mask of FLUSH_CACHE_DATA / FLUSH_CACHE_INSN.
 * Returns 0 on success, -EINVAL/-EPERM on bad arguments or missing
 * privilege.
 */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	/* Reject out-of-range scope/cache selectors up front. */
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			/* Short range: clear individual entries via CAAR. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;	/* clear-entry bit, insn cache */
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;	/* clear-entry bit, data cache */
			len >>= 2;		/* bytes -> 4-byte entries */
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;	/* clear-cache bit, insn cache */
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;	/* clear-cache bit, data cache */
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
			scope=FLUSH_SCOPE_PAGE;
		if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
			scope=FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
/* Report the kernel's PAGE_SIZE to user space. */
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	/* m68k syscall ABI: number in %d0, arguments in %d1-%d3. */
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	/* trap #0 enters the kernel; %d0 comes back holding the result. */
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}