/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
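
/*
 * sys_mmap2() takes its file offset in units of pages (pgoff), while
 * old_mmap() below takes a byte offset that must be page aligned.
 * Illustrative user-space call (not part of this file; 'fd' is assumed
 * to be an open descriptor) mapping one page at byte offset 8192 on a
 * 4k-page machine:
 *
 *	void *p = (void *)syscall(__NR_mmap2, 0, 4096, PROT_READ,
 *				  MAP_PRIVATE, fd, 2);
 *
 * i.e. the last argument is 8192 >> PAGE_SHIFT, not 8192.
 */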

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which originally couldn't
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}
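
/*
 * An old-style caller packs all six arguments into a mmap_arg_struct
 * and passes only its address.  Illustrative user-space sketch (not
 * part of this file):
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = 4096,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = (unsigned long)-1, .offset = 0,
 *	};
 *	void *p = (void *)syscall(__NR_mmap, &a);
 *
 * old_select() below follows the same convention with a
 * sel_arg_struct.
 */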

#if 0
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset; /* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
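
/*
 * Illustrative mapping from a library IPC wrapper to this entry point
 * (not part of this file): a semop(semid, sops, nsops) call typically
 * ends up roughly as
 *
 *	syscall(__NR_ipc, SEMOP, semid, nsops, 0, sops, 0);
 *
 * i.e. 'call' selects the operation while first/second/third/ptr carry
 * the real arguments; SHMAT additionally returns the attach address by
 * writing it through 'third' with put_user() above.
 */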

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)					\
({								\
	unsigned long _mmusr, _paddr;				\
								\
	__asm__ __volatile__ (".chip 68040\n\t"			\
			      "ptestr (%1)\n\t"			\
			      "movec %%mmusr,%0\n\t"		\
			      ".chip 68k"			\
			      : "=r" (_mmusr)			\
			      : "a" (vaddr));			\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr;							\
})
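
/*
 * A result of 0 from virt_to_phys_040() (or from virt_to_phys_060()
 * further down) is treated by the flush loops below as "page not
 * currently mapped"; the loops then skip ahead to the next page
 * instead of pushing cache lines for it.
 */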

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushl %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushl %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushl %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushp %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushp %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushp %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr); /* XXX */					\
})

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushl %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushl %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushl %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushp %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushp %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushp %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
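
/*
 * Illustrative user-space use (not part of this file): after writing
 * generated machine code into a buffer, a JIT would flush it with
 * something like
 *
 *	syscall(__NR_cacheflush, (unsigned long)code,
 *		FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, code_len);
 *
 * where 'code' and 'code_len' are assumed names for the buffer and its
 * size in bytes; FLUSH_SCOPE_ALL would additionally require
 * CAP_SYS_ADMIN, as checked above.
 */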

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
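/*
 * The m68k system call trap convention, as encoded by the register
 * constraints below: %d0 holds the system call number and receives the
 * return value of "trap #0", while %d1-%d3 carry the first three
 * arguments.
 */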
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}