/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
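	/*
	 * Illustrative only (not applied here): on an 8Kb PAGE_SIZE
	 * kernel such as sun3, the 4Kb-unit offset from userspace would
	 * first have to be converted, roughly
	 *
	 *	if (pgoff & 1)
	 *		return -EINVAL;
	 *	pgoff >>= 1;
	 *
	 * before being handed to sys_mmap_pgoff().
	 */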
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
		a.offset >> PAGE_SHIFT);
out:
	return error;
}

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
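/*
 * The low 16 bits of 'call' select the operation (SEMOP, MSGSND, SHMAT,
 * ...); the high 16 bits carry a version number that is used below to
 * keep old binaries working (e.g. the struct ipc_kludge form of MSGRCV).
 */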
asmlinkage int sys_ipc (uint call, int first, int second,
	int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
					tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
					second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
/* Convert virtual (user) address VADDR to physical address PADDR */
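/*
 * On the 68040, "ptestr (An)" searches the translation for a user
 * address and latches the result in the MMU status register; if the
 * resident bit (MMU_R_040) is set, the page-aligned physical address is
 * taken from %mmusr, otherwise the macro yields 0 to flag an unmapped
 * page.
 */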
#define virt_to_phys_040(vaddr) \
({ \
	unsigned long _mmusr, _paddr; \
	\
	__asm__ __volatile__ (".chip 68040\n\t" \
		"ptestr (%1)\n\t" \
		"movec %%mmusr,%0\n\t" \
		".chip 68k" \
		: "=r" (_mmusr) \
		: "a" (vaddr)); \
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
	_paddr; \
})
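/*
 * Push/invalidate the 68040 caches for the user range [addr, addr+len).
 * FLUSH_SCOPE_ALL uses cpusha on the whole cache, FLUSH_SCOPE_PAGE uses
 * cpushp on every mapped page in the range, and FLUSH_SCOPE_LINE uses
 * cpushl on each 16-byte cache line, re-translating the address at every
 * page boundary; unmapped pages are simply skipped.
 */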
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
				".chip 68040\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushl %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushl %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushl %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushp %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushp %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
					".chip 68040\n\t"
					"cpushp %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
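/*
 * On the 68060 the plpar instruction translates the logical address in
 * the address register into a physical address in place.  Note that,
 * unlike virt_to_phys_040() above, there is no explicit "unmapped page"
 * result here - presumably the reason for the XXX in the macro.
 */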
#define virt_to_phys_060(vaddr) \
({ \
	unsigned long paddr; \
	__asm__ __volatile__ (".chip 68060\n\t" \
		"plpar (%0)\n\t" \
		".chip 68k" \
		: "=a" (paddr) \
		: "0" (vaddr)); \
	(paddr); /* XXX */ \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %dc\n\t"
				".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %ic\n\t"
				".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
				"cpusha %bc\n\t"
				".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushl %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushl %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushl %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushp %%dc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushp %%ic,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
					"cpushp %%bc,(%0)\n\t"
					".chip 68k"
					: : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
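/*
 * 'scope' is one of FLUSH_SCOPE_LINE, FLUSH_SCOPE_PAGE or FLUSH_SCOPE_ALL
 * and 'cache' is a mask of FLUSH_CACHE_DATA and/or FLUSH_CACHE_INSN (see
 * <asm/cachectl.h>).  A typical caller is user-space code that has just
 * written instructions - a JIT, a trampoline, a dynamic linker - and
 * needs the caches made coherent before jumping to them, roughly
 * (illustrative only):
 *
 *	syscall(__NR_cacheflush, (unsigned long) code, FLUSH_SCOPE_LINE,
 *		FLUSH_CACHE_BOTH, code_len);
 */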
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
					"movec %0, %%cacr"
					: /* no outputs */
					: "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
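/*
 * m68k system call convention: the syscall number goes in %d0 and the
 * first three arguments in %d1-%d3; "trap #0" enters the kernel and the
 * result comes back in %d0.
 */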
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
		: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}