/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
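/*
 * Illustrative user-space view: both descriptors come back through the
 * array argument,
 *
 *     int fd[2];
 *     pipe(fd);    -- fd[0] is the read end, fd[1] the write end
 *
 * whereas "traditional" unix returned the two descriptors in two
 * registers rather than through a pointer.
 */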
asmlinkage int sys_pipe(unsigned long __user * fildes)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (!error) {
                if (copy_to_user(fildes, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
        return error;
}

/* common code for old and new mmaps */
static inline long do_mmap2(
        unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file * file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */
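/*
 * Illustrative only: user space (e.g. an old libc mmap() wrapper) fills in
 * one of these blocks and passes a single pointer to it, roughly
 *
 *     struct mmap_arg_struct args = { addr, len, prot, flags, fd, offset };
 *     syscall(__NR_mmap, &args);
 *
 * rather than passing six separate arguments (assuming that syscall slot is
 * wired to old_mmap on this kernel).
 */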
struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
        struct mmap_arg_struct a;
        int error = -EFAULT;

        if (copy_from_user(&a, arg, sizeof(a)))
                goto out;

        error = -EINVAL;
        if (a.offset & ~PAGE_MASK)
                goto out;

        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
        return error;
}

#if 0
struct mmap_arg_struct64 {
        __u32 addr;
        __u32 len;
        __u32 prot;
        __u32 flags;
        __u64 offset; /* 64 bits */
        __u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
        int error = -EFAULT;
        struct file * file = NULL;
        struct mmap_arg_struct64 a;
        unsigned long pgoff;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if ((long)a.offset & ~PAGE_MASK)
                return -EINVAL;

        pgoff = a.offset >> PAGE_SHIFT;
        if ((a.offset >> PAGE_SHIFT) != pgoff)
                return -EINVAL;

        if (!(a.flags & MAP_ANONYMOUS)) {
                error = -EBADF;
                file = fget(a.fd);
                if (!file)
                        goto out;
        }
        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
        up_write(&current->mm->mmap_sem);
        if (file)
                fput(file);
out:
        return error;
}
#endif

struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        /* sys_select() does the appropriate kernel locking */
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
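/*
 * For illustration: a library call such as semop(id, ops, n) typically
 * reaches this entry point as
 *
 *     sys_ipc(SEMOP, id, n, 0, ops, 0);
 *
 * i.e. "call" selects the operation and first/second/third/ptr/fifth carry
 * that operation's arguments (the exact packing is up to libc).
 */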
asmlinkage int sys_ipc (uint call, int first, int second,
                        int third, void __user *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void __user *__user *) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -ENOSYS;
                }

        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, ptr, second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
                                if (copy_from_user (&tmp, ptr, sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
                                return sys_msgrcv (first, ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }

        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = do_shmat (first, ptr, second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong __user *) third);
                                }
                        }
                case SHMDT:
                        return sys_shmdt (ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }

        return -EINVAL;
}

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
        unsigned long _mmusr, _paddr;                                   \
                                                                        \
        __asm__ __volatile__ (".chip 68040\n\t"                         \
                              "ptestr (%1)\n\t"                         \
                              "movec %%mmusr,%0\n\t"                    \
                              ".chip 68k"                               \
                              : "=r" (_mmusr)                           \
                              : "a" (vaddr));                           \
        _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;       \
        _paddr;                                                         \
})
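/*
 * ptestr performs an MMU table search for the logical address and latches
 * the result in the 68040 MMUSR: when the resident bit (MMU_R_040) is set,
 * the upper MMUSR bits hold the physical page address, hence the PAGE_MASK
 * masking.  Callers below treat a result of 0 as "page not mapped".
 */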
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040. */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
#define virt_to_phys_060(vaddr)                                 \
({                                                              \
        unsigned long paddr;                                    \
        __asm__ __volatile__ (".chip 68060\n\t"                 \
                              "plpar (%0)\n\t"                  \
                              ".chip 68k"                       \
                              : "=a" (paddr)                    \
                              : "0" (vaddr));                   \
        (paddr); /* XXX */                                      \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK; /* Workaround for bug in some revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
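/*
 * Illustrative user-space use (not defined here): after generating code at
 * run time, a program would typically do something like
 *
 *     syscall(__NR_cacheflush, addr, FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
 *
 * with the FLUSH_* constants from <asm/cachectl.h>; __NR_cacheflush is
 * assumed here to be the m68k syscall number wired to this entry point.
 */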
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;      /* CACR CEI: clear instruction cache entry */
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;  /* CACR CED: clear data cache entry (68030) */
                        len >>= 2;
                        while (len--) {
                                /* Load the address into %caar, then write the
                                   clear-entry bits into %cacr. */
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;      /* CACR CI: clear entire instruction cache */
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;  /* CACR CD: clear entire data cache (68030) */
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */
                if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out:
        unlock_kernel();
        return ret;
}
asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}