/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (!error) {
                if (copy_to_user(fildes, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
        return error;
}

/* common code for old and new mmaps */
static inline long do_mmap2(
        unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file * file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
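
/*
 * Note: sys_mmap2() already receives its file offset scaled to page units
 * (pgoff) and hands it straight to do_mmap_pgoff(); the legacy old_mmap()
 * entry point below still takes a byte offset and converts it with a shift
 * by PAGE_SHIFT.
 */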

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which at the time could not
 * handle more than 4 system call parameters, so these system calls
 * use a memory block for parameter passing.
 */
struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
        struct mmap_arg_struct a;
        int error = -EFAULT;

        if (copy_from_user(&a, arg, sizeof(a)))
                goto out;

        error = -EINVAL;
        if (a.offset & ~PAGE_MASK)
                goto out;

        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
        return error;
}
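
/*
 * For illustration (hedged; the exact marshalling is the C library's
 * business): a caller of this legacy entry point passes one pointer to a
 * filled-in mmap_arg_struct instead of six separate arguments, roughly:
 *
 *      struct mmap_arg_struct a = {
 *              .addr   = 0,
 *              .len    = 8192,
 *              .prot   = PROT_READ | PROT_WRITE,
 *              .flags  = MAP_PRIVATE | MAP_ANONYMOUS,
 *              .fd     = (unsigned long)-1,
 *              .offset = 0,            (byte offset, must be page aligned)
 *      };
 *      void *p = (void *)syscall(__NR_mmap, &a);
 *
 * __NR_mmap is assumed here to name the struct-taking entry, as on i386;
 * new code should use mmap2()/sys_mmap2() instead.
 */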

#if 0
struct mmap_arg_struct64 {
        __u32 addr;
        __u32 len;
        __u32 prot;
        __u32 flags;
        __u64 offset; /* 64 bits */
        __u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
        int error = -EFAULT;
        struct file * file = NULL;
        struct mmap_arg_struct64 a;
        unsigned long pgoff;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if ((long)a.offset & ~PAGE_MASK)
                return -EINVAL;

        pgoff = a.offset >> PAGE_SHIFT;
        if ((a.offset >> PAGE_SHIFT) != pgoff)
                return -EINVAL;

        if (!(a.flags & MAP_ANONYMOUS)) {
                error = -EBADF;
                file = fget(a.fd);
                if (!file)
                        goto out;
        }
        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
        up_write(&current->mm->mmap_sem);
        if (file)
                fput(file);
out:
        return error;
}
#endif

struct sel_arg_struct {
        unsigned long n;
        fd_set *inp, *outp, *exp;
        struct timeval *tvp;
};

asmlinkage int old_select(struct sel_arg_struct *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        /* sys_select() does the appropriate kernel locking */
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
                        int third, void *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, (struct sembuf *)ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void **) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -ENOSYS;
                }

        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, (struct msgbuf *) ptr,
                                           second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
                                if (copy_from_user (&tmp,
                                                    (struct ipc_kludge *)ptr,
                                                    sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
                                return sys_msgrcv (first,
                                                   (struct msgbuf *) ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second,
                                           (struct msqid_ds *) ptr);
                default:
                        return -ENOSYS;
                }

        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = do_shmat (first, (char *) ptr,
                                                second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong *) third);
                                }
                        }
                case SHMDT:
                        return sys_shmdt ((char *)ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second,
                                           (struct shmid_ds *) ptr);
                default:
                        return -ENOSYS;
                }

        return -EINVAL;
}
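
/*
 * For illustration (hedged; the exact marshalling is up to the C library):
 * the usual libc wrappers funnel every SysV IPC primitive through this one
 * entry point.  A semop(semid, sops, nsops) call typically arrives as
 *
 *      sys_ipc(SEMOP, semid, nsops, 0, sops, 0);
 *
 * and shmat(shmid, shmaddr, shmflg) as
 *
 *      sys_ipc(SHMAT, shmid, shmflg, (int)&raddr, shmaddr, 0);
 *
 * with the kernel writing the attach address back through the third
 * argument, matching the SHMAT case above.
 */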

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
        unsigned long _mmusr, _paddr; \
\
        __asm__ __volatile__ (".chip 68040\n\t" \
                              "ptestr (%1)\n\t" \
                              "movec %%mmusr,%0\n\t" \
                              ".chip 68k" \
                              : "=r" (_mmusr) \
                              : "a" (vaddr)); \
        _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
        _paddr; \
})
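
/*
 * Descriptive note: PTESTR performs a table search for the given logical
 * address and latches the result in the MMU status register; when the
 * resident bit (MMU_R_040) is set, the page-frame bits of %mmusr give the
 * physical page, so the macro above yields the physical page address, or
 * 0 if the page is not currently mapped.  (See the 68040 user's manual
 * for the authoritative MMUSR layout.)
 */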

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040. */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range. */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

#define virt_to_phys_060(vaddr) \
({ \
        unsigned long paddr; \
        __asm__ __volatile__ (".chip 68060\n\t" \
                              "plpar (%0)\n\t" \
                              ".chip 68k" \
                              : "=a" (paddr) \
                              : "0" (vaddr)); \
        (paddr); /* XXX */ \
})
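
/*
 * Descriptive note: PLPAR asks the 68060 MMU to translate the logical
 * address held in the address register and leaves the physical address in
 * the same register.  Unlike the 68040 helper above there is no explicit
 * "resident" check here; the callers below simply treat a zero result as
 * an unmapped page, which appears to be what the XXX marker is about.
 */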

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range. */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow. */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
                /*
                 * 040 or 060: don't blindly trust 'scope', someone could
                 * try to flush a few megs of memory.
                 */
                if (len >= 3*PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
                        scope = FLUSH_SCOPE_PAGE;
                if (len >= 10*PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
                        scope = FLUSH_SCOPE_ALL;
                if (CPU_IS_040) {
                        ret = cache_flush_040 (addr, scope, cache, len);
                } else if (CPU_IS_060) {
                        ret = cache_flush_060 (addr, scope, cache, len);
                }
        }
out:
        unlock_kernel();
        return ret;
}
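
/*
 * Usage sketch (hedged; the wrapper name and availability depend on the
 * C library): user space typically issues this after writing instructions
 * into a buffer and before jumping to them, e.g.
 *
 *      generate_code(buf, size);       (hypothetical helper)
 *      syscall(__NR_cacheflush, (unsigned long)buf,
 *              FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, size);
 *      ((void (*)(void))buf)();
 *
 * FLUSH_SCOPE_ALL is reserved for CAP_SYS_ADMIN, as enforced above; any
 * other request must lie entirely within one of the caller's own VMAs.
 */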

asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}