/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
  8. #include <linux/capability.h>
  9. #include <linux/errno.h>
  10. #include <linux/sched.h>
  11. #include <linux/mm.h>
  12. #include <linux/smp.h>
  13. #include <linux/smp_lock.h>
  14. #include <linux/sem.h>
  15. #include <linux/msg.h>
  16. #include <linux/shm.h>
  17. #include <linux/stat.h>
  18. #include <linux/syscalls.h>
  19. #include <linux/mman.h>
  20. #include <linux/file.h>
  21. #include <linux/utsname.h>
  22. #include <asm/setup.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/cachectl.h>
  25. #include <asm/traps.h>
  26. #include <asm/ipc.h>
  27. #include <asm/page.h>
  28. /*
  29. * sys_pipe() is the normal C calling standard for creating
  30. * a pipe. It's not the way unix traditionally does this, though.
  31. */
  32. asmlinkage int sys_pipe(unsigned long * fildes)
  33. {
  34. int fd[2];
  35. int error;
  36. error = do_pipe(fd);
  37. if (!error) {
  38. if (copy_to_user(fildes, fd, 2*sizeof(int)))
  39. error = -EFAULT;
  40. }
  41. return error;
  42. }
  43. /* common code for old and new mmaps */
  44. static inline long do_mmap2(
  45. unsigned long addr, unsigned long len,
  46. unsigned long prot, unsigned long flags,
  47. unsigned long fd, unsigned long pgoff)
  48. {
  49. int error = -EBADF;
  50. struct file * file = NULL;
  51. flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  52. if (!(flags & MAP_ANONYMOUS)) {
  53. file = fget(fd);
  54. if (!file)
  55. goto out;
  56. }
  57. down_write(&current->mm->mmap_sem);
  58. error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
  59. up_write(&current->mm->mmap_sem);
  60. if (file)
  61. fput(file);
  62. out:
  63. return error;
  64. }
  65. asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
  66. unsigned long prot, unsigned long flags,
  67. unsigned long fd, unsigned long pgoff)
  68. {
  69. return do_mmap2(addr, len, prot, flags, fd, pgoff);
  70. }
  71. /*
  72. * Perform the select(nd, in, out, ex, tv) and mmap() system
  73. * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
  74. * handle more than 4 system call parameters, so these system calls
  75. * used a memory block for parameter passing..
  76. */
  77. struct mmap_arg_struct {
  78. unsigned long addr;
  79. unsigned long len;
  80. unsigned long prot;
  81. unsigned long flags;
  82. unsigned long fd;
  83. unsigned long offset;
  84. };
  85. asmlinkage int old_mmap(struct mmap_arg_struct *arg)
  86. {
  87. struct mmap_arg_struct a;
  88. int error = -EFAULT;
  89. if (copy_from_user(&a, arg, sizeof(a)))
  90. goto out;
  91. error = -EINVAL;
  92. if (a.offset & ~PAGE_MASK)
  93. goto out;
  94. a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  95. error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
  96. out:
  97. return error;
  98. }
  99. #if 0
  100. struct mmap_arg_struct64 {
  101. __u32 addr;
  102. __u32 len;
  103. __u32 prot;
  104. __u32 flags;
  105. __u64 offset; /* 64 bits */
  106. __u32 fd;
  107. };
  108. asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
  109. {
  110. int error = -EFAULT;
  111. struct file * file = NULL;
  112. struct mmap_arg_struct64 a;
  113. unsigned long pgoff;
  114. if (copy_from_user(&a, arg, sizeof(a)))
  115. return -EFAULT;
  116. if ((long)a.offset & ~PAGE_MASK)
  117. return -EINVAL;
  118. pgoff = a.offset >> PAGE_SHIFT;
  119. if ((a.offset >> PAGE_SHIFT) != pgoff)
  120. return -EINVAL;
  121. if (!(a.flags & MAP_ANONYMOUS)) {
  122. error = -EBADF;
  123. file = fget(a.fd);
  124. if (!file)
  125. goto out;
  126. }
  127. a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
  128. down_write(&current->mm->mmap_sem);
  129. error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
  130. up_write(&current->mm->mmap_sem);
  131. if (file)
  132. fput(file);
  133. out:
  134. return error;
  135. }
  136. #endif
  137. struct sel_arg_struct {
  138. unsigned long n;
  139. fd_set *inp, *outp, *exp;
  140. struct timeval *tvp;
  141. };
  142. asmlinkage int old_select(struct sel_arg_struct *arg)
  143. {
  144. struct sel_arg_struct a;
  145. if (copy_from_user(&a, arg, sizeof(a)))
  146. return -EFAULT;
  147. /* sys_select() does the appropriate kernel locking */
  148. return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
  149. }
  150. /*
  151. * sys_ipc() is the de-multiplexer for the SysV IPC calls..
  152. *
  153. * This is really horribly ugly.
  154. */
  155. asmlinkage int sys_ipc (uint call, int first, int second,
  156. int third, void *ptr, long fifth)
  157. {
  158. int version, ret;
  159. version = call >> 16; /* hack for backward compatibility */
  160. call &= 0xffff;
  161. if (call <= SEMCTL)
  162. switch (call) {
  163. case SEMOP:
  164. return sys_semop (first, (struct sembuf *)ptr, second);
  165. case SEMGET:
  166. return sys_semget (first, second, third);
  167. case SEMCTL: {
  168. union semun fourth;
  169. if (!ptr)
  170. return -EINVAL;
  171. if (get_user(fourth.__pad, (void **) ptr))
  172. return -EFAULT;
  173. return sys_semctl (first, second, third, fourth);
  174. }
  175. default:
  176. return -ENOSYS;
  177. }
  178. if (call <= MSGCTL)
  179. switch (call) {
  180. case MSGSND:
  181. return sys_msgsnd (first, (struct msgbuf *) ptr,
  182. second, third);
  183. case MSGRCV:
  184. switch (version) {
  185. case 0: {
  186. struct ipc_kludge tmp;
  187. if (!ptr)
  188. return -EINVAL;
  189. if (copy_from_user (&tmp,
  190. (struct ipc_kludge *)ptr,
  191. sizeof (tmp)))
  192. return -EFAULT;
  193. return sys_msgrcv (first, tmp.msgp, second,
  194. tmp.msgtyp, third);
  195. }
  196. default:
  197. return sys_msgrcv (first,
  198. (struct msgbuf *) ptr,
  199. second, fifth, third);
  200. }
  201. case MSGGET:
  202. return sys_msgget ((key_t) first, second);
  203. case MSGCTL:
  204. return sys_msgctl (first, second,
  205. (struct msqid_ds *) ptr);
  206. default:
  207. return -ENOSYS;
  208. }
  209. if (call <= SHMCTL)
  210. switch (call) {
  211. case SHMAT:
  212. switch (version) {
  213. default: {
  214. ulong raddr;
  215. ret = do_shmat (first, (char *) ptr,
  216. second, &raddr);
  217. if (ret)
  218. return ret;
  219. return put_user (raddr, (ulong *) third);
  220. }
  221. }
  222. case SHMDT:
  223. return sys_shmdt ((char *)ptr);
  224. case SHMGET:
  225. return sys_shmget (first, second, third);
  226. case SHMCTL:
  227. return sys_shmctl (first, second,
  228. (struct shmid_ds *) ptr);
  229. default:
  230. return -ENOSYS;
  231. }
  232. return -EINVAL;
  233. }
  234. /* Convert virtual (user) address VADDR to physical address PADDR */
  235. #define virt_to_phys_040(vaddr) \
  236. ({ \
  237. unsigned long _mmusr, _paddr; \
  238. \
  239. __asm__ __volatile__ (".chip 68040\n\t" \
  240. "ptestr (%1)\n\t" \
  241. "movec %%mmusr,%0\n\t" \
  242. ".chip 68k" \
  243. : "=r" (_mmusr) \
  244. : "a" (vaddr)); \
  245. _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
  246. _paddr; \
  247. })
  248. static inline int
  249. cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
  250. {
  251. unsigned long paddr, i;
  252. switch (scope)
  253. {
  254. case FLUSH_SCOPE_ALL:
  255. switch (cache)
  256. {
  257. case FLUSH_CACHE_DATA:
  258. /* This nop is needed for some broken versions of the 68040. */
  259. __asm__ __volatile__ ("nop\n\t"
  260. ".chip 68040\n\t"
  261. "cpusha %dc\n\t"
  262. ".chip 68k");
  263. break;
  264. case FLUSH_CACHE_INSN:
  265. __asm__ __volatile__ ("nop\n\t"
  266. ".chip 68040\n\t"
  267. "cpusha %ic\n\t"
  268. ".chip 68k");
  269. break;
  270. default:
  271. case FLUSH_CACHE_BOTH:
  272. __asm__ __volatile__ ("nop\n\t"
  273. ".chip 68040\n\t"
  274. "cpusha %bc\n\t"
  275. ".chip 68k");
  276. break;
  277. }
  278. break;
  279. case FLUSH_SCOPE_LINE:
  280. /* Find the physical address of the first mapped page in the
  281. address range. */
  282. if ((paddr = virt_to_phys_040(addr))) {
  283. paddr += addr & ~(PAGE_MASK | 15);
  284. len = (len + (addr & 15) + 15) >> 4;
  285. } else {
  286. unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
  287. if (len <= tmp)
  288. return 0;
  289. addr += tmp;
  290. len -= tmp;
  291. tmp = PAGE_SIZE;
  292. for (;;)
  293. {
  294. if ((paddr = virt_to_phys_040(addr)))
  295. break;
  296. if (len <= tmp)
  297. return 0;
  298. addr += tmp;
  299. len -= tmp;
  300. }
  301. len = (len + 15) >> 4;
  302. }
  303. i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
  304. while (len--)
  305. {
  306. switch (cache)
  307. {
  308. case FLUSH_CACHE_DATA:
  309. __asm__ __volatile__ ("nop\n\t"
  310. ".chip 68040\n\t"
  311. "cpushl %%dc,(%0)\n\t"
  312. ".chip 68k"
  313. : : "a" (paddr));
  314. break;
  315. case FLUSH_CACHE_INSN:
  316. __asm__ __volatile__ ("nop\n\t"
  317. ".chip 68040\n\t"
  318. "cpushl %%ic,(%0)\n\t"
  319. ".chip 68k"
  320. : : "a" (paddr));
  321. break;
  322. default:
  323. case FLUSH_CACHE_BOTH:
  324. __asm__ __volatile__ ("nop\n\t"
  325. ".chip 68040\n\t"
  326. "cpushl %%bc,(%0)\n\t"
  327. ".chip 68k"
  328. : : "a" (paddr));
  329. break;
  330. }
  331. if (!--i && len)
  332. {
  333. /*
  334. * No need to page align here since it is done by
  335. * virt_to_phys_040().
  336. */
  337. addr += PAGE_SIZE;
  338. i = PAGE_SIZE / 16;
  339. /* Recompute physical address when crossing a page
  340. boundary. */
  341. for (;;)
  342. {
  343. if ((paddr = virt_to_phys_040(addr)))
  344. break;
  345. if (len <= i)
  346. return 0;
  347. len -= i;
  348. addr += PAGE_SIZE;
  349. }
  350. }
  351. else
  352. paddr += 16;
  353. }
  354. break;
  355. default:
  356. case FLUSH_SCOPE_PAGE:
  357. len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
  358. for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
  359. {
  360. if (!(paddr = virt_to_phys_040(addr)))
  361. continue;
  362. switch (cache)
  363. {
  364. case FLUSH_CACHE_DATA:
  365. __asm__ __volatile__ ("nop\n\t"
  366. ".chip 68040\n\t"
  367. "cpushp %%dc,(%0)\n\t"
  368. ".chip 68k"
  369. : : "a" (paddr));
  370. break;
  371. case FLUSH_CACHE_INSN:
  372. __asm__ __volatile__ ("nop\n\t"
  373. ".chip 68040\n\t"
  374. "cpushp %%ic,(%0)\n\t"
  375. ".chip 68k"
  376. : : "a" (paddr));
  377. break;
  378. default:
  379. case FLUSH_CACHE_BOTH:
  380. __asm__ __volatile__ ("nop\n\t"
  381. ".chip 68040\n\t"
  382. "cpushp %%bc,(%0)\n\t"
  383. ".chip 68k"
  384. : : "a" (paddr));
  385. break;
  386. }
  387. }
  388. break;
  389. }
  390. return 0;
  391. }
  392. #define virt_to_phys_060(vaddr) \
  393. ({ \
  394. unsigned long paddr; \
  395. __asm__ __volatile__ (".chip 68060\n\t" \
  396. "plpar (%0)\n\t" \
  397. ".chip 68k" \
  398. : "=a" (paddr) \
  399. : "0" (vaddr)); \
  400. (paddr); /* XXX */ \
  401. })
  402. static inline int
  403. cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
  404. {
  405. unsigned long paddr, i;
  406. /*
  407. * 68060 manual says:
  408. * cpush %dc : flush DC, remains valid (with our %cacr setup)
  409. * cpush %ic : invalidate IC
  410. * cpush %bc : flush DC + invalidate IC
  411. */
  412. switch (scope)
  413. {
  414. case FLUSH_SCOPE_ALL:
  415. switch (cache)
  416. {
  417. case FLUSH_CACHE_DATA:
  418. __asm__ __volatile__ (".chip 68060\n\t"
  419. "cpusha %dc\n\t"
  420. ".chip 68k");
  421. break;
  422. case FLUSH_CACHE_INSN:
  423. __asm__ __volatile__ (".chip 68060\n\t"
  424. "cpusha %ic\n\t"
  425. ".chip 68k");
  426. break;
  427. default:
  428. case FLUSH_CACHE_BOTH:
  429. __asm__ __volatile__ (".chip 68060\n\t"
  430. "cpusha %bc\n\t"
  431. ".chip 68k");
  432. break;
  433. }
  434. break;
  435. case FLUSH_SCOPE_LINE:
  436. /* Find the physical address of the first mapped page in the
  437. address range. */
  438. len += addr & 15;
  439. addr &= -16;
  440. if (!(paddr = virt_to_phys_060(addr))) {
  441. unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
  442. if (len <= tmp)
  443. return 0;
  444. addr += tmp;
  445. len -= tmp;
  446. tmp = PAGE_SIZE;
  447. for (;;)
  448. {
  449. if ((paddr = virt_to_phys_060(addr)))
  450. break;
  451. if (len <= tmp)
  452. return 0;
  453. addr += tmp;
  454. len -= tmp;
  455. }
  456. }
  457. len = (len + 15) >> 4;
  458. i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
  459. while (len--)
  460. {
  461. switch (cache)
  462. {
  463. case FLUSH_CACHE_DATA:
  464. __asm__ __volatile__ (".chip 68060\n\t"
  465. "cpushl %%dc,(%0)\n\t"
  466. ".chip 68k"
  467. : : "a" (paddr));
  468. break;
  469. case FLUSH_CACHE_INSN:
  470. __asm__ __volatile__ (".chip 68060\n\t"
  471. "cpushl %%ic,(%0)\n\t"
  472. ".chip 68k"
  473. : : "a" (paddr));
  474. break;
  475. default:
  476. case FLUSH_CACHE_BOTH:
  477. __asm__ __volatile__ (".chip 68060\n\t"
  478. "cpushl %%bc,(%0)\n\t"
  479. ".chip 68k"
  480. : : "a" (paddr));
  481. break;
  482. }
  483. if (!--i && len)
  484. {
  485. /*
  486. * We just want to jump to the first cache line
  487. * in the next page.
  488. */
  489. addr += PAGE_SIZE;
  490. addr &= PAGE_MASK;
  491. i = PAGE_SIZE / 16;
  492. /* Recompute physical address when crossing a page
  493. boundary. */
  494. for (;;)
  495. {
  496. if ((paddr = virt_to_phys_060(addr)))
  497. break;
  498. if (len <= i)
  499. return 0;
  500. len -= i;
  501. addr += PAGE_SIZE;
  502. }
  503. }
  504. else
  505. paddr += 16;
  506. }
  507. break;
  508. default:
  509. case FLUSH_SCOPE_PAGE:
  510. len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
  511. addr &= PAGE_MASK; /* Workaround for bug in some
  512. revisions of the 68060 */
  513. for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
  514. {
  515. if (!(paddr = virt_to_phys_060(addr)))
  516. continue;
  517. switch (cache)
  518. {
  519. case FLUSH_CACHE_DATA:
  520. __asm__ __volatile__ (".chip 68060\n\t"
  521. "cpushp %%dc,(%0)\n\t"
  522. ".chip 68k"
  523. : : "a" (paddr));
  524. break;
  525. case FLUSH_CACHE_INSN:
  526. __asm__ __volatile__ (".chip 68060\n\t"
  527. "cpushp %%ic,(%0)\n\t"
  528. ".chip 68k"
  529. : : "a" (paddr));
  530. break;
  531. default:
  532. case FLUSH_CACHE_BOTH:
  533. __asm__ __volatile__ (".chip 68060\n\t"
  534. "cpushp %%bc,(%0)\n\t"
  535. ".chip 68k"
  536. : : "a" (paddr));
  537. break;
  538. }
  539. }
  540. break;
  541. }
  542. return 0;
  543. }
  544. /* sys_cacheflush -- flush (part of) the processor cache. */
  545. asmlinkage int
  546. sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
  547. {
  548. struct vm_area_struct *vma;
  549. int ret = -EINVAL;
  550. lock_kernel();
  551. if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
  552. cache & ~FLUSH_CACHE_BOTH)
  553. goto out;
  554. if (scope == FLUSH_SCOPE_ALL) {
  555. /* Only the superuser may explicitly flush the whole cache. */
  556. ret = -EPERM;
  557. if (!capable(CAP_SYS_ADMIN))
  558. goto out;
  559. } else {
  560. /*
  561. * Verify that the specified address region actually belongs
  562. * to this process.
  563. */
  564. vma = find_vma (current->mm, addr);
  565. ret = -EINVAL;
  566. /* Check for overflow. */
  567. if (addr + len < addr)
  568. goto out;
  569. if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
  570. goto out;
  571. }
  572. if (CPU_IS_020_OR_030) {
  573. if (scope == FLUSH_SCOPE_LINE && len < 256) {
  574. unsigned long cacr;
  575. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  576. if (cache & FLUSH_CACHE_INSN)
  577. cacr |= 4;
  578. if (cache & FLUSH_CACHE_DATA)
  579. cacr |= 0x400;
  580. len >>= 2;
  581. while (len--) {
  582. __asm__ __volatile__ ("movec %1, %%caar\n\t"
  583. "movec %0, %%cacr"
  584. : /* no outputs */
  585. : "r" (cacr), "r" (addr));
  586. addr += 4;
  587. }
  588. } else {
  589. /* Flush the whole cache, even if page granularity requested. */
  590. unsigned long cacr;
  591. __asm__ ("movec %%cacr, %0" : "=r" (cacr));
  592. if (cache & FLUSH_CACHE_INSN)
  593. cacr |= 8;
  594. if (cache & FLUSH_CACHE_DATA)
  595. cacr |= 0x800;
  596. __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
  597. }
  598. ret = 0;
  599. goto out;
  600. } else {
  601. /*
  602. * 040 or 060: don't blindly trust 'scope', someone could
  603. * try to flush a few megs of memory.
  604. */
  605. if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
  606. scope=FLUSH_SCOPE_PAGE;
  607. if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
  608. scope=FLUSH_SCOPE_ALL;
  609. if (CPU_IS_040) {
  610. ret = cache_flush_040 (addr, scope, cache, len);
  611. } else if (CPU_IS_060) {
  612. ret = cache_flush_060 (addr, scope, cache, len);
  613. }
  614. }
  615. out:
  616. unlock_kernel();
  617. return ret;
  618. }
  619. asmlinkage int sys_getpagesize(void)
  620. {
  621. return PAGE_SIZE;
  622. }