/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
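/*
 * On SH the two descriptors are returned in a register pair: fd[0] is the
 * normal return value (r0) and fd[1] is handed back in r1 via pt_regs, with
 * the userspace pipe() wrapper expected to pick up both registers.
 */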
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
	unsigned long r6, unsigned long r7,
	struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		regs->regs[1] = fd[1];
		return fd[0];
	}
	return error;
}

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with same color.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
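/*
 * Illustrative example (assuming a 16KB alias granularity, i.e.
 * shm_align_mask == 0x3fff, and 4KB pages):
 *
 *	COLOUR_ALIGN(0x10001000, 3)
 *	  = ((0x10001000 + 0x3fff) & ~0x3fff) + ((3 << PAGE_SHIFT) & 0x3fff)
 *	  = 0x10004000 + 0x3000
 *	  = 0x10007000
 *
 * so the chosen address carries the same colour bits as the file offset,
 * and shared mappings of the same page cannot alias in the cache.
 */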
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
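	/*
	 * First-fit search with a cache: free_area_cache is where the last
	 * successful search ended, and cached_hole_size is the largest hole
	 * already seen below it.  If the request could fit in such a hole,
	 * restart from TASK_UNMAPPED_BASE so it can be reused; otherwise
	 * continue scanning upwards from the cached position.
	 */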
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
#endif /* CONFIG_MMU */

static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	 unsigned long flags, int fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
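/*
 * old_mmap() takes the file offset in bytes and requires it to be
 * page-aligned; sys_mmap2() takes the offset already in units of pages,
 * which is what lets a 32-bit offset argument reach large files.
 */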
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
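/*
 * For example (illustrative), a glibc semget(key, nsems, semflg) on this
 * ABI arrives here as roughly
 *
 *	sys_ipc(SEMGET, key, nsems, semflg, NULL, 0);
 *
 * with the IPC interface version, where one applies, encoded in the upper
 * 16 bits of 'call'.
 */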
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}

asmlinkage int sys_uname(struct old_utsname * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}

asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
				     size_t count, long dummy, loff_t pos)
{
	return sys_pread64(fd, buf, count, pos);
}

asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
				      size_t count, long dummy, loff_t pos)
{
	return sys_pwrite64(fd, buf, count, pos);
}
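/*
 * The 'dummy' argument above pads the argument list so that the 64-bit 'pos'
 * lands in a properly aligned register pair, matching how the compiler
 * passes long long arguments on 32-bit SH.  sys_fadvise64_64 below has two
 * 64-bit arguments, each arriving as a pair of 32-bit halves whose order
 * depends on the endianness the kernel was built for.
 */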
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
					u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0, advice);
#else
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1, advice);
#endif
}

#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
#define SYSCALL_ARG3	"trapa #0x23"
#else
#define SYSCALL_ARG3	"trapa #0x13"
#endif
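/*
 * The trapa immediate historically encodes the argument count for the
 * syscall trap handler: 0x10 + nargs on most parts, 0x20 + nargs on
 * SH-2/SH-2A, so a three-argument call uses #0x13 (or #0x23).
 */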
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __sc0 __asm__ ("r3") = __NR_execve;	/* syscall number */
	register long __sc4 __asm__ ("r4") = (long) filename;	/* arg 1 */
	register long __sc5 __asm__ ("r5") = (long) argv;	/* arg 2 */
	register long __sc6 __asm__ ("r6") = (long) envp;	/* arg 3 */

	/* Trap into the kernel; the result comes back in r0 (the "z" constraint). */
	__asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
			: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
			: "memory");
	return __sc0;
}