/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
#include <asm/unistd.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
                        unsigned long r6, unsigned long r7,
                        struct pt_regs regs)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (!error) {
                regs.regs[1] = fd[1];
                return fd[0];
        }
        return error;
}
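
/*
 * Note on the calling convention above: the read end of the pipe comes
 * back as the ordinary return value, while the write end is stored into
 * the saved register set (regs.regs[1], i.e. r1 at syscall return), so
 * userspace cannot treat this as a plain C function.  The libc stub has
 * to read both result registers and store them into the caller's array,
 * roughly:
 *
 *      fds[0] = r0;    (the normal syscall return value)
 *      fds[1] = r1;    (the second result register set above)
 */
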
unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/*
 * To avoid cache aliases, we map the shared page with same color.
 */
#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))
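
/*
 * COLOUR_ALIGN() rounds addr up to the next colour boundary and then
 * adds the colour bits of the file offset, so the chosen virtual
 * address and the page it maps share a cache colour.  Worked example,
 * assuming shm_align_mask has been raised to 0x3fff (an aliasing
 * D-cache with four 4KB colours):
 *
 *      COLOUR_ALIGN(0x10123, 0x2)
 *        = ((0x10123 + 0x3fff) & ~0x3fff) + ((0x2 << 12) & 0x3fff)
 *        = 0x14000 + 0x2000
 *        = 0x16000
 *
 * With the default mask of PAGE_SIZE - 1 the second term is always zero
 * and the macro degenerates to plain page alignment.
 */
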
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        int do_colour_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) && (addr & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        do_colour_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_colour_align = 1;

        if (addr) {
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                mm->cached_hole_size = 0;
                start_addr = addr = TASK_UNMAPPED_BASE;
        }

full_search:
        if (do_colour_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(mm->free_area_cache);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (unlikely(TASK_SIZE - len < addr)) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_colour_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
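
/*
 * The search above is a simple first fit over the VMA list.
 * mm->free_area_cache remembers where the last successful search ended
 * and mm->cached_hole_size tracks the largest hole that was skipped
 * below it: a request small enough to fit in such a hole restarts from
 * TASK_UNMAPPED_BASE, anything larger continues from free_area_cache.
 * If the scan runs past TASK_SIZE it is restarted once from
 * TASK_UNMAPPED_BASE before giving up with -ENOMEM.
 */
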
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
         unsigned long flags, int fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file *file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

asmlinkage int old_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        int fd, unsigned long off)
{
        if (off & ~PAGE_MASK)
                return -EINVAL;
        return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
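
/*
 * The two entry points differ only in the unit of the file offset:
 * old_mmap() takes a byte offset and rejects anything that is not page
 * aligned, while sys_mmap2() takes the offset in PAGE_SIZE units, which
 * is what lets a 32-bit argument address offsets beyond 4GB.  For
 * example, with 4KB pages a byte offset of 0x10000 passed to old_mmap()
 * becomes pgoff 0x10, the same value userspace would pass directly to
 * mmap2().
 */
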
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
                       int third, void __user *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semtimedop(first, (struct sembuf __user *)ptr,
                                              second, NULL);
                case SEMTIMEDOP:
                        return sys_semtimedop(first, (struct sembuf __user *)ptr,
                                              second,
                                              (const struct timespec __user *)fifth);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void * __user *) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -EINVAL;
                }

        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, (struct msgbuf __user *) ptr,
                                           second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;

                                if (copy_from_user(&tmp,
                                                   (struct ipc_kludge __user *) ptr,
                                                   sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
                                return sys_msgrcv (first,
                                                   (struct msgbuf __user *) ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second,
                                           (struct msqid_ds __user *) ptr);
                default:
                        return -EINVAL;
                }

        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = do_shmat (first, (char __user *) ptr,
                                                second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong __user *) third);
                        }
                        case 1: /* iBCS2 emulator entry point */
                                if (!segment_eq(get_fs(), get_ds()))
                                        return -EINVAL;
                                return do_shmat (first, (char __user *) ptr,
                                                 second, (ulong *) third);
                        }
                case SHMDT:
                        return sys_shmdt ((char __user *)ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second,
                                           (struct shmid_ds __user *) ptr);
                default:
                        return -EINVAL;
                }

        return -EINVAL;
}
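
/*
 * Userspace normally never calls sys_ipc() by hand; libc packs each
 * SysV IPC primitive into this single entry point.  A semop(semid,
 * sops, nsops) call, for instance, is expected to arrive here roughly
 * as
 *
 *      sys_ipc(SEMOP, semid, nsops, 0, sops, 0);
 *
 * and shmat() passes in "third" a user pointer into which the attach
 * address is written back (the default: case above).  The version field
 * in the top 16 bits of "call" only matters for the old MSGRCV layout
 * and the iBCS2 SHMAT entry point.
 */
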
asmlinkage int sys_uname(struct old_utsname * name)
{
        int err;
        if (!name)
                return -EFAULT;
        down_read(&uts_sem);
        err = copy_to_user(name, utsname(), sizeof (*name));
        up_read(&uts_sem);
        return err?-EFAULT:0;
}

asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
                                     size_t count, long dummy, loff_t pos)
{
        return sys_pread64(fd, buf, count, pos);
}

asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
                                      size_t count, long dummy, loff_t pos)
{
        return sys_pwrite64(fd, buf, count, pos);
}
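
/*
 * The "dummy" argument in the two wrappers above is padding: it keeps
 * the 64-bit pos argument aligned the way the calling convention
 * expects 64-bit values to be passed, which is the usual reason 32-bit
 * ports carry pread/pwrite wrappers.  The wrappers then simply forward
 * to the generic 64-bit implementations.
 */
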
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
                                        u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
        return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
                                (u64)len1 << 32 | len0, advice);
#else
        return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
                                (u64)len0 << 32 | len1, advice);
#endif
}
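
/*
 * The loff_t arguments of fadvise64_64() arrive split into 32-bit
 * halves, and which half lands in which slot depends on the endianness
 * the kernel was built for.  Worked example for an offset of
 * 0x180000000 (6GB):
 *
 *      little endian:  offset0 = 0x80000000, offset1 = 0x00000001
 *                      (u64)offset1 << 32 | offset0 = 0x180000000
 *      big endian:     offset0 = 0x00000001, offset1 = 0x80000000
 *                      (u64)offset0 << 32 | offset1 = 0x180000000
 *
 * Either way the wrapper reassembles the same 64-bit value before
 * calling the generic sys_fadvise64_64().
 */
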
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        register long __sc0 __asm__ ("r3") = __NR_execve;
        register long __sc4 __asm__ ("r4") = (long) filename;
        register long __sc5 __asm__ ("r5") = (long) argv;
        register long __sc6 __asm__ ("r6") = (long) envp;

        __asm__ __volatile__ ("trapa #0x13" : "=z" (__sc0)
                        : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
                        : "memory");
        return __sc0;
}
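
/*
 * The inline asm above follows the SH syscall convention: the syscall
 * number goes in r3, the arguments in r4-r6, the trap is taken with a
 * "trapa" instruction, and the result comes back in r0 (gcc's "z"
 * constraint on SH).  The trap number 0x13 appears to follow the old
 * convention of 0x10 plus the number of arguments.  Wrapping a
 * different three-argument syscall from kernel space would look much
 * the same; only __NR_execve and the argument values change.
 */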