  1. /*
  2. * arch/xtensa/kernel/syscall.c
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2001 - 2005 Tensilica Inc.
  9. * Copyright (C) 2000 Silicon Graphics, Inc.
  10. * Copyright (C) 1995 - 2000 by Ralf Baechle
  11. *
  12. * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
  13. * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
  14. * Chris Zankel <chris@zankel.net>
  15. * Kevin Chea
  16. *
  17. */
  18. #include <asm/uaccess.h>
  19. #include <asm/syscall.h>
  20. #include <asm/unistd.h>
  21. #include <linux/linkage.h>
  22. #include <linux/stringify.h>
  23. #include <linux/errno.h>
  24. #include <linux/syscalls.h>
  25. #include <linux/file.h>
  26. #include <linux/fs.h>
  27. #include <linux/mman.h>
  28. #include <linux/shm.h>
/* Generic entry type for the dispatch table; each real handler is cast to it. */
typedef void (*syscall_t)(void);

/*
 * The syscall dispatch table.  Every slot is first initialized to
 * sys_ni_syscall ("not implemented"); the __SYSCALL() expansions pulled
 * in from <uapi/asm/unistd.h> then overwrite the implemented entries.
 */
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,

#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
#include <uapi/asm/unistd.h>
};
/*
 * Round 'addr' up to an SHMLBA boundary, then add the cache-colour
 * offset implied by 'pgoff' (its low SHMLBA bits, in bytes).  The result
 * is the next address >= addr whose colour matches the file offset, so
 * shared mappings of the same page agree in the low SHMLBA bits.
 */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
  38. asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
  39. {
  40. unsigned long ret;
  41. long err;
  42. err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
  43. if (err)
  44. return err;
  45. return (long)ret;
  46. }
  47. asmlinkage long xtensa_fadvise64_64(int fd, int advice,
  48. unsigned long long offset, unsigned long long len)
  49. {
  50. return sys_fadvise64_64(fd, offset, len, advice);
  51. }
/*
 * Pick an unmapped address range of 'len' bytes for a new mapping.
 *
 * Shared mappings are subject to a cache-aliasing constraint: the chosen
 * address must agree with the file offset (pgoff) in the low SHMLBA bits
 * ("cache colour") so that all mappings of a page share one colour.
 * COLOUR_ALIGN() yields the next such address at or above its input.
 * Returns the chosen address, or -EINVAL/-ENOMEM on failure.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		/* Caller demanded this exact (colour-correct) address. */
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	/* Bump the hint to a colour-correct (shared) or page-aligned
	 * starting point before searching.
	 */
	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/* Linear first-fit walk over the VMA list, starting at the VMA
	 * covering or following 'addr'.
	 */
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		/* Gap before this VMA (or end of list) is large enough. */
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		/* Keep the next candidate colour-correct for shared mappings. */
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}