/* uaccess_std.c */
/*
 * Standard user space access functions based on mvcp/mvcs and doing
 * interesting things in the secondary space mode.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

/*
 * Select the 31-bit or 64-bit forms of the instruction mnemonics used
 * in the inline assembly below (e.g. "alr" = add logical 32-bit vs
 * "algr" = add logical 64-bit), so the same asm templates work for
 * both build modes.
 */
#ifndef CONFIG_64BIT
#define AHI	"ahi"
#define ALR	"alr"
#define CLR	"clr"
#define LHI	"lhi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define ALR	"algr"
#define CLR	"clgr"
#define LHI	"lghi"
#define SLR	"slgr"
#endif
/*
 * Copy data from user space to kernel space with the mvcp instruction
 * ("move to primary"), which fetches from the secondary address space.
 *
 * Labels 0:/1:/2: form the fast path, copying up to 256 bytes per
 * mvcp.  On a fault the exception handler at 3: computes how many
 * bytes remain up to the next page boundary and retries those with a
 * shortened mvcp at 4:, then the loop at 5:/6:/7: zero-fills the
 * uncopied tail of the kernel buffer with xc (executed via "ex" for
 * the final partial chunk).
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;
	asm volatile(
		"0: mvcp 0(%0,%2),0(%1),%3\n"
		"10:jz 8f\n"
		"1:"ALR" %0,%3\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcp 0(%0,%2),0(%1),%3\n"
		"11:jnz 1b\n"
		" j 8f\n"
		"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 5f\n"
		"4: mvcp 0(%4,%2),0(%1),%3\n"
		"12:"SLR" %0,%4\n"
		" "ALR" %2,%4\n"
		"5:"LHI" %4,-1\n"
		" "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
		" bras %3,7f\n" /* memset loop */
		" xc 0(1,%2),0(%2)\n"
		"6: xc 0(256,%2),0(%2)\n"
		" la %2,256(%2)\n"
		"7:"AHI" %4,-256\n"
		" jnm 6b\n"
		" ex %4,0(%3)\n"
		" j 9f\n"
		"8:"SLR" %0,%0\n"
		"9: \n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
  67. static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
  68. void *x)
  69. {
  70. if (size <= 1024)
  71. return copy_from_user_std(size, ptr, x);
  72. return copy_from_user_pt(size, ptr, x);
  73. }
/*
 * Copy data from kernel space to user space with the mvcs instruction
 * ("move to secondary"), which stores into the secondary address space.
 *
 * Labels 0:/1:/2: form the fast path, copying up to 256 bytes per
 * mvcs.  On a fault the exception handler at 3: computes the number of
 * bytes up to the next page boundary and retries exactly those with a
 * shortened mvcs at 4:, so the returned residue is as small as
 * possible.
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
{
	unsigned long tmp1, tmp2;

	tmp1 = -256UL;
	asm volatile(
		"0: mvcs 0(%0,%1),0(%2),%3\n"
		"7: jz 5f\n"
		"1:"ALR" %0,%3\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"2: mvcs 0(%0,%1),0(%2),%3\n"
		"8: jnz 1b\n"
		" j 5f\n"
		"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
		" "LHI" %3,-4096\n"
		" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
		" "SLR" %4,%1\n"
		" "CLR" %0,%4\n" /* copy crosses next page boundary? */
		" jnh 6f\n"
		"4: mvcs 0(%4,%1),0(%2),%3\n"
		"9:"SLR" %0,%4\n"
		" j 6f\n"
		"5:"SLR" %0,%0\n"
		"6: \n"
		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
  104. static size_t copy_to_user_std_check(size_t size, void __user *ptr,
  105. const void *x)
  106. {
  107. if (size <= 1024)
  108. return copy_to_user_std(size, ptr, x);
  109. return copy_to_user_pt(size, ptr, x);
  110. }
/*
 * Copy within user space (user-to-user) using mvc while the CPU runs
 * in secondary space mode ("sacf 256" switches in, "sacf 0" back out).
 *
 * Labels 2:/3: form the 256-byte mvc loop; 4: handles the tail by
 * executing a length-adjusted mvc via "ex" (the template is the mvc
 * at 1:, addressed as 1b-0b(%3) relative to the bras base).  On a
 * fault the handler at 0: falls back to the byte-by-byte loop at 1:
 * so the uncopied residue is exact.
 *
 * Returns the number of bytes that could NOT be copied (0 on success).
 */
static size_t copy_in_user_std(size_t size, void __user *to,
			       const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		" sacf 256\n"
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		"0:"AHI" %0,257\n"
		"1: mvc 0(1,%1),0(%2)\n"
		" la %1,1(%1)\n"
		" la %2,1(%2)\n"
		" "AHI" %0,-1\n"
		" jnz 1b\n"
		" j 5f\n"
		"2: mvc 0(256,%1),0(%2)\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,1b-0b(%3)\n"
		"5: "SLR" %0,%0\n"
		"6: sacf 0\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return size;
}
/*
 * Zero a block in user space with xc (exclusive-or characters against
 * itself) while the CPU runs in secondary space mode.
 *
 * Labels 2:/3: form the 256-byte xc loop; 4: clears the tail via a
 * length-adjusted xc executed with "ex".  On a fault the handler at
 * 0: computes the distance to the next page boundary and, if the
 * clear crosses it, zeroes exactly up to the boundary at 1: before
 * reporting the residue.
 *
 * Returns the number of bytes that could NOT be cleared (0 on
 * success).
 */
static size_t clear_user_std(size_t size, void __user *to)
{
	unsigned long tmp1, tmp2;

	asm volatile(
		" sacf 256\n"
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		" xc 0(1,%1),0(%1)\n"
		"0:"AHI" %0,257\n"
		" la %2,255(%1)\n" /* %2 = ptr + 255 */
		" srl %2,12\n"
		" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
		" "SLR" %2,%1\n"
		" "CLR" %0,%2\n" /* clear crosses next page boundary? */
		" jnh 5f\n"
		" "AHI" %2,-1\n"
		"1: ex %2,0(%3)\n"
		" "AHI" %2,1\n"
		" "SLR" %0,%2\n"
		" j 5f\n"
		"2: xc 0(256,%1),0(%1)\n"
		" la %1,256(%1)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,0(%3)\n"
		"5: "SLR" %0,%0\n"
		"6: sacf 0\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
		: : "cc", "memory");
	return size;
}
/*
 * Determine the length of a user space string with srst ("search
 * string") in secondary space mode; reg0 = 0 makes srst scan for the
 * terminating '\0' between the start (%2) and limit (%3) addresses,
 * resuming at 0: while the instruction reports a partial scan (jo).
 *
 * Returns the string length INCLUDING the terminating zero byte, or 0
 * on a fault or when @size is 0.  NOTE(review): when no terminator is
 * found within @size bytes the computation "end + 1 - start" appears
 * to yield @size + 1 — confirm against callers.
 */
size_t strnlen_user_std(size_t size, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	if (unlikely(!size))
		return 0;
	asm volatile(
		" la %2,0(%1)\n"
		" la %3,0(%0,%1)\n"
		" "SLR" %0,%0\n"
		" sacf 256\n"
		"0: srst %3,%2\n"
		" jo 0b\n"
		" la %0,1(%3)\n" /* strnlen_user results includes \0 */
		" "SLR" %0,%1\n"
		"1: sacf 0\n"
		EX_TABLE(0b,1b)
		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return size;
}
  194. size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
  195. {
  196. size_t done, len, offset, len_str;
  197. if (unlikely(!count))
  198. return 0;
  199. done = 0;
  200. do {
  201. offset = (size_t)src & ~PAGE_MASK;
  202. len = min(count - done, PAGE_SIZE - offset);
  203. if (copy_from_user_std(len, src, dst))
  204. return -EFAULT;
  205. len_str = strnlen(dst, len);
  206. done += len_str;
  207. src += len_str;
  208. dst += len_str;
  209. } while ((len_str == len) && (done < count));
  210. return done;
  211. }
/*
 * Template for an atomic read-modify-write of a user space futex word
 * in secondary space mode: 0: loads the old value, @insn computes the
 * new value in %2 from the old value (%1) and operand (%5), and the
 * compare-and-swap at 2: retries via 3: until the update succeeds
 * atomically.  On a fault @ret keeps its -EFAULT initialization (the
 * "0" (-EFAULT) input constraint); on success it is set to 0.
 */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
	asm volatile( \
		" sacf 256\n" \
		"0: l %1,0(%6)\n" \
		"1:"insn \
		"2: cs %1,%2,0(%6)\n" \
		"3: jl 1b\n" \
		" lhi %0,0\n" \
		"4: sacf 0\n" \
		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
		: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
		  "=m" (*uaddr) \
		: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
		  "m" (*uaddr) : "cc");
  226. int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
  227. {
  228. int oldval = 0, newval, ret;
  229. switch (op) {
  230. case FUTEX_OP_SET:
  231. __futex_atomic_op("lr %2,%5\n",
  232. ret, oldval, newval, uaddr, oparg);
  233. break;
  234. case FUTEX_OP_ADD:
  235. __futex_atomic_op("lr %2,%1\nar %2,%5\n",
  236. ret, oldval, newval, uaddr, oparg);
  237. break;
  238. case FUTEX_OP_OR:
  239. __futex_atomic_op("lr %2,%1\nor %2,%5\n",
  240. ret, oldval, newval, uaddr, oparg);
  241. break;
  242. case FUTEX_OP_ANDN:
  243. __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
  244. ret, oldval, newval, uaddr, oparg);
  245. break;
  246. case FUTEX_OP_XOR:
  247. __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
  248. ret, oldval, newval, uaddr, oparg);
  249. break;
  250. default:
  251. ret = -ENOSYS;
  252. }
  253. *old = oldval;
  254. return ret;
  255. }
/*
 * futex_atomic_cmpxchg_std - compare-and-exchange a user space futex
 * word in secondary space mode using a single cs instruction.
 *
 * *uval receives the value previously found in *uaddr (the cs
 * instruction updates oldval in place).  Returns 0 when the cs
 * executed (whether or not it swapped), -EFAULT on a user space fault
 * (ret keeps its "0" (-EFAULT) initialization).
 */
int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
			     u32 oldval, u32 newval)
{
	int ret;

	asm volatile(
		" sacf 256\n"
		"0: cs %1,%4,0(%5)\n"
		"1: la %0,0\n"
		"2: sacf 0\n"
		EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		: "cc", "memory" );
	*uval = oldval;
	return ret;
}
/*
 * Operations vector wiring the mvcp/mvcs based primitives above into
 * the generic s390 uaccess interface.
 */
struct uaccess_ops uaccess_std = {
	.copy_from_user = copy_from_user_std_check,
	.copy_from_user_small = copy_from_user_std,
	.copy_to_user = copy_to_user_std_check,
	.copy_to_user_small = copy_to_user_std,
	.copy_in_user = copy_in_user_std,
	.clear_user = clear_user_std,
	.strnlen_user = strnlen_user_std,
	.strncpy_from_user = strncpy_from_user_std,
	.futex_atomic_op = futex_atomic_op_std,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
};