uaccess_pt.c

/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006, 2012
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

#ifndef CONFIG_64BIT
#define AHI	"ahi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define SLR	"slgr"
#endif
static size_t strnlen_kernel(size_t count, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	asm volatile(
		" la %2,0(%1)\n"
		" la %3,0(%0,%1)\n"
		" "SLR" %0,%0\n"
		"0: srst %3,%2\n"
		" jo 0b\n"
		" la %0,1(%3)\n"	/* strnlen_kernel result includes \0 */
		" "SLR" %0,%1\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return count;
}
static size_t copy_in_kernel(size_t count, void __user *to,
			     const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		"0:"AHI" %0,257\n"
		"1: mvc 0(1,%1),0(%2)\n"
		" la %1,1(%1)\n"
		" la %2,1(%2)\n"
		" "AHI" %0,-1\n"
		" jnz 1b\n"
		" j 5f\n"
		"2: mvc 0(256,%1),0(%2)\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,1b-0b(%3)\n"
		"5:"SLR" %0,%0\n"
		"6:\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return count;
}
/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
#ifdef CONFIG_64BIT

static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
{
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV))
			return -0x39UL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV))
			return -0x3aUL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV))
			return -0x3bUL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV))
			return -0x10UL;
		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
			if (write && (*table & _SEGMENT_ENTRY_RO))
				return -0x04UL;
			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
		}
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
		return -0x11UL;
	if (write && (*table & _PAGE_RO))
		return -0x04UL;
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#else /* CONFIG_64BIT */

static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
{
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	table = table + ((address >> 20) & 0x7ff);
	if (unlikely(*table & _SEGMENT_ENTRY_INV))
		return -0x10UL;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
		return -0x11UL;
	if (write && (*table & _PAGE_RO))
		return -0x04UL;
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */
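
/*
 * Illustration only, not part of the original file: a minimal sketch of how
 * the return convention documented above is consumed. follow_table() either
 * yields a kernel address or a value >= -4095, in which case the negated
 * value is the program-check code that __handle_fault() expects. The helper
 * name example_resolve() is made up for this sketch; the real callers below
 * (__user_copy_pt etc.) follow the same pattern under page_table_lock.
 */
static int example_resolve(struct mm_struct *mm, unsigned long uaddr, int write)
{
	unsigned long kaddr;

	spin_lock(&mm->page_table_lock);
	kaddr = follow_table(mm, uaddr, write);
	spin_unlock(&mm->page_table_lock);
	if (IS_ERR_VALUE(kaddr))
		return __handle_fault(uaddr, -kaddr, write);
	return 0;
}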
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr + done;
		} else {
			from = (void *) kaddr;
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;

	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, (void __user *) to, from);
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, (void __user *) from);
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
static size_t clear_user_pt(size_t n, void __user *to)
{
	void *zpage = (void *) empty_zero_page;
	long done, size, ret;

	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		if (segment_eq(get_fs(), KERNEL_DS))
			ret = copy_in_kernel(size, to, (void __user *) zpage);
		else
			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
		done += size;
		to += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen_kernel(count, src);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (segment_eq(get_fs(), KERNEL_DS)) {
			if (copy_in_kernel(len, (void __user *) dst, src))
				return -EFAULT;
		} else {
			if (__user_copy_pt((unsigned long) src, dst, len, 0))
				return -EFAULT;
		}
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < count));
	return done;
}
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, from);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     " lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: la %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}
struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_from_user_small = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_to_user_small = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};
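
/*
 * Illustration only, not part of the original file: a sketch of how an ops
 * table like uaccess_pt is consumed. The architecture picks one uaccess_ops
 * implementation at boot and the generic wrappers dispatch through its
 * function pointers. The global "uaccess" variable and the wrapper name
 * below are assumptions for this sketch, not the exact header contents.
 */
extern struct uaccess_ops uaccess;

static inline size_t copy_from_user_example(void *to, const void __user *from,
					     size_t n)
{
	return uaccess.copy_from_user(n, from, to);
}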