uaccess_pt.c

/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006, 2012
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

#ifndef CONFIG_64BIT
#define AHI	"ahi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define SLR	"slgr"
#endif
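
/*
 * Determine the length of a string in kernel address space (KERNEL_DS),
 * including the terminating '\0' but limited to "count" bytes, using the
 * SRST instruction. A fault during the search is caught via EX_TABLE and
 * makes the function return 0.
 */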
static size_t strnlen_kernel(size_t count, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	asm volatile(
		" la %2,0(%1)\n"
		" la %3,0(%0,%1)\n"
		" "SLR" %0,%0\n"
		"0: srst %3,%2\n"
		" jo 0b\n"
		" la %0,1(%3)\n"	/* strnlen_kernel result includes the \0 */
		" "SLR" %0,%1\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return count;
}
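
/*
 * Copy "count" bytes within kernel address space (KERNEL_DS) using MVC in
 * chunks of up to 256 bytes. Returns 0 on success; a non-zero return value
 * means the copy was cut short by a fault handled via the EX_TABLE fixups.
 */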
static size_t copy_in_kernel(size_t count, void __user *to,
			     const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		"0:"AHI" %0,257\n"
		"1: mvc 0(1,%1),0(%2)\n"
		" la %1,1(%1)\n"
		" la %2,1(%2)\n"
		" "AHI" %0,-1\n"
		" jnz 1b\n"
		" j 5f\n"
		"2: mvc 0(256,%1),0(%2)\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,1b-0b(%3)\n"
		"5:"SLR" %0,%0\n"
		"6:\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return count;
}

/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
static __always_inline unsigned long follow_table(struct mm_struct *mm,
						  unsigned long addr, int write)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return -0x3aUL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return -0x3bUL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return -0x10UL;
	if (pmd_large(*pmd)) {
		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
			return -0x04UL;
		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
	}
	if (unlikely(pmd_bad(*pmd)))
		return -0x10UL;

	ptep = pte_offset_map(pmd, addr);
	if (!pte_present(*ptep))
		return -0x11UL;
	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
		return -0x04UL;

	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
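
/*
 * Copy between kernel and user space by walking the user page tables with
 * follow_table(). The copy is done page-wise under mm->page_table_lock; on
 * a fault the lock is dropped, __handle_fault() tries to resolve it, and
 * the copy is retried. Returns the number of bytes not copied.
 */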
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr + done;
		} else {
			from = (void *) kaddr;
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;

	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}
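
/*
 * copy_from_user()/copy_to_user() based on the page table walk above. For
 * KERNEL_DS the kernel-to-kernel MVC copy is used instead. On an incomplete
 * copy_from_user_pt() the uncopied tail of the kernel buffer is zeroed.
 */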
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, (void __user *) to, from);
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, (void __user *) from);
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
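
/*
 * Clear "n" bytes of user memory by copying from empty_zero_page, at most
 * one page per iteration. Returns the number of bytes that could not be
 * cleared.
 */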
static size_t clear_user_pt(size_t n, void __user *to)
{
	void *zpage = &empty_zero_page;
	long done, size, ret;

	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		if (segment_eq(get_fs(), KERNEL_DS))
			ret = copy_in_kernel(size, to, (void __user *) zpage);
		else
			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
		done += size;
		to += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
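
/*
 * strnlen() for user space strings: walk the page tables and run strnlen()
 * on each mapped piece of the string. Returns the string length including
 * the terminating '\0', count + 1 if no terminator was found within "count"
 * bytes, or 0 if the fault could not be resolved.
 */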
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen_kernel(count, src);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}
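
/*
 * Copy a '\0' terminated string from user space, at most "count" bytes.
 * The data is copied page-wise and each piece is scanned with strnlen() to
 * find the end of the string. Returns the length of the copied string
 * (without the terminating '\0') or -EFAULT.
 */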
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (segment_eq(get_fs(), KERNEL_DS)) {
			if (copy_in_kernel(len, (void __user *) dst, src))
				return -EFAULT;
		} else {
			if (__user_copy_pt((unsigned long) src, dst, len, 0))
				return -EFAULT;
		}
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < count));
	return done;
}
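
/*
 * Copy from user space to user space. Both the source and the destination
 * page are resolved with follow_table() under mm->page_table_lock, and the
 * size per iteration is limited by whichever page boundary comes first.
 * Returns the number of bytes not copied.
 */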
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, from);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}
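
/*
 * Apply a futex operation to an already resolved (kernel mapped) address:
 * load the old value, compute the new value with "insn", and store it with
 * a compare-and-swap (CS) loop. "ret" is 0 on success and stays -EFAULT if
 * the access faults.
 */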
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     " lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}
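
/*
 * Translate the user futex address with __dat_user_addr() and pin the page
 * with get_page() before dropping the page table lock, so the atomic
 * operation itself runs on a stable kernel mapping. For KERNEL_DS the
 * address is used directly.
 */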
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}
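
/*
 * Compare-and-swap on a resolved futex address using the CS instruction.
 * The value found at uaddr is returned in *uval; the return value is 0 on
 * success or -EFAULT if the access faults.
 */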
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: la %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}
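
/*
 * Operation vector with the page table walk based user access functions,
 * used when the hardware support mentioned in the file header comment is
 * not available.
 */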
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};