uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit
#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */
/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */
#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
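
/*
 * Worked example of the mask arithmetic used below (illustrative,
 * assuming a 32-bit kernel without CONFIG_KVM_GUEST, so USER_DS.seg ==
 * 0x80000000, and a non-constant size):
 *
 *	addr = 0x7fff0000, size = 0x1000:
 *	  0x80000000 & (0x7fff0000 | 0x7fff1000 | 0x1000) == 0	-> valid
 *	addr = 0x7fffffff, size = 2:
 *	  0x80000000 & (0x7fffffff | 0x80000001 | 0x2) != 0	-> invalid
 *
 * Under KERNEL_DS the mask is 0, so every address passes; that is what
 * "checking is bypassed" means above.
 */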
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
	unsigned long __addr = (unsigned long) (addr); \
	unsigned long __size = size; \
	unsigned long __mask = mask; \
	unsigned long __ok; \
	\
	__chk_user_ptr(addr); \
	__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
		__ua_size(__size))); \
	__ok == 0; \
})

#define access_ok(type, addr, size) \
	likely(__access_ok((addr), (size), __access_mask))
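
/*
 * Example usage (illustrative sketch, not part of this header; the
 * handler and its argument are hypothetical):
 *
 *	static int example_check(int __user *uaddr)
 *	{
 *		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
 *			return -EFAULT;
 *		return __put_user(0, uaddr);	// may still be -EFAULT
 *	}
 */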
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))
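
/*
 * Example (illustrative sketch; the ioctl-style handler and the value
 * returned are hypothetical):
 *
 *	static int example_get_version(u32 __user *uarg)
 *	{
 *		u32 version = 1;
 *
 *		return put_user(version, uarg);	// 0 or -EFAULT
 *	}
 */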
/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
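
/*
 * Example (illustrative sketch; the handler and its helper are
 * hypothetical). Note that on error @x is set to zero, so "val" is
 * well defined either way:
 *
 *	static int example_set_timeout(const u32 __user *uarg)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uarg))
 *			return -EFAULT;
 *		return example_apply_timeout(val);	// hypothetical helper
 *	}
 */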
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
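
/*
 * Example of the check-once pattern the __-prefixed variants are meant
 * for (illustrative sketch; the structure and handler are hypothetical):
 *
 *	struct example_pair { int a; int b; };
 *
 *	static int example_read_pair(struct example_pair __user *up,
 *				     int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */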
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_asm(val, "lh", ptr); break; \
	case 4: __get_user_asm(val, "lw", ptr); break; \
	case 8: __GET_USER_DW(val, ptr); break; \
	default: __get_user_unknown(); break; \
	} \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
	\
	__chk_user_ptr(ptr); \
	__get_user_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
	\
	might_fault(); \
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_common((x), size, __gu_ptr); \
	\
	__gu_err; \
})

#define __get_user_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1:	" insn "	%1, %3			\n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"3:	li	%0, %4				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	"__UA_ADDR "\t1b, 3b			\n" \
	"	.previous				\n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
	\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(val, addr) \
{ \
	union { \
		unsigned long long l; \
		__typeof__(*(addr)) t; \
	} __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1:	lw	%1, (%3)			\n" \
	"2:	lw	%D1, 4(%3)			\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"4:	li	%0, %4				\n" \
	"	move	%1, $0				\n" \
	"	move	%D1, $0				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	.previous				\n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp.l) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	\
	(val) = __gu_tmp.t; \
}
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
	\
	__chk_user_ptr(ptr); \
	__pu_val = (x); \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_asm("sh", ptr); break; \
	case 4: __put_user_asm("sw", ptr); break; \
	case 8: __PUT_USER_DW(ptr); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
	\
	might_fault(); \
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
		switch (size) { \
		case 1: __put_user_asm("sb", __pu_addr); break; \
		case 2: __put_user_asm("sh", __pu_addr); break; \
		case 4: __put_user_asm("sw", __pu_addr); break; \
		case 8: __PUT_USER_DW(__pu_addr); break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})
#define __put_user_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1:	" insn "	%z2, %3	# __put_user_asm\n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"3:	li	%0, %4				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	" __UA_ADDR "	1b, 3b			\n" \
	"	.previous				\n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_user_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)			\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"4:	li	%0, %4				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	.previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}
extern void __put_user_unknown(void);

/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr) \
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
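
/*
 * Example (illustrative sketch; the packed structure and handler are
 * hypothetical). The _unaligned variants expand to the assembler's
 * unaligned load/store macros (ulw, ulh, ...), so @ptr need not be
 * naturally aligned:
 *
 *	struct example_hdr {
 *		u8 tag;
 *		u32 len;	// not 4-byte aligned
 *	} __attribute__((packed));
 *
 *	static int example_read_len(struct example_hdr __user *uhdr,
 *				    u32 *len)
 *	{
 *		return get_user_unaligned(*len, &uhdr->len);
 *	}
 */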
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
	default: __get_user_unaligned_unknown(); break; \
	} \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size) \
({ \
	int __gu_err; \
	\
	__get_user_unaligned_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_unaligned_check(x,ptr,size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
	\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_unaligned_common((x), size, __gu_ptr); \
	\
	__gu_err; \
})

#define __get_user_unaligned_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1:	" insn "	%1, %3			\n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"3:	li	%0, %4				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	"__UA_ADDR "\t1b, 3b			\n" \
	"	"__UA_ADDR "\t1b + 4, 3b		\n" \
	"	.previous				\n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
	\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
	unsigned long long __gu_tmp; \
	\
	__asm__ __volatile__( \
	"1:	ulw	%1, (%3)			\n" \
	"2:	ulw	%D1, 4(%3)			\n" \
	"	move	%0, $0				\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"4:	li	%0, %4				\n" \
	"	move	%1, $0				\n" \
	"	move	%D1, $0				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	1b + 4, 4b		\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	" __UA_ADDR "	2b + 4, 4b		\n" \
	"	.previous				\n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
	\
	__pu_val = (x); \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_unaligned_asm("ush", ptr); break; \
	case 4: __put_user_unaligned_asm("usw", ptr); break; \
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
	default: __put_user_unaligned_unknown(); break; \
	} \
	__pu_err; \
})

#define __put_user_unaligned_check(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
	\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
		switch (size) { \
		case 1: __put_user_asm("sb", __pu_addr); break; \
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break; \
		default: __put_user_unaligned_unknown(); break; \
		} \
	} \
	__pu_err; \
})
#define __put_user_unaligned_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"3:	li	%0, %4				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	" __UA_ADDR "	1b, 3b			\n" \
	"	.previous				\n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_user_unaligned_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)			\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"4:	li	%0, %4				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	1b + 4, 4b		\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	" __UA_ADDR "	2b + 4, 4b		\n" \
	"	.previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);
/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	__UA_LA "\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
						    __cu_len); \
	__cu_len; \
})
/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	} \
	__cu_len; \
})
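
/*
 * Example (illustrative sketch; the read()-style handler and kernel
 * buffer are hypothetical). Note the return value is the number of
 * bytes NOT copied, so nonzero means failure:
 *
 *	static ssize_t example_read(char __user *ubuf, size_t len,
 *				    const char *kbuf, size_t avail)
 *	{
 *		size_t n = min(len, avail);
 *
 *		if (copy_to_user(ubuf, kbuf, n))
 *			return -EFAULT;
 *		return n;
 *	}
 */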
#define __invoke_copy_from_user(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
	\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user_inatomic) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})
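
/*
 * Example (illustrative sketch; the config structure and handler are
 * hypothetical). On a partial fault the destination is zero-padded and
 * the number of uncopied bytes is returned:
 *
 *	static int example_write(struct example_cfg *cfg,
 *				 const struct example_cfg __user *ucfg)
 *	{
 *		if (copy_from_user(cfg, ucfg, sizeof(*cfg)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */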
#define __copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
					   __cu_len); \
	__cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
	\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:	Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n) \
({ \
	void __user * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, \
				   __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})
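
/*
 * Example (illustrative sketch; the handler and sizes are
 * hypothetical): zero the unfilled tail of a user buffer so no stale
 * data leaks to userspace:
 *
 *	static int example_fill(char __user *ubuf, size_t len, size_t used)
 *	{
 *		if (used < len && clear_user(ubuf + used, len - used))
 *			return -EFAULT;
 *		return 0;
 *	}
 */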
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space. This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space. This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
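
/*
 * Example (illustrative sketch; the buffer size is hypothetical).
 * Remember that if the result equals the buffer size, the string was
 * truncated and is NOT NUL-terminated:
 *
 *	static int example_get_name(char *name, const char __user *uname)
 *	{
 *		long len = strncpy_from_user(name, uname, 32);
 *
 *		if (len < 0)
 *			return len;		// -EFAULT
 *		if (len == 32)
 *			return -ENAMETOOLONG;	// truncated
 *		return 0;
 *	}
 */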
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:	 The maximum valid length.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
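
/*
 * Example (illustrative sketch; the limit is hypothetical): size a
 * user string before copying it, treating 0 as a fault:
 *
 *	static long example_measure(const char __user *ustr)
 *	{
 *		long len = strnlen_user(ustr, 256);	// includes the NUL
 *
 *		if (!len)
 *			return -EFAULT;
 *		return len - 1;		// length without the NUL
 *	}
 */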
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */