/* arch/mips/include/asm/uaccess.h — MIPS user-space access helpers */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
  7. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  8. * Copyright (C) 2007 Maciej W. Rozycki
  9. */
  10. #ifndef _ASM_UACCESS_H
  11. #define _ASM_UACCESS_H
  12. #include <linux/kernel.h>
  13. #include <linux/errno.h>
  14. #include <linux/thread_info.h>
  15. /*
  16. * The fs value determines whether argument validity checking should be
  17. * performed or not. If get_fs() == USER_DS, checking is performed, with
  18. * get_fs() == KERNEL_DS, checking is bypassed.
  19. *
  20. * For historical reasons, these macros are grossly misnamed.
  21. */
#ifdef CONFIG_32BIT
/* 32-bit: user space is the low 2 GB; word-sized ops and $8/$9 scratch. */
#define __UA_LIMIT	0x80000000UL
#define __UA_ADDR	".word"		/* width of a __ex_table entry */
#define __UA_LA		"la"		/* load-address instruction */
#define __UA_ADDU	"addu"		/* pointer-sized add */
#define __UA_t0		"$8"		/* scratch regs the asm helpers clobber */
#define __UA_t1		"$9"
#endif /* CONFIG_32BIT */
#ifdef CONFIG_64BIT
/* 64-bit: everything at or above TASK_SIZE is invalid; doubleword variants. */
#define __UA_LIMIT	(- TASK_SIZE)
#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"
#endif /* CONFIG_64BIT */
  38. /*
  39. * USER_DS is a bitmask that has the bits set that may not be set in a valid
  40. * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
  41. * the arithmetic we're doing only works if the limit is a power of two, so
  42. * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
  43. * address in this range it's the process's problem, not ours :-)
  44. */
/*
 * Segment values: KERNEL_DS (mask 0) makes every address "valid";
 * USER_DS masks off anything outside the user range (see comment above).
 */
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ	0
#define VERIFY_WRITE	1	/* superset of VERIFY_READ */

#define get_ds()	(KERNEL_DS)
/* addr_limit lives in thread_info, so it is per-task. */
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
  53. /*
  54. * Is a address valid? This does a straighforward calculation rather
  55. * than tests.
  56. *
  57. * Address valid if:
  58. * - "addr" doesn't have any high-bits set
  59. * - AND "size" doesn't have any high-bits set
  60. * - AND "addr+size" doesn't have any high-bits set
  61. * - OR we are in kernel mode.
  62. *
  63. * __ua_size() is a trick to avoid runtime checking of positive constant
  64. * sizes; for those we already know at compile time that the size is ok.
  65. */
/* Constant positive sizes are known-safe at compile time, so fold to 0. */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
  68. /*
  69. * access_ok: - Checks if a user space pointer is valid
  70. * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
  71. * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
  72. * to write to a block, it is always safe to read from it.
  73. * @addr: User space pointer to start of block to check
  74. * @size: Size of block to check
  75. *
  76. * Context: User context only. This function may sleep.
  77. *
  78. * Checks if a pointer to a block of memory in user space is valid.
  79. *
  80. * Returns true (nonzero) if the memory block may be valid, false (zero)
  81. * if it is definitely invalid.
  82. *
  83. * Note that, depending on architecture, this function probably just
  84. * checks that the pointer is in the user space range - after calling
  85. * this function, memory access functions may still return -EFAULT.
  86. */
/* The mask of currently-invalid address bits, from the task's segment. */
#define __access_mask get_fs().seg

/*
 * Valid iff none of addr, addr+size, or the (runtime-checked) size has a
 * bit set that the segment mask forbids.  With KERNEL_DS the mask is 0,
 * so everything passes.  NOTE: addr+size may wrap; the OR with __addr
 * catches the interesting cases because the limit is a power of two.
 */
#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})

/* 'type' is unused on MIPS: read and write validity are the same check. */
#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))
  102. /*
  103. * put_user: - Write a simple value into user space.
  104. * @x: Value to copy to user space.
  105. * @ptr: Destination address, in user space.
  106. *
  107. * Context: User context only. This function may sleep.
  108. *
  109. * This macro copies a single simple value from kernel space to user
  110. * space. It supports simple types like char and int, but not larger
  111. * data types like structures or arrays.
  112. *
  113. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  114. * to the result of dereferencing @ptr.
  115. *
  116. * Returns zero on success, or -EFAULT on error.
  117. */
/* Checked store of one simple value to user space (see comment above). */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))
  120. /*
  121. * get_user: - Get a simple variable from user space.
  122. * @x: Variable to store result.
  123. * @ptr: Source address, in user space.
  124. *
  125. * Context: User context only. This function may sleep.
  126. *
  127. * This macro copies a single simple variable from user space to kernel
  128. * space. It supports simple types like char and int, but not larger
  129. * data types like structures or arrays.
  130. *
  131. * @ptr must have pointer-to-simple-variable type, and the result of
  132. * dereferencing @ptr must be assignable to @x without a cast.
  133. *
  134. * Returns zero on success, or -EFAULT on error.
  135. * On error, the variable @x is set to zero.
  136. */
/* Checked load of one simple value from user space (see comment above). */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
  139. /*
  140. * __put_user: - Write a simple value into user space, with less checking.
  141. * @x: Value to copy to user space.
  142. * @ptr: Destination address, in user space.
  143. *
  144. * Context: User context only. This function may sleep.
  145. *
  146. * This macro copies a single simple value from kernel space to user
  147. * space. It supports simple types like char and int, but not larger
  148. * data types like structures or arrays.
  149. *
  150. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  151. * to the result of dereferencing @ptr.
  152. *
  153. * Caller must check the pointer with access_ok() before calling this
  154. * function.
  155. *
  156. * Returns zero on success, or -EFAULT on error.
  157. */
/* Unchecked store: caller must already have done access_ok(). */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
  160. /*
  161. * __get_user: - Get a simple variable from user space, with less checking.
  162. * @x: Variable to store result.
  163. * @ptr: Source address, in user space.
  164. *
  165. * Context: User context only. This function may sleep.
  166. *
  167. * This macro copies a single simple variable from user space to kernel
  168. * space. It supports simple types like char and int, but not larger
  169. * data types like structures or arrays.
  170. *
  171. * @ptr must have pointer-to-simple-variable type, and the result of
  172. * dereferencing @ptr must be assignable to @x without a cast.
  173. *
  174. * Caller must check the pointer with access_ok() before calling this
  175. * function.
  176. *
  177. * Returns zero on success, or -EFAULT on error.
  178. * On error, the variable @x is set to zero.
  179. */
/* Unchecked load: caller must already have done access_ok(). */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * "o" constraints need an lvalue of known object type; pretend the user
 * pointer addresses a large struct so gcc accepts any offset we form.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
  184. /*
  185. * Yuck. We need two variants, one for 64bit operation and one
  186. * for 32 bit mode and old iron.
  187. */
#ifdef CONFIG_32BIT
/* 64-bit loads need two 32-bit lw's on a 32-bit kernel. */
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
/* 64-bit kernel can use a single ld. */
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

/* Deliberately unresolved: referencing it breaks the link on a bad size. */
extern void __get_user_unknown(void);

/* Size-dispatch to the proper load instruction; sets __gu_err (caller's). */
#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_asm(val, "lh", ptr); break;			\
	case 4: __get_user_asm(val, "lw", ptr); break;			\
	case 8: __GET_USER_DW(val, ptr); break;				\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
/*
 * Unchecked get_user core.  __gu_err is written inside the asm macros
 * ("0" (0) seeds it to 0; the fixup path stores -EFAULT), so it is not
 * initialised here.
 */
#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	__chk_user_ptr(ptr);						\
	__get_user_common((x), size, ptr);				\
	__gu_err;							\
})
/*
 * Checked get_user core: validates the pointer first and returns -EFAULT
 * without touching memory if it is bad.  May sleep (might_fault).
 */
#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_common((x), size, __gu_ptr);			\
									\
	__gu_err;							\
})
/*
 * One-instruction user load with exception fixup.
 * %0 = __gu_err (from enclosing macro, seeded 0), %1 = temporary,
 * %3 = user memory operand.  A fault at label 1 jumps to the .fixup
 * stub 3, which stores -EFAULT and resumes at 2.  The __ex_table entry
 * maps the faulting pc to the fixup address.
 */
#define __get_user_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
  243. /*
  244. * Get a long long 64 using 32 bit registers.
  245. */
/*
 * 64-bit user load on a 32-bit kernel: two lw's into a register pair
 * (%1 / %D1).  The union lets us assemble the pair as unsigned long long
 * and then convert to the pointer's real type without a cast chain.
 * Either lw may fault; both pcs map to fixup 4, which zeroes the pair
 * (get_user contract: x is 0 on error) and sets -EFAULT.
 */
#define __get_user_asm_ll32(val, addr)					\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	lw	%1, (%3)				\n"	\
	"2:	lw	%D1, 4(%3)				\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
  271. /*
  272. * Yuck. We need two variants, one for 64bit operation and one
  273. * for 32 bit mode and old iron.
  274. */
  275. #ifdef CONFIG_32BIT
  276. #define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
  277. #endif
  278. #ifdef CONFIG_64BIT
  279. #define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
  280. #endif
/*
 * Unchecked put_user core.  __pu_val/__pu_err are referenced by name
 * inside the __put_user_asm* macros below.
 */
#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(ptr);						\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_asm("sh", ptr); break;			\
	case 4: __put_user_asm("sw", ptr); break;			\
	case 8: __PUT_USER_DW(ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})
/*
 * Checked put_user core: returns -EFAULT without touching user memory
 * when access_ok() fails.  May sleep (might_fault).
 */
#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_asm("sh", __pu_addr); break;		\
		case 4: __put_user_asm("sw", __pu_addr); break;		\
		case 8: __PUT_USER_DW(__pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})
/*
 * One-instruction user store with exception fixup.  %z2 emits $0 when
 * __pu_val is the constant 0 ("Jr" constraint); fault at 1 branches to
 * fixup 3 which sets -EFAULT and resumes at 2.
 */
#define __put_user_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
/*
 * 64-bit user store on a 32-bit kernel: two sw's from the %2/%D2
 * register pair.  Either store may fault; both map to fixup 4.
 */
#define __put_user_asm_ll32(ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

/* Deliberately unresolved: referencing it breaks the link on a bad size. */
extern void __put_user_unknown(void);
  350. /*
  351. * put_user_unaligned: - Write a simple value into user space.
  352. * @x: Value to copy to user space.
  353. * @ptr: Destination address, in user space.
  354. *
  355. * Context: User context only. This function may sleep.
  356. *
  357. * This macro copies a single simple value from kernel space to user
  358. * space. It supports simple types like char and int, but not larger
  359. * data types like structures or arrays.
  360. *
  361. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  362. * to the result of dereferencing @ptr.
  363. *
  364. * Returns zero on success, or -EFAULT on error.
  365. */
/* Checked store tolerating an unaligned user pointer (see comment above). */
#define put_user_unaligned(x,ptr)	\
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
  368. /*
  369. * get_user_unaligned: - Get a simple variable from user space.
  370. * @x: Variable to store result.
  371. * @ptr: Source address, in user space.
  372. *
  373. * Context: User context only. This function may sleep.
  374. *
  375. * This macro copies a single simple variable from user space to kernel
  376. * space. It supports simple types like char and int, but not larger
  377. * data types like structures or arrays.
  378. *
  379. * @ptr must have pointer-to-simple-variable type, and the result of
  380. * dereferencing @ptr must be assignable to @x without a cast.
  381. *
  382. * Returns zero on success, or -EFAULT on error.
  383. * On error, the variable @x is set to zero.
  384. */
/* Checked load tolerating an unaligned user pointer (see comment above). */
#define get_user_unaligned(x,ptr)	\
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
  387. /*
  388. * __put_user_unaligned: - Write a simple value into user space, with less checking.
  389. * @x: Value to copy to user space.
  390. * @ptr: Destination address, in user space.
  391. *
  392. * Context: User context only. This function may sleep.
  393. *
  394. * This macro copies a single simple value from kernel space to user
  395. * space. It supports simple types like char and int, but not larger
  396. * data types like structures or arrays.
  397. *
  398. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  399. * to the result of dereferencing @ptr.
  400. *
  401. * Caller must check the pointer with access_ok() before calling this
  402. * function.
  403. *
  404. * Returns zero on success, or -EFAULT on error.
  405. */
/* Unchecked unaligned store: caller must already have done access_ok(). */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
  408. /*
  409. * __get_user_unaligned: - Get a simple variable from user space, with less checking.
  410. * @x: Variable to store result.
  411. * @ptr: Source address, in user space.
  412. *
  413. * Context: User context only. This function may sleep.
  414. *
  415. * This macro copies a single simple variable from user space to kernel
  416. * space. It supports simple types like char and int, but not larger
  417. * data types like structures or arrays.
  418. *
  419. * @ptr must have pointer-to-simple-variable type, and the result of
  420. * dereferencing @ptr must be assignable to @x without a cast.
  421. *
  422. * Caller must check the pointer with access_ok() before calling this
  423. * function.
  424. *
  425. * Returns zero on success, or -EFAULT on error.
  426. * On error, the variable @x is set to zero.
  427. */
  428. #define __get_user_unaligned(x,ptr) \
  429. __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
  430. /*
  431. * Yuck. We need two variants, one for 64bit operation and one
  432. * for 32 bit mode and old iron.
  433. */
#ifdef CONFIG_32BIT
/* 64-bit unaligned load = two 32-bit ulw's on a 32-bit kernel. */
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
/* uld assembler macro handles any alignment on a 64-bit kernel. */
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

/* Deliberately unresolved: referencing it breaks the link on a bad size. */
extern void __get_user_unaligned_unknown(void);
/*
 * Size dispatch for unaligned loads.  A single byte can never be
 * misaligned, so case 1 reuses the plain __get_user_asm.
 */
#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)
/*
 * Unchecked unaligned get_user core.  __gu_err is written inside the asm
 * macros ("0" (0) seeds it; fixup stores -EFAULT).  NOTE(review): unlike
 * __get_user_nocheck this omits __chk_user_ptr() — sparse will not flag
 * a bad pointer here; confirm whether that is intentional.
 */
#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})
/*
 * Checked unaligned get_user core: -EFAULT without touching memory when
 * access_ok() fails.  NOTE(review): the aligned __get_user_check calls
 * might_fault() here; this variant does not — confirm intent.
 */
#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})
/*
 * Unaligned user load with fixup.  ulh/ulw/uld are assembler macros
 * that expand to two real instructions, so the __ex_table gets entries
 * for both possible faulting pcs: 1b and 1b + 4.
 */
#define __get_user_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
  490. /*
  491. * Get a long long 64 using 32 bit registers.
  492. */
/*
 * 64-bit unaligned load on a 32-bit kernel: two ulw's into %1/%D1.
 * Each ulw expands to two instructions, hence four __ex_table entries
 * (1b, 1b+4, 2b, 2b+4).  Fixup 4 zeroes the pair and sets -EFAULT.
 * The explicit "move %0, $0" on the success path is redundant with the
 * "0" (0) input constraint but harmless.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
  517. /*
  518. * Yuck. We need two variants, one for 64bit operation and one
  519. * for 32 bit mode and old iron.
  520. */
#ifdef CONFIG_32BIT
/* 64-bit unaligned store = two 32-bit sw's on a 32-bit kernel. */
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
/* usd assembler macro handles any alignment on a 64-bit kernel. */
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif
/*
 * Unchecked unaligned put_user core.  A single byte can never be
 * misaligned, so case 1 reuses the plain __put_user_asm.
 */
#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
	__pu_err;							\
})
  542. #define __put_user_unaligned_check(x,ptr,size) \
  543. ({ \
  544. __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
  545. __typeof__(*(ptr)) __pu_val = (x); \
  546. int __pu_err = -EFAULT; \
  547. \
  548. if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
  549. switch (size) { \
  550. case 1: __put_user_asm("sb", __pu_addr); break; \
  551. case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
  552. case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
  553. case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \
  554. default: __put_user_unaligned_unknown(); break; \
  555. } \
  556. } \
  557. __pu_err; \
  558. })
/*
 * Unaligned user store with fixup.  %z2 emits $0 for a constant-zero
 * value.  NOTE(review): ush/usw/usd also expand to two instructions but
 * only pc 1b is in the __ex_table here (the aligned-load twin adds
 * 1b + 4) — a fault in the second half would be unhandled; verify.
 */
#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
/*
 * 64-bit unaligned store on a 32-bit kernel: two sw's from %2/%D2, with
 * 1b + 4 / 2b + 4 fixup entries covering the expanded instruction pairs.
 */
#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

/* Deliberately unresolved: referencing it breaks the link on a bad size. */
extern void __put_user_unaligned_unknown(void);
  596. /*
  597. * We're generating jump to subroutines which will be outside the range of
  598. * jump instructions
  599. */
#ifdef MODULE
/*
 * Modules may be loaded out of jal's 256 MB segment range, so call via
 * a register: load the target address into $1 (at) and jalr.
 */
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
/* In-kernel callers are within direct jal range. */
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

/*
 * Extra scratch register the copy routines clobber when the DADDI
 * erratum workaround is enabled; $0 (harmless) otherwise.
 */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif
/* Assembly copy routine; returns the number of bytes NOT copied. */
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

/*
 * Call __copy_user with its fixed register calling convention:
 * $4 = to, $5 = from, $6 = len; residual count comes back in $6.
 * The clobber list matches the registers the asm routine uses.
 */
#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
  633. /*
  634. * __copy_to_user: - Copy a block of data into user space, with less checking.
  635. * @to: Destination address, in user space.
  636. * @from: Source address, in kernel space.
  637. * @n: Number of bytes to copy.
  638. *
  639. * Context: User context only. This function may sleep.
  640. *
  641. * Copy data from kernel space to user space. Caller must check
  642. * the specified block with access_ok() before calling this function.
  643. *
  644. * Returns number of bytes that could not be copied.
  645. * On success, this will be zero.
  646. */
/*
 * Unchecked copy to user space (caller did access_ok); returns bytes
 * not copied.  May sleep.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})
/* Non-sleeping copy routine variant; returns bytes NOT copied. */
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

/* Like __copy_to_user but safe in atomic context: no might_fault(). */
#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})
/* Atomic-context copy from user; uses the __copy_user_inatomic helper. */
#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,	\
						    __cu_len);		\
	__cu_len;							\
})
  686. /*
  687. * copy_to_user: - Copy a block of data into user space.
  688. * @to: Destination address, in user space.
  689. * @from: Source address, in kernel space.
  690. * @n: Number of bytes to copy.
  691. *
  692. * Context: User context only. This function may sleep.
  693. *
  694. * Copy data from kernel space to user space.
  695. *
  696. * Returns number of bytes that could not be copied.
  697. * On success, this will be zero.
  698. */
/*
 * Checked copy to user space; returns bytes not copied (so n on a
 * completely invalid destination).  May sleep.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	}								\
	__cu_len;							\
})
/*
 * Call __copy_user for a from-user copy ($4 = to, $5 = from, $6 = len;
 * residue back in $6).  The trailing __UA_ADDU computes from + len into
 * $1 for the exception handler's zero-padding path, so it must stay
 * glued to the call (noreorder).
 */
#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
/* Same calling convention as above, but via __copy_user_inatomic. */
#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
  759. /*
  760. * __copy_from_user: - Copy a block of data from user space, with less checking.
  761. * @to: Destination address, in kernel space.
  762. * @from: Source address, in user space.
  763. * @n: Number of bytes to copy.
  764. *
  765. * Context: User context only. This function may sleep.
  766. *
  767. * Copy data from user space to kernel space. Caller must check
  768. * the specified block with access_ok() before calling this function.
  769. *
  770. * Returns number of bytes that could not be copied.
  771. * On success, this will be zero.
  772. *
  773. * If some data could not be copied, this function will pad the copied
  774. * data to the requested size using zero bytes.
  775. */
/*
 * Unchecked copy from user space (caller did access_ok); returns bytes
 * not copied, zero-padding the destination on a fault.  May sleep.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})
  790. /*
  791. * copy_from_user: - Copy a block of data from user space.
  792. * @to: Destination address, in kernel space.
  793. * @from: Source address, in user space.
  794. * @n: Number of bytes to copy.
  795. *
  796. * Context: User context only. This function may sleep.
  797. *
  798. * Copy data from user space to kernel space.
  799. *
  800. * Returns number of bytes that could not be copied.
  801. * On success, this will be zero.
  802. *
  803. * If some data could not be copied, this function will pad the copied
  804. * data to the requested size using zero bytes.
  805. */
  806. #define copy_from_user(to, from, n) \
  807. ({ \
  808. void *__cu_to; \
  809. const void __user *__cu_from; \
  810. long __cu_len; \
  811. \
  812. __cu_to = (to); \
  813. __cu_from = (from); \
  814. __cu_len = (n); \
  815. if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
  816. might_fault(); \
  817. __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
  818. __cu_len); \
  819. } \
  820. __cu_len; \
  821. })
  822. #define __copy_in_user(to, from, n) \
  823. ({ \
  824. void __user *__cu_to; \
  825. const void __user *__cu_from; \
  826. long __cu_len; \
  827. \
  828. __cu_to = (to); \
  829. __cu_from = (from); \
  830. __cu_len = (n); \
  831. might_fault(); \
  832. __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
  833. __cu_len); \
  834. __cu_len; \
  835. })
  836. #define copy_in_user(to, from, n) \
  837. ({ \
  838. void __user *__cu_to; \
  839. const void __user *__cu_from; \
  840. long __cu_len; \
  841. \
  842. __cu_to = (to); \
  843. __cu_from = (from); \
  844. __cu_len = (n); \
  845. if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
  846. access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
  847. might_fault(); \
  848. __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
  849. __cu_len); \
  850. } \
  851. __cu_len; \
  852. })
  853. /*
  854. * __clear_user: - Zero a block of memory in user space, with less checking.
  855. * @to: Destination address, in user space.
  856. * @n: Number of bytes to zero.
  857. *
  858. * Zero a block of memory in user space. Caller must check
  859. * the specified block with access_ok() before calling this function.
  860. *
  861. * Returns number of bytes that could not be cleared.
  862. * On success, this will be zero.
  863. */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * Marshal arguments for the asm helper __bzero:
 *   $4 = destination, $5 = fill value (zero), $6 = byte count.
 * __bzero leaves the number of bytes it could NOT clear in $6,
 * which becomes the return value (0 on complete success).
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
__MODULE_JAL(__bzero)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
return res;
}
  880. #define clear_user(addr,n) \
  881. ({ \
  882. void __user * __cl_addr = (addr); \
  883. unsigned long __cl_size = (n); \
  884. if (__cl_size && access_ok(VERIFY_WRITE, \
  885. __cl_addr, __cl_size)) \
  886. __cl_size = __clear_user(__cl_addr, __cl_size); \
  887. __cl_size; \
  888. })
  889. /*
  890. * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
  891. * @dst: Destination address, in kernel space. This buffer must be at
  892. * least @count bytes long.
  893. * @src: Source address, in user space.
  894. * @count: Maximum number of bytes to copy, including the trailing NUL.
  895. *
  896. * Copies a NUL-terminated string from userspace to kernel space.
  897. * Caller must check the specified block with access_ok() before calling
  898. * this function.
  899. *
  900. * On success, returns the length of the string (not including the trailing
  901. * NUL).
  902. *
  903. * If access to userspace fails, returns -EFAULT (some data may have been
  904. * copied).
  905. *
  906. * If @count is smaller than the length of the string, copies @count bytes
  907. * and returns @count.
  908. */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * Marshal arguments into the MIPS argument registers:
 *   $4 = kernel destination, $5 = user source, $6 = max length.
 * The "nocheck" helper does no access_ok() itself -- the caller
 * must validate @__from first.  Its result comes back in $2.
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
__MODULE_JAL(__strncpy_from_user_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
return res;
}
  925. /*
  926. * strncpy_from_user: - Copy a NUL terminated string from userspace.
  927. * @dst: Destination address, in kernel space. This buffer must be at
  928. * least @count bytes long.
  929. * @src: Source address, in user space.
  930. * @count: Maximum number of bytes to copy, including the trailing NUL.
  931. *
  932. * Copies a NUL-terminated string from userspace to kernel space.
  933. *
  934. * On success, returns the length of the string (not including the trailing
  935. * NUL).
  936. *
  937. * If access to userspace fails, returns -EFAULT (some data may have been
  938. * copied).
  939. *
  940. * If @count is smaller than the length of the string, copies @count bytes
  941. * and returns @count.
  942. */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * Marshal arguments into the MIPS argument registers:
 *   $4 = kernel destination, $5 = user source, $6 = max length.
 * This checking variant needs no prior access_ok() from the
 * caller.  The helper's result comes back in $2.
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
__MODULE_JAL(__strncpy_from_user_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
return res;
}
  959. /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
long res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * $4 carries the string pointer; the helper returns in $2.
 * "nocheck" variant: presumably the caller has already validated
 * @s with access_ok() -- confirm at call sites.
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s)
: "$2", "$4", __UA_t0, "$31");
return res;
}
  973. /*
  974. * strlen_user: - Get the size of a string in user space.
  975. * @str: The string to measure.
  976. *
  977. * Context: User context only. This function may sleep.
  978. *
  979. * Get the size of a NUL-terminated string in user space.
  980. *
  981. * Returns the size of the string INCLUDING the terminating NUL.
  982. * On exception, returns 0.
  983. *
  984. * If there is a limit on the length of a valid string, you may wish to
  985. * consider using strnlen_user() instead.
  986. */
static inline long strlen_user(const char __user *s)
{
long res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * $4 carries the string pointer; the checking asm helper
 * returns the size (including NUL) in $2, or 0 on exception.
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s)
: "$2", "$4", __UA_t0, "$31");
return res;
}
  1000. /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
long res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * $4 = string pointer, $5 = length limit; the helper returns
 * in $2.  "nocheck" variant: presumably the caller has already
 * validated @s with access_ok() -- confirm at call sites.
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
__MODULE_JAL(__strnlen_user_nocheck_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", __UA_t0, "$31");
return res;
}
  1015. /*
  1016. * strlen_user: - Get the size of a string in user space.
  1017. * @str: The string to measure.
  1018. *
  1019. * Context: User context only. This function may sleep.
  1020. *
  1021. * Get the size of a NUL-terminated string in user space.
  1022. *
  1023. * Returns the size of the string INCLUDING the terminating NUL.
  1024. * On exception, returns 0.
  1025. *
  1026. * If there is a limit on the length of a valid string, you may wish to
  1027. * consider using strnlen_user() instead.
  1028. */
static inline long strnlen_user(const char __user *s, long n)
{
long res;
might_fault(); /* may sleep while faulting user pages in */
/*
 * $4 = string pointer, $5 = length limit; the checking asm
 * helper returns the size (including NUL) in $2, or 0 on
 * exception.
 */
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
__MODULE_JAL(__strnlen_user_asm)
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", __UA_t0, "$31");
return res;
}
  1043. struct exception_table_entry
  1044. {
  1045. unsigned long insn;
  1046. unsigned long nextinsn;
  1047. };
  1048. extern int fixup_exception(struct pt_regs *regs);
  1049. #endif /* _ASM_UACCESS_H */