/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, and if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT	0x40000000UL
#else
#define __UA_LIMIT	0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit
#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */
#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))
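
/*
 * Illustrative sketch (not part of the original header): how the mask
 * arithmetic above rejects bad ranges.  With USER_DS on a plain 32-bit
 * kernel the mask is 0x80000000.  For addr = 0x7ffffff0 and size = 0x20,
 * addr + size = 0x80000010, so the OR has bit 31 set, the masked value is
 * nonzero, and access_ok() fails.  A typical (hypothetical) caller:
 *
 *	static int check_buf(void __user *ubuf, size_t len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */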
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
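
/*
 * Illustrative sketch (not from this header): typical get_user()/put_user()
 * use in a syscall-style helper.  The function name and arguments are
 * hypothetical.
 *
 *	static long bump_counter(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))	// fetches *uptr; 0 or -EFAULT
 *			return -EFAULT;
 *		val++;
 *		return put_user(val, uptr);	// stores val back to user space
 *	}
 */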
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
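
/*
 * Illustrative sketch (hypothetical): the "less checking" variants suit
 * loops that have already validated the whole range once:
 *
 *	static long sum_words(const int __user *uptr, unsigned long count)
 *	{
 *		unsigned long i;
 *		long sum = 0;
 *		int val;
 *
 *		if (!access_ok(VERIFY_READ, uptr, count * sizeof(int)))
 *			return -EFAULT;
 *		for (i = 0; i < count; i++) {
 *			if (__get_user(val, uptr + i))	// no range recheck
 *				return -EFAULT;
 *			sum += val;
 *		}
 *		return sum;
 *	}
 */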
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_asm(val, "lh", ptr); break;			\
	case 4: __get_user_asm(val, "lw", ptr); break;			\
	case 8: __GET_USER_DW(val, ptr); break;				\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	__chk_user_ptr(ptr);						\
	__get_user_common((x), size, ptr);				\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_common((x), size, __gu_ptr);			\
									\
	__gu_err;							\
})

#define __get_user_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(val, addr)					\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	lw	%1, (%3)				\n"	\
	"2:	lw	%D1, 4(%3)				\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__chk_user_ptr(ptr);						\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_asm("sh", ptr); break;			\
	case 4: __put_user_asm("sw", ptr); break;			\
	case 8: __PUT_USER_DW(ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_asm("sh", __pu_addr); break;		\
		case 4: __put_user_asm("sw", __pu_addr); break;		\
		case 8: __PUT_USER_DW(__pu_addr); break;		\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})
#define __put_user_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_asm_ll32(ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr) \
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
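
/*
 * Illustrative sketch (hypothetical): the unaligned variants are for fields
 * that may not sit on their natural boundary, e.g. a 32-bit counter inside
 * a packed structure in user memory:
 *
 *	struct pkt_hdr { u8 flags; u32 seq; } __attribute__((packed));
 *
 *	static long read_seq(const struct pkt_hdr __user *hdr, u32 *seq)
 *	{
 *		// hdr->seq is at offset 1, so a plain lw could trap
 *		return get_user_unaligned(*seq, &hdr->seq);
 *	}
 */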
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_user_asm(val, "lb", ptr); break;			\
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})

#define __get_user_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	switch (size) {							\
	case 1: __put_user_asm("sb", ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
	__pu_err;							\
})

#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;	\
		default: __put_user_unaligned_unknown(); break;		\
		}							\
	}								\
	__pu_err;							\
})
#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}

#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif
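
/*
 * Illustrative note (not from the original source): for built-in code the
 * macro emits a plain "jal __copy_user", whose 26-bit target field only
 * reaches within the current 256 MB segment.  Modules are loaded far from
 * the core kernel text, so the MODULE variant loads the full address into
 * $1 and jumps through the register instead, expanding to roughly:
 *
 *	.set	noat
 *	dla	$1, __copy_user		# la on 32-bit kernels
 *	jalr	$1
 *	.set	at
 */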
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len;							\
})

extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,	\
						    __cu_len);		\
	__cu_len;							\
})
/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	}								\
	__cu_len;							\
})
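
/*
 * Illustrative sketch (hypothetical): a read()-style handler returning a
 * kernel buffer to user space.  Note the return value of copy_to_user()
 * is the number of bytes NOT copied, so nonzero means failure:
 *
 *	static ssize_t dump_state(char __user *ubuf, size_t len)
 *	{
 *		char kbuf[64] = "state: ok\n";
 *		size_t n = min(len, sizeof(kbuf));
 *
 *		if (copy_to_user(ubuf, kbuf, n))
 *			return -EFAULT;
 *		return n;
 *	}
 */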
#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
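
/*
 * Illustrative sketch (hypothetical): an ioctl-style handler pulling a
 * structure in from user space with copy_from_user():
 *
 *	struct my_args { int fd; unsigned int flags; };
 *
 *	static long handle_ioctl(void __user *argp)
 *	{
 *		struct my_args args;
 *
 *		if (copy_from_user(&args, argp, sizeof(args)))
 *			return -EFAULT;
 *		return args.flags ? 0 : -EINVAL;
 *	}
 */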
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n:	Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
				   __cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
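
/*
 * Illustrative sketch (hypothetical): zeroing the tail of a user buffer,
 * e.g. to avoid exposing stale data past the bytes actually produced:
 *
 *	static int pad_tail(void __user *ubuf, size_t used, size_t total)
 *	{
 *		if (used < total &&
 *		    clear_user((char __user *)ubuf + used, total - used))
 *			return -EFAULT;
 *		return 0;
 *	}
 */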
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
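
/*
 * Illustrative sketch (hypothetical): fetching a short name string from
 * user space.  A return value equal to the buffer size means the string
 * was truncated and no NUL fit:
 *
 *	static long fetch_name(char *kname, const char __user *uname)
 *	{
 *		long len = strncpy_from_user(kname, uname, 32);
 *
 *		if (len < 0)
 *			return len;		// -EFAULT
 *		if (len == 32)
 *			return -ENAMETOOLONG;	// no room for the NUL
 *		return 0;
 *	}
 */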
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:	 The maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
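
/*
 * Illustrative sketch (hypothetical): sizing a user string before copying
 * it, using strnlen_user() so an unterminated buffer cannot run away:
 *
 *	static long name_size(const char __user *uname)
 *	{
 *		long len = strnlen_user(uname, PATH_MAX);
 *
 *		if (!len)
 *			return -EFAULT;		// faulted while scanning
 *		return len;			// includes the trailing NUL
 *	}
 */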
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);
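
/*
 * Illustrative note (an assumption, not part of this header): the fixup
 * handler can search the __ex_table entries built by the macros above for
 * one whose insn matches the faulting program counter and, if found,
 * resume execution at nextinsn, along the lines of:
 *
 *	const struct exception_table_entry *e =
 *		search_exception_tables(exception_epc(regs));
 *	if (e) {
 *		regs->cp0_epc = e->nextinsn;	// jump to the fixup code
 *		return 1;
 *	}
 *	return 0;
 */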
#endif /* _ASM_UACCESS_H */