uaccess.h 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
  7. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  8. * Copyright (C) 2007 Maciej W. Rozycki
  9. */
  10. #ifndef _ASM_UACCESS_H
  11. #define _ASM_UACCESS_H
  12. #include <linux/kernel.h>
  13. #include <linux/errno.h>
  14. #include <linux/thread_info.h>
  15. /*
  16. * The fs value determines whether argument validity checking should be
  17. * performed or not. If get_fs() == USER_DS, checking is performed, with
  18. * get_fs() == KERNEL_DS, checking is bypassed.
  19. *
  20. * For historical reasons, these macros are grossly misnamed.
  21. */
/*
 * Per-ABI assembler building blocks:
 *  __UA_LIMIT - bits that must be clear in a valid user pointer
 *               (runtime-selected on 64-bit kernels via __ua_limit),
 *  __UA_ADDR  - directive emitting a pointer-sized word in __ex_table,
 *  __UA_LA    - pointer-width load-address pseudo-instruction,
 *  __UA_ADDU  - pointer-width add,
 *  __UA_t0/__UA_t1 - scratch registers the access helpers may use.
 */
#ifdef CONFIG_32BIT

#define __UA_LIMIT 0x80000000UL

#define __UA_ADDR ".word"
#define __UA_LA "la"
#define __UA_ADDU "addu"
#define __UA_t0 "$8"
#define __UA_t1 "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

/* Set at boot according to the 64-bit address-space layout. */
extern u64 __ua_limit;

#define __UA_LIMIT __ua_limit

#define __UA_ADDR ".dword"
#define __UA_LA "dla"
#define __UA_ADDU "daddu"
#define __UA_t0 "$12"
#define __UA_t1 "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */
#define KERNEL_DS ((mm_segment_t) { 0UL })		/* empty mask: no checking */
#define USER_DS ((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)
  54. /*
55. * Is an address valid? This does a straightforward calculation rather
  56. * than tests.
  57. *
  58. * Address valid if:
  59. * - "addr" doesn't have any high-bits set
  60. * - AND "size" doesn't have any high-bits set
  61. * - AND "addr+size" doesn't have any high-bits set
  62. * - OR we are in kernel mode.
  63. *
  64. * __ua_size() is a trick to avoid runtime checking of positive constant
  65. * sizes; for those we already know at compile time that the size is ok.
  66. */
/*
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
/* Current segment mask: USER_DS bits, or 0UL when running with KERNEL_DS. */
#define __access_mask get_fs().seg

/*
 * Valid iff no forbidden bit of the mask is set in addr, in addr + size,
 * or in size itself (the size term is elided at compile time for positive
 * constant sizes via __ua_size()).
 */
#define __access_ok(addr, size, mask) \
({ \
	unsigned long __addr = (unsigned long) (addr); \
	unsigned long __size = size; \
	unsigned long __mask = mask; \
	unsigned long __ok; \
\
	__chk_user_ptr(addr); \
	__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
		__ua_size(__size))); \
	__ok == 0; \
})

#define access_ok(type, addr, size) \
	likely(__access_ok((addr), (size), __access_mask))
  103. /*
  104. * put_user: - Write a simple value into user space.
  105. * @x: Value to copy to user space.
  106. * @ptr: Destination address, in user space.
  107. *
  108. * Context: User context only. This function may sleep.
  109. *
  110. * This macro copies a single simple value from kernel space to user
  111. * space. It supports simple types like char and int, but not larger
  112. * data types like structures or arrays.
  113. *
  114. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  115. * to the result of dereferencing @ptr.
  116. *
  117. * Returns zero on success, or -EFAULT on error.
  118. */
/* Checked store to user space; see the kernel-doc above.  0 or -EFAULT. */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))
  121. /*
  122. * get_user: - Get a simple variable from user space.
  123. * @x: Variable to store result.
  124. * @ptr: Source address, in user space.
  125. *
  126. * Context: User context only. This function may sleep.
  127. *
  128. * This macro copies a single simple variable from user space to kernel
  129. * space. It supports simple types like char and int, but not larger
  130. * data types like structures or arrays.
  131. *
  132. * @ptr must have pointer-to-simple-variable type, and the result of
  133. * dereferencing @ptr must be assignable to @x without a cast.
  134. *
  135. * Returns zero on success, or -EFAULT on error.
  136. * On error, the variable @x is set to zero.
  137. */
/* Checked load from user space; see the kernel-doc above.  0 or -EFAULT. */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
  140. /*
  141. * __put_user: - Write a simple value into user space, with less checking.
  142. * @x: Value to copy to user space.
  143. * @ptr: Destination address, in user space.
  144. *
  145. * Context: User context only. This function may sleep.
  146. *
  147. * This macro copies a single simple value from kernel space to user
  148. * space. It supports simple types like char and int, but not larger
  149. * data types like structures or arrays.
  150. *
  151. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  152. * to the result of dereferencing @ptr.
  153. *
  154. * Caller must check the pointer with access_ok() before calling this
  155. * function.
  156. *
  157. * Returns zero on success, or -EFAULT on error.
  158. */
/* Unchecked store: caller must have validated @ptr with access_ok(). */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
  161. /*
  162. * __get_user: - Get a simple variable from user space, with less checking.
  163. * @x: Variable to store result.
  164. * @ptr: Source address, in user space.
  165. *
  166. * Context: User context only. This function may sleep.
  167. *
  168. * This macro copies a single simple variable from user space to kernel
  169. * space. It supports simple types like char and int, but not larger
  170. * data types like structures or arrays.
  171. *
  172. * @ptr must have pointer-to-simple-variable type, and the result of
  173. * dereferencing @ptr must be assignable to @x without a cast.
  174. *
  175. * Caller must check the pointer with access_ok() before calling this
  176. * function.
  177. *
  178. * Returns zero on success, or -EFAULT on error.
  179. * On error, the variable @x is set to zero.
  180. */
/* Unchecked load: caller must have validated @ptr with access_ok(). */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __m() casts an arbitrary user address to a pointer to a dummy "large
 * struct" so the "o" (offsettable memory) asm constraint below covers
 * accesses at small offsets from the pointer.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
  185. /*
  186. * Yuck. We need two variants, one for 64bit operation and one
  187. * for 32 bit mode and old iron.
  188. */
/*
 * 8-byte user load: single "ld" on 64-bit kernels, pair of 32-bit
 * loads (__get_user_asm_ll32) on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
#endif

/* Deliberately undefined: referencing it turns a bad size into a link error. */
extern void __get_user_unknown(void);

/* Dispatch on access size to the matching load instruction. */
#define __get_user_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_asm(val, "lh", ptr); break; \
	case 4: __get_user_asm(val, "lw", ptr); break; \
	case 8: __GET_USER_DW(val, ptr); break; \
	default: __get_user_unknown(); break; \
	} \
} while (0)

/*
 * __get_user() implementation: no access_ok() check.  The asm helpers
 * write __gu_err (0 on success, -EFAULT via the exception fixup).
 */
#define __get_user_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
\
	__chk_user_ptr(ptr); \
	__get_user_common((x), size, ptr); \
	__gu_err; \
})

/*
 * get_user() implementation: validates the source range first; leaves
 * __gu_err at -EFAULT when access_ok() rejects it.
 */
#define __get_user_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
	might_fault(); \
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_common((x), size, __gu_ptr); \
\
	__gu_err; \
})
/*
 * Load one aligned item from user space.  Label 1 is the access; a fault
 * there is redirected via __ex_table to label 3 in .fixup, which loads
 * -EFAULT into %0 and resumes at label 2.  %0 is the caller's __gu_err,
 * pre-set to 0 by the "0" (0) input on the non-faulting path.
 * NOTE: expands in a scope that declares "int __gu_err" (see callers).
 */
#define __get_user_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a long long 64 using 32 bit registers.
 * Two "lw" loads fill the register pair (%1 / %D1); either may fault,
 * so both carry an __ex_table entry.  On fault the fixup zeroes the
 * pair (get_user() contract) and sets -EFAULT.  The union gives the
 * value back in the pointee's own type without an aliasing cast.
 */
#define __get_user_asm_ll32(val, addr) \
{ \
	union { \
		unsigned long long l; \
		__typeof__(*(addr)) t; \
	} __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: lw %1, (%3) \n" \
	"2: lw %D1, 4(%3) \n" \
	"3: .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp.l) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
\
	(val) = __gu_tmp.t; \
}
  272. /*
  273. * Yuck. We need two variants, one for 64bit operation and one
  274. * for 32 bit mode and old iron.
  275. */
/*
 * 8-byte user store: single "sd" on 64-bit kernels, pair of 32-bit
 * stores (__put_user_asm_ll32) on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
#endif

/*
 * __put_user() implementation: no access_ok() check.  The asm helpers
 * read __pu_val and set __pu_err (0 on success, -EFAULT on fault).
 */
#define __put_user_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
\
	__chk_user_ptr(ptr); \
	__pu_val = (x); \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_asm("sh", ptr); break; \
	case 4: __put_user_asm("sw", ptr); break; \
	case 8: __PUT_USER_DW(ptr); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})

/*
 * put_user() implementation: validates the destination range first;
 * leaves __pu_err at -EFAULT when access_ok() rejects it.
 */
#define __put_user_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
\
	might_fault(); \
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
		switch (size) { \
		case 1: __put_user_asm("sb", __pu_addr); break; \
		case 2: __put_user_asm("sh", __pu_addr); break; \
		case 4: __put_user_asm("sw", __pu_addr); break; \
		case 8: __PUT_USER_DW(__pu_addr); break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})
/*
 * Store one aligned item to user space.  %z2 emits $0 when __pu_val is
 * the constant 0 ("J" constraint), avoiding a register load.  A fault at
 * label 1 is redirected to the .fixup stub, which sets __pu_err = -EFAULT.
 * NOTE: expands in a scope that declares __pu_err and __pu_val.
 */
#define __put_user_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_asm\n" \
	"2: \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	"i" (-EFAULT)); \
}

/*
 * Store a 64-bit value using two 32-bit "sw" instructions; both may
 * fault, so each has its own __ex_table entry pointing at the same
 * fixup stub.
 */
#define __put_user_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1: sw %2, (%3) # __put_user_asm_ll32 \n" \
	"2: sw %D2, 4(%3) \n" \
	"3: \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	"i" (-EFAULT)); \
}

/* Deliberately undefined: referencing it turns a bad size into a link error. */
extern void __put_user_unknown(void);
  351. /*
  352. * put_user_unaligned: - Write a simple value into user space.
  353. * @x: Value to copy to user space.
  354. * @ptr: Destination address, in user space.
  355. *
  356. * Context: User context only. This function may sleep.
  357. *
  358. * This macro copies a single simple value from kernel space to user
  359. * space. It supports simple types like char and int, but not larger
  360. * data types like structures or arrays.
  361. *
  362. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  363. * to the result of dereferencing @ptr.
  364. *
  365. * Returns zero on success, or -EFAULT on error.
  366. */
/* Checked store tolerating an unaligned @ptr; see the kernel-doc above. */
#define put_user_unaligned(x,ptr) \
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
  369. /*
  370. * get_user_unaligned: - Get a simple variable from user space.
  371. * @x: Variable to store result.
  372. * @ptr: Source address, in user space.
  373. *
  374. * Context: User context only. This function may sleep.
  375. *
  376. * This macro copies a single simple variable from user space to kernel
  377. * space. It supports simple types like char and int, but not larger
  378. * data types like structures or arrays.
  379. *
  380. * @ptr must have pointer-to-simple-variable type, and the result of
  381. * dereferencing @ptr must be assignable to @x without a cast.
  382. *
  383. * Returns zero on success, or -EFAULT on error.
  384. * On error, the variable @x is set to zero.
  385. */
/* Checked load tolerating an unaligned @ptr; see the kernel-doc above. */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
  388. /*
  389. * __put_user_unaligned: - Write a simple value into user space, with less checking.
  390. * @x: Value to copy to user space.
  391. * @ptr: Destination address, in user space.
  392. *
  393. * Context: User context only. This function may sleep.
  394. *
  395. * This macro copies a single simple value from kernel space to user
  396. * space. It supports simple types like char and int, but not larger
  397. * data types like structures or arrays.
  398. *
  399. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  400. * to the result of dereferencing @ptr.
  401. *
  402. * Caller must check the pointer with access_ok() before calling this
  403. * function.
  404. *
  405. * Returns zero on success, or -EFAULT on error.
  406. */
/* Unchecked unaligned store: caller must have validated @ptr with access_ok(). */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
  409. /*
  410. * __get_user_unaligned: - Get a simple variable from user space, with less checking.
  411. * @x: Variable to store result.
  412. * @ptr: Source address, in user space.
  413. *
  414. * Context: User context only. This function may sleep.
  415. *
  416. * This macro copies a single simple variable from user space to kernel
  417. * space. It supports simple types like char and int, but not larger
  418. * data types like structures or arrays.
  419. *
  420. * @ptr must have pointer-to-simple-variable type, and the result of
  421. * dereferencing @ptr must be assignable to @x without a cast.
  422. *
  423. * Caller must check the pointer with access_ok() before calling this
  424. * function.
  425. *
  426. * Returns zero on success, or -EFAULT on error.
  427. * On error, the variable @x is set to zero.
  428. */
  429. #define __get_user_unaligned(x,ptr) \
  430. __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
  431. /*
  432. * Yuck. We need two variants, one for 64bit operation and one
  433. * for 32 bit mode and old iron.
  434. */
/*
 * 8-byte unaligned user load: "uld" macro-instruction on 64-bit kernels,
 * pair of "ulw" loads on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

/* Deliberately undefined: referencing it turns a bad size into a link error. */
extern void __get_user_unaligned_unknown(void);

/* Dispatch on size; a single byte needs no unaligned handling ("lb"). */
#define __get_user_unaligned_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_user_asm(val, "lb", ptr); break; \
	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
	default: __get_user_unaligned_unknown(); break; \
	} \
} while (0)

/*
 * __get_user_unaligned() implementation: no access_ok() check.  The asm
 * helpers write __gu_err (0 on success, -EFAULT via the exception fixup).
 */
#define __get_user_unaligned_nocheck(x,ptr,size) \
({ \
	int __gu_err; \
\
	__get_user_unaligned_common((x), size, ptr); \
	__gu_err; \
})

/*
 * get_user_unaligned() implementation: validates the source range first;
 * leaves __gu_err at -EFAULT when access_ok() rejects it.
 */
#define __get_user_unaligned_check(x,ptr,size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_unaligned_common((x), size, __gu_ptr); \
\
	__gu_err; \
})
/*
 * Unaligned load from user space.  The "ulh"/"ulw"/"uld" macro
 * instructions expand to two real instructions, so each access gets
 * TWO __ex_table entries (1b and 1b + 4); a fault in either half jumps
 * to the fixup stub that sets __gu_err = -EFAULT.
 * NOTE: expands in a scope that declares "int __gu_err".
 */
#define __get_user_unaligned_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: " insn " %1, %3 \n" \
	"2: \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" "__UA_ADDR "\t1b, 3b \n" \
	" "__UA_ADDR "\t1b + 4, 3b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a long long 64 using 32 bit registers.
 * Two "ulw" macro instructions (four real instructions, hence four
 * __ex_table entries) fill the register pair %1 / %D1; the fixup
 * zeroes the pair and sets -EFAULT on any fault.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
	unsigned long long __gu_tmp; \
\
	__asm__ __volatile__( \
	"1: ulw %1, (%3) \n" \
	"2: ulw %D1, 4(%3) \n" \
	" move %0, $0 \n" \
	"3: .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" move %1, $0 \n" \
	" move %D1, $0 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous \n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}
  518. /*
  519. * Yuck. We need two variants, one for 64bit operation and one
  520. * for 32 bit mode and old iron.
  521. */
/*
 * 8-byte unaligned user store: "usd" macro-instruction on 64-bit
 * kernels, pair of stores on 32-bit kernels.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

/*
 * __put_user_unaligned() implementation: no access_ok() check.  A single
 * byte needs no unaligned handling ("sb").  The asm helpers read
 * __pu_val and set __pu_err (0 on success, -EFAULT on fault).
 */
#define __put_user_unaligned_nocheck(x,ptr,size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
\
	__pu_val = (x); \
	switch (size) { \
	case 1: __put_user_asm("sb", ptr); break; \
	case 2: __put_user_unaligned_asm("ush", ptr); break; \
	case 4: __put_user_unaligned_asm("usw", ptr); break; \
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
	default: __put_user_unaligned_unknown(); break; \
	} \
	__pu_err; \
})
  543. #define __put_user_unaligned_check(x,ptr,size) \
  544. ({ \
  545. __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
  546. __typeof__(*(ptr)) __pu_val = (x); \
  547. int __pu_err = -EFAULT; \
  548. \
  549. if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
  550. switch (size) { \
  551. case 1: __put_user_asm("sb", __pu_addr); break; \
  552. case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
  553. case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
  554. case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \
  555. default: __put_user_unaligned_unknown(); break; \
  556. } \
  557. } \
  558. __pu_err; \
  559. })
/*
 * Unaligned store to user space.  %z2 emits $0 for a constant-zero
 * __pu_val ("J" constraint).  A fault at label 1 is redirected to the
 * .fixup stub, which sets __pu_err = -EFAULT.
 * NOTE: expands in a scope that declares __pu_err and __pu_val.
 */
#define __put_user_unaligned_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
	"2: \n" \
	" .section .fixup,\"ax\" \n" \
	"3: li %0, %4 \n" \
	" j 2b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 3b \n" \
	" .previous \n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	"i" (-EFAULT)); \
}

/*
 * Store a 64-bit value with two 32-bit stores; four __ex_table entries
 * cover both halves of each (potentially expanded) store.
 */
#define __put_user_unaligned_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
	"2: sw %D2, 4(%3) \n" \
	"3: \n" \
	" .section .fixup,\"ax\" \n" \
	"4: li %0, %4 \n" \
	" j 3b \n" \
	" .previous \n" \
	" .section __ex_table,\"a\" \n" \
	" " __UA_ADDR " 1b, 4b \n" \
	" " __UA_ADDR " 1b + 4, 4b \n" \
	" " __UA_ADDR " 2b, 4b \n" \
	" " __UA_ADDR " 2b + 4, 4b \n" \
	" .previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	"i" (-EFAULT)); \
}

/* Deliberately undefined: referencing it turns a bad size into a link error. */
extern void __put_user_unaligned_unknown(void);
  597. /*
  598. * We're generating jump to subroutines which will be outside the range of
  599. * jump instructions
  600. */
/*
 * Call a copy subroutine from inline asm.  Modules may be loaded out of
 * jal's 256 MB segment range, so they load the target address into $1
 * (with at temporarily disabled) and use jalr instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	__UA_LA "\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif

/*
 * Extra register clobbered by the copy routines when the daddi errata
 * workaround is enabled; $0 (never written) otherwise.
 */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

/* Assembly copy routine; returns the number of bytes NOT copied. */
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

/*
 * Invoke __copy_user with the MIPS calling convention pinned explicitly:
 * args in $4/$5/$6, residual count back in $6.  The clobber list names
 * the temporaries the assembly routine uses.
 */
#define __invoke_copy_to_user(to, from, n) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
  634. /*
  635. * __copy_to_user: - Copy a block of data into user space, with less checking.
  636. * @to: Destination address, in user space.
  637. * @from: Source address, in kernel space.
  638. * @n: Number of bytes to copy.
  639. *
  640. * Context: User context only. This function may sleep.
  641. *
  642. * Copy data from kernel space to user space. Caller must check
  643. * the specified block with access_ok() before calling this function.
  644. *
  645. * Returns number of bytes that could not be copied.
  646. * On success, this will be zero.
  647. */
/*
 * Unchecked kernel -> user copy (see kernel-doc above): caller has done
 * access_ok().  Returns the number of bytes that could not be copied.
 */
#define __copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

/* Copy routine that must not sleep (used from atomic/pagefault-disabled context). */
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

/* As __copy_to_user() but without might_fault(): callable from atomic context. */
#define __copy_to_user_inatomic(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
	__cu_len; \
})

/* Unchecked user -> kernel copy from atomic context; no sleeping, no check. */
#define __copy_from_user_inatomic(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
		__cu_len); \
	__cu_len; \
})
  687. /*
  688. * copy_to_user: - Copy a block of data into user space.
  689. * @to: Destination address, in user space.
  690. * @from: Source address, in kernel space.
  691. * @n: Number of bytes to copy.
  692. *
  693. * Context: User context only. This function may sleep.
  694. *
  695. * Copy data from kernel space to user space.
  696. *
  697. * Returns number of bytes that could not be copied.
  698. * On success, this will be zero.
  699. */
/*
 * Checked kernel -> user copy (see kernel-doc above).  If access_ok()
 * fails, nothing is copied and the full length is returned.
 */
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
\
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
		might_fault(); \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
			__cu_len); \
	} \
	__cu_len; \
})

/*
 * Invoke __copy_user for a user -> kernel copy.  After the call, $1 is
 * set to from + n (with at disabled); the fault handler uses it to
 * compute how many bytes remained.  noreorder keeps the addu in the
 * instruction slot the handler expects.
 */
#define __invoke_copy_from_user(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

/* As __invoke_copy_from_user() but calls the non-sleeping copy routine. */
#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
\
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user_inatomic) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
	DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})
  760. /*
  761. * __copy_from_user: - Copy a block of data from user space, with less checking.
  762. * @to: Destination address, in kernel space.
  763. * @from: Source address, in user space.
  764. * @n: Number of bytes to copy.
  765. *
  766. * Context: User context only. This function may sleep.
  767. *
  768. * Copy data from user space to kernel space. Caller must check
  769. * the specified block with access_ok() before calling this function.
  770. *
  771. * Returns number of bytes that could not be copied.
  772. * On success, this will be zero.
  773. *
  774. * If some data could not be copied, this function will pad the copied
  775. * data to the requested size using zero bytes.
  776. */
  777. #define __copy_from_user(to, from, n) \
  778. ({ \
  779. void *__cu_to; \
  780. const void __user *__cu_from; \
  781. long __cu_len; \
  782. \
  783. __cu_to = (to); \
  784. __cu_from = (from); \
  785. __cu_len = (n); \
  786. might_fault(); \
  787. __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
  788. __cu_len); \
  789. __cu_len; \
  790. })
  791. /*
  792. * copy_from_user: - Copy a block of data from user space.
  793. * @to: Destination address, in kernel space.
  794. * @from: Source address, in user space.
  795. * @n: Number of bytes to copy.
  796. *
  797. * Context: User context only. This function may sleep.
  798. *
  799. * Copy data from user space to kernel space.
  800. *
  801. * Returns number of bytes that could not be copied.
  802. * On success, this will be zero.
  803. *
  804. * If some data could not be copied, this function will pad the copied
  805. * data to the requested size using zero bytes.
  806. */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	/*								\
	 * Touch user memory only after the whole range has been	\
	 * validated.  If access_ok() fails, __cu_len is returned	\
	 * unchanged, i.e. all @n bytes count as "not copied".		\
	 */								\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {		\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
/*
 * Copy between two user-space buffers without access_ok() checks;
 * the caller must have validated both ranges.
 *
 * NOTE(review): this reuses __invoke_copy_from_user for a user-to-user
 * copy — valid when user and kernel accesses use the same load/store
 * instructions; confirm this still holds for EVA configurations.
 */
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	might_fault();	/* may sleep while faulting in user pages */	\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
					   __cu_len);			\
	__cu_len;							\
})
/*
 * Copy between two user-space buffers, checking both ranges first.
 * Returns the number of bytes that could not be copied; if either
 * access_ok() check fails, @n is returned unchanged.
 *
 * NOTE(review): delegates to __invoke_copy_from_user for the actual
 * user-to-user copy — see the note on __copy_in_user above... confirm
 * this is intended for all (e.g. EVA) configurations.
 */
#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
  854. /*
  855. * __clear_user: - Zero a block of memory in user space, with less checking.
  856. * @to: Destination address, in user space.
  857. * @n: Number of bytes to zero.
  858. *
  859. * Zero a block of memory in user space. Caller must check
  860. * the specified block with access_ok() before calling this function.
  861. *
  862. * Returns number of bytes that could not be cleared.
  863. * On success, this will be zero.
  864. */
/*
 * Zero @size bytes of user memory at @addr.  No access_ok() check is
 * performed; the caller must have validated the range.  Returns the
 * number of bytes that could not be cleared (0 on success).
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = destination address */
		"move\t$5, $0\n\t"	/* $5 (a1) = 0, per __bzero's convention */
		"move\t$6, %2\n\t"	/* $6 (a2) = byte count */
		__MODULE_JAL(__bzero)
		"move\t%0, $6"		/* residual count comes back in $6 */
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	return res;
}
/*
 * Zero a block of user memory after checking the range is writable.
 * Returns the number of bytes not cleared; if @n is zero or the
 * access_ok() check fails, @n is returned unchanged.
 */
#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
				   __cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
  890. /*
  891. * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
  892. * @dst: Destination address, in kernel space. This buffer must be at
  893. * least @count bytes long.
  894. * @src: Source address, in user space.
  895. * @count: Maximum number of bytes to copy, including the trailing NUL.
  896. *
  897. * Copies a NUL-terminated string from userspace to kernel space.
  898. * Caller must check the specified block with access_ok() before calling
  899. * this function.
  900. *
  901. * On success, returns the length of the string (not including the trailing
  902. * NUL).
  903. *
  904. * If access to userspace fails, returns -EFAULT (some data may have been
  905. * copied).
  906. *
  907. * If @count is smaller than the length of the string, copies @count bytes
  908. * and returns @count.
  909. */
/*
 * Unchecked strncpy from user space (see kernel-doc above): the caller
 * must have validated @__from with access_ok().  Returns the string
 * length (excluding the NUL), @__len if the string is longer, or
 * -EFAULT on a faulting access.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = kernel destination */
		"move\t$5, %2\n\t"	/* $5 (a1) = user source */
		"move\t$6, %3\n\t"	/* $6 (a2) = max length */
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"		/* result returned in $2 (v0) */
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	return res;
}
  926. /*
  927. * strncpy_from_user: - Copy a NUL terminated string from userspace.
  928. * @dst: Destination address, in kernel space. This buffer must be at
  929. * least @count bytes long.
  930. * @src: Source address, in user space.
  931. * @count: Maximum number of bytes to copy, including the trailing NUL.
  932. *
  933. * Copies a NUL-terminated string from userspace to kernel space.
  934. *
  935. * On success, returns the length of the string (not including the trailing
  936. * NUL).
  937. *
  938. * If access to userspace fails, returns -EFAULT (some data may have been
  939. * copied).
  940. *
  941. * If @count is smaller than the length of the string, copies @count bytes
  942. * and returns @count.
  943. */
/*
 * Checked strncpy from user space (see kernel-doc above); the asm
 * helper performs its own access validation.  Returns the string
 * length (excluding the NUL), @__len if the string is longer, or
 * -EFAULT on a faulting access.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = kernel destination */
		"move\t$5, %2\n\t"	/* $5 (a1) = user source */
		"move\t$6, %3\n\t"	/* $6 (a2) = max length */
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"		/* result returned in $2 (v0) */
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	return res;
}
  960. /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * Unchecked strlen on a user string: returns 0 if the access faults,
 * otherwise string length + 1 (i.e. the memory size including the
 * terminating NUL).  Caller must have validated @s with access_ok().
 */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = string address */
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"		/* result returned in $2 (v0) */
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");
	return res;
}
  974. /*
  975. * strlen_user: - Get the size of a string in user space.
  976. * @str: The string to measure.
  977. *
  978. * Context: User context only. This function may sleep.
  979. *
  980. * Get the size of a NUL-terminated string in user space.
  981. *
  982. * Returns the size of the string INCLUDING the terminating NUL.
  983. * On exception, returns 0.
  984. *
  985. * If there is a limit on the length of a valid string, you may wish to
  986. * consider using strnlen_user() instead.
  987. */
/*
 * Checked strlen on a user string (see kernel-doc above); the asm
 * helper performs its own access validation.  Returns the size
 * INCLUDING the terminating NUL, or 0 on exception.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = string address */
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"		/* result returned in $2 (v0) */
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");
	return res;
}
  1001. /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * Unchecked, length-limited strlen on a user string: returns 0 if the
 * access faults, otherwise string length + 1 (memory size including
 * the NUL), scanning at most @n bytes.  Caller must have validated @s.
 */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = string address */
		"move\t$5, %2\n\t"	/* $5 (a1) = max byte count */
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"		/* result returned in $2 (v0) */
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");
	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space, limited to @n.
 * @str: The string to measure.
 * @n: The maximum number of bytes to scan, including the terminating NUL.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space, scanning at
 * most @n bytes.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
/*
 * Checked, length-limited strlen on a user string (see kernel-doc
 * above); the asm helper performs its own access validation.  Returns
 * the size INCLUDING the terminating NUL, or 0 on exception.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();	/* may sleep while faulting in user pages */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"	/* $4 (a0) = string address */
		"move\t$5, %2\n\t"	/* $5 (a1) = max byte count */
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"		/* result returned in $2 (v0) */
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");
	return res;
}
/*
 * Exception table entry: @insn is the address of an instruction that
 * may fault on a user access; @nextinsn is the address the fault
 * handler (see fixup_exception() below) resumes at instead of oopsing.
 */
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};
  1049. extern int fixup_exception(struct pt_regs *regs);
  1050. #endif /* _ASM_UACCESS_H */