/* arch/mips/include/asm/uaccess.h */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
  7. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  8. * Copyright (C) 2007 Maciej W. Rozycki
  9. */
  10. #ifndef _ASM_UACCESS_H
  11. #define _ASM_UACCESS_H
  12. #include <linux/kernel.h>
  13. #include <linux/errno.h>
  14. #include <linux/thread_info.h>
  15. /*
  16. * The fs value determines whether argument validity checking should be
  17. * performed or not. If get_fs() == USER_DS, checking is performed, with
  18. * get_fs() == KERNEL_DS, checking is bypassed.
  19. *
  20. * For historical reasons, these macros are grossly misnamed.
  21. */
  22. #ifdef CONFIG_32BIT
  23. #define __UA_LIMIT 0x80000000UL
  24. #define __UA_ADDR ".word"
  25. #define __UA_LA "la"
  26. #define __UA_ADDU "addu"
  27. #define __UA_t0 "$8"
  28. #define __UA_t1 "$9"
  29. #endif /* CONFIG_32BIT */
  30. #ifdef CONFIG_64BIT
  31. #define __UA_LIMIT (- TASK_SIZE)
  32. #define __UA_ADDR ".dword"
  33. #define __UA_LA "dla"
  34. #define __UA_ADDU "daddu"
  35. #define __UA_t0 "$12"
  36. #define __UA_t1 "$13"
  37. #endif /* CONFIG_64BIT */
  38. /*
  39. * USER_DS is a bitmask that has the bits set that may not be set in a valid
  40. * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
  41. * the arithmetic we're doing only works if the limit is a power of two, so
  42. * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
  43. * address in this range it's the process's problem, not ours :-)
  44. */
  45. #define KERNEL_DS ((mm_segment_t) { 0UL })
  46. #define USER_DS ((mm_segment_t) { __UA_LIMIT })
  47. #define VERIFY_READ 0
  48. #define VERIFY_WRITE 1
  49. #define get_ds() (KERNEL_DS)
  50. #define get_fs() (current_thread_info()->addr_limit)
  51. #define set_fs(x) (current_thread_info()->addr_limit = (x))
  52. #define segment_eq(a, b) ((a).seg == (b).seg)
  53. /*
  54. * Is a address valid? This does a straighforward calculation rather
  55. * than tests.
  56. *
  57. * Address valid if:
  58. * - "addr" doesn't have any high-bits set
  59. * - AND "size" doesn't have any high-bits set
  60. * - AND "addr+size" doesn't have any high-bits set
  61. * - OR we are in kernel mode.
  62. *
  63. * __ua_size() is a trick to avoid runtime checking of positive constant
  64. * sizes; for those we already know at compile time that the size is ok.
  65. */
  66. #define __ua_size(size) \
  67. ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
  68. /*
  69. * access_ok: - Checks if a user space pointer is valid
  70. * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
  71. * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
  72. * to write to a block, it is always safe to read from it.
  73. * @addr: User space pointer to start of block to check
  74. * @size: Size of block to check
  75. *
  76. * Context: User context only. This function may sleep.
  77. *
  78. * Checks if a pointer to a block of memory in user space is valid.
  79. *
  80. * Returns true (nonzero) if the memory block may be valid, false (zero)
  81. * if it is definitely invalid.
  82. *
  83. * Note that, depending on architecture, this function probably just
  84. * checks that the pointer is in the user space range - after calling
  85. * this function, memory access functions may still return -EFAULT.
  86. */
  87. #define __access_mask get_fs().seg
  88. #define __access_ok(addr, size, mask) \
  89. ({ \
  90. unsigned long __addr = (unsigned long) (addr); \
  91. unsigned long __size = size; \
  92. unsigned long __mask = mask; \
  93. unsigned long __ok; \
  94. \
  95. __chk_user_ptr(addr); \
  96. __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
  97. __ua_size(__size))); \
  98. __ok == 0; \
  99. })
  100. #define access_ok(type, addr, size) \
  101. likely(__access_ok((addr), (size), __access_mask))
  102. /*
  103. * put_user: - Write a simple value into user space.
  104. * @x: Value to copy to user space.
  105. * @ptr: Destination address, in user space.
  106. *
  107. * Context: User context only. This function may sleep.
  108. *
  109. * This macro copies a single simple value from kernel space to user
  110. * space. It supports simple types like char and int, but not larger
  111. * data types like structures or arrays.
  112. *
  113. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  114. * to the result of dereferencing @ptr.
  115. *
  116. * Returns zero on success, or -EFAULT on error.
  117. */
  118. #define put_user(x,ptr) \
  119. __put_user_check((x), (ptr), sizeof(*(ptr)))
  120. /*
  121. * get_user: - Get a simple variable from user space.
  122. * @x: Variable to store result.
  123. * @ptr: Source address, in user space.
  124. *
  125. * Context: User context only. This function may sleep.
  126. *
  127. * This macro copies a single simple variable from user space to kernel
  128. * space. It supports simple types like char and int, but not larger
  129. * data types like structures or arrays.
  130. *
  131. * @ptr must have pointer-to-simple-variable type, and the result of
  132. * dereferencing @ptr must be assignable to @x without a cast.
  133. *
  134. * Returns zero on success, or -EFAULT on error.
  135. * On error, the variable @x is set to zero.
  136. */
  137. #define get_user(x,ptr) \
  138. __get_user_check((x), (ptr), sizeof(*(ptr)))
  139. /*
  140. * __put_user: - Write a simple value into user space, with less checking.
  141. * @x: Value to copy to user space.
  142. * @ptr: Destination address, in user space.
  143. *
  144. * Context: User context only. This function may sleep.
  145. *
  146. * This macro copies a single simple value from kernel space to user
  147. * space. It supports simple types like char and int, but not larger
  148. * data types like structures or arrays.
  149. *
  150. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  151. * to the result of dereferencing @ptr.
  152. *
  153. * Caller must check the pointer with access_ok() before calling this
  154. * function.
  155. *
  156. * Returns zero on success, or -EFAULT on error.
  157. */
  158. #define __put_user(x,ptr) \
  159. __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
  160. /*
  161. * __get_user: - Get a simple variable from user space, with less checking.
  162. * @x: Variable to store result.
  163. * @ptr: Source address, in user space.
  164. *
  165. * Context: User context only. This function may sleep.
  166. *
  167. * This macro copies a single simple variable from user space to kernel
  168. * space. It supports simple types like char and int, but not larger
  169. * data types like structures or arrays.
  170. *
  171. * @ptr must have pointer-to-simple-variable type, and the result of
  172. * dereferencing @ptr must be assignable to @x without a cast.
  173. *
  174. * Caller must check the pointer with access_ok() before calling this
  175. * function.
  176. *
  177. * Returns zero on success, or -EFAULT on error.
  178. * On error, the variable @x is set to zero.
  179. */
  180. #define __get_user(x,ptr) \
  181. __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
  182. struct __large_struct { unsigned long buf[100]; };
  183. #define __m(x) (*(struct __large_struct __user *)(x))
  184. /*
  185. * Yuck. We need two variants, one for 64bit operation and one
  186. * for 32 bit mode and old iron.
  187. */
  188. #ifdef CONFIG_32BIT
  189. #define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
  190. #endif
  191. #ifdef CONFIG_64BIT
  192. #define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
  193. #endif
  194. extern void __get_user_unknown(void);
  195. #define __get_user_common(val, size, ptr) \
  196. do { \
  197. switch (size) { \
  198. case 1: __get_user_asm(val, "lb", ptr); break; \
  199. case 2: __get_user_asm(val, "lh", ptr); break; \
  200. case 4: __get_user_asm(val, "lw", ptr); break; \
  201. case 8: __GET_USER_DW(val, ptr); break; \
  202. default: __get_user_unknown(); break; \
  203. } \
  204. } while (0)
  205. #define __get_user_nocheck(x, ptr, size) \
  206. ({ \
  207. int __gu_err; \
  208. \
  209. __chk_user_ptr(ptr); \
  210. __get_user_common((x), size, ptr); \
  211. __gu_err; \
  212. })
  213. #define __get_user_check(x, ptr, size) \
  214. ({ \
  215. int __gu_err = -EFAULT; \
  216. const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
  217. \
  218. if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
  219. __get_user_common((x), size, __gu_ptr); \
  220. \
  221. __gu_err; \
  222. })
  223. #define __get_user_asm(val, insn, addr) \
  224. { \
  225. long __gu_tmp; \
  226. \
  227. __asm__ __volatile__( \
  228. "1: " insn " %1, %3 \n" \
  229. "2: \n" \
  230. " .section .fixup,\"ax\" \n" \
  231. "3: li %0, %4 \n" \
  232. " j 2b \n" \
  233. " .previous \n" \
  234. " .section __ex_table,\"a\" \n" \
  235. " "__UA_ADDR "\t1b, 3b \n" \
  236. " .previous \n" \
  237. : "=r" (__gu_err), "=r" (__gu_tmp) \
  238. : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
  239. \
  240. (val) = (__typeof__(*(addr))) __gu_tmp; \
  241. }
  242. /*
  243. * Get a long long 64 using 32 bit registers.
  244. */
  245. #define __get_user_asm_ll32(val, addr) \
  246. { \
  247. union { \
  248. unsigned long long l; \
  249. __typeof__(*(addr)) t; \
  250. } __gu_tmp; \
  251. \
  252. __asm__ __volatile__( \
  253. "1: lw %1, (%3) \n" \
  254. "2: lw %D1, 4(%3) \n" \
  255. "3: .section .fixup,\"ax\" \n" \
  256. "4: li %0, %4 \n" \
  257. " move %1, $0 \n" \
  258. " move %D1, $0 \n" \
  259. " j 3b \n" \
  260. " .previous \n" \
  261. " .section __ex_table,\"a\" \n" \
  262. " " __UA_ADDR " 1b, 4b \n" \
  263. " " __UA_ADDR " 2b, 4b \n" \
  264. " .previous \n" \
  265. : "=r" (__gu_err), "=&r" (__gu_tmp.l) \
  266. : "0" (0), "r" (addr), "i" (-EFAULT)); \
  267. \
  268. (val) = __gu_tmp.t; \
  269. }
  270. /*
  271. * Yuck. We need two variants, one for 64bit operation and one
  272. * for 32 bit mode and old iron.
  273. */
  274. #ifdef CONFIG_32BIT
  275. #define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
  276. #endif
  277. #ifdef CONFIG_64BIT
  278. #define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
  279. #endif
  280. #define __put_user_nocheck(x, ptr, size) \
  281. ({ \
  282. __typeof__(*(ptr)) __pu_val; \
  283. int __pu_err = 0; \
  284. \
  285. __chk_user_ptr(ptr); \
  286. __pu_val = (x); \
  287. switch (size) { \
  288. case 1: __put_user_asm("sb", ptr); break; \
  289. case 2: __put_user_asm("sh", ptr); break; \
  290. case 4: __put_user_asm("sw", ptr); break; \
  291. case 8: __PUT_USER_DW(ptr); break; \
  292. default: __put_user_unknown(); break; \
  293. } \
  294. __pu_err; \
  295. })
  296. #define __put_user_check(x, ptr, size) \
  297. ({ \
  298. __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
  299. __typeof__(*(ptr)) __pu_val = (x); \
  300. int __pu_err = -EFAULT; \
  301. \
  302. if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
  303. switch (size) { \
  304. case 1: __put_user_asm("sb", __pu_addr); break; \
  305. case 2: __put_user_asm("sh", __pu_addr); break; \
  306. case 4: __put_user_asm("sw", __pu_addr); break; \
  307. case 8: __PUT_USER_DW(__pu_addr); break; \
  308. default: __put_user_unknown(); break; \
  309. } \
  310. } \
  311. __pu_err; \
  312. })
  313. #define __put_user_asm(insn, ptr) \
  314. { \
  315. __asm__ __volatile__( \
  316. "1: " insn " %z2, %3 # __put_user_asm\n" \
  317. "2: \n" \
  318. " .section .fixup,\"ax\" \n" \
  319. "3: li %0, %4 \n" \
  320. " j 2b \n" \
  321. " .previous \n" \
  322. " .section __ex_table,\"a\" \n" \
  323. " " __UA_ADDR " 1b, 3b \n" \
  324. " .previous \n" \
  325. : "=r" (__pu_err) \
  326. : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
  327. "i" (-EFAULT)); \
  328. }
  329. #define __put_user_asm_ll32(ptr) \
  330. { \
  331. __asm__ __volatile__( \
  332. "1: sw %2, (%3) # __put_user_asm_ll32 \n" \
  333. "2: sw %D2, 4(%3) \n" \
  334. "3: \n" \
  335. " .section .fixup,\"ax\" \n" \
  336. "4: li %0, %4 \n" \
  337. " j 3b \n" \
  338. " .previous \n" \
  339. " .section __ex_table,\"a\" \n" \
  340. " " __UA_ADDR " 1b, 4b \n" \
  341. " " __UA_ADDR " 2b, 4b \n" \
  342. " .previous" \
  343. : "=r" (__pu_err) \
  344. : "0" (0), "r" (__pu_val), "r" (ptr), \
  345. "i" (-EFAULT)); \
  346. }
  347. extern void __put_user_unknown(void);
  348. /*
  349. * put_user_unaligned: - Write a simple value into user space.
  350. * @x: Value to copy to user space.
  351. * @ptr: Destination address, in user space.
  352. *
  353. * Context: User context only. This function may sleep.
  354. *
  355. * This macro copies a single simple value from kernel space to user
  356. * space. It supports simple types like char and int, but not larger
  357. * data types like structures or arrays.
  358. *
  359. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  360. * to the result of dereferencing @ptr.
  361. *
  362. * Returns zero on success, or -EFAULT on error.
  363. */
  364. #define put_user_unaligned(x,ptr) \
  365. __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
  366. /*
  367. * get_user_unaligned: - Get a simple variable from user space.
  368. * @x: Variable to store result.
  369. * @ptr: Source address, in user space.
  370. *
  371. * Context: User context only. This function may sleep.
  372. *
  373. * This macro copies a single simple variable from user space to kernel
  374. * space. It supports simple types like char and int, but not larger
  375. * data types like structures or arrays.
  376. *
  377. * @ptr must have pointer-to-simple-variable type, and the result of
  378. * dereferencing @ptr must be assignable to @x without a cast.
  379. *
  380. * Returns zero on success, or -EFAULT on error.
  381. * On error, the variable @x is set to zero.
  382. */
  383. #define get_user_unaligned(x,ptr) \
  384. __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
  385. /*
  386. * __put_user_unaligned: - Write a simple value into user space, with less checking.
  387. * @x: Value to copy to user space.
  388. * @ptr: Destination address, in user space.
  389. *
  390. * Context: User context only. This function may sleep.
  391. *
  392. * This macro copies a single simple value from kernel space to user
  393. * space. It supports simple types like char and int, but not larger
  394. * data types like structures or arrays.
  395. *
  396. * @ptr must have pointer-to-simple-variable type, and @x must be assignable
  397. * to the result of dereferencing @ptr.
  398. *
  399. * Caller must check the pointer with access_ok() before calling this
  400. * function.
  401. *
  402. * Returns zero on success, or -EFAULT on error.
  403. */
  404. #define __put_user_unaligned(x,ptr) \
  405. __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
  406. /*
  407. * __get_user_unaligned: - Get a simple variable from user space, with less checking.
  408. * @x: Variable to store result.
  409. * @ptr: Source address, in user space.
  410. *
  411. * Context: User context only. This function may sleep.
  412. *
  413. * This macro copies a single simple variable from user space to kernel
  414. * space. It supports simple types like char and int, but not larger
  415. * data types like structures or arrays.
  416. *
  417. * @ptr must have pointer-to-simple-variable type, and the result of
  418. * dereferencing @ptr must be assignable to @x without a cast.
  419. *
  420. * Caller must check the pointer with access_ok() before calling this
  421. * function.
  422. *
  423. * Returns zero on success, or -EFAULT on error.
  424. * On error, the variable @x is set to zero.
  425. */
  426. #define __get_user_unaligned(x,ptr) \
  427. __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
  428. /*
  429. * Yuck. We need two variants, one for 64bit operation and one
  430. * for 32 bit mode and old iron.
  431. */
  432. #ifdef CONFIG_32BIT
  433. #define __GET_USER_UNALIGNED_DW(val, ptr) \
  434. __get_user_unaligned_asm_ll32(val, ptr)
  435. #endif
  436. #ifdef CONFIG_64BIT
  437. #define __GET_USER_UNALIGNED_DW(val, ptr) \
  438. __get_user_unaligned_asm(val, "uld", ptr)
  439. #endif
  440. extern void __get_user_unaligned_unknown(void);
  441. #define __get_user_unaligned_common(val, size, ptr) \
  442. do { \
  443. switch (size) { \
  444. case 1: __get_user_asm(val, "lb", ptr); break; \
  445. case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
  446. case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
  447. case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
  448. default: __get_user_unaligned_unknown(); break; \
  449. } \
  450. } while (0)
  451. #define __get_user_unaligned_nocheck(x,ptr,size) \
  452. ({ \
  453. int __gu_err; \
  454. \
  455. __get_user_unaligned_common((x), size, ptr); \
  456. __gu_err; \
  457. })
  458. #define __get_user_unaligned_check(x,ptr,size) \
  459. ({ \
  460. int __gu_err = -EFAULT; \
  461. const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
  462. \
  463. if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
  464. __get_user_unaligned_common((x), size, __gu_ptr); \
  465. \
  466. __gu_err; \
  467. })
  468. #define __get_user_unaligned_asm(val, insn, addr) \
  469. { \
  470. long __gu_tmp; \
  471. \
  472. __asm__ __volatile__( \
  473. "1: " insn " %1, %3 \n" \
  474. "2: \n" \
  475. " .section .fixup,\"ax\" \n" \
  476. "3: li %0, %4 \n" \
  477. " j 2b \n" \
  478. " .previous \n" \
  479. " .section __ex_table,\"a\" \n" \
  480. " "__UA_ADDR "\t1b, 3b \n" \
  481. " "__UA_ADDR "\t1b + 4, 3b \n" \
  482. " .previous \n" \
  483. : "=r" (__gu_err), "=r" (__gu_tmp) \
  484. : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
  485. \
  486. (val) = (__typeof__(*(addr))) __gu_tmp; \
  487. }
  488. /*
  489. * Get a long long 64 using 32 bit registers.
  490. */
  491. #define __get_user_unaligned_asm_ll32(val, addr) \
  492. { \
  493. unsigned long long __gu_tmp; \
  494. \
  495. __asm__ __volatile__( \
  496. "1: ulw %1, (%3) \n" \
  497. "2: ulw %D1, 4(%3) \n" \
  498. " move %0, $0 \n" \
  499. "3: .section .fixup,\"ax\" \n" \
  500. "4: li %0, %4 \n" \
  501. " move %1, $0 \n" \
  502. " move %D1, $0 \n" \
  503. " j 3b \n" \
  504. " .previous \n" \
  505. " .section __ex_table,\"a\" \n" \
  506. " " __UA_ADDR " 1b, 4b \n" \
  507. " " __UA_ADDR " 1b + 4, 4b \n" \
  508. " " __UA_ADDR " 2b, 4b \n" \
  509. " " __UA_ADDR " 2b + 4, 4b \n" \
  510. " .previous \n" \
  511. : "=r" (__gu_err), "=&r" (__gu_tmp) \
  512. : "0" (0), "r" (addr), "i" (-EFAULT)); \
  513. (val) = (__typeof__(*(addr))) __gu_tmp; \
  514. }
  515. /*
  516. * Yuck. We need two variants, one for 64bit operation and one
  517. * for 32 bit mode and old iron.
  518. */
  519. #ifdef CONFIG_32BIT
  520. #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
  521. #endif
  522. #ifdef CONFIG_64BIT
  523. #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
  524. #endif
  525. #define __put_user_unaligned_nocheck(x,ptr,size) \
  526. ({ \
  527. __typeof__(*(ptr)) __pu_val; \
  528. int __pu_err = 0; \
  529. \
  530. __pu_val = (x); \
  531. switch (size) { \
  532. case 1: __put_user_asm("sb", ptr); break; \
  533. case 2: __put_user_unaligned_asm("ush", ptr); break; \
  534. case 4: __put_user_unaligned_asm("usw", ptr); break; \
  535. case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
  536. default: __put_user_unaligned_unknown(); break; \
  537. } \
  538. __pu_err; \
  539. })
  540. #define __put_user_unaligned_check(x,ptr,size) \
  541. ({ \
  542. __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
  543. __typeof__(*(ptr)) __pu_val = (x); \
  544. int __pu_err = -EFAULT; \
  545. \
  546. if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
  547. switch (size) { \
  548. case 1: __put_user_asm("sb", __pu_addr); break; \
  549. case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
  550. case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
  551. case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \
  552. default: __put_user_unaligned_unknown(); break; \
  553. } \
  554. } \
  555. __pu_err; \
  556. })
  557. #define __put_user_unaligned_asm(insn, ptr) \
  558. { \
  559. __asm__ __volatile__( \
  560. "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
  561. "2: \n" \
  562. " .section .fixup,\"ax\" \n" \
  563. "3: li %0, %4 \n" \
  564. " j 2b \n" \
  565. " .previous \n" \
  566. " .section __ex_table,\"a\" \n" \
  567. " " __UA_ADDR " 1b, 3b \n" \
  568. " .previous \n" \
  569. : "=r" (__pu_err) \
  570. : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
  571. "i" (-EFAULT)); \
  572. }
  573. #define __put_user_unaligned_asm_ll32(ptr) \
  574. { \
  575. __asm__ __volatile__( \
  576. "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
  577. "2: sw %D2, 4(%3) \n" \
  578. "3: \n" \
  579. " .section .fixup,\"ax\" \n" \
  580. "4: li %0, %4 \n" \
  581. " j 3b \n" \
  582. " .previous \n" \
  583. " .section __ex_table,\"a\" \n" \
  584. " " __UA_ADDR " 1b, 4b \n" \
  585. " " __UA_ADDR " 1b + 4, 4b \n" \
  586. " " __UA_ADDR " 2b, 4b \n" \
  587. " " __UA_ADDR " 2b + 4, 4b \n" \
  588. " .previous" \
  589. : "=r" (__pu_err) \
  590. : "0" (0), "r" (__pu_val), "r" (ptr), \
  591. "i" (-EFAULT)); \
  592. }
  593. extern void __put_user_unaligned_unknown(void);
  594. /*
  595. * We're generating jump to subroutines which will be outside the range of
  596. * jump instructions
  597. */
  598. #ifdef MODULE
  599. #define __MODULE_JAL(destination) \
  600. ".set\tnoat\n\t" \
  601. __UA_LA "\t$1, " #destination "\n\t" \
  602. "jalr\t$1\n\t" \
  603. ".set\tat\n\t"
  604. #else
  605. #define __MODULE_JAL(destination) \
  606. "jal\t" #destination "\n\t"
  607. #endif
  608. #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
  609. #define DADDI_SCRATCH "$0"
  610. #else
  611. #define DADDI_SCRATCH "$3"
  612. #endif
  613. extern size_t __copy_user(void *__to, const void *__from, size_t __n);
  614. #define __invoke_copy_to_user(to, from, n) \
  615. ({ \
  616. register void __user *__cu_to_r __asm__("$4"); \
  617. register const void *__cu_from_r __asm__("$5"); \
  618. register long __cu_len_r __asm__("$6"); \
  619. \
  620. __cu_to_r = (to); \
  621. __cu_from_r = (from); \
  622. __cu_len_r = (n); \
  623. __asm__ __volatile__( \
  624. __MODULE_JAL(__copy_user) \
  625. : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
  626. : \
  627. : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
  628. DADDI_SCRATCH, "memory"); \
  629. __cu_len_r; \
  630. })
  631. /*
  632. * __copy_to_user: - Copy a block of data into user space, with less checking.
  633. * @to: Destination address, in user space.
  634. * @from: Source address, in kernel space.
  635. * @n: Number of bytes to copy.
  636. *
  637. * Context: User context only. This function may sleep.
  638. *
  639. * Copy data from kernel space to user space. Caller must check
  640. * the specified block with access_ok() before calling this function.
  641. *
  642. * Returns number of bytes that could not be copied.
  643. * On success, this will be zero.
  644. */
  645. #define __copy_to_user(to, from, n) \
  646. ({ \
  647. void __user *__cu_to; \
  648. const void *__cu_from; \
  649. long __cu_len; \
  650. \
  651. might_sleep(); \
  652. __cu_to = (to); \
  653. __cu_from = (from); \
  654. __cu_len = (n); \
  655. __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
  656. __cu_len; \
  657. })
  658. extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
  659. #define __copy_to_user_inatomic(to, from, n) \
  660. ({ \
  661. void __user *__cu_to; \
  662. const void *__cu_from; \
  663. long __cu_len; \
  664. \
  665. __cu_to = (to); \
  666. __cu_from = (from); \
  667. __cu_len = (n); \
  668. __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
  669. __cu_len; \
  670. })
  671. #define __copy_from_user_inatomic(to, from, n) \
  672. ({ \
  673. void *__cu_to; \
  674. const void __user *__cu_from; \
  675. long __cu_len; \
  676. \
  677. __cu_to = (to); \
  678. __cu_from = (from); \
  679. __cu_len = (n); \
  680. __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
  681. __cu_len); \
  682. __cu_len; \
  683. })
  684. /*
  685. * copy_to_user: - Copy a block of data into user space.
  686. * @to: Destination address, in user space.
  687. * @from: Source address, in kernel space.
  688. * @n: Number of bytes to copy.
  689. *
  690. * Context: User context only. This function may sleep.
  691. *
  692. * Copy data from kernel space to user space.
  693. *
  694. * Returns number of bytes that could not be copied.
  695. * On success, this will be zero.
  696. */
  697. #define copy_to_user(to, from, n) \
  698. ({ \
  699. void __user *__cu_to; \
  700. const void *__cu_from; \
  701. long __cu_len; \
  702. \
  703. might_sleep(); \
  704. __cu_to = (to); \
  705. __cu_from = (from); \
  706. __cu_len = (n); \
  707. if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
  708. __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
  709. __cu_len); \
  710. __cu_len; \
  711. })
  712. #define __invoke_copy_from_user(to, from, n) \
  713. ({ \
  714. register void *__cu_to_r __asm__("$4"); \
  715. register const void __user *__cu_from_r __asm__("$5"); \
  716. register long __cu_len_r __asm__("$6"); \
  717. \
  718. __cu_to_r = (to); \
  719. __cu_from_r = (from); \
  720. __cu_len_r = (n); \
  721. __asm__ __volatile__( \
  722. ".set\tnoreorder\n\t" \
  723. __MODULE_JAL(__copy_user) \
  724. ".set\tnoat\n\t" \
  725. __UA_ADDU "\t$1, %1, %2\n\t" \
  726. ".set\tat\n\t" \
  727. ".set\treorder" \
  728. : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
  729. : \
  730. : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
  731. DADDI_SCRATCH, "memory"); \
  732. __cu_len_r; \
  733. })
  734. #define __invoke_copy_from_user_inatomic(to, from, n) \
  735. ({ \
  736. register void *__cu_to_r __asm__("$4"); \
  737. register const void __user *__cu_from_r __asm__("$5"); \
  738. register long __cu_len_r __asm__("$6"); \
  739. \
  740. __cu_to_r = (to); \
  741. __cu_from_r = (from); \
  742. __cu_len_r = (n); \
  743. __asm__ __volatile__( \
  744. ".set\tnoreorder\n\t" \
  745. __MODULE_JAL(__copy_user_inatomic) \
  746. ".set\tnoat\n\t" \
  747. __UA_ADDU "\t$1, %1, %2\n\t" \
  748. ".set\tat\n\t" \
  749. ".set\treorder" \
  750. : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
  751. : \
  752. : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
  753. DADDI_SCRATCH, "memory"); \
  754. __cu_len_r; \
  755. })
  756. /*
  757. * __copy_from_user: - Copy a block of data from user space, with less checking.
  758. * @to: Destination address, in kernel space.
  759. * @from: Source address, in user space.
  760. * @n: Number of bytes to copy.
  761. *
  762. * Context: User context only. This function may sleep.
  763. *
  764. * Copy data from user space to kernel space. Caller must check
  765. * the specified block with access_ok() before calling this function.
  766. *
  767. * Returns number of bytes that could not be copied.
  768. * On success, this will be zero.
  769. *
  770. * If some data could not be copied, this function will pad the copied
  771. * data to the requested size using zero bytes.
  772. */
  773. #define __copy_from_user(to, from, n) \
  774. ({ \
  775. void *__cu_to; \
  776. const void __user *__cu_from; \
  777. long __cu_len; \
  778. \
  779. might_sleep(); \
  780. __cu_to = (to); \
  781. __cu_from = (from); \
  782. __cu_len = (n); \
  783. __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
  784. __cu_len); \
  785. __cu_len; \
  786. })
  787. /*
  788. * copy_from_user: - Copy a block of data from user space.
  789. * @to: Destination address, in kernel space.
  790. * @from: Source address, in user space.
  791. * @n: Number of bytes to copy.
  792. *
  793. * Context: User context only. This function may sleep.
  794. *
  795. * Copy data from user space to kernel space.
  796. *
  797. * Returns number of bytes that could not be copied.
  798. * On success, this will be zero.
  799. *
  800. * If some data could not be copied, this function will pad the copied
  801. * data to the requested size using zero bytes.
  802. */
/*
 * Checked variant: if access_ok() rejects the user range, the copy is
 * skipped entirely and the full length is reported as uncopied.
 */
#define copy_from_user(to, from, n) \
({ \
void *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
might_sleep(); /* the copy may fault and block on user pages */ \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
/* on access_ok() failure __cu_len is still (n): nothing was copied */ \
__cu_len; \
})
/*
 * __copy_in_user: - Copy a block of data between two user-space
 * addresses, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Caller must have validated both ranges with access_ok().  Evaluates
 * to the number of bytes that could not be copied (0 on success).
 *
 * NOTE(review): the from-user invoker is reused here; presumably its
 * exception fixups cover faults on either user address -- confirm
 * against the asm copy routine.
 */
#define __copy_in_user(to, from, n) \
({ \
void __user *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
might_sleep(); /* the copy may fault and block on user pages */ \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
})
/*
 * copy_in_user: - Copy a block of data between two user-space
 * addresses.
 * @to: Destination address, in user space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Both ranges are validated: the source for reading and the
 * destination for writing.  If either check fails nothing is copied
 * and the full length is reported as uncopied.
 */
#define copy_in_user(to, from, n) \
({ \
void __user *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
might_sleep(); /* the copy may fault and block on user pages */ \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
access_ok(VERIFY_WRITE, __cu_to, __cu_len))) \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
})
  848. /*
  849. * __clear_user: - Zero a block of memory in user space, with less checking.
  850. * @to: Destination address, in user space.
  851. * @n: Number of bytes to zero.
  852. *
  853. * Zero a block of memory in user space. Caller must check
  854. * the specified block with access_ok() before calling this function.
  855. *
  856. * Returns number of bytes that could not be cleared.
  857. * On success, this will be zero.
  858. */
/*
 * Hand-marshalled call into the asm routine __bzero: the argument
 * registers are loaded explicitly so the clobber list can name exactly
 * the registers the routine touches.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
might_sleep();
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* first argument: destination address */
"move\t$5, $0\n\t"	/* second argument: hardwired to zero */
"move\t$6, %2\n\t"	/* third argument: byte count */
__MODULE_JAL(__bzero)
"move\t%0, $6"		/* __bzero leaves the residual count in $6 */
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
return res;	/* number of bytes that could not be cleared */
}
/*
 * clear_user: - Zero a block of memory in user space.
 * @addr: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Evaluates to the number of bytes that could not be cleared (0 on
 * success).  If the size is zero, or access_ok() rejects the range,
 * nothing is zeroed and __cl_size is returned unchanged.
 */
#define clear_user(addr,n) \
({ \
void __user * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && access_ok(VERIFY_WRITE, \
((unsigned long)(__cl_addr)), __cl_size)) \
__cl_size = __clear_user(__cl_addr, __cl_size); \
__cl_size; \
})
  884. /*
  885. * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
  886. * @dst: Destination address, in kernel space. This buffer must be at
  887. * least @count bytes long.
  888. * @src: Source address, in user space.
  889. * @count: Maximum number of bytes to copy, including the trailing NUL.
  890. *
  891. * Copies a NUL-terminated string from userspace to kernel space.
  892. * Caller must check the specified block with access_ok() before calling
  893. * this function.
  894. *
  895. * On success, returns the length of the string (not including the trailing
  896. * NUL).
  897. *
  898. * If access to userspace fails, returns -EFAULT (some data may have been
  899. * copied).
  900. *
  901. * If @count is smaller than the length of the string, copies @count bytes
  902. * and returns @count.
  903. */
/*
 * Unchecked variant of strncpy_from_user(): the caller must already
 * have validated @__from with access_ok().  See the header comment
 * above for the return-value contract.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
might_sleep();
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* first argument: kernel destination buffer */
"move\t$5, %2\n\t"	/* second argument: user source string */
"move\t$6, %3\n\t"	/* third argument: maximum length */
__MODULE_JAL(__strncpy_from_user_nocheck_asm)
"move\t%0, $2"		/* result is returned in $2 */
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
return res;
}
  920. /*
  921. * strncpy_from_user: - Copy a NUL terminated string from userspace.
  922. * @dst: Destination address, in kernel space. This buffer must be at
  923. * least @count bytes long.
  924. * @src: Source address, in user space.
  925. * @count: Maximum number of bytes to copy, including the trailing NUL.
  926. *
  927. * Copies a NUL-terminated string from userspace to kernel space.
  928. *
  929. * On success, returns the length of the string (not including the trailing
  930. * NUL).
  931. *
  932. * If access to userspace fails, returns -EFAULT (some data may have been
  933. * copied).
  934. *
  935. * If @count is smaller than the length of the string, copies @count bytes
  936. * and returns @count.
  937. */
/*
 * Checked variant: calls the non-"nocheck" asm helper (which
 * presumably performs its own range validation -- see the _nocheck
 * twin above).  Return-value contract is in the header comment.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
might_sleep();
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* first argument: kernel destination buffer */
"move\t$5, %2\n\t"	/* second argument: user source string */
"move\t$6, %3\n\t"	/* third argument: maximum length */
__MODULE_JAL(__strncpy_from_user_asm)
"move\t%0, $2"		/* result is returned in $2 */
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
return res;
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * Unchecked variant of strlen_user(): the caller must already have
 * validated @s with access_ok().
 */
static inline long __strlen_user(const char __user *s)
{
long res;
might_sleep();
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* argument: user string */
__MODULE_JAL(__strlen_user_nocheck_asm)
"move\t%0, $2"		/* result is returned in $2 */
: "=r" (res)
: "r" (s)
: "$2", "$4", __UA_t0, "$31");
return res;
}
  968. /*
  969. * strlen_user: - Get the size of a string in user space.
  970. * @str: The string to measure.
  971. *
  972. * Context: User context only. This function may sleep.
  973. *
  974. * Get the size of a NUL-terminated string in user space.
  975. *
  976. * Returns the size of the string INCLUDING the terminating NUL.
  977. * On exception, returns 0.
  978. *
  979. * If there is a limit on the length of a valid string, you may wish to
  980. * consider using strnlen_user() instead.
  981. */
static inline long strlen_user(const char __user *s)
{
long res;
might_sleep();	/* the asm helper may fault and block on user pages */
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* argument: user string */
__MODULE_JAL(__strlen_user_asm)	/* checked variant (non-"nocheck") */
"move\t%0, $2"		/* result is returned in $2 */
: "=r" (res)
: "r" (s)
: "$2", "$4", __UA_t0, "$31");
return res;	/* string length + 1, or 0 on exception (see header) */
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * Unchecked variant of strnlen_user(): the caller must already have
 * validated @s with access_ok().  Examines at most @n bytes.
 */
static inline long __strnlen_user(const char __user *s, long n)
{
long res;
might_sleep();
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* first argument: user string */
"move\t$5, %2\n\t"	/* second argument: maximum byte count */
__MODULE_JAL(__strnlen_user_nocheck_asm)
"move\t%0, $2"		/* result is returned in $2 */
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", __UA_t0, "$31");
return res;
}
  1010. /*
  1011. * strlen_user: - Get the size of a string in user space.
  1012. * @str: The string to measure.
  1013. *
  1014. * Context: User context only. This function may sleep.
  1015. *
  1016. * Get the size of a NUL-terminated string in user space.
  1017. *
  1018. * Returns the size of the string INCLUDING the terminating NUL.
  1019. * On exception, returns 0.
  1020. *
  1021. * If there is a limit on the length of a valid string, you may wish to
  1022. * consider using strnlen_user() instead.
  1023. */
static inline long strnlen_user(const char __user *s, long n)
{
long res;
might_sleep();	/* the asm helper may fault and block on user pages */
__asm__ __volatile__(
"move\t$4, %1\n\t"	/* first argument: user string */
"move\t$5, %2\n\t"	/* second argument: maximum byte count */
__MODULE_JAL(__strnlen_user_asm)	/* checked variant (non-"nocheck") */
"move\t%0, $2"		/* result is returned in $2 */
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", __UA_t0, "$31");
return res;	/* string length + 1, or 0 on exception */
}
  1038. struct exception_table_entry
  1039. {
  1040. unsigned long insn;
  1041. unsigned long nextinsn;
  1042. };
  1043. extern int fixup_exception(struct pt_regs *regs);
  1044. #endif /* _ASM_UACCESS_H */