  1. /* unaligned.h: unaligned access handler
  2. *
  3. * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #ifndef _ASM_UNALIGNED_H
  12. #define _ASM_UNALIGNED_H
  13. #include <linux/config.h>
/*
 * Unaligned accesses on uClinux can't be performed in a fault handler - the
 * CPU detects them as imprecise exceptions making this impossible.
 *
 * With the FR451, however, they are precise, and so we used to fix them up in
 * the memory access fault handler.  However, instruction bundling makes this
 * impractical.  So, now we fall back to using memcpy.
 */
  22. #ifdef CONFIG_MMU
/*
 * The asm statement in the macros below is a way to get GCC to copy a
 * value from one variable to another without having any clue it's
 * actually doing so, so that it won't have any idea that the values
 * in the two variables are related.
 */

/* Read a value of type *ptr from a possibly misaligned address.
 *
 * The empty asm "launders" the pointer: the "0" constraint ties the
 * input to the same register as the __ptrcopy output, so the copy is
 * free, but GCC can no longer prove __ptrcopy aliases *ptr and thus
 * cannot optimise the memcpy into a direct (potentially faulting)
 * aligned load.  Evaluates ptr more than once only inside sizeof,
 * which does not evaluate its operand.
 */
#define get_unaligned(ptr) ({				\
	typeof((*(ptr))) __x;				\
	void *__ptrcopy;				\
	asm("" : "=r" (__ptrcopy) : "0" (ptr));		\
	memcpy(&__x, __ptrcopy, sizeof(*(ptr)));	\
	__x;						\
})
/* Write val (converted to typeof(*ptr)) to a possibly misaligned address.
 *
 * Same pointer-laundering trick as get_unaligned above: the empty asm
 * hides the relationship between ptr and __ptrcopy, so GCC performs the
 * store through memcpy byte-wise instead of folding it into a direct
 * (potentially faulting) aligned store.
 */
#define put_unaligned(val, ptr) ({			\
	typeof((*(ptr))) __x = (val);			\
	void *__ptrcopy;				\
	asm("" : "=r" (__ptrcopy) : "0" (ptr));		\
	memcpy(__ptrcopy, &__x, sizeof(*(ptr)));	\
})
  42. extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);
  43. #else
/*
 * Read a value of type *ptr from a possibly misaligned address.
 *
 * The value is assembled from single-byte ldub loads: each previously
 * gathered byte is shifted left by 8 before the next is or'ed in, so
 * __p[0] ends up most significant (big-endian assembly order).  %In/%Mn
 * are operand-modifier spellings for the "m" byte inputs; the ".p"
 * suffix presumably requests instruction packing/bundling —
 * NOTE(review): confirm against the FR-V ISA manual.
 */
#define get_unaligned(ptr)						\
({									\
	typeof(*(ptr)) x;						\
	const char *__p = (const char *) (ptr);				\
									\
	switch (sizeof(x)) {						\
	case 1:								\
		/* a single byte cannot be misaligned */		\
		x = *(ptr);						\
		break;							\
	case 2:								\
	{								\
		uint8_t a;						\
		asm("	ldub%I2		%M2,%0		\n"		\
		    "	ldub%I3.p	%M3,%1		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%1,%0	\n"		\
		    : "=&r"(x), "=&r"(a)				\
		    : "m"(__p[0]), "m"(__p[1])				\
		    );							\
		break;							\
	}								\
									\
	case 4:								\
	{								\
		uint8_t a;						\
		asm("	ldub%I2		%M2,%0		\n"		\
		    "	ldub%I3.p	%M3,%1		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%1,%0	\n"		\
		    "	ldub%I4.p	%M4,%1		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%1,%0	\n"		\
		    "	ldub%I5.p	%M5,%1		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%1,%0	\n"		\
		    : "=&r"(x), "=&r"(a)				\
		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3])\
		    );							\
		break;							\
	}								\
									\
	case 8:								\
	{								\
		/* the two 32-bit halves are built independently in	\
		 * z.y[0] (high) and z.y[1] (low), then read back as	\
		 * one uint64_t through the union */			\
		union { uint64_t x; u32 y[2]; } z;			\
		uint8_t a;						\
		asm("	ldub%I3		%M3,%0		\n"		\
		    "	ldub%I4.p	%M4,%2		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%2,%0	\n"		\
		    "	ldub%I5.p	%M5,%2		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%2,%0	\n"		\
		    "	ldub%I6.p	%M6,%2		\n"		\
		    "	slli		%0,#8,%0	\n"		\
		    "	or		%0,%2,%0	\n"		\
		    "	ldub%I7		%M7,%1		\n"		\
		    "	ldub%I8.p	%M8,%2		\n"		\
		    "	slli		%1,#8,%1	\n"		\
		    "	or		%1,%2,%1	\n"		\
		    "	ldub%I9.p	%M9,%2		\n"		\
		    "	slli		%1,#8,%1	\n"		\
		    "	or		%1,%2,%1	\n"		\
		    "	ldub%I10.p	%M10,%2		\n"		\
		    "	slli		%1,#8,%1	\n"		\
		    "	or		%1,%2,%1	\n"		\
		    : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a)		\
		    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]),	\
		      "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7])	\
		    );							\
		x = z.x;						\
		break;							\
	}								\
									\
	default:							\
		/* unsupported access size */				\
		x = 0;							\
		BUG();							\
		break;							\
	}								\
									\
	x;								\
})
  125. #define put_unaligned(val, ptr) \
  126. do { \
  127. char *__p = (char *) (ptr); \
  128. int x; \
  129. \
  130. switch (sizeof(*ptr)) { \
  131. case 2: \
  132. { \
  133. asm(" stb%I1.p %0,%M1 \n" \
  134. " srli %0,#8,%0 \n" \
  135. " stb%I2 %0,%M2 \n" \
  136. : "=r"(x), "=m"(__p[1]), "=m"(__p[0]) \
  137. : "0"(val) \
  138. ); \
  139. break; \
  140. } \
  141. \
  142. case 4: \
  143. { \
  144. asm(" stb%I1.p %0,%M1 \n" \
  145. " srli %0,#8,%0 \n" \
  146. " stb%I2.p %0,%M2 \n" \
  147. " srli %0,#8,%0 \n" \
  148. " stb%I3.p %0,%M3 \n" \
  149. " srli %0,#8,%0 \n" \
  150. " stb%I4 %0,%M4 \n" \
  151. : "=r"(x), "=m"(__p[3]), "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0]) \
  152. : "0"(val) \
  153. ); \
  154. break; \
  155. } \
  156. \
  157. case 8: \
  158. { \
  159. uint32_t __high, __low; \
  160. __high = (uint64_t)val >> 32; \
  161. __low = val & 0xffffffff; \
  162. asm(" stb%I2.p %0,%M2 \n" \
  163. " srli %0,#8,%0 \n" \
  164. " stb%I3.p %0,%M3 \n" \
  165. " srli %0,#8,%0 \n" \
  166. " stb%I4.p %0,%M4 \n" \
  167. " srli %0,#8,%0 \n" \
  168. " stb%I5.p %0,%M5 \n" \
  169. " srli %0,#8,%0 \n" \
  170. " stb%I6.p %1,%M6 \n" \
  171. " srli %1,#8,%1 \n" \
  172. " stb%I7.p %1,%M7 \n" \
  173. " srli %1,#8,%1 \n" \
  174. " stb%I8.p %1,%M8 \n" \
  175. " srli %1,#8,%1 \n" \
  176. " stb%I9 %1,%M9 \n" \
  177. : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]), \
  178. "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]), \
  179. "=m"(__p[1]), "=m"(__p[0]) \
  180. : "0"(__low), "1"(__high) \
  181. ); \
  182. break; \
  183. } \
  184. \
  185. default: \
  186. *(ptr) = (val); \
  187. break; \
  188. } \
  189. } while(0)
  190. #endif
  191. #endif