unaligned.h

/* unaligned.h: unaligned access handler
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_UNALIGNED_H
#define _ASM_UNALIGNED_H
/*
 * Unaligned accesses on uClinux can't be performed in a fault handler - the
 * CPU detects them as imprecise exceptions, making this impossible.
 *
 * With the FR451, however, they are precise, and so we used to fix them up
 * in the memory access fault handler.  However, instruction bundling makes
 * this impractical.  So now we fall back to using memcpy().
 */
#ifdef CONFIG_MMU

/*
 * The asm statement in the macros below is a way to get GCC to copy a
 * value from one variable to another without having any clue it's
 * actually doing so, so that it won't have any idea that the values
 * in the two variables are related.
 */
#define get_unaligned(ptr) ({                                   \
        typeof((*(ptr))) __x;                                   \
        void *__ptrcopy;                                        \
        asm("" : "=r" (__ptrcopy) : "0" (ptr));                 \
        memcpy(&__x, __ptrcopy, sizeof(*(ptr)));                \
        __x;                                                    \
})

#define put_unaligned(val, ptr) ({                              \
        typeof((*(ptr))) __x = (val);                           \
        void *__ptrcopy;                                        \
        asm("" : "=r" (__ptrcopy) : "0" (ptr));                 \
        memcpy(__ptrcopy, &__x, sizeof(*(ptr)));                \
})
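
/*
 * Usage sketch (illustrative only - read_len_field() is a hypothetical
 * helper, not something this header defines): fetching a 32-bit field
 * from an arbitrarily aligned offset in a byte buffer.  The empty asm
 * above launders the pointer so that GCC cannot prove its alignment and
 * fold the memcpy() back into a single, potentially faulting, word load:
 *
 *      static inline u32 read_len_field(const u8 *buf, unsigned int off)
 *      {
 *              return get_unaligned((const u32 *) (buf + off));
 *      }
 */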
extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);

#else
#define get_unaligned(ptr)                                              \
({                                                                      \
        typeof(*(ptr)) x;                                               \
        const char *__p = (const char *) (ptr);                         \
                                                                        \
        switch (sizeof(x)) {                                            \
        case 1:                                                         \
                x = *(ptr);                                             \
                break;                                                  \
        case 2:                                                         \
        {                                                               \
                uint8_t a;                                              \
                asm("   ldub%I2         %M2,%0          \n"             \
                    "   ldub%I3.p       %M3,%1          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%1,%0        \n"             \
                    : "=&r"(x), "=&r"(a)                                \
                    : "m"(__p[0]), "m"(__p[1])                          \
                    );                                                  \
                break;                                                  \
        }                                                               \
                                                                        \
        case 4:                                                         \
        {                                                               \
                uint8_t a;                                              \
                asm("   ldub%I2         %M2,%0          \n"             \
                    "   ldub%I3.p       %M3,%1          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%1,%0        \n"             \
                    "   ldub%I4.p       %M4,%1          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%1,%0        \n"             \
                    "   ldub%I5.p       %M5,%1          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%1,%0        \n"             \
                    : "=&r"(x), "=&r"(a)                                \
                    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]) \
                    );                                                  \
                break;                                                  \
        }                                                               \
                                                                        \
        case 8:                                                         \
        {                                                               \
                union { uint64_t x; u32 y[2]; } z;                      \
                uint8_t a;                                              \
                asm("   ldub%I3         %M3,%0          \n"             \
                    "   ldub%I4.p       %M4,%2          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%2,%0        \n"             \
                    "   ldub%I5.p       %M5,%2          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%2,%0        \n"             \
                    "   ldub%I6.p       %M6,%2          \n"             \
                    "   slli            %0,#8,%0        \n"             \
                    "   or              %0,%2,%0        \n"             \
                    "   ldub%I7         %M7,%1          \n"             \
                    "   ldub%I8.p       %M8,%2          \n"             \
                    "   slli            %1,#8,%1        \n"             \
                    "   or              %1,%2,%1        \n"             \
                    "   ldub%I9.p       %M9,%2          \n"             \
                    "   slli            %1,#8,%1        \n"             \
                    "   or              %1,%2,%1        \n"             \
                    "   ldub%I10.p      %M10,%2         \n"             \
                    "   slli            %1,#8,%1        \n"             \
                    "   or              %1,%2,%1        \n"             \
                    : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a)            \
                    : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]), \
                      "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7]) \
                    );                                                  \
                x = z.x;                                                \
                break;                                                  \
        }                                                               \
                                                                        \
        default:                                                        \
                x = 0;                                                  \
                BUG();                                                  \
                break;                                                  \
        }                                                               \
                                                                        \
        x;                                                              \
})
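
/*
 * For reference, the 4-byte case above amounts to this plain C (FRV is
 * big-endian, so bytes are accumulated most-significant first; ldub
 * loads an unsigned byte, slli is a left shift, and the .p suffixes
 * just let the CPU bundle a load with the following ALU instruction):
 *
 *      const u8 *p = (const u8 *) __p;
 *      x = ((u32) p[0] << 24) | ((u32) p[1] << 16) |
 *          ((u32) p[2] << 8)  |  (u32) p[3];
 */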
#define put_unaligned(val, ptr)                                 \
do {                                                            \
        char *__p = (char *) (ptr);                             \
        int x;                                                  \
                                                                \
        switch (sizeof(*(ptr))) {                               \
        case 2:                                                 \
        {                                                       \
                asm("   stb%I1.p        %0,%M1          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I2          %0,%M2          \n"     \
                    : "=r"(x), "=m"(__p[1]), "=m"(__p[0])       \
                    : "0"(val)                                  \
                    );                                          \
                break;                                          \
        }                                                       \
                                                                \
        case 4:                                                 \
        {                                                       \
                asm("   stb%I1.p        %0,%M1          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I2.p        %0,%M2          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I3.p        %0,%M3          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I4          %0,%M4          \n"     \
                    : "=r"(x), "=m"(__p[3]), "=m"(__p[2]),      \
                      "=m"(__p[1]), "=m"(__p[0])                \
                    : "0"(val)                                  \
                    );                                          \
                break;                                          \
        }                                                       \
                                                                \
        case 8:                                                 \
        {                                                       \
                uint32_t __high, __low;                         \
                __high = (uint64_t) (val) >> 32;                \
                __low = (val) & 0xffffffff;                     \
                asm("   stb%I2.p        %0,%M2          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I3.p        %0,%M3          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I4.p        %0,%M4          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I5.p        %0,%M5          \n"     \
                    "   srli            %0,#8,%0        \n"     \
                    "   stb%I6.p        %1,%M6          \n"     \
                    "   srli            %1,#8,%1        \n"     \
                    "   stb%I7.p        %1,%M7          \n"     \
                    "   srli            %1,#8,%1        \n"     \
                    "   stb%I8.p        %1,%M8          \n"     \
                    "   srli            %1,#8,%1        \n"     \
                    "   stb%I9          %1,%M9          \n"     \
                    : "=&r"(__low), "=&r"(__high),              \
                      "=m"(__p[7]), "=m"(__p[6]),               \
                      "=m"(__p[5]), "=m"(__p[4]),               \
                      "=m"(__p[3]), "=m"(__p[2]),               \
                      "=m"(__p[1]), "=m"(__p[0])                \
                    : "0"(__low), "1"(__high)                   \
                    );                                          \
                break;                                          \
        }                                                       \
                                                                \
        default:                                                \
                *(ptr) = (val);                                 \
                break;                                          \
        }                                                       \
} while (0)
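
/*
 * Likewise, the 2-byte store case above is equivalent to this plain C,
 * and the wider cases just extend the same most-significant-byte-first
 * pattern (the 8-byte case first splits the value into 32-bit
 * __high/__low halves):
 *
 *      __p[0] = val >> 8;
 *      __p[1] = val;
 */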
#endif /* !CONFIG_MMU */

#endif /* _ASM_UNALIGNED_H */