gettimeofday.S
/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART	4
#else
#define LOPART	0
#endif

        .text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
        mflr    r12
  .cfi_register lr,r12

        mr      r10,r3                  /* r10 saves tv */
        mr      r11,r4                  /* r11 saves tz */
        bl      __get_datapage@local    /* get data page */
        mr      r9,r3                   /* datapage ptr in r9 */
        cmplwi  r10,0                   /* check if tv is NULL */
        beq     3f
        bl      __do_get_xsec@local     /* get xsec from tb & kernel */
        bne-    2f                      /* out of line -> do syscall */

        /* seconds are xsec >> 20 */
        rlwinm  r5,r4,12,20,31
        rlwimi  r5,r3,12,0,19
        stw     r5,TVAL32_TV_SEC(r10)
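        /* xsec is a fixed-point count of 2^-20 second units, so whole
         * seconds are xsec >> 20.  The rlwinm/rlwimi pair above builds
         * that 64-bit right shift out of 32-bit rotates: the top 12 bits
         * of the low word become the low 12 bits of the result, and the
         * low 20 bits of the high word are inserted above them,
         * e.g. xsec = 0x300000 gives tv_sec = 3.
         */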
        /* get remaining xsec and convert to usec. we scale
         * up remaining xsec by 12 bits and get the top 32 bits
         * of the multiplication
         */
        rlwinm  r5,r4,12,0,19
        lis     r6,1000000@h
        ori     r6,r6,1000000@l
        mulhwu  r5,r5,r6
        stw     r5,TVAL32_TV_USEC(r10)
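        /* In other words: the low 20 bits of xsec are the fraction of a
         * second in 2^-20 units; shifting them up by 12 turns that into a
         * 2^-32 fraction, and mulhwu keeps the top 32 bits of
         * frac * 1000000, i.e. microseconds.  For example a fraction of
         * 0x80000 (half a second) scales to 0x80000000 and yields 500000.
         */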
3:      cmplwi  r11,0                   /* check if tz is NULL */
        beq     1f
        lwz     r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
        lwz     r5,CFG_TZ_DSTTIME(r9)
        stw     r4,TZONE_TZ_MINWEST(r11)
        stw     r5,TZONE_TZ_DSTTIME(r11)

1:      mtlr    r12
        crclr   cr0*4+so
        li      r3,0
        blr
2:
        mtlr    r12
        mr      r3,r10
        mr      r4,r11
        li      r0,__NR_gettimeofday
        sc
        blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
        /* Check for supported clock IDs */
        cmpli   cr0,r3,CLOCK_REALTIME
        cmpli   cr1,r3,CLOCK_MONOTONIC
        cror    cr0*4+eq,cr0*4+eq,cr1*4+eq
        bne     cr0,99f
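        /* After the cror, cr0 has EQ set if clock_id matched either
         * supported clock, so the bne above falls back to the syscall for
         * anything else.  cr1 still holds the CLOCK_MONOTONIC comparison
         * and is reused below ("bne cr1,80f"); that only works because
         * __do_get_tspec clobbers cr0 but no other cr field.
         */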
        mflr    r12                     /* r12 saves lr */
  .cfi_register lr,r12
        mr      r11,r4                  /* r11 saves tp */
        bl      __get_datapage@local    /* get data page */
        mr      r9,r3                   /* datapage ptr in r9 */
50:     bl      __do_get_tspec@local    /* get sec/nsec from tb & kernel */
        bne     cr1,80f                 /* not monotonic -> all done */
        /*
         * CLOCK_MONOTONIC
         */

        /* now we must fix up using wall to monotonic. We need to snapshot
         * that value and do the counter trick again. Fortunately, we still
         * have the counter value in r8 that was returned by __do_get_tspec.
         * At this point, r3,r4 contain our sec/nsec values, r5 and r6
         * can be used, r7 contains NSEC_PER_SEC.
         */
        lwz     r5,WTOM_CLOCK_SEC(r9)
        lwz     r6,WTOM_CLOCK_NSEC(r9)

        /* We now have our offset in r5,r6. We create a fake dependency
         * on that value and re-check the counter
         */
        or      r0,r6,r5
        xor     r0,r0,r0
        add     r9,r9,r0
        lwz     r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
        cmpl    cr0,r8,r0               /* check if updated */
        bne-    50b
        /* Calculate and store result. Note that this mimics the C code,
         * which may cause funny results if nsec goes negative... is that
         * possible at all?
         */
        add     r3,r3,r5
        add     r4,r4,r6
        cmpw    cr0,r4,r7
        cmpwi   cr1,r4,0
        blt     1f
        subf    r4,r7,r4
        addi    r3,r3,1
1:      bge     cr1,80f
        addi    r3,r3,-1
        add     r4,r4,r7
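        /* The two branches above renormalise the result: an nsec sum of
         * NSEC_PER_SEC or more carries one second up, and a negative sum
         * (the case questioned in the comment above) borrows one second
         * back, so nsec ends up in [0, NSEC_PER_SEC) before the store.
         */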
80:     stw     r3,TSPC32_TV_SEC(r11)
        stw     r4,TSPC32_TV_NSEC(r11)

        mtlr    r12
        crclr   cr0*4+so
        li      r3,0
        blr

        /*
         * syscall fallback
         */
99:
        li      r0,__NR_clock_gettime
        sc
        blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
        /* Check for supported clock IDs */
        cmpwi   cr0,r3,CLOCK_REALTIME
        cmpwi   cr1,r3,CLOCK_MONOTONIC
        cror    cr0*4+eq,cr0*4+eq,cr1*4+eq
        bne     cr0,99f

        li      r3,0
        cmpli   cr0,r4,0
        crclr   cr0*4+so
        beqlr
        lis     r5,CLOCK_REALTIME_RES@h
        ori     r5,r5,CLOCK_REALTIME_RES@l
        stw     r3,TSPC32_TV_SEC(r4)
        stw     r5,TSPC32_TV_NSEC(r4)
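        /* r3 is zero here, so the same value serves both as the stored
         * tv_sec and as the 0 return code; CLOCK_REALTIME_RES is the
         * resolution in nanoseconds reported for both supported clocks.
         * A NULL res pointer just returns success via the beqlr above.
         */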
        blr

        /*
         * syscall fallback
         */
99:
        li      r0,__NR_clock_getres
        sc
        blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
/*
 * This is the core of gettimeofday() & friends. It returns the xsec
 * value in r3 & r4 and expects the datapage ptr (not clobbered)
 * in r9. Clobbers r0,r4,r5,r6,r7,r8.
 * When returning, r8 contains the counter value that can be reused
 * by the monotonic clock implementation.
 */
__do_get_xsec:
  .cfi_startproc
        /* Check for update count & load values. We use the low
         * order 32 bits of the update count
         */
1:      lwz     r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
        andi.   r0,r8,1                 /* pending update ? loop */
        bne-    1b
        xor     r0,r8,r8                /* create dependency */
        add     r9,r9,r0
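        /* The update count behaves like a seqlock: an odd value means the
         * kernel is rewriting the datapage, so we spin until it is even.
         * The xor/add pair computes zero but makes r9, and hence every
         * load below, data-dependent on the counter value just read;
         * that dependency presumably stands in for an explicit read
         * barrier here.
         */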
        /* Load orig stamp (offset to TB) */
        lwz     r5,CFG_TB_ORIG_STAMP(r9)
        lwz     r6,(CFG_TB_ORIG_STAMP+4)(r9)

        /* Get a stable TB value */
2:      mftbu   r3
        mftbl   r4
        mftbu   r0
        cmpl    cr0,r3,r0
        bne-    2b
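        /* Reading TBU twice around TBL catches the case where the low
         * half of the timebase wraps between the two reads: if the upper
         * half changed, the 64-bit sample would be torn, so retry.
         */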
        /* Subtract tb orig stamp. If the high part is non-zero, we jump to
         * the slow path which calls the syscall.
         * If it's ok, then we have our 32-bit tb_ticks value in r7.
         */
        subfc   r7,r6,r4
        subfe.  r0,r5,r3
        bne-    3f
        /* Load scale factor & do multiplication */
        lwz     r5,CFG_TB_TO_XS(r9)     /* load values */
        lwz     r6,(CFG_TB_TO_XS+4)(r9)
        mulhwu  r4,r7,r5
        mulhwu  r6,r7,r6
        mullw   r0,r7,r5
        addc    r6,r6,r0
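        /* This computes xsec = (tb_ticks * tb_to_xs) >> 64 with 32-bit
         * operations: tb_to_xs is a 64-bit scale factor (r5 high, r6 low),
         * so the integer part is the high half of r7*r5, while the high
         * half of r7*r6 plus the low half of r7*r5 forms the fraction
         * whose carry-out is left in XER:CA for the adde/addze below.
         */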
        /* At this point, we have the scaled xsec value in r4 + XER:CA
         * we load & add the stamp since epoch
         */
        lwz     r5,CFG_STAMP_XSEC(r9)
        lwz     r6,(CFG_STAMP_XSEC+4)(r9)
        adde    r4,r4,r6
        addze   r3,r5

        /* We now have our result in r3,r4. We create a fake dependency
         * on that result and re-check the counter
         */
        or      r6,r4,r3
        xor     r0,r6,r6
        add     r9,r9,r0
        lwz     r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
        cmpl    cr0,r8,r0               /* check if updated */
        bne-    1b
        /* Warning! The caller expects CR:EQ to be set to indicate a
         * successful calculation (so it won't fall back to the syscall
         * method). We have overridden that CR bit in the counter check,
         * but fortunately, the loop exit condition _is_ CR:EQ set, so
         * we can exit safely here. If you change this code, be careful
         * of that side effect.
         */
3:      blr
  .cfi_endproc
/*
 * This is the core of clock_gettime(). It returns the current
 * time in seconds and nanoseconds in r3 and r4.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
__do_get_tspec:
  .cfi_startproc
        /* Check for update count & load values. We use the low
         * order 32 bits of the update count
         */
1:      lwz     r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
        andi.   r0,r8,1                 /* pending update ? loop */
        bne-    1b
        xor     r0,r8,r8                /* create dependency */
        add     r9,r9,r0

        /* Load orig stamp (offset to TB) */
        lwz     r5,CFG_TB_ORIG_STAMP(r9)
        lwz     r6,(CFG_TB_ORIG_STAMP+4)(r9)

        /* Get a stable TB value */
2:      mftbu   r3
        mftbl   r4
        mftbu   r0
        cmpl    cr0,r3,r0
        bne-    2b
        /* Subtract tb orig stamp and shift left 12 bits.
         */
        subfc   r7,r6,r4
        subfe   r0,r5,r3
        slwi    r0,r0,12
        rlwimi. r0,r7,12,20,31
        slwi    r7,r7,12
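        /* Shifting the delta left by 12 means the (delta * tb_to_xs) >> 64
         * result below is no longer xsec (2^-20 s units) but a 32.32
         * fixed-point number of seconds: whole seconds in the high word,
         * a 2^-32 fraction of a second in the low word.  The rlwimi. also
         * sets cr0, letting the beq+ below skip the partial products that
         * involve the high word of the delta when it is zero.
         */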
        /* Load scale factor & do multiplication */
        lwz     r5,CFG_TB_TO_XS(r9)     /* load values */
        lwz     r6,(CFG_TB_TO_XS+4)(r9)
        mulhwu  r3,r7,r6
        mullw   r10,r7,r5
        mulhwu  r4,r7,r5
        addc    r10,r3,r10
        li      r3,0
        beq+    4f                      /* skip high part computation if 0 */
        mulhwu  r3,r0,r5
        mullw   r7,r0,r5
        mulhwu  r5,r0,r6
        mullw   r6,r0,r6
        adde    r4,r4,r7
        addze   r3,r3
        addc    r4,r4,r5
        addze   r3,r3
        addc    r10,r10,r6
4:      addze   r4,r4                   /* add in carry */
        lis     r7,NSEC_PER_SEC@h
        ori     r7,r7,NSEC_PER_SEC@l
        mulhwu  r4,r4,r7                /* convert to nanoseconds */
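        /* r3:r4 now hold the top 64 bits of the product: seconds in r3
         * (with a possible carry still pending in XER:CA) and, before the
         * mulhwu, a 2^-32 fraction of a second in r4.  Multiplying that
         * fraction by NSEC_PER_SEC and keeping the high word converts it
         * to nanoseconds, e.g. 0x80000000 (half a second) -> 500000000.
         */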
        /* At this point, we have seconds & nanoseconds since the xtime
         * stamp in r3+CA and r4. Load & add the xtime stamp.
         */
#ifdef CONFIG_PPC64
        lwz     r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
        lwz     r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
#else
        lwz     r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
        lwz     r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
#endif
        add     r4,r4,r6
        adde    r3,r3,r5

        /* We now have our result in r3,r4. We create a fake dependency
         * on that result and re-check the counter
         */
        or      r6,r4,r3
        xor     r0,r6,r6
        add     r9,r9,r0
        lwz     r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
        cmpl    cr0,r8,r0               /* check if updated */
        bne-    1b

        /* check for nanosecond overflow and adjust if necessary */
        cmpw    r4,r7
        bltlr                           /* all done if no overflow */
        subf    r4,r7,r4                /* adjust if overflow */
        addi    r3,r3,1

        blr
  .cfi_endproc