/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f			/* tv is NULL, only fill tz */
	bl	__do_get_xsec@local	/* get xsec from tb & kernel */
	bne-	2f			/* out of line -> do syscall */
	/* seconds are xsec >> 20 */
	stw	r5,TVAL32_TV_SEC(r10)
	/* get remaining xsec and convert to usec. we scale
	 * up remaining xsec by 12 bits and get the top 32 bits
	 * of the multiplication
	 */
	stw	r5,TVAL32_TV_USEC(r10)
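	/* As a rough C sketch (names illustrative; it assumes xsec is the
	 * fixed-point count of 2^-20 seconds described above), the two
	 * stores above amount to:
	 *
	 *	#include <stdint.h>
	 *
	 *	void xsec_to_tv(uint64_t xsec, uint32_t *sec, uint32_t *usec)
	 *	{
	 *		*sec  = (uint32_t)(xsec >> 20);
	 *		// widen the 20 fractional bits to a 32-bit fraction,
	 *		// then keep the top 32 bits of the multiply by 10^6
	 *		*usec = (uint32_t)((((xsec & 0xfffff) << 12) * 1000000ULL) >> 32);
	 *	}
	 */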
3:	cmplwi	r11,0			/* check if tz is NULL */
	lwz	r4,CFG_TZ_MINUTEWEST(r9)	/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)
	li	r0,__NR_gettimeofday
V_FUNCTION_END(__kernel_gettimeofday)
/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
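	/* The cror ORs the two "equal" bits, so cr0.eq is now set iff
	 * clock_id == CLOCK_REALTIME || clock_id == CLOCK_MONOTONIC and a
	 * single branch can send unsupported clock IDs to the syscall
	 * fallback.
	 */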
	mflr	r12			/* r12 saves lr */
	mr	r10,r3			/* r10 saves id */
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	beq	cr1,50f			/* if monotonic -> jump there */
	bl	__do_get_xsec@local	/* get xsec from tb & kernel */
	bne-	98f			/* out of line -> do syscall */

	/* seconds are xsec >> 20 */
	rlwinm	r5,r4,12,20,31
	stw	r5,TSPC32_TV_SEC(r11)
	/* get remaining xsec and convert to nsec. we scale
	 * up remaining xsec by 12 bits and get the top 32 bits
	 * of the multiplication, then we multiply by 1000
	 */
	stw	r5,TSPC32_TV_NSEC(r11)
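	/* Roughly, in C (a sketch; same fixed-point assumption as in the
	 * gettimeofday path above):
	 *
	 *	nsec = ((((xsec & 0xfffff) << 12) * 1000000ULL) >> 32) * 1000;
	 */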
50:	bl	__do_get_xsec@local	/* get xsec from tb & kernel */
	bne-	98f			/* out of line -> do syscall */

	/* seconds are xsec >> 20 */
	rlwinm	r6,r4,12,20,31
	/* get remaining xsec and convert to nsec. we scale
	 * up remaining xsec by 12 bits and get the top 32 bits
	 * of the multiplication, then we multiply by 1000
	 */
	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_xsec.
	 * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5
	 * can be used as scratch.
	 */
	lwz	r3,WTOM_CLOCK_SEC(r9)
	lwz	r4,WTOM_CLOCK_NSEC(r9)
	/* We now have our result in r3,r4. We create a fake dependency
	 * on that result and re-check the counter
	 */
#ifdef CONFIG_PPC64
	lwz	r0,(CFG_TB_UPDATE_COUNT+4)(r9)
#else
	lwz	r0,(CFG_TB_UPDATE_COUNT)(r9)
#endif
	cmpl	cr0,r8,r0		/* check if updated */
	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all?
	 */
	lis	r5,NSEC_PER_SEC@h
	ori	r5,r5,NSEC_PER_SEC@l
1:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)
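	/* The arithmetic above is roughly the following C (a sketch; the
	 * elided compare/branch sequence performs the normalisation):
	 *
	 *	sec  += wtom_sec;
	 *	nsec += wtom_nsec;
	 *	if (nsec >= NSEC_PER_SEC) {
	 *		nsec -= NSEC_PER_SEC;
	 *		sec++;
	 *	} else if (nsec < 0) {
	 *		nsec += NSEC_PER_SEC;
	 *		sec--;
	 *	}
	 */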
	li	r0,__NR_clock_gettime
V_FUNCTION_END(__kernel_clock_gettime)
/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
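	/* In C terms this fast path is roughly (a sketch; it assumes r3 was
	 * zeroed earlier on this path as the return value):
	 *
	 *	res->tv_sec  = 0;
	 *	res->tv_nsec = CLOCK_REALTIME_RES;
	 *	return 0;
	 */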
	li	r0,__NR_clock_getres
V_FUNCTION_END(__kernel_clock_getres)
/*
 * This is the core of gettimeofday() & friends. It returns the xsec
 * value in r3 & r4 and expects the datapage ptr (non-clobbered)
 * in r9. Clobbers r0,r4,r5,r6,r7,r8.
 * When returning, r8 contains the counter value that can be reused
 * by the monotonic clock implementation.
 */
__do_get_xsec:
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
#ifdef CONFIG_PPC64
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+4)(r9)
#else
1:	lwz	r8,(CFG_TB_UPDATE_COUNT)(r9)
#endif
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0
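	/* The xor produces zero but makes the datapage pointer data-dependent
	 * on the just-loaded counter, so the value loads below cannot be
	 * satisfied before the counter read; a cheap ordering trick that
	 * avoids a full sync.
	 */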
	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
	/* Subtract tb orig stamp. If the high part is non-zero, we jump to
	 * the slow path which calls the syscall.
	 * If it's ok, then we have our 32-bit tb_ticks value in r7
	 */
	/* Load scale factor & do multiplication */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	lwz	r6,(CFG_TB_TO_XS+4)(r9)
	/* At this point, we have the scaled xsec value in r4 + XER:CA
	 * we load & add the stamp since epoch
	 */
	lwz	r5,CFG_STAMP_XSEC(r9)
	lwz	r6,(CFG_STAMP_XSEC+4)(r9)
	/* We now have our result in r3,r4. We create a fake dependency
	 * on that result and re-check the counter
	 */
#ifdef CONFIG_PPC64
	lwz	r0,(CFG_TB_UPDATE_COUNT+4)(r9)
#else
	lwz	r0,(CFG_TB_UPDATE_COUNT)(r9)
#endif
	cmpl	cr0,r8,r0		/* check if updated */
	/* Warning! The caller expects CR:EQ to be set to indicate a
	 * successful calculation (so it won't fall back to the syscall
	 * method). We have overridden that CR bit in the counter check,
	 * but fortunately, the loop exit condition _is_ CR:EQ set, so
	 * we can exit safely here. If you change this code, be careful
	 * of that side effect.
	 */
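/*
 * Taken together, __do_get_xsec is roughly the following C (a sketch only:
 * the struct and field names mirror the CFG_* offsets used above, and
 * get_tb()/barrier() stand in for the timebase read and the fake-dependency
 * ordering trick):
 *
 *	#include <stdint.h>
 *
 *	struct vdso_datapage {		// illustrative layout
 *		uint32_t tb_update_count;	// low word of the update count
 *		uint64_t tb_orig_stamp;
 *		uint64_t tb_to_xs;		// xsec per tick, 0.64 fixed point
 *		uint64_t stamp_xsec;
 *	};
 *
 *	uint64_t do_get_xsec(const struct vdso_datapage *d)
 *	{
 *		uint32_t seq;
 *		uint64_t ticks, xsec;
 *
 *		do {
 *			// an odd count means a kernel update is in
 *			// progress, so spin until it completes
 *			while ((seq = d->tb_update_count) & 1)
 *				;
 *			barrier();
 *			ticks = get_tb() - d->tb_orig_stamp;
 *			// ticks must fit in 32 bits, else fall back to the
 *			// syscall; scale and add the stamp since epoch
 *			xsec = d->stamp_xsec +
 *			       (uint64_t)(((unsigned __int128)ticks * d->tb_to_xs) >> 64);
 *			barrier();
 *		} while (seq != d->tb_update_count);	// retry if it changed
 *		return xsec;
 *	}
 */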