/*
 *  arch/s390/kernel/vtime.c
 *    Virtual cpu timer based timer functions.
 *
 *  S390 version
 *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>

static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
};

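/* Read the current (down-counting) CPU timer value without modifying it. */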
static inline __u64 get_vtimer(void)
{
	__u64 timer;

	asm volatile("STPT %0" : "=m" (timer));
	return timer;
}

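/*
 * Program the CPU timer with a new expiry value and account the time
 * that elapsed on the old value as system time.
 */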
static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile ("	STPT %0\n"	/* Store current cpu timer value */
		      "	SPT %1"		/* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("	STPT %0\n"	/* Store current cpu timer value */
		      "	STCK %1"	/* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
			"=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}
}

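/*
 * Called on a task switch: finish the accounting for the outgoing task
 * and load the accumulated times of the incoming task into the lowcore.
 */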
void account_vtime(struct task_struct *prev, struct task_struct *next)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(next);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

void account_process_tick(struct task_struct *tsk, int user_tick)
{
	do_account_vtime(tsk, HARDIRQ_OFFSET);
}

/*
 * Account the elapsed cpu timer delta of the current task as
 * system time.
 */
void account_system_vtime(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	__u64 timer, system;

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

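/*
 * Called when a cpu leaves the enabled wait state: account the time
 * spent with the wait psw loaded as idle time and restart the CPU timer.
 */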
void vtime_start_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
	__u64 idle_time, expires;

	/* Account time spent with enabled wait psw loaded as idle time. */
	idle_time = S390_lowcore.int_clock - idle->idle_enter;
	account_idle_time(idle_time);
	S390_lowcore.steal_timer +=
		idle->idle_enter - S390_lowcore.last_update_clock;
	S390_lowcore.last_update_clock = S390_lowcore.int_clock;

	/* Account system time spent going idle. */
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
	S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;

	/* Restart vtime CPU timer */
	if (vq->do_spt) {
		/* Program old expire value but first save progress. */
		expires = vq->idle - S390_lowcore.async_enter_timer;
		expires += get_vtimer();
		set_vtimer(expires);
	} else {
		/* Don't account the CPU timer delta while the cpu was idle. */
		vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
	}

	spin_lock(&idle->lock);
	idle->idle_time += idle_time;
	idle->idle_enter = 0ULL;
	idle->idle_count++;
	spin_unlock(&idle->lock);
}

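/*
 * Called from the idle loop: save the CPU timer and set up an enabled
 * wait psw. The accounting is completed by vtime_start_cpu when the
 * next interrupt wakes the cpu up again.
 */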
void vtime_stop_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
	psw_t psw;

	/* Wait for external, I/O or machine check interrupt. */
	psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;

	/* Check if the CPU timer needs to be reprogrammed. */
	if (vq->do_spt) {
		__u64 vmax = VTIMER_MAX_SLICE;
		/*
		 * The inline assembly is equivalent to
		 *	vq->idle = get_cpu_timer();
		 *	set_cpu_timer(VTIMER_MAX_SLICE);
		 *	idle->idle_enter = get_clock();
		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
		 *			PSW_MASK_IO | PSW_MASK_EXT);
		 * The difference is that the inline assembly makes sure that
		 * the last three instructions are stpt, stck and lpsw, in that
		 * order. This is done to increase the precision.
		 */
		asm volatile(
#ifndef CONFIG_64BIT
			"	basr	1,0\n"
			"0:	ahi	1,1f-0b\n"
			"	st	1,4(%2)\n"
#else /* CONFIG_64BIT */
			"	larl	1,1f\n"
			"	stg	1,8(%2)\n"
#endif /* CONFIG_64BIT */
			"	stpt	0(%4)\n"
			"	spt	0(%5)\n"
			"	stck	0(%3)\n"
#ifndef CONFIG_64BIT
			"	lpsw	0(%2)\n"
#else /* CONFIG_64BIT */
			"	lpswe	0(%2)\n"
#endif /* CONFIG_64BIT */
			"1:"
			: "=m" (idle->idle_enter), "=m" (vq->idle)
			: "a" (&psw), "a" (&idle->idle_enter),
			  "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
			: "memory", "cc", "1");
	} else {
		/*
		 * The inline assembly is equivalent to
		 *	vq->idle = get_cpu_timer();
		 *	idle->idle_enter = get_clock();
		 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
		 *			PSW_MASK_IO | PSW_MASK_EXT);
		 * The difference is that the inline assembly makes sure that
		 * the last three instructions are stpt, stck and lpsw, in that
		 * order. This is done to increase the precision.
		 */
		asm volatile(
#ifndef CONFIG_64BIT
			"	basr	1,0\n"
			"0:	ahi	1,1f-0b\n"
			"	st	1,4(%2)\n"
#else /* CONFIG_64BIT */
			"	larl	1,1f\n"
			"	stg	1,8(%2)\n"
#endif /* CONFIG_64BIT */
			"	stpt	0(%4)\n"
			"	stck	0(%3)\n"
#ifndef CONFIG_64BIT
			"	lpsw	0(%2)\n"
#else /* CONFIG_64BIT */
			"	lpswe	0(%2)\n"
#endif /* CONFIG_64BIT */
			"1:"
			: "=m" (idle->idle_enter), "=m" (vq->idle)
			: "a" (&psw), "a" (&idle->idle_enter),
			  "a" (&vq->idle), "m" (psw)
			: "memory", "cc", "1");
	}
}

cputime64_t s390_get_idle_time(int cpu)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;

	idle = &per_cpu(s390_idle, cpu);
	spin_lock(&idle->lock);
	now = get_clock();
	idle_time = 0;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time = now - idle_enter;
	spin_unlock(&idle->lock);
	return idle_time;
}

/*
 * Sorted add to a list. The list is searched linearly until the first
 * element with a bigger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;

	if (list_empty(cb_list))
		return;

	vq = &__get_cpu_var(virt_cpu_timer);

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		list_del_init(&event->entry);
		(event->function)(event->data);
		if (event->interval) {
			/* Recharge interval timer */
			event->expires = event->interval + vq->elapsed;
			spin_lock(&vq->lock);
			list_add_sorted(event, &vq->list);
			spin_unlock(&vq->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(__u16 error_code)
{
	struct vtimer_queue *vq;
	struct vtimer_list *event, *tmp;
	struct list_head cb_list;	/* the callback queue */
	__u64 elapsed, next;

	INIT_LIST_HEAD(&cb_list);
	vq = &__get_cpu_var(virt_cpu_timer);

	/* walk timer list, fire all expired events */
	spin_lock(&vq->lock);

	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
	BUG_ON((s64) elapsed < 0);
	vq->elapsed = 0;
	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
		if (event->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&event->entry, &cb_list);
		else
			event->expires -= elapsed;
	}
	spin_unlock(&vq->lock);

	vq->do_spt = list_empty(&cb_list);
	do_callbacks(&cb_list);

	/* next event is first in list */
	next = VTIMER_MAX_SLICE;
	spin_lock(&vq->lock);
	if (!list_empty(&vq->list)) {
		event = list_first_entry(&vq->list, struct vtimer_list, entry);
		next = event->expires;
	} else
		vq->do_spt = 0;
	spin_unlock(&vq->lock);
	/*
	 * To improve precision add the time spent by the
	 * interrupt handler to the elapsed time.
	 * Note: CPU timer counts down and we got an interrupt,
	 *	 the current content is negative.
	 */
	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
	set_vtimer(next - elapsed);
	vq->timer = next - elapsed;
	vq->elapsed = elapsed;
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

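/*
 * Note on bookkeeping: a timer queued on a vtimer_queue stores its
 * expiry relative to the time already consumed on that queue, which
 * is tracked in vq->elapsed. internal_add_vtimer and the interrupt
 * handler keep the stored values and vq->elapsed consistent.
 */
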
/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	__u64 left, expires;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	BUG_ON(timer->cpu != smp_processor_id());

	if (list_empty(&vq->list)) {
		/* First timer on this cpu, just program it. */
		list_add(&timer->entry, &vq->list);
		set_vtimer(timer->expires);
		vq->timer = timer->expires;
		vq->elapsed = 0;
	} else {
		/* Check progress of old timers. */
		expires = timer->expires;
		left = get_vtimer();
		if (likely((s64) expires < (s64) left)) {
			/* The new timer expires before the current timer. */
			set_vtimer(expires);
			vq->elapsed += vq->timer - left;
			vq->timer = expires;
		} else {
			vq->elapsed += vq->timer - left;
			vq->timer = left;
		}
		/* Insert new timer into per cpu list. */
		timer->expires += vq->elapsed;
		list_add_sorted(timer, &vq->list);
	}

	spin_unlock_irqrestore(&vq->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

static inline void prepare_vtimer(struct vtimer_list *timer)
{
	BUG_ON(!timer->function);
	BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
	BUG_ON(vtimer_pending(timer));
	timer->cpu = get_cpu();
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = 0;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

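/*
 * A minimal usage sketch; "my_timer" and "my_timer_fn" are hypothetical
 * names, not part of this file. The expiry value is in CPU timer units
 * (same format as the TOD clock, where bit 51 corresponds to roughly
 * one microsecond) and must be nonzero and <= VTIMER_MAX_SLICE.
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		// called from the cpu timer interrupt on the owning cpu
 *	}
 *
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = my_timer_fn;
 *	my_timer.data = 0;
 *	my_timer.expires = 1ULL << 32;
 *	add_virt_timer(&my_timer);
 */
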
/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;
	prepare_vtimer(timer);
	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

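/*
 * Common worker for mod_virt_timer() and mod_virt_timer_periodic():
 * (re-)program a timer with a new expiry value.
 */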
int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
{
	struct vtimer_queue *vq;
	unsigned long flags;
	int cpu;

	BUG_ON(!timer->function);
	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vq = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vq->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vq->lock, flags);

		if (periodic)
			timer->interval = expires;
		else
			timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	BUG_ON(timer->cpu != cpu);

	list_del_init(&timer->entry);
	timer->expires = expires;
	if (periodic)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vq->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is running.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is running.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vq;

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	vq = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vq->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	spin_unlock_irqrestore(&vq->lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
	struct vtimer_queue *vq;

	/* initialize per cpu vtimer structure */
	vq = &__get_cpu_var(virt_cpu_timer);
	INIT_LIST_HEAD(&vq->list);
	spin_lock_init(&vq->lock);

	/* enable cpu timer interrupts */
	__ctl_set_bit(0,10);
}

void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
		panic("Couldn't request external interrupt 0x1005");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
}